1503 files changed, 63544 insertions, 69579 deletions
diff --git a/Documentation/ABI/removed/ip_queue b/Documentation/ABI/removed/ip_queue new file mode 100644 index 00000000000..3243613bc2d --- /dev/null +++ b/Documentation/ABI/removed/ip_queue @@ -0,0 +1,9 @@ +What: ip_queue +Date: finally removed in kernel v3.5.0 +Contact: Pablo Neira Ayuso <pablo@netfilter.org> +Description: + ip_queue has been replaced by nfnetlink_queue which provides + more advanced queueing mechanism to user-space. The ip_queue + module was already announced to become obsolete years ago. + +Users: diff --git a/Documentation/ABI/testing/sysfs-class-net-mesh b/Documentation/ABI/testing/sysfs-class-net-mesh index b218e0f8bdb..c81fe89c4c4 100644 --- a/Documentation/ABI/testing/sysfs-class-net-mesh +++ b/Documentation/ABI/testing/sysfs-class-net-mesh @@ -14,6 +14,15 @@ Description: mesh will be sent using multiple interfaces at the same time (if available). +What: /sys/class/net/<mesh_iface>/mesh/bridge_loop_avoidance +Date: November 2011 +Contact: Simon Wunderlich <siwu@hrz.tu-chemnitz.de> +Description: + Indicates whether the bridge loop avoidance feature + is enabled. This feature detects and avoids loops + between the mesh and devices bridged with the soft + interface <mesh_iface>. + What: /sys/class/net/<mesh_iface>/mesh/fragmentation Date: October 2010 Contact: Andreas Langer <an.langer@gmx.de> diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl index c5ac6929c41..f3e214f9e25 100644 --- a/Documentation/DocBook/80211.tmpl +++ b/Documentation/DocBook/80211.tmpl @@ -516,7 +516,7 @@ !Finclude/net/mac80211.h ieee80211_start_tx_ba_cb_irqsafe !Finclude/net/mac80211.h ieee80211_stop_tx_ba_session !Finclude/net/mac80211.h ieee80211_stop_tx_ba_cb_irqsafe -!Finclude/net/mac80211.h rate_control_changed +!Finclude/net/mac80211.h ieee80211_rate_control_changed !Finclude/net/mac80211.h ieee80211_tx_rate_control !Finclude/net/mac80211.h rate_control_send_low </chapter> diff --git a/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt b/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt new file mode 100644 index 00000000000..79384113c2b --- /dev/null +++ b/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt @@ -0,0 +1,127 @@ +Properties for an MDIO bus multiplexer/switch controlled by GPIO pins. + +This is a special case of a MDIO bus multiplexer. One or more GPIO +lines are used to control which child bus is connected. + +Required properties in addition to the generic multiplexer properties: + +- compatible : mdio-mux-gpio. +- gpios : GPIO specifiers for each GPIO line. One or more must be specified. + + +Example : + + /* The parent MDIO bus. */ + smi1: mdio@1180000001900 { + compatible = "cavium,octeon-3860-mdio"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x11800 0x00001900 0x0 0x40>; + }; + + /* + An NXP sn74cbtlv3253 dual 1-of-4 switch controlled by a + pair of GPIO lines. Child busses 2 and 3 populated with 4 + PHYs each. 
+ */ + mdio-mux { + compatible = "mdio-mux-gpio"; + gpios = <&gpio1 3 0>, <&gpio1 4 0>; + mdio-parent-bus = <&smi1>; + #address-cells = <1>; + #size-cells = <0>; + + mdio@2 { + reg = <2>; + #address-cells = <1>; + #size-cells = <0>; + + phy11: ethernet-phy@1 { + reg = <1>; + compatible = "marvell,88e1149r"; + marvell,reg-init = <3 0x10 0 0x5777>, + <3 0x11 0 0x00aa>, + <3 0x12 0 0x4105>, + <3 0x13 0 0x0a60>; + interrupt-parent = <&gpio>; + interrupts = <10 8>; /* Pin 10, active low */ + }; + phy12: ethernet-phy@2 { + reg = <2>; + compatible = "marvell,88e1149r"; + marvell,reg-init = <3 0x10 0 0x5777>, + <3 0x11 0 0x00aa>, + <3 0x12 0 0x4105>, + <3 0x13 0 0x0a60>; + interrupt-parent = <&gpio>; + interrupts = <10 8>; /* Pin 10, active low */ + }; + phy13: ethernet-phy@3 { + reg = <3>; + compatible = "marvell,88e1149r"; + marvell,reg-init = <3 0x10 0 0x5777>, + <3 0x11 0 0x00aa>, + <3 0x12 0 0x4105>, + <3 0x13 0 0x0a60>; + interrupt-parent = <&gpio>; + interrupts = <10 8>; /* Pin 10, active low */ + }; + phy14: ethernet-phy@4 { + reg = <4>; + compatible = "marvell,88e1149r"; + marvell,reg-init = <3 0x10 0 0x5777>, + <3 0x11 0 0x00aa>, + <3 0x12 0 0x4105>, + <3 0x13 0 0x0a60>; + interrupt-parent = <&gpio>; + interrupts = <10 8>; /* Pin 10, active low */ + }; + }; + + mdio@3 { + reg = <3>; + #address-cells = <1>; + #size-cells = <0>; + + phy21: ethernet-phy@1 { + reg = <1>; + compatible = "marvell,88e1149r"; + marvell,reg-init = <3 0x10 0 0x5777>, + <3 0x11 0 0x00aa>, + <3 0x12 0 0x4105>, + <3 0x13 0 0x0a60>; + interrupt-parent = <&gpio>; + interrupts = <12 8>; /* Pin 12, active low */ + }; + phy22: ethernet-phy@2 { + reg = <2>; + compatible = "marvell,88e1149r"; + marvell,reg-init = <3 0x10 0 0x5777>, + <3 0x11 0 0x00aa>, + <3 0x12 0 0x4105>, + <3 0x13 0 0x0a60>; + interrupt-parent = <&gpio>; + interrupts = <12 8>; /* Pin 12, active low */ + }; + phy23: ethernet-phy@3 { + reg = <3>; + compatible = "marvell,88e1149r"; + marvell,reg-init = <3 0x10 0 0x5777>, + <3 0x11 0 0x00aa>, + <3 0x12 0 0x4105>, + <3 0x13 0 0x0a60>; + interrupt-parent = <&gpio>; + interrupts = <12 8>; /* Pin 12, active low */ + }; + phy24: ethernet-phy@4 { + reg = <4>; + compatible = "marvell,88e1149r"; + marvell,reg-init = <3 0x10 0 0x5777>, + <3 0x11 0 0x00aa>, + <3 0x12 0 0x4105>, + <3 0x13 0 0x0a60>; + interrupt-parent = <&gpio>; + interrupts = <12 8>; /* Pin 12, active low */ + }; + }; + }; diff --git a/Documentation/devicetree/bindings/net/mdio-mux.txt b/Documentation/devicetree/bindings/net/mdio-mux.txt new file mode 100644 index 00000000000..f65606f8d63 --- /dev/null +++ b/Documentation/devicetree/bindings/net/mdio-mux.txt @@ -0,0 +1,136 @@ +Common MDIO bus multiplexer/switch properties. + +An MDIO bus multiplexer/switch will have several child busses that are +numbered uniquely in a device dependent manner. The nodes for an MDIO +bus multiplexer/switch will have one child node for each child bus. + +Required properties: +- mdio-parent-bus : phandle to the parent MDIO bus. +- #address-cells = <1>; +- #size-cells = <0>; + +Optional properties: +- Other properties specific to the multiplexer/switch hardware. + +Required properties for child nodes: +- #address-cells = <1>; +- #size-cells = <0>; +- reg : The sub-bus number. + + +Example : + + /* The parent MDIO bus. */ + smi1: mdio@1180000001900 { + compatible = "cavium,octeon-3860-mdio"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x11800 0x00001900 0x0 0x40>; + }; + + /* + An NXP sn74cbtlv3253 dual 1-of-4 switch controlled by a + pair of GPIO lines. 
Child busses 2 and 3 populated with 4 + PHYs each. + */ + mdio-mux { + compatible = "mdio-mux-gpio"; + gpios = <&gpio1 3 0>, <&gpio1 4 0>; + mdio-parent-bus = <&smi1>; + #address-cells = <1>; + #size-cells = <0>; + + mdio@2 { + reg = <2>; + #address-cells = <1>; + #size-cells = <0>; + + phy11: ethernet-phy@1 { + reg = <1>; + compatible = "marvell,88e1149r"; + marvell,reg-init = <3 0x10 0 0x5777>, + <3 0x11 0 0x00aa>, + <3 0x12 0 0x4105>, + <3 0x13 0 0x0a60>; + interrupt-parent = <&gpio>; + interrupts = <10 8>; /* Pin 10, active low */ + }; + phy12: ethernet-phy@2 { + reg = <2>; + compatible = "marvell,88e1149r"; + marvell,reg-init = <3 0x10 0 0x5777>, + <3 0x11 0 0x00aa>, + <3 0x12 0 0x4105>, + <3 0x13 0 0x0a60>; + interrupt-parent = <&gpio>; + interrupts = <10 8>; /* Pin 10, active low */ + }; + phy13: ethernet-phy@3 { + reg = <3>; + compatible = "marvell,88e1149r"; + marvell,reg-init = <3 0x10 0 0x5777>, + <3 0x11 0 0x00aa>, + <3 0x12 0 0x4105>, + <3 0x13 0 0x0a60>; + interrupt-parent = <&gpio>; + interrupts = <10 8>; /* Pin 10, active low */ + }; + phy14: ethernet-phy@4 { + reg = <4>; + compatible = "marvell,88e1149r"; + marvell,reg-init = <3 0x10 0 0x5777>, + <3 0x11 0 0x00aa>, + <3 0x12 0 0x4105>, + <3 0x13 0 0x0a60>; + interrupt-parent = <&gpio>; + interrupts = <10 8>; /* Pin 10, active low */ + }; + }; + + mdio@3 { + reg = <3>; + #address-cells = <1>; + #size-cells = <0>; + + phy21: ethernet-phy@1 { + reg = <1>; + compatible = "marvell,88e1149r"; + marvell,reg-init = <3 0x10 0 0x5777>, + <3 0x11 0 0x00aa>, + <3 0x12 0 0x4105>, + <3 0x13 0 0x0a60>; + interrupt-parent = <&gpio>; + interrupts = <12 8>; /* Pin 12, active low */ + }; + phy22: ethernet-phy@2 { + reg = <2>; + compatible = "marvell,88e1149r"; + marvell,reg-init = <3 0x10 0 0x5777>, + <3 0x11 0 0x00aa>, + <3 0x12 0 0x4105>, + <3 0x13 0 0x0a60>; + interrupt-parent = <&gpio>; + interrupts = <12 8>; /* Pin 12, active low */ + }; + phy23: ethernet-phy@3 { + reg = <3>; + compatible = "marvell,88e1149r"; + marvell,reg-init = <3 0x10 0 0x5777>, + <3 0x11 0 0x00aa>, + <3 0x12 0 0x4105>, + <3 0x13 0 0x0a60>; + interrupt-parent = <&gpio>; + interrupts = <12 8>; /* Pin 12, active low */ + }; + phy24: ethernet-phy@4 { + reg = <4>; + compatible = "marvell,88e1149r"; + marvell,reg-init = <3 0x10 0 0x5777>, + <3 0x11 0 0x00aa>, + <3 0x12 0 0x4105>, + <3 0x13 0 0x0a60>; + interrupt-parent = <&gpio>; + interrupts = <12 8>; /* Pin 12, active low */ + }; + }; + }; diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index b7413cb46dc..ef088e55ab2 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -996,7 +996,6 @@ Table 1-9: Network info in /proc/net snmp SNMP data sockstat Socket statistics tcp TCP sockets - tr_rif Token ring RIF routing table udp UDP sockets unix UNIX domain sockets wireless Wireless interface data (Wavelan etc) diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX index 9ad9ddeb384..2cc3c7733a2 100644 --- a/Documentation/networking/00-INDEX +++ b/Documentation/networking/00-INDEX @@ -1,7 +1,5 @@ 00-INDEX - this file -3c359.txt - - information on the 3Com TokenLink Velocity XL (3c5359) driver. 3c505.txt - information on the 3Com EtherLink Plus (3c505) driver. 3c509.txt @@ -142,8 +140,6 @@ netif-msg.txt - Design of the network interface message level setting (NETIF_MSG_*). nfc.txt - The Linux Near Field Communication (NFS) subsystem. -olympic.txt - - IBM PCI Pit/Pit-Phy/Olympic Token Ring driver info. 
openvswitch.txt - Open vSwitch developer documentation. operstates.txt @@ -184,8 +180,6 @@ skfp.txt - SysKonnect FDDI (SK-5xxx, Compaq Netelligent) driver info. smc9.txt - the driver for SMC's 9000 series of Ethernet cards -smctr.txt - - SMC TokenCard TokenRing Linux driver info. spider-net.txt - README for the Spidernet Driver (as found in PS3 / Cell BE). stmmac.txt @@ -200,8 +194,6 @@ tcp-thin.txt - kernel tuning options for low rate 'thin' TCP streams. tlan.txt - ThunderLAN (Compaq Netelligent 10/100, Olicom OC-2xxx) driver info. -tms380tr.txt - - SysKonnect Token Ring ISA/PCI adapter driver info. tproxy.txt - Transparent proxy support user guide. tuntap.txt diff --git a/Documentation/networking/3c359.txt b/Documentation/networking/3c359.txt deleted file mode 100644 index dadfe8147ab..00000000000 --- a/Documentation/networking/3c359.txt +++ /dev/null @@ -1,58 +0,0 @@ - -3COM PCI TOKEN LINK VELOCITY XL TOKEN RING CARDS README - -Release 0.9.0 - Release - Jul 17th 2000 Mike Phillips - - 1.2.0 - Final - Feb 17th 2002 Mike Phillips - Updated for submission to the 2.4.x kernel. - -Thanks: - Terry Murphy from 3Com for tech docs and support, - Adam D. Ligas for testing the driver. - -Note: - This driver will NOT work with the 3C339 Token Ring cards, you need -to use the tms380 driver instead. - -Options: - -The driver accepts three options: ringspeed, pkt_buf_sz and message_level. - -These options can be specified differently for each card found. - -ringspeed: Has one of three settings 0 (default), 4 or 16. 0 will -make the card autosense the ringspeed and join at the appropriate speed, -this will be the default option for most people. 4 or 16 allow you to -explicitly force the card to operate at a certain speed. The card will fail -if you try to insert it at the wrong speed. (Although some hubs will allow -this so be *very* careful). The main purpose for explicitly setting the ring -speed is for when the card is first on the ring. In autosense mode, if the card -cannot detect any active monitors on the ring it will open at the same speed as -its last opening. This can be hazardous if this speed does not match the speed -you want the ring to operate at. - -pkt_buf_sz: This is this initial receive buffer allocation size. This will -default to 4096 if no value is entered. You may increase performance of the -driver by setting this to a value larger than the network packet size, although -the driver now re-sizes buffers based on MTU settings as well. - -message_level: Controls level of messages created by the driver. Defaults to 0: -which only displays start-up and critical messages. Presently any non-zero -value will display all soft messages as well. NB This does not turn -debugging messages on, that must be done by modified the source code. - -Variable MTU size: - -The driver can handle a MTU size up to either 4500 or 18000 depending upon -ring speed. The driver also changes the size of the receive buffers as part -of the mtu re-sizing, so if you set mtu = 18000, you will need to be able -to allocate 16 * (sk_buff with 18000 buffer size) call it 18500 bytes per ring -position = 296,000 bytes of memory space, plus of course anything -necessary for the tx sk_buff's. Remember this is per card, so if you are -building routers, gateway's etc, you could start to use a lot of memory -real fast. 
- -2/17/02 Mike Phillips - diff --git a/Documentation/networking/3c509.txt b/Documentation/networking/3c509.txt index dcc9eaf5939..fbf722e15ac 100644 --- a/Documentation/networking/3c509.txt +++ b/Documentation/networking/3c509.txt @@ -25,7 +25,6 @@ models: 3c509B (later revision of the ISA card; supports full-duplex) 3c589 (PCMCIA) 3c589B (later revision of the 3c589; supports full-duplex) - 3c529 (MCA) 3c579 (EISA) Large portions of this documentation were heavily borrowed from the guide diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt index 221ad0cdf11..75a592365af 100644 --- a/Documentation/networking/batman-adv.txt +++ b/Documentation/networking/batman-adv.txt @@ -1,5 +1,3 @@ -[state: 21-08-2011] - BATMAN-ADV ---------- @@ -67,18 +65,19 @@ To deactivate an interface you have to write "none" into its All mesh wide settings can be found in batman's own interface folder: -# ls /sys/class/net/bat0/mesh/ -# aggregated_ogms fragmentation gw_sel_class vis_mode -# ap_isolation gw_bandwidth hop_penalty -# bonding gw_mode orig_interval +# ls /sys/class/net/bat0/mesh/ +# aggregated_ogms gw_bandwidth log_level +# ap_isolation gw_mode orig_interval +# bonding gw_sel_class routing_algo +# bridge_loop_avoidance hop_penalty vis_mode +# fragmentation There is a special folder for debugging information: # ls /sys/kernel/debug/batman_adv/bat0/ -# gateways socket transtable_global vis_data -# originators softif_neigh transtable_local - +# bla_claim_table log socket transtable_local +# gateways originators transtable_global vis_data Some of the files contain all sort of status information regard- ing the mesh network. For example, you can view the table of @@ -202,12 +201,13 @@ abled during run time. Following log_levels are defined: 1 - Enable messages related to routing / flooding / broadcasting 2 - Enable messages related to route added / changed / deleted 4 - Enable messages related to translation table operations -7 - Enable all messages +8 - Enable messages related to bridge loop avoidance +15 - enable all messages The debug output can be changed at runtime using the file /sys/class/net/bat0/mesh/log_level. e.g. -# echo 2 > /sys/class/net/bat0/mesh/log_level +# echo 6 > /sys/class/net/bat0/mesh/log_level will enable debug messages for when routes change. diff --git a/Documentation/networking/fore200e.txt b/Documentation/networking/fore200e.txt index f648eb26518..d52af53efdc 100644 --- a/Documentation/networking/fore200e.txt +++ b/Documentation/networking/fore200e.txt @@ -11,12 +11,10 @@ i386, alpha (untested), powerpc, sparc and sparc64 archs. The intent is to enable the use of different models of FORE adapters at the same time, by hosts that have several bus interfaces (such as PCI+SBUS, -PCI+MCA or PCI+EISA). +or PCI+EISA). Only PCI and SBUS devices are currently supported by the driver, but support -for other bus interfaces such as EISA should not be too hard to add (this may -be more tricky for the MCA bus, though, as FORE made some MCA-specific -modifications to the adapter's AALI interface). +for other bus interfaces such as EISA should not be too hard to add. 
Firmware Copyright Notice diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt index 1dc1c24a754..703cf4370c7 100644 --- a/Documentation/networking/ieee802154.txt +++ b/Documentation/networking/ieee802154.txt @@ -4,15 +4,22 @@ Introduction ============ +The IEEE 802.15.4 working group focuses on standartization of bottom +two layers: Medium Accsess Control (MAC) and Physical (PHY). And there +are mainly two options available for upper layers: + - ZigBee - proprietary protocol from ZigBee Alliance + - 6LowPAN - IPv6 networking over low rate personal area networks The Linux-ZigBee project goal is to provide complete implementation -of IEEE 802.15.4 / ZigBee / 6LoWPAN protocols. IEEE 802.15.4 is a stack +of IEEE 802.15.4 and 6LoWPAN protocols. IEEE 802.15.4 is a stack of protocols for organizing Low-Rate Wireless Personal Area Networks. -Currently only IEEE 802.15.4 layer is implemented. We have chosen -to use plain Berkeley socket API, the generic Linux networking stack -to transfer IEEE 802.15.4 messages and a special protocol over genetlink -for configuration/management +The stack is composed of three main parts: + - IEEE 802.15.4 layer; We have chosen to use plain Berkeley socket API, + the generic Linux networking stack to transfer IEEE 802.15.4 messages + and a special protocol over genetlink for configuration/management + - MAC - provides access to shared channel and reliable data delivery + - PHY - represents device drivers Socket API @@ -29,15 +36,6 @@ or git tree at git://linux-zigbee.git.sourceforge.net/gitroot/linux-zigbee). One can use SOCK_RAW for passing raw data towards device xmit function. YMMV. -MLME - MAC Level Management -============================ - -Most of IEEE 802.15.4 MLME interfaces are directly mapped on netlink commands. -See the include/net/nl802154.h header. Our userspace tools package -(see above) provides CLI configuration utility for radio interfaces and simple -coordinator for IEEE 802.15.4 networks as an example users of MLME protocol. - - Kernel side ============= @@ -51,6 +49,15 @@ Like with WiFi, there are several types of devices implementing IEEE 802.15.4. Those types of devices require different approach to be hooked into Linux kernel. +MLME - MAC Level Management +============================ + +Most of IEEE 802.15.4 MLME interfaces are directly mapped on netlink commands. +See the include/net/nl802154.h header. Our userspace tools package +(see above) provides CLI configuration utility for radio interfaces and simple +coordinator for IEEE 802.15.4 networks as an example users of MLME protocol. + + HardMAC ======= @@ -73,11 +80,47 @@ We provide an example of simple HardMAC driver at drivers/ieee802154/fakehard.c SoftMAC ======= -We are going to provide intermediate layer implementing IEEE 802.15.4 MAC -in software. This is currently WIP. +The MAC is the middle layer in the IEEE 802.15.4 Linux stack. This moment it +provides interface for drivers registration and management of slave interfaces. + +NOTE: Currently the only monitor device type is supported - it's IEEE 802.15.4 +stack interface for network sniffers (e.g. WireShark). + +This layer is going to be extended soon. See header include/net/mac802154.h and several drivers in drivers/ieee802154/. 
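
As an aside (not part of this patch), the registration flow described in the "Device drivers API" section below can be sketched in a few lines of C. Only the four ieee802154_* calls documented there are taken from the patch; the contents of my_ops and the probe wrapper are assumptions, not the real fakehard/fakelb code.

	#include <linux/module.h>
	#include <net/mac802154.h>

	struct my_phy_priv {
		void *bus;	/* driver-private state, e.g. an SPI handle */
	};

	static struct ieee802154_ops my_ops = {
		.owner = THIS_MODULE,
		/* .start, .stop, .xmit, .ed, .set_channel, ... (assumed names,
		 * see include/net/mac802154.h for the real operations) */
	};

	static int my_phy_probe(void)
	{
		struct ieee802154_dev *dev;
		int err;

		/* allocate an IEEE 802.15.4 compatible device with private data */
		dev = ieee802154_alloc_device(sizeof(struct my_phy_priv), &my_ops);
		if (!dev)
			return -ENOMEM;

		/* register the PHY with the SoftMAC (mac802154) layer */
		err = ieee802154_register_device(dev);
		if (err)
			ieee802154_free_device(dev);
		return err;
	}

A matching remove path would call ieee802154_unregister_device() followed by ieee802154_free_device().
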
+ +Device drivers API +================== + +The include/net/mac802154.h defines following functions: + - struct ieee802154_dev *ieee802154_alloc_device + (size_t priv_size, struct ieee802154_ops *ops): + allocation of IEEE 802.15.4 compatible device + + - void ieee802154_free_device(struct ieee802154_dev *dev): + freeing allocated device + + - int ieee802154_register_device(struct ieee802154_dev *dev): + register PHY in the system + + - void ieee802154_unregister_device(struct ieee802154_dev *dev): + freeing registered PHY + +Moreover IEEE 802.15.4 device operations structure should be filled. + +Fake drivers +============ + +In addition there are two drivers available which simulate real devices with +HardMAC (fakehard) and SoftMAC (fakelb - IEEE 802.15.4 loopback driver) +interfaces. This option provides possibility to test and debug stack without +usage of real hardware. + +See sources in drivers/ieee802154 folder for more details. + + 6LoWPAN Linux implementation ============================ diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 1619a8c8087..6f896b94abd 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -190,6 +190,20 @@ tcp_cookie_size - INTEGER tcp_dsack - BOOLEAN Allows TCP to send "duplicate" SACKs. +tcp_early_retrans - INTEGER + Enable Early Retransmit (ER), per RFC 5827. ER lowers the threshold + for triggering fast retransmit when the amount of outstanding data is + small and when no previously unsent data can be transmitted (such + that limited transmit could be used). + Possible values: + 0 disables ER + 1 enables ER + 2 enables ER but delays fast recovery and fast retransmit + by a fourth of RTT. This mitigates connection falsely + recovers when network has a small degree of reordering + (less than 3 packets). + Default: 2 + tcp_ecn - INTEGER Enable Explicit Congestion Notification (ECN) in TCP. ECN is only used when both ends of the TCP flow support it. It is useful to @@ -1287,13 +1301,22 @@ bridge-nf-call-ip6tables - BOOLEAN bridge-nf-filter-vlan-tagged - BOOLEAN 1 : pass bridged vlan-tagged ARP/IP/IPv6 traffic to {arp,ip,ip6}tables. 0 : disable this. - Default: 1 + Default: 0 bridge-nf-filter-pppoe-tagged - BOOLEAN 1 : pass bridged pppoe-tagged IP/IPv6 traffic to {ip,ip6}tables. 0 : disable this. - Default: 1 + Default: 0 +bridge-nf-pass-vlan-input-dev - BOOLEAN + 1: if bridge-nf-filter-vlan-tagged is enabled, try to find a vlan + interface on the bridge and set the netfilter input device to the vlan. + This allows use of e.g. "iptables -i br0.1" and makes the REDIRECT + target work with vlan-on-top-of-bridge interfaces. When no matching + vlan interface is found, or this switch is off, the input device is + set to the bridge interface. + 0: disable bridge netfilter vlan interface lookup. + Default: 0 proc/sys/net/sctp/* Variables: @@ -1484,11 +1507,8 @@ addr_scope_policy - INTEGER /proc/sys/net/core/* -dev_weight - INTEGER - The maximum number of packets that kernel can handle on a NAPI - interrupt, it's a Per-CPU variable. + Please see: Documentation/sysctl/net.txt for descriptions of these entries. 
- Default: 64 /proc/sys/net/unix/* max_dgram_qlen - INTEGER diff --git a/Documentation/networking/mac80211-auth-assoc-deauth.txt b/Documentation/networking/mac80211-auth-assoc-deauth.txt index e0a2aa585ca..d7a15fe91bf 100644 --- a/Documentation/networking/mac80211-auth-assoc-deauth.txt +++ b/Documentation/networking/mac80211-auth-assoc-deauth.txt @@ -23,7 +23,7 @@ BA session stop & deauth/disassoc frames end note end -mac80211->driver: config(channel, non-HT) +mac80211->driver: config(channel, channel type) mac80211->driver: bss_info_changed(set BSSID, basic rate bitmap) mac80211->driver: sta_state(AP, exists) @@ -51,7 +51,7 @@ note over mac80211,driver: cleanup like for authenticate end alt not previously authenticated (FT) -mac80211->driver: config(channel, non-HT) +mac80211->driver: config(channel, channel type) mac80211->driver: bss_info_changed(set BSSID, basic rate bitmap) mac80211->driver: sta_state(AP, exists) mac80211->driver: sta_state(AP, authenticated) @@ -67,10 +67,6 @@ end mac80211->driver: set up QoS parameters -alt is HT channel -mac80211->driver: config(channel, HT params) -end - mac80211->driver: bss_info_changed(QoS, HT, associated with AID) mac80211->userspace: associated @@ -95,5 +91,5 @@ mac80211->driver: sta_state(AP,exists) mac80211->driver: sta_state(AP,not-exists) mac80211->driver: turn off powersave mac80211->driver: bss_info_changed(clear BSSID, not associated, no QoS, ...) -mac80211->driver: config(non-HT channel type) +mac80211->driver: config(channel type to non-HT) mac80211->userspace: disconnected diff --git a/Documentation/networking/olympic.txt b/Documentation/networking/olympic.txt deleted file mode 100644 index b95b5bf9675..00000000000 --- a/Documentation/networking/olympic.txt +++ /dev/null @@ -1,79 +0,0 @@ - -IBM PCI Pit/Pit-Phy/Olympic CHIPSET BASED TOKEN RING CARDS README - -Release 0.2.0 - Release - June 8th 1999 Peter De Schrijver & Mike Phillips -Release 0.9.C - Release - April 18th 2001 Mike Phillips - -Thanks: -Erik De Cock, Adrian Bridgett and Frank Fiene for their -patience and testing. -Donald Champion for the cardbus support -Kyle Lucke for the dma api changes. -Jonathon Bitner for hardware support. -Everybody on linux-tr for their continued support. - -Options: - -The driver accepts four options: ringspeed, pkt_buf_sz, -message_level and network_monitor. - -These options can be specified differently for each card found. - -ringspeed: Has one of three settings 0 (default), 4 or 16. 0 will -make the card autosense the ringspeed and join at the appropriate speed, -this will be the default option for most people. 4 or 16 allow you to -explicitly force the card to operate at a certain speed. The card will fail -if you try to insert it at the wrong speed. (Although some hubs will allow -this so be *very* careful). The main purpose for explicitly setting the ring -speed is for when the card is first on the ring. In autosense mode, if the card -cannot detect any active monitors on the ring it will not open, so you must -re-init the card at the appropriate speed. Unfortunately at present the only -way of doing this is rmmod and insmod which is a bit tough if it is compiled -in the kernel. - -pkt_buf_sz: This is this initial receive buffer allocation size. This will -default to 4096 if no value is entered. You may increase performance of the -driver by setting this to a value larger than the network packet size, although -the driver now re-sizes buffers based on MTU settings as well. 
- -message_level: Controls level of messages created by the driver. Defaults to 0: -which only displays start-up and critical messages. Presently any non-zero -value will display all soft messages as well. NB This does not turn -debugging messages on, that must be done by modified the source code. - -network_monitor: Any non-zero value will provide a quasi network monitoring -mode. All unexpected MAC frames (beaconing etc.) will be received -by the driver and the source and destination addresses printed. -Also an entry will be added in /proc/net called olympic_tr%d, where tr%d -is the registered device name, i.e tr0, tr1, etc. This displays low -level information about the configuration of the ring and the adapter. -This feature has been designed for network administrators to assist in -the diagnosis of network / ring problems. (This used to OLYMPIC_NETWORK_MONITOR, -but has now changed to allow each adapter to be configured differently and -to alleviate the necessity to re-compile olympic to turn the option on). - -Multi-card: - -The driver will detect multiple cards and will work with shared interrupts, -each card is assigned the next token ring device, i.e. tr0 , tr1, tr2. The -driver should also happily reside in the system with other drivers. It has -been tested with ibmtr.c running, and I personally have had one Olicom PCI -card and two IBM olympic cards (all on the same interrupt), all running -together. - -Variable MTU size: - -The driver can handle a MTU size up to either 4500 or 18000 depending upon -ring speed. The driver also changes the size of the receive buffers as part -of the mtu re-sizing, so if you set mtu = 18000, you will need to be able -to allocate 16 * (sk_buff with 18000 buffer size) call it 18500 bytes per ring -position = 296,000 bytes of memory space, plus of course anything -necessary for the tx sk_buff's. Remember this is per card, so if you are -building routers, gateway's etc, you could start to use a lot of memory -real fast. - - -6/8/99 Peter De Schrijver and Mike Phillips - diff --git a/Documentation/networking/smctr.txt b/Documentation/networking/smctr.txt deleted file mode 100644 index 9af25b810c1..00000000000 --- a/Documentation/networking/smctr.txt +++ /dev/null @@ -1,66 +0,0 @@ -Text File for the SMC TokenCard TokenRing Linux driver (smctr.c). - By Jay Schulist <jschlst@samba.org> - -The Linux SMC Token Ring driver works with the SMC TokenCard Elite (8115T) -ISA and SMC TokenCard Elite/A (8115T/A) MCA adapters. - -Latest information on this driver can be obtained on the Linux-SNA WWW site. -Please point your browser to: http://www.linux-sna.org - -This driver is rather simple to use. Select Y to Token Ring adapter support -in the kernel configuration. A choice for SMC Token Ring adapters will -appear. This drives supports all SMC ISA/MCA adapters. Choose this -option. I personally recommend compiling the driver as a module (M), but if you -you would like to compile it statically answer Y instead. - -This driver supports multiple adapters without the need to load multiple copies -of the driver. You should be able to load up to 7 adapters without any kernel -modifications, if you are in need of more please contact the maintainer of this -driver. - -Load the driver either by lilo/loadlin or as a module. When a module using the -following command will suffice for most: - -# modprobe smctr -smctr.c: v1.00 12/6/99 by jschlst@samba.org -tr0: SMC TokenCard 8115T at Io 0x300, Irq 10, Rom 0xd8000, Ram 0xcc000. 
- -Now just setup the device via ifconfig and set and routes you may have. After -this you are ready to start sending some tokens. - -Errata: -1). For anyone wondering where to pick up the SMC adapters please browse - to http://www.smc.com - -2). If you are the first/only Token Ring Client on a Token Ring LAN, please - specify the ringspeed with the ringspeed=[4/16] module option. If no - ringspeed is specified the driver will attempt to autodetect the ring - speed and/or if the adapter is the first/only station on the ring take - the appropriate actions. - - NOTE: Default ring speed is 16MB UTP. - -3). PnP support for this adapter sucks. I recommend hard setting the - IO/MEM/IRQ by the jumpers on the adapter. If this is not possible - load the module with the following io=[ioaddr] mem=[mem_addr] - irq=[irq_num]. - - The following IRQ, IO, and MEM settings are supported. - - IO ports: - 0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0, 0x300, - 0x320, 0x340, 0x360, 0x380. - - IRQs: - 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15 - - Memory addresses: - 0xA0000, 0xA4000, 0xA8000, 0xAC000, 0xB0000, 0xB4000, - 0xB8000, 0xBC000, 0xC0000, 0xC4000, 0xC8000, 0xCC000, - 0xD0000, 0xD4000, 0xD8000, 0xDC000, 0xE0000, 0xE4000, - 0xE8000, 0xEC000, 0xF0000, 0xF4000, 0xF8000, 0xFC000 - -This driver is under the GNU General Public License. Its Firmware image is -included as an initialized C-array and is licensed by SMC to the Linux -users of this driver. However no warranty about its fitness is expressed or -implied by SMC. diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt index d0aeeadd264..ab1e8d7004c 100644 --- a/Documentation/networking/stmmac.txt +++ b/Documentation/networking/stmmac.txt @@ -111,11 +111,12 @@ and detailed below as well: int phy_addr; int interface; struct stmmac_mdio_bus_data *mdio_bus_data; - int pbl; + struct stmmac_dma_cfg *dma_cfg; int clk_csr; int has_gmac; int enh_desc; int tx_coe; + int rx_coe; int bugged_jumbo; int pmt; int force_sf_dma_mode; @@ -136,10 +137,12 @@ Where: o pbl: the Programmable Burst Length is maximum number of beats to be transferred in one DMA transaction. GMAC also enables the 4xPBL by default. - o clk_csr: CSR Clock range selection. + o clk_csr: fixed CSR Clock range selection. o has_gmac: uses the GMAC core. o enh_desc: if sets the MAC will use the enhanced descriptor structure. o tx_coe: core is able to perform the tx csum in HW. + o rx_coe: the supports three check sum offloading engine types: + type_1, type_2 (full csum) and no RX coe. o bugged_jumbo: some HWs are not able to perform the csum in HW for over-sized frames due to limited buffer sizes. Setting this flag the csum will be done in SW on @@ -160,7 +163,7 @@ Where: o custom_cfg: this is a custom configuration that can be passed while initialising the resources. -The we have: +For MDIO bus The we have: struct stmmac_mdio_bus_data { int bus_id; @@ -177,10 +180,28 @@ Where: o irqs: list of IRQs, one per PHY. o probed_phy_irq: if irqs is NULL, use this for probed PHY. + +For DMA engine we have the following internal fields that should be +tuned according to the HW capabilities. + +struct stmmac_dma_cfg { + int pbl; + int fixed_burst; + int burst_len_supported; +}; + +Where: + o pbl: Programmable Burst Length + o fixed_burst: program the DMA to use the fixed burst mode + o burst_len: this is the value we put in the register + supported values are provided as macros in + linux/stmmac.h header file. 
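
A minimal sketch (not part of this patch) of how the new DMA configuration block might be instantiated on a platform; the field values are assumptions, and the stxYYY naming simply mirrors the ST example that follows:

	#include <linux/stmmac.h>

	/* illustrative DMA settings; real values depend on the HW capabilities */
	static struct stmmac_dma_cfg stxYYY_dma_cfg = {
		.pbl		= 32,	/* burst length, formerly plat->pbl */
		.fixed_burst	= 1,	/* assumed: request fixed burst mode */
	};

The platform data shown in the example below would then point at it with a .dma_cfg = &stxYYY_dma_cfg initializer, taking over the role of the removed .pbl field.
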
+ +--- + Below an example how the structures above are using on ST platforms. static struct plat_stmmacenet_data stxYYY_ethernet_platform_data = { - .pbl = 32, .has_gmac = 0, .enh_desc = 0, .fix_mac_speed = stxYYY_ethernet_fix_mac_speed, diff --git a/Documentation/networking/tms380tr.txt b/Documentation/networking/tms380tr.txt deleted file mode 100644 index 1f73e13058d..00000000000 --- a/Documentation/networking/tms380tr.txt +++ /dev/null @@ -1,147 +0,0 @@ -Text file for the Linux SysKonnect Token Ring ISA/PCI Adapter Driver. - Text file by: Jay Schulist <jschlst@samba.org> - -The Linux SysKonnect Token Ring driver works with the SysKonnect TR4/16(+) ISA, -SysKonnect TR4/16(+) PCI, SysKonnect TR4/16 PCI, and older revisions of the -SK NET TR4/16 ISA card. - -Latest information on this driver can be obtained on the Linux-SNA WWW site. -Please point your browser to: -http://www.linux-sna.org - -Many thanks to Christoph Goos for his excellent work on this driver and -SysKonnect for donating the adapters to Linux-SNA for the testing and -maintenance of this device driver. - -Important information to be noted: -1. Adapters can be slow to open (~20 secs) and close (~5 secs), please be - patient. -2. This driver works very well when autoprobing for adapters. Why even - think about those nasty io/int/dma settings of modprobe when the driver - will do it all for you! - -This driver is rather simple to use. Select Y to Token Ring adapter support -in the kernel configuration. A choice for SysKonnect Token Ring adapters will -appear. This drives supports all SysKonnect ISA and PCI adapters. Choose this -option. I personally recommend compiling the driver as a module (M), but if you -you would like to compile it statically answer Y instead. - -This driver supports multiple adapters without the need to load multiple copies -of the driver. You should be able to load up to 7 adapters without any kernel -modifications, if you are in need of more please contact the maintainer of this -driver. - -Load the driver either by lilo/loadlin or as a module. When a module using the -following command will suffice for most: - -# modprobe sktr - -This will produce output similar to the following: (Output is user specific) - -sktr.c: v1.01 08/29/97 by Christoph Goos -tr0: SK NET TR 4/16 PCI found at 0x6100, using IRQ 17. -tr1: SK NET TR 4/16 PCI found at 0x6200, using IRQ 16. -tr2: SK NET TR 4/16 ISA found at 0xa20, using IRQ 10 and DMA 5. - -Now just setup the device via ifconfig and set and routes you may have. After -this you are ready to start sending some tokens. - -Errata: -For anyone wondering where to pick up the SysKonnect adapters please browse -to http://www.syskonnect.com - -This driver is under the GNU General Public License. Its Firmware image is -included as an initialized C-array and is licensed by SysKonnect to the Linux -users of this driver. However no warranty about its fitness is expressed or -implied by SysKonnect. 
- -Below find attached the setting for the SK NET TR 4/16 ISA adapters -------------------------------------------------------------------- - - *************************** - *** C O N T E N T S *** - *************************** - - 1) Location of DIP-Switch W1 - 2) Default settings - 3) DIP-Switch W1 description - - - ============================================================== - CHAPTER 1 LOCATION OF DIP-SWITCH - ============================================================== - -UÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄ¿ -þUÄÄÄÄÄÄ¿ UÄÄÄÄÄ¿ UÄÄÄ¿ þ -þAÄÄÄÄÄÄU W1 AÄÄÄÄÄU UÄÄÄÄ¿ þ þ þ -þUÄÄÄÄÄÄ¿ þ þ þ þ UÄÄÅ¿ -þAÄÄÄÄÄÄU UÄÄÄÄÄÄÄÄÄÄÄ¿ AÄÄÄÄU þ þ þ þþ -þUÄÄÄÄÄÄ¿ þ þ UÄÄÄ¿ AÄÄÄU AÄÄÅU -þAÄÄÄÄÄÄU þ TMS380C26 þ þ þ þ -þUÄÄÄÄÄÄ¿ þ þ AÄÄÄU AÄ¿ -þAÄÄÄÄÄÄU þ þ þ þ -þ AÄÄÄÄÄÄÄÄÄÄÄU þ þ -þ þ þ -þ AÄU -þ þ -þ þ -þ þ -þ þ -AÄÄÄÄÄÄÄÄÄÄÄÄAÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄAÄÄAÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄAÄÄÄÄÄÄÄÄÄU - AÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄU AÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄU - - ============================================================== - CHAPTER 2 DEFAULT SETTINGS - ============================================================== - - W1 1 2 3 4 5 6 7 8 - +------------------------------+ - | ON X | - | OFF X X X X X X X | - +------------------------------+ - - W1.1 = ON Adapter drives address lines SA17..19 - W1.2 - 1.5 = OFF BootROM disabled - W1.6 - 1.8 = OFF I/O address 0A20h - - ============================================================== - CHAPTER 3 DIP SWITCH W1 DESCRIPTION - ============================================================== - - UÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄ¿ ON - þ 1 þ 2 þ 3 þ 4 þ 5 þ 6 þ 7 þ 8 þ - AÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄU OFF - |AD | BootROM Addr. | I/O | - +-+-+-------+-------+-----+-----+ - | | | - | | +------ 6 7 8 - | | ON ON ON 1900h - | | ON ON OFF 0900h - | | ON OFF ON 1980h - | | ON OFF OFF 0980h - | | OFF ON ON 1b20h - | | OFF ON OFF 0b20h - | | OFF OFF ON 1a20h - | | OFF OFF OFF 0a20h (+) - | | - | | - | +-------- 2 3 4 5 - | OFF x x x disabled (+) - | ON ON ON ON C0000 - | ON ON ON OFF C4000 - | ON ON OFF ON C8000 - | ON ON OFF OFF CC000 - | ON OFF ON ON D0000 - | ON OFF ON OFF D4000 - | ON OFF OFF ON D8000 - | ON OFF OFF OFF DC000 - | - | - +----- 1 - OFF adapter does NOT drive SA<17..19> - ON adapter drives SA<17..19> (+) - - - (+) means default setting - - ******************************** diff --git a/Documentation/nfc/nfc-hci.txt b/Documentation/nfc/nfc-hci.txt new file mode 100644 index 00000000000..216b7254fcc --- /dev/null +++ b/Documentation/nfc/nfc-hci.txt @@ -0,0 +1,155 @@ +HCI backend for NFC Core + +Author: Eric Lapuyade, Samuel Ortiz +Contact: eric.lapuyade@intel.com, samuel.ortiz@intel.com + +General +------- + +The HCI layer implements much of the ETSI TS 102 622 V10.2.0 specification. It +enables easy writing of HCI-based NFC drivers. The HCI layer runs as an NFC Core +backend, implementing an abstract nfc device and translating NFC Core API +to HCI commands and events. + +HCI +--- + +HCI registers as an nfc device with NFC Core. Requests coming from userspace are +routed through netlink sockets to NFC Core and then to HCI. From this point, +they are translated in a sequence of HCI commands sent to the HCI layer in the +host controller (the chip). The sending context blocks while waiting for the +response to arrive. +HCI events can also be received from the host controller. They will be handled +and a translation will be forwarded to NFC Core as needed. +HCI uses 2 execution contexts: +- one if for executing commands : nfc_hci_msg_tx_work(). 
Only one command +can be executing at any given moment. +- one if for dispatching received events and responses : nfc_hci_msg_rx_work() + +HCI Session initialization: +--------------------------- + +The Session initialization is an HCI standard which must unfortunately +support proprietary gates. This is the reason why the driver will pass a list +of proprietary gates that must be part of the session. HCI will ensure all +those gates have pipes connected when the hci device is set up. + +HCI Gates and Pipes +------------------- + +A gate defines the 'port' where some service can be found. In order to access +a service, one must create a pipe to that gate and open it. In this +implementation, pipes are totally hidden. The public API only knows gates. +This is consistent with the driver need to send commands to proprietary gates +without knowing the pipe connected to it. + +Driver interface +---------------- + +A driver would normally register itself with HCI and provide the following +entry points: + +struct nfc_hci_ops { + int (*open)(struct nfc_hci_dev *hdev); + void (*close)(struct nfc_hci_dev *hdev); + int (*xmit)(struct nfc_hci_dev *hdev, struct sk_buff *skb); + int (*start_poll)(struct nfc_hci_dev *hdev, u32 protocols); + int (*target_from_gate)(struct nfc_hci_dev *hdev, u8 gate, + struct nfc_target *target); +}; + +open() and close() shall turn the hardware on and off. xmit() shall simply +write a frame to the chip. start_poll() is an optional entrypoint that shall +set the hardware in polling mode. This must be implemented only if the hardware +uses proprietary gates or a mechanism slightly different from the HCI standard. +target_from_gate() is another optional entrypoint to return the protocols +corresponding to a proprietary gate. + +On the rx path, the driver is responsible to push incoming HCP frames to HCI +using nfc_hci_recv_frame(). HCI will take care of re-aggregation and handling +This must be done from a context that can sleep. + +SHDLC +----- + +Most chips use shdlc to ensure integrity and delivery ordering of the HCP +frames between the host controller (the chip) and hosts (entities connected +to the chip, like the cpu). In order to simplify writing the driver, an shdlc +layer is available for use by the driver. +When used, the driver actually registers with shdlc, and shdlc will register +with HCI. HCI sees shdlc as the driver and thus send its HCP frames +through shdlc->xmit. +SHDLC adds a new execution context (nfc_shdlc_sm_work()) to run its state +machine and handle both its rx and tx path. + +Included Drivers +---------------- + +An HCI based driver for an NXP PN544, connected through I2C bus, and using +shdlc is included. + +Execution Contexts +------------------ + +The execution contexts are the following: +- IRQ handler (IRQH): +fast, cannot sleep. stores incoming frames into an shdlc rx queue + +- SHDLC State Machine worker (SMW) +handles shdlc rx & tx queues. Dispatches HCI cmd responses. + +- HCI Tx Cmd worker (MSGTXWQ) +Serialize execution of HCI commands. Complete execution in case of resp timeout. + +- HCI Rx worker (MSGRXWQ) +Dispatches incoming HCI commands or events. 
+ +- Syscall context from a userspace call (SYSCALL) +Any entrypoint in HCI called from NFC Core + +Workflow executing an HCI command (using shdlc) +----------------------------------------------- + +Executing an HCI command can easily be performed synchronously using the +following API: + +int nfc_hci_send_cmd (struct nfc_hci_dev *hdev, u8 gate, u8 cmd, + const u8 *param, size_t param_len, struct sk_buff **skb) + +The API must be invoked from a context that can sleep. Most of the time, this +will be the syscall context. skb will return the result that was received in +the response. + +Internally, execution is asynchronous. So all this API does is to enqueue the +HCI command, setup a local wait queue on stack, and wait_event() for completion. +The wait is not interruptible because it is guaranteed that the command will +complete after some short timeout anyway. + +MSGTXWQ context will then be scheduled and invoke nfc_hci_msg_tx_work(). +This function will dequeue the next pending command and send its HCP fragments +to the lower layer which happens to be shdlc. It will then start a timer to be +able to complete the command with a timeout error if no response arrive. + +SMW context gets scheduled and invokes nfc_shdlc_sm_work(). This function +handles shdlc framing in and out. It uses the driver xmit to send frames and +receives incoming frames in an skb queue filled from the driver IRQ handler. +SHDLC I(nformation) frames payload are HCP fragments. They are agregated to +form complete HCI frames, which can be a response, command, or event. + +HCI Responses are dispatched immediately from this context to unblock +waiting command execution. Reponse processing involves invoking the completion +callback that was provided by nfc_hci_msg_tx_work() when it sent the command. +The completion callback will then wake the syscall context. + +Workflow receiving an HCI event or command +------------------------------------------ + +HCI commands or events are not dispatched from SMW context. Instead, they are +queued to HCI rx_queue and will be dispatched from HCI rx worker +context (MSGRXWQ). This is done this way to allow a cmd or event handler +to also execute other commands (for example, handling the +NFC_HCI_EVT_TARGET_DISCOVERED event from PN544 requires to issue an +ANY_GET_PARAMETER to the reader A gate to get information on the target +that was discovered). + +Typically, such an event will be propagated to NFC Core from MSGRXWQ context. diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index 3201a7097e4..98335b7a533 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -43,6 +43,13 @@ Values : 1 - enable the JIT 2 - enable the JIT and ask the compiler to emit traces on kernel log. +dev_weight +-------------- + +The maximum number of packets that kernel can handle on a NAPI interrupt, +it's a Per-CPU variable. 
+Default: 64 + rmem_default ------------ diff --git a/MAINTAINERS b/MAINTAINERS index b3627098650..490dd6e640a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1431,6 +1431,7 @@ F: include/linux/backlight.h BATMAN ADVANCED M: Marek Lindner <lindner_marek@yahoo.de> M: Simon Wunderlich <siwu@hrz.tu-chemnitz.de> +M: Antonio Quartulli <ordex@autistici.org> L: b.a.t.m.a.n@lists.open-mesh.org W: http://www.open-mesh.org/ S: Maintained @@ -3518,12 +3519,6 @@ M: Deepak Saxena <dsaxena@plexity.net> S: Maintained F: drivers/char/hw_random/ixp4xx-rng.c -INTEL IXP2000 ETHERNET DRIVER -M: Lennert Buytenhek <kernel@wantstofly.org> -L: netdev@vger.kernel.org -S: Maintained -F: drivers/net/ethernet/xscale/ixp2000/ - INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf) M: Jeff Kirsher <jeffrey.t.kirsher@intel.com> M: Jesse Brandeburg <jesse.brandeburg@intel.com> @@ -5208,7 +5203,7 @@ S: Maintained F: include/linux/personality.h PHONET PROTOCOL -M: Remi Denis-Courmont <remi.denis-courmont@nokia.com> +M: Remi Denis-Courmont <courmisch@gmail.com> S: Supported F: Documentation/networking/phonet.txt F: include/linux/phonet.h @@ -6676,6 +6671,16 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Maintained F: sound/soc/codecs/twl4030* +TI WILINK WIRELESS DRIVERS +M: Luciano Coelho <coelho@ti.com> +L: linux-wireless@vger.kernel.org +W: http://wireless.kernel.org/en/users/Drivers/wl12xx +W: http://wireless.kernel.org/en/users/Drivers/wl1251 +T: git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git +S: Maintained +F: drivers/net/wireless/ti/ +F: include/linux/wl12xx.h + TIPC NETWORK LAYER M: Jon Maloy <jon.maloy@ericsson.com> M: Allan Stephens <allan.stephens@windriver.com> @@ -7432,23 +7437,6 @@ M: Miloslav Trmac <mitr@volny.cz> S: Maintained F: drivers/input/misc/wistron_btns.c -WL1251 WIRELESS DRIVER -M: Luciano Coelho <coelho@ti.com> -L: linux-wireless@vger.kernel.org -W: http://wireless.kernel.org/en/users/Drivers/wl1251 -T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git -S: Maintained -F: drivers/net/wireless/wl1251/* - -WL1271 WIRELESS DRIVER -M: Luciano Coelho <coelho@ti.com> -L: linux-wireless@vger.kernel.org -W: http://wireless.kernel.org/en/users/Drivers/wl12xx -T: git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git -S: Maintained -F: drivers/net/wireless/wl12xx/ -F: include/linux/wl12xx.h - WL3501 WIRELESS PCMCIA CARD DRIVER M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> L: linux-wireless@vger.kernel.org diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c index 364c19357e6..89106792d06 100644 --- a/arch/arm/mach-at91/at91rm9200.c +++ b/arch/arm/mach-at91/at91rm9200.c @@ -26,15 +26,6 @@ #include "clock.h" #include "sam9_smc.h" -static struct map_desc at91rm9200_io_desc[] __initdata = { - { - .virtual = AT91_VA_BASE_EMAC, - .pfn = __phys_to_pfn(AT91RM9200_BASE_EMAC), - .length = SZ_16K, - .type = MT_DEVICE, - }, -}; - /* -------------------------------------------------------------------- * Clocks * -------------------------------------------------------------------- */ @@ -315,7 +306,6 @@ static void __init at91rm9200_map_io(void) { /* Map peripherals */ at91_init_sram(0, AT91RM9200_SRAM_BASE, AT91RM9200_SRAM_SIZE); - iotable_init(at91rm9200_io_desc, ARRAY_SIZE(at91rm9200_io_desc)); } static void __init at91rm9200_ioremap_registers(void) diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c index 05774e5b1cb..60c472861e5 100644 --- 
a/arch/arm/mach-at91/at91rm9200_devices.c +++ b/arch/arm/mach-at91/at91rm9200_devices.c @@ -140,8 +140,8 @@ static struct macb_platform_data eth_data; static struct resource eth_resources[] = { [0] = { - .start = AT91_VA_BASE_EMAC, - .end = AT91_VA_BASE_EMAC + SZ_16K - 1, + .start = AT91RM9200_BASE_EMAC, + .end = AT91RM9200_BASE_EMAC + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { diff --git a/arch/arm/mach-at91/include/mach/hardware.h b/arch/arm/mach-at91/include/mach/hardware.h index e9e29a6c386..01db372be8e 100644 --- a/arch/arm/mach-at91/include/mach/hardware.h +++ b/arch/arm/mach-at91/include/mach/hardware.h @@ -94,7 +94,6 @@ * Virtual to Physical Address mapping for IO devices. */ #define AT91_VA_BASE_SYS AT91_IO_P2V(AT91_BASE_SYS) -#define AT91_VA_BASE_EMAC AT91_IO_P2V(AT91RM9200_BASE_EMAC) /* Internal SRAM is mapped below the IO devices */ #define AT91_SRAM_MAX SZ_1M diff --git a/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h b/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h index 292d55ed211..cf03614d250 100644 --- a/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h +++ b/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h @@ -75,4 +75,7 @@ struct ixp46x_ts_regs { #define TX_SNAPSHOT_LOCKED (1<<0) #define RX_SNAPSHOT_LOCKED (1<<1) +/* The ptp_ixp46x module will set this variable */ +extern int ixp46x_phc_index; + #endif diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index 807c97eed8a..46c61edcdf7 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -346,11 +346,8 @@ CONFIG_CHELSIO_T1=m CONFIG_IXGB=m CONFIG_S2IO=m CONFIG_MYRI10GE=m -CONFIG_TR=y CONFIG_IBMOL=m CONFIG_IBMLS=m -CONFIG_3C359=m -CONFIG_TMS380TR=m CONFIG_TMSPCI=m CONFIG_ABYSS=m CONFIG_USB_CATC=m @@ -376,7 +373,6 @@ CONFIG_PCMCIA_SMC91C92=m CONFIG_PCMCIA_XIRC2PS=m CONFIG_PCMCIA_AXNET=m CONFIG_ARCNET_COM20020_CS=m -CONFIG_PCMCIA_IBMTR=m CONFIG_WAN=y CONFIG_LANMEDIA=m CONFIG_HDLC=m diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 6c0683d3fcb..5f6acce45a0 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -30,6 +30,7 @@ config SPARC select USE_GENERIC_SMP_HELPERS if SMP select GENERIC_PCI_IOMAP select HAVE_NMI_WATCHDOG if SPARC64 + select HAVE_BPF_JIT config SPARC32 def_bool !64BIT diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile index eddcfb36aaf..0e5de13b56c 100644 --- a/arch/sparc/Makefile +++ b/arch/sparc/Makefile @@ -66,6 +66,7 @@ head-y += arch/sparc/kernel/init_task.o core-y += arch/sparc/kernel/ core-y += arch/sparc/mm/ arch/sparc/math-emu/ +core-y += arch/sparc/net/ libs-y += arch/sparc/prom/ libs-y += arch/sparc/lib/ diff --git a/arch/sparc/net/Makefile b/arch/sparc/net/Makefile new file mode 100644 index 00000000000..1306a58ac54 --- /dev/null +++ b/arch/sparc/net/Makefile @@ -0,0 +1,4 @@ +# +# Arch-specific network modules +# +obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o diff --git a/arch/sparc/net/bpf_jit.h b/arch/sparc/net/bpf_jit.h new file mode 100644 index 00000000000..33d6b375ff1 --- /dev/null +++ b/arch/sparc/net/bpf_jit.h @@ -0,0 +1,68 @@ +#ifndef _BPF_JIT_H +#define _BPF_JIT_H + +/* Conventions: + * %g1 : temporary + * %g2 : Secondary temporary used by SKB data helper stubs. + * %g3 : packet offset passed into SKB data helper stubs. + * %o0 : pointer to skb (first argument given to JIT function) + * %o1 : BPF A accumulator + * %o2 : BPF X accumulator + * %o3 : Holds saved %o7 so we can call helper functions without needing + * to allocate a register window. 
+ * %o4 : skb->len - skb->data_len + * %o5 : skb->data + */ + +#ifndef __ASSEMBLER__ +#define G0 0x00 +#define G1 0x01 +#define G3 0x03 +#define G6 0x06 +#define O0 0x08 +#define O1 0x09 +#define O2 0x0a +#define O3 0x0b +#define O4 0x0c +#define O5 0x0d +#define SP 0x0e +#define O7 0x0f +#define FP 0x1e + +#define r_SKB O0 +#define r_A O1 +#define r_X O2 +#define r_saved_O7 O3 +#define r_HEADLEN O4 +#define r_SKB_DATA O5 +#define r_TMP G1 +#define r_TMP2 G2 +#define r_OFF G3 + +/* assembly code in arch/sparc/net/bpf_jit_asm.S */ +extern u32 bpf_jit_load_word[]; +extern u32 bpf_jit_load_half[]; +extern u32 bpf_jit_load_byte[]; +extern u32 bpf_jit_load_byte_msh[]; +extern u32 bpf_jit_load_word_positive_offset[]; +extern u32 bpf_jit_load_half_positive_offset[]; +extern u32 bpf_jit_load_byte_positive_offset[]; +extern u32 bpf_jit_load_byte_msh_positive_offset[]; +extern u32 bpf_jit_load_word_negative_offset[]; +extern u32 bpf_jit_load_half_negative_offset[]; +extern u32 bpf_jit_load_byte_negative_offset[]; +extern u32 bpf_jit_load_byte_msh_negative_offset[]; + +#else +#define r_SKB %o0 +#define r_A %o1 +#define r_X %o2 +#define r_saved_O7 %o3 +#define r_HEADLEN %o4 +#define r_SKB_DATA %o5 +#define r_TMP %g1 +#define r_TMP2 %g2 +#define r_OFF %g3 +#endif + +#endif /* _BPF_JIT_H */ diff --git a/arch/sparc/net/bpf_jit_asm.S b/arch/sparc/net/bpf_jit_asm.S new file mode 100644 index 00000000000..9d016c7017f --- /dev/null +++ b/arch/sparc/net/bpf_jit_asm.S @@ -0,0 +1,205 @@ +#include <asm/ptrace.h> + +#include "bpf_jit.h" + +#ifdef CONFIG_SPARC64 +#define SAVE_SZ 176 +#define SCRATCH_OFF STACK_BIAS + 128 +#define BE_PTR(label) be,pn %xcc, label +#else +#define SAVE_SZ 96 +#define SCRATCH_OFF 72 +#define BE_PTR(label) be label +#endif + +#define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */ + + .text + .globl bpf_jit_load_word +bpf_jit_load_word: + cmp r_OFF, 0 + bl bpf_slow_path_word_neg + nop + .globl bpf_jit_load_word_positive_offset +bpf_jit_load_word_positive_offset: + sub r_HEADLEN, r_OFF, r_TMP + cmp r_TMP, 3 + ble bpf_slow_path_word + add r_SKB_DATA, r_OFF, r_TMP + andcc r_TMP, 3, %g0 + bne load_word_unaligned + nop + retl + ld [r_TMP], r_A +load_word_unaligned: + ldub [r_TMP + 0x0], r_OFF + ldub [r_TMP + 0x1], r_TMP2 + sll r_OFF, 8, r_OFF + or r_OFF, r_TMP2, r_OFF + ldub [r_TMP + 0x2], r_TMP2 + sll r_OFF, 8, r_OFF + or r_OFF, r_TMP2, r_OFF + ldub [r_TMP + 0x3], r_TMP2 + sll r_OFF, 8, r_OFF + retl + or r_OFF, r_TMP2, r_A + + .globl bpf_jit_load_half +bpf_jit_load_half: + cmp r_OFF, 0 + bl bpf_slow_path_half_neg + nop + .globl bpf_jit_load_half_positive_offset +bpf_jit_load_half_positive_offset: + sub r_HEADLEN, r_OFF, r_TMP + cmp r_TMP, 1 + ble bpf_slow_path_half + add r_SKB_DATA, r_OFF, r_TMP + andcc r_TMP, 1, %g0 + bne load_half_unaligned + nop + retl + lduh [r_TMP], r_A +load_half_unaligned: + ldub [r_TMP + 0x0], r_OFF + ldub [r_TMP + 0x1], r_TMP2 + sll r_OFF, 8, r_OFF + retl + or r_OFF, r_TMP2, r_A + + .globl bpf_jit_load_byte +bpf_jit_load_byte: + cmp r_OFF, 0 + bl bpf_slow_path_byte_neg + nop + .globl bpf_jit_load_byte_positive_offset +bpf_jit_load_byte_positive_offset: + cmp r_OFF, r_HEADLEN + bge bpf_slow_path_byte + nop + retl + ldub [r_SKB_DATA + r_OFF], r_A + + .globl bpf_jit_load_byte_msh +bpf_jit_load_byte_msh: + cmp r_OFF, 0 + bl bpf_slow_path_byte_msh_neg + nop + .globl bpf_jit_load_byte_msh_positive_offset +bpf_jit_load_byte_msh_positive_offset: + cmp r_OFF, r_HEADLEN + bge bpf_slow_path_byte_msh + nop + ldub [r_SKB_DATA + r_OFF], r_OFF + and r_OFF, 0xf, r_OFF + 
retl + sll r_OFF, 2, r_X + +#define bpf_slow_path_common(LEN) \ + save %sp, -SAVE_SZ, %sp; \ + mov %i0, %o0; \ + mov r_OFF, %o1; \ + add %fp, SCRATCH_OFF, %o2; \ + call skb_copy_bits; \ + mov (LEN), %o3; \ + cmp %o0, 0; \ + restore; + +bpf_slow_path_word: + bpf_slow_path_common(4) + bl bpf_error + ld [%sp + SCRATCH_OFF], r_A + retl + nop +bpf_slow_path_half: + bpf_slow_path_common(2) + bl bpf_error + lduh [%sp + SCRATCH_OFF], r_A + retl + nop +bpf_slow_path_byte: + bpf_slow_path_common(1) + bl bpf_error + ldub [%sp + SCRATCH_OFF], r_A + retl + nop +bpf_slow_path_byte_msh: + bpf_slow_path_common(1) + bl bpf_error + ldub [%sp + SCRATCH_OFF], r_A + and r_OFF, 0xf, r_OFF + retl + sll r_OFF, 2, r_X + +#define bpf_negative_common(LEN) \ + save %sp, -SAVE_SZ, %sp; \ + mov %i0, %o0; \ + mov r_OFF, %o1; \ + call bpf_internal_load_pointer_neg_helper; \ + mov (LEN), %o2; \ + mov %o0, r_TMP; \ + cmp %o0, 0; \ + BE_PTR(bpf_error); \ + restore; + +bpf_slow_path_word_neg: + sethi %hi(SKF_MAX_NEG_OFF), r_TMP + cmp r_OFF, r_TMP + bl bpf_error + nop + .globl bpf_jit_load_word_negative_offset +bpf_jit_load_word_negative_offset: + bpf_negative_common(4) + andcc r_TMP, 3, %g0 + bne load_word_unaligned + nop + retl + ld [r_TMP], r_A + +bpf_slow_path_half_neg: + sethi %hi(SKF_MAX_NEG_OFF), r_TMP + cmp r_OFF, r_TMP + bl bpf_error + nop + .globl bpf_jit_load_half_negative_offset +bpf_jit_load_half_negative_offset: + bpf_negative_common(2) + andcc r_TMP, 1, %g0 + bne load_half_unaligned + nop + retl + lduh [r_TMP], r_A + +bpf_slow_path_byte_neg: + sethi %hi(SKF_MAX_NEG_OFF), r_TMP + cmp r_OFF, r_TMP + bl bpf_error + nop + .globl bpf_jit_load_byte_negative_offset +bpf_jit_load_byte_negative_offset: + bpf_negative_common(1) + retl + ldub [r_TMP], r_A + +bpf_slow_path_byte_msh_neg: + sethi %hi(SKF_MAX_NEG_OFF), r_TMP + cmp r_OFF, r_TMP + bl bpf_error + nop + .globl bpf_jit_load_byte_msh_negative_offset +bpf_jit_load_byte_msh_negative_offset: + bpf_negative_common(1) + ldub [r_TMP], r_OFF + and r_OFF, 0xf, r_OFF + retl + sll r_OFF, 2, r_X + +bpf_error: + /* Make the JIT program return zero. The JIT epilogue + * stores away the original %o7 into r_saved_O7. The + * normal leaf function return is to use "retl" which + * would evalute to "jmpl %o7 + 8, %g0" but we want to + * use the saved value thus the sequence you see here. + */ + jmpl r_saved_O7 + 8, %g0 + clr %o0 diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c new file mode 100644 index 00000000000..1a69244e785 --- /dev/null +++ b/arch/sparc/net/bpf_jit_comp.c @@ -0,0 +1,802 @@ +#include <linux/moduleloader.h> +#include <linux/workqueue.h> +#include <linux/netdevice.h> +#include <linux/filter.h> +#include <linux/cache.h> + +#include <asm/cacheflush.h> +#include <asm/ptrace.h> + +#include "bpf_jit.h" + +int bpf_jit_enable __read_mostly; + +static inline bool is_simm13(unsigned int value) +{ + return value + 0x1000 < 0x2000; +} + +static void bpf_flush_icache(void *start_, void *end_) +{ +#ifdef CONFIG_SPARC64 + /* Cheetah's I-cache is fully coherent. 
*/ + if (tlb_type == spitfire) { + unsigned long start = (unsigned long) start_; + unsigned long end = (unsigned long) end_; + + start &= ~7UL; + end = (end + 7UL) & ~7UL; + while (start < end) { + flushi(start); + start += 32; + } + } +#endif +} + +#define SEEN_DATAREF 1 /* might call external helpers */ +#define SEEN_XREG 2 /* ebx is used */ +#define SEEN_MEM 4 /* use mem[] for temporary storage */ + +#define S13(X) ((X) & 0x1fff) +#define IMMED 0x00002000 +#define RD(X) ((X) << 25) +#define RS1(X) ((X) << 14) +#define RS2(X) ((X)) +#define OP(X) ((X) << 30) +#define OP2(X) ((X) << 22) +#define OP3(X) ((X) << 19) +#define COND(X) ((X) << 25) +#define F1(X) OP(X) +#define F2(X, Y) (OP(X) | OP2(Y)) +#define F3(X, Y) (OP(X) | OP3(Y)) + +#define CONDN COND(0x0) +#define CONDE COND(0x1) +#define CONDLE COND(0x2) +#define CONDL COND(0x3) +#define CONDLEU COND(0x4) +#define CONDCS COND(0x5) +#define CONDNEG COND(0x6) +#define CONDVC COND(0x7) +#define CONDA COND(0x8) +#define CONDNE COND(0x9) +#define CONDG COND(0xa) +#define CONDGE COND(0xb) +#define CONDGU COND(0xc) +#define CONDCC COND(0xd) +#define CONDPOS COND(0xe) +#define CONDVS COND(0xf) + +#define CONDGEU CONDCC +#define CONDLU CONDCS + +#define WDISP22(X) (((X) >> 2) & 0x3fffff) + +#define BA (F2(0, 2) | CONDA) +#define BGU (F2(0, 2) | CONDGU) +#define BLEU (F2(0, 2) | CONDLEU) +#define BGEU (F2(0, 2) | CONDGEU) +#define BLU (F2(0, 2) | CONDLU) +#define BE (F2(0, 2) | CONDE) +#define BNE (F2(0, 2) | CONDNE) + +#ifdef CONFIG_SPARC64 +#define BNE_PTR (F2(0, 1) | CONDNE | (2 << 20)) +#else +#define BNE_PTR BNE +#endif + +#define SETHI(K, REG) \ + (F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff)) +#define OR_LO(K, REG) \ + (F3(2, 0x02) | IMMED | RS1(REG) | ((K) & 0x3ff) | RD(REG)) + +#define ADD F3(2, 0x00) +#define AND F3(2, 0x01) +#define ANDCC F3(2, 0x11) +#define OR F3(2, 0x02) +#define SUB F3(2, 0x04) +#define SUBCC F3(2, 0x14) +#define MUL F3(2, 0x0a) /* umul */ +#define DIV F3(2, 0x0e) /* udiv */ +#define SLL F3(2, 0x25) +#define SRL F3(2, 0x26) +#define JMPL F3(2, 0x38) +#define CALL F1(1) +#define BR F2(0, 0x01) +#define RD_Y F3(2, 0x28) +#define WR_Y F3(2, 0x30) + +#define LD32 F3(3, 0x00) +#define LD8 F3(3, 0x01) +#define LD16 F3(3, 0x02) +#define LD64 F3(3, 0x0b) +#define ST32 F3(3, 0x04) + +#ifdef CONFIG_SPARC64 +#define LDPTR LD64 +#define BASE_STACKFRAME 176 +#else +#define LDPTR LD32 +#define BASE_STACKFRAME 96 +#endif + +#define LD32I (LD32 | IMMED) +#define LD8I (LD8 | IMMED) +#define LD16I (LD16 | IMMED) +#define LD64I (LD64 | IMMED) +#define LDPTRI (LDPTR | IMMED) +#define ST32I (ST32 | IMMED) + +#define emit_nop() \ +do { \ + *prog++ = SETHI(0, G0); \ +} while (0) + +#define emit_neg() \ +do { /* sub %g0, r_A, r_A */ \ + *prog++ = SUB | RS1(G0) | RS2(r_A) | RD(r_A); \ +} while (0) + +#define emit_reg_move(FROM, TO) \ +do { /* or %g0, FROM, TO */ \ + *prog++ = OR | RS1(G0) | RS2(FROM) | RD(TO); \ +} while (0) + +#define emit_clear(REG) \ +do { /* or %g0, %g0, REG */ \ + *prog++ = OR | RS1(G0) | RS2(G0) | RD(REG); \ +} while (0) + +#define emit_set_const(K, REG) \ +do { /* sethi %hi(K), REG */ \ + *prog++ = SETHI(K, REG); \ + /* or REG, %lo(K), REG */ \ + *prog++ = OR_LO(K, REG); \ +} while (0) + + /* Emit + * + * OP r_A, r_X, r_A + */ +#define emit_alu_X(OPCODE) \ +do { \ + seen |= SEEN_XREG; \ + *prog++ = OPCODE | RS1(r_A) | RS2(r_X) | RD(r_A); \ +} while (0) + + /* Emit either: + * + * OP r_A, K, r_A + * + * or + * + * sethi %hi(K), r_TMP + * or r_TMP, %lo(K), r_TMP + * OP r_A, r_TMP, r_A + * + * depending upon 
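Editor's illustration: the field macros defined above (OP/OP2/OP3, RD, RS1, IMMED, SETHI, OR_LO) assemble raw SPARC instruction words, and emit_set_const() uses the classic sethi/or pair to build a full 32-bit constant: 22 high bits in the sethi immediate, 10 low bits in the or. A small sketch that encodes the pair and re-derives the constant from the two words as a check (the register number follows the header above; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Field packers copied in spirit from the patch; values are assumed
 * to already fit their fields. */
#define OP(x)   ((uint32_t)(x) << 30)
#define OP2(x)  ((uint32_t)(x) << 22)
#define OP3(x)  ((uint32_t)(x) << 19)
#define RD(x)   ((uint32_t)(x) << 25)
#define RS1(x)  ((uint32_t)(x) << 14)
#define IMMED   0x00002000u

#define G1 0x01

/* sethi %hi(k), reg : bits 31..10 of k go into the 22-bit immediate */
static uint32_t sethi(uint32_t k, int reg)
{
    return OP(0) | OP2(0x4) | RD(reg) | ((k >> 10) & 0x3fffff);
}

/* or reg, %lo(k), reg : the remaining low 10 bits */
static uint32_t or_lo(uint32_t k, int reg)
{
    return OP(2) | OP3(0x02) | IMMED | RS1(reg) | (k & 0x3ff) | RD(reg);
}

int main(void)
{
    uint32_t k = 0xdeadbeef;
    uint32_t i1 = sethi(k, G1), i2 = or_lo(k, G1);

    /* Reassemble the constant from the two immediates as a check. */
    uint32_t hi22 = i1 & 0x3fffff, lo10 = i2 & 0x3ff;

    printf("sethi = 0x%08x, or = 0x%08x, rebuilt = 0x%08x\n",
           (unsigned int)i1, (unsigned int)i2,
           (unsigned int)((hi22 << 10) | lo10));
    return 0;
}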
whether K fits in a signed 13-bit + * immediate instruction field. Emit nothing if K + * is zero. + */ +#define emit_alu_K(OPCODE, K) \ +do { \ + if (K) { \ + unsigned int _insn = OPCODE; \ + _insn |= RS1(r_A) | RD(r_A); \ + if (is_simm13(K)) { \ + *prog++ = _insn | IMMED | S13(K); \ + } else { \ + emit_set_const(K, r_TMP); \ + *prog++ = _insn | RS2(r_TMP); \ + } \ + } \ +} while (0) + +#define emit_loadimm(K, DEST) \ +do { \ + if (is_simm13(K)) { \ + /* or %g0, K, DEST */ \ + *prog++ = OR | IMMED | RS1(G0) | S13(K) | RD(DEST); \ + } else { \ + emit_set_const(K, DEST); \ + } \ +} while (0) + +#define emit_loadptr(BASE, STRUCT, FIELD, DEST) \ +do { unsigned int _off = offsetof(STRUCT, FIELD); \ + BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(void *)); \ + *prog++ = LDPTRI | RS1(BASE) | S13(_off) | RD(DEST); \ +} while (0) + +#define emit_load32(BASE, STRUCT, FIELD, DEST) \ +do { unsigned int _off = offsetof(STRUCT, FIELD); \ + BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u32)); \ + *prog++ = LD32I | RS1(BASE) | S13(_off) | RD(DEST); \ +} while (0) + +#define emit_load16(BASE, STRUCT, FIELD, DEST) \ +do { unsigned int _off = offsetof(STRUCT, FIELD); \ + BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u16)); \ + *prog++ = LD16I | RS1(BASE) | S13(_off) | RD(DEST); \ +} while (0) + +#define __emit_load8(BASE, STRUCT, FIELD, DEST) \ +do { unsigned int _off = offsetof(STRUCT, FIELD); \ + *prog++ = LD8I | RS1(BASE) | S13(_off) | RD(DEST); \ +} while (0) + +#define emit_load8(BASE, STRUCT, FIELD, DEST) \ +do { BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8)); \ + __emit_load8(BASE, STRUCT, FIELD, DEST); \ +} while (0) + +#define emit_ldmem(OFF, DEST) \ +do { *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(DEST); \ +} while (0) + +#define emit_stmem(OFF, SRC) \ +do { *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(SRC); \ +} while (0) + +#ifdef CONFIG_SMP +#ifdef CONFIG_SPARC64 +#define emit_load_cpu(REG) \ + emit_load16(G6, struct thread_info, cpu, REG) +#else +#define emit_load_cpu(REG) \ + emit_load32(G6, struct thread_info, cpu, REG) +#endif +#else +#define emit_load_cpu(REG) emit_clear(REG) +#endif + +#define emit_skb_loadptr(FIELD, DEST) \ + emit_loadptr(r_SKB, struct sk_buff, FIELD, DEST) +#define emit_skb_load32(FIELD, DEST) \ + emit_load32(r_SKB, struct sk_buff, FIELD, DEST) +#define emit_skb_load16(FIELD, DEST) \ + emit_load16(r_SKB, struct sk_buff, FIELD, DEST) +#define __emit_skb_load8(FIELD, DEST) \ + __emit_load8(r_SKB, struct sk_buff, FIELD, DEST) +#define emit_skb_load8(FIELD, DEST) \ + emit_load8(r_SKB, struct sk_buff, FIELD, DEST) + +#define emit_jmpl(BASE, IMM_OFF, LREG) \ + *prog++ = (JMPL | IMMED | RS1(BASE) | S13(IMM_OFF) | RD(LREG)) + +#define emit_call(FUNC) \ +do { void *_here = image + addrs[i] - 8; \ + unsigned int _off = (void *)(FUNC) - _here; \ + *prog++ = CALL | (((_off) >> 2) & 0x3fffffff); \ + emit_nop(); \ +} while (0) + +#define emit_branch(BR_OPC, DEST) \ +do { unsigned int _here = addrs[i] - 8; \ + *prog++ = BR_OPC | WDISP22((DEST) - _here); \ +} while (0) + +#define emit_branch_off(BR_OPC, OFF) \ +do { *prog++ = BR_OPC | WDISP22(OFF); \ +} while (0) + +#define emit_jump(DEST) emit_branch(BA, DEST) + +#define emit_read_y(REG) *prog++ = RD_Y | RD(REG) +#define emit_write_y(REG) *prog++ = WR_Y | IMMED | RS1(REG) | S13(0) + +#define emit_cmp(R1, R2) \ + *prog++ = (SUBCC | RS1(R1) | RS2(R2) | RD(G0)) + +#define emit_cmpi(R1, IMM) \ + *prog++ = (SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0)); + +#define emit_btst(R1, R2) \ + *prog++ = (ANDCC | RS1(R1) | 
RS2(R2) | RD(G0)) + +#define emit_btsti(R1, IMM) \ + *prog++ = (ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0)); + +#define emit_sub(R1, R2, R3) \ + *prog++ = (SUB | RS1(R1) | RS2(R2) | RD(R3)) + +#define emit_subi(R1, IMM, R3) \ + *prog++ = (SUB | IMMED | RS1(R1) | S13(IMM) | RD(R3)) + +#define emit_add(R1, R2, R3) \ + *prog++ = (ADD | RS1(R1) | RS2(R2) | RD(R3)) + +#define emit_addi(R1, IMM, R3) \ + *prog++ = (ADD | IMMED | RS1(R1) | S13(IMM) | RD(R3)) + +#define emit_alloc_stack(SZ) \ + *prog++ = (SUB | IMMED | RS1(SP) | S13(SZ) | RD(SP)) + +#define emit_release_stack(SZ) \ + *prog++ = (ADD | IMMED | RS1(SP) | S13(SZ) | RD(SP)) + +/* A note about branch offset calculations. The addrs[] array, + * indexed by BPF instruction, records the address after all the + * sparc instructions emitted for that BPF instruction. + * + * The most common case is to emit a branch at the end of such + * a code sequence. So this would be two instructions, the + * branch and it's delay slot. + * + * Therefore by default the branch emitters calculate the branch + * offset field as: + * + * destination - (addrs[i] - 8) + * + * This "addrs[i] - 8" is the address of the branch itself or + * what "." would be in assembler notation. The "8" part is + * how we take into consideration the branch and it's delay + * slot mentioned above. + * + * Sometimes we need to emit a branch earlier in the code + * sequence. And in these situations we adjust "destination" + * to accomodate this difference. For example, if we needed + * to emit a branch (and it's delay slot) right before the + * final instruction emitted for a BPF opcode, we'd use + * "destination + 4" instead of just plain "destination" above. + * + * This is why you see all of these funny emit_branch() and + * emit_jump() calls with adjusted offsets. + */ + +void bpf_jit_compile(struct sk_filter *fp) +{ + unsigned int cleanup_addr, proglen, oldproglen = 0; + u32 temp[8], *prog, *func, seen = 0, pass; + const struct sock_filter *filter = fp->insns; + int i, flen = fp->len, pc_ret0 = -1; + unsigned int *addrs; + void *image; + + if (!bpf_jit_enable) + return; + + addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL); + if (addrs == NULL) + return; + + /* Before first pass, make a rough estimation of addrs[] + * each bpf instruction is translated to less than 64 bytes + */ + for (proglen = 0, i = 0; i < flen; i++) { + proglen += 64; + addrs[i] = proglen; + } + cleanup_addr = proglen; /* epilogue address */ + image = NULL; + for (pass = 0; pass < 10; pass++) { + u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen; + + /* no prologue/epilogue for trivial filters (RET something) */ + proglen = 0; + prog = temp; + + /* Prologue */ + if (seen_or_pass0) { + if (seen_or_pass0 & SEEN_MEM) { + unsigned int sz = BASE_STACKFRAME; + sz += BPF_MEMWORDS * sizeof(u32); + emit_alloc_stack(sz); + } + + /* Make sure we dont leek kernel memory. */ + if (seen_or_pass0 & SEEN_XREG) + emit_clear(r_X); + + /* If this filter needs to access skb data, + * load %o4 and %o5 with: + * %o4 = skb->len - skb->data_len + * %o5 = skb->data + * And also back up %o7 into r_saved_O7 so we can + * invoke the stubs using 'call'. 
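Editor's illustration: the "note about branch offset calculations" above is the key to reading the emitters: addrs[i] is the byte offset just past the code generated for BPF instruction i, and a branch plus its delay slot are normally the last two words of that code, so "." is addrs[i] - 8. A toy calculation with made-up instruction sizes (nothing below comes from the patch itself, only the formula):

#include <stdint.h>
#include <stdio.h>

/* 22-bit word displacement field, as in the patch's WDISP22(). */
#define WDISP22(x) ((((uint32_t)(x)) >> 2) & 0x3fffff)

int main(void)
{
    /* addrs[i] = offset just past the sparc code for BPF insn i.
     * The sizes here are invented purely for illustration. */
    unsigned int addrs[3] = { 8, 24, 32 };
    int i = 1;                      /* insn 1 ends in branch + delay slot */
    unsigned int dest = addrs[2];   /* branch past insn 2 */

    /* The branch itself sits two words before addrs[i]. */
    unsigned int here = addrs[i] - 8;
    int byte_off = (int)(dest - here);

    printf("branch at %u -> %u: byte offset %d, wdisp22 = 0x%06x\n",
           here, dest, byte_off, (unsigned int)WDISP22(byte_off));
    return 0;
}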
+ */ + if (seen_or_pass0 & SEEN_DATAREF) { + emit_load32(r_SKB, struct sk_buff, len, r_HEADLEN); + emit_load32(r_SKB, struct sk_buff, data_len, r_TMP); + emit_sub(r_HEADLEN, r_TMP, r_HEADLEN); + emit_loadptr(r_SKB, struct sk_buff, data, r_SKB_DATA); + } + } + emit_reg_move(O7, r_saved_O7); + + switch (filter[0].code) { + case BPF_S_RET_K: + case BPF_S_LD_W_LEN: + case BPF_S_ANC_PROTOCOL: + case BPF_S_ANC_PKTTYPE: + case BPF_S_ANC_IFINDEX: + case BPF_S_ANC_MARK: + case BPF_S_ANC_RXHASH: + case BPF_S_ANC_CPU: + case BPF_S_ANC_QUEUE: + case BPF_S_LD_W_ABS: + case BPF_S_LD_H_ABS: + case BPF_S_LD_B_ABS: + /* The first instruction sets the A register (or is + * a "RET 'constant'") + */ + break; + default: + /* Make sure we dont leak kernel information to the + * user. + */ + emit_clear(r_A); /* A = 0 */ + } + + for (i = 0; i < flen; i++) { + unsigned int K = filter[i].k; + unsigned int t_offset; + unsigned int f_offset; + u32 t_op, f_op; + int ilen; + + switch (filter[i].code) { + case BPF_S_ALU_ADD_X: /* A += X; */ + emit_alu_X(ADD); + break; + case BPF_S_ALU_ADD_K: /* A += K; */ + emit_alu_K(ADD, K); + break; + case BPF_S_ALU_SUB_X: /* A -= X; */ + emit_alu_X(SUB); + break; + case BPF_S_ALU_SUB_K: /* A -= K */ + emit_alu_K(SUB, K); + break; + case BPF_S_ALU_AND_X: /* A &= X */ + emit_alu_X(AND); + break; + case BPF_S_ALU_AND_K: /* A &= K */ + emit_alu_K(AND, K); + break; + case BPF_S_ALU_OR_X: /* A |= X */ + emit_alu_X(OR); + break; + case BPF_S_ALU_OR_K: /* A |= K */ + emit_alu_K(OR, K); + break; + case BPF_S_ALU_LSH_X: /* A <<= X */ + emit_alu_X(SLL); + break; + case BPF_S_ALU_LSH_K: /* A <<= K */ + emit_alu_K(SLL, K); + break; + case BPF_S_ALU_RSH_X: /* A >>= X */ + emit_alu_X(SRL); + break; + case BPF_S_ALU_RSH_K: /* A >>= K */ + emit_alu_K(SRL, K); + break; + case BPF_S_ALU_MUL_X: /* A *= X; */ + emit_alu_X(MUL); + break; + case BPF_S_ALU_MUL_K: /* A *= K */ + emit_alu_K(MUL, K); + break; + case BPF_S_ALU_DIV_K: /* A /= K */ + emit_alu_K(MUL, K); + emit_read_y(r_A); + break; + case BPF_S_ALU_DIV_X: /* A /= X; */ + emit_cmpi(r_X, 0); + if (pc_ret0 > 0) { + t_offset = addrs[pc_ret0 - 1]; +#ifdef CONFIG_SPARC32 + emit_branch(BE, t_offset + 20); +#else + emit_branch(BE, t_offset + 8); +#endif + emit_nop(); /* delay slot */ + } else { + emit_branch_off(BNE, 16); + emit_nop(); +#ifdef CONFIG_SPARC32 + emit_jump(cleanup_addr + 20); +#else + emit_jump(cleanup_addr + 8); +#endif + emit_clear(r_A); + } + emit_write_y(G0); +#ifdef CONFIG_SPARC32 + /* The Sparc v8 architecture requires + * three instructions between a %y + * register write and the first use. 
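Editor's illustration: the BPF_S_ALU_DIV_K case above emits a umul followed by a read of %y rather than a divide. By this point the core filter checker is expected to have replaced K with a precomputed reciprocal (reciprocal_value(), outside this patch), so A/K becomes the high 32 bits of a 32x32 multiply, which umul leaves in %y. A hedged userspace sketch of that arithmetic; the helper names mirror the generic kernel ones and the sample values were chosen so the result is exact:

#include <stdint.h>
#include <stdio.h>

/* Precompute R so that (A * R) >> 32 approximates A / k. */
static uint32_t reciprocal_value(uint32_t k)
{
    return (uint32_t)(((1ULL << 32) + k - 1) / k);
}

static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
    /* On sparc, umul leaves these high 32 bits in the %y register. */
    return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
    uint32_t a = 1000000, k = 7;
    uint32_t r = reciprocal_value(k);

    printf("%u / %u = %u (reciprocal path: %u)\n",
           a, k, a / k, reciprocal_divide(a, r));
    return 0;
}

The JIT simply mirrors what the interpreter of this era does for DIV_K; the reciprocal form is an approximation in general, which is why the sample above sticks to values where it is exact.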
+ */ + emit_nop(); + emit_nop(); + emit_nop(); +#endif + emit_alu_X(DIV); + break; + case BPF_S_ALU_NEG: + emit_neg(); + break; + case BPF_S_RET_K: + if (!K) { + if (pc_ret0 == -1) + pc_ret0 = i; + emit_clear(r_A); + } else { + emit_loadimm(K, r_A); + } + /* Fallthrough */ + case BPF_S_RET_A: + if (seen_or_pass0) { + if (i != flen - 1) { + emit_jump(cleanup_addr); + emit_nop(); + break; + } + if (seen_or_pass0 & SEEN_MEM) { + unsigned int sz = BASE_STACKFRAME; + sz += BPF_MEMWORDS * sizeof(u32); + emit_release_stack(sz); + } + } + /* jmpl %r_saved_O7 + 8, %g0 */ + emit_jmpl(r_saved_O7, 8, G0); + emit_reg_move(r_A, O0); /* delay slot */ + break; + case BPF_S_MISC_TAX: + seen |= SEEN_XREG; + emit_reg_move(r_A, r_X); + break; + case BPF_S_MISC_TXA: + seen |= SEEN_XREG; + emit_reg_move(r_X, r_A); + break; + case BPF_S_ANC_CPU: + emit_load_cpu(r_A); + break; + case BPF_S_ANC_PROTOCOL: + emit_skb_load16(protocol, r_A); + break; +#if 0 + /* GCC won't let us take the address of + * a bit field even though we very much + * know what we are doing here. + */ + case BPF_S_ANC_PKTTYPE: + __emit_skb_load8(pkt_type, r_A); + emit_alu_K(SRL, 5); + break; +#endif + case BPF_S_ANC_IFINDEX: + emit_skb_loadptr(dev, r_A); + emit_cmpi(r_A, 0); + emit_branch(BNE_PTR, cleanup_addr + 4); + emit_nop(); + emit_load32(r_A, struct net_device, ifindex, r_A); + break; + case BPF_S_ANC_MARK: + emit_skb_load32(mark, r_A); + break; + case BPF_S_ANC_QUEUE: + emit_skb_load16(queue_mapping, r_A); + break; + case BPF_S_ANC_HATYPE: + emit_skb_loadptr(dev, r_A); + emit_cmpi(r_A, 0); + emit_branch(BNE_PTR, cleanup_addr + 4); + emit_nop(); + emit_load16(r_A, struct net_device, type, r_A); + break; + case BPF_S_ANC_RXHASH: + emit_skb_load32(rxhash, r_A); + break; + + case BPF_S_LD_IMM: + emit_loadimm(K, r_A); + break; + case BPF_S_LDX_IMM: + emit_loadimm(K, r_X); + break; + case BPF_S_LD_MEM: + emit_ldmem(K * 4, r_A); + break; + case BPF_S_LDX_MEM: + emit_ldmem(K * 4, r_X); + break; + case BPF_S_ST: + emit_stmem(K * 4, r_A); + break; + case BPF_S_STX: + emit_stmem(K * 4, r_X); + break; + +#define CHOOSE_LOAD_FUNC(K, func) \ + ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? 
func##_negative_offset : func) : func##_positive_offset) + + case BPF_S_LD_W_ABS: + func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word); +common_load: seen |= SEEN_DATAREF; + emit_loadimm(K, r_OFF); + emit_call(func); + break; + case BPF_S_LD_H_ABS: + func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half); + goto common_load; + case BPF_S_LD_B_ABS: + func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte); + goto common_load; + case BPF_S_LDX_B_MSH: + func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh); + goto common_load; + case BPF_S_LD_W_IND: + func = bpf_jit_load_word; +common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; + if (K) { + if (is_simm13(K)) { + emit_addi(r_X, K, r_OFF); + } else { + emit_loadimm(K, r_TMP); + emit_add(r_X, r_TMP, r_OFF); + } + } else { + emit_reg_move(r_X, r_OFF); + } + emit_call(func); + break; + case BPF_S_LD_H_IND: + func = bpf_jit_load_half; + goto common_load_ind; + case BPF_S_LD_B_IND: + func = bpf_jit_load_byte; + goto common_load_ind; + case BPF_S_JMP_JA: + emit_jump(addrs[i + K]); + emit_nop(); + break; + +#define COND_SEL(CODE, TOP, FOP) \ + case CODE: \ + t_op = TOP; \ + f_op = FOP; \ + goto cond_branch + + COND_SEL(BPF_S_JMP_JGT_K, BGU, BLEU); + COND_SEL(BPF_S_JMP_JGE_K, BGEU, BLU); + COND_SEL(BPF_S_JMP_JEQ_K, BE, BNE); + COND_SEL(BPF_S_JMP_JSET_K, BNE, BE); + COND_SEL(BPF_S_JMP_JGT_X, BGU, BLEU); + COND_SEL(BPF_S_JMP_JGE_X, BGEU, BLU); + COND_SEL(BPF_S_JMP_JEQ_X, BE, BNE); + COND_SEL(BPF_S_JMP_JSET_X, BNE, BE); + +cond_branch: f_offset = addrs[i + filter[i].jf]; + t_offset = addrs[i + filter[i].jt]; + + /* same targets, can avoid doing the test :) */ + if (filter[i].jt == filter[i].jf) { + emit_jump(t_offset); + emit_nop(); + break; + } + + switch (filter[i].code) { + case BPF_S_JMP_JGT_X: + case BPF_S_JMP_JGE_X: + case BPF_S_JMP_JEQ_X: + seen |= SEEN_XREG; + emit_cmp(r_A, r_X); + break; + case BPF_S_JMP_JSET_X: + seen |= SEEN_XREG; + emit_btst(r_A, r_X); + break; + case BPF_S_JMP_JEQ_K: + case BPF_S_JMP_JGT_K: + case BPF_S_JMP_JGE_K: + if (is_simm13(K)) { + emit_cmpi(r_A, K); + } else { + emit_loadimm(K, r_TMP); + emit_cmp(r_A, r_TMP); + } + break; + case BPF_S_JMP_JSET_K: + if (is_simm13(K)) { + emit_btsti(r_A, K); + } else { + emit_loadimm(K, r_TMP); + emit_btst(r_A, r_TMP); + } + break; + } + if (filter[i].jt != 0) { + if (filter[i].jf) + t_offset += 8; + emit_branch(t_op, t_offset); + emit_nop(); /* delay slot */ + if (filter[i].jf) { + emit_jump(f_offset); + emit_nop(); + } + break; + } + emit_branch(f_op, f_offset); + emit_nop(); /* delay slot */ + break; + + default: + /* hmm, too complex filter, give up with jit compiler */ + goto out; + } + ilen = (void *) prog - (void *) temp; + if (image) { + if (unlikely(proglen + ilen > oldproglen)) { + pr_err("bpb_jit_compile fatal error\n"); + kfree(addrs); + module_free(NULL, image); + return; + } + memcpy(image + proglen, temp, ilen); + } + proglen += ilen; + addrs[i] = proglen; + prog = temp; + } + /* last bpf instruction is always a RET : + * use it to give the cleanup instruction(s) addr + */ + cleanup_addr = proglen - 8; /* jmpl; mov r_A,%o0; */ + if (seen_or_pass0 & SEEN_MEM) + cleanup_addr -= 4; /* add %sp, X, %sp; */ + + if (image) { + if (proglen != oldproglen) + pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", + proglen, oldproglen); + break; + } + if (proglen == oldproglen) { + image = module_alloc(max_t(unsigned int, + proglen, + sizeof(struct work_struct))); + if (!image) + goto out; + } + oldproglen = proglen; + } + + if (bpf_jit_enable > 1) + pr_err("flen=%d proglen=%u pass=%d image=%p\n", + flen, 
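Editor's illustration: the outer loop above starts from a worst-case estimate of 64 bytes per BPF instruction and re-runs code generation until the total length stops changing; only then is the executable image allocated and a final pass performed that actually writes into it. A condensed model of that fixed-point iteration (the per-instruction sizes and the shrink rule are invented for the example):

#include <stdio.h>

int main(void)
{
    unsigned int addrs[4];
    unsigned int proglen = 0, oldproglen = 0;
    int pass, i, have_image = 0;

    /* Rough first estimate: at most 64 bytes per BPF insn. */
    for (i = 0; i < 4; i++) {
        proglen += 64;
        addrs[i] = proglen;
    }

    for (pass = 0; pass < 10; pass++) {
        proglen = 0;
        for (i = 0; i < 4; i++) {
            /* Pretend each insn costs 8 bytes, plus 8 more while its
             * recorded address still looks "far away". */
            unsigned int ilen = 8 + (addrs[i] > 48 ? 8 : 0);

            proglen += ilen;
            addrs[i] = proglen;
        }
        printf("pass %d: proglen = %u\n", pass, proglen);

        if (have_image)
            break;              /* this pass wrote into the image */
        if (proglen == oldproglen)
            have_image = 1;     /* size is stable: allocate image */
        oldproglen = proglen;
    }
    return 0;
}

Running it shows the estimate shrinking monotonically (64, 40, 32, 32) until two passes agree, which is the condition the real JIT uses before calling module_alloc().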
proglen, pass, image); + + if (image) { + if (bpf_jit_enable > 1) + print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS, + 16, 1, image, proglen, false); + bpf_flush_icache(image, image + proglen); + fp->bpf_func = (void *)image; + } +out: + kfree(addrs); + return; +} + +static void jit_free_defer(struct work_struct *arg) +{ + module_free(NULL, arg); +} + +/* run from softirq, we must use a work_struct to call + * module_free() from process context + */ +void bpf_jit_free(struct sk_filter *fp) +{ + if (fp->bpf_func != sk_run_filter) { + struct work_struct *work = (struct work_struct *)fp->bpf_func; + + INIT_WORK(work, jit_free_defer); + schedule_work(work); + } +} diff --git a/arch/xtensa/configs/common_defconfig b/arch/xtensa/configs/common_defconfig index b90038e40dd..a182a4e6d68 100644 --- a/arch/xtensa/configs/common_defconfig +++ b/arch/xtensa/configs/common_defconfig @@ -333,11 +333,6 @@ CONFIG_XT2000_SONIC=y # CONFIG_S2IO is not set # -# Token Ring devices -# -# CONFIG_TR is not set - -# # Wireless LAN (non-hamradio) # CONFIG_NET_RADIO=y diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index 8d3a056ebee..533de9550a8 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -397,9 +397,9 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize; rblkcipher.ivsize = alg->cra_ablkcipher.ivsize; - NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER, - sizeof(struct crypto_report_blkcipher), &rblkcipher); - + if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, + sizeof(struct crypto_report_blkcipher), &rblkcipher)) + goto nla_put_failure; return 0; nla_put_failure: @@ -478,9 +478,9 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg) rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize; rblkcipher.ivsize = alg->cra_ablkcipher.ivsize; - NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER, - sizeof(struct crypto_report_blkcipher), &rblkcipher); - + if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, + sizeof(struct crypto_report_blkcipher), &rblkcipher)) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/crypto/aead.c b/crypto/aead.c index e4cb35159be..0b8121ebec0 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -125,9 +125,9 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) raead.maxauthsize = aead->maxauthsize; raead.ivsize = aead->ivsize; - NLA_PUT(skb, CRYPTOCFGA_REPORT_AEAD, - sizeof(struct crypto_report_aead), &raead); - + if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD, + sizeof(struct crypto_report_aead), &raead)) + goto nla_put_failure; return 0; nla_put_failure: @@ -210,9 +210,9 @@ static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg) raead.maxauthsize = aead->maxauthsize; raead.ivsize = aead->ivsize; - NLA_PUT(skb, CRYPTOCFGA_REPORT_AEAD, - sizeof(struct crypto_report_aead), &raead); - + if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD, + sizeof(struct crypto_report_aead), &raead)) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/crypto/ahash.c b/crypto/ahash.c index 33bc9b62e9a..3887856c2dd 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -409,9 +409,9 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg) rhash.blocksize = alg->cra_blocksize; rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize; - NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH, - sizeof(struct crypto_report_hash), &rhash); - + if (nla_put(skb, CRYPTOCFGA_REPORT_HASH, + sizeof(struct crypto_report_hash), &rhash)) + goto 
nla_put_failure; return 0; nla_put_failure: diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 4dd80c72549..a8d85a1d670 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -508,9 +508,9 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize; rblkcipher.ivsize = alg->cra_blkcipher.ivsize; - NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER, - sizeof(struct crypto_report_blkcipher), &rblkcipher); - + if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, + sizeof(struct crypto_report_blkcipher), &rblkcipher)) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index f1ea0a06413..5a37eadb4e5 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -81,9 +81,9 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) rcipher.min_keysize = alg->cra_cipher.cia_min_keysize; rcipher.max_keysize = alg->cra_cipher.cia_max_keysize; - NLA_PUT(skb, CRYPTOCFGA_REPORT_CIPHER, - sizeof(struct crypto_report_cipher), &rcipher); - + if (nla_put(skb, CRYPTOCFGA_REPORT_CIPHER, + sizeof(struct crypto_report_cipher), &rcipher)) + goto nla_put_failure; return 0; nla_put_failure: @@ -96,9 +96,9 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) snprintf(rcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "compression"); - NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS, - sizeof(struct crypto_report_comp), &rcomp); - + if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, + sizeof(struct crypto_report_comp), &rcomp)) + goto nla_put_failure; return 0; nla_put_failure: @@ -117,16 +117,16 @@ static int crypto_report_one(struct crypto_alg *alg, ualg->cru_flags = alg->cra_flags; ualg->cru_refcnt = atomic_read(&alg->cra_refcnt); - NLA_PUT_U32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority); - + if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority)) + goto nla_put_failure; if (alg->cra_flags & CRYPTO_ALG_LARVAL) { struct crypto_report_larval rl; snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval"); - NLA_PUT(skb, CRYPTOCFGA_REPORT_LARVAL, - sizeof(struct crypto_report_larval), &rl); - + if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, + sizeof(struct crypto_report_larval), &rl)) + goto nla_put_failure; goto out; } diff --git a/crypto/pcompress.c b/crypto/pcompress.c index 2e458e5482d..04e083ff537 100644 --- a/crypto/pcompress.c +++ b/crypto/pcompress.c @@ -55,9 +55,9 @@ static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg) snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp"); - NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS, - sizeof(struct crypto_report_comp), &rpcomp); - + if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, + sizeof(struct crypto_report_comp), &rpcomp)) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/crypto/rng.c b/crypto/rng.c index 64f864fa804..f3b7894dec0 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -69,9 +69,9 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg) rrng.seedsize = alg->cra_rng.seedsize; - NLA_PUT(skb, CRYPTOCFGA_REPORT_RNG, - sizeof(struct crypto_report_rng), &rrng); - + if (nla_put(skb, CRYPTOCFGA_REPORT_RNG, + sizeof(struct crypto_report_rng), &rrng)) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/crypto/shash.c b/crypto/shash.c index 21fc12e2378..32067f47e6c 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -534,9 +534,9 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg) rhash.blocksize = alg->cra_blocksize; 
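Editor's illustration: the crypto and netlink hunks in this patch all make the same mechanical conversion: the old NLA_PUT() macro hid a "goto nla_put_failure" inside itself, and it is replaced by an explicit "if (nla_put(...)) goto nla_put_failure;". A self-contained sketch of the resulting error-handling shape, with nla_put() stubbed out so the snippet runs in userspace (the stub's buffer and behaviour are invented; only the control flow mirrors the patch):

#include <stdio.h>
#include <string.h>

/* Userspace stand-in for nla_put(): pretend the message buffer is
 * tiny and fail (non-zero) when an attribute does not fit, as the
 * real function does when the skb runs out of tailroom. */
struct fake_skb {
    char buf[32];
    unsigned int used;
};

static int nla_put(struct fake_skb *skb, int type, unsigned int len,
                   const void *data)
{
    (void)type;
    if (skb->used + len > sizeof(skb->buf))
        return -1;
    memcpy(skb->buf + skb->used, data, len);
    skb->used += len;
    return 0;
}

static int report_alg(struct fake_skb *skb)
{
    char name[16] = "sha1";
    char blob[24] = { 0 };

    /* Same shape as the converted crypto_*_report() functions. */
    if (nla_put(skb, 1, sizeof(name), name))
        goto nla_put_failure;
    if (nla_put(skb, 2, sizeof(blob), blob))
        goto nla_put_failure;
    return 0;

nla_put_failure:
    return -1;
}

int main(void)
{
    struct fake_skb skb = { .used = 0 };

    printf("report_alg: %d (used %u bytes)\n", report_alg(&skb), skb.used);
    return 0;
}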
rhash.digestsize = salg->digestsize; - NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH, - sizeof(struct crypto_report_hash), &rhash); - + if (nla_put(skb, CRYPTOCFGA_REPORT_HASH, + sizeof(struct crypto_report_hash), &rhash)) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c index f8f41e0e8a8..89b30f32ba6 100644 --- a/drivers/atm/ambassador.c +++ b/drivers/atm/ambassador.c @@ -802,7 +802,7 @@ static void fill_rx_pool (amb_dev * dev, unsigned char pool, } // cast needed as there is no %? for pointer differences PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li", - skb, skb->head, (long) (skb_end_pointer(skb) - skb->head)); + skb, skb->head, (long) skb_end_offset(skb)); rx.handle = virt_to_bus (skb); rx.host_address = cpu_to_be32 (virt_to_bus (skb->data)); if (rx_give (dev, &rx, pool)) diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c index 75fd691cd43..7d01c2a7525 100644 --- a/drivers/atm/horizon.c +++ b/drivers/atm/horizon.c @@ -2182,7 +2182,6 @@ static int hrz_open (struct atm_vcc *atm_vcc) default: PRINTD (DBG_QOS|DBG_VCC, "Bad AAL!"); return -EINVAL; - break; } // TX traffic parameters @@ -2357,7 +2356,6 @@ static int hrz_open (struct atm_vcc *atm_vcc) default: { PRINTD (DBG_QOS, "unsupported TX traffic class"); return -EINVAL; - break; } } } @@ -2433,7 +2431,6 @@ static int hrz_open (struct atm_vcc *atm_vcc) default: { PRINTD (DBG_QOS, "unsupported RX traffic class"); return -EINVAL; - break; } } } @@ -2581,7 +2578,6 @@ static int hrz_getsockopt (struct atm_vcc * atm_vcc, int level, int optname, // break; default: return -ENOPROTOOPT; - break; }; break; } @@ -2601,7 +2597,6 @@ static int hrz_setsockopt (struct atm_vcc * atm_vcc, int level, int optname, // break; default: return -ENOPROTOOPT; - break; }; break; } diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 1c052127548..8974bd2b961 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -1258,7 +1258,7 @@ idt77252_rx_raw(struct idt77252_dev *card) tail = readl(SAR_REG_RAWCT); pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue), - skb_end_pointer(queue) - queue->head - 16, + skb_end_offset(queue) - 16, PCI_DMA_FROMDEVICE); while (head != tail) { diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 43beaca5317..436f519bed1 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -664,7 +664,7 @@ static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev) timeo = mdev->net_conf->try_connect_int * HZ; timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */ - s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */ + s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */ s_listen->sk->sk_rcvtimeo = timeo; s_listen->sk->sk_sndtimeo = timeo; drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size, @@ -841,8 +841,8 @@ retry: } } while (1); - msock->sk->sk_reuse = 1; /* SO_REUSEADDR */ - sock->sk->sk_reuse = 1; /* SO_REUSEADDR */ + msock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */ + sock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */ sock->sk->sk_allocation = GFP_NOIO; msock->sk->sk_allocation = GFP_NOIO; diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 8af25a097d7..7233c88f01b 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -30,37 +30,6 @@ #include "hyperv_vmbus.h" -/* #defines */ - - -/* Amount of space to write to */ -#define BYTES_AVAIL_TO_WRITE(r, w, z) \ - ((w) >= (r)) ? 
((z) - ((w) - (r))) : ((r) - (w)) - - -/* - * - * hv_get_ringbuffer_availbytes() - * - * Get number of bytes available to read and to write to - * for the specified ring buffer - */ -static inline void -hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi, - u32 *read, u32 *write) -{ - u32 read_loc, write_loc; - - smp_read_barrier_depends(); - - /* Capture the read/write indices before they changed */ - read_loc = rbi->ring_buffer->read_index; - write_loc = rbi->ring_buffer->write_index; - - *write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->ring_datasize); - *read = rbi->ring_datasize - *write; -} - /* * hv_get_next_write_location() * diff --git a/drivers/ieee802154/Kconfig b/drivers/ieee802154/Kconfig index 9b9f43aa2f8..15c06407370 100644 --- a/drivers/ieee802154/Kconfig +++ b/drivers/ieee802154/Kconfig @@ -19,4 +19,12 @@ config IEEE802154_FAKEHARD This driver can also be built as a module. To do so say M here. The module will be called 'fakehard'. +config IEEE802154_FAKELB + depends on IEEE802154_DRIVERS && MAC802154 + tristate "IEEE 802.15.4 loopback driver" + ---help--- + Say Y here to enable the fake driver that can emulate a net + of several interconnected radio devices. + This driver can also be built as a module. To do so say M here. + The module will be called 'fakelb'. diff --git a/drivers/ieee802154/Makefile b/drivers/ieee802154/Makefile index 800a3894af0..ea784ea6f0f 100644 --- a/drivers/ieee802154/Makefile +++ b/drivers/ieee802154/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o +obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o diff --git a/drivers/ieee802154/fakelb.c b/drivers/ieee802154/fakelb.c new file mode 100644 index 00000000000..e7456fcd091 --- /dev/null +++ b/drivers/ieee802154/fakelb.c @@ -0,0 +1,294 @@ +/* + * Loopback IEEE 802.15.4 interface + * + * Copyright 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
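Editor's illustration: the hv ring_buffer hunk further up removes the available-bytes helpers from this file; the arithmetic they implemented is the standard circular-buffer bookkeeping for a write index that may or may not have wrapped past the read index. For reference, a runnable restatement of that computation (the sizes and indices are invented for the example):

#include <stdio.h>

/* Bytes still writable in a ring of 'size' bytes given the current
 * read and write indices; the same expression as the removed
 * BYTES_AVAIL_TO_WRITE() macro. */
static unsigned int avail_to_write(unsigned int r, unsigned int w,
                                   unsigned int size)
{
    return (w >= r) ? size - (w - r) : r - w;
}

int main(void)
{
    unsigned int size = 4096;
    unsigned int r = 1000, w = 3000;

    unsigned int to_write = avail_to_write(r, w, size);
    unsigned int to_read = size - to_write;

    printf("write space %u, read space %u\n", to_write, to_read); /* 2096, 2000 */
    return 0;
}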
+ * + * Written by: + * Sergey Lapin <slapin@ossfans.org> + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> + */ + +#include <linux/module.h> +#include <linux/timer.h> +#include <linux/platform_device.h> +#include <linux/netdevice.h> +#include <linux/spinlock.h> +#include <net/mac802154.h> +#include <net/wpan-phy.h> + +static int numlbs = 1; + +struct fakelb_dev_priv { + struct ieee802154_dev *dev; + + struct list_head list; + struct fakelb_priv *fake; + + spinlock_t lock; + bool working; +}; + +struct fakelb_priv { + struct list_head list; + rwlock_t lock; +}; + +static int +fakelb_hw_ed(struct ieee802154_dev *dev, u8 *level) +{ + might_sleep(); + BUG_ON(!level); + *level = 0xbe; + + return 0; +} + +static int +fakelb_hw_channel(struct ieee802154_dev *dev, int page, int channel) +{ + pr_debug("set channel to %d\n", channel); + + might_sleep(); + dev->phy->current_page = page; + dev->phy->current_channel = channel; + + return 0; +} + +static void +fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb) +{ + struct sk_buff *newskb; + + spin_lock(&priv->lock); + if (priv->working) { + newskb = pskb_copy(skb, GFP_ATOMIC); + ieee802154_rx_irqsafe(priv->dev, newskb, 0xcc); + } + spin_unlock(&priv->lock); +} + +static int +fakelb_hw_xmit(struct ieee802154_dev *dev, struct sk_buff *skb) +{ + struct fakelb_dev_priv *priv = dev->priv; + struct fakelb_priv *fake = priv->fake; + + might_sleep(); + + read_lock_bh(&fake->lock); + if (priv->list.next == priv->list.prev) { + /* we are the only one device */ + fakelb_hw_deliver(priv, skb); + } else { + struct fakelb_dev_priv *dp; + list_for_each_entry(dp, &priv->fake->list, list) { + if (dp != priv && + (dp->dev->phy->current_channel == + priv->dev->phy->current_channel)) + fakelb_hw_deliver(dp, skb); + } + } + read_unlock_bh(&fake->lock); + + return 0; +} + +static int +fakelb_hw_start(struct ieee802154_dev *dev) { + struct fakelb_dev_priv *priv = dev->priv; + int ret = 0; + + spin_lock(&priv->lock); + if (priv->working) + ret = -EBUSY; + else + priv->working = 1; + spin_unlock(&priv->lock); + + return ret; +} + +static void +fakelb_hw_stop(struct ieee802154_dev *dev) { + struct fakelb_dev_priv *priv = dev->priv; + + spin_lock(&priv->lock); + priv->working = 0; + spin_unlock(&priv->lock); +} + +static struct ieee802154_ops fakelb_ops = { + .owner = THIS_MODULE, + .xmit = fakelb_hw_xmit, + .ed = fakelb_hw_ed, + .set_channel = fakelb_hw_channel, + .start = fakelb_hw_start, + .stop = fakelb_hw_stop, +}; + +/* Number of dummy devices to be set up by this module. 
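Editor's illustration: fakelb_hw_xmit() above implements the "network" of this loopback driver: under the read lock it either delivers the frame back to the lone device, or walks the list and hands a copy to every other device currently tuned to the same channel (and actually started). A plain C sketch of that fan-out rule, with the kernel list replaced by an array and all names invented for the example:

#include <stdio.h>

struct fake_dev {
    int id;
    int channel;
    int working;    /* corresponds to priv->working */
};

/* Deliver 'frame' from devs[src] to every other device listening on
 * the same channel; the same rule as fakelb_hw_xmit()'s list walk. */
static void fan_out(struct fake_dev *devs, int ndev, int src,
                    const char *frame)
{
    int i;

    for (i = 0; i < ndev; i++) {
        if (i == src)
            continue;
        if (devs[i].channel != devs[src].channel || !devs[i].working)
            continue;
        printf("dev %d receives \"%s\" on channel %d\n",
               devs[i].id, frame, devs[i].channel);
    }
}

int main(void)
{
    struct fake_dev devs[] = {
        { 0, 11, 1 },
        { 1, 11, 1 },
        { 2, 15, 1 },   /* different channel: must not see the frame */
        { 3, 11, 0 },   /* not started: must not see the frame */
    };

    fan_out(devs, 4, 0, "beacon");
    return 0;
}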
*/ +module_param(numlbs, int, 0); +MODULE_PARM_DESC(numlbs, " number of pseudo devices"); + +static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake) +{ + struct fakelb_dev_priv *priv; + int err; + struct ieee802154_dev *ieee; + + ieee = ieee802154_alloc_device(sizeof(*priv), &fakelb_ops); + if (!ieee) + return -ENOMEM; + + priv = ieee->priv; + priv->dev = ieee; + + /* 868 MHz BPSK 802.15.4-2003 */ + ieee->phy->channels_supported[0] |= 1; + /* 915 MHz BPSK 802.15.4-2003 */ + ieee->phy->channels_supported[0] |= 0x7fe; + /* 2.4 GHz O-QPSK 802.15.4-2003 */ + ieee->phy->channels_supported[0] |= 0x7FFF800; + /* 868 MHz ASK 802.15.4-2006 */ + ieee->phy->channels_supported[1] |= 1; + /* 915 MHz ASK 802.15.4-2006 */ + ieee->phy->channels_supported[1] |= 0x7fe; + /* 868 MHz O-QPSK 802.15.4-2006 */ + ieee->phy->channels_supported[2] |= 1; + /* 915 MHz O-QPSK 802.15.4-2006 */ + ieee->phy->channels_supported[2] |= 0x7fe; + /* 2.4 GHz CSS 802.15.4a-2007 */ + ieee->phy->channels_supported[3] |= 0x3fff; + /* UWB Sub-gigahertz 802.15.4a-2007 */ + ieee->phy->channels_supported[4] |= 1; + /* UWB Low band 802.15.4a-2007 */ + ieee->phy->channels_supported[4] |= 0x1e; + /* UWB High band 802.15.4a-2007 */ + ieee->phy->channels_supported[4] |= 0xffe0; + /* 750 MHz O-QPSK 802.15.4c-2009 */ + ieee->phy->channels_supported[5] |= 0xf; + /* 750 MHz MPSK 802.15.4c-2009 */ + ieee->phy->channels_supported[5] |= 0xf0; + /* 950 MHz BPSK 802.15.4d-2009 */ + ieee->phy->channels_supported[6] |= 0x3ff; + /* 950 MHz GFSK 802.15.4d-2009 */ + ieee->phy->channels_supported[6] |= 0x3ffc00; + + INIT_LIST_HEAD(&priv->list); + priv->fake = fake; + + spin_lock_init(&priv->lock); + + ieee->parent = dev; + + err = ieee802154_register_device(ieee); + if (err) + goto err_reg; + + write_lock_bh(&fake->lock); + list_add_tail(&priv->list, &fake->list); + write_unlock_bh(&fake->lock); + + return 0; + +err_reg: + ieee802154_free_device(priv->dev); + return err; +} + +static void fakelb_del(struct fakelb_dev_priv *priv) +{ + write_lock_bh(&priv->fake->lock); + list_del(&priv->list); + write_unlock_bh(&priv->fake->lock); + + ieee802154_unregister_device(priv->dev); + ieee802154_free_device(priv->dev); +} + +static int __devinit fakelb_probe(struct platform_device *pdev) +{ + struct fakelb_priv *priv; + struct fakelb_dev_priv *dp; + int err = -ENOMEM; + int i; + + priv = kzalloc(sizeof(struct fakelb_priv), GFP_KERNEL); + if (!priv) + goto err_alloc; + + INIT_LIST_HEAD(&priv->list); + rwlock_init(&priv->lock); + + for (i = 0; i < numlbs; i++) { + err = fakelb_add_one(&pdev->dev, priv); + if (err < 0) + goto err_slave; + } + + platform_set_drvdata(pdev, priv); + dev_info(&pdev->dev, "added ieee802154 hardware\n"); + return 0; + +err_slave: + list_for_each_entry(dp, &priv->list, list) + fakelb_del(dp); + kfree(priv); +err_alloc: + return err; +} + +static int __devexit fakelb_remove(struct platform_device *pdev) +{ + struct fakelb_priv *priv = platform_get_drvdata(pdev); + struct fakelb_dev_priv *dp, *temp; + + list_for_each_entry_safe(dp, temp, &priv->list, list) + fakelb_del(dp); + kfree(priv); + + return 0; +} + +static struct platform_device *ieee802154fake_dev; + +static struct platform_driver ieee802154fake_driver = { + .probe = fakelb_probe, + .remove = __devexit_p(fakelb_remove), + .driver = { + .name = "ieee802154fakelb", + .owner = THIS_MODULE, + }, +}; + +static __init int fakelb_init_module(void) +{ + ieee802154fake_dev = platform_device_register_simple( + "ieee802154fakelb", -1, NULL, 0); + return 
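Editor's illustration: fakelb_add_one() above advertises supported channels as per-page bitmaps: channels_supported[page] has bit n set when channel n of that page is usable, so page 0 gets bit 0 (868 MHz BPSK), bits 1-10 (915 MHz) and bits 11-26 (2.4 GHz) ORed in. A small decoder for one such word (page names follow the comments in the patch; the output format is mine):

#include <stdint.h>
#include <stdio.h>

/* Print which channel numbers are set in one channels_supported[]
 * word; an 802.15.4 channel page holds at most 27 channels. */
static void dump_page(int page, uint32_t mask)
{
    int ch;

    printf("page %d:", page);
    for (ch = 0; ch < 27; ch++)
        if (mask & (1u << ch))
            printf(" %d", ch);
    printf("\n");
}

int main(void)
{
    /* Page 0 as set up by fakelb_add_one():
     * bit 0 = 868 MHz, bits 1-10 = 915 MHz, bits 11-26 = 2.4 GHz. */
    uint32_t page0 = 1u | 0x7feu | 0x7FFF800u;

    dump_page(0, page0);
    return 0;
}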
platform_driver_register(&ieee802154fake_driver); +} + +static __exit void fake_remove_module(void) +{ + platform_driver_unregister(&ieee802154fake_driver); + platform_device_unregister(ieee802154fake_dev); +} + +module_init(fakelb_init_module); +module_exit(fake_remove_module); +MODULE_LICENSE("GPL"); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index e3e470fecaa..59fbd704a1e 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -42,6 +42,7 @@ #include <linux/inetdevice.h> #include <linux/slab.h> #include <linux/module.h> +#include <net/route.h> #include <net/tcp.h> #include <net/ipv6.h> @@ -1826,7 +1827,10 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) route->path_rec->reversible = 1; route->path_rec->pkey = cpu_to_be16(0xffff); route->path_rec->mtu_selector = IB_SA_EQ; - route->path_rec->sl = id_priv->tos >> 5; + route->path_rec->sl = netdev_get_prio_tc_map( + ndev->priv_flags & IFF_802_1Q_VLAN ? + vlan_dev_real_dev(ndev) : ndev, + rt_tos2priority(id_priv->tos)); route->path_rec->mtu = iboe_get_mtu(ndev->mtu); route->path_rec->rate_selector = IB_SA_EQ; diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index 396e2937030..e497dfbee43 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c @@ -125,7 +125,8 @@ int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh, unsigned char *prev_tail; prev_tail = skb_tail_pointer(skb); - NLA_PUT(skb, type, len, data); + if (nla_put(skb, type, len, data)) + goto nla_put_failure; nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail; return 0; diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 5861cdb22b7..8002ae642cf 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -66,12 +66,6 @@ static ctl_table ucma_ctl_table[] = { { } }; -static struct ctl_path ucma_ctl_path[] = { - { .procname = "net" }, - { .procname = "rdma_ucm" }, - { } -}; - struct ucma_file { struct mutex mut; struct file *filp; @@ -1392,7 +1386,7 @@ static int __init ucma_init(void) goto err1; } - ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table); + ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table); if (!ucma_ctl_table_hdr) { printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n"); ret = -ENOMEM; @@ -1408,7 +1402,7 @@ err1: static void __exit ucma_cleanup(void) { - unregister_sysctl_table(ucma_ctl_table_hdr); + unregister_net_sysctl_table(ucma_ctl_table_hdr); device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); misc_deregister(&ucma_misc); idr_destroy(&ctx_idr); diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index b902794bbf0..38c4bd87b2c 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c @@ -336,11 +336,6 @@ static inline void capincci_alloc_minor(struct capidev *cdev, struct capincci *np) { } static inline void capincci_free_minor(struct capincci *np) { } -static inline unsigned int capincci_minor_opencount(struct capincci *np) -{ - return 0; -} - #endif /* !CONFIG_ISDN_CAPI_MIDDLEWARE */ static struct capincci *capincci_alloc(struct capidev *cdev, u32 ncci) @@ -372,6 +367,7 @@ static void capincci_free(struct capidev *cdev, u32 ncci) } } +#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE static struct capincci *capincci_find(struct capidev *cdev, u32 ncci) { struct capincci *np; @@ -382,7 +378,6 @@ static struct capincci *capincci_find(struct capidev *cdev, u32 ncci) return NULL; } 
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE /* -------- handle data queue --------------------------------------- */ static struct sk_buff * @@ -578,8 +573,8 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb) struct tty_struct *tty; struct capiminor *mp; u16 datahandle; -#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */ struct capincci *np; +#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */ mutex_lock(&cdev->lock); @@ -597,6 +592,12 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb) goto unlock_out; } +#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE + skb_queue_tail(&cdev->recvqueue, skb); + wake_up_interruptible(&cdev->recvwait); + +#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */ + np = capincci_find(cdev, CAPIMSG_CONTROL(skb->data)); if (!np) { printk(KERN_ERR "BUG: capi_signal: ncci not found\n"); @@ -605,12 +606,6 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb) goto unlock_out; } -#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE - skb_queue_tail(&cdev->recvqueue, skb); - wake_up_interruptible(&cdev->recvwait); - -#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */ - mp = np->minorp; if (!mp) { skb_queue_tail(&cdev->recvqueue, skb); @@ -786,7 +781,6 @@ register_out: return retval; case CAPI_GET_VERSION: - { if (copy_from_user(&data.contr, argp, sizeof(data.contr))) return -EFAULT; @@ -796,11 +790,9 @@ register_out: if (copy_to_user(argp, &data.version, sizeof(data.version))) return -EFAULT; - } - return 0; + return 0; case CAPI_GET_SERIAL: - { if (copy_from_user(&data.contr, argp, sizeof(data.contr))) return -EFAULT; @@ -810,10 +802,9 @@ register_out: if (copy_to_user(argp, data.serial, sizeof(data.serial))) return -EFAULT; - } - return 0; + return 0; + case CAPI_GET_PROFILE: - { if (copy_from_user(&data.contr, argp, sizeof(data.contr))) return -EFAULT; @@ -837,11 +828,9 @@ register_out: } if (retval) return -EFAULT; - } - return 0; + return 0; case CAPI_GET_MANUFACTURER: - { if (copy_from_user(&data.contr, argp, sizeof(data.contr))) return -EFAULT; @@ -853,8 +842,8 @@ register_out: sizeof(data.manufacturer))) return -EFAULT; - } - return 0; + return 0; + case CAPI_GET_ERRCODE: data.errcode = cdev->errcode; cdev->errcode = CAPI_NOERROR; @@ -870,8 +859,7 @@ register_out: return 0; return -ENXIO; - case CAPI_MANUFACTURER_CMD: - { + case CAPI_MANUFACTURER_CMD: { struct capi_manufacturer_cmd mcmd; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -879,8 +867,6 @@ register_out: return -EFAULT; return capi20_manufacturer(mcmd.cmd, mcmd.data); } - return 0; - case CAPI_SET_FLAGS: case CAPI_CLR_FLAGS: { unsigned userflags; @@ -902,6 +888,11 @@ register_out: return -EFAULT; return 0; +#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE + case CAPI_NCCI_OPENCOUNT: + return 0; + +#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */ case CAPI_NCCI_OPENCOUNT: { struct capincci *nccip; unsigned ncci; @@ -918,7 +909,6 @@ register_out: return count; } -#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE case CAPI_NCCI_GETUNIT: { struct capincci *nccip; struct capiminor *mp; diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c index 6f5016b479f..832bc807ed2 100644 --- a/drivers/isdn/capi/capidrv.c +++ b/drivers/isdn/capi/capidrv.c @@ -1593,7 +1593,7 @@ static int capidrv_command(isdn_ctrl *c, capidrv_contr *card) return capidrv_ioctl(c, card); switch (c->command) { - case ISDN_CMD_DIAL:{ + case ISDN_CMD_DIAL: { u8 calling[ISDN_MSNLEN + 3]; u8 called[ISDN_MSNLEN + 2]; @@ -2072,7 +2072,8 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp) card->interface.writebuf_skb = if_sendbuf; card->interface.writecmd 
= NULL; card->interface.readstat = if_readstat; - card->interface.features = ISDN_FEATURE_L2_HDLC | + card->interface.features = + ISDN_FEATURE_L2_HDLC | ISDN_FEATURE_L2_TRANS | ISDN_FEATURE_L3_TRANS | ISDN_FEATURE_P_UNKNOWN | @@ -2080,7 +2081,8 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp) ISDN_FEATURE_L2_X75UI | ISDN_FEATURE_L2_X75BUI; if (profp->support1 & (1 << 2)) - card->interface.features |= ISDN_FEATURE_L2_V11096 | + card->interface.features |= + ISDN_FEATURE_L2_V11096 | ISDN_FEATURE_L2_V11019 | ISDN_FEATURE_L2_V11038; if (profp->support1 & (1 << 8)) diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index afa080258bf..3b9278b333b 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -410,10 +410,10 @@ static void check_pending(struct bas_cardstate *ucs) if (!(ucs->basstate & BS_RESETTING)) ucs->pending = 0; break; - /* - * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately - * and should never end up here - */ + /* + * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately + * and should never end up here + */ default: dev_warn(&ucs->interface->dev, "unknown pending request 0x%02x cleared\n", @@ -877,8 +877,7 @@ static void read_iso_callback(struct urb *urb) for (i = 0; i < BAS_NUMFRAMES; i++) { ubc->isoinlost += urb->iso_frame_desc[i].actual_length; if (unlikely(urb->iso_frame_desc[i].status != 0 && - urb->iso_frame_desc[i].status != - -EINPROGRESS)) + urb->iso_frame_desc[i].status != -EINPROGRESS)) ubc->loststatus = urb->iso_frame_desc[i].status; urb->iso_frame_desc[i].status = 0; urb->iso_frame_desc[i].actual_length = 0; @@ -2078,16 +2077,14 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6]) /* Free hardware dependent part of the B channel structure * parameter: * bcs B channel structure - * return value: - * !=0 on success */ -static int gigaset_freebcshw(struct bc_state *bcs) +static void gigaset_freebcshw(struct bc_state *bcs) { struct bas_bc_state *ubc = bcs->hw.bas; int i; if (!ubc) - return 0; + return; /* kill URBs and tasklets before freeing - better safe than sorry */ ubc->running = 0; @@ -2105,14 +2102,13 @@ static int gigaset_freebcshw(struct bc_state *bcs) kfree(ubc->isooutbuf); kfree(ubc); bcs->hw.bas = NULL; - return 1; } /* Initialize hardware dependent part of the B channel structure * parameter: * bcs B channel structure * return value: - * !=0 on success + * 0 on success, error code < 0 on failure */ static int gigaset_initbcshw(struct bc_state *bcs) { @@ -2122,7 +2118,7 @@ static int gigaset_initbcshw(struct bc_state *bcs) bcs->hw.bas = ubc = kmalloc(sizeof(struct bas_bc_state), GFP_KERNEL); if (!ubc) { pr_err("out of memory\n"); - return 0; + return -ENOMEM; } ubc->running = 0; @@ -2139,7 +2135,7 @@ static int gigaset_initbcshw(struct bc_state *bcs) pr_err("out of memory\n"); kfree(ubc); bcs->hw.bas = NULL; - return 0; + return -ENOMEM; } tasklet_init(&ubc->sent_tasklet, write_iso_tasklet, (unsigned long) bcs); @@ -2164,7 +2160,7 @@ static int gigaset_initbcshw(struct bc_state *bcs) ubc->stolen0s = 0; tasklet_init(&ubc->rcvd_tasklet, read_iso_tasklet, (unsigned long) bcs); - return 1; + return 0; } static void gigaset_reinitbcshw(struct bc_state *bcs) @@ -2187,6 +2183,12 @@ static void gigaset_freecshw(struct cardstate *cs) cs->hw.bas = NULL; } +/* Initialize hardware dependent part of the cardstate structure + * parameter: + * cs cardstate structure + * return value: + * 0 on success, error code < 0 on failure + 
*/ static int gigaset_initcshw(struct cardstate *cs) { struct bas_cardstate *ucs; @@ -2194,13 +2196,13 @@ static int gigaset_initcshw(struct cardstate *cs) cs->hw.bas = ucs = kmalloc(sizeof *ucs, GFP_KERNEL); if (!ucs) { pr_err("out of memory\n"); - return 0; + return -ENOMEM; } ucs->int_in_buf = kmalloc(IP_MSGSIZE, GFP_KERNEL); if (!ucs->int_in_buf) { kfree(ucs); pr_err("out of memory\n"); - return 0; + return -ENOMEM; } ucs->urb_cmd_in = NULL; @@ -2219,7 +2221,7 @@ static int gigaset_initcshw(struct cardstate *cs) init_waitqueue_head(&ucs->waitqueue); INIT_WORK(&ucs->int_in_wq, int_in_work); - return 1; + return 0; } /* freeurbs @@ -2379,18 +2381,20 @@ static int gigaset_probe(struct usb_interface *interface, /* save address of controller structure */ usb_set_intfdata(interface, cs); - if (!gigaset_start(cs)) + rc = gigaset_start(cs); + if (rc < 0) goto error; return 0; allocerr: dev_err(cs->dev, "could not allocate URBs\n"); + rc = -ENOMEM; error: freeurbs(cs); usb_set_intfdata(interface, NULL); gigaset_freecs(cs); - return -ENODEV; + return rc; } /* gigaset_disconnect diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c index 343b5c80cb7..27e4a3e21d6 100644 --- a/drivers/isdn/gigaset/capi.c +++ b/drivers/isdn/gigaset/capi.c @@ -14,6 +14,7 @@ #include "gigaset.h" #include <linux/proc_fs.h> #include <linux/seq_file.h> +#include <linux/ratelimit.h> #include <linux/isdn/capilli.h> #include <linux/isdn/capicmd.h> #include <linux/isdn/capiutil.h> @@ -108,51 +109,35 @@ static struct { u8 *bc; u8 *hlc; } cip2bchlc[] = { - [1] = { "8090A3", NULL }, - /* Speech (A-law) */ - [2] = { "8890", NULL }, - /* Unrestricted digital information */ - [3] = { "8990", NULL }, - /* Restricted digital information */ - [4] = { "9090A3", NULL }, - /* 3,1 kHz audio (A-law) */ - [5] = { "9190", NULL }, - /* 7 kHz audio */ - [6] = { "9890", NULL }, - /* Video */ - [7] = { "88C0C6E6", NULL }, - /* Packet mode */ - [8] = { "8890218F", NULL }, - /* 56 kbit/s rate adaptation */ - [9] = { "9190A5", NULL }, - /* Unrestricted digital information with tones/announcements */ - [16] = { "8090A3", "9181" }, - /* Telephony */ - [17] = { "9090A3", "9184" }, - /* Group 2/3 facsimile */ - [18] = { "8890", "91A1" }, - /* Group 4 facsimile Class 1 */ - [19] = { "8890", "91A4" }, - /* Teletex service basic and mixed mode - and Group 4 facsimile service Classes II and III */ - [20] = { "8890", "91A8" }, - /* Teletex service basic and processable mode */ - [21] = { "8890", "91B1" }, - /* Teletex service basic mode */ - [22] = { "8890", "91B2" }, - /* International interworking for Videotex */ - [23] = { "8890", "91B5" }, - /* Telex */ - [24] = { "8890", "91B8" }, - /* Message Handling Systems in accordance with X.400 */ - [25] = { "8890", "91C1" }, - /* OSI application in accordance with X.200 */ - [26] = { "9190A5", "9181" }, - /* 7 kHz telephony */ - [27] = { "9190A5", "916001" }, - /* Video telephony, first connection */ - [28] = { "8890", "916002" }, - /* Video telephony, second connection */ + [1] = { "8090A3", NULL }, /* Speech (A-law) */ + [2] = { "8890", NULL }, /* Unrestricted digital information */ + [3] = { "8990", NULL }, /* Restricted digital information */ + [4] = { "9090A3", NULL }, /* 3,1 kHz audio (A-law) */ + [5] = { "9190", NULL }, /* 7 kHz audio */ + [6] = { "9890", NULL }, /* Video */ + [7] = { "88C0C6E6", NULL }, /* Packet mode */ + [8] = { "8890218F", NULL }, /* 56 kbit/s rate adaptation */ + [9] = { "9190A5", NULL }, /* Unrestricted digital information + * with tones/announcements */ + [16] = 
{ "8090A3", "9181" }, /* Telephony */ + [17] = { "9090A3", "9184" }, /* Group 2/3 facsimile */ + [18] = { "8890", "91A1" }, /* Group 4 facsimile Class 1 */ + [19] = { "8890", "91A4" }, /* Teletex service basic and mixed mode + * and Group 4 facsimile service + * Classes II and III */ + [20] = { "8890", "91A8" }, /* Teletex service basic and + * processable mode */ + [21] = { "8890", "91B1" }, /* Teletex service basic mode */ + [22] = { "8890", "91B2" }, /* International interworking for + * Videotex */ + [23] = { "8890", "91B5" }, /* Telex */ + [24] = { "8890", "91B8" }, /* Message Handling Systems + * in accordance with X.400 */ + [25] = { "8890", "91C1" }, /* OSI application + * in accordance with X.200 */ + [26] = { "9190A5", "9181" }, /* 7 kHz telephony */ + [27] = { "9190A5", "916001" }, /* Video telephony, first connection */ + [28] = { "8890", "916002" }, /* Video telephony, second connection */ }; /* @@ -223,10 +208,14 @@ get_appl(struct gigaset_capi_ctr *iif, u16 appl) static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p) { #ifdef CONFIG_GIGASET_DEBUG + /* dump at most 20 messages in 20 secs */ + static DEFINE_RATELIMIT_STATE(msg_dump_ratelimit, 20 * HZ, 20); _cdebbuf *cdb; if (!(gigaset_debuglevel & level)) return; + if (!___ratelimit(&msg_dump_ratelimit, tag)) + return; cdb = capi_cmsg2str(p); if (cdb) { @@ -1192,7 +1181,9 @@ static void do_facility_req(struct gigaset_capi_ctr *iif, confparam[3] = 2; /* length */ capimsg_setu16(confparam, 4, CapiSuccess); break; - /* ToDo: add supported services */ + + /* ToDo: add supported services */ + default: dev_notice(cs->dev, "%s: unsupported supplementary service function 0x%04x\n", @@ -1766,7 +1757,8 @@ static void do_connect_b3_req(struct gigaset_capi_ctr *iif, /* NCPI parameter: not applicable for B3 Transparent */ ignore_cstruct_param(cs, cmsg->NCPI, "CONNECT_B3_REQ", "NCPI"); - send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ? + send_conf(iif, ap, skb, + (cmsg->NCPI && cmsg->NCPI[0]) ? CapiNcpiNotSupportedByProtocol : CapiSuccess); } @@ -1882,6 +1874,9 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif, /* check for active logical connection */ if (bcs->apconnstate >= APCONN_ACTIVE) { + /* clear it */ + bcs->apconnstate = APCONN_SETUP; + /* * emit DISCONNECT_B3_IND with cause 0x3301 * use separate cmsg structure, as the content of iif->acmsg @@ -1906,6 +1901,7 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif, } capi_cmsg2message(b3cmsg, __skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN)); + dump_cmsg(DEBUG_CMD, __func__, b3cmsg); kfree(b3cmsg); capi_ctr_handle_message(&iif->ctr, ap->id, b3skb); } @@ -1966,7 +1962,8 @@ static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif, /* NCPI parameter: not applicable for B3 Transparent */ ignore_cstruct_param(cs, cmsg->NCPI, "DISCONNECT_B3_REQ", "NCPI"); - send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ? + send_conf(iif, ap, skb, + (cmsg->NCPI && cmsg->NCPI[0]) ? 
CapiNcpiNotSupportedByProtocol : CapiSuccess); } @@ -2059,12 +2056,6 @@ static void do_reset_b3_req(struct gigaset_capi_ctr *iif, } /* - * dump unsupported/ignored messages at most twice per minute, - * some apps send those very frequently - */ -static unsigned long ignored_msg_dump_time; - -/* * unsupported CAPI message handler */ static void do_unsupported(struct gigaset_capi_ctr *iif, @@ -2073,8 +2064,7 @@ static void do_unsupported(struct gigaset_capi_ctr *iif, { /* decode message */ capi_message2cmsg(&iif->acmsg, skb->data); - if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000)) - dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); + dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState); } @@ -2085,11 +2075,9 @@ static void do_nothing(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { - if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000)) { - /* decode message */ - capi_message2cmsg(&iif->acmsg, skb->data); - dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); - } + /* decode message */ + capi_message2cmsg(&iif->acmsg, skb->data); + dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); dev_kfree_skb_any(skb); } @@ -2358,7 +2346,7 @@ static const struct file_operations gigaset_proc_fops = { * @cs: device descriptor structure. * @isdnid: device name. * - * Return value: 1 for success, 0 for failure + * Return value: 0 on success, error code < 0 on failure */ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) { @@ -2368,7 +2356,7 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) iif = kmalloc(sizeof(*iif), GFP_KERNEL); if (!iif) { pr_err("%s: out of memory\n", __func__); - return 0; + return -ENOMEM; } /* prepare controller structure */ @@ -2392,12 +2380,12 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) if (rc) { pr_err("attach_capi_ctr failed (%d)\n", rc); kfree(iif); - return 0; + return rc; } cs->iif = iif; cs->hw_hdr_len = CAPI_DATA_B3_REQ_LEN; - return 1; + return 0; } /** diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c index 76792707f99..aa41485bc59 100644 --- a/drivers/isdn/gigaset/common.c +++ b/drivers/isdn/gigaset/common.c @@ -194,13 +194,13 @@ int gigaset_get_channel(struct bc_state *bcs) gig_dbg(DEBUG_CHANNEL, "could not allocate channel %d", bcs->channel); spin_unlock_irqrestore(&bcs->cs->lock, flags); - return 0; + return -EBUSY; } ++bcs->use_count; bcs->busy = 1; gig_dbg(DEBUG_CHANNEL, "allocated channel %d", bcs->channel); spin_unlock_irqrestore(&bcs->cs->lock, flags); - return 1; + return 0; } struct bc_state *gigaset_get_free_channel(struct cardstate *cs) @@ -258,7 +258,7 @@ int gigaset_get_channels(struct cardstate *cs) spin_unlock_irqrestore(&cs->lock, flags); gig_dbg(DEBUG_CHANNEL, "could not allocate all channels"); - return 0; + return -EBUSY; } for (i = 0; i < cs->channels; ++i) ++cs->bcs[i].use_count; @@ -266,7 +266,7 @@ int gigaset_get_channels(struct cardstate *cs) gig_dbg(DEBUG_CHANNEL, "allocated all channels"); - return 1; + return 0; } void gigaset_free_channels(struct cardstate *cs) @@ -362,7 +362,7 @@ struct event_t *gigaset_add_event(struct cardstate *cs, } EXPORT_SYMBOL_GPL(gigaset_add_event); -static void free_strings(struct at_state_t *at_state) +static void clear_at_state(struct at_state_t *at_state) { int i; @@ -372,18 +372,13 @@ static void free_strings(struct at_state_t *at_state) } } -static void clear_at_state(struct at_state_t *at_state) -{ - free_strings(at_state); -} - -static 
void dealloc_at_states(struct cardstate *cs) +static void dealloc_temp_at_states(struct cardstate *cs) { struct at_state_t *cur, *next; list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) { list_del(&cur->list); - free_strings(cur); + clear_at_state(cur); kfree(cur); } } @@ -393,8 +388,7 @@ static void gigaset_freebcs(struct bc_state *bcs) int i; gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel); - if (!bcs->cs->ops->freebcshw(bcs)) - gig_dbg(DEBUG_INIT, "failed"); + bcs->cs->ops->freebcshw(bcs); gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel); clear_at_state(&bcs->at_state); @@ -512,7 +506,7 @@ void gigaset_freecs(struct cardstate *cs) case 1: /* error when registering to LL */ gig_dbg(DEBUG_INIT, "clearing at_state"); clear_at_state(&cs->at_state); - dealloc_at_states(cs); + dealloc_temp_at_states(cs); /* fall through */ case 0: /* error in basic setup */ @@ -571,6 +565,8 @@ static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct cardstate *cs) * @inbuf: buffer structure. * @src: received data. * @numbytes: number of bytes received. + * + * Return value: !=0 if some data was appended */ int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src, unsigned numbytes) @@ -614,8 +610,8 @@ int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src, EXPORT_SYMBOL_GPL(gigaset_fill_inbuf); /* Initialize the b-channel structure */ -static struct bc_state *gigaset_initbcs(struct bc_state *bcs, - struct cardstate *cs, int channel) +static int gigaset_initbcs(struct bc_state *bcs, struct cardstate *cs, + int channel) { int i; @@ -654,11 +650,7 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs, bcs->apconnstate = 0; gig_dbg(DEBUG_INIT, " setting up bcs[%d]->hw", channel); - if (cs->ops->initbcshw(bcs)) - return bcs; - - gig_dbg(DEBUG_INIT, " failed"); - return NULL; + return cs->ops->initbcshw(bcs); } /** @@ -757,7 +749,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, cs->cmdbytes = 0; gig_dbg(DEBUG_INIT, "setting up iif"); - if (!gigaset_isdn_regdev(cs, modulename)) { + if (gigaset_isdn_regdev(cs, modulename) < 0) { pr_err("error registering ISDN device\n"); goto error; } @@ -765,7 +757,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, make_valid(cs, VALID_ID); ++cs->cs_init; gig_dbg(DEBUG_INIT, "setting up hw"); - if (!cs->ops->initcshw(cs)) + if (cs->ops->initcshw(cs) < 0) goto error; ++cs->cs_init; @@ -779,7 +771,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, /* set up channel data structures */ for (i = 0; i < channels; ++i) { gig_dbg(DEBUG_INIT, "setting up bcs[%d]", i); - if (!gigaset_initbcs(cs->bcs + i, cs, i)) { + if (gigaset_initbcs(cs->bcs + i, cs, i) < 0) { pr_err("could not allocate channel %d data\n", i); goto error; } @@ -848,8 +840,7 @@ static void cleanup_cs(struct cardstate *cs) cs->mstate = MS_UNINITIALIZED; clear_at_state(&cs->at_state); - dealloc_at_states(cs); - free_strings(&cs->at_state); + dealloc_temp_at_states(cs); gigaset_at_init(&cs->at_state, NULL, cs, 0); cs->inbuf->inputstate = INS_command; @@ -875,7 +866,7 @@ static void cleanup_cs(struct cardstate *cs) for (i = 0; i < cs->channels; ++i) { gigaset_freebcs(cs->bcs + i); - if (!gigaset_initbcs(cs->bcs + i, cs, i)) + if (gigaset_initbcs(cs->bcs + i, cs, i) < 0) pr_err("could not allocate channel %d data\n", i); } @@ -896,14 +887,14 @@ static void cleanup_cs(struct cardstate *cs) * waiting for completion of the initialization. 
* * Return value: - * 1 - success, 0 - error + * 0 on success, error code < 0 on failure */ int gigaset_start(struct cardstate *cs) { unsigned long flags; if (mutex_lock_interruptible(&cs->mutex)) - return 0; + return -EBUSY; spin_lock_irqsave(&cs->lock, flags); cs->connected = 1; @@ -927,11 +918,11 @@ int gigaset_start(struct cardstate *cs) wait_event(cs->waitqueue, !cs->waiting); mutex_unlock(&cs->mutex); - return 1; + return 0; error: mutex_unlock(&cs->mutex); - return 0; + return -ENOMEM; } EXPORT_SYMBOL_GPL(gigaset_start); @@ -943,7 +934,7 @@ EXPORT_SYMBOL_GPL(gigaset_start); * waiting for completion of the shutdown. * * Return value: - * 0 - success, -1 - error (no device associated) + * 0 - success, -ENODEV - error (no device associated) */ int gigaset_shutdown(struct cardstate *cs) { @@ -951,7 +942,7 @@ int gigaset_shutdown(struct cardstate *cs) if (!(cs->flags & VALID_MINOR)) { mutex_unlock(&cs->mutex); - return -1; + return -ENODEV; } cs->waiting = 1; diff --git a/drivers/isdn/gigaset/dummyll.c b/drivers/isdn/gigaset/dummyll.c index 19b1c779d50..570c2d53b84 100644 --- a/drivers/isdn/gigaset/dummyll.c +++ b/drivers/isdn/gigaset/dummyll.c @@ -60,7 +60,7 @@ void gigaset_isdn_stop(struct cardstate *cs) int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) { - return 1; + return 0; } void gigaset_isdn_unregdev(struct cardstate *cs) diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c index 624a8256a77..2e6963dc740 100644 --- a/drivers/isdn/gigaset/ev-layer.c +++ b/drivers/isdn/gigaset/ev-layer.c @@ -153,103 +153,104 @@ struct reply_t gigaset_tab_nocid[] = * action, command */ /* initialize device, set cid mode if possible */ - {RSP_INIT, -1, -1, SEQ_INIT, 100, 1, {ACT_TIMEOUT} }, + {RSP_INIT, -1, -1, SEQ_INIT, 100, 1, {ACT_TIMEOUT} }, - {EV_TIMEOUT, 100, 100, -1, 101, 3, {0}, "Z\r"}, - {RSP_OK, 101, 103, -1, 120, 5, {ACT_GETSTRING}, - "+GMR\r"}, + {EV_TIMEOUT, 100, 100, -1, 101, 3, {0}, "Z\r"}, + {RSP_OK, 101, 103, -1, 120, 5, {ACT_GETSTRING}, + "+GMR\r"}, - {EV_TIMEOUT, 101, 101, -1, 102, 5, {0}, "Z\r"}, - {RSP_ERROR, 101, 101, -1, 102, 5, {0}, "Z\r"}, + {EV_TIMEOUT, 101, 101, -1, 102, 5, {0}, "Z\r"}, + {RSP_ERROR, 101, 101, -1, 102, 5, {0}, "Z\r"}, - {EV_TIMEOUT, 102, 102, -1, 108, 5, {ACT_SETDLE1}, - "^SDLE=0\r"}, - {RSP_OK, 108, 108, -1, 104, -1}, - {RSP_ZDLE, 104, 104, 0, 103, 5, {0}, "Z\r"}, - {EV_TIMEOUT, 104, 104, -1, 0, 0, {ACT_FAILINIT} }, - {RSP_ERROR, 108, 108, -1, 0, 0, {ACT_FAILINIT} }, + {EV_TIMEOUT, 102, 102, -1, 108, 5, {ACT_SETDLE1}, + "^SDLE=0\r"}, + {RSP_OK, 108, 108, -1, 104, -1}, + {RSP_ZDLE, 104, 104, 0, 103, 5, {0}, "Z\r"}, + {EV_TIMEOUT, 104, 104, -1, 0, 0, {ACT_FAILINIT} }, + {RSP_ERROR, 108, 108, -1, 0, 0, {ACT_FAILINIT} }, - {EV_TIMEOUT, 108, 108, -1, 105, 2, {ACT_SETDLE0, - ACT_HUPMODEM, - ACT_TIMEOUT} }, - {EV_TIMEOUT, 105, 105, -1, 103, 5, {0}, "Z\r"}, + {EV_TIMEOUT, 108, 108, -1, 105, 2, {ACT_SETDLE0, + ACT_HUPMODEM, + ACT_TIMEOUT} }, + {EV_TIMEOUT, 105, 105, -1, 103, 5, {0}, "Z\r"}, - {RSP_ERROR, 102, 102, -1, 107, 5, {0}, "^GETPRE\r"}, - {RSP_OK, 107, 107, -1, 0, 0, {ACT_CONFIGMODE} }, - {RSP_ERROR, 107, 107, -1, 0, 0, {ACT_FAILINIT} }, - {EV_TIMEOUT, 107, 107, -1, 0, 0, {ACT_FAILINIT} }, + {RSP_ERROR, 102, 102, -1, 107, 5, {0}, "^GETPRE\r"}, + {RSP_OK, 107, 107, -1, 0, 0, {ACT_CONFIGMODE} }, + {RSP_ERROR, 107, 107, -1, 0, 0, {ACT_FAILINIT} }, + {EV_TIMEOUT, 107, 107, -1, 0, 0, {ACT_FAILINIT} }, - {RSP_ERROR, 103, 103, -1, 0, 0, {ACT_FAILINIT} }, - {EV_TIMEOUT, 103, 103, -1, 0, 0, {ACT_FAILINIT} }, + {RSP_ERROR, 
103, 103, -1, 0, 0, {ACT_FAILINIT} }, + {EV_TIMEOUT, 103, 103, -1, 0, 0, {ACT_FAILINIT} }, - {RSP_STRING, 120, 120, -1, 121, -1, {ACT_SETVER} }, + {RSP_STRING, 120, 120, -1, 121, -1, {ACT_SETVER} }, - {EV_TIMEOUT, 120, 121, -1, 0, 0, {ACT_FAILVER, - ACT_INIT} }, - {RSP_ERROR, 120, 121, -1, 0, 0, {ACT_FAILVER, - ACT_INIT} }, - {RSP_OK, 121, 121, -1, 0, 0, {ACT_GOTVER, - ACT_INIT} }, + {EV_TIMEOUT, 120, 121, -1, 0, 0, {ACT_FAILVER, + ACT_INIT} }, + {RSP_ERROR, 120, 121, -1, 0, 0, {ACT_FAILVER, + ACT_INIT} }, + {RSP_OK, 121, 121, -1, 0, 0, {ACT_GOTVER, + ACT_INIT} }, + {RSP_NONE, 121, 121, -1, 120, 0, {ACT_GETSTRING} }, /* leave dle mode */ - {RSP_INIT, 0, 0, SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"}, - {RSP_OK, 201, 201, -1, 202, -1}, - {RSP_ZDLE, 202, 202, 0, 0, 0, {ACT_DLE0} }, - {RSP_NODEV, 200, 249, -1, 0, 0, {ACT_FAKEDLE0} }, - {RSP_ERROR, 200, 249, -1, 0, 0, {ACT_FAILDLE0} }, - {EV_TIMEOUT, 200, 249, -1, 0, 0, {ACT_FAILDLE0} }, + {RSP_INIT, 0, 0, SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"}, + {RSP_OK, 201, 201, -1, 202, -1}, + {RSP_ZDLE, 202, 202, 0, 0, 0, {ACT_DLE0} }, + {RSP_NODEV, 200, 249, -1, 0, 0, {ACT_FAKEDLE0} }, + {RSP_ERROR, 200, 249, -1, 0, 0, {ACT_FAILDLE0} }, + {EV_TIMEOUT, 200, 249, -1, 0, 0, {ACT_FAILDLE0} }, /* enter dle mode */ - {RSP_INIT, 0, 0, SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"}, - {RSP_OK, 251, 251, -1, 252, -1}, - {RSP_ZDLE, 252, 252, 1, 0, 0, {ACT_DLE1} }, - {RSP_ERROR, 250, 299, -1, 0, 0, {ACT_FAILDLE1} }, - {EV_TIMEOUT, 250, 299, -1, 0, 0, {ACT_FAILDLE1} }, + {RSP_INIT, 0, 0, SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"}, + {RSP_OK, 251, 251, -1, 252, -1}, + {RSP_ZDLE, 252, 252, 1, 0, 0, {ACT_DLE1} }, + {RSP_ERROR, 250, 299, -1, 0, 0, {ACT_FAILDLE1} }, + {EV_TIMEOUT, 250, 299, -1, 0, 0, {ACT_FAILDLE1} }, /* incoming call */ - {RSP_RING, -1, -1, -1, -1, -1, {ACT_RING} }, + {RSP_RING, -1, -1, -1, -1, -1, {ACT_RING} }, /* get cid */ - {RSP_INIT, 0, 0, SEQ_CID, 301, 5, {0}, "^SGCI?\r"}, - {RSP_OK, 301, 301, -1, 302, -1}, - {RSP_ZGCI, 302, 302, -1, 0, 0, {ACT_CID} }, - {RSP_ERROR, 301, 349, -1, 0, 0, {ACT_FAILCID} }, - {EV_TIMEOUT, 301, 349, -1, 0, 0, {ACT_FAILCID} }, + {RSP_INIT, 0, 0, SEQ_CID, 301, 5, {0}, "^SGCI?\r"}, + {RSP_OK, 301, 301, -1, 302, -1}, + {RSP_ZGCI, 302, 302, -1, 0, 0, {ACT_CID} }, + {RSP_ERROR, 301, 349, -1, 0, 0, {ACT_FAILCID} }, + {EV_TIMEOUT, 301, 349, -1, 0, 0, {ACT_FAILCID} }, /* enter cid mode */ - {RSP_INIT, 0, 0, SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"}, - {RSP_OK, 150, 150, -1, 0, 0, {ACT_CMODESET} }, - {RSP_ERROR, 150, 150, -1, 0, 0, {ACT_FAILCMODE} }, - {EV_TIMEOUT, 150, 150, -1, 0, 0, {ACT_FAILCMODE} }, + {RSP_INIT, 0, 0, SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"}, + {RSP_OK, 150, 150, -1, 0, 0, {ACT_CMODESET} }, + {RSP_ERROR, 150, 150, -1, 0, 0, {ACT_FAILCMODE} }, + {EV_TIMEOUT, 150, 150, -1, 0, 0, {ACT_FAILCMODE} }, /* leave cid mode */ - {RSP_INIT, 0, 0, SEQ_UMMODE, 160, 5, {0}, "Z\r"}, - {RSP_OK, 160, 160, -1, 0, 0, {ACT_UMODESET} }, - {RSP_ERROR, 160, 160, -1, 0, 0, {ACT_FAILUMODE} }, - {EV_TIMEOUT, 160, 160, -1, 0, 0, {ACT_FAILUMODE} }, + {RSP_INIT, 0, 0, SEQ_UMMODE, 160, 5, {0}, "Z\r"}, + {RSP_OK, 160, 160, -1, 0, 0, {ACT_UMODESET} }, + {RSP_ERROR, 160, 160, -1, 0, 0, {ACT_FAILUMODE} }, + {EV_TIMEOUT, 160, 160, -1, 0, 0, {ACT_FAILUMODE} }, /* abort getting cid */ - {RSP_INIT, 0, 0, SEQ_NOCID, 0, 0, {ACT_ABORTCID} }, + {RSP_INIT, 0, 0, SEQ_NOCID, 0, 0, {ACT_ABORTCID} }, /* reset */ - {RSP_INIT, 0, 0, SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"}, - {RSP_OK, 504, 504, -1, 0, 0, {ACT_SDOWN} }, - {RSP_ERROR, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} }, - {EV_TIMEOUT, 501, 599, 
-1, 0, 0, {ACT_FAILSDOWN} }, - {RSP_NODEV, 501, 599, -1, 0, 0, {ACT_FAKESDOWN} }, - - {EV_PROC_CIDMODE, -1, -1, -1, -1, -1, {ACT_PROC_CIDMODE} }, - {EV_IF_LOCK, -1, -1, -1, -1, -1, {ACT_IF_LOCK} }, - {EV_IF_VER, -1, -1, -1, -1, -1, {ACT_IF_VER} }, - {EV_START, -1, -1, -1, -1, -1, {ACT_START} }, - {EV_STOP, -1, -1, -1, -1, -1, {ACT_STOP} }, - {EV_SHUTDOWN, -1, -1, -1, -1, -1, {ACT_SHUTDOWN} }, + {RSP_INIT, 0, 0, SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"}, + {RSP_OK, 504, 504, -1, 0, 0, {ACT_SDOWN} }, + {RSP_ERROR, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} }, + {EV_TIMEOUT, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} }, + {RSP_NODEV, 501, 599, -1, 0, 0, {ACT_FAKESDOWN} }, + + {EV_PROC_CIDMODE, -1, -1, -1, -1, -1, {ACT_PROC_CIDMODE} }, + {EV_IF_LOCK, -1, -1, -1, -1, -1, {ACT_IF_LOCK} }, + {EV_IF_VER, -1, -1, -1, -1, -1, {ACT_IF_VER} }, + {EV_START, -1, -1, -1, -1, -1, {ACT_START} }, + {EV_STOP, -1, -1, -1, -1, -1, {ACT_STOP} }, + {EV_SHUTDOWN, -1, -1, -1, -1, -1, {ACT_SHUTDOWN} }, /* misc. */ - {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} }, - {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} }, - {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} }, - {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} }, + {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} }, + {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} }, + {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} }, + {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} }, {RSP_LAST} }; @@ -261,90 +262,90 @@ struct reply_t gigaset_tab_cid[] = * action, command */ /* dial */ - {EV_DIAL, -1, -1, -1, -1, -1, {ACT_DIAL} }, - {RSP_INIT, 0, 0, SEQ_DIAL, 601, 5, {ACT_CMD + AT_BC} }, - {RSP_OK, 601, 601, -1, 603, 5, {ACT_CMD + AT_PROTO} }, - {RSP_OK, 603, 603, -1, 604, 5, {ACT_CMD + AT_TYPE} }, - {RSP_OK, 604, 604, -1, 605, 5, {ACT_CMD + AT_MSN} }, - {RSP_NULL, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} }, - {RSP_OK, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} }, - {RSP_NULL, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} }, - {RSP_OK, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} }, - {RSP_OK, 607, 607, -1, 608, 5, {0}, "+VLS=17\r"}, - {RSP_OK, 608, 608, -1, 609, -1}, - {RSP_ZSAU, 609, 609, ZSAU_PROCEEDING, 610, 5, {ACT_CMD + AT_DIAL} }, - {RSP_OK, 610, 610, -1, 650, 0, {ACT_DIALING} }, - - {RSP_ERROR, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} }, - {EV_TIMEOUT, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} }, + {EV_DIAL, -1, -1, -1, -1, -1, {ACT_DIAL} }, + {RSP_INIT, 0, 0, SEQ_DIAL, 601, 5, {ACT_CMD + AT_BC} }, + {RSP_OK, 601, 601, -1, 603, 5, {ACT_CMD + AT_PROTO} }, + {RSP_OK, 603, 603, -1, 604, 5, {ACT_CMD + AT_TYPE} }, + {RSP_OK, 604, 604, -1, 605, 5, {ACT_CMD + AT_MSN} }, + {RSP_NULL, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} }, + {RSP_OK, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} }, + {RSP_NULL, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} }, + {RSP_OK, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} }, + {RSP_OK, 607, 607, -1, 608, 5, {0}, "+VLS=17\r"}, + {RSP_OK, 608, 608, -1, 609, -1}, + {RSP_ZSAU, 609, 609, ZSAU_PROCEEDING, 610, 5, {ACT_CMD + AT_DIAL} }, + {RSP_OK, 610, 610, -1, 650, 0, {ACT_DIALING} }, + + {RSP_ERROR, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} }, + {EV_TIMEOUT, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} }, /* optional dialing responses */ - {EV_BC_OPEN, 650, 650, -1, 651, -1}, - {RSP_ZVLS, 609, 651, 17, -1, -1, {ACT_DEBUG} }, - {RSP_ZCTP, 610, 651, -1, -1, -1, {ACT_DEBUG} }, - {RSP_ZCPN, 610, 651, -1, -1, -1, {ACT_DEBUG} }, - {RSP_ZSAU, 650, 651, ZSAU_CALL_DELIVERED, -1, -1, {ACT_DEBUG} }, + {EV_BC_OPEN, 650, 650, -1, 651, -1}, + {RSP_ZVLS, 609, 651, 17, -1, -1, {ACT_DEBUG} }, + {RSP_ZCTP, 610, 651, -1, -1, -1, {ACT_DEBUG} }, + {RSP_ZCPN, 610, 651, -1, -1, 
-1, {ACT_DEBUG} }, + {RSP_ZSAU, 650, 651, ZSAU_CALL_DELIVERED, -1, -1, {ACT_DEBUG} }, /* connect */ - {RSP_ZSAU, 650, 650, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} }, - {RSP_ZSAU, 651, 651, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT, - ACT_NOTIFY_BC_UP} }, - {RSP_ZSAU, 750, 750, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} }, - {RSP_ZSAU, 751, 751, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT, - ACT_NOTIFY_BC_UP} }, - {EV_BC_OPEN, 800, 800, -1, 800, -1, {ACT_NOTIFY_BC_UP} }, + {RSP_ZSAU, 650, 650, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} }, + {RSP_ZSAU, 651, 651, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT, + ACT_NOTIFY_BC_UP} }, + {RSP_ZSAU, 750, 750, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} }, + {RSP_ZSAU, 751, 751, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT, + ACT_NOTIFY_BC_UP} }, + {EV_BC_OPEN, 800, 800, -1, 800, -1, {ACT_NOTIFY_BC_UP} }, /* remote hangup */ - {RSP_ZSAU, 650, 651, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT} }, - {RSP_ZSAU, 750, 751, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} }, - {RSP_ZSAU, 800, 800, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} }, + {RSP_ZSAU, 650, 651, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT} }, + {RSP_ZSAU, 750, 751, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} }, + {RSP_ZSAU, 800, 800, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} }, /* hangup */ - {EV_HUP, -1, -1, -1, -1, -1, {ACT_HUP} }, - {RSP_INIT, -1, -1, SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, - {RSP_OK, 401, 401, -1, 402, 5}, - {RSP_ZVLS, 402, 402, 0, 403, 5}, - {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} }, - {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} }, - {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} }, - {RSP_ERROR, 401, 401, -1, 0, 0, {ACT_ABORTHUP} }, - {EV_TIMEOUT, 401, 403, -1, 0, 0, {ACT_ABORTHUP} }, - - {EV_BC_CLOSED, 0, 0, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} }, + {EV_HUP, -1, -1, -1, -1, -1, {ACT_HUP} }, + {RSP_INIT, -1, -1, SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, + {RSP_OK, 401, 401, -1, 402, 5}, + {RSP_ZVLS, 402, 402, 0, 403, 5}, + {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} }, + {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} }, + {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} }, + {RSP_ERROR, 401, 401, -1, 0, 0, {ACT_ABORTHUP} }, + {EV_TIMEOUT, 401, 403, -1, 0, 0, {ACT_ABORTHUP} }, + + {EV_BC_CLOSED, 0, 0, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} }, /* ring */ - {RSP_ZBC, 700, 700, -1, -1, -1, {0} }, - {RSP_ZHLC, 700, 700, -1, -1, -1, {0} }, - {RSP_NMBR, 700, 700, -1, -1, -1, {0} }, - {RSP_ZCPN, 700, 700, -1, -1, -1, {0} }, - {RSP_ZCTP, 700, 700, -1, -1, -1, {0} }, - {EV_TIMEOUT, 700, 700, -1, 720, 720, {ACT_ICALL} }, - {EV_BC_CLOSED, 720, 720, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} }, + {RSP_ZBC, 700, 700, -1, -1, -1, {0} }, + {RSP_ZHLC, 700, 700, -1, -1, -1, {0} }, + {RSP_NMBR, 700, 700, -1, -1, -1, {0} }, + {RSP_ZCPN, 700, 700, -1, -1, -1, {0} }, + {RSP_ZCTP, 700, 700, -1, -1, -1, {0} }, + {EV_TIMEOUT, 700, 700, -1, 720, 720, {ACT_ICALL} }, + {EV_BC_CLOSED, 720, 720, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} }, /*accept icall*/ - {EV_ACCEPT, -1, -1, -1, -1, -1, {ACT_ACCEPT} }, - {RSP_INIT, 720, 720, SEQ_ACCEPT, 721, 5, {ACT_CMD + AT_PROTO} }, - {RSP_OK, 721, 721, -1, 722, 5, {ACT_CMD + AT_ISO} }, - {RSP_OK, 722, 722, -1, 723, 5, {0}, "+VLS=17\r"}, - {RSP_OK, 723, 723, -1, 724, 5, {0} }, - {RSP_ZVLS, 724, 724, 17, 750, 50, {ACT_ACCEPTED} }, - {RSP_ERROR, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} }, - {EV_TIMEOUT, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} }, - {RSP_ZSAU, 700, 729, ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT} }, - {RSP_ZSAU, 700, 729, ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT} }, - {RSP_ZSAU, 700, 729, ZSAU_DISCONNECT_IND, 0, 0, 
{ACT_ABORTACCEPT} }, - - {EV_BC_OPEN, 750, 750, -1, 751, -1}, - {EV_TIMEOUT, 750, 751, -1, 0, 0, {ACT_CONNTIMEOUT} }, + {EV_ACCEPT, -1, -1, -1, -1, -1, {ACT_ACCEPT} }, + {RSP_INIT, 720, 720, SEQ_ACCEPT, 721, 5, {ACT_CMD + AT_PROTO} }, + {RSP_OK, 721, 721, -1, 722, 5, {ACT_CMD + AT_ISO} }, + {RSP_OK, 722, 722, -1, 723, 5, {0}, "+VLS=17\r"}, + {RSP_OK, 723, 723, -1, 724, 5, {0} }, + {RSP_ZVLS, 724, 724, 17, 750, 50, {ACT_ACCEPTED} }, + {RSP_ERROR, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} }, + {EV_TIMEOUT, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} }, + {RSP_ZSAU, 700, 729, ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT} }, + {RSP_ZSAU, 700, 729, ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT} }, + {RSP_ZSAU, 700, 729, ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT} }, + + {EV_BC_OPEN, 750, 750, -1, 751, -1}, + {EV_TIMEOUT, 750, 751, -1, 0, 0, {ACT_CONNTIMEOUT} }, /* B channel closed (general case) */ - {EV_BC_CLOSED, -1, -1, -1, -1, -1, {ACT_NOTIFY_BC_DOWN} }, + {EV_BC_CLOSED, -1, -1, -1, -1, -1, {ACT_NOTIFY_BC_DOWN} }, /* misc. */ - {RSP_ZCON, -1, -1, -1, -1, -1, {ACT_DEBUG} }, - {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} }, - {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} }, - {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} }, + {RSP_ZCON, -1, -1, -1, -1, -1, {ACT_DEBUG} }, + {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} }, + {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} }, + {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} }, {RSP_LAST} }; @@ -648,16 +649,16 @@ static void disconnect(struct at_state_t **at_state_p) static inline struct at_state_t *get_free_channel(struct cardstate *cs, int cid) /* cids: >0: siemens-cid - 0: without cid - -1: no cid assigned yet -*/ + * 0: without cid + * -1: no cid assigned yet + */ { unsigned long flags; int i; struct at_state_t *ret; for (i = 0; i < cs->channels; ++i) - if (gigaset_get_channel(cs->bcs + i)) { + if (gigaset_get_channel(cs->bcs + i) >= 0) { ret = &cs->bcs[i].at_state; ret->cid = cid; return ret; @@ -922,18 +923,18 @@ static void do_stop(struct cardstate *cs) * channel >= 0: getting cid for the channel failed * channel < 0: entering cid mode failed * - * returns 0 on failure + * returns 0 on success, <0 on failure */ static int reinit_and_retry(struct cardstate *cs, int channel) { int i; if (--cs->retry_count <= 0) - return 0; + return -EFAULT; for (i = 0; i < cs->channels; ++i) if (cs->bcs[i].at_state.cid > 0) - return 0; + return -EBUSY; if (channel < 0) dev_warn(cs->dev, @@ -944,7 +945,7 @@ static int reinit_and_retry(struct cardstate *cs, int channel) cs->bcs[channel].at_state.pending_commands |= PC_CID; } schedule_init(cs, MS_INIT); - return 1; + return 0; } static int at_state_invalid(struct cardstate *cs, @@ -1015,7 +1016,7 @@ static int do_lock(struct cardstate *cs) if (cs->bcs[i].at_state.pending_commands) return -EBUSY; - if (!gigaset_get_channels(cs)) + if (gigaset_get_channels(cs) < 0) return -EBUSY; break; @@ -1124,7 +1125,7 @@ static void do_action(int action, struct cardstate *cs, init_failed(cs, M_UNKNOWN); break; } - if (!reinit_and_retry(cs, -1)) + if (reinit_and_retry(cs, -1) < 0) schedule_init(cs, MS_RECOVER); break; case ACT_FAILUMODE: @@ -1267,7 +1268,7 @@ static void do_action(int action, struct cardstate *cs, case ACT_FAILCID: cs->cur_at_seq = SEQ_NONE; channel = cs->curchannel; - if (!reinit_and_retry(cs, channel)) { + if (reinit_and_retry(cs, channel) < 0) { dev_warn(cs->dev, "Could not get a call ID. 
Cannot dial.\n"); at_state2 = &cs->bcs[channel].at_state; @@ -1314,8 +1315,9 @@ static void do_action(int action, struct cardstate *cs, s = ev->ptr; if (!strcmp(s, "OK")) { + /* OK without version string: assume old response */ *p_genresp = 1; - *p_resp_code = RSP_ERROR; + *p_resp_code = RSP_NONE; break; } @@ -1372,7 +1374,8 @@ static void do_action(int action, struct cardstate *cs, ev->parameter, at_state->ConState); break; - /* events from the LL */ + /* events from the LL */ + case ACT_DIAL: start_dial(at_state, ev->ptr, ev->parameter); break; @@ -1385,7 +1388,8 @@ static void do_action(int action, struct cardstate *cs, cs->commands_pending = 1; break; - /* hotplug events */ + /* hotplug events */ + case ACT_STOP: do_stop(cs); break; @@ -1393,7 +1397,8 @@ static void do_action(int action, struct cardstate *cs, do_start(cs); break; - /* events from the interface */ + /* events from the interface */ + case ACT_IF_LOCK: cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs); cs->waiting = 0; @@ -1412,7 +1417,8 @@ static void do_action(int action, struct cardstate *cs, wake_up(&cs->waitqueue); break; - /* events from the proc file system */ + /* events from the proc file system */ + case ACT_PROC_CIDMODE: spin_lock_irqsave(&cs->lock, flags); if (ev->parameter != cs->cidmode) { @@ -1431,7 +1437,8 @@ static void do_action(int action, struct cardstate *cs, wake_up(&cs->waitqueue); break; - /* events from the hardware drivers */ + /* events from the hardware drivers */ + case ACT_NOTIFY_BC_DOWN: bchannel_down(bcs); break; diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h index 1dc25131e67..8e2fc8f31d1 100644 --- a/drivers/isdn/gigaset/gigaset.h +++ b/drivers/isdn/gigaset/gigaset.h @@ -163,8 +163,8 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg, #define BAS_LOWFRAME 5 /* " " with negative flow control */ #define BAS_CORRFRAMES 4 /* flow control multiplicator */ -#define BAS_INBUFSIZE (BAS_MAXFRAME * BAS_NUMFRAMES) -/* size of isoc in buf per URB */ +#define BAS_INBUFSIZE (BAS_MAXFRAME * BAS_NUMFRAMES) /* size of isoc in buf + * per URB */ #define BAS_OUTBUFSIZE 4096 /* size of common isoc out buffer */ #define BAS_OUTBUFPAD BAS_MAXFRAME /* size of pad area for isoc out buf */ @@ -471,18 +471,18 @@ struct cardstate { for */ int commands_pending; /* flag(s) in xxx.commands_pending have been set */ - struct tasklet_struct event_tasklet; - /* tasklet for serializing AT commands. - * Scheduled - * -> for modem reponses (and - * incoming data for M10x) - * -> on timeout - * -> after setting bits in - * xxx.at_state.pending_command - * (e.g. command from LL) */ - struct tasklet_struct write_tasklet; - /* tasklet for serial output - * (not used in base driver) */ + struct tasklet_struct + event_tasklet; /* tasklet for serializing AT commands. + * Scheduled + * -> for modem reponses (and + * incoming data for M10x) + * -> on timeout + * -> after setting bits in + * xxx.at_state.pending_command + * (e.g. 
command from LL) */ + struct tasklet_struct + write_tasklet; /* tasklet for serial output + * (not used in base driver) */ /* event queue */ struct event_t events[MAX_EVENTS]; @@ -583,7 +583,7 @@ struct gigaset_ops { int (*initbcshw)(struct bc_state *bcs); /* Called by gigaset_freecs() for freeing bcs->hw.xxx */ - int (*freebcshw)(struct bc_state *bcs); + void (*freebcshw)(struct bc_state *bcs); /* Called by gigaset_bchannel_down() for resetting bcs->hw.xxx */ void (*reinitbcshw)(struct bc_state *bcs); diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c index 0f13eb1de65..2d75329007f 100644 --- a/drivers/isdn/gigaset/i4l.c +++ b/drivers/isdn/gigaset/i4l.c @@ -229,7 +229,7 @@ static int command_from_LL(isdn_ctrl *cntrl) return -EINVAL; } bcs = cs->bcs + ch; - if (!gigaset_get_channel(bcs)) { + if (gigaset_get_channel(bcs) < 0) { dev_err(cs->dev, "ISDN_CMD_DIAL: channel not free\n"); return -EBUSY; } @@ -618,7 +618,7 @@ void gigaset_isdn_stop(struct cardstate *cs) * @cs: device descriptor structure. * @isdnid: device name. * - * Return value: 1 for success, 0 for failure + * Return value: 0 on success, error code < 0 on failure */ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) { @@ -627,14 +627,14 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) iif = kmalloc(sizeof *iif, GFP_KERNEL); if (!iif) { pr_err("out of memory\n"); - return 0; + return -ENOMEM; } if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index) >= sizeof iif->id) { pr_err("ID too long: %s\n", isdnid); kfree(iif); - return 0; + return -EINVAL; } iif->owner = THIS_MODULE; @@ -656,13 +656,13 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) if (!register_isdn(iif)) { pr_err("register_isdn failed\n"); kfree(iif); - return 0; + return -EINVAL; } cs->iif = iif; cs->myid = iif->channels; /* Set my device id */ cs->hw_hdr_len = HW_HDR_LEN; - return 1; + return 0; } /** diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c index a351c16705b..bc29f1d52a2 100644 --- a/drivers/isdn/gigaset/isocdata.c +++ b/drivers/isdn/gigaset/isocdata.c @@ -56,7 +56,7 @@ static inline int isowbuf_freebytes(struct isowbuf_t *iwb) /* start writing * acquire the write semaphore - * return true if acquired, false if busy + * return 0 if acquired, <0 if busy */ static inline int isowbuf_startwrite(struct isowbuf_t *iwb) { @@ -64,12 +64,12 @@ static inline int isowbuf_startwrite(struct isowbuf_t *iwb) atomic_inc(&iwb->writesem); gig_dbg(DEBUG_ISO, "%s: couldn't acquire iso write semaphore", __func__); - return 0; + return -EBUSY; } gig_dbg(DEBUG_ISO, "%s: acquired iso write semaphore, data[write]=%02x, nbits=%d", __func__, iwb->data[iwb->write], iwb->wbits); - return 1; + return 0; } /* finish writing @@ -158,7 +158,7 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size) /* no wraparound in valid data */ if (limit >= write) { /* append idle frame */ - if (!isowbuf_startwrite(iwb)) + if (isowbuf_startwrite(iwb) < 0) return -EBUSY; /* write position could have changed */ write = iwb->write; @@ -403,7 +403,7 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb, unsigned char c; if (isowbuf_freebytes(iwb) < count + count / 5 + 6 || - !isowbuf_startwrite(iwb)) { + isowbuf_startwrite(iwb) < 0) { gig_dbg(DEBUG_ISO, "%s: %d bytes free -> -EAGAIN", __func__, isowbuf_freebytes(iwb)); return -EAGAIN; @@ -457,7 +457,7 @@ static inline int trans_buildframe(struct isowbuf_t *iwb, return iwb->write; if (isowbuf_freebytes(iwb) < count || - 
!isowbuf_startwrite(iwb)) { + isowbuf_startwrite(iwb) < 0) { gig_dbg(DEBUG_ISO, "can't put %d bytes", count); return -EAGAIN; } diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c index 6f3fd4cf437..8c91fd5eb6f 100644 --- a/drivers/isdn/gigaset/ser-gigaset.c +++ b/drivers/isdn/gigaset/ser-gigaset.c @@ -340,17 +340,16 @@ static int gigaset_initbcshw(struct bc_state *bcs) { /* unused */ bcs->hw.ser = NULL; - return 1; + return 0; } /* * Free B channel structure * Called by "gigaset_freebcs" in common.c */ -static int gigaset_freebcshw(struct bc_state *bcs) +static void gigaset_freebcshw(struct bc_state *bcs) { /* unused */ - return 1; } /* @@ -398,7 +397,7 @@ static int gigaset_initcshw(struct cardstate *cs) scs = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL); if (!scs) { pr_err("out of memory\n"); - return 0; + return -ENOMEM; } cs->hw.ser = scs; @@ -410,13 +409,13 @@ static int gigaset_initcshw(struct cardstate *cs) pr_err("error %d registering platform device\n", rc); kfree(cs->hw.ser); cs->hw.ser = NULL; - return 0; + return rc; } dev_set_drvdata(&cs->hw.ser->dev.dev, cs); tasklet_init(&cs->write_tasklet, gigaset_modem_fill, (unsigned long) cs); - return 1; + return 0; } /* @@ -503,6 +502,7 @@ static int gigaset_tty_open(struct tty_struct *tty) { struct cardstate *cs; + int rc; gig_dbg(DEBUG_INIT, "Starting HLL for Gigaset M101"); @@ -515,8 +515,10 @@ gigaset_tty_open(struct tty_struct *tty) /* allocate memory for our device state and initialize it */ cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME); - if (!cs) + if (!cs) { + rc = -ENODEV; goto error; + } cs->dev = &cs->hw.ser->dev.dev; cs->hw.ser->tty = tty; @@ -530,7 +532,8 @@ gigaset_tty_open(struct tty_struct *tty) */ if (startmode == SM_LOCKED) cs->mstate = MS_LOCKED; - if (!gigaset_start(cs)) { + rc = gigaset_start(cs); + if (rc < 0) { tasklet_kill(&cs->write_tasklet); goto error; } @@ -542,7 +545,7 @@ error: gig_dbg(DEBUG_INIT, "Startup of HLL failed"); tty->disc_data = NULL; gigaset_freecs(cs); - return -ENODEV; + return rc; } /* diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c index 049da67f639..bb12d805173 100644 --- a/drivers/isdn/gigaset/usb-gigaset.c +++ b/drivers/isdn/gigaset/usb-gigaset.c @@ -549,10 +549,9 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6]) 0, 0, &buf, 6, 2000); } -static int gigaset_freebcshw(struct bc_state *bcs) +static void gigaset_freebcshw(struct bc_state *bcs) { /* unused */ - return 1; } /* Initialize the b-channel structure */ @@ -560,7 +559,7 @@ static int gigaset_initbcshw(struct bc_state *bcs) { /* unused */ bcs->hw.usb = NULL; - return 1; + return 0; } static void gigaset_reinitbcshw(struct bc_state *bcs) @@ -582,7 +581,7 @@ static int gigaset_initcshw(struct cardstate *cs) kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL); if (!ucs) { pr_err("out of memory\n"); - return 0; + return -ENOMEM; } ucs->bchars[0] = 0; @@ -597,7 +596,7 @@ static int gigaset_initcshw(struct cardstate *cs) tasklet_init(&cs->write_tasklet, gigaset_modem_fill, (unsigned long) cs); - return 1; + return 0; } /* Send data from current skb to the device. 
*/ @@ -766,9 +765,9 @@ static int gigaset_probe(struct usb_interface *interface, if (startmode == SM_LOCKED) cs->mstate = MS_LOCKED; - if (!gigaset_start(cs)) { + retval = gigaset_start(cs); + if (retval < 0) { tasklet_kill(&cs->write_tasklet); - retval = -ENODEV; goto error; } return 0; @@ -898,8 +897,10 @@ static int __init usb_gigaset_init(void) driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, GIGASET_MODULENAME, GIGASET_DEVNAME, &ops, THIS_MODULE); - if (driver == NULL) + if (driver == NULL) { + result = -ENOMEM; goto error; + } /* register this driver with the USB subsystem */ result = usb_register(&gigaset_usb_driver); @@ -915,7 +916,7 @@ error: if (driver) gigaset_freedriver(driver); driver = NULL; - return -1; + return result; } /* diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c index a576f32e663..7a0bdbdd87e 100644 --- a/drivers/isdn/hardware/eicon/capifunc.c +++ b/drivers/isdn/hardware/eicon/capifunc.c @@ -1120,7 +1120,7 @@ int fax_head_line_time(char *buffer) /* * init (alloc) main structures */ -static int DIVA_INIT_FUNCTION init_main_structs(void) +static int __init init_main_structs(void) { if (!(mapped_msg = (CAPI_MSG *) diva_os_malloc(0, MAX_MSG_SIZE))) { DBG_ERR(("init: failed alloc mapped_msg.")) @@ -1181,7 +1181,7 @@ static void do_api_remove_start(void) /* * init */ -int DIVA_INIT_FUNCTION init_capifunc(void) +int __init init_capifunc(void) { diva_os_initialize_spin_lock(&api_lock, "capifunc"); memset(ControllerMap, 0, MAX_DESCRIPTORS + 1); @@ -1209,7 +1209,7 @@ int DIVA_INIT_FUNCTION init_capifunc(void) /* * finit */ -void DIVA_EXIT_FUNCTION finit_capifunc(void) +void __exit finit_capifunc(void) { do_api_remove_start(); divacapi_disconnect_didd(); diff --git a/drivers/isdn/hardware/eicon/capimain.c b/drivers/isdn/hardware/eicon/capimain.c index eabe0fa1b62..997d46abf5b 100644 --- a/drivers/isdn/hardware/eicon/capimain.c +++ b/drivers/isdn/hardware/eicon/capimain.c @@ -118,7 +118,7 @@ void diva_os_set_controller_struct(struct capi_ctr *ctrl) /* * module init */ -static int DIVA_INIT_FUNCTION divacapi_init(void) +static int __init divacapi_init(void) { char tmprev[32]; int ret = 0; @@ -144,7 +144,7 @@ static int DIVA_INIT_FUNCTION divacapi_init(void) /* * module exit */ -static void DIVA_EXIT_FUNCTION divacapi_exit(void) +static void __exit divacapi_exit(void) { finit_capifunc(); printk(KERN_INFO "%s: module unloaded.\n", DRIVERLNAME); diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c index c4c8220c9d7..b0b23ed8b37 100644 --- a/drivers/isdn/hardware/eicon/diddfunc.c +++ b/drivers/isdn/hardware/eicon/diddfunc.c @@ -47,7 +47,7 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter, /* * connect to didd */ -static int DIVA_INIT_FUNCTION connect_didd(void) +static int __init connect_didd(void) { int x = 0; int dadapter = 0; @@ -79,7 +79,7 @@ static int DIVA_INIT_FUNCTION connect_didd(void) /* * disconnect from didd */ -static void DIVA_EXIT_FUNCTION disconnect_didd(void) +static void __exit disconnect_didd(void) { IDI_SYNC_REQ req; @@ -92,7 +92,7 @@ static void DIVA_EXIT_FUNCTION disconnect_didd(void) /* * init */ -int DIVA_INIT_FUNCTION diddfunc_init(void) +int __init diddfunc_init(void) { diva_didd_load_time_init(); @@ -107,7 +107,7 @@ int DIVA_INIT_FUNCTION diddfunc_init(void) /* * finit */ -void DIVA_EXIT_FUNCTION diddfunc_finit(void) +void __exit diddfunc_finit(void) { DbgDeregister(); disconnect_didd(); diff --git a/drivers/isdn/hardware/eicon/diva_didd.c 
b/drivers/isdn/hardware/eicon/diva_didd.c index d1d3de03cce..fab6ccfb00d 100644 --- a/drivers/isdn/hardware/eicon/diva_didd.c +++ b/drivers/isdn/hardware/eicon/diva_didd.c @@ -91,7 +91,7 @@ static const struct file_operations divadidd_proc_fops = { .release = single_release, }; -static int DIVA_INIT_FUNCTION create_proc(void) +static int __init create_proc(void) { proc_net_eicon = proc_mkdir("eicon", init_net.proc_net); @@ -109,7 +109,7 @@ static void remove_proc(void) remove_proc_entry("eicon", init_net.proc_net); } -static int DIVA_INIT_FUNCTION divadidd_init(void) +static int __init divadidd_init(void) { char tmprev[32]; int ret = 0; @@ -141,7 +141,7 @@ out: return (ret); } -static void DIVA_EXIT_FUNCTION divadidd_exit(void) +static void __exit divadidd_exit(void) { diddfunc_finit(); remove_proc(); diff --git a/drivers/isdn/hardware/eicon/divamnt.c b/drivers/isdn/hardware/eicon/divamnt.c index ffa0c31be74..48db08d0bb3 100644 --- a/drivers/isdn/hardware/eicon/divamnt.c +++ b/drivers/isdn/hardware/eicon/divamnt.c @@ -184,7 +184,7 @@ static void divas_maint_unregister_chrdev(void) unregister_chrdev(major, DEVNAME); } -static int DIVA_INIT_FUNCTION divas_maint_register_chrdev(void) +static int __init divas_maint_register_chrdev(void) { if ((major = register_chrdev(0, DEVNAME, &divas_maint_fops)) < 0) { @@ -207,7 +207,7 @@ void diva_maint_wakeup_read(void) /* * Driver Load */ -static int DIVA_INIT_FUNCTION maint_init(void) +static int __init maint_init(void) { char tmprev[50]; int ret = 0; @@ -245,7 +245,7 @@ out: /* ** Driver Unload */ -static void DIVA_EXIT_FUNCTION maint_exit(void) +static void __exit maint_exit(void) { divas_maint_unregister_chrdev(); mntfunc_finit(); diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c index 60aaf958095..4be5f881477 100644 --- a/drivers/isdn/hardware/eicon/divasfunc.c +++ b/drivers/isdn/hardware/eicon/divasfunc.c @@ -153,7 +153,7 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter, /* * connect to didd */ -static int DIVA_INIT_FUNCTION connect_didd(void) +static int __init connect_didd(void) { int x = 0; int dadapter = 0; @@ -209,7 +209,7 @@ static void disconnect_didd(void) /* * init */ -int DIVA_INIT_FUNCTION divasfunc_init(int dbgmask) +int __init divasfunc_init(int dbgmask) { char *version; diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c index a5c8f90b3b3..4103a8c178d 100644 --- a/drivers/isdn/hardware/eicon/divasi.c +++ b/drivers/isdn/hardware/eicon/divasi.c @@ -114,7 +114,7 @@ static const struct file_operations um_idi_proc_fops = { .release = single_release, }; -static int DIVA_INIT_FUNCTION create_um_idi_proc(void) +static int __init create_um_idi_proc(void) { um_idi_proc_entry = proc_create(DRIVERLNAME, S_IRUGO, proc_net_eicon, &um_idi_proc_fops); @@ -146,7 +146,7 @@ static void divas_idi_unregister_chrdev(void) unregister_chrdev(major, DEVNAME); } -static int DIVA_INIT_FUNCTION divas_idi_register_chrdev(void) +static int __init divas_idi_register_chrdev(void) { if ((major = register_chrdev(0, DEVNAME, &divas_idi_fops)) < 0) { @@ -161,7 +161,7 @@ static int DIVA_INIT_FUNCTION divas_idi_register_chrdev(void) /* ** Driver Load */ -static int DIVA_INIT_FUNCTION divasi_init(void) +static int __init divasi_init(void) { char tmprev[50]; int ret = 0; @@ -202,7 +202,7 @@ out: /* ** Driver Unload */ -static void DIVA_EXIT_FUNCTION divasi_exit(void) +static void __exit divasi_exit(void) { idifunc_finit(); remove_um_idi_proc(); diff --git 
a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c index 7eaab06276f..ca6d276bb25 100644 --- a/drivers/isdn/hardware/eicon/divasmain.c +++ b/drivers/isdn/hardware/eicon/divasmain.c @@ -673,7 +673,7 @@ static void divas_unregister_chrdev(void) unregister_chrdev(major, DEVNAME); } -static int DIVA_INIT_FUNCTION divas_register_chrdev(void) +static int __init divas_register_chrdev(void) { if ((major = register_chrdev(0, DEVNAME, &divas_fops)) < 0) { @@ -767,7 +767,7 @@ static void __devexit divas_remove_one(struct pci_dev *pdev) /* -------------------------------------------------------------------------- Driver Load / Startup -------------------------------------------------------------------------- */ -static int DIVA_INIT_FUNCTION divas_init(void) +static int __init divas_init(void) { char tmprev[50]; int ret = 0; @@ -831,7 +831,7 @@ out: /* -------------------------------------------------------------------------- Driver Unload -------------------------------------------------------------------------- */ -static void DIVA_EXIT_FUNCTION divas_exit(void) +static void __exit divas_exit(void) { pci_unregister_driver(&diva_pci_driver); remove_divas_proc(); diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c index d153e3cdecf..fef6586fe5a 100644 --- a/drivers/isdn/hardware/eicon/idifunc.c +++ b/drivers/isdn/hardware/eicon/idifunc.c @@ -133,7 +133,7 @@ static void um_remove_card(DESCRIPTOR *d) /* * remove all adapter */ -static void DIVA_EXIT_FUNCTION remove_all_idi_proc(void) +static void __exit remove_all_idi_proc(void) { udiva_card *card; diva_os_spin_lock_magic_t old_irql; @@ -181,7 +181,7 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter, /* * connect DIDD */ -static int DIVA_INIT_FUNCTION connect_didd(void) +static int __init connect_didd(void) { int x = 0; int dadapter = 0; @@ -225,7 +225,7 @@ static int DIVA_INIT_FUNCTION connect_didd(void) /* * Disconnect from DIDD */ -static void DIVA_EXIT_FUNCTION disconnect_didd(void) +static void __exit disconnect_didd(void) { IDI_SYNC_REQ req; @@ -240,7 +240,7 @@ static void DIVA_EXIT_FUNCTION disconnect_didd(void) /* * init */ -int DIVA_INIT_FUNCTION idifunc_init(void) +int __init idifunc_init(void) { diva_os_initialize_spin_lock(&ll_lock, "idifunc"); @@ -260,7 +260,7 @@ int DIVA_INIT_FUNCTION idifunc_init(void) /* * finit */ -void DIVA_EXIT_FUNCTION idifunc_finit(void) +void __exit idifunc_finit(void) { diva_user_mode_idi_finit(); disconnect_didd(); diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c index d6072607305..1cd9affb605 100644 --- a/drivers/isdn/hardware/eicon/mntfunc.c +++ b/drivers/isdn/hardware/eicon/mntfunc.c @@ -72,7 +72,7 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter, /* * connect to didd */ -static int DIVA_INIT_FUNCTION connect_didd(void) +static int __init connect_didd(void) { int x = 0; int dadapter = 0; @@ -114,7 +114,7 @@ static int DIVA_INIT_FUNCTION connect_didd(void) /* * disconnect from didd */ -static void DIVA_EXIT_FUNCTION disconnect_didd(void) +static void __exit disconnect_didd(void) { IDI_SYNC_REQ req; @@ -300,7 +300,7 @@ int maint_read_write(void __user *buf, int count) /* * init */ -int DIVA_INIT_FUNCTION mntfunc_init(int *buffer_length, void **buffer, +int __init mntfunc_init(int *buffer_length, void **buffer, unsigned long diva_dbg_mem) { if (*buffer_length < 64) { @@ -348,7 +348,7 @@ int DIVA_INIT_FUNCTION mntfunc_init(int *buffer_length, void **buffer, /* * exit */ 
-void DIVA_EXIT_FUNCTION mntfunc_finit(void) +void __exit mntfunc_finit(void) { void *buffer; int i = 100; diff --git a/drivers/isdn/hardware/eicon/platform.h b/drivers/isdn/hardware/eicon/platform.h index 7331c3b14a5..b2edb7590dd 100644 --- a/drivers/isdn/hardware/eicon/platform.h +++ b/drivers/isdn/hardware/eicon/platform.h @@ -38,9 +38,6 @@ #define DIVA_NO_DEBUGLIB #endif -#define DIVA_INIT_FUNCTION __init -#define DIVA_EXIT_FUNCTION __exit - #define DIVA_USER_MODE_CARD_CONFIG 1 #define USE_EXTENDED_DEBUGS 1 diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c index c0b8c960ee3..c08fc605e56 100644 --- a/drivers/isdn/hardware/mISDN/avmfritz.c +++ b/drivers/isdn/hardware/mISDN/avmfritz.c @@ -30,7 +30,7 @@ #include "ipac.h" -#define AVMFRITZ_REV "2.1" +#define AVMFRITZ_REV "2.3" static int AVM_cnt; static int debug; @@ -69,6 +69,7 @@ enum { #define HDLC_MODE_TRANS 0x02 #define HDLC_MODE_CCR_7 0x04 #define HDLC_MODE_CCR_16 0x08 +#define HDLC_FIFO_SIZE_128 0x20 #define HDLC_MODE_TESTLOOP 0x80 #define HDLC_INT_XPR 0x80 @@ -80,13 +81,16 @@ enum { #define HDLC_STAT_RDO 0x10 #define HDLC_STAT_CRCVFRRAB 0x0E #define HDLC_STAT_CRCVFR 0x06 -#define HDLC_STAT_RML_MASK 0x3f00 +#define HDLC_STAT_RML_MASK_V1 0x3f00 +#define HDLC_STAT_RML_MASK_V2 0x7f00 #define HDLC_CMD_XRS 0x80 #define HDLC_CMD_XME 0x01 #define HDLC_CMD_RRS 0x20 #define HDLC_CMD_XML_MASK 0x3f00 -#define HDLC_FIFO_SIZE 32 + +#define HDLC_FIFO_SIZE_V1 32 +#define HDLC_FIFO_SIZE_V2 128 /* Fritz PCI v2.0 */ @@ -346,11 +350,14 @@ modehdlc(struct bchannel *bch, int protocol) { struct fritzcard *fc = bch->hw; struct hdlc_hw *hdlc; + u8 mode; hdlc = &fc->hdlc[(bch->nr - 1) & 1]; pr_debug("%s: hdlc %c protocol %x-->%x ch %d\n", fc->name, '@' + bch->nr, bch->state, protocol, bch->nr); hdlc->ctrl.ctrl = 0; + mode = (fc->type == AVM_FRITZ_PCIV2) ? 
HDLC_FIFO_SIZE_128 : 0; + switch (protocol) { case -1: /* used for init */ bch->state = -1; @@ -358,7 +365,7 @@ modehdlc(struct bchannel *bch, int protocol) if (bch->state == ISDN_P_NONE) break; hdlc->ctrl.sr.cmd = HDLC_CMD_XRS | HDLC_CMD_RRS; - hdlc->ctrl.sr.mode = HDLC_MODE_TRANS; + hdlc->ctrl.sr.mode = mode | HDLC_MODE_TRANS; write_ctrl(bch, 5); bch->state = ISDN_P_NONE; test_and_clear_bit(FLG_HDLC, &bch->Flags); @@ -367,7 +374,7 @@ modehdlc(struct bchannel *bch, int protocol) case ISDN_P_B_RAW: bch->state = protocol; hdlc->ctrl.sr.cmd = HDLC_CMD_XRS | HDLC_CMD_RRS; - hdlc->ctrl.sr.mode = HDLC_MODE_TRANS; + hdlc->ctrl.sr.mode = mode | HDLC_MODE_TRANS; write_ctrl(bch, 5); hdlc->ctrl.sr.cmd = HDLC_CMD_XRS; write_ctrl(bch, 1); @@ -377,7 +384,7 @@ modehdlc(struct bchannel *bch, int protocol) case ISDN_P_B_HDLC: bch->state = protocol; hdlc->ctrl.sr.cmd = HDLC_CMD_XRS | HDLC_CMD_RRS; - hdlc->ctrl.sr.mode = HDLC_MODE_ITF_FLG; + hdlc->ctrl.sr.mode = mode | HDLC_MODE_ITF_FLG; write_ctrl(bch, 5); hdlc->ctrl.sr.cmd = HDLC_CMD_XRS; write_ctrl(bch, 1); @@ -397,39 +404,40 @@ hdlc_empty_fifo(struct bchannel *bch, int count) u32 *ptr; u8 *p; u32 val, addr; - int cnt = 0; + int cnt; struct fritzcard *fc = bch->hw; pr_debug("%s: %s %d\n", fc->name, __func__, count); - if (!bch->rx_skb) { - bch->rx_skb = mI_alloc_skb(bch->maxlen, GFP_ATOMIC); - if (!bch->rx_skb) { - pr_info("%s: B receive out of memory\n", - fc->name); + if (test_bit(FLG_RX_OFF, &bch->Flags)) { + p = NULL; + bch->dropcnt += count; + } else { + cnt = bchannel_get_rxbuf(bch, count); + if (cnt < 0) { + pr_warning("%s.B%d: No bufferspace for %d bytes\n", + fc->name, bch->nr, count); return; } + p = skb_put(bch->rx_skb, count); } - if ((bch->rx_skb->len + count) > bch->maxlen) { - pr_debug("%s: overrun %d\n", fc->name, - bch->rx_skb->len + count); - return; - } - p = skb_put(bch->rx_skb, count); ptr = (u32 *)p; - if (AVM_FRITZ_PCIV2 == fc->type) + if (fc->type == AVM_FRITZ_PCIV2) addr = fc->addr + (bch->nr == 2 ? AVM_HDLC_FIFO_2 : AVM_HDLC_FIFO_1); else { addr = fc->addr + CHIP_WINDOW; outl(bch->nr == 2 ? AVM_HDLC_2 : AVM_HDLC_1, fc->addr); } + cnt = 0; while (cnt < count) { val = le32_to_cpu(inl(addr)); - put_unaligned(val, ptr); - ptr++; + if (p) { + put_unaligned(val, ptr); + ptr++; + } cnt += 4; } - if (debug & DEBUG_HW_BFIFO) { + if (p && (debug & DEBUG_HW_BFIFO)) { snprintf(fc->log, LOG_SIZE, "B%1d-recv %s %d ", bch->nr, fc->name, count); print_hex_dump_bytes(fc->log, DUMP_PREFIX_OFFSET, p, count); @@ -441,30 +449,43 @@ hdlc_fill_fifo(struct bchannel *bch) { struct fritzcard *fc = bch->hw; struct hdlc_hw *hdlc; - int count, cnt = 0; + int count, fs, cnt = 0, idx, fillempty = 0; u8 *p; u32 *ptr, val, addr; - hdlc = &fc->hdlc[(bch->nr - 1) & 1]; - if (!bch->tx_skb) - return; - count = bch->tx_skb->len - bch->tx_idx; - if (count <= 0) - return; - p = bch->tx_skb->data + bch->tx_idx; + idx = (bch->nr - 1) & 1; + hdlc = &fc->hdlc[idx]; + fs = (fc->type == AVM_FRITZ_PCIV2) ? 
+ HDLC_FIFO_SIZE_V2 : HDLC_FIFO_SIZE_V1; + if (!bch->tx_skb) { + if (!test_bit(FLG_TX_EMPTY, &bch->Flags)) + return; + count = fs; + p = bch->fill; + fillempty = 1; + } else { + count = bch->tx_skb->len - bch->tx_idx; + if (count <= 0) + return; + p = bch->tx_skb->data + bch->tx_idx; + } hdlc->ctrl.sr.cmd &= ~HDLC_CMD_XME; - if (count > HDLC_FIFO_SIZE) { - count = HDLC_FIFO_SIZE; + if (count > fs) { + count = fs; } else { if (test_bit(FLG_HDLC, &bch->Flags)) hdlc->ctrl.sr.cmd |= HDLC_CMD_XME; } - pr_debug("%s: %s %d/%d/%d", fc->name, __func__, count, - bch->tx_idx, bch->tx_skb->len); ptr = (u32 *)p; - bch->tx_idx += count; - hdlc->ctrl.sr.xml = ((count == HDLC_FIFO_SIZE) ? 0 : count); - if (AVM_FRITZ_PCIV2 == fc->type) { + if (fillempty) { + pr_debug("%s.B%d: %d/%d/%d", fc->name, bch->nr, count, + bch->tx_idx, bch->tx_skb->len); + bch->tx_idx += count; + } else { + pr_debug("%s.B%d: fillempty %d\n", fc->name, bch->nr, count); + } + hdlc->ctrl.sr.xml = ((count == fs) ? 0 : count); + if (fc->type == AVM_FRITZ_PCIV2) { __write_ctrl_pciv2(fc, hdlc, bch->nr); addr = fc->addr + (bch->nr == 2 ? AVM_HDLC_FIFO_2 : AVM_HDLC_FIFO_1); @@ -472,13 +493,21 @@ hdlc_fill_fifo(struct bchannel *bch) __write_ctrl_pci(fc, hdlc, bch->nr); addr = fc->addr + CHIP_WINDOW; } - while (cnt < count) { - val = get_unaligned(ptr); - outl(cpu_to_le32(val), addr); - ptr++; - cnt += 4; + if (fillempty) { + while (cnt < count) { + /* all bytes the same - no worry about endian */ + outl(*ptr, addr); + cnt += 4; + } + } else { + while (cnt < count) { + val = get_unaligned(ptr); + outl(cpu_to_le32(val), addr); + ptr++; + cnt += 4; + } } - if (debug & DEBUG_HW_BFIFO) { + if ((debug & DEBUG_HW_BFIFO) && !fillempty) { snprintf(fc->log, LOG_SIZE, "B%1d-send %s %d ", bch->nr, fc->name, count); print_hex_dump_bytes(fc->log, DUMP_PREFIX_OFFSET, p, count); @@ -488,17 +517,17 @@ hdlc_fill_fifo(struct bchannel *bch) static void HDLC_irq_xpr(struct bchannel *bch) { - if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len) + if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len) { hdlc_fill_fifo(bch); - else { - if (bch->tx_skb) { - /* send confirm, on trans, free on hdlc. 
*/ - if (test_bit(FLG_TRANSPARENT, &bch->Flags)) - confirm_Bsend(bch); + } else { + if (bch->tx_skb) dev_kfree_skb(bch->tx_skb); - } - if (get_next_bframe(bch)) + if (get_next_bframe(bch)) { hdlc_fill_fifo(bch); + test_and_clear_bit(FLG_TX_EMPTY, &bch->Flags); + } else if (test_bit(FLG_TX_EMPTY, &bch->Flags)) { + hdlc_fill_fifo(bch); + } } } @@ -506,13 +535,23 @@ static void HDLC_irq(struct bchannel *bch, u32 stat) { struct fritzcard *fc = bch->hw; - int len; + int len, fs; + u32 rmlMask; struct hdlc_hw *hdlc; hdlc = &fc->hdlc[(bch->nr - 1) & 1]; pr_debug("%s: ch%d stat %#x\n", fc->name, bch->nr, stat); + if (fc->type == AVM_FRITZ_PCIV2) { + rmlMask = HDLC_STAT_RML_MASK_V2; + fs = HDLC_FIFO_SIZE_V2; + } else { + rmlMask = HDLC_STAT_RML_MASK_V1; + fs = HDLC_FIFO_SIZE_V1; + } if (stat & HDLC_INT_RPR) { if (stat & HDLC_STAT_RDO) { + pr_warning("%s: ch%d stat %x RDO\n", + fc->name, bch->nr, stat); hdlc->ctrl.sr.xml = 0; hdlc->ctrl.sr.cmd |= HDLC_CMD_RRS; write_ctrl(bch, 1); @@ -521,21 +560,21 @@ HDLC_irq(struct bchannel *bch, u32 stat) if (bch->rx_skb) skb_trim(bch->rx_skb, 0); } else { - len = (stat & HDLC_STAT_RML_MASK) >> 8; + len = (stat & rmlMask) >> 8; if (!len) - len = 32; + len = fs; hdlc_empty_fifo(bch, len); if (!bch->rx_skb) goto handle_tx; - if ((stat & HDLC_STAT_RME) || test_bit(FLG_TRANSPARENT, - &bch->Flags)) { - if (((stat & HDLC_STAT_CRCVFRRAB) == - HDLC_STAT_CRCVFR) || - test_bit(FLG_TRANSPARENT, &bch->Flags)) { - recv_Bchannel(bch, 0); + if (test_bit(FLG_TRANSPARENT, &bch->Flags)) { + recv_Bchannel(bch, 0, false); + } else if (stat & HDLC_STAT_RME) { + if ((stat & HDLC_STAT_CRCVFRRAB) == + HDLC_STAT_CRCVFR) { + recv_Bchannel(bch, 0, false); } else { - pr_debug("%s: got invalid frame\n", - fc->name); + pr_warning("%s: got invalid frame\n", + fc->name); skb_trim(bch->rx_skb, 0); } } @@ -547,16 +586,13 @@ handle_tx: * restart transmitting the whole frame on HDLC * in transparent mode we send the next data */ - if (bch->tx_skb) - pr_debug("%s: ch%d XDU len(%d) idx(%d) Flags(%lx)\n", - fc->name, bch->nr, bch->tx_skb->len, - bch->tx_idx, bch->Flags); - else - pr_debug("%s: ch%d XDU no tx_skb Flags(%lx)\n", - fc->name, bch->nr, bch->Flags); + pr_warning("%s: ch%d stat %x XDU %s\n", fc->name, bch->nr, + stat, bch->tx_skb ? 
"tx_skb" : "no tx_skb"); if (bch->tx_skb && bch->tx_skb->len) { if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) bch->tx_idx = 0; + } else if (test_bit(FLG_FILLEMPTY, &bch->Flags)) { + test_and_set_bit(FLG_TX_EMPTY, &bch->Flags); } hdlc->ctrl.sr.xml = 0; hdlc->ctrl.sr.cmd |= HDLC_CMD_XRS; @@ -659,22 +695,17 @@ avm_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb) struct fritzcard *fc = bch->hw; int ret = -EINVAL; struct mISDNhead *hh = mISDN_HEAD_P(skb); - u32 id; - u_long flags; + unsigned long flags; switch (hh->prim) { case PH_DATA_REQ: spin_lock_irqsave(&fc->lock, flags); ret = bchannel_senddata(bch, skb); if (ret > 0) { /* direct TX */ - id = hh->id; /* skb can be freed */ hdlc_fill_fifo(bch); ret = 0; - spin_unlock_irqrestore(&fc->lock, flags); - if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) - queue_ch_frame(ch, PH_DATA_CNF, id, NULL); - } else - spin_unlock_irqrestore(&fc->lock, flags); + } + spin_unlock_irqrestore(&fc->lock, flags); return ret; case PH_ACTIVATE_REQ: spin_lock_irqsave(&fc->lock, flags); @@ -783,7 +814,7 @@ init_card(struct fritzcard *fc) inithdlc(fc); enable_hwirq(fc); /* RESET Receiver and Transmitter */ - if (AVM_FRITZ_PCIV2 == fc->type) { + if (fc->type == AVM_FRITZ_PCIV2) { WriteISAC_V2(fc, ISACX_MASK, 0); WriteISAC_V2(fc, ISACX_CMDRD, 0x41); } else { @@ -810,21 +841,7 @@ init_card(struct fritzcard *fc) static int channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) { - int ret = 0; - struct fritzcard *fc = bch->hw; - - switch (cq->op) { - case MISDN_CTRL_GETOP: - cq->op = 0; - break; - /* Nothing implemented yet */ - case MISDN_CTRL_FILL_EMPTY: - default: - pr_info("%s: %s unknown Op %x\n", fc->name, __func__, cq->op); - ret = -EINVAL; - break; - } - return ret; + return mISDN_ctrl_bchannel(bch, cq); } static int @@ -839,14 +856,10 @@ avm_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg) switch (cmd) { case CLOSE_CHANNEL: test_and_clear_bit(FLG_OPEN, &bch->Flags); - if (test_bit(FLG_ACTIVE, &bch->Flags)) { - spin_lock_irqsave(&fc->lock, flags); - mISDN_freebchannel(bch); - test_and_clear_bit(FLG_TX_BUSY, &bch->Flags); - test_and_clear_bit(FLG_ACTIVE, &bch->Flags); - modehdlc(bch, ISDN_P_NONE); - spin_unlock_irqrestore(&fc->lock, flags); - } + spin_lock_irqsave(&fc->lock, flags); + mISDN_freebchannel(bch); + modehdlc(bch, ISDN_P_NONE); + spin_unlock_irqrestore(&fc->lock, flags); ch->protocol = ISDN_P_NONE; ch->peer = NULL; module_put(THIS_MODULE); @@ -868,7 +881,7 @@ channel_ctrl(struct fritzcard *fc, struct mISDN_ctrl_req *cq) switch (cq->op) { case MISDN_CTRL_GETOP: - cq->op = MISDN_CTRL_LOOP; + cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3; break; case MISDN_CTRL_LOOP: /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */ @@ -878,6 +891,9 @@ channel_ctrl(struct fritzcard *fc, struct mISDN_ctrl_req *cq) } ret = fc->isac.ctrl(&fc->isac, HW_TESTLOOP, cq->channel); break; + case MISDN_CTRL_L1_TIMER3: + ret = fc->isac.ctrl(&fc->isac, HW_TIMER3_VALUE, cq->p1); + break; default: pr_info("%s: %s unknown Op %x\n", fc->name, __func__, cq->op); ret = -EINVAL; @@ -898,7 +914,6 @@ open_bchannel(struct fritzcard *fc, struct channel_req *rq) bch = &fc->bch[rq->adr.channel - 1]; if (test_and_set_bit(FLG_OPEN, &bch->Flags)) return -EBUSY; /* b-channel can be only open once */ - test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags); bch->ch.protocol = rq->protocol; rq->ch = &bch->ch; return 0; @@ -1021,6 +1036,7 @@ static int __devinit setup_instance(struct fritzcard *card) { int i, err; + unsigned short minsize; u_long flags; snprintf(card->name, MISDN_MAX_IDLEN - 1, 
"AVM.%d", AVM_cnt + 1); @@ -1040,7 +1056,11 @@ setup_instance(struct fritzcard *card) for (i = 0; i < 2; i++) { card->bch[i].nr = i + 1; set_channelmap(i + 1, card->isac.dch.dev.channelmap); - mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM); + if (AVM_FRITZ_PCIV2 == card->type) + minsize = HDLC_FIFO_SIZE_V2; + else + minsize = HDLC_FIFO_SIZE_V1; + mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM, minsize); card->bch[i].hw = card; card->bch[i].ch.send = avm_l2l1B; card->bch[i].ch.ctrl = avm_bctrl; diff --git a/drivers/isdn/hardware/mISDN/hfc_multi.h b/drivers/isdn/hardware/mISDN/hfc_multi.h index b0588acbb47..c601f880141 100644 --- a/drivers/isdn/hardware/mISDN/hfc_multi.h +++ b/drivers/isdn/hardware/mISDN/hfc_multi.h @@ -205,18 +205,22 @@ struct hfc_multi { u_int slots; /* number of PCM slots */ u_int leds; /* type of leds */ - u_int ledcount; /* used to animate leds */ u_long ledstate; /* save last state of leds */ int opticalsupport; /* has the e1 board */ /* an optical Interface */ - int dslot; /* channel # of d-channel (E1) default 16 */ + + u_int bmask[32]; /* bitmask of bchannels for port */ + u_char dnum[32]; /* array of used dchannel numbers for port */ + u_char created[32]; /* what port is created */ + u_int activity_tx; /* if there is data TX / RX */ + u_int activity_rx; /* bitmask according to port number */ + /* (will be cleared after */ + /* showing led-states) */ + u_int flash[8]; /* counter for flashing 8 leds on activity */ u_long wdcount; /* every 500 ms we need to */ /* send the watchdog a signal */ u_char wdbyte; /* watchdog toggle byte */ - u_int activity[8]; /* if there is any action on this */ - /* port (will be cleared after */ - /* showing led-states) */ int e1_state; /* keep track of last state */ int e1_getclock; /* if sync is retrieved from interface */ int syncronized; /* keep track of existing sync interface */ @@ -233,7 +237,6 @@ struct hfc_multi { * the bch->channel is equvalent to the hfc-channel */ struct hfc_chan chan[32]; - u_char created[8]; /* what port is created */ signed char slot_owner[256]; /* owner channel of slot */ }; diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c index 033223180b5..5e402cf2e79 100644 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c @@ -103,14 +103,26 @@ * Omit this value, if all cards are interconnected or none is connected. * If unsure, don't give this parameter. * - * dslot: - * NOTE: only one dslot value must be given for every card. - * Also this value must be given for non-E1 cards. If omitted, the E1 - * card has D-channel on time slot 16, which is default. - * If 1..15 or 17..31, an alternate time slot is used for D-channel. - * In this case, the application must be able to handle this. - * If -1 is given, the D-channel is disabled and all 31 slots can be used - * for B-channel. (only for specific applications) + * dmask and bmask: + * NOTE: One dmask value must be given for every HFC-E1 card. + * If omitted, the E1 card has D-channel on time slot 16, which is default. + * dmask is a 32 bit mask. The bit must be set for an alternate time slot. + * If multiple bits are set, multiple virtual card fragments are created. + * For each bit set, a bmask value must be given. Each bit on the bmask + * value stands for a B-channel. The bmask may not overlap with dmask or + * with other bmask values for that card. 
+ * Example: dmask=0x00020002 bmask=0x0000fffc,0xfffc0000 + * This will create one fragment with D-channel on slot 1 with + * B-channels on slots 2..15, and a second fragment with D-channel + * on slot 17 with B-channels on slot 18..31. Slot 16 is unused. + * If bit 0 is set (dmask=0x00000001) the D-channel is on slot 0 and will + * not function. + * Example: dmask=0x00000001 bmask=0xfffffffe + * This will create a port with all 31 usable timeslots as + * B-channels. + * If no bits are set on bmask, no B-channel is created for that fragment. + * Example: dmask=0xfffffffe bmask=0,0,0,0.... (31 0-values for bmask) + * This will create 31 ports with one D-channel only. * If you don't know how to use it, you don't need it! * * iomode: @@ -172,6 +184,7 @@ #define MAX_CARDS 8 #define MAX_PORTS (8 * MAX_CARDS) +#define MAX_FRAGS (32 * MAX_CARDS) static LIST_HEAD(HFClist); static spinlock_t HFClock; /* global hfc list lock */ @@ -203,7 +216,8 @@ static int nt_t1_count[] = { 3840, 1920, 960, 480, 240, 120, 60, 30 }; static uint type[MAX_CARDS]; static int pcm[MAX_CARDS]; -static int dslot[MAX_CARDS]; +static uint dmask[MAX_CARDS]; +static uint bmask[MAX_FRAGS]; static uint iomode[MAX_CARDS]; static uint port[MAX_PORTS]; static uint debug; @@ -218,7 +232,7 @@ static uint clockdelay_nt = CLKDEL_NT; #define HWID_MINIP16 3 static uint hwid = HWID_NONE; -static int HFC_cnt, Port_cnt, PCM_cnt = 99; +static int HFC_cnt, E1_cnt, bmask_cnt, Port_cnt, PCM_cnt = 99; MODULE_AUTHOR("Andreas Eversberg"); MODULE_LICENSE("GPL"); @@ -231,7 +245,8 @@ module_param(clockdelay_te, uint, S_IRUGO | S_IWUSR); module_param(clockdelay_nt, uint, S_IRUGO | S_IWUSR); module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR); module_param_array(pcm, int, NULL, S_IRUGO | S_IWUSR); -module_param_array(dslot, int, NULL, S_IRUGO | S_IWUSR); +module_param_array(dmask, uint, NULL, S_IRUGO | S_IWUSR); +module_param_array(bmask, uint, NULL, S_IRUGO | S_IWUSR); module_param_array(iomode, uint, NULL, S_IRUGO | S_IWUSR); module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR); module_param(hwid, uint, S_IRUGO | S_IWUSR); /* The hardware ID */ @@ -1156,7 +1171,7 @@ init_chip(struct hfc_multi *hc) hc->DTMFbase = 0x1000; if (test_bit(HFC_CHIP_EXRAM_128, &hc->chip)) { if (debug & DEBUG_HFCMULTI_INIT) - printk(KERN_DEBUG "%s: changing to 128K extenal RAM\n", + printk(KERN_DEBUG "%s: changing to 128K external RAM\n", __func__); hc->hw.r_ctrl |= V_EXT_RAM; hc->hw.r_ram_sz = 1; @@ -1167,7 +1182,7 @@ init_chip(struct hfc_multi *hc) } if (test_bit(HFC_CHIP_EXRAM_512, &hc->chip)) { if (debug & DEBUG_HFCMULTI_INIT) - printk(KERN_DEBUG "%s: changing to 512K extenal RAM\n", + printk(KERN_DEBUG "%s: changing to 512K external RAM\n", __func__); hc->hw.r_ctrl |= V_EXT_RAM; hc->hw.r_ram_sz = 2; @@ -1607,40 +1622,46 @@ hfcmulti_leds(struct hfc_multi *hc) struct dchannel *dch; int led[4]; - hc->ledcount += poll; - if (hc->ledcount > 4096) { - hc->ledcount -= 4096; - hc->ledstate = 0xAFFEAFFE; - } - switch (hc->leds) { case 1: /* HFC-E1 OEM */ - /* 2 red blinking: NT mode deactivate - * 2 red steady: TE mode deactivate - * left green: L1 active - * left red: frame sync, but no L1 - * right green: L2 active + /* 2 red steady: LOS + * 1 red steady: L1 not active + * 2 green steady: L1 active + * 1st green flashing: activity on TX + * 2nd green flashing: activity on RX */ - if (hc->chan[hc->dslot].sync != 2) { /* no frame sync */ - if (hc->chan[hc->dslot].dch->dev.D.protocol - != ISDN_P_NT_E1) { - led[0] = 1; + led[0] = 0; + led[1] = 0; + led[2] = 0; + led[3] = 0; 
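
Returning to the dmask/bmask parameters documented above: the patch itself rejects overlapping masks when hfcmulti_init() walks dmask and assigns one bmask value per fragment (the maskcheck logic further down in this diff). The standalone sketch below models that layout check in user space so a parameter line can be sanity-checked before loading the module; the function name and the main() driver are illustrative, only the overlap rule comes from the patch.

#include <stdio.h>

/*
 * Model of the fragment layout check hfcmulti_init() performs for
 * dmask/bmask: every bit set in dmask starts a fragment, which consumes
 * the next bmask value; no bmask may overlap dmask or another bmask.
 * Returns the number of fragments, or -1 on an invalid layout.
 */
static int check_e1_fragments(unsigned int dmask, const unsigned int *bmask, int nbmask)
{
    unsigned int used = 0;      /* B-channel slots already claimed */
    int pt = 0, ch;

    for (ch = 0; ch <= 31; ch++) {
        if (!((1U << ch) & dmask))
            continue;
        if (pt >= nbmask)
            return -1;          /* not enough bmask values given */
        if ((used & bmask[pt]) || (dmask & bmask[pt]))
            return -1;          /* overlapping B-channels */
        used |= bmask[pt];
        printf("fragment %d: D-channel on slot %d, B-channel map 0x%08x\n",
               pt, ch, bmask[pt]);
        pt++;
    }
    return pt;
}

int main(void)
{
    /* the two-fragment example from the comment above; slot 16 stays unused */
    unsigned int bmask[] = { 0x0000fffc, 0xfffc0000 };

    return check_e1_fragments(0x00020002, bmask, 2) < 0;
}
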
+ dch = hc->chan[hc->dnum[0]].dch; + if (dch) { + if (hc->chan[hc->dnum[0]].los) led[1] = 1; - } else if (hc->ledcount >> 11) { + if (hc->e1_state != 1) { led[0] = 1; - led[1] = 1; + hc->flash[2] = 0; + hc->flash[3] = 0; } else { - led[0] = 0; - led[1] = 0; + led[2] = 1; + led[3] = 1; + if (!hc->flash[2] && hc->activity_tx) + hc->flash[2] = poll; + if (!hc->flash[3] && hc->activity_rx) + hc->flash[3] = poll; + if (hc->flash[2] && hc->flash[2] < 1024) + led[2] = 0; + if (hc->flash[3] && hc->flash[3] < 1024) + led[3] = 0; + if (hc->flash[2] >= 2048) + hc->flash[2] = 0; + if (hc->flash[3] >= 2048) + hc->flash[3] = 0; + if (hc->flash[2]) + hc->flash[2] += poll; + if (hc->flash[3]) + hc->flash[3] += poll; } - led[2] = 0; - led[3] = 0; - } else { /* with frame sync */ - /* TODO make it work */ - led[0] = 0; - led[1] = 0; - led[2] = 0; - led[3] = 1; } leds = (led[0] | (led[1]<<2) | (led[2]<<1) | (led[3]<<3))^0xF; /* leds are inverted */ @@ -1651,9 +1672,9 @@ hfcmulti_leds(struct hfc_multi *hc) break; case 2: /* HFC-4S OEM */ - /* red blinking = PH_DEACTIVATE NT Mode - * red steady = PH_DEACTIVATE TE Mode - * green steady = PH_ACTIVATE + /* red steady: PH_DEACTIVATE + * green steady: PH_ACTIVATE + * green flashing: activity on TX */ for (i = 0; i < 4; i++) { state = 0; @@ -1669,17 +1690,20 @@ hfcmulti_leds(struct hfc_multi *hc) if (state) { if (state == active) { led[i] = 1; /* led green */ - } else - if (dch->dev.D.protocol == ISDN_P_TE_S0) - /* TE mode: led red */ - led[i] = 2; - else - if (hc->ledcount >> 11) - /* led red */ - led[i] = 2; - else - /* led off */ - led[i] = 0; + hc->activity_tx |= hc->activity_rx; + if (!hc->flash[i] && + (hc->activity_tx & (1 << i))) + hc->flash[i] = poll; + if (hc->flash[i] && hc->flash[i] < 1024) + led[i] = 0; /* led off */ + if (hc->flash[i] >= 2048) + hc->flash[i] = 0; + if (hc->flash[i]) + hc->flash[i] += poll; + } else { + led[i] = 2; /* led red */ + hc->flash[i] = 0; + } } else led[i] = 0; /* led off */ } @@ -1712,9 +1736,9 @@ hfcmulti_leds(struct hfc_multi *hc) break; case 3: /* HFC 1S/2S Beronet */ - /* red blinking = PH_DEACTIVATE NT Mode - * red steady = PH_DEACTIVATE TE Mode - * green steady = PH_ACTIVATE + /* red steady: PH_DEACTIVATE + * green steady: PH_ACTIVATE + * green flashing: activity on TX */ for (i = 0; i < 2; i++) { state = 0; @@ -1730,22 +1754,23 @@ hfcmulti_leds(struct hfc_multi *hc) if (state) { if (state == active) { led[i] = 1; /* led green */ - } else - if (dch->dev.D.protocol == ISDN_P_TE_S0) - /* TE mode: led red */ - led[i] = 2; - else - if (hc->ledcount >> 11) - /* led red */ - led[i] = 2; - else - /* led off */ - led[i] = 0; + hc->activity_tx |= hc->activity_rx; + if (!hc->flash[i] && + (hc->activity_tx & (1 << i))) + hc->flash[i] = poll; + if (hc->flash[i] < 1024) + led[i] = 0; /* led off */ + if (hc->flash[i] >= 2048) + hc->flash[i] = 0; + if (hc->flash[i]) + hc->flash[i] += poll; + } else { + led[i] = 2; /* led red */ + hc->flash[i] = 0; + } } else led[i] = 0; /* led off */ } - - leds = (led[0] > 0) | ((led[1] > 0) << 1) | ((led[0]&1) << 2) | ((led[1]&1) << 3); if (leds != (int)hc->ledstate) { @@ -1757,8 +1782,11 @@ hfcmulti_leds(struct hfc_multi *hc) } break; case 8: /* HFC 8S+ Beronet */ - lled = 0; - + /* off: PH_DEACTIVATE + * steady: PH_ACTIVATE + * flashing: activity on TX + */ + lled = 0xff; /* leds off */ for (i = 0; i < 8; i++) { state = 0; active = -1; @@ -1772,14 +1800,20 @@ hfcmulti_leds(struct hfc_multi *hc) } if (state) { if (state == active) { - lled |= 0 << i; + lled &= ~(1 << i); /* led on */ + 
hc->activity_tx |= hc->activity_rx; + if (!hc->flash[i] && + (hc->activity_tx & (1 << i))) + hc->flash[i] = poll; + if (hc->flash[i] < 1024) + lled |= 1 << i; /* led off */ + if (hc->flash[i] >= 2048) + hc->flash[i] = 0; + if (hc->flash[i]) + hc->flash[i] += poll; } else - if (hc->ledcount >> 11) - lled |= 0 << i; - else - lled |= 1 << i; - } else - lled |= 1 << i; + hc->flash[i] = 0; + } } leddw = lled << 24 | lled << 16 | lled << 8 | lled; if (leddw != hc->ledstate) { @@ -1794,6 +1828,8 @@ hfcmulti_leds(struct hfc_multi *hc) } break; } + hc->activity_tx = 0; + hc->activity_rx = 0; } /* * read dtmf coefficients @@ -2093,7 +2129,8 @@ next_frame: *txpending = 1; /* show activity */ - hc->activity[hc->chan[ch].port] = 1; + if (dch) + hc->activity_tx |= 1 << hc->chan[ch].port; /* fill fifo to what we have left */ ii = len; @@ -2129,13 +2166,9 @@ next_frame: HFC_wait_nodebug(hc); } - /* send confirm, since get_net_bframe will not do it with trans */ - if (bch && test_bit(FLG_TRANSPARENT, &bch->Flags)) - confirm_Bsend(bch); - - /* check for next frame */ dev_kfree_skb(*sp); - if (bch && get_next_bframe(bch)) { /* hdlc is confirmed here */ + /* check for next frame */ + if (bch && get_next_bframe(bch)) { len = (*sp)->len; goto next_frame; } @@ -2163,24 +2196,20 @@ hfcmulti_rx(struct hfc_multi *hc, int ch) int f1 = 0, f2 = 0; /* = 0, to make GCC happy */ int again = 0; struct bchannel *bch; - struct dchannel *dch; + struct dchannel *dch = NULL; struct sk_buff *skb, **sp = NULL; int maxlen; bch = hc->chan[ch].bch; - dch = hc->chan[ch].dch; - if ((!dch) && (!bch)) - return; - if (dch) { + if (bch) { + if (!test_bit(FLG_ACTIVE, &bch->Flags)) + return; + } else if (hc->chan[ch].dch) { + dch = hc->chan[ch].dch; if (!test_bit(FLG_ACTIVE, &dch->Flags)) return; - sp = &dch->rx_skb; - maxlen = dch->maxlen; } else { - if (!test_bit(FLG_ACTIVE, &bch->Flags)) - return; - sp = &bch->rx_skb; - maxlen = bch->maxlen; + return; } next_frame: /* on first AND before getting next valid frame, R_FIFO must be written @@ -2195,8 +2224,11 @@ next_frame: HFC_wait_nodebug(hc); /* ignore if rx is off BUT change fifo (above) to start pending TX */ - if (hc->chan[ch].rx_off) + if (hc->chan[ch].rx_off) { + if (bch) + bch->dropcnt += poll; /* not exact but fair enough */ return; + } if (dch || test_bit(FLG_HDLC, &bch->Flags)) { f1 = HFC_inb_nodebug(hc, A_F1); @@ -2227,16 +2259,30 @@ next_frame: if (Zsize <= 0) return; - if (*sp == NULL) { - *sp = mI_alloc_skb(maxlen + 3, GFP_ATOMIC); - if (*sp == NULL) { - printk(KERN_DEBUG "%s: No mem for rx_skb\n", - __func__); + if (bch) { + maxlen = bchannel_get_rxbuf(bch, Zsize); + if (maxlen < 0) { + pr_warning("card%d.B%d: No bufferspace for %d bytes\n", + hc->id + 1, bch->nr, Zsize); return; } + sp = &bch->rx_skb; + maxlen = bch->maxlen; + } else { /* Dchannel */ + sp = &dch->rx_skb; + maxlen = dch->maxlen + 3; + if (*sp == NULL) { + *sp = mI_alloc_skb(maxlen, GFP_ATOMIC); + if (*sp == NULL) { + pr_warning("card%d: No mem for dch rx_skb\n", + hc->id + 1); + return; + } + } } /* show activity */ - hc->activity[hc->chan[ch].port] = 1; + if (dch) + hc->activity_rx |= 1 << hc->chan[ch].port; /* empty fifo with what we have */ if (dch || test_bit(FLG_HDLC, &bch->Flags)) { @@ -2247,7 +2293,7 @@ next_frame: Zsize, z1, z2, (f1 == f2) ? 
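
The reworked hfcmulti_leds() above drops the shared ledcount in favour of a per-LED flash[] counter: activity seeds the counter with poll, the LED is blanked while the counter is below 1024, lit again until it reaches 2048, and then the counter clears so the next burst can retrigger. A small user-space model of that single-LED state machine, assuming poll = 128 samples per timer pass; the names and the simulation loop are illustrative, not driver code.

#include <stdio.h>

#define POLL 128    /* samples handled per timer pass, like the poll parameter */

/*
 * Model of one LED in the reworked hfcmulti_leds(): returns 1 when the
 * (green) LED of an active port should be lit, 0 during the blank phase
 * of a flash.  *flash stands in for the per-LED counter in struct hfc_multi.
 */
static int led_tick(unsigned int *flash, int activity)
{
    int on = 1;                 /* steady on while the port is active */

    if (!*flash && activity)
        *flash = POLL;          /* start a flash on new activity */
    if (*flash && *flash < 1024)
        on = 0;                 /* blank phase, roughly 1024 samples */
    if (*flash >= 2048)
        *flash = 0;             /* flash finished, allow a retrigger */
    if (*flash)
        *flash += POLL;         /* advance with every timer pass */
    return on;
}

int main(void)
{
    unsigned int flash = 0;
    int t;

    for (t = 0; t < 24; t++)    /* one activity burst at t == 2 */
        printf("tick %2d: led %s\n", t, led_tick(&flash, t == 2) ? "on" : "off");
    return 0;
}
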
"fragment" : "COMPLETE", f1, f2, Zsize + (*sp)->len, again); /* HDLC */ - if ((Zsize + (*sp)->len) > (maxlen + 3)) { + if ((Zsize + (*sp)->len) > maxlen) { if (debug & DEBUG_HFCMULTI_FIFO) printk(KERN_DEBUG "%s(card %d): hdlc-frame too large.\n", @@ -2309,7 +2355,7 @@ next_frame: if (dch) recv_Dchannel(dch); else - recv_Bchannel(bch, MISDN_ID_ANY); + recv_Bchannel(bch, MISDN_ID_ANY, false); *sp = skb; again++; goto next_frame; @@ -2317,32 +2363,14 @@ next_frame: /* there is an incomplete frame */ } else { /* transparent */ - if (Zsize > skb_tailroom(*sp)) - Zsize = skb_tailroom(*sp); hc->read_fifo(hc, skb_put(*sp, Zsize), Zsize); - if (((*sp)->len) < MISDN_COPY_SIZE) { - skb = *sp; - *sp = mI_alloc_skb(skb->len, GFP_ATOMIC); - if (*sp) { - memcpy(skb_put(*sp, skb->len), - skb->data, skb->len); - skb_trim(skb, 0); - } else { - printk(KERN_DEBUG "%s: No mem\n", __func__); - *sp = skb; - skb = NULL; - } - } else { - skb = NULL; - } if (debug & DEBUG_HFCMULTI_FIFO) printk(KERN_DEBUG "%s(card %d): fifo(%d) reading %d bytes " "(z1=%04x, z2=%04x) TRANS\n", __func__, hc->id + 1, ch, Zsize, z1, z2); /* only bch is transparent */ - recv_Bchannel(bch, hc->chan[ch].Zfill); - *sp = skb; + recv_Bchannel(bch, hc->chan[ch].Zfill, false); } } @@ -2430,55 +2458,55 @@ handle_timer_irq(struct hfc_multi *hc) } } if (hc->ctype == HFC_TYPE_E1 && hc->created[0]) { - dch = hc->chan[hc->dslot].dch; - if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dslot].cfg)) { - /* LOS */ - temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_SIG_LOS; - if (!temp && hc->chan[hc->dslot].los) + dch = hc->chan[hc->dnum[0]].dch; + /* LOS */ + temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_SIG_LOS; + hc->chan[hc->dnum[0]].los = temp; + if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dnum[0]].cfg)) { + if (!temp && hc->chan[hc->dnum[0]].los) signal_state_up(dch, L1_SIGNAL_LOS_ON, "LOS detected"); - if (temp && !hc->chan[hc->dslot].los) + if (temp && !hc->chan[hc->dnum[0]].los) signal_state_up(dch, L1_SIGNAL_LOS_OFF, "LOS gone"); - hc->chan[hc->dslot].los = temp; } - if (test_bit(HFC_CFG_REPORT_AIS, &hc->chan[hc->dslot].cfg)) { + if (test_bit(HFC_CFG_REPORT_AIS, &hc->chan[hc->dnum[0]].cfg)) { /* AIS */ temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_AIS; - if (!temp && hc->chan[hc->dslot].ais) + if (!temp && hc->chan[hc->dnum[0]].ais) signal_state_up(dch, L1_SIGNAL_AIS_ON, "AIS detected"); - if (temp && !hc->chan[hc->dslot].ais) + if (temp && !hc->chan[hc->dnum[0]].ais) signal_state_up(dch, L1_SIGNAL_AIS_OFF, "AIS gone"); - hc->chan[hc->dslot].ais = temp; + hc->chan[hc->dnum[0]].ais = temp; } - if (test_bit(HFC_CFG_REPORT_SLIP, &hc->chan[hc->dslot].cfg)) { + if (test_bit(HFC_CFG_REPORT_SLIP, &hc->chan[hc->dnum[0]].cfg)) { /* SLIP */ temp = HFC_inb_nodebug(hc, R_SLIP) & V_FOSLIP_RX; - if (!temp && hc->chan[hc->dslot].slip_rx) + if (!temp && hc->chan[hc->dnum[0]].slip_rx) signal_state_up(dch, L1_SIGNAL_SLIP_RX, " bit SLIP detected RX"); - hc->chan[hc->dslot].slip_rx = temp; + hc->chan[hc->dnum[0]].slip_rx = temp; temp = HFC_inb_nodebug(hc, R_SLIP) & V_FOSLIP_TX; - if (!temp && hc->chan[hc->dslot].slip_tx) + if (!temp && hc->chan[hc->dnum[0]].slip_tx) signal_state_up(dch, L1_SIGNAL_SLIP_TX, " bit SLIP detected TX"); - hc->chan[hc->dslot].slip_tx = temp; + hc->chan[hc->dnum[0]].slip_tx = temp; } - if (test_bit(HFC_CFG_REPORT_RDI, &hc->chan[hc->dslot].cfg)) { + if (test_bit(HFC_CFG_REPORT_RDI, &hc->chan[hc->dnum[0]].cfg)) { /* RDI */ temp = HFC_inb_nodebug(hc, R_RX_SL0_0) & V_A; - if (!temp && hc->chan[hc->dslot].rdi) + if (!temp && hc->chan[hc->dnum[0]].rdi) 
signal_state_up(dch, L1_SIGNAL_RDI_ON, "RDI detected"); - if (temp && !hc->chan[hc->dslot].rdi) + if (temp && !hc->chan[hc->dnum[0]].rdi) signal_state_up(dch, L1_SIGNAL_RDI_OFF, "RDI gone"); - hc->chan[hc->dslot].rdi = temp; + hc->chan[hc->dnum[0]].rdi = temp; } temp = HFC_inb_nodebug(hc, R_JATT_DIR); - switch (hc->chan[hc->dslot].sync) { + switch (hc->chan[hc->dnum[0]].sync) { case 0: if ((temp & 0x60) == 0x60) { if (debug & DEBUG_HFCMULTI_SYNC) @@ -2487,10 +2515,10 @@ handle_timer_irq(struct hfc_multi *hc) "in clock sync\n", __func__, hc->id); HFC_outb(hc, R_RX_OFF, - hc->chan[hc->dslot].jitter | V_RX_INIT); + hc->chan[hc->dnum[0]].jitter | V_RX_INIT); HFC_outb(hc, R_TX_OFF, - hc->chan[hc->dslot].jitter | V_RX_INIT); - hc->chan[hc->dslot].sync = 1; + hc->chan[hc->dnum[0]].jitter | V_RX_INIT); + hc->chan[hc->dnum[0]].sync = 1; goto check_framesync; } break; @@ -2501,7 +2529,7 @@ handle_timer_irq(struct hfc_multi *hc) "%s: (id=%d) E1 " "lost clock sync\n", __func__, hc->id); - hc->chan[hc->dslot].sync = 0; + hc->chan[hc->dnum[0]].sync = 0; break; } check_framesync: @@ -2512,7 +2540,7 @@ handle_timer_irq(struct hfc_multi *hc) "%s: (id=%d) E1 " "now in frame sync\n", __func__, hc->id); - hc->chan[hc->dslot].sync = 2; + hc->chan[hc->dnum[0]].sync = 2; } break; case 2: @@ -2522,7 +2550,7 @@ handle_timer_irq(struct hfc_multi *hc) "%s: (id=%d) E1 lost " "clock & frame sync\n", __func__, hc->id); - hc->chan[hc->dslot].sync = 0; + hc->chan[hc->dnum[0]].sync = 0; break; } temp = HFC_inb_nodebug(hc, R_SYNC_STA); @@ -2532,7 +2560,7 @@ handle_timer_irq(struct hfc_multi *hc) "%s: (id=%d) E1 " "lost frame sync\n", __func__, hc->id); - hc->chan[hc->dslot].sync = 1; + hc->chan[hc->dnum[0]].sync = 1; } break; } @@ -2673,7 +2701,7 @@ hfcmulti_interrupt(int intno, void *dev_id) int i; void __iomem *plx_acc; u_short wval; - u_char e1_syncsta, temp; + u_char e1_syncsta, temp, temp2; u_long flags; if (!hc) { @@ -2748,7 +2776,7 @@ hfcmulti_interrupt(int intno, void *dev_id) if (r_irq_misc & V_STA_IRQ) { if (hc->ctype == HFC_TYPE_E1) { /* state machine */ - dch = hc->chan[hc->dslot].dch; + dch = hc->chan[hc->dnum[0]].dch; e1_syncsta = HFC_inb_nodebug(hc, R_SYNC_STA); if (test_bit(HFC_CHIP_PLXSD, &hc->chip) && hc->e1_getclock) { @@ -2758,23 +2786,26 @@ hfcmulti_interrupt(int intno, void *dev_id) hc->syncronized = 0; } /* undocumented: status changes during read */ - dch->state = HFC_inb_nodebug(hc, R_E1_RD_STA); - while (dch->state != (temp = + temp = HFC_inb_nodebug(hc, R_E1_RD_STA); + while (temp != (temp2 = HFC_inb_nodebug(hc, R_E1_RD_STA))) { if (debug & DEBUG_HFCMULTI_STATE) printk(KERN_DEBUG "%s: reread " "STATE because %d!=%d\n", - __func__, temp, - dch->state); - dch->state = temp; /* repeat */ + __func__, temp, temp2); + temp = temp2; /* repeat */ } - dch->state = HFC_inb_nodebug(hc, R_E1_RD_STA) - & 0x7; - schedule_event(dch, FLG_PHCHANGE); + /* broadcast state change to all fragments */ if (debug & DEBUG_HFCMULTI_STATE) printk(KERN_DEBUG "%s: E1 (id=%d) newstate %x\n", - __func__, hc->id, dch->state); + __func__, hc->id, temp & 0x7); + for (i = 0; i < hc->ports; i++) { + dch = hc->chan[hc->dnum[i]].dch; + dch->state = temp & 0x7; + schedule_event(dch, FLG_PHCHANGE); + } + if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) plxsd_checksync(hc, 0); } @@ -3018,8 +3049,10 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx, HFC_outb(hc, A_CON_HDLC, 0x20 | V_HDLC_TRP | V_IFF); HFC_outb(hc, A_SUBCH_CFG, 0); HFC_outb(hc, A_IRQ_MSK, 0); - HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); - HFC_wait(hc); + if 
(hc->chan[ch].protocol != protocol) { + HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); + HFC_wait(hc); + } HFC_outb(hc, R_SLOT, ((((ch / 4) * 8) + ((ch % 4) * 4) + 1) << 1) | 1); HFC_outb(hc, A_SL_CFG, 0x80 | 0x20 | (ch << 1) | 1); @@ -3039,8 +3072,10 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx, HFC_outb(hc, A_CON_HDLC, 0x20 | V_HDLC_TRP | V_IFF); HFC_outb(hc, A_SUBCH_CFG, 0); HFC_outb(hc, A_IRQ_MSK, 0); - HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); - HFC_wait(hc); + if (hc->chan[ch].protocol != protocol) { + HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); + HFC_wait(hc); + } /* tx silence */ HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence); HFC_outb(hc, R_SLOT, (((ch / 4) * 8) + @@ -3059,8 +3094,10 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx, V_HDLC_TRP | V_IFF); HFC_outb(hc, A_SUBCH_CFG, 0); HFC_outb(hc, A_IRQ_MSK, 0); - HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); - HFC_wait(hc); + if (hc->chan[ch].protocol != protocol) { + HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); + HFC_wait(hc); + } /* tx silence */ HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence); /* enable RX fifo */ @@ -3075,8 +3112,10 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx, V_HDLC_TRP); HFC_outb(hc, A_SUBCH_CFG, 0); HFC_outb(hc, A_IRQ_MSK, 0); - HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); - HFC_wait(hc); + if (hc->chan[ch].protocol != protocol) { + HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); + HFC_wait(hc); + } } if (hc->ctype != HFC_TYPE_E1) { hc->hw.a_st_ctrl0[hc->chan[ch].port] |= @@ -3433,8 +3472,7 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb) struct hfc_multi *hc = bch->hw; int ret = -EINVAL; struct mISDNhead *hh = mISDN_HEAD_P(skb); - unsigned int id; - u_long flags; + unsigned long flags; switch (hh->prim) { case PH_DATA_REQ: @@ -3443,19 +3481,13 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb) spin_lock_irqsave(&hc->lock, flags); ret = bchannel_senddata(bch, skb); if (ret > 0) { /* direct TX */ - id = hh->id; /* skb can be freed */ hfcmulti_tx(hc, bch->slot); ret = 0; /* start fifo */ HFC_outb_nodebug(hc, R_FIFO, 0); HFC_wait_nodebug(hc); - if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) { - spin_unlock_irqrestore(&hc->lock, flags); - queue_ch_frame(ch, PH_DATA_CNF, id, NULL); - } else - spin_unlock_irqrestore(&hc->lock, flags); - } else - spin_unlock_irqrestore(&hc->lock, flags); + } + spin_unlock_irqrestore(&hc->lock, flags); return ret; case PH_ACTIVATE_REQ: if (debug & DEBUG_HFCMULTI_MSG) @@ -3545,10 +3577,11 @@ channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) switch (cq->op) { case MISDN_CTRL_GETOP: - cq->op = MISDN_CTRL_HFC_OP | MISDN_CTRL_HW_FEATURES_OP - | MISDN_CTRL_RX_OFF | MISDN_CTRL_FILL_EMPTY; + ret = mISDN_ctrl_bchannel(bch, cq); + cq->op |= MISDN_CTRL_HFC_OP | MISDN_CTRL_HW_FEATURES_OP; break; case MISDN_CTRL_RX_OFF: /* turn off / on rx stream */ + ret = mISDN_ctrl_bchannel(bch, cq); hc->chan[bch->slot].rx_off = !!cq->p1; if (!hc->chan[bch->slot].rx_off) { /* reset fifo on rx on */ @@ -3561,11 +3594,10 @@ channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) printk(KERN_DEBUG "%s: RX_OFF request (nr=%d off=%d)\n", __func__, bch->nr, hc->chan[bch->slot].rx_off); break; - case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */ - test_and_set_bit(FLG_FILLEMPTY, &bch->Flags); - if (debug & DEBUG_HFCMULTI_MSG) - printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d " - "off=%d)\n", __func__, bch->nr, !!cq->p1); + case MISDN_CTRL_FILL_EMPTY: + ret = mISDN_ctrl_bchannel(bch, cq); + hc->silence = bch->fill[0]; + 
memset(hc->silence_data, hc->silence, sizeof(hc->silence_data)); break; case MISDN_CTRL_HW_FEATURES: /* fill features structure */ if (debug & DEBUG_HFCMULTI_MSG) @@ -3654,9 +3686,7 @@ channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) ret = -EINVAL; break; default: - printk(KERN_WARNING "%s: unknown Op %x\n", - __func__, cq->op); - ret = -EINVAL; + ret = mISDN_ctrl_bchannel(bch, cq); break; } return ret; @@ -3676,8 +3706,7 @@ hfcm_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg) switch (cmd) { case CLOSE_CHANNEL: test_and_clear_bit(FLG_OPEN, &bch->Flags); - if (test_bit(FLG_ACTIVE, &bch->Flags)) - deactivate_bchannel(bch); /* locked there */ + deactivate_bchannel(bch); /* locked there */ ch->protocol = ISDN_P_NONE; ch->peer = NULL; module_put(THIS_MODULE); @@ -3839,31 +3868,37 @@ hfcmulti_initmode(struct dchannel *dch) if (debug & DEBUG_HFCMULTI_INIT) printk(KERN_DEBUG "%s: entered\n", __func__); + i = dch->slot; + pt = hc->chan[i].port; if (hc->ctype == HFC_TYPE_E1) { - hc->chan[hc->dslot].slot_tx = -1; - hc->chan[hc->dslot].slot_rx = -1; - hc->chan[hc->dslot].conf = -1; - if (hc->dslot) { - mode_hfcmulti(hc, hc->dslot, dch->dev.D.protocol, + /* E1 */ + hc->chan[hc->dnum[pt]].slot_tx = -1; + hc->chan[hc->dnum[pt]].slot_rx = -1; + hc->chan[hc->dnum[pt]].conf = -1; + if (hc->dnum[pt]) { + mode_hfcmulti(hc, dch->slot, dch->dev.D.protocol, -1, 0, -1, 0); dch->timer.function = (void *) hfcmulti_dbusy_timer; dch->timer.data = (long) dch; init_timer(&dch->timer); } for (i = 1; i <= 31; i++) { - if (i == hc->dslot) + if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */ continue; hc->chan[i].slot_tx = -1; hc->chan[i].slot_rx = -1; hc->chan[i].conf = -1; mode_hfcmulti(hc, i, ISDN_P_NONE, -1, 0, -1, 0); } - /* E1 */ - if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dslot].cfg)) { + } + if (hc->ctype == HFC_TYPE_E1 && pt == 0) { + /* E1, port 0 */ + dch = hc->chan[hc->dnum[0]].dch; + if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dnum[0]].cfg)) { HFC_outb(hc, R_LOS0, 255); /* 2 ms */ HFC_outb(hc, R_LOS1, 255); /* 512 ms */ } - if (test_bit(HFC_CFG_OPTICAL, &hc->chan[hc->dslot].cfg)) { + if (test_bit(HFC_CFG_OPTICAL, &hc->chan[hc->dnum[0]].cfg)) { HFC_outb(hc, R_RX0, 0); hc->hw.r_tx0 = 0 | V_OUT_EN; } else { @@ -3876,12 +3911,12 @@ hfcmulti_initmode(struct dchannel *dch) HFC_outb(hc, R_TX_FR0, 0x00); HFC_outb(hc, R_TX_FR1, 0xf8); - if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dslot].cfg)) + if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dnum[0]].cfg)) HFC_outb(hc, R_TX_FR2, V_TX_MF | V_TX_E | V_NEG_E); HFC_outb(hc, R_RX_FR0, V_AUTO_RESYNC | V_AUTO_RECO | 0); - if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dslot].cfg)) + if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dnum[0]].cfg)) HFC_outb(hc, R_RX_FR1, V_RX_MF | V_RX_MF_SYNC); if (dch->dev.D.protocol == ISDN_P_NT_E1) { @@ -3944,13 +3979,14 @@ hfcmulti_initmode(struct dchannel *dch) hc->syncronized = 0; plxsd_checksync(hc, 0); } - } else { - i = dch->slot; + } + if (hc->ctype != HFC_TYPE_E1) { + /* ST */ hc->chan[i].slot_tx = -1; hc->chan[i].slot_rx = -1; hc->chan[i].conf = -1; mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0); - dch->timer.function = (void *)hfcmulti_dbusy_timer; + dch->timer.function = (void *) hfcmulti_dbusy_timer; dch->timer.data = (long) dch; init_timer(&dch->timer); hc->chan[i - 2].slot_tx = -1; @@ -3961,8 +3997,6 @@ hfcmulti_initmode(struct dchannel *dch) hc->chan[i - 1].slot_rx = -1; hc->chan[i - 1].conf = -1; mode_hfcmulti(hc, i - 1, ISDN_P_NONE, -1, 0, -1, 0); - /* ST */ - pt = hc->chan[i].port; /* select interface */ 
HFC_outb(hc, R_ST_SEL, pt); /* undocumented: delay after R_ST_SEL */ @@ -4054,14 +4088,9 @@ open_dchannel(struct hfc_multi *hc, struct dchannel *dch, hfcmulti_initmode(dch); spin_unlock_irqrestore(&hc->lock, flags); } - - if (((rq->protocol == ISDN_P_NT_S0) && (dch->state == 3)) || - ((rq->protocol == ISDN_P_TE_S0) && (dch->state == 7)) || - ((rq->protocol == ISDN_P_NT_E1) && (dch->state == 1)) || - ((rq->protocol == ISDN_P_TE_E1) && (dch->state == 1))) { + if (test_bit(FLG_ACTIVE, &dch->Flags)) _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_KERNEL); - } rq->ch = &dch->dev.D; if (!try_module_get(THIS_MODULE)) printk(KERN_WARNING "%s:cannot get module\n", __func__); @@ -4091,7 +4120,6 @@ open_bchannel(struct hfc_multi *hc, struct dchannel *dch, } if (test_and_set_bit(FLG_OPEN, &bch->Flags)) return -EBUSY; /* b-channel can be only open once */ - test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags); bch->ch.protocol = rq->protocol; hc->chan[ch].rx_off = 0; rq->ch = &bch->ch; @@ -4112,7 +4140,7 @@ channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq) switch (cq->op) { case MISDN_CTRL_GETOP: - cq->op = MISDN_CTRL_HFC_OP; + cq->op = MISDN_CTRL_HFC_OP | MISDN_CTRL_L1_TIMER3; break; case MISDN_CTRL_HFC_WD_INIT: /* init the watchdog */ wd_cnt = cq->p1 & 0xf; @@ -4142,6 +4170,9 @@ channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq) __func__); HFC_outb(hc, R_BERT_WD_MD, hc->hw.r_bert_wd_md | V_WD_RES); break; + case MISDN_CTRL_L1_TIMER3: + ret = l1_event(dch->l1, HW_TIMER3_VALUE | (cq->p1 & 0xff)); + break; default: printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op); @@ -4545,6 +4576,8 @@ release_port(struct hfc_multi *hc, struct dchannel *dch) } /* free channels */ for (i = 0; i <= 31; i++) { + if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */ + continue; if (hc->chan[i].bch) { if (debug & DEBUG_HFCMULTI_INIT) printk(KERN_DEBUG @@ -4600,7 +4633,8 @@ release_port(struct hfc_multi *hc, struct dchannel *dch) spin_unlock_irqrestore(&hc->lock, flags); if (debug & DEBUG_HFCMULTI_INIT) - printk(KERN_DEBUG "%s: free port %d channel D\n", __func__, pt); + printk(KERN_DEBUG "%s: free port %d channel D(%d)\n", __func__, + pt+1, ci); mISDN_freedchannel(dch); kfree(dch); @@ -4622,15 +4656,19 @@ release_card(struct hfc_multi *hc) if (hc->iclock) mISDN_unregister_clock(hc->iclock); - /* disable irq */ + /* disable and free irq */ spin_lock_irqsave(&hc->lock, flags); disable_hwirq(hc); spin_unlock_irqrestore(&hc->lock, flags); udelay(1000); + if (hc->irq) { + if (debug & DEBUG_HFCMULTI_INIT) + printk(KERN_DEBUG "%s: free irq %d (hc=%p)\n", + __func__, hc->irq, hc); + free_irq(hc->irq, hc); + hc->irq = 0; - /* dimm leds */ - if (hc->leds) - hfcmulti_leds(hc); + } /* disable D-channels & B-channels */ if (debug & DEBUG_HFCMULTI_INIT) @@ -4641,15 +4679,11 @@ release_card(struct hfc_multi *hc) release_port(hc, hc->chan[ch].dch); } - /* release hardware & irq */ - if (hc->irq) { - if (debug & DEBUG_HFCMULTI_INIT) - printk(KERN_DEBUG "%s: free irq %d\n", - __func__, hc->irq); - free_irq(hc->irq, hc); - hc->irq = 0; + /* dimm leds */ + if (hc->leds) + hfcmulti_leds(hc); - } + /* release hardware */ release_io_hfcmulti(hc); if (debug & DEBUG_HFCMULTI_INIT) @@ -4667,61 +4701,9 @@ release_card(struct hfc_multi *hc) __func__); } -static int -init_e1_port(struct hfc_multi *hc, struct hm_map *m) +static void +init_e1_port_hw(struct hfc_multi *hc, struct hm_map *m) { - struct dchannel *dch; - struct bchannel *bch; - int ch, ret = 0; - char name[MISDN_MAX_IDLEN]; - - dch = 
kzalloc(sizeof(struct dchannel), GFP_KERNEL); - if (!dch) - return -ENOMEM; - dch->debug = debug; - mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, ph_state_change); - dch->hw = hc; - dch->dev.Dprotocols = (1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1); - dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) | - (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK)); - dch->dev.D.send = handle_dmsg; - dch->dev.D.ctrl = hfcm_dctrl; - dch->dev.nrbchan = (hc->dslot) ? 30 : 31; - dch->slot = hc->dslot; - hc->chan[hc->dslot].dch = dch; - hc->chan[hc->dslot].port = 0; - hc->chan[hc->dslot].nt_timer = -1; - for (ch = 1; ch <= 31; ch++) { - if (ch == hc->dslot) /* skip dchannel */ - continue; - bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL); - if (!bch) { - printk(KERN_ERR "%s: no memory for bchannel\n", - __func__); - ret = -ENOMEM; - goto free_chan; - } - hc->chan[ch].coeff = kzalloc(512, GFP_KERNEL); - if (!hc->chan[ch].coeff) { - printk(KERN_ERR "%s: no memory for coeffs\n", - __func__); - ret = -ENOMEM; - kfree(bch); - goto free_chan; - } - bch->nr = ch; - bch->slot = ch; - bch->debug = debug; - mISDN_initbchannel(bch, MAX_DATA_MEM); - bch->hw = hc; - bch->ch.send = handle_bmsg; - bch->ch.ctrl = hfcm_bctrl; - bch->ch.nr = ch; - list_add(&bch->ch.list, &dch->dev.bchannels); - hc->chan[ch].bch = bch; - hc->chan[ch].port = 0; - set_channelmap(bch->nr, dch->dev.channelmap); - } /* set optical line type */ if (port[Port_cnt] & 0x001) { if (!m->opticalsupport) { @@ -4737,7 +4719,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m) __func__, HFC_cnt + 1, 1); test_and_set_bit(HFC_CFG_OPTICAL, - &hc->chan[hc->dslot].cfg); + &hc->chan[hc->dnum[0]].cfg); } } /* set LOS report */ @@ -4747,7 +4729,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m) "LOS report: card(%d) port(%d)\n", __func__, HFC_cnt + 1, 1); test_and_set_bit(HFC_CFG_REPORT_LOS, - &hc->chan[hc->dslot].cfg); + &hc->chan[hc->dnum[0]].cfg); } /* set AIS report */ if (port[Port_cnt] & 0x008) { @@ -4756,7 +4738,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m) "AIS report: card(%d) port(%d)\n", __func__, HFC_cnt + 1, 1); test_and_set_bit(HFC_CFG_REPORT_AIS, - &hc->chan[hc->dslot].cfg); + &hc->chan[hc->dnum[0]].cfg); } /* set SLIP report */ if (port[Port_cnt] & 0x010) { @@ -4766,7 +4748,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m) "card(%d) port(%d)\n", __func__, HFC_cnt + 1, 1); test_and_set_bit(HFC_CFG_REPORT_SLIP, - &hc->chan[hc->dslot].cfg); + &hc->chan[hc->dnum[0]].cfg); } /* set RDI report */ if (port[Port_cnt] & 0x020) { @@ -4776,7 +4758,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m) "card(%d) port(%d)\n", __func__, HFC_cnt + 1, 1); test_and_set_bit(HFC_CFG_REPORT_RDI, - &hc->chan[hc->dslot].cfg); + &hc->chan[hc->dnum[0]].cfg); } /* set CRC-4 Mode */ if (!(port[Port_cnt] & 0x100)) { @@ -4785,7 +4767,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m) " card(%d) port(%d)\n", __func__, HFC_cnt + 1, 1); test_and_set_bit(HFC_CFG_CRC4, - &hc->chan[hc->dslot].cfg); + &hc->chan[hc->dnum[0]].cfg); } else { if (debug & DEBUG_HFCMULTI_INIT) printk(KERN_DEBUG "%s: PORT turn off CRC4" @@ -4817,20 +4799,85 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m) } /* set elastic jitter buffer */ if (port[Port_cnt] & 0x3000) { - hc->chan[hc->dslot].jitter = (port[Port_cnt]>>12) & 0x3; + hc->chan[hc->dnum[0]].jitter = (port[Port_cnt]>>12) & 0x3; if (debug & DEBUG_HFCMULTI_INIT) printk(KERN_DEBUG "%s: PORT set elastic " "buffer to %d: card(%d) port(%d)\n", - __func__, hc->chan[hc->dslot].jitter, + __func__, 
hc->chan[hc->dnum[0]].jitter, HFC_cnt + 1, 1); } else - hc->chan[hc->dslot].jitter = 2; /* default */ - snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d", HFC_cnt + 1); + hc->chan[hc->dnum[0]].jitter = 2; /* default */ +} + +static int +init_e1_port(struct hfc_multi *hc, struct hm_map *m, int pt) +{ + struct dchannel *dch; + struct bchannel *bch; + int ch, ret = 0; + char name[MISDN_MAX_IDLEN]; + int bcount = 0; + + dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL); + if (!dch) + return -ENOMEM; + dch->debug = debug; + mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, ph_state_change); + dch->hw = hc; + dch->dev.Dprotocols = (1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1); + dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) | + (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK)); + dch->dev.D.send = handle_dmsg; + dch->dev.D.ctrl = hfcm_dctrl; + dch->slot = hc->dnum[pt]; + hc->chan[hc->dnum[pt]].dch = dch; + hc->chan[hc->dnum[pt]].port = pt; + hc->chan[hc->dnum[pt]].nt_timer = -1; + for (ch = 1; ch <= 31; ch++) { + if (!((1 << ch) & hc->bmask[pt])) /* skip unused channel */ + continue; + bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL); + if (!bch) { + printk(KERN_ERR "%s: no memory for bchannel\n", + __func__); + ret = -ENOMEM; + goto free_chan; + } + hc->chan[ch].coeff = kzalloc(512, GFP_KERNEL); + if (!hc->chan[ch].coeff) { + printk(KERN_ERR "%s: no memory for coeffs\n", + __func__); + ret = -ENOMEM; + kfree(bch); + goto free_chan; + } + bch->nr = ch; + bch->slot = ch; + bch->debug = debug; + mISDN_initbchannel(bch, MAX_DATA_MEM, poll >> 1); + bch->hw = hc; + bch->ch.send = handle_bmsg; + bch->ch.ctrl = hfcm_bctrl; + bch->ch.nr = ch; + list_add(&bch->ch.list, &dch->dev.bchannels); + hc->chan[ch].bch = bch; + hc->chan[ch].port = pt; + set_channelmap(bch->nr, dch->dev.channelmap); + bcount++; + } + dch->dev.nrbchan = bcount; + if (pt == 0) + init_e1_port_hw(hc, m); + if (hc->ports > 1) + snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d-%d", + HFC_cnt + 1, pt+1); + else + snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d", HFC_cnt + 1); ret = mISDN_register_device(&dch->dev, &hc->pci_dev->dev, name); if (ret) goto free_chan; - hc->created[0] = 1; + hc->created[pt] = 1; return ret; free_chan: release_port(hc, dch); @@ -4881,7 +4928,7 @@ init_multi_port(struct hfc_multi *hc, int pt) bch->nr = ch + 1; bch->slot = i + ch; bch->debug = debug; - mISDN_initbchannel(bch, MAX_DATA_MEM); + mISDN_initbchannel(bch, MAX_DATA_MEM, poll >> 1); bch->hw = hc; bch->ch.send = handle_bmsg; bch->ch.ctrl = hfcm_bctrl; @@ -4963,7 +5010,8 @@ hfcmulti_init(struct hm_map *m, struct pci_dev *pdev, struct hfc_multi *hc; u_long flags; u_char dips = 0, pmj = 0; /* dip settings, port mode Jumpers */ - int i; + int i, ch; + u_int maskcheck; if (HFC_cnt >= MAX_CARDS) { printk(KERN_ERR "too many cards (max=%d).\n", @@ -4997,18 +5045,36 @@ hfcmulti_init(struct hm_map *m, struct pci_dev *pdev, hc->id = HFC_cnt; hc->pcm = pcm[HFC_cnt]; hc->io_mode = iomode[HFC_cnt]; - if (dslot[HFC_cnt] < 0 && hc->ctype == HFC_TYPE_E1) { - hc->dslot = 0; - printk(KERN_INFO "HFC-E1 card has disabled D-channel, but " - "31 B-channels\n"); - } - if (dslot[HFC_cnt] > 0 && dslot[HFC_cnt] < 32 - && hc->ctype == HFC_TYPE_E1) { - hc->dslot = dslot[HFC_cnt]; - printk(KERN_INFO "HFC-E1 card has alternating D-channel on " - "time slot %d\n", dslot[HFC_cnt]); - } else - hc->dslot = 16; + if (hc->ctype == HFC_TYPE_E1 && dmask[E1_cnt]) { + /* fragment card */ + pt = 0; + maskcheck = 0; + for (ch = 0; ch <= 31; ch++) { + if (!((1 << ch) & dmask[E1_cnt])) + continue; + 
hc->dnum[pt] = ch; + hc->bmask[pt] = bmask[bmask_cnt++]; + if ((maskcheck & hc->bmask[pt]) + || (dmask[E1_cnt] & hc->bmask[pt])) { + printk(KERN_INFO + "HFC-E1 #%d has overlapping B-channels on fragment #%d\n", + E1_cnt + 1, pt); + return -EINVAL; + } + maskcheck |= hc->bmask[pt]; + printk(KERN_INFO + "HFC-E1 #%d uses D-channel on slot %d and a B-channel map of 0x%08x\n", + E1_cnt + 1, ch, hc->bmask[pt]); + pt++; + } + hc->ports = pt; + } + if (hc->ctype == HFC_TYPE_E1 && !dmask[E1_cnt]) { + /* default card layout */ + hc->dnum[0] = 16; + hc->bmask[0] = 0xfffefffe; + hc->ports = 1; + } /* set chip specific features */ hc->masterclk = -1; @@ -5091,23 +5157,33 @@ hfcmulti_init(struct hm_map *m, struct pci_dev *pdev, goto free_card; } if (hc->ctype == HFC_TYPE_E1) - ret_err = init_e1_port(hc, m); + ret_err = init_e1_port(hc, m, pt); else ret_err = init_multi_port(hc, pt); if (debug & DEBUG_HFCMULTI_INIT) printk(KERN_DEBUG - "%s: Registering D-channel, card(%d) port(%d)" + "%s: Registering D-channel, card(%d) port(%d) " "result %d\n", - __func__, HFC_cnt + 1, pt, ret_err); + __func__, HFC_cnt + 1, pt + 1, ret_err); if (ret_err) { while (pt) { /* release already registered ports */ pt--; - release_port(hc, hc->chan[(pt << 2) + 2].dch); + if (hc->ctype == HFC_TYPE_E1) + release_port(hc, + hc->chan[hc->dnum[pt]].dch); + else + release_port(hc, + hc->chan[(pt << 2) + 2].dch); } goto free_card; } - Port_cnt++; + if (hc->ctype != HFC_TYPE_E1) + Port_cnt++; /* for each S0 port */ + } + if (hc->ctype == HFC_TYPE_E1) { + Port_cnt++; /* for each E1 port */ + E1_cnt++; } /* disp switches */ diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index e2c83a2d769..81363ffa535 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -453,7 +453,7 @@ hfcpci_empty_bfifo(struct bchannel *bch, struct bzfifo *bz, } bz->za[new_f2].z2 = cpu_to_le16(new_z2); bz->f2 = new_f2; /* next buffer */ - recv_Bchannel(bch, MISDN_ID_ANY); + recv_Bchannel(bch, MISDN_ID_ANY, false); } } @@ -565,11 +565,6 @@ hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *rxbz, if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL)) new_z2 -= B_FIFO_SIZE; /* buffer wrap */ - if (fcnt_rx > MAX_DATA_SIZE) { /* flush, if oversized */ - *z2r = cpu_to_le16(new_z2); /* new position */ - return; - } - fcnt_tx = le16_to_cpu(*z2t) - le16_to_cpu(*z1t); if (fcnt_tx <= 0) fcnt_tx += B_FIFO_SIZE; @@ -577,8 +572,16 @@ hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *rxbz, fcnt_tx = B_FIFO_SIZE - fcnt_tx; /* remaining bytes to send (bytes in tx-fifo) */ - bch->rx_skb = mI_alloc_skb(fcnt_rx, GFP_ATOMIC); - if (bch->rx_skb) { + if (test_bit(FLG_RX_OFF, &bch->Flags)) { + bch->dropcnt += fcnt_rx; + *z2r = cpu_to_le16(new_z2); + return; + } + maxlen = bchannel_get_rxbuf(bch, fcnt_rx); + if (maxlen < 0) { + pr_warning("B%d: No bufferspace for %d bytes\n", + bch->nr, fcnt_rx); + } else { ptr = skb_put(bch->rx_skb, fcnt_rx); if (le16_to_cpu(*z2r) + fcnt_rx <= B_FIFO_SIZE + B_SUB_VAL) maxlen = fcnt_rx; /* complete transfer */ @@ -596,10 +599,8 @@ hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *rxbz, ptr1 = bdata; /* start of buffer */ memcpy(ptr, ptr1, fcnt_rx); /* rest */ } - recv_Bchannel(bch, fcnt_tx); /* bch, id */ - } else - printk(KERN_WARNING "HFCPCI: receive out of memory\n"); - + recv_Bchannel(bch, fcnt_tx, false); /* bch, id, !force */ + } *z2r = cpu_to_le16(new_z2); /* new position */ } @@ -760,9 +761,14 @@ hfcpci_fill_fifo(struct bchannel *bch) if ((bch->debug & 
DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO)) printk(KERN_DEBUG "%s\n", __func__); - if ((!bch->tx_skb) || bch->tx_skb->len <= 0) - return; - count = bch->tx_skb->len - bch->tx_idx; + if ((!bch->tx_skb) || bch->tx_skb->len == 0) { + if (!test_bit(FLG_FILLEMPTY, &bch->Flags) && + !test_bit(FLG_TRANSPARENT, &bch->Flags)) + return; + count = HFCPCI_FILLEMPTY; + } else { + count = bch->tx_skb->len - bch->tx_idx; + } if ((bch->nr & 2) && (!hc->hw.bswapped)) { bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2; bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b2; @@ -781,16 +787,10 @@ hfcpci_fill_fifo(struct bchannel *bch) fcnt = le16_to_cpu(*z2t) - le16_to_cpu(*z1t); if (fcnt <= 0) fcnt += B_FIFO_SIZE; - /* fcnt contains available bytes in fifo */ - fcnt = B_FIFO_SIZE - fcnt; - /* remaining bytes to send (bytes in fifo) */ - - /* "fill fifo if empty" feature */ - if (test_bit(FLG_FILLEMPTY, &bch->Flags) && !fcnt) { - /* printk(KERN_DEBUG "%s: buffer empty, so we have " - "underrun\n", __func__); */ - /* fill buffer, to prevent future underrun */ - count = HFCPCI_FILLEMPTY; + if (test_bit(FLG_FILLEMPTY, &bch->Flags)) { + /* fcnt contains available bytes in fifo */ + if (count > fcnt) + count = fcnt; new_z1 = le16_to_cpu(*z1t) + count; /* new buffer Position */ if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL)) @@ -802,17 +802,20 @@ hfcpci_fill_fifo(struct bchannel *bch) printk(KERN_DEBUG "hfcpci_FFt fillempty " "fcnt(%d) maxl(%d) nz1(%x) dst(%p)\n", fcnt, maxlen, new_z1, dst); - fcnt += count; if (maxlen > count) maxlen = count; /* limit size */ - memset(dst, 0x2a, maxlen); /* first copy */ + memset(dst, bch->fill[0], maxlen); /* first copy */ count -= maxlen; /* remaining bytes */ if (count) { dst = bdata; /* start of buffer */ - memset(dst, 0x2a, count); + memset(dst, bch->fill[0], count); } *z1t = cpu_to_le16(new_z1); /* now send data */ + return; } + /* fcnt contains available bytes in fifo */ + fcnt = B_FIFO_SIZE - fcnt; + /* remaining bytes to send (bytes in fifo) */ next_t_frame: count = bch->tx_skb->len - bch->tx_idx; @@ -849,9 +852,6 @@ hfcpci_fill_fifo(struct bchannel *bch) *z1t = cpu_to_le16(new_z1); /* now send data */ if (bch->tx_idx < bch->tx_skb->len) return; - /* send confirm, on trans, free on hdlc. 
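
The hfcpci_fill_fifo() hunk above shows the transmit half of the FILL_EMPTY rework: when no frame is queued and FLG_FILLEMPTY is set, the FIFO is topped up with the configurable fill byte (bch->fill[0], previously a hard-coded 0x2a) rather than being left to underrun; the AVM Fritz, HSCX and ISAR paths in this patch gain the same behaviour. A minimal user-space sketch of that decision follows, with illustrative names; only the fill-byte padding rule is taken from the patch.

#include <stdio.h>
#include <string.h>

#define FIFO_CHUNK 32   /* stands in for HFCPCI_FILLEMPTY / the fifo size */

struct model_tx {
    const unsigned char *skb;   /* pending frame, or NULL */
    int len, idx;
    int fillempty;              /* FLG_FILLEMPTY */
    unsigned char fill;         /* bch->fill[0] */
};

/*
 * Write the next chunk into the FIFO: either the remainder of the pending
 * frame, or a run of the fill byte when fill-empty is enabled and the
 * transmit queue has run dry.  Returns the number of bytes written.
 */
static int model_fill_fifo(struct model_tx *tx, unsigned char *fifo, int space)
{
    int count;

    if (!tx->skb || tx->len - tx->idx <= 0) {
        if (!tx->fillempty)
            return 0;                       /* nothing queued, allow underrun */
        count = space < FIFO_CHUNK ? space : FIFO_CHUNK;
        memset(fifo, tx->fill, count);      /* pad with the configured byte */
        return count;
    }
    count = tx->len - tx->idx;
    if (count > space)
        count = space;
    memcpy(fifo, tx->skb + tx->idx, count);
    tx->idx += count;
    return count;
}

int main(void)
{
    unsigned char fifo[FIFO_CHUNK];
    struct model_tx tx = { .fillempty = 1, .fill = 0x2a };
    int n = model_fill_fifo(&tx, fifo, sizeof(fifo));

    printf("wrote %d fill bytes of 0x%02x\n", n, fifo[0]);
    return 0;
}
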
*/ - if (test_bit(FLG_TRANSPARENT, &bch->Flags)) - confirm_Bsend(bch); dev_kfree_skb(bch->tx_skb); if (get_next_bframe(bch)) goto next_t_frame; @@ -1533,24 +1533,7 @@ deactivate_bchannel(struct bchannel *bch) static int channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) { - int ret = 0; - - switch (cq->op) { - case MISDN_CTRL_GETOP: - cq->op = MISDN_CTRL_FILL_EMPTY; - break; - case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */ - test_and_set_bit(FLG_FILLEMPTY, &bch->Flags); - if (debug & DEBUG_HW_OPEN) - printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d " - "off=%d)\n", __func__, bch->nr, !!cq->p1); - break; - default: - printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op); - ret = -EINVAL; - break; - } - return ret; + return mISDN_ctrl_bchannel(bch, cq); } static int hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg) @@ -1581,8 +1564,7 @@ hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg) break; case CLOSE_CHANNEL: test_and_clear_bit(FLG_OPEN, &bch->Flags); - if (test_bit(FLG_ACTIVE, &bch->Flags)) - deactivate_bchannel(bch); + deactivate_bchannel(bch); ch->protocol = ISDN_P_NONE; ch->peer = NULL; module_put(THIS_MODULE); @@ -1692,22 +1674,17 @@ hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb) struct hfc_pci *hc = bch->hw; int ret = -EINVAL; struct mISDNhead *hh = mISDN_HEAD_P(skb); - unsigned int id; - u_long flags; + unsigned long flags; switch (hh->prim) { case PH_DATA_REQ: spin_lock_irqsave(&hc->lock, flags); ret = bchannel_senddata(bch, skb); if (ret > 0) { /* direct TX */ - id = hh->id; /* skb can be freed */ hfcpci_fill_fifo(bch); ret = 0; - spin_unlock_irqrestore(&hc->lock, flags); - if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) - queue_ch_frame(ch, PH_DATA_CNF, id, NULL); - } else - spin_unlock_irqrestore(&hc->lock, flags); + } + spin_unlock_irqrestore(&hc->lock, flags); return ret; case PH_ACTIVATE_REQ: spin_lock_irqsave(&hc->lock, flags); @@ -1819,7 +1796,7 @@ channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq) switch (cq->op) { case MISDN_CTRL_GETOP: cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT | - MISDN_CTRL_DISCONNECT; + MISDN_CTRL_DISCONNECT | MISDN_CTRL_L1_TIMER3; break; case MISDN_CTRL_LOOP: /* channel 0 disabled loop */ @@ -1896,6 +1873,9 @@ channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq) Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn); hc->hw.trm &= 0x7f; /* disable IOM-loop */ break; + case MISDN_CTRL_L1_TIMER3: + ret = l1_event(hc->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff)); + break; default: printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op); @@ -1969,7 +1949,6 @@ open_bchannel(struct hfc_pci *hc, struct channel_req *rq) bch = &hc->bch[rq->adr.channel - 1]; if (test_and_set_bit(FLG_OPEN, &bch->Flags)) return -EBUSY; /* b-channel can be only open once */ - test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags); bch->ch.protocol = rq->protocol; rq->ch = &bch->ch; /* TODO: E-channel */ if (!try_module_get(THIS_MODULE)) @@ -2121,7 +2100,7 @@ setup_card(struct hfc_pci *card) card->bch[i].nr = i + 1; set_channelmap(i + 1, card->dch.dev.channelmap); card->bch[i].debug = debug; - mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM); + mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM, poll >> 1); card->bch[i].hw = card; card->bch[i].ch.send = hfcpci_l2l1B; card->bch[i].ch.ctrl = hfc_bctrl; diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c index 8cde2a0538a..83206e453d4 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.c +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c @@ -226,19 +226,12 @@ 
hfcusb_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb) if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s PH_DATA_REQ ret(%i)\n", hw->name, __func__, ret); - if (ret > 0) { - /* - * other l1 drivers don't send early confirms on - * transp data, but hfcsusb does because tx_next - * skb is needed in tx_iso_complete() - */ - queue_ch_frame(ch, PH_DATA_CNF, hh->id, NULL); + if (ret > 0) ret = 0; - } return ret; case PH_ACTIVATE_REQ: if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags)) { - hfcsusb_start_endpoint(hw, bch->nr); + hfcsusb_start_endpoint(hw, bch->nr - 1); ret = hfcsusb_setup_bch(bch, ch->protocol); } else ret = 0; @@ -498,16 +491,9 @@ open_bchannel(struct hfcsusb *hw, struct channel_req *rq) bch = &hw->bch[rq->adr.channel - 1]; if (test_and_set_bit(FLG_OPEN, &bch->Flags)) return -EBUSY; /* b-channel can be only open once */ - test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags); bch->ch.protocol = rq->protocol; rq->ch = &bch->ch; - /* start USB endpoint for bchannel */ - if (rq->adr.channel == 1) - hfcsusb_start_endpoint(hw, HFC_CHAN_B1); - else - hfcsusb_start_endpoint(hw, HFC_CHAN_B2); - if (!try_module_get(THIS_MODULE)) printk(KERN_WARNING "%s: %s:cannot get module\n", hw->name, __func__); @@ -819,24 +805,7 @@ hfcsusb_ph_command(struct hfcsusb *hw, u_char command) static int channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) { - int ret = 0; - - switch (cq->op) { - case MISDN_CTRL_GETOP: - cq->op = MISDN_CTRL_FILL_EMPTY; - break; - case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */ - test_and_set_bit(FLG_FILLEMPTY, &bch->Flags); - if (debug & DEBUG_HW_OPEN) - printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d " - "off=%d)\n", __func__, bch->nr, !!cq->p1); - break; - default: - printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op); - ret = -EINVAL; - break; - } - return ret; + return mISDN_ctrl_bchannel(bch, cq); } /* collect data from incoming interrupt or isochron USB data */ @@ -873,7 +842,21 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, hdlc = 1; } if (fifo->bch) { + if (test_bit(FLG_RX_OFF, &fifo->bch->Flags)) { + fifo->bch->dropcnt += len; + spin_unlock(&hw->lock); + return; + } + maxlen = bchannel_get_rxbuf(fifo->bch, len); rx_skb = fifo->bch->rx_skb; + if (maxlen < 0) { + if (rx_skb) + skb_trim(rx_skb, 0); + pr_warning("%s.B%d: No bufferspace for %d bytes\n", + hw->name, fifo->bch->nr, len); + spin_unlock(&hw->lock); + return; + } maxlen = fifo->bch->maxlen; hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags); } @@ -883,25 +866,22 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, hdlc = 1; } - if (!rx_skb) { - rx_skb = mI_alloc_skb(maxlen, GFP_ATOMIC); - if (rx_skb) { - if (fifo->dch) - fifo->dch->rx_skb = rx_skb; - if (fifo->bch) - fifo->bch->rx_skb = rx_skb; - if (fifo->ech) - fifo->ech->rx_skb = rx_skb; - skb_trim(rx_skb, 0); - } else { - printk(KERN_DEBUG "%s: %s: No mem for rx_skb\n", - hw->name, __func__); - spin_unlock(&hw->lock); - return; - } - } - if (fifo->dch || fifo->ech) { + if (!rx_skb) { + rx_skb = mI_alloc_skb(maxlen, GFP_ATOMIC); + if (rx_skb) { + if (fifo->dch) + fifo->dch->rx_skb = rx_skb; + if (fifo->ech) + fifo->ech->rx_skb = rx_skb; + skb_trim(rx_skb, 0); + } else { + printk(KERN_DEBUG "%s: %s: No mem for rx_skb\n", + hw->name, __func__); + spin_unlock(&hw->lock); + return; + } + } /* D/E-Channel SKB range check */ if ((rx_skb->len + len) >= MAX_DFRAME_LEN_L1) { printk(KERN_DEBUG "%s: %s: sbk mem exceeded " @@ -911,16 +891,6 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned 
int len, spin_unlock(&hw->lock); return; } - } else if (fifo->bch) { - /* B-Channel SKB range check */ - if ((rx_skb->len + len) >= (MAX_BCH_SIZE + 3)) { - printk(KERN_DEBUG "%s: %s: sbk mem exceeded " - "for fifo(%d) HFCUSB_B_RX\n", - hw->name, __func__, fifon); - skb_trim(rx_skb, 0); - spin_unlock(&hw->lock); - return; - } } memcpy(skb_put(rx_skb, len), data, len); @@ -948,7 +918,8 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, if (fifo->dch) recv_Dchannel(fifo->dch); if (fifo->bch) - recv_Bchannel(fifo->bch, MISDN_ID_ANY); + recv_Bchannel(fifo->bch, MISDN_ID_ANY, + 0); if (fifo->ech) recv_Echannel(fifo->ech, &hw->dch); @@ -969,8 +940,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, } } else { /* deliver transparent data to layer2 */ - if (rx_skb->len >= poll) - recv_Bchannel(fifo->bch, MISDN_ID_ANY); + recv_Bchannel(fifo->bch, MISDN_ID_ANY, false); } spin_unlock(&hw->lock); } @@ -1200,8 +1170,8 @@ tx_iso_complete(struct urb *urb) int k, tx_offset, num_isoc_packets, sink, remain, current_len, errcode, hdlc, i; int *tx_idx; - int frame_complete, fifon, status; - __u8 threshbit; + int frame_complete, fifon, status, fillempty = 0; + __u8 threshbit, *p; spin_lock(&hw->lock); if (fifo->stop_gracefull) { @@ -1219,6 +1189,9 @@ tx_iso_complete(struct urb *urb) tx_skb = fifo->bch->tx_skb; tx_idx = &fifo->bch->tx_idx; hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags); + if (!tx_skb && !hdlc && + test_bit(FLG_FILLEMPTY, &fifo->bch->Flags)) + fillempty = 1; } else { printk(KERN_DEBUG "%s: %s: neither BCH nor DCH\n", hw->name, __func__); @@ -1277,6 +1250,8 @@ tx_iso_complete(struct urb *urb) /* Generate next ISO Packets */ if (tx_skb) remain = tx_skb->len - *tx_idx; + else if (fillempty) + remain = 15; /* > not complete */ else remain = 0; @@ -1307,15 +1282,20 @@ tx_iso_complete(struct urb *urb) } /* copy tx data to iso-urb buffer */ - memcpy(context_iso_urb->buffer + tx_offset + 1, - (tx_skb->data + *tx_idx), current_len); - *tx_idx += current_len; - + p = context_iso_urb->buffer + tx_offset + 1; + if (fillempty) { + memset(p, fifo->bch->fill[0], + current_len); + } else { + memcpy(p, (tx_skb->data + *tx_idx), + current_len); + *tx_idx += current_len; + } urb->iso_frame_desc[k].offset = tx_offset; urb->iso_frame_desc[k].length = current_len + 1; /* USB data log for every D ISO out */ - if ((fifon == HFCUSB_D_RX) && + if ((fifon == HFCUSB_D_RX) && !fillempty && (debug & DBG_HFC_USB_VERBOSE)) { printk(KERN_DEBUG "%s: %s (%d/%d) offs(%d) len(%d) ", @@ -1365,12 +1345,8 @@ tx_iso_complete(struct urb *urb) if (fifo->dch && get_next_dframe(fifo->dch)) tx_skb = fifo->dch->tx_skb; else if (fifo->bch && - get_next_bframe(fifo->bch)) { - if (test_bit(FLG_TRANSPARENT, - &fifo->bch->Flags)) - confirm_Bsend(fifo->bch); + get_next_bframe(fifo->bch)) tx_skb = fifo->bch->tx_skb; - } } } errcode = usb_submit_urb(urb, GFP_ATOMIC); @@ -1812,7 +1788,7 @@ deactivate_bchannel(struct bchannel *bch) mISDN_clear_bchannel(bch); spin_unlock_irqrestore(&hw->lock, flags); hfcsusb_setup_bch(bch, ISDN_P_NONE); - hfcsusb_stop_endpoint(hw, bch->nr); + hfcsusb_stop_endpoint(hw, bch->nr - 1); } /* @@ -1836,8 +1812,7 @@ hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg) case CLOSE_CHANNEL: test_and_clear_bit(FLG_OPEN, &bch->Flags); - if (test_bit(FLG_ACTIVE, &bch->Flags)) - deactivate_bchannel(bch); + deactivate_bchannel(bch); ch->protocol = ISDN_P_NONE; ch->peer = NULL; module_put(THIS_MODULE); @@ -1883,7 +1858,7 @@ setup_instance(struct hfcsusb *hw, struct device *parent) 
hw->bch[i].nr = i + 1; set_channelmap(i + 1, hw->dch.dev.channelmap); hw->bch[i].debug = debug; - mISDN_initbchannel(&hw->bch[i], MAX_DATA_MEM); + mISDN_initbchannel(&hw->bch[i], MAX_DATA_MEM, poll >> 1); hw->bch[i].hw = hw; hw->bch[i].ch.send = hfcusb_l2l1B; hw->bch[i].ch.ctrl = hfc_bctrl; diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c index 884369f09ca..752e0825591 100644 --- a/drivers/isdn/hardware/mISDN/mISDNipac.c +++ b/drivers/isdn/hardware/mISDN/mISDNipac.c @@ -603,10 +603,11 @@ isac_l1hw(struct mISDNchannel *ch, struct sk_buff *skb) } static int -isac_ctrl(struct isac_hw *isac, u32 cmd, u_long para) +isac_ctrl(struct isac_hw *isac, u32 cmd, unsigned long para) { u8 tl = 0; - u_long flags; + unsigned long flags; + int ret = 0; switch (cmd) { case HW_TESTLOOP: @@ -626,12 +627,15 @@ isac_ctrl(struct isac_hw *isac, u32 cmd, u_long para) } spin_unlock_irqrestore(isac->hwlock, flags); break; + case HW_TIMER3_VALUE: + ret = l1_event(isac->dch.l1, HW_TIMER3_VALUE | (para & 0xff)); + break; default: pr_debug("%s: %s unknown command %x %lx\n", isac->name, __func__, cmd, para); - return -1; + ret = -1; } - return 0; + return ret; } static int @@ -929,22 +933,21 @@ static void hscx_empty_fifo(struct hscx_hw *hscx, u8 count) { u8 *p; + int maxlen; pr_debug("%s: B%1d %d\n", hscx->ip->name, hscx->bch.nr, count); - if (!hscx->bch.rx_skb) { - hscx->bch.rx_skb = mI_alloc_skb(hscx->bch.maxlen, GFP_ATOMIC); - if (!hscx->bch.rx_skb) { - pr_info("%s: B receive out of memory\n", - hscx->ip->name); - hscx_cmdr(hscx, 0x80); /* RMC */ - return; - } + if (test_bit(FLG_RX_OFF, &hscx->bch.Flags)) { + hscx->bch.dropcnt += count; + hscx_cmdr(hscx, 0x80); /* RMC */ + return; } - if ((hscx->bch.rx_skb->len + count) > hscx->bch.maxlen) { - pr_debug("%s: overrun %d\n", hscx->ip->name, - hscx->bch.rx_skb->len + count); - skb_trim(hscx->bch.rx_skb, 0); + maxlen = bchannel_get_rxbuf(&hscx->bch, count); + if (maxlen < 0) { hscx_cmdr(hscx, 0x80); /* RMC */ + if (hscx->bch.rx_skb) + skb_trim(hscx->bch.rx_skb, 0); + pr_warning("%s.B%d: No bufferspace for %d bytes\n", + hscx->ip->name, hscx->bch.nr, count); return; } p = skb_put(hscx->bch.rx_skb, count); @@ -971,22 +974,28 @@ hscx_fill_fifo(struct hscx_hw *hscx) int count, more; u8 *p; - if (!hscx->bch.tx_skb) - return; - count = hscx->bch.tx_skb->len - hscx->bch.tx_idx; - if (count <= 0) - return; - p = hscx->bch.tx_skb->data + hscx->bch.tx_idx; - - more = test_bit(FLG_TRANSPARENT, &hscx->bch.Flags) ? 1 : 0; - if (count > hscx->fifo_size) { + if (!hscx->bch.tx_skb) { + if (!test_bit(FLG_TX_EMPTY, &hscx->bch.Flags)) + return; count = hscx->fifo_size; more = 1; - } - pr_debug("%s: B%1d %d/%d/%d\n", hscx->ip->name, hscx->bch.nr, count, - hscx->bch.tx_idx, hscx->bch.tx_skb->len); - hscx->bch.tx_idx += count; + p = hscx->log; + memset(p, hscx->bch.fill[0], count); + } else { + count = hscx->bch.tx_skb->len - hscx->bch.tx_idx; + if (count <= 0) + return; + p = hscx->bch.tx_skb->data + hscx->bch.tx_idx; + more = test_bit(FLG_TRANSPARENT, &hscx->bch.Flags) ? 1 : 0; + if (count > hscx->fifo_size) { + count = hscx->fifo_size; + more = 1; + } + pr_debug("%s: B%1d %d/%d/%d\n", hscx->ip->name, hscx->bch.nr, + count, hscx->bch.tx_idx, hscx->bch.tx_skb->len); + hscx->bch.tx_idx += count; + } if (hscx->ip->type & IPAC_TYPE_IPACX) hscx->ip->write_fifo(hscx->ip->hw, hscx->off + IPACX_XFIFOB, p, count); @@ -997,7 +1006,7 @@ hscx_fill_fifo(struct hscx_hw *hscx) } hscx_cmdr(hscx, more ? 
0x08 : 0x0a); - if (hscx->bch.debug & DEBUG_HW_BFIFO) { + if (hscx->bch.tx_skb && (hscx->bch.debug & DEBUG_HW_BFIFO)) { snprintf(hscx->log, 64, "B%1d-send %s %d ", hscx->bch.nr, hscx->ip->name, count); print_hex_dump_bytes(hscx->log, DUMP_PREFIX_OFFSET, p, count); @@ -1007,17 +1016,17 @@ hscx_fill_fifo(struct hscx_hw *hscx) static void hscx_xpr(struct hscx_hw *hx) { - if (hx->bch.tx_skb && hx->bch.tx_idx < hx->bch.tx_skb->len) + if (hx->bch.tx_skb && hx->bch.tx_idx < hx->bch.tx_skb->len) { hscx_fill_fifo(hx); - else { - if (hx->bch.tx_skb) { - /* send confirm, on trans, free on hdlc. */ - if (test_bit(FLG_TRANSPARENT, &hx->bch.Flags)) - confirm_Bsend(&hx->bch); + } else { + if (hx->bch.tx_skb) dev_kfree_skb(hx->bch.tx_skb); - } - if (get_next_bframe(&hx->bch)) + if (get_next_bframe(&hx->bch)) { + hscx_fill_fifo(hx); + test_and_clear_bit(FLG_TX_EMPTY, &hx->bch.Flags); + } else if (test_bit(FLG_TX_EMPTY, &hx->bch.Flags)) { hscx_fill_fifo(hx); + } } } @@ -1069,7 +1078,7 @@ ipac_rme(struct hscx_hw *hx) skb_trim(hx->bch.rx_skb, 0); } else { skb_trim(hx->bch.rx_skb, hx->bch.rx_skb->len - 1); - recv_Bchannel(&hx->bch, 0); + recv_Bchannel(&hx->bch, 0, false); } } @@ -1120,11 +1129,8 @@ ipac_irq(struct hscx_hw *hx, u8 ista) if (istab & IPACX_B_RPF) { hscx_empty_fifo(hx, hx->fifo_size); - if (test_bit(FLG_TRANSPARENT, &hx->bch.Flags)) { - /* receive transparent audio data */ - if (hx->bch.rx_skb) - recv_Bchannel(&hx->bch, 0); - } + if (test_bit(FLG_TRANSPARENT, &hx->bch.Flags)) + recv_Bchannel(&hx->bch, 0, false); } if (istab & IPACX_B_RFO) { @@ -1137,7 +1143,9 @@ ipac_irq(struct hscx_hw *hx, u8 ista) if (istab & IPACX_B_XDU) { if (test_bit(FLG_TRANSPARENT, &hx->bch.Flags)) { - hscx_fill_fifo(hx); + if (test_bit(FLG_FILLEMPTY, &hx->bch.Flags)) + test_and_set_bit(FLG_TX_EMPTY, &hx->bch.Flags); + hscx_xpr(hx); return; } pr_debug("%s: B%1d XDU error at len %d\n", hx->ip->name, @@ -1338,22 +1346,17 @@ hscx_l2l1(struct mISDNchannel *ch, struct sk_buff *skb) struct hscx_hw *hx = container_of(bch, struct hscx_hw, bch); int ret = -EINVAL; struct mISDNhead *hh = mISDN_HEAD_P(skb); - u32 id; - u_long flags; + unsigned long flags; switch (hh->prim) { case PH_DATA_REQ: spin_lock_irqsave(hx->ip->hwlock, flags); ret = bchannel_senddata(bch, skb); if (ret > 0) { /* direct TX */ - id = hh->id; /* skb can be freed */ ret = 0; hscx_fill_fifo(hx); - spin_unlock_irqrestore(hx->ip->hwlock, flags); - if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) - queue_ch_frame(ch, PH_DATA_CNF, id, NULL); - } else - spin_unlock_irqrestore(hx->ip->hwlock, flags); + } + spin_unlock_irqrestore(hx->ip->hwlock, flags); return ret; case PH_ACTIVATE_REQ: spin_lock_irqsave(hx->ip->hwlock, flags); @@ -1388,20 +1391,7 @@ hscx_l2l1(struct mISDNchannel *ch, struct sk_buff *skb) static int channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) { - int ret = 0; - - switch (cq->op) { - case MISDN_CTRL_GETOP: - cq->op = 0; - break; - /* Nothing implemented yet */ - case MISDN_CTRL_FILL_EMPTY: - default: - pr_info("%s: unknown Op %x\n", __func__, cq->op); - ret = -EINVAL; - break; - } - return ret; + return mISDN_ctrl_bchannel(bch, cq); } static int @@ -1416,15 +1406,10 @@ hscx_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg) switch (cmd) { case CLOSE_CHANNEL: test_and_clear_bit(FLG_OPEN, &bch->Flags); - if (test_bit(FLG_ACTIVE, &bch->Flags)) { - spin_lock_irqsave(hx->ip->hwlock, flags); - mISDN_freebchannel(bch); - hscx_mode(hx, ISDN_P_NONE); - spin_unlock_irqrestore(hx->ip->hwlock, flags); - } else { - skb_queue_purge(&bch->rqueue); - 
bch->rcount = 0; - } + spin_lock_irqsave(hx->ip->hwlock, flags); + mISDN_freebchannel(bch); + hscx_mode(hx, ISDN_P_NONE); + spin_unlock_irqrestore(hx->ip->hwlock, flags); ch->protocol = ISDN_P_NONE; ch->peer = NULL; module_put(hx->ip->owner); @@ -1526,7 +1511,7 @@ channel_ctrl(struct ipac_hw *ipac, struct mISDN_ctrl_req *cq) switch (cq->op) { case MISDN_CTRL_GETOP: - cq->op = MISDN_CTRL_LOOP; + cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3; break; case MISDN_CTRL_LOOP: /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */ @@ -1536,6 +1521,9 @@ channel_ctrl(struct ipac_hw *ipac, struct mISDN_ctrl_req *cq) } ret = ipac->ctrl(ipac, HW_TESTLOOP, cq->channel); break; + case MISDN_CTRL_L1_TIMER3: + ret = ipac->isac.ctrl(&ipac->isac, HW_TIMER3_VALUE, cq->p1); + break; default: pr_info("%s: unknown CTRL OP %x\n", ipac->name, cq->op); ret = -EINVAL; @@ -1621,7 +1609,8 @@ mISDNipac_init(struct ipac_hw *ipac, void *hw) set_channelmap(i + 1, ipac->isac.dch.dev.channelmap); list_add(&ipac->hscx[i].bch.ch.list, &ipac->isac.dch.dev.bchannels); - mISDN_initbchannel(&ipac->hscx[i].bch, MAX_DATA_MEM); + mISDN_initbchannel(&ipac->hscx[i].bch, MAX_DATA_MEM, + ipac->hscx[i].fifo_size); ipac->hscx[i].bch.ch.nr = i + 1; ipac->hscx[i].bch.ch.send = &hscx_l2l1; ipac->hscx[i].bch.ch.ctrl = hscx_bctrl; diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c index 9a6da6edcfa..be5973ded6d 100644 --- a/drivers/isdn/hardware/mISDN/mISDNisar.c +++ b/drivers/isdn/hardware/mISDN/mISDNisar.c @@ -421,13 +421,19 @@ deliver_status(struct isar_ch *ch, int status) static inline void isar_rcv_frame(struct isar_ch *ch) { - u8 *ptr; + u8 *ptr; + int maxlen; if (!ch->is->clsb) { pr_debug("%s; ISAR zero len frame\n", ch->is->name); ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); return; } + if (test_bit(FLG_RX_OFF, &ch->bch.Flags)) { + ch->bch.dropcnt += ch->is->clsb; + ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); + return; + } switch (ch->bch.state) { case ISDN_P_NONE: pr_debug("%s: ISAR protocol 0 spurious IIS_RDATA %x/%x/%x\n", @@ -437,36 +443,22 @@ isar_rcv_frame(struct isar_ch *ch) case ISDN_P_B_RAW: case ISDN_P_B_L2DTMF: case ISDN_P_B_MODEM_ASYNC: - if (!ch->bch.rx_skb) { - ch->bch.rx_skb = mI_alloc_skb(ch->bch.maxlen, - GFP_ATOMIC); - if (unlikely(!ch->bch.rx_skb)) { - pr_info("%s: B receive out of memory\n", - ch->is->name); - ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); - break; - } + maxlen = bchannel_get_rxbuf(&ch->bch, ch->is->clsb); + if (maxlen < 0) { + pr_warning("%s.B%d: No bufferspace for %d bytes\n", + ch->is->name, ch->bch.nr, ch->is->clsb); + ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); + break; } rcv_mbox(ch->is, skb_put(ch->bch.rx_skb, ch->is->clsb)); - recv_Bchannel(&ch->bch, 0); + recv_Bchannel(&ch->bch, 0, false); break; case ISDN_P_B_HDLC: - if (!ch->bch.rx_skb) { - ch->bch.rx_skb = mI_alloc_skb(ch->bch.maxlen, - GFP_ATOMIC); - if (unlikely(!ch->bch.rx_skb)) { - pr_info("%s: B receive out of memory\n", - ch->is->name); - ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); - break; - } - } - if ((ch->bch.rx_skb->len + ch->is->clsb) > - (ch->bch.maxlen + 2)) { - pr_debug("%s: incoming packet too large\n", - ch->is->name); + maxlen = bchannel_get_rxbuf(&ch->bch, ch->is->clsb); + if (maxlen < 0) { + pr_warning("%s.B%d: No bufferspace for %d bytes\n", + ch->is->name, ch->bch.nr, ch->is->clsb); ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); - skb_trim(ch->bch.rx_skb, 0); break; } if (ch->is->cmsb & HDLC_ERROR) { @@ -494,7 +486,7 @@ isar_rcv_frame(struct isar_ch *ch) break; } 
skb_trim(ch->bch.rx_skb, ch->bch.rx_skb->len - 2); - recv_Bchannel(&ch->bch, 0); + recv_Bchannel(&ch->bch, 0, false); } break; case ISDN_P_B_T30_FAX: @@ -530,7 +522,7 @@ isar_rcv_frame(struct isar_ch *ch) ch->state = STFAX_ESCAPE; /* set_skb_flag(skb, DF_NOMOREDATA); */ } - recv_Bchannel(&ch->bch, 0); + recv_Bchannel(&ch->bch, 0, false); if (ch->is->cmsb & SART_NMD) deliver_status(ch, HW_MOD_NOCARR); break; @@ -570,7 +562,7 @@ isar_rcv_frame(struct isar_ch *ch) break; } skb_trim(ch->bch.rx_skb, ch->bch.rx_skb->len - 2); - recv_Bchannel(&ch->bch, 0); + recv_Bchannel(&ch->bch, 0, false); } if (ch->is->cmsb & SART_NMD) { /* ABORT */ pr_debug("%s: isar_rcv_frame: no more data\n", @@ -598,16 +590,25 @@ isar_fill_fifo(struct isar_ch *ch) u8 msb; u8 *ptr; - pr_debug("%s: ch%d tx_skb %p tx_idx %d\n", - ch->is->name, ch->bch.nr, ch->bch.tx_skb, ch->bch.tx_idx); - if (!ch->bch.tx_skb) + pr_debug("%s: ch%d tx_skb %d tx_idx %d\n", ch->is->name, ch->bch.nr, + ch->bch.tx_skb ? ch->bch.tx_skb->len : -1, ch->bch.tx_idx); + if (!(ch->is->bstat & + (ch->dpath == 1 ? BSTAT_RDM1 : BSTAT_RDM2))) + return; + if (!ch->bch.tx_skb) { + if (!test_bit(FLG_TX_EMPTY, &ch->bch.Flags) || + (ch->bch.state != ISDN_P_B_RAW)) + return; + count = ch->mml; + /* use the card buffer */ + memset(ch->is->buf, ch->bch.fill[0], count); + send_mbox(ch->is, SET_DPS(ch->dpath) | ISAR_HIS_SDATA, + 0, count, ch->is->buf); return; + } count = ch->bch.tx_skb->len - ch->bch.tx_idx; if (count <= 0) return; - if (!(ch->is->bstat & - (ch->dpath == 1 ? BSTAT_RDM1 : BSTAT_RDM2))) - return; if (count > ch->mml) { msb = 0; count = ch->mml; @@ -686,9 +687,9 @@ sel_bch_isar(struct isar_hw *isar, u8 dpath) static void send_next(struct isar_ch *ch) { - pr_debug("%s: %s ch%d tx_skb %p tx_idx %d\n", - ch->is->name, __func__, ch->bch.nr, - ch->bch.tx_skb, ch->bch.tx_idx); + pr_debug("%s: %s ch%d tx_skb %d tx_idx %d\n", ch->is->name, __func__, + ch->bch.nr, ch->bch.tx_skb ? ch->bch.tx_skb->len : -1, + ch->bch.tx_idx); if (ch->bch.state == ISDN_P_B_T30_FAX) { if (ch->cmd == PCTRL_CMD_FTH) { if (test_bit(FLG_LASTDATA, &ch->bch.Flags)) { @@ -702,15 +703,14 @@ send_next(struct isar_ch *ch) } } } - if (ch->bch.tx_skb) { - /* send confirm, on trans, free on hdlc. 
*/ - if (test_bit(FLG_TRANSPARENT, &ch->bch.Flags)) - confirm_Bsend(&ch->bch); + if (ch->bch.tx_skb) dev_kfree_skb(ch->bch.tx_skb); - } - if (get_next_bframe(&ch->bch)) + if (get_next_bframe(&ch->bch)) { isar_fill_fifo(ch); - else { + test_and_clear_bit(FLG_TX_EMPTY, &ch->bch.Flags); + } else if (test_bit(FLG_TX_EMPTY, &ch->bch.Flags)) { + isar_fill_fifo(ch); + } else { if (test_and_clear_bit(FLG_DLEETX, &ch->bch.Flags)) { if (test_and_clear_bit(FLG_LASTDATA, &ch->bch.Flags)) { @@ -724,6 +724,8 @@ send_next(struct isar_ch *ch) } else { deliver_status(ch, HW_MOD_CONNECT); } + } else if (test_bit(FLG_FILLEMPTY, &ch->bch.Flags)) { + test_and_set_bit(FLG_TX_EMPTY, &ch->bch.Flags); } } } @@ -1487,14 +1489,10 @@ isar_l2l1(struct mISDNchannel *ch, struct sk_buff *skb) spin_lock_irqsave(ich->is->hwlock, flags); ret = bchannel_senddata(bch, skb); if (ret > 0) { /* direct TX */ - id = hh->id; /* skb can be freed */ ret = 0; isar_fill_fifo(ich); - spin_unlock_irqrestore(ich->is->hwlock, flags); - if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) - queue_ch_frame(ch, PH_DATA_CNF, id, NULL); - } else - spin_unlock_irqrestore(ich->is->hwlock, flags); + } + spin_unlock_irqrestore(ich->is->hwlock, flags); return ret; case PH_ACTIVATE_REQ: spin_lock_irqsave(ich->is->hwlock, flags); @@ -1575,20 +1573,7 @@ isar_l2l1(struct mISDNchannel *ch, struct sk_buff *skb) static int channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) { - int ret = 0; - - switch (cq->op) { - case MISDN_CTRL_GETOP: - cq->op = 0; - break; - /* Nothing implemented yet */ - case MISDN_CTRL_FILL_EMPTY: - default: - pr_info("%s: unknown Op %x\n", __func__, cq->op); - ret = -EINVAL; - break; - } - return ret; + return mISDN_ctrl_bchannel(bch, cq); } static int @@ -1603,15 +1588,10 @@ isar_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg) switch (cmd) { case CLOSE_CHANNEL: test_and_clear_bit(FLG_OPEN, &bch->Flags); - if (test_bit(FLG_ACTIVE, &bch->Flags)) { - spin_lock_irqsave(ich->is->hwlock, flags); - mISDN_freebchannel(bch); - modeisar(ich, ISDN_P_NONE); - spin_unlock_irqrestore(ich->is->hwlock, flags); - } else { - skb_queue_purge(&bch->rqueue); - bch->rcount = 0; - } + spin_lock_irqsave(ich->is->hwlock, flags); + mISDN_freebchannel(bch); + modeisar(ich, ISDN_P_NONE); + spin_unlock_irqrestore(ich->is->hwlock, flags); ch->protocol = ISDN_P_NONE; ch->peer = NULL; module_put(ich->is->owner); @@ -1677,7 +1657,6 @@ isar_open(struct isar_hw *isar, struct channel_req *rq) bch = &isar->ch[rq->adr.channel - 1].bch; if (test_and_set_bit(FLG_OPEN, &bch->Flags)) return -EBUSY; /* b-channel can be only open once */ - test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags); bch->ch.protocol = rq->protocol; rq->ch = &bch->ch; return 0; @@ -1691,7 +1670,7 @@ mISDNisar_init(struct isar_hw *isar, void *hw) isar->hw = hw; for (i = 0; i < 2; i++) { isar->ch[i].bch.nr = i + 1; - mISDN_initbchannel(&isar->ch[i].bch, MAX_DATA_MEM); + mISDN_initbchannel(&isar->ch[i].bch, MAX_DATA_MEM, 32); isar->ch[i].bch.ch.nr = i + 1; isar->ch[i].bch.ch.send = &isar_l2l1; isar->ch[i].bch.ch.ctrl = isar_bctrl; diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c index c726e09d098..c3e3e768627 100644 --- a/drivers/isdn/hardware/mISDN/netjet.c +++ b/drivers/isdn/hardware/mISDN/netjet.c @@ -386,24 +386,20 @@ read_dma(struct tiger_ch *bc, u32 idx, int cnt) bc->bch.nr, idx); } bc->lastrx = idx; - if (!bc->bch.rx_skb) { - bc->bch.rx_skb = mI_alloc_skb(bc->bch.maxlen, GFP_ATOMIC); - if (!bc->bch.rx_skb) { - pr_info("%s: B%1d receive out of memory\n", - 
card->name, bc->bch.nr); - return; - } + if (test_bit(FLG_RX_OFF, &bc->bch.Flags)) { + bc->bch.dropcnt += cnt; + return; } - - if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) { - if ((bc->bch.rx_skb->len + cnt) > bc->bch.maxlen) { - pr_debug("%s: B%1d overrun %d\n", card->name, - bc->bch.nr, bc->bch.rx_skb->len + cnt); - skb_trim(bc->bch.rx_skb, 0); - return; - } + stat = bchannel_get_rxbuf(&bc->bch, cnt); + /* only transparent use the count here, HDLC overun is detected later */ + if (stat == ENOMEM) { + pr_warning("%s.B%d: No memory for %d bytes\n", + card->name, bc->bch.nr, cnt); + return; + } + if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) p = skb_put(bc->bch.rx_skb, cnt); - } else + else p = bc->hrbuf; for (i = 0; i < cnt; i++) { @@ -414,48 +410,45 @@ read_dma(struct tiger_ch *bc, u32 idx, int cnt) idx = 0; p[i] = val & 0xff; } + + if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) { + recv_Bchannel(&bc->bch, 0, false); + return; + } + pn = bc->hrbuf; -next_frame: - if (test_bit(FLG_HDLC, &bc->bch.Flags)) { + while (cnt > 0) { stat = isdnhdlc_decode(&bc->hrecv, pn, cnt, &i, bc->bch.rx_skb->data, bc->bch.maxlen); - if (stat > 0) /* valid frame received */ + if (stat > 0) { /* valid frame received */ p = skb_put(bc->bch.rx_skb, stat); - else if (stat == -HDLC_CRC_ERROR) + if (debug & DEBUG_HW_BFIFO) { + snprintf(card->log, LOG_SIZE, + "B%1d-recv %s %d ", bc->bch.nr, + card->name, stat); + print_hex_dump_bytes(card->log, + DUMP_PREFIX_OFFSET, p, + stat); + } + recv_Bchannel(&bc->bch, 0, false); + stat = bchannel_get_rxbuf(&bc->bch, bc->bch.maxlen); + if (stat < 0) { + pr_warning("%s.B%d: No memory for %d bytes\n", + card->name, bc->bch.nr, cnt); + return; + } + } else if (stat == -HDLC_CRC_ERROR) { pr_info("%s: B%1d receive frame CRC error\n", card->name, bc->bch.nr); - else if (stat == -HDLC_FRAMING_ERROR) + } else if (stat == -HDLC_FRAMING_ERROR) { pr_info("%s: B%1d receive framing error\n", card->name, bc->bch.nr); - else if (stat == -HDLC_LENGTH_ERROR) + } else if (stat == -HDLC_LENGTH_ERROR) { pr_info("%s: B%1d receive frame too long (> %d)\n", card->name, bc->bch.nr, bc->bch.maxlen); - } else - stat = cnt; - - if (stat > 0) { - if (debug & DEBUG_HW_BFIFO) { - snprintf(card->log, LOG_SIZE, "B%1d-recv %s %d ", - bc->bch.nr, card->name, stat); - print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, - p, stat); } - recv_Bchannel(&bc->bch, 0); - } - if (test_bit(FLG_HDLC, &bc->bch.Flags)) { pn += i; cnt -= i; - if (!bc->bch.rx_skb) { - bc->bch.rx_skb = mI_alloc_skb(bc->bch.maxlen, - GFP_ATOMIC); - if (!bc->bch.rx_skb) { - pr_info("%s: B%1d receive out of memory\n", - card->name, bc->bch.nr); - return; - } - } - if (cnt > 0) - goto next_frame; } } @@ -544,22 +537,31 @@ static void fill_dma(struct tiger_ch *bc) { struct tiger_hw *card = bc->bch.hw; - int count, i; - u32 m, v; + int count, i, fillempty = 0; + u32 m, v, n = 0; u8 *p; if (bc->free == 0) return; - count = bc->bch.tx_skb->len - bc->bch.tx_idx; - if (count <= 0) - return; - pr_debug("%s: %s B%1d %d/%d/%d/%d state %x idx %d/%d\n", card->name, - __func__, bc->bch.nr, count, bc->free, bc->bch.tx_idx, - bc->bch.tx_skb->len, bc->txstate, bc->idx, card->send.idx); + if (!bc->bch.tx_skb) { + if (!test_bit(FLG_TX_EMPTY, &bc->bch.Flags)) + return; + fillempty = 1; + count = card->send.size >> 1; + p = bc->bch.fill; + } else { + count = bc->bch.tx_skb->len - bc->bch.tx_idx; + if (count <= 0) + return; + pr_debug("%s: %s B%1d %d/%d/%d/%d state %x idx %d/%d\n", + card->name, __func__, bc->bch.nr, count, bc->free, + bc->bch.tx_idx, 
bc->bch.tx_skb->len, bc->txstate, + bc->idx, card->send.idx); + p = bc->bch.tx_skb->data + bc->bch.tx_idx; + } if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN)) resync(bc, card); - p = bc->bch.tx_skb->data + bc->bch.tx_idx; - if (test_bit(FLG_HDLC, &bc->bch.Flags)) { + if (test_bit(FLG_HDLC, &bc->bch.Flags) && !fillempty) { count = isdnhdlc_encode(&bc->hsend, p, count, &i, bc->hsbuf, bc->free); pr_debug("%s: B%1d hdlc encoded %d in %d\n", card->name, @@ -570,17 +572,33 @@ fill_dma(struct tiger_ch *bc) } else { if (count > bc->free) count = bc->free; - bc->bch.tx_idx += count; + if (!fillempty) + bc->bch.tx_idx += count; bc->free -= count; } m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff; - for (i = 0; i < count; i++) { - if (bc->idx >= card->send.size) - bc->idx = 0; - v = card->send.start[bc->idx]; - v &= m; - v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8; - card->send.start[bc->idx++] = v; + if (fillempty) { + n = p[0]; + if (!(bc->bch.nr & 1)) + n <<= 8; + for (i = 0; i < count; i++) { + if (bc->idx >= card->send.size) + bc->idx = 0; + v = card->send.start[bc->idx]; + v &= m; + v |= n; + card->send.start[bc->idx++] = v; + } + } else { + for (i = 0; i < count; i++) { + if (bc->idx >= card->send.size) + bc->idx = 0; + v = card->send.start[bc->idx]; + v &= m; + n = p[i]; + v |= (bc->bch.nr & 1) ? n : n << 8; + card->send.start[bc->idx++] = v; + } } if (debug & DEBUG_HW_BFIFO) { snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ", @@ -595,21 +613,26 @@ fill_dma(struct tiger_ch *bc) static int bc_next_frame(struct tiger_ch *bc) { - if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len) + int ret = 1; + + if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len) { fill_dma(bc); - else { - if (bc->bch.tx_skb) { - /* send confirm, on trans, free on hdlc. 
*/ - if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) - confirm_Bsend(&bc->bch); + } else { + if (bc->bch.tx_skb) dev_kfree_skb(bc->bch.tx_skb); - } - if (get_next_bframe(&bc->bch)) + if (get_next_bframe(&bc->bch)) { fill_dma(bc); - else - return 0; + test_and_clear_bit(FLG_TX_EMPTY, &bc->bch.Flags); + } else if (test_bit(FLG_TX_EMPTY, &bc->bch.Flags)) { + fill_dma(bc); + } else if (test_bit(FLG_FILLEMPTY, &bc->bch.Flags)) { + test_and_set_bit(FLG_TX_EMPTY, &bc->bch.Flags); + ret = 0; + } else { + ret = 0; + } } - return 1; + return ret; } static void @@ -732,22 +755,17 @@ nj_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb) struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch); struct tiger_hw *card = bch->hw; struct mISDNhead *hh = mISDN_HEAD_P(skb); - u32 id; - u_long flags; + unsigned long flags; switch (hh->prim) { case PH_DATA_REQ: spin_lock_irqsave(&card->lock, flags); ret = bchannel_senddata(bch, skb); if (ret > 0) { /* direct TX */ - id = hh->id; /* skb can be freed */ fill_dma(bc); ret = 0; - spin_unlock_irqrestore(&card->lock, flags); - if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) - queue_ch_frame(ch, PH_DATA_CNF, id, NULL); - } else - spin_unlock_irqrestore(&card->lock, flags); + } + spin_unlock_irqrestore(&card->lock, flags); return ret; case PH_ACTIVATE_REQ: spin_lock_irqsave(&card->lock, flags); @@ -778,21 +796,7 @@ nj_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb) static int channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq) { - int ret = 0; - struct tiger_hw *card = bc->bch.hw; - - switch (cq->op) { - case MISDN_CTRL_GETOP: - cq->op = 0; - break; - /* Nothing implemented yet */ - case MISDN_CTRL_FILL_EMPTY: - default: - pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op); - ret = -EINVAL; - break; - } - return ret; + return mISDN_ctrl_bchannel(&bc->bch, cq); } static int @@ -808,14 +812,10 @@ nj_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg) switch (cmd) { case CLOSE_CHANNEL: test_and_clear_bit(FLG_OPEN, &bch->Flags); - if (test_bit(FLG_ACTIVE, &bch->Flags)) { - spin_lock_irqsave(&card->lock, flags); - mISDN_freebchannel(bch); - test_and_clear_bit(FLG_TX_BUSY, &bch->Flags); - test_and_clear_bit(FLG_ACTIVE, &bch->Flags); - mode_tiger(bc, ISDN_P_NONE); - spin_unlock_irqrestore(&card->lock, flags); - } + spin_lock_irqsave(&card->lock, flags); + mISDN_freebchannel(bch); + mode_tiger(bc, ISDN_P_NONE); + spin_unlock_irqrestore(&card->lock, flags); ch->protocol = ISDN_P_NONE; ch->peer = NULL; module_put(THIS_MODULE); @@ -837,7 +837,7 @@ channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq) switch (cq->op) { case MISDN_CTRL_GETOP: - cq->op = MISDN_CTRL_LOOP; + cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3; break; case MISDN_CTRL_LOOP: /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */ @@ -847,6 +847,9 @@ channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq) } ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel); break; + case MISDN_CTRL_L1_TIMER3: + ret = card->isac.ctrl(&card->isac, HW_TIMER3_VALUE, cq->p1); + break; default: pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op); ret = -EINVAL; @@ -1027,7 +1030,8 @@ setup_instance(struct tiger_hw *card) for (i = 0; i < 2; i++) { card->bc[i].bch.nr = i + 1; set_channelmap(i + 1, card->isac.dch.dev.channelmap); - mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM); + mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM, + NJ_DMA_RXSIZE >> 1); card->bc[i].bch.hw = card; card->bc[i].bch.ch.send = nj_l2l1B; card->bc[i].bch.ch.ctrl = nj_bctrl; diff --git 
a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c index 04689935148..93f344d74e5 100644 --- a/drivers/isdn/hardware/mISDN/speedfax.c +++ b/drivers/isdn/hardware/mISDN/speedfax.c @@ -224,7 +224,7 @@ channel_ctrl(struct sfax_hw *sf, struct mISDN_ctrl_req *cq) switch (cq->op) { case MISDN_CTRL_GETOP: - cq->op = MISDN_CTRL_LOOP; + cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3; break; case MISDN_CTRL_LOOP: /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */ @@ -234,6 +234,9 @@ channel_ctrl(struct sfax_hw *sf, struct mISDN_ctrl_req *cq) } ret = sf->isac.ctrl(&sf->isac, HW_TESTLOOP, cq->channel); break; + case MISDN_CTRL_L1_TIMER3: + ret = sf->isac.ctrl(&sf->isac, HW_TIMER3_VALUE, cq->p1); + break; default: pr_info("%s: unknown Op %x\n", sf->name, cq->op); ret = -EINVAL; diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c index 2183357f079..26a86b84609 100644 --- a/drivers/isdn/hardware/mISDN/w6692.c +++ b/drivers/isdn/hardware/mISDN/w6692.c @@ -465,6 +465,7 @@ W6692_empty_Bfifo(struct w6692_ch *wch, int count) { struct w6692_hw *card = wch->bch.hw; u8 *ptr; + int maxlen; pr_debug("%s: empty_Bfifo %d\n", card->name, count); if (unlikely(wch->bch.state == ISDN_P_NONE)) { @@ -474,20 +475,18 @@ W6692_empty_Bfifo(struct w6692_ch *wch, int count) skb_trim(wch->bch.rx_skb, 0); return; } - if (!wch->bch.rx_skb) { - wch->bch.rx_skb = mI_alloc_skb(wch->bch.maxlen, GFP_ATOMIC); - if (unlikely(!wch->bch.rx_skb)) { - pr_info("%s: B receive out of memory\n", card->name); - WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK | - W_B_CMDR_RACT); - return; - } + if (test_bit(FLG_RX_OFF, &wch->bch.Flags)) { + wch->bch.dropcnt += count; + WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT); + return; } - if (wch->bch.rx_skb->len + count > wch->bch.maxlen) { - pr_debug("%s: empty_Bfifo incoming packet too large\n", - card->name); + maxlen = bchannel_get_rxbuf(&wch->bch, count); + if (maxlen < 0) { WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT); - skb_trim(wch->bch.rx_skb, 0); + if (wch->bch.rx_skb) + skb_trim(wch->bch.rx_skb, 0); + pr_warning("%s.B%d: No bufferspace for %d bytes\n", + card->name, wch->bch.nr, count); return; } ptr = skb_put(wch->bch.rx_skb, count); @@ -504,16 +503,22 @@ static void W6692_fill_Bfifo(struct w6692_ch *wch) { struct w6692_hw *card = wch->bch.hw; - int count; + int count, fillempty = 0; u8 *ptr, cmd = W_B_CMDR_RACT | W_B_CMDR_XMS; pr_debug("%s: fill Bfifo\n", card->name); - if (!wch->bch.tx_skb) - return; - count = wch->bch.tx_skb->len - wch->bch.tx_idx; - if (count <= 0) - return; - ptr = wch->bch.tx_skb->data + wch->bch.tx_idx; + if (!wch->bch.tx_skb) { + if (!test_bit(FLG_TX_EMPTY, &wch->bch.Flags)) + return; + ptr = wch->bch.fill; + count = W_B_FIFO_THRESH; + fillempty = 1; + } else { + count = wch->bch.tx_skb->len - wch->bch.tx_idx; + if (count <= 0) + return; + ptr = wch->bch.tx_skb->data + wch->bch.tx_idx; + } if (count > W_B_FIFO_THRESH) count = W_B_FIFO_THRESH; else if (test_bit(FLG_HDLC, &wch->bch.Flags)) @@ -522,9 +527,16 @@ W6692_fill_Bfifo(struct w6692_ch *wch) pr_debug("%s: fill Bfifo%d/%d\n", card->name, count, wch->bch.tx_idx); wch->bch.tx_idx += count; - outsb(wch->addr + W_B_XFIFO, ptr, count); + if (fillempty) { + while (count > 0) { + outsb(wch->addr + W_B_XFIFO, ptr, MISDN_BCH_FILL_SIZE); + count -= MISDN_BCH_FILL_SIZE; + } + } else { + outsb(wch->addr + W_B_XFIFO, ptr, count); + } WriteW6692B(wch, W_B_CMDR, cmd); - if (debug & DEBUG_HW_DFIFO) { + if ((debug & DEBUG_HW_BFIFO) && 
!fillempty) { snprintf(card->log, 63, "B%1d-send %s %d ", wch->bch.nr, card->name, count); print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, ptr, count); @@ -638,17 +650,17 @@ w6692_mode(struct w6692_ch *wch, u32 pr) static void send_next(struct w6692_ch *wch) { - if (wch->bch.tx_skb && wch->bch.tx_idx < wch->bch.tx_skb->len) + if (wch->bch.tx_skb && wch->bch.tx_idx < wch->bch.tx_skb->len) { W6692_fill_Bfifo(wch); - else { - if (wch->bch.tx_skb) { - /* send confirm, on trans, free on hdlc. */ - if (test_bit(FLG_TRANSPARENT, &wch->bch.Flags)) - confirm_Bsend(&wch->bch); + } else { + if (wch->bch.tx_skb) dev_kfree_skb(wch->bch.tx_skb); - } - if (get_next_bframe(&wch->bch)) + if (get_next_bframe(&wch->bch)) { + W6692_fill_Bfifo(wch); + test_and_clear_bit(FLG_TX_EMPTY, &wch->bch.Flags); + } else if (test_bit(FLG_TX_EMPTY, &wch->bch.Flags)) { W6692_fill_Bfifo(wch); + } } } @@ -698,7 +710,7 @@ W6692B_interrupt(struct w6692_hw *card, int ch) if (count == 0) count = W_B_FIFO_THRESH; W6692_empty_Bfifo(wch, count); - recv_Bchannel(&wch->bch, 0); + recv_Bchannel(&wch->bch, 0, false); } } if (stat & W_B_EXI_RMR) { @@ -714,9 +726,8 @@ W6692B_interrupt(struct w6692_hw *card, int ch) W_B_CMDR_RRST | W_B_CMDR_RACT); } else { W6692_empty_Bfifo(wch, W_B_FIFO_THRESH); - if (test_bit(FLG_TRANSPARENT, &wch->bch.Flags) && - wch->bch.rx_skb && (wch->bch.rx_skb->len > 0)) - recv_Bchannel(&wch->bch, 0); + if (test_bit(FLG_TRANSPARENT, &wch->bch.Flags)) + recv_Bchannel(&wch->bch, 0, false); } } if (stat & W_B_EXI_RDOV) { @@ -738,8 +749,8 @@ W6692B_interrupt(struct w6692_hw *card, int ch) wch->bch.nr, star); } if (star & W_B_STAR_XDOW) { - pr_debug("%s: B%d XDOW proto=%x\n", card->name, - wch->bch.nr, wch->bch.state); + pr_warning("%s: B%d XDOW proto=%x\n", card->name, + wch->bch.nr, wch->bch.state); #ifdef ERROR_STATISTIC wch->bch.err_xdu++; #endif @@ -752,20 +763,21 @@ W6692B_interrupt(struct w6692_hw *card, int ch) } } send_next(wch); - if (stat & W_B_EXI_XDUN) + if (star & W_B_STAR_XDOW) return; /* handle XDOW only once */ } if (stat & W_B_EXI_XDUN) { - pr_debug("%s: B%d XDUN proto=%x\n", card->name, - wch->bch.nr, wch->bch.state); + pr_warning("%s: B%d XDUN proto=%x\n", card->name, + wch->bch.nr, wch->bch.state); #ifdef ERROR_STATISTIC wch->bch.err_xdu++; #endif - WriteW6692B(wch, W_B_CMDR, W_B_CMDR_XRST | W_B_CMDR_RACT); - /* resend */ + /* resend - no XRST needed */ if (wch->bch.tx_skb) { if (!test_bit(FLG_TRANSPARENT, &wch->bch.Flags)) wch->bch.tx_idx = 0; + } else if (test_bit(FLG_FILLEMPTY, &wch->bch.Flags)) { + test_and_set_bit(FLG_TX_EMPTY, &wch->bch.Flags); } send_next(wch); } @@ -944,22 +956,17 @@ w6692_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb) struct w6692_hw *card = bch->hw; int ret = -EINVAL; struct mISDNhead *hh = mISDN_HEAD_P(skb); - u32 id; - u_long flags; + unsigned long flags; switch (hh->prim) { case PH_DATA_REQ: spin_lock_irqsave(&card->lock, flags); ret = bchannel_senddata(bch, skb); if (ret > 0) { /* direct TX */ - id = hh->id; /* skb can be freed */ ret = 0; W6692_fill_Bfifo(bc); - spin_unlock_irqrestore(&card->lock, flags); - if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) - queue_ch_frame(ch, PH_DATA_CNF, id, NULL); - } else - spin_unlock_irqrestore(&card->lock, flags); + } + spin_unlock_irqrestore(&card->lock, flags); return ret; case PH_ACTIVATE_REQ: spin_lock_irqsave(&card->lock, flags); @@ -994,20 +1001,7 @@ w6692_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb) static int channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) { - int ret = 0; - - switch (cq->op) 
{ - case MISDN_CTRL_GETOP: - cq->op = 0; - break; - /* Nothing implemented yet */ - case MISDN_CTRL_FILL_EMPTY: - default: - pr_info("%s: unknown Op %x\n", __func__, cq->op); - ret = -EINVAL; - break; - } - return ret; + return mISDN_ctrl_bchannel(bch, cq); } static int @@ -1022,7 +1016,6 @@ open_bchannel(struct w6692_hw *card, struct channel_req *rq) bch = &card->bc[rq->adr.channel - 1].bch; if (test_and_set_bit(FLG_OPEN, &bch->Flags)) return -EBUSY; /* b-channel can be only open once */ - test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags); bch->ch.protocol = rq->protocol; rq->ch = &bch->ch; return 0; @@ -1035,7 +1028,10 @@ channel_ctrl(struct w6692_hw *card, struct mISDN_ctrl_req *cq) switch (cq->op) { case MISDN_CTRL_GETOP: - cq->op = 0; + cq->op = MISDN_CTRL_L1_TIMER3; + break; + case MISDN_CTRL_L1_TIMER3: + ret = l1_event(card->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff)); break; default: pr_info("%s: unknown CTRL OP %x\n", card->name, cq->op); @@ -1058,15 +1054,10 @@ w6692_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg) switch (cmd) { case CLOSE_CHANNEL: test_and_clear_bit(FLG_OPEN, &bch->Flags); - if (test_bit(FLG_ACTIVE, &bch->Flags)) { - spin_lock_irqsave(&card->lock, flags); - mISDN_freebchannel(bch); - w6692_mode(bc, ISDN_P_NONE); - spin_unlock_irqrestore(&card->lock, flags); - } else { - skb_queue_purge(&bch->rqueue); - bch->rcount = 0; - } + spin_lock_irqsave(&card->lock, flags); + mISDN_freebchannel(bch); + w6692_mode(bc, ISDN_P_NONE); + spin_unlock_irqrestore(&card->lock, flags); ch->protocol = ISDN_P_NONE; ch->peer = NULL; module_put(THIS_MODULE); @@ -1320,7 +1311,8 @@ setup_instance(struct w6692_hw *card) card->dch.hw = card; card->dch.dev.nrbchan = 2; for (i = 0; i < 2; i++) { - mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM); + mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM, + W_B_FIFO_THRESH); card->bc[i].bch.hw = card; card->bc[i].bch.nr = i + 1; card->bc[i].bch.ch.nr = i + 1; diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c index ba91333e3e4..88e4f0ee073 100644 --- a/drivers/isdn/hysdn/hysdn_proclog.c +++ b/drivers/isdn/hysdn/hysdn_proclog.c @@ -156,17 +156,9 @@ static ssize_t hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t *off) { int rc; - unsigned char valbuf[128]; hysdn_card *card = file->private_data; - if (count > (sizeof(valbuf) - 1)) - count = sizeof(valbuf) - 1; /* limit length */ - if (copy_from_user(valbuf, buf, count)) - return (-EFAULT); /* copy failed */ - - valbuf[count] = 0; /* terminating 0 */ - - rc = kstrtoul(valbuf, 0, &card->debug_flags); + rc = kstrtoul_from_user(buf, count, 0, &card->debug_flags); if (rc < 0) return rc; hysdn_addlog(card, "debug set to 0x%lx", card->debug_flags); diff --git a/drivers/isdn/i4l/isdn_bsdcomp.c b/drivers/isdn/i4l/isdn_bsdcomp.c index c59e8d2c067..8837ac5a492 100644 --- a/drivers/isdn/i4l/isdn_bsdcomp.c +++ b/drivers/isdn/i4l/isdn_bsdcomp.c @@ -612,7 +612,7 @@ static int bsd_compress(void *state, struct sk_buff *skb_in, struct sk_buff *skb db->n_bits++; /* If output length is too large then this is an incompressible frame. 
*/ - if (!skb_out || (skb_out && skb_out->len >= skb_in->len)) { + if (!skb_out || skb_out->len >= skb_in->len) { ++db->incomp_count; db->incomp_bytes += isize; return 0; diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c index a24530f05db..c401634c00e 100644 --- a/drivers/isdn/mISDN/core.c +++ b/drivers/isdn/mISDN/core.c @@ -355,6 +355,22 @@ mISDN_unregister_Bprotocol(struct Bprotocol *bp) } EXPORT_SYMBOL(mISDN_unregister_Bprotocol); +static const char *msg_no_channel = "<no channel>"; +static const char *msg_no_stack = "<no stack>"; +static const char *msg_no_stackdev = "<no stack device>"; + +const char *mISDNDevName4ch(struct mISDNchannel *ch) +{ + if (!ch) + return msg_no_channel; + if (!ch->st) + return msg_no_stack; + if (!ch->st->dev) + return msg_no_stackdev; + return dev_name(&ch->st->dev->dev); +}; +EXPORT_SYMBOL(mISDNDevName4ch); + static int mISDNInit(void) { diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h index afe4173ae00..fc1733a0884 100644 --- a/drivers/isdn/mISDN/dsp.h +++ b/drivers/isdn/mISDN/dsp.h @@ -76,7 +76,9 @@ extern u8 dsp_silence; #define MAX_SECONDS_JITTER_CHECK 5 extern struct timer_list dsp_spl_tl; -extern u32 dsp_spl_jiffies; + +/* the datatype need to match jiffies datatype */ +extern unsigned long dsp_spl_jiffies; /* the structure of conferences: * diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c index 334feab060a..a4f05c54c32 100644 --- a/drivers/isdn/mISDN/dsp_cmx.c +++ b/drivers/isdn/mISDN/dsp_cmx.c @@ -742,8 +742,8 @@ dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp) member->dsp->pcm_slot_tx, member->dsp->pcm_bank_tx, member->dsp->pcm_bank_rx); - conf->hardware = 0; - conf->software = 1; + conf->hardware = 1; + conf->software = tx_data; return; } /* find a new slot */ @@ -834,8 +834,8 @@ dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp) nextm->dsp->name, member->dsp->pcm_slot_tx, member->dsp->pcm_slot_rx); - conf->hardware = 0; - conf->software = 1; + conf->hardware = 1; + conf->software = tx_data; return; } /* find two new slot */ @@ -939,8 +939,11 @@ dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp) /* for more than two members.. 
*/ /* if all members already have the same conference */ - if (all_conf) + if (all_conf) { + conf->hardware = 1; + conf->software = tx_data; return; + } /* * if there is an existing conference, but not all members have joined @@ -1013,6 +1016,8 @@ dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp) dsp_cmx_hw_message(member->dsp, MISDN_CTRL_HFC_CONF_JOIN, current_conf, 0, 0, 0); } + conf->hardware = 1; + conf->software = tx_data; return; } @@ -1328,7 +1333,7 @@ dsp_cmx_send_member(struct dsp *dsp, int len, s32 *c, int members) } if (dsp->conf && dsp->conf->software && dsp->conf->hardware) tx_data_only = 1; - if (dsp->conf->software && dsp->echo.hardware) + if (dsp->echo.software && dsp->echo.hardware) tx_data_only = 1; } @@ -1619,7 +1624,7 @@ send_packet: static u32 jittercount; /* counter for jitter check */ struct timer_list dsp_spl_tl; -u32 dsp_spl_jiffies; /* calculate the next time to fire */ +unsigned long dsp_spl_jiffies; /* calculate the next time to fire */ static u16 dsp_count; /* last sample count */ static int dsp_count_valid; /* if we have last sample count */ diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index 2ac2d7a25a9..28c99c623bc 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c @@ -268,6 +268,7 @@ dsp_fill_empty(struct dsp *dsp) } cq.op = MISDN_CTRL_FILL_EMPTY; cq.p1 = 1; + cq.p2 = dsp_silence; if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) { printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n", __func__); diff --git a/drivers/isdn/mISDN/dsp_dtmf.c b/drivers/isdn/mISDN/dsp_dtmf.c index 887860bdc63..642f30be5ce 100644 --- a/drivers/isdn/mISDN/dsp_dtmf.c +++ b/drivers/isdn/mISDN/dsp_dtmf.c @@ -222,16 +222,25 @@ coefficients: goto storedigit; } - if (dsp_debug & DEBUG_DSP_DTMFCOEFF) + if (dsp_debug & DEBUG_DSP_DTMFCOEFF) { + s32 tresh_100 = tresh/100; + + if (tresh_100 == 0) { + tresh_100 = 1; + printk(KERN_DEBUG + "tresh(%d) too small set tresh/100 to 1\n", + tresh); + } printk(KERN_DEBUG "a %3d %3d %3d %3d %3d %3d %3d %3d" " tr:%3d r %3d %3d %3d %3d %3d %3d %3d %3d\n", result[0] / 10000, result[1] / 10000, result[2] / 10000, result[3] / 10000, result[4] / 10000, result[5] / 10000, result[6] / 10000, result[7] / 10000, tresh / 10000, - result[0] / (tresh / 100), result[1] / (tresh / 100), - result[2] / (tresh / 100), result[3] / (tresh / 100), - result[4] / (tresh / 100), result[5] / (tresh / 100), - result[6] / (tresh / 100), result[7] / (tresh / 100)); + result[0] / (tresh_100), result[1] / (tresh_100), + result[2] / (tresh_100), result[3] / (tresh_100), + result[4] / (tresh_100), result[5] / (tresh_100), + result[6] / (tresh_100), result[7] / (tresh_100)); + } /* calc digit (lowgroup/highgroup) */ lowgroup = -1; diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c index c74c363554c..ef34fd40867 100644 --- a/drivers/isdn/mISDN/hwchannel.c +++ b/drivers/isdn/mISDN/hwchannel.c @@ -81,10 +81,16 @@ mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf) EXPORT_SYMBOL(mISDN_initdchannel); int -mISDN_initbchannel(struct bchannel *ch, int maxlen) +mISDN_initbchannel(struct bchannel *ch, unsigned short maxlen, + unsigned short minlen) { ch->Flags = 0; + ch->minlen = minlen; + ch->next_minlen = minlen; + ch->init_minlen = minlen; ch->maxlen = maxlen; + ch->next_maxlen = maxlen; + ch->init_maxlen = maxlen; ch->hw = NULL; ch->rx_skb = NULL; ch->tx_skb = NULL; @@ -134,6 +140,14 @@ mISDN_clear_bchannel(struct bchannel *ch) test_and_clear_bit(FLG_TX_BUSY, &ch->Flags); 
test_and_clear_bit(FLG_TX_NEXT, &ch->Flags); test_and_clear_bit(FLG_ACTIVE, &ch->Flags); + test_and_clear_bit(FLG_FILLEMPTY, &ch->Flags); + test_and_clear_bit(FLG_TX_EMPTY, &ch->Flags); + test_and_clear_bit(FLG_RX_OFF, &ch->Flags); + ch->dropcnt = 0; + ch->minlen = ch->init_minlen; + ch->next_minlen = ch->init_minlen; + ch->maxlen = ch->init_maxlen; + ch->next_maxlen = ch->init_maxlen; } EXPORT_SYMBOL(mISDN_clear_bchannel); @@ -148,6 +162,51 @@ mISDN_freebchannel(struct bchannel *ch) } EXPORT_SYMBOL(mISDN_freebchannel); +int +mISDN_ctrl_bchannel(struct bchannel *bch, struct mISDN_ctrl_req *cq) +{ + int ret = 0; + + switch (cq->op) { + case MISDN_CTRL_GETOP: + cq->op = MISDN_CTRL_RX_BUFFER | MISDN_CTRL_FILL_EMPTY | + MISDN_CTRL_RX_OFF; + break; + case MISDN_CTRL_FILL_EMPTY: + if (cq->p1) { + memset(bch->fill, cq->p2 & 0xff, MISDN_BCH_FILL_SIZE); + test_and_set_bit(FLG_FILLEMPTY, &bch->Flags); + } else { + test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags); + } + break; + case MISDN_CTRL_RX_OFF: + /* read back dropped byte count */ + cq->p2 = bch->dropcnt; + if (cq->p1) + test_and_set_bit(FLG_RX_OFF, &bch->Flags); + else + test_and_clear_bit(FLG_RX_OFF, &bch->Flags); + bch->dropcnt = 0; + break; + case MISDN_CTRL_RX_BUFFER: + if (cq->p2 > MISDN_CTRL_RX_SIZE_IGNORE) + bch->next_maxlen = cq->p2; + if (cq->p1 > MISDN_CTRL_RX_SIZE_IGNORE) + bch->next_minlen = cq->p1; + /* we return the old values */ + cq->p1 = bch->minlen; + cq->p2 = bch->maxlen; + break; + default: + pr_info("mISDN unhandled control %x operation\n", cq->op); + ret = -EINVAL; + break; + } + return ret; +} +EXPORT_SYMBOL(mISDN_ctrl_bchannel); + static inline u_int get_sapi_tei(u_char *p) { @@ -197,24 +256,37 @@ recv_Echannel(struct dchannel *ech, struct dchannel *dch) EXPORT_SYMBOL(recv_Echannel); void -recv_Bchannel(struct bchannel *bch, unsigned int id) +recv_Bchannel(struct bchannel *bch, unsigned int id, bool force) { struct mISDNhead *hh; - hh = mISDN_HEAD_P(bch->rx_skb); - hh->prim = PH_DATA_IND; - hh->id = id; - if (bch->rcount >= 64) { - printk(KERN_WARNING "B-channel %p receive queue overflow, " - "flushing!\n", bch); - skb_queue_purge(&bch->rqueue); - bch->rcount = 0; + /* if allocation did fail upper functions still may call us */ + if (unlikely(!bch->rx_skb)) return; + if (unlikely(!bch->rx_skb->len)) { + /* we have no data to send - this may happen after recovery + * from overflow or too small allocation. 
+ * We need to free the buffer here */ + dev_kfree_skb(bch->rx_skb); + bch->rx_skb = NULL; + } else { + if (test_bit(FLG_TRANSPARENT, &bch->Flags) && + (bch->rx_skb->len < bch->minlen) && !force) + return; + hh = mISDN_HEAD_P(bch->rx_skb); + hh->prim = PH_DATA_IND; + hh->id = id; + if (bch->rcount >= 64) { + printk(KERN_WARNING + "B%d receive queue overflow - flushing!\n", + bch->nr); + skb_queue_purge(&bch->rqueue); + } + bch->rcount++; + skb_queue_tail(&bch->rqueue, bch->rx_skb); + bch->rx_skb = NULL; + schedule_event(bch, FLG_RECVQUEUE); } - bch->rcount++; - skb_queue_tail(&bch->rqueue, bch->rx_skb); - bch->rx_skb = NULL; - schedule_event(bch, FLG_RECVQUEUE); } EXPORT_SYMBOL(recv_Bchannel); @@ -272,7 +344,7 @@ get_next_dframe(struct dchannel *dch) } EXPORT_SYMBOL(get_next_dframe); -void +static void confirm_Bsend(struct bchannel *bch) { struct sk_buff *skb; @@ -294,7 +366,6 @@ confirm_Bsend(struct bchannel *bch) skb_queue_tail(&bch->rqueue, skb); schedule_event(bch, FLG_RECVQUEUE); } -EXPORT_SYMBOL(confirm_Bsend); int get_next_bframe(struct bchannel *bch) @@ -305,8 +376,8 @@ get_next_bframe(struct bchannel *bch) if (bch->tx_skb) { bch->next_skb = NULL; test_and_clear_bit(FLG_TX_NEXT, &bch->Flags); - if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) - confirm_Bsend(bch); /* not for transparent */ + /* confirm imediately to allow next data */ + confirm_Bsend(bch); return 1; } else { test_and_clear_bit(FLG_TX_NEXT, &bch->Flags); @@ -395,7 +466,62 @@ bchannel_senddata(struct bchannel *ch, struct sk_buff *skb) /* write to fifo */ ch->tx_skb = skb; ch->tx_idx = 0; + confirm_Bsend(ch); return 1; } } EXPORT_SYMBOL(bchannel_senddata); + +/* The function allocates a new receive skb on demand with a size for the + * requirements of the current protocol. It returns the tailroom of the + * receive skb or an error. 
+ */ +int +bchannel_get_rxbuf(struct bchannel *bch, int reqlen) +{ + int len; + + if (bch->rx_skb) { + len = skb_tailroom(bch->rx_skb); + if (len < reqlen) { + pr_warning("B%d no space for %d (only %d) bytes\n", + bch->nr, reqlen, len); + if (test_bit(FLG_TRANSPARENT, &bch->Flags)) { + /* send what we have now and try a new buffer */ + recv_Bchannel(bch, 0, true); + } else { + /* on HDLC we have to drop too big frames */ + return -EMSGSIZE; + } + } else { + return len; + } + } + /* update current min/max length first */ + if (unlikely(bch->maxlen != bch->next_maxlen)) + bch->maxlen = bch->next_maxlen; + if (unlikely(bch->minlen != bch->next_minlen)) + bch->minlen = bch->next_minlen; + if (unlikely(reqlen > bch->maxlen)) + return -EMSGSIZE; + if (test_bit(FLG_TRANSPARENT, &bch->Flags)) { + if (reqlen >= bch->minlen) { + len = reqlen; + } else { + len = 2 * bch->minlen; + if (len > bch->maxlen) + len = bch->maxlen; + } + } else { + /* with HDLC we do not know the length yet */ + len = bch->maxlen; + } + bch->rx_skb = mI_alloc_skb(len, GFP_ATOMIC); + if (!bch->rx_skb) { + pr_warning("B%d receive no memory for %d bytes\n", + bch->nr, len); + len = -ENOMEM; + } + return len; +} +EXPORT_SYMBOL(bchannel_get_rxbuf); diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 0f88acf1185..db50f788855 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -1420,7 +1420,7 @@ init_card(struct l1oip *hc, int pri, int bundle) bch->nr = i + ch; bch->slot = i + ch; bch->debug = debug; - mISDN_initbchannel(bch, MAX_DATA_MEM); + mISDN_initbchannel(bch, MAX_DATA_MEM, 0); bch->hw = hc; bch->ch.send = handle_bmsg; bch->ch.ctrl = l1oip_bctrl; diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c index 0fc49b37551..bebc57b7213 100644 --- a/drivers/isdn/mISDN/layer1.c +++ b/drivers/isdn/mISDN/layer1.c @@ -28,13 +28,15 @@ static u_int *debug; struct layer1 { u_long Flags; struct FsmInst l1m; - struct FsmTimer timer; + struct FsmTimer timer3; + struct FsmTimer timerX; int delay; + int t3_value; struct dchannel *dch; dchannel_l1callback *dcb; }; -#define TIMER3_VALUE 7000 +#define TIMER3_DEFAULT_VALUE 7000 static struct Fsm l1fsm_s = {NULL, 0, 0, NULL, NULL}; @@ -134,7 +136,7 @@ l1_deact_req_s(struct FsmInst *fi, int event, void *arg) struct layer1 *l1 = fi->userdata; mISDN_FsmChangeState(fi, ST_L1_F3); - mISDN_FsmRestartTimer(&l1->timer, 550, EV_TIMER_DEACT, NULL, 2); + mISDN_FsmRestartTimer(&l1->timerX, 550, EV_TIMER_DEACT, NULL, 2); test_and_set_bit(FLG_L1_DEACTTIMER, &l1->Flags); } @@ -179,11 +181,11 @@ l1_info4_ind(struct FsmInst *fi, int event, void *arg) mISDN_FsmChangeState(fi, ST_L1_F7); l1->dcb(l1->dch, INFO3_P8); if (test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags)) - mISDN_FsmDelTimer(&l1->timer, 4); + mISDN_FsmDelTimer(&l1->timerX, 4); if (!test_bit(FLG_L1_ACTIVATED, &l1->Flags)) { if (test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags)) - mISDN_FsmDelTimer(&l1->timer, 3); - mISDN_FsmRestartTimer(&l1->timer, 110, EV_TIMER_ACT, NULL, 2); + mISDN_FsmDelTimer(&l1->timer3, 3); + mISDN_FsmRestartTimer(&l1->timerX, 110, EV_TIMER_ACT, NULL, 2); test_and_set_bit(FLG_L1_ACTTIMER, &l1->Flags); } } @@ -201,7 +203,7 @@ l1_timer3(struct FsmInst *fi, int event, void *arg) } if (l1->l1m.state != ST_L1_F6) { mISDN_FsmChangeState(fi, ST_L1_F3); - l1->dcb(l1->dch, HW_POWERUP_REQ); + /* do not force anything here, we need send INFO 0 */ } } @@ -233,8 +235,9 @@ l1_activate_s(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; - 
mISDN_FsmRestartTimer(&l1->timer, TIMER3_VALUE, EV_TIMER3, NULL, 2); + mISDN_FsmRestartTimer(&l1->timer3, l1->t3_value, EV_TIMER3, NULL, 2); test_and_set_bit(FLG_L1_T3RUN, &l1->Flags); + /* Tell HW to send INFO 1 */ l1->dcb(l1->dch, HW_RESET_REQ); } @@ -302,7 +305,8 @@ static struct FsmNode L1SFnList[] = static void release_l1(struct layer1 *l1) { - mISDN_FsmDelTimer(&l1->timer, 0); + mISDN_FsmDelTimer(&l1->timerX, 0); + mISDN_FsmDelTimer(&l1->timer3, 0); if (l1->dch) l1->dch->l1 = NULL; module_put(THIS_MODULE); @@ -356,6 +360,16 @@ l1_event(struct layer1 *l1, u_int event) release_l1(l1); break; default: + if ((event & ~HW_TIMER3_VMASK) == HW_TIMER3_VALUE) { + int val = event & HW_TIMER3_VMASK; + + if (val < 5) + val = 5; + if (val > 30) + val = 30; + l1->t3_value = val; + break; + } if (*debug & DEBUG_L1) printk(KERN_DEBUG "%s %x unhandled\n", __func__, event); @@ -377,13 +391,15 @@ create_l1(struct dchannel *dch, dchannel_l1callback *dcb) { nl1->l1m.fsm = &l1fsm_s; nl1->l1m.state = ST_L1_F3; nl1->Flags = 0; + nl1->t3_value = TIMER3_DEFAULT_VALUE; nl1->l1m.debug = *debug & DEBUG_L1_FSM; nl1->l1m.userdata = nl1; nl1->l1m.userint = 0; nl1->l1m.printdebug = l1m_debug; nl1->dch = dch; nl1->dcb = dcb; - mISDN_FsmInitTimer(&nl1->l1m, &nl1->timer); + mISDN_FsmInitTimer(&nl1->l1m, &nl1->timer3); + mISDN_FsmInitTimer(&nl1->l1m, &nl1->timerX); __module_get(THIS_MODULE); dch->l1 = nl1; return 0; diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c index 39d7375fa55..0dc8abca140 100644 --- a/drivers/isdn/mISDN/layer2.c +++ b/drivers/isdn/mISDN/layer2.c @@ -58,6 +58,8 @@ enum { EV_L1_DEACTIVATE, EV_L2_T200, EV_L2_T203, + EV_L2_T200I, + EV_L2_T203I, EV_L2_SET_OWN_BUSY, EV_L2_CLEAR_OWN_BUSY, EV_L2_FRAME_ERROR, @@ -86,6 +88,8 @@ static char *strL2Event[] = "EV_L1_DEACTIVATE", "EV_L2_T200", "EV_L2_T203", + "EV_L2_T200I", + "EV_L2_T203I", "EV_L2_SET_OWN_BUSY", "EV_L2_CLEAR_OWN_BUSY", "EV_L2_FRAME_ERROR", @@ -106,8 +110,8 @@ l2m_debug(struct FsmInst *fi, char *fmt, ...) 
vaf.fmt = fmt; vaf.va = &va; - printk(KERN_DEBUG "l2 (sapi %d tei %d): %pV\n", - l2->sapi, l2->tei, &vaf); + printk(KERN_DEBUG "%s l2 (sapi %d tei %d): %pV\n", + mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei, &vaf); va_end(va); } @@ -150,7 +154,8 @@ l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb) mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr; err = l2->up->send(l2->up, skb); if (err) { - printk(KERN_WARNING "%s: err=%d\n", __func__, err); + printk(KERN_WARNING "%s: dev %s err=%d\n", __func__, + mISDNDevName4ch(&l2->ch), err); dev_kfree_skb(skb); } } @@ -174,7 +179,8 @@ l2up_create(struct layer2 *l2, u_int prim, int len, void *arg) memcpy(skb_put(skb, len), arg, len); err = l2->up->send(l2->up, skb); if (err) { - printk(KERN_WARNING "%s: err=%d\n", __func__, err); + printk(KERN_WARNING "%s: dev %s err=%d\n", __func__, + mISDNDevName4ch(&l2->ch), err); dev_kfree_skb(skb); } } @@ -185,7 +191,8 @@ l2down_skb(struct layer2 *l2, struct sk_buff *skb) { ret = l2->ch.recv(l2->ch.peer, skb); if (ret && (*debug & DEBUG_L2_RECV)) - printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret); + printk(KERN_DEBUG "l2down_skb: dev %s ret(%d)\n", + mISDNDevName4ch(&l2->ch), ret); return ret; } @@ -276,12 +283,37 @@ ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) { return ret; } +static void +l2_timeout(struct FsmInst *fi, int event, void *arg) +{ + struct layer2 *l2 = fi->userdata; + struct sk_buff *skb; + struct mISDNhead *hh; + + skb = mI_alloc_skb(0, GFP_ATOMIC); + if (!skb) { + printk(KERN_WARNING "%s: L2(%d,%d) nr:%x timer %s no skb\n", + mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei, + l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203"); + return; + } + hh = mISDN_HEAD_P(skb); + hh->prim = event == EV_L2_T200 ? DL_TIMER200_IND : DL_TIMER203_IND; + hh->id = l2->ch.nr; + if (*debug & DEBUG_TIMER) + printk(KERN_DEBUG "%s: L2(%d,%d) nr:%x timer %s expired\n", + mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei, + l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203"); + if (l2->ch.st) + l2->ch.st->own.recv(&l2->ch.st->own, skb); +} + static int l2mgr(struct layer2 *l2, u_int prim, void *arg) { long c = (long)arg; - printk(KERN_WARNING - "l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c); + printk(KERN_WARNING "l2mgr: dev %s addr:%x prim %x %c\n", + mISDNDevName4ch(&l2->ch), l2->id, prim, (char)c); if (test_bit(FLG_LAPD, &l2->flag) && !test_bit(FLG_FIXED_TEI, &l2->flag)) { switch (c) { @@ -603,8 +635,8 @@ send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr) else { skb = mI_alloc_skb(i, GFP_ATOMIC); if (!skb) { - printk(KERN_WARNING "%s: can't alloc skbuff\n", - __func__); + printk(KERN_WARNING "%s: can't alloc skbuff in %s\n", + mISDNDevName4ch(&l2->ch), __func__); return; } } @@ -1089,8 +1121,8 @@ enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf) tmp[i++] = (l2->vr << 5) | typ | (pf ? 
0x10 : 0); skb = mI_alloc_skb(i, GFP_ATOMIC); if (!skb) { - printk(KERN_WARNING - "isdnl2 can't alloc sbbuff for enquiry_cr\n"); + printk(KERN_WARNING "%s: isdnl2 can't alloc sbbuff in %s\n", + mISDNDevName4ch(&l2->ch), __func__); return; } memcpy(skb_put(skb, i), tmp, i); @@ -1150,7 +1182,7 @@ invoke_retransmission(struct layer2 *l2, unsigned int nr) else printk(KERN_WARNING "%s: windowar[%d] is NULL\n", - __func__, p1); + mISDNDevName4ch(&l2->ch), p1); l2->windowar[p1] = NULL; } mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL); @@ -1461,8 +1493,8 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) p1 = (l2->vs - l2->va) % 8; p1 = (p1 + l2->sow) % l2->window; if (l2->windowar[p1]) { - printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n", - p1); + printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n", + mISDNDevName4ch(&l2->ch), p1); dev_kfree_skb(l2->windowar[p1]); } l2->windowar[p1] = skb; @@ -1482,12 +1514,14 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) memcpy(skb_push(nskb, i), header, i); else { printk(KERN_WARNING - "isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1); + "%s: L2 pull_iqueue skb header(%d/%d) too short\n", + mISDNDevName4ch(&l2->ch), i, p1); oskb = nskb; nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC); if (!nskb) { dev_kfree_skb(oskb); - printk(KERN_WARNING "%s: no skb mem\n", __func__); + printk(KERN_WARNING "%s: no skb mem in %s\n", + mISDNDevName4ch(&l2->ch), __func__); return; } memcpy(skb_put(nskb, i), header, i); @@ -1814,11 +1848,16 @@ static struct FsmNode L2FnList[] = {ST_L2_8, EV_L2_SUPER, l2_st8_got_super}, {ST_L2_7, EV_L2_I, l2_got_iframe}, {ST_L2_8, EV_L2_I, l2_got_iframe}, - {ST_L2_5, EV_L2_T200, l2_st5_tout_200}, - {ST_L2_6, EV_L2_T200, l2_st6_tout_200}, - {ST_L2_7, EV_L2_T200, l2_st7_tout_200}, - {ST_L2_8, EV_L2_T200, l2_st8_tout_200}, - {ST_L2_7, EV_L2_T203, l2_st7_tout_203}, + {ST_L2_5, EV_L2_T200, l2_timeout}, + {ST_L2_6, EV_L2_T200, l2_timeout}, + {ST_L2_7, EV_L2_T200, l2_timeout}, + {ST_L2_8, EV_L2_T200, l2_timeout}, + {ST_L2_7, EV_L2_T203, l2_timeout}, + {ST_L2_5, EV_L2_T200I, l2_st5_tout_200}, + {ST_L2_6, EV_L2_T200I, l2_st6_tout_200}, + {ST_L2_7, EV_L2_T200I, l2_st7_tout_200}, + {ST_L2_8, EV_L2_T200I, l2_st8_tout_200}, + {ST_L2_7, EV_L2_T203I, l2_st7_tout_203}, {ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue}, {ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy}, {ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy}, @@ -1858,7 +1897,8 @@ ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) ptei = *datap++; if ((psapi & 1) || !(ptei & 1)) { printk(KERN_WARNING - "l2 D-channel frame wrong EA0/EA1\n"); + "%s l2 D-channel frame wrong EA0/EA1\n", + mISDNDevName4ch(&l2->ch)); return ret; } psapi >>= 2; @@ -1867,7 +1907,8 @@ ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) /* not our business */ if (*debug & DEBUG_L2) printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n", - __func__, psapi, l2->sapi); + mISDNDevName4ch(&l2->ch), psapi, + l2->sapi); dev_kfree_skb(skb); return 0; } @@ -1875,7 +1916,7 @@ ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) /* not our business */ if (*debug & DEBUG_L2) printk(KERN_DEBUG "%s: tei %d/%d mismatch\n", - __func__, ptei, l2->tei); + mISDNDevName4ch(&l2->ch), ptei, l2->tei); dev_kfree_skb(skb); return 0; } @@ -1916,7 +1957,8 @@ ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) } else c = 'L'; if (c) { - printk(KERN_WARNING "l2 D-channel frame error %c\n", c); + 
printk(KERN_WARNING "%s:l2 D-channel frame error %c\n", + mISDNDevName4ch(&l2->ch), c); mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c); } return ret; @@ -1930,8 +1972,17 @@ l2_send(struct mISDNchannel *ch, struct sk_buff *skb) int ret = -EINVAL; if (*debug & DEBUG_L2_RECV) - printk(KERN_DEBUG "%s: prim(%x) id(%x) sapi(%d) tei(%d)\n", - __func__, hh->prim, hh->id, l2->sapi, l2->tei); + printk(KERN_DEBUG "%s: %s prim(%x) id(%x) sapi(%d) tei(%d)\n", + __func__, mISDNDevName4ch(&l2->ch), hh->prim, hh->id, + l2->sapi, l2->tei); + if (hh->prim == DL_INTERN_MSG) { + struct mISDNhead *chh = hh + 1; /* saved copy */ + + *hh = *chh; + if (*debug & DEBUG_L2_RECV) + printk(KERN_DEBUG "%s: prim(%x) id(%x) internal msg\n", + mISDNDevName4ch(&l2->ch), hh->prim, hh->id); + } switch (hh->prim) { case PH_DATA_IND: ret = ph_data_indication(l2, hh, skb); @@ -1987,6 +2038,12 @@ l2_send(struct mISDNchannel *ch, struct sk_buff *skb) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ, skb); break; + case DL_TIMER200_IND: + mISDN_FsmEvent(&l2->l2m, EV_L2_T200I, NULL); + break; + case DL_TIMER203_IND: + mISDN_FsmEvent(&l2->l2m, EV_L2_T203I, NULL); + break; default: if (*debug & DEBUG_L2) l2m_debug(&l2->l2m, "l2 unknown pr %04x", @@ -2005,7 +2062,8 @@ tei_l2(struct layer2 *l2, u_int cmd, u_long arg) int ret = -EINVAL; if (*debug & DEBUG_L2_TEI) - printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd); + printk(KERN_DEBUG "%s: cmd(%x) in %s\n", + mISDNDevName4ch(&l2->ch), cmd, __func__); switch (cmd) { case (MDL_ASSIGN_REQ): ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg); @@ -2018,7 +2076,8 @@ tei_l2(struct layer2 *l2, u_int cmd, u_long arg) break; case (MDL_ERROR_RSP): /* ETS 300-125 5.3.2.1 Test: TC13010 */ - printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n"); + printk(KERN_NOTICE "%s: MDL_ERROR|REQ (tei_l2)\n", + mISDNDevName4ch(&l2->ch)); ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL); break; } @@ -2050,7 +2109,8 @@ l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg) u_int info; if (*debug & DEBUG_L2_CTRL) - printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd); + printk(KERN_DEBUG "%s: %s cmd(%x)\n", + mISDNDevName4ch(ch), __func__, cmd); switch (cmd) { case OPEN_CHANNEL: diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c index ba2bc0c776e..be88728f110 100644 --- a/drivers/isdn/mISDN/tei.c +++ b/drivers/isdn/mISDN/tei.c @@ -790,18 +790,23 @@ tei_ph_data_ind(struct teimgr *tm, u_int mt, u_char *dp, int len) static struct layer2 * create_new_tei(struct manager *mgr, int tei, int sapi) { - u_long opt = 0; - u_long flags; - int id; - struct layer2 *l2; + unsigned long opt = 0; + unsigned long flags; + int id; + struct layer2 *l2; + struct channel_req rq; if (!mgr->up) return NULL; if ((tei >= 0) && (tei < 64)) test_and_set_bit(OPTION_L2_FIXEDTEI, &opt); - if (mgr->ch.st->dev->Dprotocols - & ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1))) + if (mgr->ch.st->dev->Dprotocols & ((1 << ISDN_P_TE_E1) | + (1 << ISDN_P_NT_E1))) { test_and_set_bit(OPTION_L2_PMX, &opt); + rq.protocol = ISDN_P_NT_E1; + } else { + rq.protocol = ISDN_P_NT_S0; + } l2 = create_l2(mgr->up, ISDN_P_LAPD_NT, opt, tei, sapi); if (!l2) { printk(KERN_WARNING "%s:no memory for layer2\n", __func__); @@ -836,6 +841,14 @@ create_new_tei(struct manager *mgr, int tei, int sapi) l2->ch.recv = mgr->ch.recv; l2->ch.peer = mgr->ch.peer; l2->ch.ctrl(&l2->ch, OPEN_CHANNEL, NULL); + /* We need open here L1 for the manager as well (refcounting) */ + rq.adr.dev = mgr->ch.st->dev->id; + id = mgr->ch.st->own.ctrl(&mgr->ch.st->own, 
OPEN_CHANNEL, &rq); + if (id < 0) { + printk(KERN_WARNING "%s: cannot open L1\n", __func__); + l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL); + l2 = NULL; + } } return l2; } @@ -978,10 +991,11 @@ TEIrelease(struct layer2 *l2) static int create_teimgr(struct manager *mgr, struct channel_req *crq) { - struct layer2 *l2; - u_long opt = 0; - u_long flags; - int id; + struct layer2 *l2; + unsigned long opt = 0; + unsigned long flags; + int id; + struct channel_req l1rq; if (*debug & DEBUG_L2_TEI) printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n", @@ -1016,6 +1030,7 @@ create_teimgr(struct manager *mgr, struct channel_req *crq) if (crq->protocol == ISDN_P_LAPD_TE) test_and_set_bit(MGR_OPT_USER, &mgr->options); } + l1rq.adr = crq->adr; if (mgr->ch.st->dev->Dprotocols & ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1))) test_and_set_bit(OPTION_L2_PMX, &opt); @@ -1023,6 +1038,8 @@ create_teimgr(struct manager *mgr, struct channel_req *crq) mgr->up = crq->ch; id = DL_INFO_L2_CONNECT; teiup_create(mgr, DL_INFORMATION_IND, sizeof(id), &id); + if (test_bit(MGR_PH_ACTIVE, &mgr->options)) + teiup_create(mgr, PH_ACTIVATE_IND, 0, NULL); crq->ch = NULL; if (!list_empty(&mgr->layer2)) { read_lock_irqsave(&mgr->lock, flags); @@ -1053,24 +1070,34 @@ create_teimgr(struct manager *mgr, struct channel_req *crq) l2->tm->tei_m.fsm = &teifsmu; l2->tm->tei_m.state = ST_TEI_NOP; l2->tm->tval = 1000; /* T201 1 sec */ + if (test_bit(OPTION_L2_PMX, &opt)) + l1rq.protocol = ISDN_P_TE_E1; + else + l1rq.protocol = ISDN_P_TE_S0; } else { l2->tm->tei_m.fsm = &teifsmn; l2->tm->tei_m.state = ST_TEI_NOP; l2->tm->tval = 2000; /* T202 2 sec */ + if (test_bit(OPTION_L2_PMX, &opt)) + l1rq.protocol = ISDN_P_NT_E1; + else + l1rq.protocol = ISDN_P_NT_S0; } mISDN_FsmInitTimer(&l2->tm->tei_m, &l2->tm->timer); write_lock_irqsave(&mgr->lock, flags); id = get_free_id(mgr); list_add_tail(&l2->list, &mgr->layer2); write_unlock_irqrestore(&mgr->lock, flags); - if (id < 0) { - l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL); - } else { + if (id >= 0) { l2->ch.nr = id; l2->up->nr = id; crq->ch = &l2->ch; - id = 0; + /* We need open here L1 for the manager as well (refcounting) */ + id = mgr->ch.st->own.ctrl(&mgr->ch.st->own, OPEN_CHANNEL, + &l1rq); } + if (id < 0) + l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL); return id; } @@ -1096,12 +1123,16 @@ mgr_send(struct mISDNchannel *ch, struct sk_buff *skb) break; case PH_ACTIVATE_IND: test_and_set_bit(MGR_PH_ACTIVE, &mgr->options); + if (mgr->up) + teiup_create(mgr, PH_ACTIVATE_IND, 0, NULL); mISDN_FsmEvent(&mgr->deact, EV_ACTIVATE_IND, NULL); do_send(mgr); ret = 0; break; case PH_DEACTIVATE_IND: test_and_clear_bit(MGR_PH_ACTIVE, &mgr->options); + if (mgr->up) + teiup_create(mgr, PH_DEACTIVATE_IND, 0, NULL); mISDN_FsmEvent(&mgr->deact, EV_DEACTIVATE_IND, NULL); ret = 0; break; @@ -1263,7 +1294,7 @@ static int mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb) { struct manager *mgr = container_of(ch, struct manager, bcast); - struct mISDNhead *hh = mISDN_HEAD_P(skb); + struct mISDNhead *hhc, *hh = mISDN_HEAD_P(skb); struct sk_buff *cskb = NULL; struct layer2 *l2; u_long flags; @@ -1278,10 +1309,17 @@ mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb) skb = NULL; } else { if (!cskb) - cskb = skb_copy(skb, GFP_KERNEL); + cskb = skb_copy(skb, GFP_ATOMIC); } if (cskb) { - ret = l2->ch.send(&l2->ch, cskb); + hhc = mISDN_HEAD_P(cskb); + /* save original header behind normal header */ + hhc++; + *hhc = *hh; + hhc--; + hhc->prim = DL_INTERN_MSG; + hhc->id = l2->ch.nr; + ret = ch->st->own.recv(&ch->st->own, 
cskb); if (ret) { if (*debug & DEBUG_SEND_ERR) printk(KERN_DEBUG diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h index c171afa9323..69e9d546356 100644 --- a/drivers/message/fusion/mptlan.h +++ b/drivers/message/fusion/mptlan.h @@ -69,7 +69,6 @@ #include <linux/spinlock.h> #include <linux/workqueue.h> #include <linux/delay.h> -// #include <linux/trdevice.h> #include <asm/uaccess.h> #include <asm/io.h> diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index b98285446a5..0c2bd806950 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -66,10 +66,7 @@ config DUMMY <http://www.tldp.org/docs.html#guide>. To compile this driver as a module, choose M here: the module - will be called dummy. If you want to use more than one dummy - device at a time, you need to compile this driver as a module. - Instead of 'dummy', the devices will then be called 'dummy0', - 'dummy1' etc. + will be called dummy. config EQUALIZER tristate "EQL (serial line load balancing) support" @@ -285,8 +282,6 @@ source "drivers/net/slip/Kconfig" source "drivers/s390/net/Kconfig" -source "drivers/net/tokenring/Kconfig" - source "drivers/net/usb/Kconfig" source "drivers/net/wireless/Kconfig" diff --git a/drivers/net/Makefile b/drivers/net/Makefile index a6b8ce11a22..3d375ca128a 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -50,7 +50,6 @@ obj-$(CONFIG_SLIP) += slip/ obj-$(CONFIG_SLHC) += slip/ obj-$(CONFIG_NET_SB1000) += sb1000.o obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o -obj-$(CONFIG_TR) += tokenring/ obj-$(CONFIG_WAN) += wan/ obj-$(CONFIG_WLAN) += wireless/ obj-$(CONFIG_WIMAX) += wimax/ diff --git a/drivers/net/Space.c b/drivers/net/Space.c index 88bbd8ffa7f..e3f0faca98d 100644 --- a/drivers/net/Space.c +++ b/drivers/net/Space.c @@ -29,7 +29,6 @@ */ #include <linux/netdevice.h> #include <linux/etherdevice.h> -#include <linux/trdevice.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/netlink.h> @@ -134,22 +133,9 @@ static struct devprobe2 eisa_probes[] __initdata = { {NULL, 0}, }; -static struct devprobe2 mca_probes[] __initdata = { -#ifdef CONFIG_NE2_MCA - {ne2_probe, 0}, -#endif -#ifdef CONFIG_ELMC /* 3c523 */ - {elmc_probe, 0}, -#endif -#ifdef CONFIG_ELMC_II /* 3c527 */ - {mc32_probe, 0}, -#endif - {NULL, 0}, -}; - /* * ISA probes that touch addresses < 0x400 (including those that also - * look for EISA/PCI/MCA cards in addition to ISA cards). + * look for EISA/PCI cards in addition to ISA cards). 
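The mgr_bcast()/l2_send() changes in the mISDN diffs above work as a pair: instead of calling each layer2's send routine directly, the broadcast path now parks the frame's original mISDN header in the skb control block directly behind the live header, rewrites the live header to DL_INTERN_MSG with the owning channel number in id, and feeds the copy back through the stack's own receive entry; l2_send() recognises the marker and restores the saved header before normal dispatch. A trimmed sketch of that save/restore pairing, using the names from the patch (mISDN_HEAD_P() returns the mISDNhead kept at the start of skb->cb, and the 8-byte header fits twice in the 48-byte cb):

        /* sending side (mgr_bcast): park the real header behind the live one */
        static void mark_internal(struct sk_buff *cskb, const struct mISDNhead *hh, int nr)
        {
                struct mISDNhead *hhc = mISDN_HEAD_P(cskb);

                *(hhc + 1) = *hh;               /* saved copy of prim/id         */
                hhc->prim  = DL_INTERN_MSG;     /* marker for the receive path   */
                hhc->id    = nr;                /* which layer2 instance owns it */
        }

        /* receiving side (l2_send): undo the marker before dispatching */
        if (hh->prim == DL_INTERN_MSG)
                *hh = *(hh + 1);                /* restore the saved header */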
*/ static struct devprobe2 isa_probes[] __initdata = { #if defined(CONFIG_HP100) && defined(CONFIG_ISA) /* ISA, EISA */ @@ -279,51 +265,10 @@ static void __init ethif_probe2(int unit) (void)( probe_list2(unit, m68k_probes, base_addr == 0) && probe_list2(unit, eisa_probes, base_addr == 0) && - probe_list2(unit, mca_probes, base_addr == 0) && probe_list2(unit, isa_probes, base_addr == 0) && probe_list2(unit, parport_probes, base_addr == 0)); } -#ifdef CONFIG_TR -/* Token-ring device probe */ -extern int ibmtr_probe_card(struct net_device *); -extern struct net_device *smctr_probe(int unit); - -static struct devprobe2 tr_probes2[] __initdata = { -#ifdef CONFIG_SMCTR - {smctr_probe, 0}, -#endif - {NULL, 0}, -}; - -static __init int trif_probe(int unit) -{ - int err = -ENODEV; -#ifdef CONFIG_IBMTR - struct net_device *dev = alloc_trdev(0); - if (!dev) - return -ENOMEM; - - sprintf(dev->name, "tr%d", unit); - netdev_boot_setup_check(dev); - err = ibmtr_probe_card(dev); - if (err) - free_netdev(dev); -#endif - return err; -} - -static void __init trif_probe2(int unit) -{ - unsigned long base_addr = netdev_boot_base("tr", unit); - - if (base_addr == 1) - return; - probe_list2(unit, tr_probes2, base_addr == 0); -} -#endif - - /* Statically configured drivers -- order matters here. */ static int __init net_olddevs_init(void) { @@ -333,11 +278,6 @@ static int __init net_olddevs_init(void) for (num = 0; num < 8; ++num) sbni_probe(num); #endif -#ifdef CONFIG_TR - for (num = 0; num < 8; ++num) - if (!trif_probe(num)) - trif_probe2(num); -#endif for (num = 0; num < 8; ++num) ethif_probe2(num); diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 2e1f8066f1a..0f59c1564e5 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -332,7 +332,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp) if ((client_info->assigned) && (client_info->ip_src == arp->ip_dst) && (client_info->ip_dst == arp->ip_src) && - (compare_ether_addr_64bits(client_info->mac_dst, arp->mac_src))) { + (!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) { /* update the clients MAC address */ memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN); client_info->ntt = 1; @@ -450,8 +450,8 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave) if (assigned_slave) { rx_hash_table[index].slave = assigned_slave; - if (compare_ether_addr_64bits(rx_hash_table[index].mac_dst, - mac_bcast)) { + if (!ether_addr_equal_64bits(rx_hash_table[index].mac_dst, + mac_bcast)) { bond_info->rx_hashtbl[index].ntt = 1; bond_info->rx_ntt = 1; /* A slave has been removed from the @@ -563,7 +563,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla client_info = &(bond_info->rx_hashtbl[hash_index]); if ((client_info->slave == slave) && - compare_ether_addr_64bits(client_info->mac_dst, mac_bcast)) { + !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) { client_info->ntt = 1; ntt = 1; } @@ -602,9 +602,9 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip) * unicast mac address. 
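The bond_alb.c conversion above (and continuing below) swaps compare_ether_addr_64bits() for ether_addr_equal_64bits(), and the two helpers have opposite truth senses: the old one is memcmp-like and returns non-zero when the addresses differ, the new one returns true when they match, so every call site inverts its test (a bare call becomes a negated one and vice versa). The _64bits variants also read a little past the 6 address bytes for speed, which is why they are only used on buffers with known slack such as Ethernet headers. A small stand-alone illustration of the inversion, with plain memcmp() standing in for the kernel helpers:

        #include <stdbool.h>
        #include <stdio.h>
        #include <string.h>

        /* userspace stand-ins with the same truth senses as the kernel helpers */
        static int compare_ether_addr(const unsigned char *a, const unsigned char *b)
        {
                return memcmp(a, b, 6) != 0;    /* non-zero when different */
        }

        static bool ether_addr_equal(const unsigned char *a, const unsigned char *b)
        {
                return memcmp(a, b, 6) == 0;    /* true when equal */
        }

        int main(void)
        {
                unsigned char x[6] = { 0x00, 0x16, 0x3e, 0x00, 0x00, 0x01 };
                unsigned char y[6] = { 0x00, 0x16, 0x3e, 0x00, 0x00, 0x02 };

                if (compare_ether_addr(x, y))           /* old style: "if different" */
                        printf("differ (old test)\n");
                if (!ether_addr_equal(x, y))            /* new style: same condition */
                        printf("differ (new test)\n");
                return 0;
        }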
*/ if ((client_info->ip_src == src_ip) && - compare_ether_addr_64bits(client_info->slave->dev->dev_addr, - bond->dev->dev_addr) && - compare_ether_addr_64bits(client_info->mac_dst, mac_bcast)) { + !ether_addr_equal_64bits(client_info->slave->dev->dev_addr, + bond->dev->dev_addr) && + !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) { client_info->ntt = 1; bond_info->rx_ntt = 1; } @@ -631,7 +631,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon if ((client_info->ip_src == arp->ip_src) && (client_info->ip_dst == arp->ip_dst)) { /* the entry is already assigned to this client */ - if (compare_ether_addr_64bits(arp->mac_dst, mac_bcast)) { + if (!ether_addr_equal_64bits(arp->mac_dst, mac_bcast)) { /* update mac address from arp */ memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN); } @@ -666,7 +666,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN); client_info->slave = assigned_slave; - if (compare_ether_addr_64bits(client_info->mac_dst, mac_bcast)) { + if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) { client_info->ntt = 1; bond->alb_info.rx_ntt = 1; } else { @@ -1011,18 +1011,18 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla int perm_curr_diff; int perm_bond_diff; - perm_curr_diff = compare_ether_addr_64bits(slave->perm_hwaddr, - slave->dev->dev_addr); - perm_bond_diff = compare_ether_addr_64bits(slave->perm_hwaddr, - bond->dev->dev_addr); + perm_curr_diff = !ether_addr_equal_64bits(slave->perm_hwaddr, + slave->dev->dev_addr); + perm_bond_diff = !ether_addr_equal_64bits(slave->perm_hwaddr, + bond->dev->dev_addr); if (perm_curr_diff && perm_bond_diff) { struct slave *tmp_slave; int i, found = 0; bond_for_each_slave(bond, tmp_slave, i) { - if (!compare_ether_addr_64bits(slave->perm_hwaddr, - tmp_slave->dev->dev_addr)) { + if (ether_addr_equal_64bits(slave->perm_hwaddr, + tmp_slave->dev->dev_addr)) { found = 1; break; } @@ -1076,10 +1076,10 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav * check uniqueness of slave's mac address against the other * slaves in the bond. 
*/ - if (compare_ether_addr_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) { + if (!ether_addr_equal_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) { bond_for_each_slave(bond, tmp_slave1, i) { - if (!compare_ether_addr_64bits(tmp_slave1->dev->dev_addr, - slave->dev->dev_addr)) { + if (ether_addr_equal_64bits(tmp_slave1->dev->dev_addr, + slave->dev->dev_addr)) { found = 1; break; } @@ -1101,8 +1101,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav bond_for_each_slave(bond, tmp_slave1, i) { found = 0; bond_for_each_slave(bond, tmp_slave2, j) { - if (!compare_ether_addr_64bits(tmp_slave1->perm_hwaddr, - tmp_slave2->dev->dev_addr)) { + if (ether_addr_equal_64bits(tmp_slave1->perm_hwaddr, + tmp_slave2->dev->dev_addr)) { found = 1; break; } @@ -1117,8 +1117,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav } if (!has_bond_addr) { - if (!compare_ether_addr_64bits(tmp_slave1->dev->dev_addr, - bond->dev->dev_addr)) { + if (ether_addr_equal_64bits(tmp_slave1->dev->dev_addr, + bond->dev->dev_addr)) { has_bond_addr = tmp_slave1; } @@ -1259,7 +1259,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) case ETH_P_IP: { const struct iphdr *iph = ip_hdr(skb); - if (!compare_ether_addr_64bits(eth_data->h_dest, mac_bcast) || + if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) || (iph->daddr == ip_bcast) || (iph->protocol == IPPROTO_IGMP)) { do_tx_balance = 0; @@ -1273,7 +1273,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) /* IPv6 doesn't really use broadcast mac address, but leave * that here just in case. */ - if (!compare_ether_addr_64bits(eth_data->h_dest, mac_bcast)) { + if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) { do_tx_balance = 0; break; } @@ -1281,7 +1281,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) /* IPv6 uses all-nodes multicast as an equivalent to * broadcasts in IPv4. */ - if (!compare_ether_addr_64bits(eth_data->h_dest, mac_v6_allmcast)) { + if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) { do_tx_balance = 0; break; } @@ -1605,8 +1605,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave struct slave *tmp_slave; /* find slave that is holding the bond's mac address */ bond_for_each_slave(bond, tmp_slave, i) { - if (!compare_ether_addr_64bits(tmp_slave->dev->dev_addr, - bond->dev->dev_addr)) { + if (ether_addr_equal_64bits(tmp_slave->dev->dev_addr, + bond->dev->dev_addr)) { swap_slave = tmp_slave; break; } @@ -1683,8 +1683,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) swap_slave = NULL; bond_for_each_slave(bond, slave, i) { - if (!compare_ether_addr_64bits(slave->dev->dev_addr, - bond_dev->dev_addr)) { + if (ether_addr_equal_64bits(slave->dev->dev_addr, + bond_dev->dev_addr)) { swap_slave = slave; break; } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index bc13b3d7743..2ee8cf9e8a3 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -549,9 +549,9 @@ down: * Get link speed and duplex from the slave's base driver * using ethtool. If for some reason the call fails or the * values are invalid, set speed and duplex to -1, - * and return error. + * and return. 
*/ -static int bond_update_speed_duplex(struct slave *slave) +static void bond_update_speed_duplex(struct slave *slave) { struct net_device *slave_dev = slave->dev; struct ethtool_cmd ecmd; @@ -563,24 +563,24 @@ static int bond_update_speed_duplex(struct slave *slave) res = __ethtool_get_settings(slave_dev, &ecmd); if (res < 0) - return -1; + return; slave_speed = ethtool_cmd_speed(&ecmd); if (slave_speed == 0 || slave_speed == ((__u32) -1)) - return -1; + return; switch (ecmd.duplex) { case DUPLEX_FULL: case DUPLEX_HALF: break; default: - return -1; + return; } slave->speed = slave_speed; slave->duplex = ecmd.duplex; - return 0; + return; } /* @@ -1731,7 +1731,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) read_lock(&bond->lock); - new_slave->last_arp_rx = jiffies; + new_slave->last_arp_rx = jiffies - + (msecs_to_jiffies(bond->params.arp_interval) + 1); if (bond->params.miimon && !bond->params.use_carrier) { link_reporting = bond_check_dev_link(bond, slave_dev, 1); @@ -1756,22 +1757,30 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) } /* check for initial state */ - if (!bond->params.miimon || - (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS)) { - if (bond->params.updelay) { - pr_debug("Initial state of slave_dev is BOND_LINK_BACK\n"); - new_slave->link = BOND_LINK_BACK; - new_slave->delay = bond->params.updelay; + if (bond->params.miimon) { + if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) { + if (bond->params.updelay) { + new_slave->link = BOND_LINK_BACK; + new_slave->delay = bond->params.updelay; + } else { + new_slave->link = BOND_LINK_UP; + } } else { - pr_debug("Initial state of slave_dev is BOND_LINK_UP\n"); - new_slave->link = BOND_LINK_UP; + new_slave->link = BOND_LINK_DOWN; } - new_slave->jiffies = jiffies; + } else if (bond->params.arp_interval) { + new_slave->link = (netif_carrier_ok(slave_dev) ? + BOND_LINK_UP : BOND_LINK_DOWN); } else { - pr_debug("Initial state of slave_dev is BOND_LINK_DOWN\n"); - new_slave->link = BOND_LINK_DOWN; + new_slave->link = BOND_LINK_UP; } + if (new_slave->link != BOND_LINK_DOWN) + new_slave->jiffies = jiffies; + pr_debug("Initial state of slave_dev is BOND_LINK_%s\n", + new_slave->link == BOND_LINK_DOWN ? "DOWN" : + (new_slave->link == BOND_LINK_UP ? "UP" : "BACK")); + bond_update_speed_duplex(new_slave); if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { @@ -1957,7 +1966,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) write_lock_bh(&bond->lock); if (!bond->params.fail_over_mac) { - if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) && + if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) && bond->slave_cnt > 1) pr_warning("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. 
Set the HWaddr of %s to a different address to avoid conflicts.\n", bond_dev->name, slave_dev->name, @@ -4826,12 +4835,9 @@ static int bond_validate(struct nlattr *tb[], struct nlattr *data[]) return 0; } -static int bond_get_tx_queues(struct net *net, struct nlattr *tb[], - unsigned int *num_queues, - unsigned int *real_num_queues) +static int bond_get_tx_queues(struct net *net, struct nlattr *tb[]) { - *num_queues = tx_queues; - return 0; + return tx_queues; } static struct rtnl_link_ops bond_link_ops __read_mostly = { diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index 9c1c8cd5223..1520814c77c 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c @@ -6,6 +6,8 @@ * License terms: GNU General Public License (GPL) version 2. */ +#define pr_fmt(fmt) KBUILD_MODNAME fmt + #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> @@ -19,6 +21,7 @@ #include <linux/if_arp.h> #include <linux/timer.h> #include <linux/rtnetlink.h> +#include <linux/pkt_sched.h> #include <net/caif/caif_layer.h> #include <net/caif/caif_hsi.h> @@ -34,6 +37,10 @@ static int inactivity_timeout = 1000; module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms."); +static int aggregation_timeout = 1; +module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms."); + /* * HSI padding options. * Warning: must be a base of 2 (& operation used) and can not be zero ! @@ -86,24 +93,84 @@ static void cfhsi_inactivity_tout(unsigned long arg) queue_work(cfhsi->wq, &cfhsi->wake_down_work); } +static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi, + const struct sk_buff *skb, + int direction) +{ + struct caif_payload_info *info; + int hpad, tpad, len; + + info = (struct caif_payload_info *)&skb->cb; + hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align); + tpad = PAD_POW2((skb->len + hpad), hsi_tail_align); + len = skb->len + hpad + tpad; + + if (direction > 0) + cfhsi->aggregation_len += len; + else if (direction < 0) + cfhsi->aggregation_len -= len; +} + +static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi) +{ + int i; + + if (cfhsi->aggregation_timeout < 0) + return true; + + for (i = 0; i < CFHSI_PRIO_BEBK; ++i) { + if (cfhsi->qhead[i].qlen) + return true; + } + + /* TODO: Use aggregation_len instead */ + if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS) + return true; + + return false; +} + +static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi) +{ + struct sk_buff *skb; + int i; + + for (i = 0; i < CFHSI_PRIO_LAST; ++i) { + skb = skb_dequeue(&cfhsi->qhead[i]); + if (skb) + break; + } + + return skb; +} + +static int cfhsi_tx_queue_len(struct cfhsi *cfhsi) +{ + int i, len = 0; + for (i = 0; i < CFHSI_PRIO_LAST; ++i) + len += skb_queue_len(&cfhsi->qhead[i]); + return len; +} + static void cfhsi_abort_tx(struct cfhsi *cfhsi) { struct sk_buff *skb; for (;;) { spin_lock_bh(&cfhsi->lock); - skb = skb_dequeue(&cfhsi->qhead); + skb = cfhsi_dequeue(cfhsi); if (!skb) break; cfhsi->ndev->stats.tx_errors++; cfhsi->ndev->stats.tx_dropped++; + cfhsi_update_aggregation_stats(cfhsi, skb, -1); spin_unlock_bh(&cfhsi->lock); kfree_skb(skb); } cfhsi->tx_state = CFHSI_TX_STATE_IDLE; if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - mod_timer(&cfhsi->timer, + mod_timer(&cfhsi->inactivity_timer, jiffies + cfhsi->inactivity_timeout); spin_unlock_bh(&cfhsi->lock); } @@ -169,7 +236,7 @@ static int cfhsi_tx_frm(struct cfhsi_desc 
*desc, struct cfhsi *cfhsi) struct sk_buff *skb; u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ; - skb = skb_dequeue(&cfhsi->qhead); + skb = cfhsi_dequeue(cfhsi); if (!skb) return 0; @@ -196,11 +263,16 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi) pemb += hpad; /* Update network statistics. */ + spin_lock_bh(&cfhsi->lock); cfhsi->ndev->stats.tx_packets++; cfhsi->ndev->stats.tx_bytes += skb->len; + cfhsi_update_aggregation_stats(cfhsi, skb, -1); + spin_unlock_bh(&cfhsi->lock); /* Copy in embedded CAIF frame. */ skb_copy_bits(skb, 0, pemb, skb->len); + + /* Consume the SKB */ consume_skb(skb); skb = NULL; } @@ -214,7 +286,7 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi) int tpad = 0; if (!skb) - skb = skb_dequeue(&cfhsi->qhead); + skb = cfhsi_dequeue(cfhsi); if (!skb) break; @@ -233,8 +305,11 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi) pfrm += hpad; /* Update network statistics. */ + spin_lock_bh(&cfhsi->lock); cfhsi->ndev->stats.tx_packets++; cfhsi->ndev->stats.tx_bytes += skb->len; + cfhsi_update_aggregation_stats(cfhsi, skb, -1); + spin_unlock_bh(&cfhsi->lock); /* Copy in CAIF frame. */ skb_copy_bits(skb, 0, pfrm, skb->len); @@ -244,6 +319,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi) /* Update frame pointer. */ pfrm += skb->len + tpad; + + /* Consume the SKB */ consume_skb(skb); skb = NULL; @@ -258,8 +335,7 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi) } /* Check if we can piggy-back another descriptor. */ - skb = skb_peek(&cfhsi->qhead); - if (skb) + if (cfhsi_can_send_aggregate(cfhsi)) desc->header |= CFHSI_PIGGY_DESC; else desc->header &= ~CFHSI_PIGGY_DESC; @@ -267,61 +343,71 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi) return CFHSI_DESC_SZ + pld_len; } -static void cfhsi_tx_done(struct cfhsi *cfhsi) +static void cfhsi_start_tx(struct cfhsi *cfhsi) { - struct cfhsi_desc *desc = NULL; - int len = 0; - int res; + struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf; + int len, res; dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) return; - desc = (struct cfhsi_desc *)cfhsi->tx_buf; - do { - /* - * Send flow on if flow off has been previously signalled - * and number of packets is below low water mark. - */ - spin_lock_bh(&cfhsi->lock); - if (cfhsi->flow_off_sent && - cfhsi->qhead.qlen <= cfhsi->q_low_mark && - cfhsi->cfdev.flowctrl) { - - cfhsi->flow_off_sent = 0; - cfhsi->cfdev.flowctrl(cfhsi->ndev, ON); - } - spin_unlock_bh(&cfhsi->lock); - /* Create HSI frame. */ - do { - len = cfhsi_tx_frm(desc, cfhsi); - if (!len) { - spin_lock_bh(&cfhsi->lock); - if (unlikely(skb_peek(&cfhsi->qhead))) { - spin_unlock_bh(&cfhsi->lock); - continue; - } - cfhsi->tx_state = CFHSI_TX_STATE_IDLE; - /* Start inactivity timer. */ - mod_timer(&cfhsi->timer, - jiffies + cfhsi->inactivity_timeout); + len = cfhsi_tx_frm(desc, cfhsi); + if (!len) { + spin_lock_bh(&cfhsi->lock); + if (unlikely(cfhsi_tx_queue_len(cfhsi))) { spin_unlock_bh(&cfhsi->lock); - goto done; + res = -EAGAIN; + continue; } - } while (!len); + cfhsi->tx_state = CFHSI_TX_STATE_IDLE; + /* Start inactivity timer. */ + mod_timer(&cfhsi->inactivity_timer, + jiffies + cfhsi->inactivity_timeout); + spin_unlock_bh(&cfhsi->lock); + break; + } /* Set up new transfer. 
*/ res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev); - if (WARN_ON(res < 0)) { + if (WARN_ON(res < 0)) dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n", __func__, res); - } } while (res < 0); +} + +static void cfhsi_tx_done(struct cfhsi *cfhsi) +{ + dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); + + if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) + return; + + /* + * Send flow on if flow off has been previously signalled + * and number of packets is below low water mark. + */ + spin_lock_bh(&cfhsi->lock); + if (cfhsi->flow_off_sent && + cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark && + cfhsi->cfdev.flowctrl) { + + cfhsi->flow_off_sent = 0; + cfhsi->cfdev.flowctrl(cfhsi->ndev, ON); + } + + if (cfhsi_can_send_aggregate(cfhsi)) { + spin_unlock_bh(&cfhsi->lock); + cfhsi_start_tx(cfhsi); + } else { + mod_timer(&cfhsi->aggregation_timer, + jiffies + cfhsi->aggregation_timeout); + spin_unlock_bh(&cfhsi->lock); + } -done: return; } @@ -560,7 +646,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi) /* Update inactivity timer if pending. */ spin_lock_bh(&cfhsi->lock); - mod_timer_pending(&cfhsi->timer, + mod_timer_pending(&cfhsi->inactivity_timer, jiffies + cfhsi->inactivity_timeout); spin_unlock_bh(&cfhsi->lock); @@ -793,12 +879,12 @@ wake_ack: spin_lock_bh(&cfhsi->lock); - /* Resume transmit if queue is not empty. */ - if (!skb_peek(&cfhsi->qhead)) { + /* Resume transmit if queues are not empty. */ + if (!cfhsi_tx_queue_len(cfhsi)) { dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n", __func__); /* Start inactivity timer. */ - mod_timer(&cfhsi->timer, + mod_timer(&cfhsi->inactivity_timer, jiffies + cfhsi->inactivity_timeout); spin_unlock_bh(&cfhsi->lock); return; @@ -934,20 +1020,53 @@ static void cfhsi_wake_down_cb(struct cfhsi_drv *drv) wake_up_interruptible(&cfhsi->wake_down_wait); } +static void cfhsi_aggregation_tout(unsigned long arg) +{ + struct cfhsi *cfhsi = (struct cfhsi *)arg; + + dev_dbg(&cfhsi->ndev->dev, "%s.\n", + __func__); + + cfhsi_start_tx(cfhsi); +} + static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) { struct cfhsi *cfhsi = NULL; int start_xfer = 0; int timer_active; + int prio; if (!dev) return -EINVAL; cfhsi = netdev_priv(dev); + switch (skb->priority) { + case TC_PRIO_BESTEFFORT: + case TC_PRIO_FILLER: + case TC_PRIO_BULK: + prio = CFHSI_PRIO_BEBK; + break; + case TC_PRIO_INTERACTIVE_BULK: + prio = CFHSI_PRIO_VI; + break; + case TC_PRIO_INTERACTIVE: + prio = CFHSI_PRIO_VO; + break; + case TC_PRIO_CONTROL: + default: + prio = CFHSI_PRIO_CTL; + break; + } + spin_lock_bh(&cfhsi->lock); - skb_queue_tail(&cfhsi->qhead, skb); + /* Update aggregation statistics */ + cfhsi_update_aggregation_stats(cfhsi, skb, 1); + + /* Queue the SKB */ + skb_queue_tail(&cfhsi->qhead[prio], skb); /* Sanity check; xmit should not be called after unregister_netdev */ if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) { @@ -958,7 +1077,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) /* Send flow off if number of packets is above high water mark. 
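The caif_hsi rework above replaces the single qhead with four priority queues (CFHSI_PRIO_CTL, _VO, _VI and _BEBK, selected from skb->priority via the TC_PRIO_* classes) and gates follow-on transfers on cfhsi_can_send_aggregate(): when a transfer completes, or another frame is queued while the link is busy, the backlog is flushed immediately only if some class above best-effort has a frame waiting or the best-effort queue has reached CFHSI_MAX_PKTS; otherwise the frames wait for aggregation_timer, armed in cfhsi_tx_done(), to fire (note the del_timer() > 0 test in cfhsi_xmit(), which only flushes when an aggregation window was actually open). A condensed restatement of that gate, not new API, using the field names from the patch:

        static bool ready_to_send(struct cfhsi *cfhsi)
        {
                int i;

                if (cfhsi->aggregation_timeout < 0)     /* aggregation disabled */
                        return true;

                for (i = 0; i < CFHSI_PRIO_BEBK; i++)   /* CTL, VO or VI queued? */
                        if (cfhsi->qhead[i].qlen)
                                return true;

                /* enough bulk/best-effort frames to make a transfer worthwhile */
                return cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS;
        }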
*/ if (!cfhsi->flow_off_sent && - cfhsi->qhead.qlen > cfhsi->q_high_mark && + cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark && cfhsi->cfdev.flowctrl) { cfhsi->flow_off_sent = 1; cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF); @@ -970,12 +1089,18 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) } if (!start_xfer) { + /* Send aggregate if it is possible */ + bool aggregate_ready = + cfhsi_can_send_aggregate(cfhsi) && + del_timer(&cfhsi->aggregation_timer) > 0; spin_unlock_bh(&cfhsi->lock); + if (aggregate_ready) + cfhsi_start_tx(cfhsi); return 0; } /* Delete inactivity timer if started. */ - timer_active = del_timer_sync(&cfhsi->timer); + timer_active = del_timer_sync(&cfhsi->inactivity_timer); spin_unlock_bh(&cfhsi->lock); @@ -1004,28 +1129,11 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -static int cfhsi_open(struct net_device *dev) -{ - netif_wake_queue(dev); - - return 0; -} - -static int cfhsi_close(struct net_device *dev) -{ - netif_stop_queue(dev); - - return 0; -} - -static const struct net_device_ops cfhsi_ops = { - .ndo_open = cfhsi_open, - .ndo_stop = cfhsi_close, - .ndo_start_xmit = cfhsi_xmit -}; +static const struct net_device_ops cfhsi_ops; static void cfhsi_setup(struct net_device *dev) { + int i; struct cfhsi *cfhsi = netdev_priv(dev); dev->features = 0; dev->netdev_ops = &cfhsi_ops; @@ -1034,7 +1142,8 @@ static void cfhsi_setup(struct net_device *dev) dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; dev->tx_queue_len = 0; dev->destructor = free_netdev; - skb_queue_head_init(&cfhsi->qhead); + for (i = 0; i < CFHSI_PRIO_LAST; ++i) + skb_queue_head_init(&cfhsi->qhead[i]); cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; cfhsi->cfdev.use_frag = false; cfhsi->cfdev.use_stx = false; @@ -1046,7 +1155,7 @@ int cfhsi_probe(struct platform_device *pdev) { struct cfhsi *cfhsi = NULL; struct net_device *ndev; - struct cfhsi_dev *dev; + int res; ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup); @@ -1057,6 +1166,34 @@ int cfhsi_probe(struct platform_device *pdev) cfhsi->ndev = ndev; cfhsi->pdev = pdev; + /* Assign the HSI device. */ + cfhsi->dev = pdev->dev.platform_data; + + /* Assign the driver to this HSI device. */ + cfhsi->dev->drv = &cfhsi->drv; + + /* Register network device. */ + res = register_netdev(ndev); + if (res) { + dev_err(&ndev->dev, "%s: Registration error: %d.\n", + __func__, res); + free_netdev(ndev); + } + /* Add CAIF HSI device to list. */ + spin_lock(&cfhsi_list_lock); + list_add_tail(&cfhsi->list, &cfhsi_list); + spin_unlock(&cfhsi_list_lock); + + return res; +} + +static int cfhsi_open(struct net_device *ndev) +{ + struct cfhsi *cfhsi = netdev_priv(ndev); + int res; + + clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits); + /* Initialize state vaiables. */ cfhsi->tx_state = CFHSI_TX_STATE_IDLE; cfhsi->rx_state.state = CFHSI_RX_STATE_DESC; @@ -1066,12 +1203,6 @@ int cfhsi_probe(struct platform_device *pdev) cfhsi->q_low_mark = LOW_WATER_MARK; cfhsi->q_high_mark = HIGH_WATER_MARK; - /* Assign the HSI device. */ - dev = (struct cfhsi_dev *)pdev->dev.platform_data; - cfhsi->dev = dev; - - /* Assign the driver to this HSI device. */ - dev->drv = &cfhsi->drv; /* * Allocate a TX buffer with the size of a HSI packet descriptors @@ -1111,6 +1242,9 @@ int cfhsi_probe(struct platform_device *pdev) cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA; } + /* Initialize aggregation timeout */ + cfhsi->aggregation_timeout = aggregation_timeout; + /* Initialize recieve vaiables. 
*/ cfhsi->rx_ptr = cfhsi->rx_buf; cfhsi->rx_len = CFHSI_DESC_SZ; @@ -1136,9 +1270,9 @@ int cfhsi_probe(struct platform_device *pdev) clear_bit(CFHSI_AWAKE, &cfhsi->bits); /* Create work thread. */ - cfhsi->wq = create_singlethread_workqueue(pdev->name); + cfhsi->wq = create_singlethread_workqueue(cfhsi->pdev->name); if (!cfhsi->wq) { - dev_err(&ndev->dev, "%s: Failed to create work queue.\n", + dev_err(&cfhsi->ndev->dev, "%s: Failed to create work queue.\n", __func__); res = -ENODEV; goto err_create_wq; @@ -1150,18 +1284,17 @@ int cfhsi_probe(struct platform_device *pdev) init_waitqueue_head(&cfhsi->flush_fifo_wait); /* Setup the inactivity timer. */ - init_timer(&cfhsi->timer); - cfhsi->timer.data = (unsigned long)cfhsi; - cfhsi->timer.function = cfhsi_inactivity_tout; + init_timer(&cfhsi->inactivity_timer); + cfhsi->inactivity_timer.data = (unsigned long)cfhsi; + cfhsi->inactivity_timer.function = cfhsi_inactivity_tout; /* Setup the slowpath RX timer. */ init_timer(&cfhsi->rx_slowpath_timer); cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi; cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath; - - /* Add CAIF HSI device to list. */ - spin_lock(&cfhsi_list_lock); - list_add_tail(&cfhsi->list, &cfhsi_list); - spin_unlock(&cfhsi_list_lock); + /* Setup the aggregation timer. */ + init_timer(&cfhsi->aggregation_timer); + cfhsi->aggregation_timer.data = (unsigned long)cfhsi; + cfhsi->aggregation_timer.function = cfhsi_aggregation_tout; /* Activate HSI interface. */ res = cfhsi->dev->cfhsi_up(cfhsi->dev); @@ -1175,21 +1308,10 @@ int cfhsi_probe(struct platform_device *pdev) /* Flush FIFO */ res = cfhsi_flush_fifo(cfhsi); if (res) { - dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n", + dev_err(&cfhsi->ndev->dev, "%s: Can't flush FIFO: %d.\n", __func__, res); goto err_net_reg; } - - /* Register network device. */ - res = register_netdev(ndev); - if (res) { - dev_err(&ndev->dev, "%s: Registration error: %d.\n", - __func__, res); - goto err_net_reg; - } - - netif_stop_queue(ndev); - return res; err_net_reg: @@ -1203,18 +1325,14 @@ int cfhsi_probe(struct platform_device *pdev) err_alloc_rx: kfree(cfhsi->tx_buf); err_alloc_tx: - free_netdev(ndev); - return res; } -static void cfhsi_shutdown(struct cfhsi *cfhsi) +static int cfhsi_close(struct net_device *ndev) { + struct cfhsi *cfhsi = netdev_priv(ndev); u8 *tx_buf, *rx_buf, *flip_buf; - /* Stop TXing */ - netif_tx_stop_all_queues(cfhsi->ndev); - /* going to shutdown driver */ set_bit(CFHSI_SHUTDOWN, &cfhsi->bits); @@ -1222,8 +1340,9 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi) flush_workqueue(cfhsi->wq); /* Delete timers if pending */ - del_timer_sync(&cfhsi->timer); + del_timer_sync(&cfhsi->inactivity_timer); del_timer_sync(&cfhsi->rx_slowpath_timer); + del_timer_sync(&cfhsi->aggregation_timer); /* Cancel pending RX request (if any) */ cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev); @@ -1241,15 +1360,19 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi) /* Deactivate interface */ cfhsi->dev->cfhsi_down(cfhsi->dev); - /* Finally unregister the network device. */ - unregister_netdev(cfhsi->ndev); - /* Free buffers. */ kfree(tx_buf); kfree(rx_buf); kfree(flip_buf); + return 0; } +static const struct net_device_ops cfhsi_ops = { + .ndo_open = cfhsi_open, + .ndo_stop = cfhsi_close, + .ndo_start_xmit = cfhsi_xmit +}; + int cfhsi_remove(struct platform_device *pdev) { struct list_head *list_node; @@ -1266,10 +1389,6 @@ int cfhsi_remove(struct platform_device *pdev) /* Remove from list. 
*/ list_del(list_node); spin_unlock(&cfhsi_list_lock); - - /* Shutdown driver. */ - cfhsi_shutdown(cfhsi); - return 0; } } @@ -1300,8 +1419,7 @@ static void __exit cfhsi_exit_module(void) list_del(list_node); spin_unlock(&cfhsi_list_lock); - /* Shutdown driver. */ - cfhsi_shutdown(cfhsi); + unregister_netdevice(cfhsi->ndev); spin_lock(&cfhsi_list_lock); } @@ -1326,8 +1444,6 @@ static int __init cfhsi_init_module(void) goto err_dev_register; } - return result; - err_dev_register: return result; } diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c index 5b2041319a3..bc497d71885 100644 --- a/drivers/net/caif/caif_shmcore.c +++ b/drivers/net/caif/caif_shmcore.c @@ -13,6 +13,7 @@ #include <linux/list.h> #include <linux/netdevice.h> #include <linux/if_arp.h> +#include <linux/io.h> #include <net/caif/caif_device.h> #include <net/caif/caif_shm.h> @@ -647,6 +648,9 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev) if (pshm_dev->shm_loopback) tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr; else + /* + * FIXME: the result of ioremap is not a pointer - arnd + */ tx_buf->desc_vptr = ioremap(tx_buf->phy_addr, TX_BUF_SZ); diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index c5fe3a3db8c..f03d7a481a8 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -687,18 +687,19 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) if (priv->do_get_state) priv->do_get_state(dev, &state); - NLA_PUT_U32(skb, IFLA_CAN_STATE, state); - NLA_PUT(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm); - NLA_PUT_U32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms); - NLA_PUT(skb, IFLA_CAN_BITTIMING, - sizeof(priv->bittiming), &priv->bittiming); - NLA_PUT(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock); - if (priv->do_get_berr_counter && !priv->do_get_berr_counter(dev, &bec)) - NLA_PUT(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec); - if (priv->bittiming_const) - NLA_PUT(skb, IFLA_CAN_BITTIMING_CONST, - sizeof(*priv->bittiming_const), priv->bittiming_const); - + if (nla_put_u32(skb, IFLA_CAN_STATE, state) || + nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) || + nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) || + nla_put(skb, IFLA_CAN_BITTIMING, + sizeof(priv->bittiming), &priv->bittiming) || + nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) || + (priv->do_get_berr_counter && + !priv->do_get_berr_counter(dev, &bec) && + nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) || + (priv->bittiming_const && + nla_put(skb, IFLA_CAN_BITTIMING_CONST, + sizeof(*priv->bittiming_const), priv->bittiming_const))) + goto nla_put_failure; return 0; nla_put_failure: @@ -714,9 +715,9 @@ static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); - NLA_PUT(skb, IFLA_INFO_XSTATS, - sizeof(priv->can_stats), &priv->can_stats); - + if (nla_put(skb, IFLA_INFO_XSTATS, + sizeof(priv->can_stats), &priv->can_stats)) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index 2bb215e00eb..1226297e767 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -1274,17 +1274,7 @@ static struct pci_driver pch_can_pci_driver = { .resume = pch_can_resume, }; -static int __init pch_can_pci_init(void) -{ - return pci_register_driver(&pch_can_pci_driver); -} -module_init(pch_can_pci_init); - -static void __exit pch_can_pci_exit(void) -{ - pci_unregister_driver(&pch_can_pci_driver); -} -module_exit(pch_can_pci_exit); 
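The can/dev.c hunk above is part of the tree-wide retirement of the NLA_PUT*() macros, which hid a goto: NLA_PUT(skb, type, len, data) expanded to an nla_put() call that jumped to a local nla_put_failure label when the attribute did not fit in the message. The replacement calls nla_put()/nla_put_u32() directly, ORs the results together and takes the goto explicitly, which keeps the error path visible and lets conditional attributes (berr counter, bittiming constants) be chained into the same test. The pattern, reduced to a sketch with made-up attribute names:

        /* before: the macro branches to nla_put_failure behind the caller's back */
        NLA_PUT_U32(skb, IFLA_FOO, val);
        NLA_PUT(skb, IFLA_BAR, sizeof(bar), &bar);

        /* after: explicit checks, short-circuited into one condition */
        if (nla_put_u32(skb, IFLA_FOO, val) ||
            nla_put(skb, IFLA_BAR, sizeof(bar), &bar))
                goto nla_put_failure;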
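The CAN driver hunks from here on all drop the same registration boilerplate in favour of module_pci_driver(). That macro (from <linux/pci.h>, built on module_driver()) emits the module_init/module_exit pair that does nothing but register and unregister the named pci_driver, so drivers whose init has no extra work lose a dozen lines each. The single added line below is roughly equivalent to:

        static int __init pch_can_pci_driver_init(void)
        {
                return pci_register_driver(&pch_can_pci_driver);
        }
        module_init(pch_can_pci_driver_init);

        static void __exit pch_can_pci_driver_exit(void)
        {
                pci_unregister_driver(&pch_can_pci_driver);
        }
        module_exit(pch_can_pci_driver_exit);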
+module_pci_driver(pch_can_pci_driver); MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c index 36f4f9780c3..5c6d412bafb 100644 --- a/drivers/net/can/sja1000/ems_pci.c +++ b/drivers/net/can/sja1000/ems_pci.c @@ -371,16 +371,4 @@ static struct pci_driver ems_pci_driver = { .remove = ems_pci_del_card, }; -static int __init ems_pci_init(void) -{ - return pci_register_driver(&ems_pci_driver); -} - -static void __exit ems_pci_exit(void) -{ - pci_unregister_driver(&ems_pci_driver); -} - -module_init(ems_pci_init); -module_exit(ems_pci_exit); - +module_pci_driver(ems_pci_driver); diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c index ed004cebd31..23ed6ea4c7c 100644 --- a/drivers/net/can/sja1000/kvaser_pci.c +++ b/drivers/net/can/sja1000/kvaser_pci.c @@ -397,15 +397,4 @@ static struct pci_driver kvaser_pci_driver = { .remove = __devexit_p(kvaser_pci_remove_one), }; -static int __init kvaser_pci_init(void) -{ - return pci_register_driver(&kvaser_pci_driver); -} - -static void __exit kvaser_pci_exit(void) -{ - pci_unregister_driver(&kvaser_pci_driver); -} - -module_init(kvaser_pci_init); -module_exit(kvaser_pci_exit); +module_pci_driver(kvaser_pci_driver); diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 5f92b865f64..f0a12962f7b 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -749,14 +749,4 @@ static struct pci_driver peak_pci_driver = { .remove = __devexit_p(peak_pci_remove), }; -static int __init peak_pci_init(void) -{ - return pci_register_driver(&peak_pci_driver); -} -module_init(peak_pci_init); - -static void __exit peak_pci_exit(void) -{ - pci_unregister_driver(&peak_pci_driver); -} -module_exit(peak_pci_exit); +module_pci_driver(peak_pci_driver); diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index a227586ddd5..8bc95982840 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c @@ -609,15 +609,4 @@ static struct pci_driver plx_pci_driver = { .remove = plx_pci_del_card, }; -static int __init plx_pci_init(void) -{ - return pci_register_driver(&plx_pci_driver); -} - -static void __exit plx_pci_exit(void) -{ - pci_unregister_driver(&plx_pci_driver); -} - -module_init(plx_pci_init); -module_exit(plx_pci_exit); +module_pci_driver(plx_pci_driver); diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index 41719da2e17..1a8eef2c3d5 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -69,7 +69,6 @@ #define TX_TIMEOUT (400*HZ/1000) #include <linux/module.h> -#include <linux/mca.h> #include <linux/isa.h> #include <linux/pnp.h> #include <linux/string.h> @@ -102,7 +101,7 @@ static int el3_debug = 2; #endif /* Used to do a global count of all the cards in the system. 
Must be - * a global variable so that the mca/eisa probe routines can increment + * a global variable so that the eisa probe routines can increment * it */ static int el3_cards = 0; #define EL3_MAX_CARDS 8 @@ -163,7 +162,7 @@ enum RxFilter { */ #define SKB_QUEUE_SIZE 64 -enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_MCA, EL3_EISA }; +enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_EISA }; struct el3_private { spinlock_t lock; @@ -505,41 +504,6 @@ static struct eisa_driver el3_eisa_driver = { static int eisa_registered; #endif -#ifdef CONFIG_MCA -static int el3_mca_probe(struct device *dev); - -static short el3_mca_adapter_ids[] __initdata = { - 0x627c, - 0x627d, - 0x62db, - 0x62f6, - 0x62f7, - 0x0000 -}; - -static char *el3_mca_adapter_names[] __initdata = { - "3Com 3c529 EtherLink III (10base2)", - "3Com 3c529 EtherLink III (10baseT)", - "3Com 3c529 EtherLink III (test mode)", - "3Com 3c529 EtherLink III (TP or coax)", - "3Com 3c529 EtherLink III (TP)", - NULL -}; - -static struct mca_driver el3_mca_driver = { - .id_table = el3_mca_adapter_ids, - .driver = { - .name = "3c529", - .bus = &mca_bus_type, - .probe = el3_mca_probe, - .remove = __devexit_p(el3_device_remove), - .suspend = el3_suspend, - .resume = el3_resume, - }, -}; -static int mca_registered; -#endif /* CONFIG_MCA */ - static const struct net_device_ops netdev_ops = { .ndo_open = el3_open, .ndo_stop = el3_close, @@ -600,76 +564,6 @@ static void el3_common_remove (struct net_device *dev) free_netdev (dev); } -#ifdef CONFIG_MCA -static int __init el3_mca_probe(struct device *device) -{ - /* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch, - * heavily modified by Chris Beauregard - * (cpbeaure@csclub.uwaterloo.ca) to support standard MCA - * probing. - * - * redone for multi-card detection by ZP Gu (zpg@castle.net) - * now works as a module */ - - short i; - int ioaddr, irq, if_port; - __be16 phys_addr[3]; - struct net_device *dev = NULL; - u_char pos4, pos5; - struct mca_device *mdev = to_mca_device(device); - int slot = mdev->slot; - int err; - - pos4 = mca_device_read_stored_pos(mdev, 4); - pos5 = mca_device_read_stored_pos(mdev, 5); - - ioaddr = ((short)((pos4&0xfc)|0x02)) << 8; - irq = pos5 & 0x0f; - - - pr_info("3c529: found %s at slot %d\n", - el3_mca_adapter_names[mdev->index], slot + 1); - - /* claim the slot */ - strncpy(mdev->name, el3_mca_adapter_names[mdev->index], - sizeof(mdev->name)); - mca_device_set_claim(mdev, 1); - - if_port = pos4 & 0x03; - - irq = mca_device_transform_irq(mdev, irq); - ioaddr = mca_device_transform_ioport(mdev, ioaddr); - if (el3_debug > 2) { - pr_debug("3c529: irq %d ioaddr 0x%x ifport %d\n", irq, ioaddr, if_port); - } - EL3WINDOW(0); - for (i = 0; i < 3; i++) - phys_addr[i] = htons(read_eeprom(ioaddr, i)); - - dev = alloc_etherdev(sizeof (struct el3_private)); - if (dev == NULL) { - release_region(ioaddr, EL3_IO_EXTENT); - return -ENOMEM; - } - - netdev_boot_setup_check(dev); - - el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_MCA); - dev_set_drvdata(device, dev); - err = el3_common_init(dev); - - if (err) { - dev_set_drvdata(device, NULL); - free_netdev(dev); - return -ENOMEM; - } - - el3_devs[el3_cards++] = dev; - return 0; -} - -#endif /* CONFIG_MCA */ - #ifdef CONFIG_EISA static int __init el3_eisa_probe (struct device *device) { @@ -1547,11 +1441,6 @@ static int __init el3_init_module(void) if (!ret) eisa_registered = 1; #endif -#ifdef CONFIG_MCA - ret = mca_register_driver(&el3_mca_driver); - if (!ret) - mca_registered = 1; -#endif #ifdef CONFIG_PNP if (pnp_registered) @@ -1563,10 
+1452,6 @@ static int __init el3_init_module(void) if (eisa_registered) ret = 0; #endif -#ifdef CONFIG_MCA - if (mca_registered) - ret = 0; -#endif return ret; } @@ -1584,10 +1469,6 @@ static void __exit el3_cleanup_module(void) if (eisa_registered) eisa_driver_unregister(&el3_eisa_driver); #endif -#ifdef CONFIG_MCA - if (mca_registered) - mca_unregister_driver(&el3_mca_driver); -#endif } module_init (el3_init_module); diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index e04ade44424..2e538676924 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig @@ -60,6 +60,7 @@ config PCMCIA_AXNET config AX88796 tristate "ASIX AX88796 NE2000 clone support" depends on (ARM || MIPS || SUPERH) + select CRC32 select PHYLIB select MDIO_BITBANG ---help--- @@ -181,18 +182,6 @@ config NE2000 To compile this driver as a module, choose M here. The module will be called ne. -config NE2_MCA - tristate "NE/2 (ne2000 MCA version) support" - depends on MCA_LEGACY - select CRC32 - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called ne2. - config NE2K_PCI tristate "PCI NE2000 and clones support (see help)" depends on PCI @@ -266,18 +255,6 @@ config STNIC If unsure, say N. -config ULTRAMCA - tristate "SMC Ultra MCA support" - depends on MCA - select CRC32 - ---help--- - If you have a network (Ethernet) card of this type and are running - an MCA based system (PS/2), say Y and read the Ethernet-HOWTO, - available from <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called smc-mca. 
- config ULTRA tristate "SMC Ultra support" depends on ISA diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile index 3337d7fb434..d13790b7fd2 100644 --- a/drivers/net/ethernet/8390/Makefile +++ b/drivers/net/ethernet/8390/Makefile @@ -24,6 +24,5 @@ obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o obj-$(CONFIG_STNIC) += stnic.o 8390.o obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o -obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o obj-$(CONFIG_WD80x3) += wd.o 8390.o obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 11476ca95e9..203ff9dccad 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -501,6 +501,7 @@ static const struct ethtool_ops ax_ethtool_ops = { .get_settings = ax_get_settings, .set_settings = ax_set_settings, .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, }; #ifdef CONFIG_AX88796_93CX6 diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c index dbefd5658c1..8322c54972f 100644 --- a/drivers/net/ethernet/8390/etherh.c +++ b/drivers/net/ethernet/8390/etherh.c @@ -635,6 +635,7 @@ static const struct ethtool_ops etherh_ethtool_ops = { .get_settings = etherh_get_settings, .set_settings = etherh_set_settings, .get_drvinfo = etherh_get_drvinfo, + .get_ts_info = ethtool_op_get_ts_info, }; static const struct net_device_ops etherh_netdev_ops = { diff --git a/drivers/net/ethernet/8390/ne2.c b/drivers/net/ethernet/8390/ne2.c deleted file mode 100644 index ef85839f43d..00000000000 --- a/drivers/net/ethernet/8390/ne2.c +++ /dev/null @@ -1,798 +0,0 @@ -/* ne2.c: A NE/2 Ethernet Driver for Linux. */ -/* - Based on the NE2000 driver written by Donald Becker (1992-94). - modified by Wim Dumon (Apr 1996) - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - The author may be reached as wimpie@linux.cc.kuleuven.ac.be - - Currently supported: NE/2 - This patch was never tested on other MCA-ethernet adapters, but it - might work. Just give it a try and let me know if you have problems. - Also mail me if it really works, please! - - Changelog: - Mon Feb 3 16:26:02 MET 1997 - - adapted the driver to work with the 2.1.25 kernel - - multiple ne2 support (untested) - - module support (untested) - - Fri Aug 28 00:18:36 CET 1998 (David Weinehall) - - fixed a few minor typos - - made the MODULE_PARM conditional (it only works with the v2.1.x kernels) - - fixed the module support (Now it's working...) - - Mon Sep 7 19:01:44 CET 1998 (David Weinehall) - - added support for Arco Electronics AE/2-card (experimental) - - Mon Sep 14 09:53:42 CET 1998 (David Weinehall) - - added support for Compex ENET-16MC/P (experimental) - - Tue Sep 15 16:21:12 CET 1998 (David Weinehall, Magnus Jonsson, Tomas Ogren) - - Miscellaneous bugfixes - - Tue Sep 19 16:21:12 CET 1998 (Magnus Jonsson) - - Cleanup - - Wed Sep 23 14:33:34 CET 1998 (David Weinehall) - - Restructuring and rewriting for v2.1.x compliance - - Wed Oct 14 17:19:21 CET 1998 (David Weinehall) - - Added code that unregisters irq and proc-info - - Version# bump - - Mon Nov 16 15:28:23 CET 1998 (Wim Dumon) - - pass 'dev' as last parameter of request_irq in stead of 'NULL' - - Wed Feb 7 21:24:00 CET 2001 (Alfred Arnold) - - added support for the D-Link DE-320CT - - * WARNING - ------- - This is alpha-test software. 
It is not guaranteed to work. As a - matter of fact, I'm quite sure there are *LOTS* of bugs in here. I - would like to hear from you if you use this driver, even if it works. - If it doesn't work, be sure to send me a mail with the problems ! -*/ - -static const char *version = "ne2.c:v0.91 Nov 16 1998 Wim Dumon <wimpie@kotnet.org>\n"; - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/mca-legacy.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/bitops.h> -#include <linux/jiffies.h> - -#include <asm/io.h> -#include <asm/dma.h> - -#include "8390.h" - -#define DRV_NAME "ne2" - -/* Some defines that people can play with if so inclined. */ - -/* Do we perform extra sanity checks on stuff ? */ -/* #define NE_SANITY_CHECK */ - -/* Do we implement the read before write bugfix ? */ -/* #define NE_RW_BUGFIX */ - -/* Do we have a non std. amount of memory? (in units of 256 byte pages) */ -/* #define PACKETBUF_MEMSIZE 0x40 */ - - -/* ---- No user-serviceable parts below ---- */ - -#define NE_BASE (dev->base_addr) -#define NE_CMD 0x00 -#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */ -#define NE_RESET 0x20 /* Issue a read to reset, a write to clear. */ -#define NE_IO_EXTENT 0x30 - -#define NE1SM_START_PG 0x20 /* First page of TX buffer */ -#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ -#define NESM_START_PG 0x40 /* First page of TX buffer */ -#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ - -/* From the .ADF file: */ -static unsigned int addresses[7] __initdata = - {0x1000, 0x2020, 0x8020, 0xa0a0, 0xb0b0, 0xc0c0, 0xc3d0}; -static int irqs[4] __initdata = {3, 4, 5, 9}; - -/* From the D-Link ADF file: */ -static unsigned int dlink_addresses[4] __initdata = - {0x300, 0x320, 0x340, 0x360}; -static int dlink_irqs[8] __initdata = {3, 4, 5, 9, 10, 11, 14, 15}; - -struct ne2_adapters_t { - unsigned int id; - char *name; -}; - -static struct ne2_adapters_t ne2_adapters[] __initdata = { - { 0x6354, "Arco Ethernet Adapter AE/2" }, - { 0x70DE, "Compex ENET-16 MC/P" }, - { 0x7154, "Novell Ethernet Adapter NE/2" }, - { 0x56ea, "D-Link DE-320CT" }, - { 0x0000, NULL } -}; - -extern int netcard_probe(struct net_device *dev); - -static int ne2_probe1(struct net_device *dev, int slot); - -static void ne_reset_8390(struct net_device *dev); -static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static void ne_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void ne_block_output(struct net_device *dev, const int count, - const unsigned char *buf, const int start_page); - - -/* - * special code to read the DE-320's MAC address EEPROM. In contrast to a - * standard NE design, this is a serial EEPROM (93C46) that has to be read - * bit by bit. 
The EEPROM cotrol port at base + 0x1e has the following - * layout: - * - * Bit 0 = Data out (read from EEPROM) - * Bit 1 = Data in (write to EEPROM) - * Bit 2 = Clock - * Bit 3 = Chip Select - * Bit 7 = ~50 kHz clock for defined delays - * - */ - -static void __init dlink_put_eeprom(unsigned char value, unsigned int addr) -{ - int z; - unsigned char v1, v2; - - /* write the value to the NIC EEPROM register */ - - outb(value, addr + 0x1e); - - /* now wait the clock line to toggle twice. Effectively, we are - waiting (at least) for one clock cycle */ - - for (z = 0; z < 2; z++) { - do { - v1 = inb(addr + 0x1e); - v2 = inb(addr + 0x1e); - } - while (!((v1 ^ v2) & 0x80)); - } -} - -static void __init dlink_send_eeprom_bit(unsigned int bit, unsigned int addr) -{ - /* shift data bit into correct position */ - - bit = bit << 1; - - /* write value, keep clock line high for two cycles */ - - dlink_put_eeprom(0x09 | bit, addr); - dlink_put_eeprom(0x0d | bit, addr); - dlink_put_eeprom(0x0d | bit, addr); - dlink_put_eeprom(0x09 | bit, addr); -} - -static void __init dlink_send_eeprom_word(unsigned int value, unsigned int len, unsigned int addr) -{ - int z; - - /* adjust bits so that they are left-aligned in a 16-bit-word */ - - value = value << (16 - len); - - /* shift bits out to the EEPROM */ - - for (z = 0; z < len; z++) { - dlink_send_eeprom_bit((value & 0x8000) >> 15, addr); - value = value << 1; - } -} - -static unsigned int __init dlink_get_eeprom(unsigned int eeaddr, unsigned int addr) -{ - int z; - unsigned int value = 0; - - /* pull the CS line low for a moment. This resets the EEPROM- - internal logic, and makes it ready for a new command. */ - - dlink_put_eeprom(0x01, addr); - dlink_put_eeprom(0x09, addr); - - /* send one start bit, read command (1 - 0), plus the address to - the EEPROM */ - - dlink_send_eeprom_word(0x0180 | (eeaddr & 0x3f), 9, addr); - - /* get the data word. We clock by sending 0s to the EEPROM, which - get ignored during the read process */ - - for (z = 0; z < 16; z++) { - dlink_send_eeprom_bit(0, addr); - value = (value << 1) | (inb(addr + 0x1e) & 0x01); - } - - return value; -} - -/* - * Note that at boot, this probe only picks up one card at a time. - */ - -static int __init do_ne2_probe(struct net_device *dev) -{ - static int current_mca_slot = -1; - int i; - int adapter_found = 0; - - /* Do not check any supplied i/o locations. - POS registers usually don't fail :) */ - - /* MCA cards have POS registers. - Autodetecting MCA cards is extremely simple. - Just search for the card. 
*/ - - for(i = 0; (ne2_adapters[i].name != NULL) && !adapter_found; i++) { - current_mca_slot = - mca_find_unused_adapter(ne2_adapters[i].id, 0); - - if((current_mca_slot != MCA_NOTFOUND) && !adapter_found) { - int res; - mca_set_adapter_name(current_mca_slot, - ne2_adapters[i].name); - mca_mark_as_used(current_mca_slot); - - res = ne2_probe1(dev, current_mca_slot); - if (res) - mca_mark_as_unused(current_mca_slot); - return res; - } - } - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init ne2_probe(int unit) -{ - struct net_device *dev = alloc_eip_netdev(); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_ne2_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static int ne2_procinfo(char *buf, int slot, struct net_device *dev) -{ - int len=0; - - len += sprintf(buf+len, "The NE/2 Ethernet Adapter\n" ); - len += sprintf(buf+len, "Driver written by Wim Dumon "); - len += sprintf(buf+len, "<wimpie@kotnet.org>\n"); - len += sprintf(buf+len, "Modified by "); - len += sprintf(buf+len, "David Weinehall <tao@acc.umu.se>\n"); - len += sprintf(buf+len, "and by Magnus Jonsson <bigfoot@acc.umu.se>\n"); - len += sprintf(buf+len, "Based on the original NE2000 drivers\n" ); - len += sprintf(buf+len, "Base IO: %#x\n", (unsigned int)dev->base_addr); - len += sprintf(buf+len, "IRQ : %d\n", dev->irq); - len += sprintf(buf+len, "HW addr : %pM\n", dev->dev_addr); - - return len; -} - -static int __init ne2_probe1(struct net_device *dev, int slot) -{ - int i, base_addr, irq, retval; - unsigned char POS; - unsigned char SA_prom[32]; - const char *name = "NE/2"; - int start_page, stop_page; - static unsigned version_printed; - - if (ei_debug && version_printed++ == 0) - printk(version); - - printk("NE/2 ethercard found in slot %d:", slot); - - /* Read base IO and IRQ from the POS-registers */ - POS = mca_read_stored_pos(slot, 2); - if(!(POS % 2)) { - printk(" disabled.\n"); - return -ENODEV; - } - - /* handle different POS register structure for D-Link card */ - - if (mca_read_stored_pos(slot, 0) == 0xea) { - base_addr = dlink_addresses[(POS >> 5) & 0x03]; - irq = dlink_irqs[(POS >> 2) & 0x07]; - } - else { - i = (POS & 0xE)>>1; - /* printk("Halleluja sdog, als er na de pijl een 1 staat is 1 - 1 == 0" - " en zou het moeten werken -> %d\n", i); - The above line was for remote testing, thanx to sdog ... */ - base_addr = addresses[i - 1]; - irq = irqs[(POS & 0x60)>>5]; - } - - if (!request_region(base_addr, NE_IO_EXTENT, DRV_NAME)) - return -EBUSY; - -#ifdef DEBUG - printk("POS info : pos 2 = %#x ; base = %#x ; irq = %ld\n", POS, - base_addr, irq); -#endif - -#ifndef CRYNWR_WAY - /* Reset the card the way they do it in the Crynwr packet driver */ - for (i=0; i<8; i++) - outb(0x0, base_addr + NE_RESET); - inb(base_addr + NE_RESET); - outb(0x21, base_addr + NE_CMD); - if (inb(base_addr + NE_CMD) != 0x21) { - printk("NE/2 adapter not responding\n"); - retval = -ENODEV; - goto out; - } - - /* In the crynwr sources they do a RAM-test here. I skip it. I suppose - my RAM is okay. Suppose your memory is broken. Then this test - should fail and you won't be able to use your card. But if I do not - test, you won't be able to use your card, neither. So this test - won't help you. */ - -#else /* _I_ never tested it this way .. Go ahead and try ...*/ - /* Reset card. Who knows what dain-bramaged state it was left in. 
*/ - { - unsigned long reset_start_time = jiffies; - - /* DON'T change these to inb_p/outb_p or reset will fail on - clones.. */ - outb(inb(base_addr + NE_RESET), base_addr + NE_RESET); - - while ((inb_p(base_addr + EN0_ISR) & ENISR_RESET) == 0) - if (time_after(jiffies, reset_start_time + 2*HZ/100)) { - printk(" not found (no reset ack).\n"); - retval = -ENODEV; - goto out; - } - - outb_p(0xff, base_addr + EN0_ISR); /* Ack all intr. */ - } -#endif - - - /* Read the 16 bytes of station address PROM. - We must first initialize registers, similar to - NS8390p_init(eifdev, 0). - We can't reliably read the SAPROM address without this. - (I learned the hard way!). */ - { - struct { - unsigned char value, offset; - } program_seq[] = { - /* Select page 0 */ - {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, - {0x49, EN0_DCFG}, /* Set WORD-wide (0x49) access. */ - {0x00, EN0_RCNTLO}, /* Clear the count regs. */ - {0x00, EN0_RCNTHI}, - {0x00, EN0_IMR}, /* Mask completion irq. */ - {0xFF, EN0_ISR}, - {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ - {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ - {32, EN0_RCNTLO}, - {0x00, EN0_RCNTHI}, - {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */ - {0x00, EN0_RSARHI}, - {E8390_RREAD+E8390_START, E8390_CMD}, - }; - - for (i = 0; i < ARRAY_SIZE(program_seq); i++) - outb_p(program_seq[i].value, base_addr + - program_seq[i].offset); - - } - for(i = 0; i < 6 /*sizeof(SA_prom)*/; i+=1) { - SA_prom[i] = inb(base_addr + NE_DATAPORT); - } - - /* I don't know whether the previous sequence includes the general - board reset procedure, so better don't omit it and just overwrite - the garbage read from a DE-320 with correct stuff. */ - - if (mca_read_stored_pos(slot, 0) == 0xea) { - unsigned int v; - - for (i = 0; i < 3; i++) { - v = dlink_get_eeprom(i, base_addr); - SA_prom[(i << 1) ] = v & 0xff; - SA_prom[(i << 1) + 1] = (v >> 8) & 0xff; - } - } - - start_page = NESM_START_PG; - stop_page = NESM_STOP_PG; - - dev->irq=irq; - - /* Snarf the interrupt now. There's no point in waiting since we cannot - share and the board will usually be enabled. */ - retval = request_irq(dev->irq, eip_interrupt, 0, DRV_NAME, dev); - if (retval) { - printk (" unable to get IRQ %d (irqval=%d).\n", - dev->irq, retval); - goto out; - } - - dev->base_addr = base_addr; - - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = SA_prom[i]; - - printk(" %pM\n", dev->dev_addr); - - printk("%s: %s found at %#x, using IRQ %d.\n", - dev->name, name, base_addr, dev->irq); - - mca_set_adapter_procfn(slot, (MCA_ProcFn) ne2_procinfo, dev); - - ei_status.name = name; - ei_status.tx_start_page = start_page; - ei_status.stop_page = stop_page; - ei_status.word16 = (2 == 2); - - ei_status.rx_start_page = start_page + TX_PAGES; -#ifdef PACKETBUF_MEMSIZE - /* Allow the packet buffer size to be overridden by know-it-alls. */ - ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; -#endif - - ei_status.reset_8390 = &ne_reset_8390; - ei_status.block_input = &ne_block_input; - ei_status.block_output = &ne_block_output; - ei_status.get_8390_hdr = &ne_get_8390_hdr; - - ei_status.priv = slot; - - dev->netdev_ops = &eip_netdev_ops; - NS8390p_init(dev, 0); - - retval = register_netdev(dev); - if (retval) - goto out1; - return 0; -out1: - mca_set_adapter_procfn( ei_status.priv, NULL, NULL); - free_irq(dev->irq, dev); -out: - release_region(base_addr, NE_IO_EXTENT); - return retval; -} - -/* Hard reset the card. 
This used to pause for the same period that a - 8390 reset command required, but that shouldn't be necessary. */ -static void ne_reset_8390(struct net_device *dev) -{ - unsigned long reset_start_time = jiffies; - - if (ei_debug > 1) - printk("resetting the 8390 t=%ld...", jiffies); - - /* DON'T change these to inb_p/outb_p or reset will fail on clones. */ - outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); - - ei_status.txing = 0; - ei_status.dmaing = 0; - - /* This check _should_not_ be necessary, omit eventually. */ - while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) - if (time_after(jiffies, reset_start_time + 2*HZ/100)) { - printk("%s: ne_reset_8390() did not complete.\n", - dev->name); - break; - } - outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */ -} - -/* Grab the 8390 specific header. Similar to the block_input routine, but - we don't need to be concerned with ring wrap as the header will be at - the start of a page, so we optimize accordingly. */ - -static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page) -{ - - int nic_base = dev->base_addr; - - /* This *shouldn't* happen. - If it does, it's the last thing you'll see */ - if (ei_status.dmaing) { - printk("%s: DMAing conflict in ne_get_8390_hdr " - "[DMAstat:%d][irqlock:%d].\n", - dev->name, ei_status.dmaing, ei_status.irqlock); - return; - } - - ei_status.dmaing |= 0x01; - outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); - outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); - outb_p(0, nic_base + EN0_RCNTHI); - outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */ - outb_p(ring_page, nic_base + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); - - if (ei_status.word16) - insw(NE_BASE + NE_DATAPORT, hdr, - sizeof(struct e8390_pkt_hdr)>>1); - else - insb(NE_BASE + NE_DATAPORT, hdr, - sizeof(struct e8390_pkt_hdr)); - - outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ - ei_status.dmaing &= ~0x01; -} - -/* Block input and output, similar to the Crynwr packet driver. If you - are porting to a new ethercard, look at the packet driver source for - hints. The NEx000 doesn't share the on-board packet memory -- you have - to put the packet out through the "remote DMA" dataport using outb. */ - -static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, - int ring_offset) -{ -#ifdef NE_SANITY_CHECK - int xfer_count = count; -#endif - int nic_base = dev->base_addr; - char *buf = skb->data; - - /* This *shouldn't* happen. - If it does, it's the last thing you'll see */ - if (ei_status.dmaing) { - printk("%s: DMAing conflict in ne_block_input " - "[DMAstat:%d][irqlock:%d].\n", - dev->name, ei_status.dmaing, ei_status.irqlock); - return; - } - ei_status.dmaing |= 0x01; - outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); - outb_p(count & 0xff, nic_base + EN0_RCNTLO); - outb_p(count >> 8, nic_base + EN0_RCNTHI); - outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); - outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); - if (ei_status.word16) { - insw(NE_BASE + NE_DATAPORT,buf,count>>1); - if (count & 0x01) { - buf[count-1] = inb(NE_BASE + NE_DATAPORT); -#ifdef NE_SANITY_CHECK - xfer_count++; -#endif - } - } else { - insb(NE_BASE + NE_DATAPORT, buf, count); - } - -#ifdef NE_SANITY_CHECK - /* This was for the ALPHA version only, but enough people have - been encountering problems so it is still here. 
If you see - this message you either 1) have a slightly incompatible clone - or 2) have noise/speed problems with your bus. */ - if (ei_debug > 1) { /* DMA termination address check... */ - int addr, tries = 20; - do { - /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here - -- it's broken for Rx on some cards! */ - int high = inb_p(nic_base + EN0_RSARHI); - int low = inb_p(nic_base + EN0_RSARLO); - addr = (high << 8) + low; - if (((ring_offset + xfer_count) & 0xff) == low) - break; - } while (--tries > 0); - if (tries <= 0) - printk("%s: RX transfer address mismatch," - "%#4.4x (expected) vs. %#4.4x (actual).\n", - dev->name, ring_offset + xfer_count, addr); - } -#endif - outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ - ei_status.dmaing &= ~0x01; -} - -static void ne_block_output(struct net_device *dev, int count, - const unsigned char *buf, const int start_page) -{ - int nic_base = NE_BASE; - unsigned long dma_start; -#ifdef NE_SANITY_CHECK - int retries = 0; -#endif - - /* Round the count up for word writes. Do we need to do this? - What effect will an odd byte count have on the 8390? - I should check someday. */ - if (ei_status.word16 && (count & 0x01)) - count++; - - /* This *shouldn't* happen. - If it does, it's the last thing you'll see */ - if (ei_status.dmaing) { - printk("%s: DMAing conflict in ne_block_output." - "[DMAstat:%d][irqlock:%d]\n", - dev->name, ei_status.dmaing, ei_status.irqlock); - return; - } - ei_status.dmaing |= 0x01; - /* We should already be in page 0, but to be safe... */ - outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); - -#ifdef NE_SANITY_CHECK -retry: -#endif - -#ifdef NE8390_RW_BUGFIX - /* Handle the read-before-write bug the same way as the - Crynwr packet driver -- the NatSemi method doesn't work. - Actually this doesn't always work either, but if you have - problems with your NEx000 this is better than nothing! */ - outb_p(0x42, nic_base + EN0_RCNTLO); - outb_p(0x00, nic_base + EN0_RCNTHI); - outb_p(0x42, nic_base + EN0_RSARLO); - outb_p(0x00, nic_base + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); - /* Make certain that the dummy read has occurred. */ - SLOW_DOWN_IO; - SLOW_DOWN_IO; - SLOW_DOWN_IO; -#endif - - outb_p(ENISR_RDC, nic_base + EN0_ISR); - - /* Now the normal output. */ - outb_p(count & 0xff, nic_base + EN0_RCNTLO); - outb_p(count >> 8, nic_base + EN0_RCNTHI); - outb_p(0x00, nic_base + EN0_RSARLO); - outb_p(start_page, nic_base + EN0_RSARHI); - - outb_p(E8390_RWRITE+E8390_START, nic_base + NE_CMD); - if (ei_status.word16) { - outsw(NE_BASE + NE_DATAPORT, buf, count>>1); - } else { - outsb(NE_BASE + NE_DATAPORT, buf, count); - } - - dma_start = jiffies; - -#ifdef NE_SANITY_CHECK - /* This was for the ALPHA version only, but enough people have - been encountering problems so it is still here. */ - - if (ei_debug > 1) { /* DMA termination address check... */ - int addr, tries = 20; - do { - int high = inb_p(nic_base + EN0_RSARHI); - int low = inb_p(nic_base + EN0_RSARLO); - addr = (high << 8) + low; - if ((start_page << 8) + count == addr) - break; - } while (--tries > 0); - if (tries <= 0) { - printk("%s: Tx packet transfer address mismatch," - "%#4.4x (expected) vs. 
%#4.4x (actual).\n", - dev->name, (start_page << 8) + count, addr); - if (retries++ == 0) - goto retry; - } - } -#endif - - while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) - if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ - printk("%s: timeout waiting for Tx RDC.\n", dev->name); - ne_reset_8390(dev); - NS8390p_init(dev, 1); - break; - } - - outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ - ei_status.dmaing &= ~0x01; -} - - -#ifdef MODULE -#define MAX_NE_CARDS 4 /* Max number of NE cards per module */ -static struct net_device *dev_ne[MAX_NE_CARDS]; -static int io[MAX_NE_CARDS]; -static int irq[MAX_NE_CARDS]; -static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */ -MODULE_LICENSE("GPL"); - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(bad, int, NULL, 0); -MODULE_PARM_DESC(io, "(ignored)"); -MODULE_PARM_DESC(irq, "(ignored)"); -MODULE_PARM_DESC(bad, "(ignored)"); - -/* Module code fixed by David Weinehall */ - -int __init init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { - dev = alloc_eip_netdev(); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->mem_end = bad[this_dev]; - dev->base_addr = io[this_dev]; - if (do_ne2_probe(dev) == 0) { - dev_ne[found++] = dev; - continue; - } - free_netdev(dev); - break; - } - if (found) - return 0; - printk(KERN_WARNING "ne2.c: No NE/2 card found\n"); - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - mca_mark_as_unused(ei_status.priv); - mca_set_adapter_procfn( ei_status.priv, NULL, NULL); - free_irq(dev->irq, dev); - release_region(dev->base_addr, NE_IO_EXTENT); -} - -void __exit cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { - struct net_device *dev = dev_ne[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/smc-mca.c b/drivers/net/ethernet/8390/smc-mca.c deleted file mode 100644 index 7a68590f280..00000000000 --- a/drivers/net/ethernet/8390/smc-mca.c +++ /dev/null @@ -1,575 +0,0 @@ -/* smc-mca.c: A SMC Ultra ethernet driver for linux. */ -/* - Most of this driver, except for ultramca_probe is nearly - verbatim from smc-ultra.c by Donald Becker. The rest is - written and copyright 1996 by David Weis, weisd3458@uni.edu - - This is a driver for the SMC Ultra and SMC EtherEZ ethercards. - - This driver uses the cards in the 8390-compatible, shared memory mode. - Most of the run-time complexity is handled by the generic code in - 8390.c. - - This driver enables the shared memory only when doing the actual data - transfers to avoid a bug in early version of the card that corrupted - data transferred by a AHA1542. - - This driver does not support the programmed-I/O data transfer mode of - the EtherEZ. That support (if available) is smc-ez.c. Nor does it - use the non-8390-compatible "Altego" mode. (No support currently planned.) - - Changelog: - - Paul Gortmaker : multiple card support for module users. - David Weis : Micro Channel-ized it. 
- Tom Sightler : Added support for IBM PS/2 Ethernet Adapter/A - Christopher Turcksin : Changed MCA-probe so that multiple adapters are - found correctly (Jul 16, 1997) - Chris Beauregard : Tried to merge the two changes above (Dec 15, 1997) - Tom Sightler : Fixed minor detection bug caused by above merge - Tom Sightler : Added support for three more Western Digital - MCA-adapters - Tom Sightler : Added support for 2.2.x mca_find_unused_adapter - Hartmut Schmidt : - Modified parameter detection to handle each - card differently depending on a switch-list - - 'card_ver' removed from the adapter list - - Some minor bug fixes -*/ - -#include <linux/mca.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> - -#include <asm/io.h> - -#include "8390.h" - -#define DRV_NAME "smc-mca" - -static int ultramca_open(struct net_device *dev); -static void ultramca_reset_8390(struct net_device *dev); -static void ultramca_get_8390_hdr(struct net_device *dev, - struct e8390_pkt_hdr *hdr, - int ring_page); -static void ultramca_block_input(struct net_device *dev, int count, - struct sk_buff *skb, - int ring_offset); -static void ultramca_block_output(struct net_device *dev, int count, - const unsigned char *buf, - const int start_page); -static int ultramca_close_card(struct net_device *dev); - -#define START_PG 0x00 /* First page of TX buffer */ - -#define ULTRA_CMDREG 0 /* Offset to ASIC command register. */ -#define ULTRA_RESET 0x80 /* Board reset, in ULTRA_CMDREG. */ -#define ULTRA_MEMENB 0x40 /* Enable the shared memory. */ -#define ULTRA_NIC_OFFSET 16 /* NIC register offset from the base_addr. */ -#define ULTRA_IO_EXTENT 32 -#define EN0_ERWCNT 0x08 /* Early receive warning count. 
*/ - -#define _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A 0 -#define _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A 1 -#define _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A 2 -#define _6fc1_WD_Starcard_PLUS_A_WD8003ST_A 3 -#define _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A 4 -#define _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A 5 -#define _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A 6 -#define _efe5_IBM_PS2_Adapter_A_for_Ethernet 7 - -struct smc_mca_adapters_t { - unsigned int id; - char *name; -}; - -#define MAX_ULTRAMCA_CARDS 4 /* Max number of Ultra cards per module */ - -static int ultra_io[MAX_ULTRAMCA_CARDS]; -static int ultra_irq[MAX_ULTRAMCA_CARDS]; -MODULE_LICENSE("GPL"); - -module_param_array(ultra_io, int, NULL, 0); -module_param_array(ultra_irq, int, NULL, 0); -MODULE_PARM_DESC(ultra_io, "SMC Ultra/EtherEZ MCA I/O base address(es)"); -MODULE_PARM_DESC(ultra_irq, "SMC Ultra/EtherEZ MCA IRQ number(s)"); - -static const struct { - unsigned int base_addr; -} addr_table[] = { - { 0x0800 }, - { 0x1800 }, - { 0x2800 }, - { 0x3800 }, - { 0x4800 }, - { 0x5800 }, - { 0x6800 }, - { 0x7800 }, - { 0x8800 }, - { 0x9800 }, - { 0xa800 }, - { 0xb800 }, - { 0xc800 }, - { 0xd800 }, - { 0xe800 }, - { 0xf800 } -}; - -#define MEM_MASK 64 - -static const struct { - unsigned char mem_index; - unsigned long mem_start; - unsigned char num_pages; -} mem_table[] = { - { 16, 0x0c0000, 40 }, - { 18, 0x0c4000, 40 }, - { 20, 0x0c8000, 40 }, - { 22, 0x0cc000, 40 }, - { 24, 0x0d0000, 40 }, - { 26, 0x0d4000, 40 }, - { 28, 0x0d8000, 40 }, - { 30, 0x0dc000, 40 }, - {144, 0xfc0000, 40 }, - {148, 0xfc8000, 40 }, - {154, 0xfd0000, 40 }, - {156, 0xfd8000, 40 }, - { 0, 0x0c0000, 20 }, - { 1, 0x0c2000, 20 }, - { 2, 0x0c4000, 20 }, - { 3, 0x0c6000, 20 } -}; - -#define IRQ_MASK 243 -static const struct { - unsigned char new_irq; - unsigned char old_irq; -} irq_table[] = { - { 3, 3 }, - { 4, 4 }, - { 10, 10 }, - { 14, 15 } -}; - -static short smc_mca_adapter_ids[] __initdata = { - 0x61c8, - 0x61c9, - 0x6fc0, - 0x6fc1, - 0x6fc2, - 0xefd4, - 0xefd5, - 0xefe5, - 0x0000 -}; - -static char *smc_mca_adapter_names[] __initdata = { - "SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)", - "SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)", - "WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)", - "WD Starcard PLUS/A (WD8003ST/A)", - "WD Ethercard PLUS 10T/A (WD8003W/A)", - "IBM PS/2 Adapter/A for Ethernet UTP/AUI (WD8013WP/A)", - "IBM PS/2 Adapter/A for Ethernet BNC/AUI (WD8013EP/A)", - "IBM PS/2 Adapter/A for Ethernet", - NULL -}; - -static int ultra_found = 0; - - -static const struct net_device_ops ultramca_netdev_ops = { - .ndo_open = ultramca_open, - .ndo_stop = ultramca_close_card, - - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_rx_mode = ei_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, -#endif -}; - -static int __init ultramca_probe(struct device *gen_dev) -{ - unsigned short ioaddr; - struct net_device *dev; - unsigned char reg4, num_pages; - struct mca_device *mca_dev = to_mca_device(gen_dev); - char slot = mca_dev->slot; - unsigned char pos2 = 0xff, pos3 = 0xff, pos4 = 0xff, pos5 = 0xff; - int i, rc; - int adapter = mca_dev->index; - int tbase = 0; - int tirq = 0; - int base_addr = ultra_io[ultra_found]; - int irq = ultra_irq[ultra_found]; - - if (base_addr || irq) { - 
printk(KERN_INFO "Probing for SMC MCA adapter"); - if (base_addr) { - printk(KERN_INFO " at I/O address 0x%04x%c", - base_addr, irq ? ' ' : '\n'); - } - if (irq) { - printk(KERN_INFO "using irq %d\n", irq); - } - } - - tirq = 0; - tbase = 0; - - /* If we're trying to match a specificied irq or io address, - * we'll reject the adapter found unless it's the one we're - * looking for */ - - pos2 = mca_device_read_stored_pos(mca_dev, 2); /* io_addr */ - pos3 = mca_device_read_stored_pos(mca_dev, 3); /* shared mem */ - pos4 = mca_device_read_stored_pos(mca_dev, 4); /* ROM bios addr range */ - pos5 = mca_device_read_stored_pos(mca_dev, 5); /* irq, media and RIPL */ - - /* Test the following conditions: - * - If an irq parameter is supplied, compare it - * with the irq of the adapter we found - * - If a base_addr paramater is given, compare it - * with the base_addr of the adapter we found - * - Check that the irq and the base_addr of the - * adapter we found is not already in use by - * this driver - */ - - switch (mca_dev->index) { - case _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A: - case _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A: - case _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A: - case _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A: - { - tbase = addr_table[(pos2 & 0xf0) >> 4].base_addr; - tirq = irq_table[(pos5 & 0xc) >> 2].new_irq; - break; - } - case _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A: - case _6fc1_WD_Starcard_PLUS_A_WD8003ST_A: - case _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A: - case _efe5_IBM_PS2_Adapter_A_for_Ethernet: - { - tbase = ((pos2 & 0x0fe) * 0x10); - tirq = irq_table[(pos5 & 3)].old_irq; - break; - } - } - - if(!tirq || !tbase || - (irq && irq != tirq) || - (base_addr && tbase != base_addr)) - /* FIXME: we're trying to force the ordering of the - * devices here, there should be a way of getting this - * to happen */ - return -ENXIO; - - /* Adapter found. */ - dev = alloc_ei_netdev(); - if(!dev) - return -ENODEV; - - SET_NETDEV_DEV(dev, gen_dev); - mca_device_set_name(mca_dev, smc_mca_adapter_names[adapter]); - mca_device_set_claim(mca_dev, 1); - - printk(KERN_INFO "smc_mca: %s found in slot %d\n", - smc_mca_adapter_names[adapter], slot + 1); - - ultra_found++; - - dev->base_addr = ioaddr = mca_device_transform_ioport(mca_dev, tbase); - dev->irq = mca_device_transform_irq(mca_dev, tirq); - dev->mem_start = 0; - num_pages = 40; - - switch (adapter) { /* card-# in const array above [hs] */ - case _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A: - case _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A: - { - for (i = 0; i < 16; i++) { /* taking 16 counts - * up to 15 [hs] */ - if (mem_table[i].mem_index == (pos3 & ~MEM_MASK)) { - dev->mem_start = (unsigned long) - mca_device_transform_memory(mca_dev, (void *)mem_table[i].mem_start); - num_pages = mem_table[i].num_pages; - } - } - break; - } - case _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A: - case _6fc1_WD_Starcard_PLUS_A_WD8003ST_A: - case _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A: - case _efe5_IBM_PS2_Adapter_A_for_Ethernet: - { - dev->mem_start = (unsigned long) - mca_device_transform_memory(mca_dev, (void *)((pos3 & 0xfc) * 0x1000)); - num_pages = 0x40; - break; - } - case _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A: - case _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A: - { - /* courtesy of gamera@quartz.ocn.ne.jp, pos3 indicates - * the index of the 0x2000 step. 
- * beware different number of pages [hs] - */ - dev->mem_start = (unsigned long) - mca_device_transform_memory(mca_dev, (void *)(0xc0000 + (0x2000 * (pos3 & 0xf)))); - num_pages = 0x20 + (2 * (pos3 & 0x10)); - break; - } - } - - /* sanity check, shouldn't happen */ - if (dev->mem_start == 0) { - rc = -ENODEV; - goto err_unclaim; - } - - if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME)) { - rc = -ENODEV; - goto err_unclaim; - } - - reg4 = inb(ioaddr + 4) & 0x7f; - outb(reg4, ioaddr + 4); - - for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + 8 + i); - - printk(KERN_INFO "smc_mca[%d]: Parameters: %#3x, %pM", - slot + 1, ioaddr, dev->dev_addr); - - /* Switch from the station address to the alternate register set - * and read the useful registers there. - */ - - outb(0x80 | reg4, ioaddr + 4); - - /* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot. - */ - - outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c); - - /* Switch back to the station address register set so that - * the MS-DOS driver can find the card after a warm boot. - */ - - outb(reg4, ioaddr + 4); - - dev_set_drvdata(gen_dev, dev); - - /* The 8390 isn't at the base address, so fake the offset - */ - - dev->base_addr = ioaddr + ULTRA_NIC_OFFSET; - - ei_status.name = "SMC Ultra MCA"; - ei_status.word16 = 1; - ei_status.tx_start_page = START_PG; - ei_status.rx_start_page = START_PG + TX_PAGES; - ei_status.stop_page = num_pages; - - ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG) * 256); - if (!ei_status.mem) { - rc = -ENOMEM; - goto err_release_region; - } - - dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG) * 256; - - printk(", IRQ %d memory %#lx-%#lx.\n", - dev->irq, dev->mem_start, dev->mem_end - 1); - - ei_status.reset_8390 = &ultramca_reset_8390; - ei_status.block_input = &ultramca_block_input; - ei_status.block_output = &ultramca_block_output; - ei_status.get_8390_hdr = &ultramca_get_8390_hdr; - - ei_status.priv = slot; - - dev->netdev_ops = &ultramca_netdev_ops; - - NS8390_init(dev, 0); - - rc = register_netdev(dev); - if (rc) - goto err_unmap; - - return 0; - -err_unmap: - iounmap(ei_status.mem); -err_release_region: - release_region(ioaddr, ULTRA_IO_EXTENT); -err_unclaim: - mca_device_set_claim(mca_dev, 0); - free_netdev(dev); - return rc; -} - -static int ultramca_open(struct net_device *dev) -{ - int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ - int retval; - - if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) - return retval; - - outb(ULTRA_MEMENB, ioaddr); /* Enable memory */ - outb(0x80, ioaddr + 5); /* ??? */ - outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */ - outb(0x04, ioaddr + 5); /* ??? */ - - /* Set the early receive warning level in window 0 high enough not - * to receive ERW interrupts. - */ - - /* outb_p(E8390_NODMA + E8390_PAGE0, dev->base_addr); - * outb(0xff, dev->base_addr + EN0_ERWCNT); - */ - - ei_open(dev); - return 0; -} - -static void ultramca_reset_8390(struct net_device *dev) -{ - int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ - - outb(ULTRA_RESET, ioaddr); - if (ei_debug > 1) - printk("resetting Ultra, t=%ld...", jiffies); - ei_status.txing = 0; - - outb(0x80, ioaddr + 5); /* ??? */ - outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */ - - if (ei_debug > 1) - printk("reset done\n"); -} - -/* Grab the 8390 specific header. 
Similar to the block_input routine, but - * we don't need to be concerned with ring wrap as the header will be at - * the start of a page, so we optimize accordingly. - */ - -static void ultramca_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - void __iomem *hdr_start = ei_status.mem + ((ring_page - START_PG) << 8); - -#ifdef notdef - /* Officially this is what we are doing, but the readl() is faster */ - memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); -#else - ((unsigned int*)hdr)[0] = readl(hdr_start); -#endif -} - -/* Block input and output are easy on shared memory ethercards, the only - * complication is when the ring buffer wraps. - */ - -static void ultramca_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) -{ - void __iomem *xfer_start = ei_status.mem + ring_offset - START_PG * 256; - - if (ring_offset + count > ei_status.stop_page * 256) { - /* We must wrap the input move. */ - int semi_count = ei_status.stop_page * 256 - ring_offset; - memcpy_fromio(skb->data, xfer_start, semi_count); - count -= semi_count; - memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); - } else { - memcpy_fromio(skb->data, xfer_start, count); - } - -} - -static void ultramca_block_output(struct net_device *dev, int count, const unsigned char *buf, - int start_page) -{ - void __iomem *shmem = ei_status.mem + ((start_page - START_PG) << 8); - - memcpy_toio(shmem, buf, count); -} - -static int ultramca_close_card(struct net_device *dev) -{ - int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ - - netif_stop_queue(dev); - - if (ei_debug > 1) - printk("%s: Shutting down ethercard.\n", dev->name); - - outb(0x00, ioaddr + 6); /* Disable interrupts. */ - free_irq(dev->irq, dev); - - NS8390_init(dev, 0); - /* We should someday disable shared memory and change to 8-bit mode - * "just in case"... - */ - - return 0; -} - -static int ultramca_remove(struct device *gen_dev) -{ - struct mca_device *mca_dev = to_mca_device(gen_dev); - struct net_device *dev = dev_get_drvdata(gen_dev); - - if (dev) { - /* NB: ultra_close_card() does free_irq */ - int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; - - unregister_netdev(dev); - mca_device_set_claim(mca_dev, 0); - release_region(ioaddr, ULTRA_IO_EXTENT); - iounmap(ei_status.mem); - free_netdev(dev); - } - return 0; -} - - -static struct mca_driver ultra_driver = { - .id_table = smc_mca_adapter_ids, - .driver = { - .name = "smc-mca", - .bus = &mca_bus_type, - .probe = ultramca_probe, - .remove = ultramca_remove, - } -}; - -static int __init ultramca_init_module(void) -{ - if(!MCA_bus) - return -ENXIO; - - mca_register_driver(&ultra_driver); - - return ultra_found ? 
0 : -ENXIO; -} - -static void __exit ultramca_cleanup_module(void) -{ - mca_unregister_driver(&ultra_driver); -} -module_init(ultramca_init_module); -module_exit(ultramca_cleanup_module); - diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index c63a64cb608..a11af5cc484 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -174,6 +174,7 @@ source "drivers/net/ethernet/tile/Kconfig" source "drivers/net/ethernet/toshiba/Kconfig" source "drivers/net/ethernet/tundra/Kconfig" source "drivers/net/ethernet/via/Kconfig" +source "drivers/net/ethernet/wiznet/Kconfig" source "drivers/net/ethernet/xilinx/Kconfig" source "drivers/net/ethernet/xircom/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 9676a5109d9..878ad32b93f 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -73,5 +73,6 @@ obj-$(CONFIG_TILE_NET) += tile/ obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/ obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/ obj-$(CONFIG_NET_VENDOR_VIA) += via/ +obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/ obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index d896816512c..d920a529ba2 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -114,15 +114,6 @@ static int rx_copybreak /* = 0 */; #define DMA_BURST_SIZE 128 #endif -/* Used to pass the media type, etc. - Both 'options[]' and 'full_duplex[]' exist for driver interoperability. - The media type is usually passed in 'options[]'. - These variables are deprecated, use ethtool instead. -Ion -*/ -#define MAX_UNITS 8 /* More are supported, limit only on options */ -static int options[MAX_UNITS] = {0, }; -static int full_duplex[MAX_UNITS] = {0, }; - /* Operational parameters that are set at compile time. */ /* The "native" ring sizes are either 256 or 2048. 
@@ -192,8 +183,6 @@ module_param(debug, int, 0); module_param(rx_copybreak, int, 0); module_param(intr_latency, int, 0); module_param(small_frames, int, 0); -module_param_array(options, int, NULL, 0); -module_param_array(full_duplex, int, NULL, 0); module_param(enable_hw_cksum, int, 0); MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt"); MODULE_PARM_DESC(mtu, "MTU (all boards)"); @@ -201,8 +190,6 @@ MODULE_PARM_DESC(debug, "Debug level (0-6)"); MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds"); MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)"); -MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex"); -MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)"); MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)"); /* @@ -657,10 +644,10 @@ static const struct net_device_ops netdev_ops = { static int __devinit starfire_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { + struct device *d = &pdev->dev; struct netdev_private *np; - int i, irq, option, chip_idx = ent->driver_data; + int i, irq, chip_idx = ent->driver_data; struct net_device *dev; - static int card_idx = -1; long ioaddr; void __iomem *base; int drv_flags, io_size; @@ -673,15 +660,13 @@ static int __devinit starfire_init_one(struct pci_dev *pdev, printk(version); #endif - card_idx++; - if (pci_enable_device (pdev)) return -EIO; ioaddr = pci_resource_start(pdev, 0); io_size = pci_resource_len(pdev, 0); if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) { - printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx); + dev_err(d, "no PCI MEM resources, aborting\n"); return -ENODEV; } @@ -694,14 +679,14 @@ static int __devinit starfire_init_one(struct pci_dev *pdev, irq = pdev->irq; if (pci_request_regions (pdev, DRV_NAME)) { - printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx); + dev_err(d, "cannot reserve PCI resources, aborting\n"); goto err_out_free_netdev; } base = ioremap(ioaddr, io_size); if (!base) { - printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n", - card_idx, io_size, ioaddr); + dev_err(d, "cannot remap %#x @ %#lx, aborting\n", + io_size, ioaddr); goto err_out_free_res; } @@ -753,9 +738,6 @@ static int __devinit starfire_init_one(struct pci_dev *pdev, /* wait a little longer */ udelay(1000); - dev->base_addr = (unsigned long)base; - dev->irq = irq; - np = netdev_priv(dev); np->dev = dev; np->base = base; @@ -772,21 +754,6 @@ static int __devinit starfire_init_one(struct pci_dev *pdev, drv_flags = netdrv_tbl[chip_idx].drv_flags; - option = card_idx < MAX_UNITS ? options[card_idx] : 0; - if (dev->mem_start) - option = dev->mem_start; - - /* The lower four bits are the media type. 
*/ - if (option & 0x200) - np->mii_if.full_duplex = 1; - - if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0) - np->mii_if.full_duplex = 1; - - if (np->mii_if.full_duplex) - np->mii_if.force_media = 1; - else - np->mii_if.force_media = 0; np->speed100 = 1; /* timer resolution is 128 * 0.8us */ @@ -909,13 +876,14 @@ static int netdev_open(struct net_device *dev) const __be32 *fw_rx_data, *fw_tx_data; struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; + const int irq = np->pci_dev->irq; int i, retval; size_t tx_size, rx_size; size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size; /* Do we ever need to reset the chip??? */ - retval = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); + retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev); if (retval) return retval; @@ -924,7 +892,7 @@ static int netdev_open(struct net_device *dev) writel(1, ioaddr + PCIDeviceConfig); if (debug > 1) printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", - dev->name, dev->irq); + dev->name, irq); /* Allocate the various queues. */ if (!np->queue_mem) { @@ -935,7 +903,7 @@ static int netdev_open(struct net_device *dev) np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size; np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma); if (np->queue_mem == NULL) { - free_irq(dev->irq, dev); + free_irq(irq, dev); return -ENOMEM; } @@ -1962,7 +1930,7 @@ static int netdev_close(struct net_device *dev) } } - free_irq(dev->irq, dev); + free_irq(np->pci_dev->irq, dev); /* Free all the skbuffs in the Rx queue. */ for (i = 0; i < RX_RING_SIZE; i++) { diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index ab4daeccdf9..f816426e108 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -548,6 +548,25 @@ static int bfin_mac_ethtool_setwol(struct net_device *dev, return 0; } +static int bfin_mac_ethtool_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + info->so_timestamping = + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_SYS_HARDWARE; + info->phc_index = -1; + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); + return 0; +} + static const struct ethtool_ops bfin_mac_ethtool_ops = { .get_settings = bfin_mac_ethtool_getsettings, .set_settings = bfin_mac_ethtool_setsettings, @@ -555,6 +574,7 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = { .get_drvinfo = bfin_mac_ethtool_getdrvinfo, .get_wol = bfin_mac_ethtool_getwol, .set_wol = bfin_mac_ethtool_setwol, + .get_ts_info = bfin_mac_ethtool_get_ts_info, }; /**************************************************************************/ diff --git a/drivers/net/ethernet/amd/depca.c b/drivers/net/ethernet/amd/depca.c index 86dd95766a6..c771de71612 100644 --- a/drivers/net/ethernet/amd/depca.c +++ b/drivers/net/ethernet/amd/depca.c @@ -155,23 +155,10 @@ 2 depca's in a PC). ************************************************************************ - Support for MCA EtherWORKS cards added 11-3-98. + Support for MCA EtherWORKS cards added 11-3-98. (MCA since deleted) Verified to work with up to 2 DE212 cards in a system (although not fully stress-tested). 
- Currently known bugs/limitations: - - Note: with the MCA stuff as a module, it trusts the MCA configuration, - not the command line for IRQ and memory address. You can - specify them if you want, but it will throw your values out. - You still have to pass the IO address it was configured as - though. - - ************************************************************************ - TO DO: - ------ - - Revision History ---------------- @@ -261,10 +248,6 @@ #include <asm/io.h> #include <asm/dma.h> -#ifdef CONFIG_MCA -#include <linux/mca.h> -#endif - #ifdef CONFIG_EISA #include <linux/eisa.h> #endif @@ -360,44 +343,6 @@ static struct eisa_driver depca_eisa_driver = { }; #endif -#ifdef CONFIG_MCA -/* -** Adapter ID for the MCA EtherWORKS DE210/212 adapter -*/ -#define DE210_ID 0x628d -#define DE212_ID 0x6def - -static short depca_mca_adapter_ids[] = { - DE210_ID, - DE212_ID, - 0x0000 -}; - -static char *depca_mca_adapter_name[] = { - "DEC EtherWORKS MC Adapter (DE210)", - "DEC EtherWORKS MC Adapter (DE212)", - NULL -}; - -static enum depca_type depca_mca_adapter_type[] = { - de210, - de212, - 0 -}; - -static int depca_mca_probe (struct device *); - -static struct mca_driver depca_mca_driver = { - .id_table = depca_mca_adapter_ids, - .driver = { - .name = depca_string, - .bus = &mca_bus_type, - .probe = depca_mca_probe, - .remove = __devexit_p(depca_device_remove), - }, -}; -#endif - static int depca_isa_probe (struct platform_device *); static int __devexit depca_isa_remove(struct platform_device *pdev) @@ -464,8 +409,7 @@ struct depca_private { char adapter_name[DEPCA_STRLEN]; /* /proc/ioports string */ enum depca_type adapter; /* Adapter type */ enum { - DEPCA_BUS_MCA = 1, - DEPCA_BUS_ISA, + DEPCA_BUS_ISA = 1, DEPCA_BUS_EISA, } depca_bus; /* type of bus */ struct depca_init init_block; /* Shadow Initialization block */ @@ -624,12 +568,6 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device) dev_name(device), depca_signature[lp->adapter], ioaddr); switch (lp->depca_bus) { -#ifdef CONFIG_MCA - case DEPCA_BUS_MCA: - printk(" (MCA slot %d)", to_mca_device(device)->slot + 1); - break; -#endif - #ifdef CONFIG_EISA case DEPCA_BUS_EISA: printk(" (EISA slot %d)", to_eisa_device(device)->slot); @@ -661,10 +599,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device) if (nicsr & BUF) { nicsr &= ~BS; /* DEPCA RAM in top 32k */ netRAM -= 32; - - /* Only EISA/ISA needs start address to be re-computed */ - if (lp->depca_bus != DEPCA_BUS_MCA) - mem_start += 0x8000; + mem_start += 0x8000; } if ((mem_len = (NUM_RX_DESC * (sizeof(struct depca_rx_desc) + RX_BUFF_SZ) + NUM_TX_DESC * (sizeof(struct depca_tx_desc) + TX_BUFF_SZ) + sizeof(struct depca_init))) @@ -1079,7 +1014,8 @@ static int depca_rx(struct net_device *dev) } else { lp->pktStats.multicast++; } - } else if (compare_ether_addr(buf, dev->dev_addr) == 0) { + } else if (ether_addr_equal(buf, + dev->dev_addr)) { lp->pktStats.unicast++; } @@ -1324,130 +1260,6 @@ static int __init depca_common_init (u_long ioaddr, struct net_device **devp) return status; } -#ifdef CONFIG_MCA -/* -** Microchannel bus I/O device probe -*/ -static int __init depca_mca_probe(struct device *device) -{ - unsigned char pos[2]; - unsigned char where; - unsigned long iobase, mem_start; - int irq, err; - struct mca_device *mdev = to_mca_device (device); - struct net_device *dev; - struct depca_private *lp; - - /* - ** Search for the adapter. If an address has been given, search - ** specifically for the card at that address. 
Otherwise find the - ** first card in the system. - */ - - pos[0] = mca_device_read_stored_pos(mdev, 2); - pos[1] = mca_device_read_stored_pos(mdev, 3); - - /* - ** IO of card is handled by bits 1 and 2 of pos0. - ** - ** bit2 bit1 IO - ** 0 0 0x2c00 - ** 0 1 0x2c10 - ** 1 0 0x2c20 - ** 1 1 0x2c30 - */ - where = (pos[0] & 6) >> 1; - iobase = 0x2c00 + (0x10 * where); - - /* - ** Found the adapter we were looking for. Now start setting it up. - ** - ** First work on decoding the IRQ. It's stored in the lower 4 bits - ** of pos1. Bits are as follows (from the ADF file): - ** - ** Bits - ** 3 2 1 0 IRQ - ** -------------------- - ** 0 0 1 0 5 - ** 0 0 0 1 9 - ** 0 1 0 0 10 - ** 1 0 0 0 11 - */ - where = pos[1] & 0x0f; - switch (where) { - case 1: - irq = 9; - break; - case 2: - irq = 5; - break; - case 4: - irq = 10; - break; - case 8: - irq = 11; - break; - default: - printk("%s: mca_probe IRQ error. You should never get here (%d).\n", mdev->name, where); - return -EINVAL; - } - - /* - ** Shared memory address of adapter is stored in bits 3-5 of pos0. - ** They are mapped as follows: - ** - ** Bit - ** 5 4 3 Memory Addresses - ** 0 0 0 C0000-CFFFF (64K) - ** 1 0 0 C8000-CFFFF (32K) - ** 0 0 1 D0000-DFFFF (64K) - ** 1 0 1 D8000-DFFFF (32K) - ** 0 1 0 E0000-EFFFF (64K) - ** 1 1 0 E8000-EFFFF (32K) - */ - where = (pos[0] & 0x18) >> 3; - mem_start = 0xc0000 + (where * 0x10000); - if (pos[0] & 0x20) { - mem_start += 0x8000; - } - - /* claim the slot */ - strncpy(mdev->name, depca_mca_adapter_name[mdev->index], - sizeof(mdev->name)); - mca_device_set_claim(mdev, 1); - - /* - ** Get everything allocated and initialized... (almost just - ** like the ISA and EISA probes) - */ - irq = mca_device_transform_irq(mdev, irq); - iobase = mca_device_transform_ioport(mdev, iobase); - - if ((err = depca_common_init (iobase, &dev))) - goto out_unclaim; - - dev->irq = irq; - dev->base_addr = iobase; - lp = netdev_priv(dev); - lp->depca_bus = DEPCA_BUS_MCA; - lp->adapter = depca_mca_adapter_type[mdev->index]; - lp->mem_start = mem_start; - - if ((err = depca_hw_init(dev, device))) - goto out_free; - - return 0; - - out_free: - free_netdev (dev); - release_region (iobase, DEPCA_TOTAL_SIZE); - out_unclaim: - mca_device_set_claim(mdev, 0); - - return err; -} -#endif - /* ** ISA bus I/O device probe */ @@ -2058,15 +1870,10 @@ static int __init depca_module_init (void) { int err = 0; -#ifdef CONFIG_MCA - err = mca_register_driver(&depca_mca_driver); - if (err) - goto err; -#endif #ifdef CONFIG_EISA err = eisa_driver_register(&depca_eisa_driver); if (err) - goto err_mca; + goto err_eisa; #endif err = platform_driver_register(&depca_isa_driver); if (err) @@ -2078,11 +1885,6 @@ static int __init depca_module_init (void) err_eisa: #ifdef CONFIG_EISA eisa_driver_unregister(&depca_eisa_driver); -err_mca: -#endif -#ifdef CONFIG_MCA - mca_unregister_driver(&depca_mca_driver); -err: #endif return err; } @@ -2090,9 +1892,6 @@ err: static void __exit depca_module_exit (void) { int i; -#ifdef CONFIG_MCA - mca_unregister_driver (&depca_mca_driver); -#endif #ifdef CONFIG_EISA eisa_driver_unregister (&depca_eisa_driver); #endif diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h index ca70e16b6e2..b2bf324631d 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c.h +++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h @@ -74,8 +74,6 @@ #define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN) #define MAX_JUMBO_FRAME_SIZE (6*1024) -#define MAX_TSO_FRAME_SIZE (7*1024) -#define 
MAX_TX_OFFLOAD_THRESH (9*1024) #define AT_MAX_RECEIVE_QUEUE 4 #define AT_DEF_RECEIVE_QUEUE 1 @@ -100,7 +98,7 @@ #define ATL1C_ASPM_L0s_ENABLE 0x0001 #define ATL1C_ASPM_L1_ENABLE 0x0002 -#define AT_REGS_LEN (75 * sizeof(u32)) +#define AT_REGS_LEN (74 * sizeof(u32)) #define AT_EEPROM_LEN 512 #define ATL1C_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i])) @@ -297,20 +295,6 @@ enum atl1c_dma_req_block { atl1c_dma_req_4096 = 5 }; -enum atl1c_rss_mode { - atl1c_rss_mode_disable = 0, - atl1c_rss_sig_que = 1, - atl1c_rss_mul_que_sig_int = 2, - atl1c_rss_mul_que_mul_int = 4, -}; - -enum atl1c_rss_type { - atl1c_rss_disable = 0, - atl1c_rss_ipv4 = 1, - atl1c_rss_ipv4_tcp = 2, - atl1c_rss_ipv6 = 4, - atl1c_rss_ipv6_tcp = 8 -}; enum atl1c_nic_type { athr_l1c = 0, @@ -388,7 +372,6 @@ struct atl1c_hw { enum atl1c_dma_order dma_order; enum atl1c_dma_rcb rcb_value; enum atl1c_dma_req_block dmar_block; - enum atl1c_dma_req_block dmaw_block; u16 device_id; u16 vendor_id; @@ -399,8 +382,6 @@ struct atl1c_hw { u16 phy_id2; u32 intr_mask; - u8 dmaw_dly_cnt; - u8 dmar_dly_cnt; u8 preamble_len; u16 max_frame_size; @@ -440,10 +421,6 @@ struct atl1c_hw { #define ATL1C_FPGA_VERSION 0x8000 u16 link_cap_flags; #define ATL1C_LINK_CAP_1000M 0x0001 - u16 cmb_tpd; - u16 cmb_rrd; - u16 cmb_rx_timer; /* 2us resolution */ - u16 cmb_tx_timer; u32 smb_timer; u16 rrd_thresh; /* Threshold of number of RRD produced to trigger @@ -451,9 +428,6 @@ struct atl1c_hw { u16 tpd_thresh; u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. */ u8 rfd_burst; - enum atl1c_rss_type rss_type; - enum atl1c_rss_mode rss_mode; - u8 rss_hash_bits; u32 base_cpu; u32 indirect_tab; u8 mac_addr[ETH_ALEN]; @@ -462,12 +436,12 @@ struct atl1c_hw { bool phy_configured; bool re_autoneg; bool emi_ca; + bool msi_lnkpatch; /* link patch for specific platforms */ }; /* * atl1c_ring_header represents a single, contiguous block of DMA space - * mapped for the three descriptor rings (tpd, rfd, rrd) and the two - * message blocks (cmb, smb) described below + * mapped for the three descriptor rings (tpd, rfd, rrd) described below */ struct atl1c_ring_header { void *desc; /* virtual address */ @@ -541,16 +515,6 @@ struct atl1c_rrd_ring { u16 next_to_clean; }; -struct atl1c_cmb { - void *cmb; - dma_addr_t dma; -}; - -struct atl1c_smb { - void *smb; - dma_addr_t dma; -}; - /* board specific private data structure */ struct atl1c_adapter { struct net_device *netdev; @@ -586,11 +550,8 @@ struct atl1c_adapter { /* All Descriptor memory */ struct atl1c_ring_header ring_header; struct atl1c_tpd_ring tpd_ring[AT_MAX_TRANSMIT_QUEUE]; - struct atl1c_rfd_ring rfd_ring[AT_MAX_RECEIVE_QUEUE]; - struct atl1c_rrd_ring rrd_ring[AT_MAX_RECEIVE_QUEUE]; - struct atl1c_cmb cmb; - struct atl1c_smb smb; - int num_rx_queues; + struct atl1c_rfd_ring rfd_ring; + struct atl1c_rrd_ring rrd_ring; u32 bd_number; /* board number;*/ }; @@ -618,8 +579,14 @@ struct atl1c_adapter { #define AT_WRITE_REGW(a, reg, value) (\ writew((value), ((a)->hw_addr + reg))) -#define AT_READ_REGW(a, reg) (\ - readw((a)->hw_addr + reg)) +#define AT_READ_REGW(a, reg, pdata) do { \ + if (unlikely((a)->hibernate)) { \ + readw((a)->hw_addr + reg); \ + *(u16 *)pdata = readw((a)->hw_addr + reg); \ + } else { \ + *(u16 *)pdata = readw((a)->hw_addr + reg); \ + } \ + } while (0) #define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \ writel((value), (((a)->hw_addr + reg) + ((offset) << 2)))) diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 
index 0a9326aa58b..859ea844ba0 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c @@ -141,8 +141,7 @@ static void atl1c_get_regs(struct net_device *netdev, memset(p, 0, AT_REGS_LEN); - regs->version = 0; - AT_READ_REG(hw, REG_VPD_CAP, p++); + regs->version = 1; AT_READ_REG(hw, REG_PM_CTRL, p++); AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL, p++); AT_READ_REG(hw, REG_TWSI_CTRL, p++); @@ -154,7 +153,7 @@ static void atl1c_get_regs(struct net_device *netdev, AT_READ_REG(hw, REG_LINK_CTRL, p++); AT_READ_REG(hw, REG_IDLE_STATUS, p++); AT_READ_REG(hw, REG_MDIO_CTRL, p++); - AT_READ_REG(hw, REG_SERDES_LOCK, p++); + AT_READ_REG(hw, REG_SERDES, p++); AT_READ_REG(hw, REG_MAC_CTRL, p++); AT_READ_REG(hw, REG_MAC_IPG_IFG, p++); AT_READ_REG(hw, REG_MAC_STA_ADDR, p++); @@ -167,9 +166,9 @@ static void atl1c_get_regs(struct net_device *netdev, AT_READ_REG(hw, REG_WOL_CTRL, p++); atl1c_read_phy_reg(hw, MII_BMCR, &phy_data); - regs_buff[73] = (u32) phy_data; + regs_buff[AT_REGS_LEN/sizeof(u32) - 2] = (u32) phy_data; atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); - regs_buff[74] = (u32) phy_data; + regs_buff[AT_REGS_LEN/sizeof(u32) - 1] = (u32) phy_data; } static int atl1c_get_eeprom_len(struct net_device *netdev) diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c index bd1667cbffa..ff9c73859d4 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c @@ -43,7 +43,7 @@ int atl1c_check_eeprom_exist(struct atl1c_hw *hw) return 0; } -void atl1c_hw_set_mac_addr(struct atl1c_hw *hw) +void atl1c_hw_set_mac_addr(struct atl1c_hw *hw, u8 *mac_addr) { u32 value; /* @@ -51,35 +51,48 @@ void atl1c_hw_set_mac_addr(struct atl1c_hw *hw) * 0: 6AF600DC 1: 000B * low dword */ - value = (((u32)hw->mac_addr[2]) << 24) | - (((u32)hw->mac_addr[3]) << 16) | - (((u32)hw->mac_addr[4]) << 8) | - (((u32)hw->mac_addr[5])) ; + value = mac_addr[2] << 24 | + mac_addr[3] << 16 | + mac_addr[4] << 8 | + mac_addr[5]; AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value); /* hight dword */ - value = (((u32)hw->mac_addr[0]) << 8) | - (((u32)hw->mac_addr[1])) ; + value = mac_addr[0] << 8 | + mac_addr[1]; AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value); } +/* read mac address from hardware register */ +static bool atl1c_read_current_addr(struct atl1c_hw *hw, u8 *eth_addr) +{ + u32 addr[2]; + + AT_READ_REG(hw, REG_MAC_STA_ADDR, &addr[0]); + AT_READ_REG(hw, REG_MAC_STA_ADDR + 4, &addr[1]); + + *(u32 *) ð_addr[2] = htonl(addr[0]); + *(u16 *) ð_addr[0] = htons((u16)addr[1]); + + return is_valid_ether_addr(eth_addr); +} + /* * atl1c_get_permanent_address * return 0 if get valid mac address, */ static int atl1c_get_permanent_address(struct atl1c_hw *hw) { - u32 addr[2]; u32 i; u32 otp_ctrl_data; u32 twsi_ctrl_data; - u32 ltssm_ctrl_data; - u32 wol_data; - u8 eth_addr[ETH_ALEN]; u16 phy_data; bool raise_vol = false; + /* MAC-address from BIOS is the 1st priority */ + if (atl1c_read_current_addr(hw, hw->perm_mac_addr)) + return 0; + /* init */ - addr[0] = addr[1] = 0; AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data); if (atl1c_check_eeprom_exist(hw)) { if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) { @@ -91,33 +104,17 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw) msleep(1); } } - - if (hw->nic_type == athr_l2c_b || - hw->nic_type == athr_l2c_b2 || - hw->nic_type == athr_l1d) { - atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00); - if (atl1c_read_phy_reg(hw, 
MII_DBG_DATA, &phy_data)) - goto out; - phy_data &= 0xFF7F; - atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data); - - atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B); - if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data)) - goto out; - phy_data |= 0x8; - atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data); + /* raise voltage temporally for l2cb */ + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) { + atl1c_read_phy_dbg(hw, MIIDBG_ANACTRL, &phy_data); + phy_data &= ~ANACTRL_HB_EN; + atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, phy_data); + atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data); + phy_data |= VOLT_CTRL_SWLOWEST; + atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data); udelay(20); raise_vol = true; } - /* close open bit of ReadOnly*/ - AT_READ_REG(hw, REG_LTSSM_ID_CTRL, <ssm_ctrl_data); - ltssm_ctrl_data &= ~LTSSM_ID_EN_WRO; - AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, ltssm_ctrl_data); - - /* clear any WOL settings */ - AT_WRITE_REG(hw, REG_WOL_CTRL, 0); - AT_READ_REG(hw, REG_WOL_CTRL, &wol_data); - AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data); twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART; @@ -138,37 +135,18 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw) msleep(1); } if (raise_vol) { - if (hw->nic_type == athr_l2c_b || - hw->nic_type == athr_l2c_b2 || - hw->nic_type == athr_l1d || - hw->nic_type == athr_l1d_2) { - atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00); - if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data)) - goto out; - phy_data |= 0x80; - atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data); - - atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B); - if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data)) - goto out; - phy_data &= 0xFFF7; - atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data); - udelay(20); - } + atl1c_read_phy_dbg(hw, MIIDBG_ANACTRL, &phy_data); + phy_data |= ANACTRL_HB_EN; + atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, phy_data); + atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data); + phy_data &= ~VOLT_CTRL_SWLOWEST; + atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data); + udelay(20); } - /* maybe MAC-address is from BIOS */ - AT_READ_REG(hw, REG_MAC_STA_ADDR, &addr[0]); - AT_READ_REG(hw, REG_MAC_STA_ADDR + 4, &addr[1]); - *(u32 *) ð_addr[2] = swab32(addr[0]); - *(u16 *) ð_addr[0] = swab16(*(u16 *)&addr[1]); - - if (is_valid_ether_addr(eth_addr)) { - memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + if (atl1c_read_current_addr(hw, hw->perm_mac_addr)) return 0; - } -out: return -1; } @@ -278,33 +256,158 @@ void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value) } /* - * Reads the value from a PHY register - * hw - Struct containing variables accessed by shared code - * reg_addr - address of the PHY register to read + * wait mdio module be idle + * return true: idle + * false: still busy */ -int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data) +bool atl1c_wait_mdio_idle(struct atl1c_hw *hw) { u32 val; int i; - val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | - MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | - MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + for (i = 0; i < MDIO_MAX_AC_TO; i++) { + AT_READ_REG(hw, REG_MDIO_CTRL, &val); + if (!(val & (MDIO_CTRL_BUSY | MDIO_CTRL_START))) + break; + udelay(10); + } + + return i != MDIO_MAX_AC_TO; +} + +void atl1c_stop_phy_polling(struct atl1c_hw *hw) +{ + if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION)) + return; + + AT_WRITE_REG(hw, REG_MDIO_CTRL, 0); + atl1c_wait_mdio_idle(hw); +} + +void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel) +{ + u32 val; + + if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION)) + 
return; + val = MDIO_CTRL_SPRES_PRMBL | + FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | + FIELDX(MDIO_CTRL_REG, 1) | + MDIO_CTRL_START | + MDIO_CTRL_OP_READ; + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + atl1c_wait_mdio_idle(hw); + val |= MDIO_CTRL_AP_EN; + val &= ~MDIO_CTRL_START; AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + udelay(30); +} - for (i = 0; i < MDIO_WAIT_TIMES; i++) { - udelay(2); - AT_READ_REG(hw, REG_MDIO_CTRL, &val); - if (!(val & (MDIO_START | MDIO_BUSY))) - break; + +/* + * atl1c_read_phy_core + * core funtion to read register in PHY via MDIO control regsiter. + * ext: extension register (see IEEE 802.3) + * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0) + * reg: reg to read + */ +int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev, + u16 reg, u16 *phy_data) +{ + u32 val; + u16 clk_sel = MDIO_CTRL_CLK_25_4; + + atl1c_stop_phy_polling(hw); + + *phy_data = 0; + + /* only l2c_b2 & l1d_2 could use slow clock */ + if ((hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) && + hw->hibernate) + clk_sel = MDIO_CTRL_CLK_25_128; + if (ext) { + val = FIELDX(MDIO_EXTN_DEVAD, dev) | FIELDX(MDIO_EXTN_REG, reg); + AT_WRITE_REG(hw, REG_MDIO_EXTN, val); + val = MDIO_CTRL_SPRES_PRMBL | + FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | + MDIO_CTRL_START | + MDIO_CTRL_MODE_EXT | + MDIO_CTRL_OP_READ; + } else { + val = MDIO_CTRL_SPRES_PRMBL | + FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | + FIELDX(MDIO_CTRL_REG, reg) | + MDIO_CTRL_START | + MDIO_CTRL_OP_READ; } - if (!(val & (MDIO_START | MDIO_BUSY))) { - *phy_data = (u16)val; - return 0; + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + + if (!atl1c_wait_mdio_idle(hw)) + return -1; + + AT_READ_REG(hw, REG_MDIO_CTRL, &val); + *phy_data = (u16)FIELD_GETX(val, MDIO_CTRL_DATA); + + atl1c_start_phy_polling(hw, clk_sel); + + return 0; +} + +/* + * atl1c_write_phy_core + * core funtion to write to register in PHY via MDIO control regsiter. 
+ * ext: extension register (see IEEE 802.3) + * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0) + * reg: reg to write + */ +int atl1c_write_phy_core(struct atl1c_hw *hw, bool ext, u8 dev, + u16 reg, u16 phy_data) +{ + u32 val; + u16 clk_sel = MDIO_CTRL_CLK_25_4; + + atl1c_stop_phy_polling(hw); + + + /* only l2c_b2 & l1d_2 could use slow clock */ + if ((hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) && + hw->hibernate) + clk_sel = MDIO_CTRL_CLK_25_128; + + if (ext) { + val = FIELDX(MDIO_EXTN_DEVAD, dev) | FIELDX(MDIO_EXTN_REG, reg); + AT_WRITE_REG(hw, REG_MDIO_EXTN, val); + val = MDIO_CTRL_SPRES_PRMBL | + FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | + FIELDX(MDIO_CTRL_DATA, phy_data) | + MDIO_CTRL_START | + MDIO_CTRL_MODE_EXT; + } else { + val = MDIO_CTRL_SPRES_PRMBL | + FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | + FIELDX(MDIO_CTRL_DATA, phy_data) | + FIELDX(MDIO_CTRL_REG, reg) | + MDIO_CTRL_START; } + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); - return -1; + if (!atl1c_wait_mdio_idle(hw)) + return -1; + + atl1c_start_phy_polling(hw, clk_sel); + + return 0; +} + +/* + * Reads the value from a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to read + */ +int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data) +{ + return atl1c_read_phy_core(hw, false, 0, reg_addr, phy_data); } /* @@ -315,27 +418,47 @@ int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data) */ int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data) { - int i; - u32 val; + return atl1c_write_phy_core(hw, false, 0, reg_addr, phy_data); +} - val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | - (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | - MDIO_SUP_PREAMBLE | MDIO_START | - MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; +/* read from PHY extension register */ +int atl1c_read_phy_ext(struct atl1c_hw *hw, u8 dev_addr, + u16 reg_addr, u16 *phy_data) +{ + return atl1c_read_phy_core(hw, true, dev_addr, reg_addr, phy_data); +} - AT_WRITE_REG(hw, REG_MDIO_CTRL, val); +/* write to PHY extension register */ +int atl1c_write_phy_ext(struct atl1c_hw *hw, u8 dev_addr, + u16 reg_addr, u16 phy_data) +{ + return atl1c_write_phy_core(hw, true, dev_addr, reg_addr, phy_data); +} - for (i = 0; i < MDIO_WAIT_TIMES; i++) { - udelay(2); - AT_READ_REG(hw, REG_MDIO_CTRL, &val); - if (!(val & (MDIO_START | MDIO_BUSY))) - break; - } +int atl1c_read_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data) +{ + int err; - if (!(val & (MDIO_START | MDIO_BUSY))) - return 0; + err = atl1c_write_phy_reg(hw, MII_DBG_ADDR, reg_addr); + if (unlikely(err)) + return err; + else + err = atl1c_read_phy_reg(hw, MII_DBG_DATA, phy_data); - return -1; + return err; +} + +int atl1c_write_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 phy_data) +{ + int err; + + err = atl1c_write_phy_reg(hw, MII_DBG_ADDR, reg_addr); + if (unlikely(err)) + return err; + else + err = atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data); + + return err; } /* @@ -380,119 +503,100 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw) void atl1c_phy_disable(struct atl1c_hw *hw) { - AT_WRITE_REGW(hw, REG_GPHY_CTRL, - GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET); + atl1c_power_saving(hw, 0); } -static void atl1c_phy_magic_data(struct atl1c_hw *hw) -{ - u16 data; - - data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE | - ((1 & ANA_INTERVAL_SEL_TIMER_MASK) << - ANA_INTERVAL_SEL_TIMER_SHIFT); - - atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_18); - 
atl1c_write_phy_reg(hw, MII_DBG_DATA, data); - - data = (2 & ANA_SERDES_CDR_BW_MASK) | ANA_MS_PAD_DBG | - ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL | - ANA_SERDES_EN_LCKDT; - - atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_5); - atl1c_write_phy_reg(hw, MII_DBG_DATA, data); - - data = (44 & ANA_LONG_CABLE_TH_100_MASK) | - ((33 & ANA_SHORT_CABLE_TH_100_MASK) << - ANA_SHORT_CABLE_TH_100_SHIFT) | ANA_BP_BAD_LINK_ACCUM | - ANA_BP_SMALL_BW; - - atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_54); - atl1c_write_phy_reg(hw, MII_DBG_DATA, data); - - data = (11 & ANA_IECHO_ADJ_MASK) | ((11 & ANA_IECHO_ADJ_MASK) << - ANA_IECHO_ADJ_2_SHIFT) | ((8 & ANA_IECHO_ADJ_MASK) << - ANA_IECHO_ADJ_1_SHIFT) | ((8 & ANA_IECHO_ADJ_MASK) << - ANA_IECHO_ADJ_0_SHIFT); - - atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_4); - atl1c_write_phy_reg(hw, MII_DBG_DATA, data); - - data = ANA_RESTART_CAL | ((7 & ANA_MANUL_SWICH_ON_MASK) << - ANA_MANUL_SWICH_ON_SHIFT) | ANA_MAN_ENABLE | - ANA_SEL_HSP | ANA_EN_HB | ANA_OEN_125M; - - atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_0); - atl1c_write_phy_reg(hw, MII_DBG_DATA, data); - - if (hw->ctrl_flags & ATL1C_HIB_DISABLE) { - atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_41); - if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &data) != 0) - return; - data &= ~ANA_TOP_PS_EN; - atl1c_write_phy_reg(hw, MII_DBG_DATA, data); - - atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_11); - if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &data) != 0) - return; - data &= ~ANA_PS_HIB_EN; - atl1c_write_phy_reg(hw, MII_DBG_DATA, data); - } -} int atl1c_phy_reset(struct atl1c_hw *hw) { struct atl1c_adapter *adapter = hw->adapter; struct pci_dev *pdev = adapter->pdev; u16 phy_data; - u32 phy_ctrl_data = GPHY_CTRL_DEFAULT; - u32 mii_ier_data = IER_LINK_UP | IER_LINK_DOWN; + u32 phy_ctrl_data, lpi_ctrl; int err; - if (hw->ctrl_flags & ATL1C_HIB_DISABLE) - phy_ctrl_data &= ~GPHY_CTRL_HIB_EN; - + /* reset PHY core */ + AT_READ_REG(hw, REG_GPHY_CTRL, &phy_ctrl_data); + phy_ctrl_data &= ~(GPHY_CTRL_EXT_RESET | GPHY_CTRL_PHY_IDDQ | + GPHY_CTRL_GATE_25M_EN | GPHY_CTRL_PWDOWN_HW | GPHY_CTRL_CLS); + phy_ctrl_data |= GPHY_CTRL_SEL_ANA_RST; + if (!(hw->ctrl_flags & ATL1C_HIB_DISABLE)) + phy_ctrl_data |= (GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE); + else + phy_ctrl_data &= ~(GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE); AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data); AT_WRITE_FLUSH(hw); - msleep(40); - phy_ctrl_data |= GPHY_CTRL_EXT_RESET; - AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data); + udelay(10); + AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data | GPHY_CTRL_EXT_RESET); AT_WRITE_FLUSH(hw); - msleep(10); + udelay(10 * GPHY_CTRL_EXT_RST_TO); /* delay 800us */ + /* switch clock */ if (hw->nic_type == athr_l2c_b) { - atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x0A); - atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data); - atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xDFFF); + atl1c_read_phy_dbg(hw, MIIDBG_CFGLPSPD, &phy_data); + atl1c_write_phy_dbg(hw, MIIDBG_CFGLPSPD, + phy_data & ~CFGLPSPD_RSTCNT_CLK125SW); } - if (hw->nic_type == athr_l2c_b || - hw->nic_type == athr_l2c_b2 || - hw->nic_type == athr_l1d || - hw->nic_type == athr_l1d_2) { - atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B); - atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data); - atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7); - msleep(20); + /* tx-half amplitude issue fix */ + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) { + atl1c_read_phy_dbg(hw, MIIDBG_CABLE1TH_DET, &phy_data); + phy_data |= CABLE1TH_DET_EN; 
+ atl1c_write_phy_dbg(hw, MIIDBG_CABLE1TH_DET, phy_data); } - if (hw->nic_type == athr_l1d) { - atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29); - atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x929D); + + /* clear bit3 of dbgport 3B to lower voltage */ + if (!(hw->ctrl_flags & ATL1C_HIB_DISABLE)) { + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) { + atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data); + phy_data &= ~VOLT_CTRL_SWLOWEST; + atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data); + } + /* power saving config */ + phy_data = + hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2 ? + L1D_LEGCYPS_DEF : L1C_LEGCYPS_DEF; + atl1c_write_phy_dbg(hw, MIIDBG_LEGCYPS, phy_data); + /* hib */ + atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL, + SYSMODCTRL_IECHOADJ_DEF); + } else { + /* disable pws */ + atl1c_read_phy_dbg(hw, MIIDBG_LEGCYPS, &phy_data); + atl1c_write_phy_dbg(hw, MIIDBG_LEGCYPS, + phy_data & ~LEGCYPS_EN); + /* disable hibernate */ + atl1c_read_phy_dbg(hw, MIIDBG_HIBNEG, &phy_data); + atl1c_write_phy_dbg(hw, MIIDBG_HIBNEG, + phy_data & HIBNEG_PSHIB_EN); } - if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b2 - || hw->nic_type == athr_l2c) { - atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29); - atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD); + /* disable AZ(EEE) by default */ + if (hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2 || + hw->nic_type == athr_l2c_b2) { + AT_READ_REG(hw, REG_LPI_CTRL, &lpi_ctrl); + AT_WRITE_REG(hw, REG_LPI_CTRL, lpi_ctrl & ~LPI_CTRL_EN); + atl1c_write_phy_ext(hw, MIIEXT_ANEG, MIIEXT_LOCAL_EEEADV, 0); + atl1c_write_phy_ext(hw, MIIEXT_PCS, MIIEXT_CLDCTRL3, + L2CB_CLDCTRL3); } - err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data); + + /* other debug port to set */ + atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, ANACTRL_DEF); + atl1c_write_phy_dbg(hw, MIIDBG_SRDSYSMOD, SRDSYSMOD_DEF); + atl1c_write_phy_dbg(hw, MIIDBG_TST10BTCFG, TST10BTCFG_DEF); + /* UNH-IOL test issue, set bit7 */ + atl1c_write_phy_dbg(hw, MIIDBG_TST100BTCFG, + TST100BTCFG_DEF | TST100BTCFG_LITCH_EN); + + /* set phy interrupt mask */ + phy_data = IER_LINK_UP | IER_LINK_DOWN; + err = atl1c_write_phy_reg(hw, MII_IER, phy_data); if (err) { if (netif_msg_hw(adapter)) dev_err(&pdev->dev, "Error enable PHY linkChange Interrupt\n"); return err; } - if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION)) - atl1c_phy_magic_data(hw); return 0; } @@ -589,7 +693,8 @@ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex) return 0; } -int atl1c_phy_power_saving(struct atl1c_hw *hw) +/* select one link mode to get lower power consumption */ +int atl1c_phy_to_ps_link(struct atl1c_hw *hw) { struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; struct pci_dev *pdev = adapter->pdev; @@ -660,3 +765,101 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw) return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data); } + +int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc) +{ + struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; + struct pci_dev *pdev = adapter->pdev; + u32 master_ctrl, mac_ctrl, phy_ctrl; + u32 wol_ctrl, speed; + u16 phy_data; + + wol_ctrl = 0; + speed = adapter->link_speed == SPEED_1000 ? 
+ MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100; + + AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl); + AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl); + AT_READ_REG(hw, REG_GPHY_CTRL, &phy_ctrl); + + master_ctrl &= ~MASTER_CTRL_CLK_SEL_DIS; + mac_ctrl = FIELD_SETX(mac_ctrl, MAC_CTRL_SPEED, speed); + mac_ctrl &= ~(MAC_CTRL_DUPLX | MAC_CTRL_RX_EN | MAC_CTRL_TX_EN); + if (adapter->link_duplex == FULL_DUPLEX) + mac_ctrl |= MAC_CTRL_DUPLX; + phy_ctrl &= ~(GPHY_CTRL_EXT_RESET | GPHY_CTRL_CLS); + phy_ctrl |= GPHY_CTRL_SEL_ANA_RST | GPHY_CTRL_HIB_PULSE | + GPHY_CTRL_HIB_EN; + if (!wufc) { /* without WoL */ + master_ctrl |= MASTER_CTRL_CLK_SEL_DIS; + phy_ctrl |= GPHY_CTRL_PHY_IDDQ | GPHY_CTRL_PWDOWN_HW; + AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl); + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl); + AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl); + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + hw->phy_configured = false; /* re-init PHY when resume */ + return 0; + } + phy_ctrl |= GPHY_CTRL_EXT_RESET; + if (wufc & AT_WUFC_MAG) { + mac_ctrl |= MAC_CTRL_RX_EN | MAC_CTRL_BC_EN; + wol_ctrl |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN; + if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V11) + wol_ctrl |= WOL_PATTERN_EN | WOL_PATTERN_PME_EN; + } + if (wufc & AT_WUFC_LNKC) { + wol_ctrl |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; + if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) { + dev_dbg(&pdev->dev, "%s: write phy MII_IER faild.\n", + atl1c_driver_name); + } + } + /* clear PHY interrupt */ + atl1c_read_phy_reg(hw, MII_ISR, &phy_data); + + dev_dbg(&pdev->dev, "%s: suspend MAC=%x,MASTER=%x,PHY=0x%x,WOL=%x\n", + atl1c_driver_name, mac_ctrl, master_ctrl, phy_ctrl, wol_ctrl); + AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl); + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl); + AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl); + AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl); + + return 0; +} + + +/* configure phy after Link change Event */ +void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed) +{ + u16 phy_val; + bool adj_thresh = false; + + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 || + hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2) + adj_thresh = true; + + if (link_speed != SPEED_0) { /* link up */ + /* az with brcm, half-amp */ + if (hw->nic_type == athr_l1d_2) { + atl1c_read_phy_ext(hw, MIIEXT_PCS, MIIEXT_CLDCTRL6, + &phy_val); + phy_val = FIELD_GETX(phy_val, CLDCTRL6_CAB_LEN); + phy_val = phy_val > CLDCTRL6_CAB_LEN_SHORT ? 
+ AZ_ANADECT_LONG : AZ_ANADECT_DEF; + atl1c_write_phy_dbg(hw, MIIDBG_AZ_ANADECT, phy_val); + } + /* threshold adjust */ + if (adj_thresh && link_speed == SPEED_100 && hw->msi_lnkpatch) { + atl1c_write_phy_dbg(hw, MIIDBG_MSE16DB, L1D_MSE16DB_UP); + atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL, + L1D_SYSMODCTRL_IECHOADJ_DEF); + } + } else { /* link down */ + if (adj_thresh && hw->msi_lnkpatch) { + atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL, + SYSMODCTRL_IECHOADJ_DEF); + atl1c_write_phy_dbg(hw, MIIDBG_MSE16DB, + L1D_MSE16DB_DOWN); + } + } +} diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h index 655fc6c4a8a..17d935bdde0 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h @@ -25,12 +25,18 @@ #include <linux/types.h> #include <linux/mii.h> +#define FIELD_GETX(_x, _name) ((_x) >> (_name##_SHIFT) & (_name##_MASK)) +#define FIELD_SETX(_x, _name, _v) \ +(((_x) & ~((_name##_MASK) << (_name##_SHIFT))) |\ +(((_v) & (_name##_MASK)) << (_name##_SHIFT))) +#define FIELDX(_name, _v) (((_v) & (_name##_MASK)) << (_name##_SHIFT)) + struct atl1c_adapter; struct atl1c_hw; /* function prototype */ void atl1c_phy_disable(struct atl1c_hw *hw); -void atl1c_hw_set_mac_addr(struct atl1c_hw *hw); +void atl1c_hw_set_mac_addr(struct atl1c_hw *hw, u8 *mac_addr); int atl1c_phy_reset(struct atl1c_hw *hw); int atl1c_read_mac_addr(struct atl1c_hw *hw); int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex); @@ -42,47 +48,45 @@ bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value); int atl1c_phy_init(struct atl1c_hw *hw); int atl1c_check_eeprom_exist(struct atl1c_hw *hw); int atl1c_restart_autoneg(struct atl1c_hw *hw); -int atl1c_phy_power_saving(struct atl1c_hw *hw); +int atl1c_phy_to_ps_link(struct atl1c_hw *hw); +int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc); +bool atl1c_wait_mdio_idle(struct atl1c_hw *hw); +void atl1c_stop_phy_polling(struct atl1c_hw *hw); +void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel); +int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev, + u16 reg, u16 *phy_data); +int atl1c_write_phy_core(struct atl1c_hw *hw, bool ext, u8 dev, + u16 reg, u16 phy_data); +int atl1c_read_phy_ext(struct atl1c_hw *hw, u8 dev_addr, + u16 reg_addr, u16 *phy_data); +int atl1c_write_phy_ext(struct atl1c_hw *hw, u8 dev_addr, + u16 reg_addr, u16 phy_data); +int atl1c_read_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data); +int atl1c_write_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 phy_data); +void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed); + +/* hw-ids */ +#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062 +#define PCI_DEVICE_ID_ATTANSIC_L1C 0x1063 +#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */ +#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */ +#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */ +#define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */ +#define L2CB_V10 0xc0 +#define L2CB_V11 0xc1 + /* register definition */ #define REG_DEVICE_CAP 0x5C #define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7 #define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0 -#define REG_DEVICE_CTRL 0x60 -#define DEVICE_CTRL_MAX_PAYLOAD_MASK 0x7 -#define DEVICE_CTRL_MAX_PAYLOAD_SHIFT 5 -#define DEVICE_CTRL_MAX_RREQ_SZ_MASK 0x7 -#define DEVICE_CTRL_MAX_RREQ_SZ_SHIFT 12 +#define DEVICE_CTRL_MAXRRS_MIN 2 #define REG_LINK_CTRL 0x68 #define LINK_CTRL_L0S_EN 0x01 #define LINK_CTRL_L1_EN 
0x02 #define LINK_CTRL_EXT_SYNC 0x80 -#define REG_VPD_CAP 0x6C -#define VPD_CAP_ID_MASK 0xff -#define VPD_CAP_ID_SHIFT 0 -#define VPD_CAP_NEXT_PTR_MASK 0xFF -#define VPD_CAP_NEXT_PTR_SHIFT 8 -#define VPD_CAP_VPD_ADDR_MASK 0x7FFF -#define VPD_CAP_VPD_ADDR_SHIFT 16 -#define VPD_CAP_VPD_FLAG 0x80000000 - -#define REG_VPD_DATA 0x70 - -#define REG_PCIE_UC_SEVERITY 0x10C -#define PCIE_UC_SERVRITY_TRN 0x00000001 -#define PCIE_UC_SERVRITY_DLP 0x00000010 -#define PCIE_UC_SERVRITY_PSN_TLP 0x00001000 -#define PCIE_UC_SERVRITY_FCP 0x00002000 -#define PCIE_UC_SERVRITY_CPL_TO 0x00004000 -#define PCIE_UC_SERVRITY_CA 0x00008000 -#define PCIE_UC_SERVRITY_UC 0x00010000 -#define PCIE_UC_SERVRITY_ROV 0x00020000 -#define PCIE_UC_SERVRITY_MLFP 0x00040000 -#define PCIE_UC_SERVRITY_ECRC 0x00080000 -#define PCIE_UC_SERVRITY_UR 0x00100000 - #define REG_DEV_SERIALNUM_CTRL 0x200 #define REG_DEV_MAC_SEL_MASK 0x0 /* 0:EUI; 1:MAC */ #define REG_DEV_MAC_SEL_SHIFT 0 @@ -90,25 +94,17 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw); #define REG_DEV_SERIAL_NUM_EN_SHIFT 1 #define REG_TWSI_CTRL 0x218 +#define TWSI_CTLR_FREQ_MASK 0x3UL +#define TWSI_CTRL_FREQ_SHIFT 24 +#define TWSI_CTRL_FREQ_100K 0 +#define TWSI_CTRL_FREQ_200K 1 +#define TWSI_CTRL_FREQ_300K 2 +#define TWSI_CTRL_FREQ_400K 3 +#define TWSI_CTRL_LD_EXIST BIT(23) +#define TWSI_CTRL_HW_LDSTAT BIT(12) /* 0:finish,1:in progress */ +#define TWSI_CTRL_SW_LDSTART BIT(11) #define TWSI_CTRL_LD_OFFSET_MASK 0xFF #define TWSI_CTRL_LD_OFFSET_SHIFT 0 -#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7 -#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8 -#define TWSI_CTRL_SW_LDSTART 0x800 -#define TWSI_CTRL_HW_LDSTART 0x1000 -#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F -#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15 -#define TWSI_CTRL_LD_EXIST 0x400000 -#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3 -#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23 -#define TWSI_CTRL_FREQ_SEL_100K 0 -#define TWSI_CTRL_FREQ_SEL_200K 1 -#define TWSI_CTRL_FREQ_SEL_300K 2 -#define TWSI_CTRL_FREQ_SEL_400K 3 -#define TWSI_CTRL_SMB_SLV_ADDR -#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3 -#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24 - #define REG_PCIE_DEV_MISC_CTRL 0x21C #define PCIE_DEV_MISC_EXT_PIPE 0x2 @@ -118,16 +114,23 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw); #define PCIE_DEV_MISC_SERDES_SEL_DIN 0x10 #define REG_PCIE_PHYMISC 0x1000 -#define PCIE_PHYMISC_FORCE_RCV_DET 0x4 +#define PCIE_PHYMISC_FORCE_RCV_DET BIT(2) +#define PCIE_PHYMISC_NFTS_MASK 0xFFUL +#define PCIE_PHYMISC_NFTS_SHIFT 16 #define REG_PCIE_PHYMISC2 0x1004 -#define PCIE_PHYMISC2_SERDES_CDR_MASK 0x3 -#define PCIE_PHYMISC2_SERDES_CDR_SHIFT 16 -#define PCIE_PHYMISC2_SERDES_TH_MASK 0x3 -#define PCIE_PHYMISC2_SERDES_TH_SHIFT 18 +#define PCIE_PHYMISC2_L0S_TH_MASK 0x3UL +#define PCIE_PHYMISC2_L0S_TH_SHIFT 18 +#define L2CB1_PCIE_PHYMISC2_L0S_TH 3 +#define PCIE_PHYMISC2_CDR_BW_MASK 0x3UL +#define PCIE_PHYMISC2_CDR_BW_SHIFT 16 +#define L2CB1_PCIE_PHYMISC2_CDR_BW 3 #define REG_TWSI_DEBUG 0x1108 -#define TWSI_DEBUG_DEV_EXIST 0x20000000 +#define TWSI_DEBUG_DEV_EXIST BIT(29) + +#define REG_DMA_DBG 0x1114 +#define DMA_DBG_VENDOR_MSG BIT(0) #define REG_EEPROM_CTRL 0x12C0 #define EEPROM_CTRL_DATA_HI_MASK 0xFFFF @@ -140,56 +143,81 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw); #define REG_EEPROM_DATA_LO 0x12C4 #define REG_OTP_CTRL 0x12F0 -#define OTP_CTRL_CLK_EN 0x0002 +#define OTP_CTRL_CLK_EN BIT(1) #define REG_PM_CTRL 0x12F8 -#define PM_CTRL_SDES_EN 0x00000001 -#define PM_CTRL_RBER_EN 0x00000002 -#define PM_CTRL_CLK_REQ_EN 0x00000004 -#define PM_CTRL_ASPM_L1_EN 0x00000008 -#define 
PM_CTRL_SERDES_L1_EN 0x00000010 -#define PM_CTRL_SERDES_PLL_L1_EN 0x00000020 -#define PM_CTRL_SERDES_PD_EX_L1 0x00000040 -#define PM_CTRL_SERDES_BUDS_RX_L1_EN 0x00000080 -#define PM_CTRL_L0S_ENTRY_TIMER_MASK 0xF -#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT 8 -#define PM_CTRL_ASPM_L0S_EN 0x00001000 -#define PM_CTRL_CLK_SWH_L1 0x00002000 -#define PM_CTRL_CLK_PWM_VER1_1 0x00004000 -#define PM_CTRL_RCVR_WT_TIMER 0x00008000 -#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xF -#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16 -#define PM_CTRL_PM_REQ_TIMER_MASK 0xF -#define PM_CTRL_PM_REQ_TIMER_SHIFT 20 -#define PM_CTRL_LCKDET_TIMER_MASK 0xF +#define PM_CTRL_HOTRST BIT(31) +#define PM_CTRL_MAC_ASPM_CHK BIT(30) /* L0s/L1 dis by MAC based on + * thrghput(setting in 15A0) */ +#define PM_CTRL_SA_DLY_EN BIT(29) +#define PM_CTRL_L0S_BUFSRX_EN BIT(28) +#define PM_CTRL_LCKDET_TIMER_MASK 0xFUL #define PM_CTRL_LCKDET_TIMER_SHIFT 24 -#define PM_CTRL_EN_BUFS_RX_L0S 0x10000000 -#define PM_CTRL_SA_DLY_EN 0x20000000 -#define PM_CTRL_MAC_ASPM_CHK 0x40000000 -#define PM_CTRL_HOTRST 0x80000000 +#define PM_CTRL_LCKDET_TIMER_DEF 0xC +#define PM_CTRL_PM_REQ_TIMER_MASK 0xFUL +#define PM_CTRL_PM_REQ_TIMER_SHIFT 20 /* pm_request_l1 time > @ + * ->L0s not L1 */ +#define PM_CTRL_PM_REQ_TO_DEF 0xF +#define PMCTRL_TXL1_AFTER_L0S BIT(19) /* l1dv2.0+ */ +#define L1D_PMCTRL_L1_ENTRY_TM_MASK 7UL /* l1dv2.0+, 3bits */ +#define L1D_PMCTRL_L1_ENTRY_TM_SHIFT 16 +#define L1D_PMCTRL_L1_ENTRY_TM_DIS 0 +#define L1D_PMCTRL_L1_ENTRY_TM_2US 1 +#define L1D_PMCTRL_L1_ENTRY_TM_4US 2 +#define L1D_PMCTRL_L1_ENTRY_TM_8US 3 +#define L1D_PMCTRL_L1_ENTRY_TM_16US 4 +#define L1D_PMCTRL_L1_ENTRY_TM_24US 5 +#define L1D_PMCTRL_L1_ENTRY_TM_32US 6 +#define L1D_PMCTRL_L1_ENTRY_TM_63US 7 +#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xFUL /* l1C 4bits */ +#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16 +#define L2CB1_PM_CTRL_L1_ENTRY_TM 7 +#define L1C_PM_CTRL_L1_ENTRY_TM 0xF +#define PM_CTRL_RCVR_WT_TIMER BIT(15) /* 1:1us, 0:2ms */ +#define PM_CTRL_CLK_PWM_VER1_1 BIT(14) /* 0:1.0a,1:1.1 */ +#define PM_CTRL_CLK_SWH_L1 BIT(13) /* en pcie clk sw in L1 */ +#define PM_CTRL_ASPM_L0S_EN BIT(12) +#define PM_CTRL_RXL1_AFTER_L0S BIT(11) /* l1dv2.0+ */ +#define L1D_PMCTRL_L0S_TIMER_MASK 7UL /* l1d2.0+, 3bits*/ +#define L1D_PMCTRL_L0S_TIMER_SHIFT 8 +#define PM_CTRL_L0S_ENTRY_TIMER_MASK 0xFUL /* l1c, 4bits */ +#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT 8 +#define PM_CTRL_SERDES_BUFS_RX_L1_EN BIT(7) +#define PM_CTRL_SERDES_PD_EX_L1 BIT(6) /* power down serdes rx */ +#define PM_CTRL_SERDES_PLL_L1_EN BIT(5) +#define PM_CTRL_SERDES_L1_EN BIT(4) +#define PM_CTRL_ASPM_L1_EN BIT(3) +#define PM_CTRL_CLK_REQ_EN BIT(2) +#define PM_CTRL_RBER_EN BIT(1) +#define PM_CTRL_SPRSDWER_EN BIT(0) #define REG_LTSSM_ID_CTRL 0x12FC #define LTSSM_ID_EN_WRO 0x1000 + + /* Selene Master Control Register */ #define REG_MASTER_CTRL 0x1400 -#define MASTER_CTRL_SOFT_RST 0x1 -#define MASTER_CTRL_TEST_MODE_MASK 0x3 -#define MASTER_CTRL_TEST_MODE_SHIFT 2 -#define MASTER_CTRL_BERT_START 0x10 -#define MASTER_CTRL_OOB_DIS_OFF 0x40 -#define MASTER_CTRL_SA_TIMER_EN 0x80 -#define MASTER_CTRL_MTIMER_EN 0x100 -#define MASTER_CTRL_MANUAL_INT 0x200 -#define MASTER_CTRL_TX_ITIMER_EN 0x400 -#define MASTER_CTRL_RX_ITIMER_EN 0x800 -#define MASTER_CTRL_CLK_SEL_DIS 0x1000 -#define MASTER_CTRL_CLK_SWH_MODE 0x2000 -#define MASTER_CTRL_INT_RDCLR 0x4000 -#define MASTER_CTRL_REV_NUM_SHIFT 16 -#define MASTER_CTRL_REV_NUM_MASK 0xff -#define MASTER_CTRL_DEV_ID_SHIFT 24 -#define MASTER_CTRL_DEV_ID_MASK 0x7f -#define MASTER_CTRL_OTP_SEL 0x80000000 +#define MASTER_CTRL_OTP_SEL 
BIT(31) +#define MASTER_DEV_NUM_MASK 0x7FUL +#define MASTER_DEV_NUM_SHIFT 24 +#define MASTER_REV_NUM_MASK 0xFFUL +#define MASTER_REV_NUM_SHIFT 16 +#define MASTER_CTRL_INT_RDCLR BIT(14) +#define MASTER_CTRL_CLK_SEL_DIS BIT(12) /* 1:alwys sel pclk from + * serdes, not sw to 25M */ +#define MASTER_CTRL_RX_ITIMER_EN BIT(11) /* IRQ MODURATION FOR RX */ +#define MASTER_CTRL_TX_ITIMER_EN BIT(10) /* MODURATION FOR TX/RX */ +#define MASTER_CTRL_MANU_INT BIT(9) /* SOFT MANUAL INT */ +#define MASTER_CTRL_MANUTIMER_EN BIT(8) +#define MASTER_CTRL_SA_TIMER_EN BIT(7) /* SYS ALIVE TIMER EN */ +#define MASTER_CTRL_OOB_DIS BIT(6) /* OUT OF BOX DIS */ +#define MASTER_CTRL_WAKEN_25M BIT(5) /* WAKE WO. PCIE CLK */ +#define MASTER_CTRL_BERT_START BIT(4) +#define MASTER_PCIE_TSTMOD_MASK 3UL +#define MASTER_PCIE_TSTMOD_SHIFT 2 +#define MASTER_PCIE_RST BIT(1) +#define MASTER_CTRL_SOFT_RST BIT(0) /* RST MAC & DMA */ +#define DMA_MAC_RST_TO 50 /* Timer Initial Value Register */ #define REG_MANUAL_TIMER_INIT 0x1404 @@ -201,87 +229,85 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw); #define IRQ_MODRT_RX_TIMER_SHIFT 16 #define REG_GPHY_CTRL 0x140C -#define GPHY_CTRL_EXT_RESET 0x1 -#define GPHY_CTRL_RTL_MODE 0x2 -#define GPHY_CTRL_LED_MODE 0x4 -#define GPHY_CTRL_ANEG_NOW 0x8 -#define GPHY_CTRL_REV_ANEG 0x10 -#define GPHY_CTRL_GATE_25M_EN 0x20 -#define GPHY_CTRL_LPW_EXIT 0x40 -#define GPHY_CTRL_PHY_IDDQ 0x80 -#define GPHY_CTRL_PHY_IDDQ_DIS 0x100 -#define GPHY_CTRL_GIGA_DIS 0x200 -#define GPHY_CTRL_HIB_EN 0x400 -#define GPHY_CTRL_HIB_PULSE 0x800 -#define GPHY_CTRL_SEL_ANA_RST 0x1000 -#define GPHY_CTRL_PHY_PLL_ON 0x2000 -#define GPHY_CTRL_PWDOWN_HW 0x4000 -#define GPHY_CTRL_PHY_PLL_BYPASS 0x8000 - -#define GPHY_CTRL_DEFAULT ( \ - GPHY_CTRL_SEL_ANA_RST |\ - GPHY_CTRL_HIB_PULSE |\ - GPHY_CTRL_HIB_EN) - -#define GPHY_CTRL_PW_WOL_DIS ( \ - GPHY_CTRL_SEL_ANA_RST |\ - GPHY_CTRL_HIB_PULSE |\ - GPHY_CTRL_HIB_EN |\ - GPHY_CTRL_PWDOWN_HW |\ - GPHY_CTRL_PHY_IDDQ) - -#define GPHY_CTRL_POWER_SAVING ( \ - GPHY_CTRL_SEL_ANA_RST |\ - GPHY_CTRL_HIB_EN |\ - GPHY_CTRL_HIB_PULSE |\ - GPHY_CTRL_PWDOWN_HW |\ - GPHY_CTRL_PHY_IDDQ) +#define GPHY_CTRL_ADDR_MASK 0x1FUL +#define GPHY_CTRL_ADDR_SHIFT 19 +#define GPHY_CTRL_BP_VLTGSW BIT(18) +#define GPHY_CTRL_100AB_EN BIT(17) +#define GPHY_CTRL_10AB_EN BIT(16) +#define GPHY_CTRL_PHY_PLL_BYPASS BIT(15) +#define GPHY_CTRL_PWDOWN_HW BIT(14) /* affect MAC&PHY, to low pw */ +#define GPHY_CTRL_PHY_PLL_ON BIT(13) /* 1:pll always on, 0:can sw */ +#define GPHY_CTRL_SEL_ANA_RST BIT(12) +#define GPHY_CTRL_HIB_PULSE BIT(11) +#define GPHY_CTRL_HIB_EN BIT(10) +#define GPHY_CTRL_GIGA_DIS BIT(9) +#define GPHY_CTRL_PHY_IDDQ_DIS BIT(8) /* pw on RST */ +#define GPHY_CTRL_PHY_IDDQ BIT(7) /* bit8 affect bit7 while rb */ +#define GPHY_CTRL_LPW_EXIT BIT(6) +#define GPHY_CTRL_GATE_25M_EN BIT(5) +#define GPHY_CTRL_REV_ANEG BIT(4) +#define GPHY_CTRL_ANEG_NOW BIT(3) +#define GPHY_CTRL_LED_MODE BIT(2) +#define GPHY_CTRL_RTL_MODE BIT(1) +#define GPHY_CTRL_EXT_RESET BIT(0) /* 1:out of DSP RST status */ +#define GPHY_CTRL_EXT_RST_TO 80 /* 800us atmost */ +#define GPHY_CTRL_CLS (\ + GPHY_CTRL_LED_MODE |\ + GPHY_CTRL_100AB_EN |\ + GPHY_CTRL_PHY_PLL_ON) + /* Block IDLE Status Register */ -#define REG_IDLE_STATUS 0x1410 -#define IDLE_STATUS_MASK 0x00FF -#define IDLE_STATUS_RXMAC_NO_IDLE 0x1 -#define IDLE_STATUS_TXMAC_NO_IDLE 0x2 -#define IDLE_STATUS_RXQ_NO_IDLE 0x4 -#define IDLE_STATUS_TXQ_NO_IDLE 0x8 -#define IDLE_STATUS_DMAR_NO_IDLE 0x10 -#define IDLE_STATUS_DMAW_NO_IDLE 0x20 -#define IDLE_STATUS_SMB_NO_IDLE 0x40 -#define 
IDLE_STATUS_CMB_NO_IDLE 0x80 +#define REG_IDLE_STATUS 0x1410 +#define IDLE_STATUS_SFORCE_MASK 0xFUL +#define IDLE_STATUS_SFORCE_SHIFT 14 +#define IDLE_STATUS_CALIB_DONE BIT(13) +#define IDLE_STATUS_CALIB_RES_MASK 0x1FUL +#define IDLE_STATUS_CALIB_RES_SHIFT 8 +#define IDLE_STATUS_CALIBERR_MASK 0xFUL +#define IDLE_STATUS_CALIBERR_SHIFT 4 +#define IDLE_STATUS_TXQ_BUSY BIT(3) +#define IDLE_STATUS_RXQ_BUSY BIT(2) +#define IDLE_STATUS_TXMAC_BUSY BIT(1) +#define IDLE_STATUS_RXMAC_BUSY BIT(0) +#define IDLE_STATUS_MASK (\ + IDLE_STATUS_TXQ_BUSY |\ + IDLE_STATUS_RXQ_BUSY |\ + IDLE_STATUS_TXMAC_BUSY |\ + IDLE_STATUS_RXMAC_BUSY) /* MDIO Control Register */ #define REG_MDIO_CTRL 0x1414 -#define MDIO_DATA_MASK 0xffff /* On MDIO write, the 16-bit - * control data to write to PHY - * MII management register */ -#define MDIO_DATA_SHIFT 0 /* On MDIO read, the 16-bit - * status data that was read - * from the PHY MII management register */ -#define MDIO_REG_ADDR_MASK 0x1f /* MDIO register address */ -#define MDIO_REG_ADDR_SHIFT 16 -#define MDIO_RW 0x200000 /* 1: read, 0: write */ -#define MDIO_SUP_PREAMBLE 0x400000 /* Suppress preamble */ -#define MDIO_START 0x800000 /* Write 1 to initiate the MDIO - * master. And this bit is self - * cleared after one cycle */ -#define MDIO_CLK_SEL_SHIFT 24 -#define MDIO_CLK_25_4 0 -#define MDIO_CLK_25_6 2 -#define MDIO_CLK_25_8 3 -#define MDIO_CLK_25_10 4 -#define MDIO_CLK_25_14 5 -#define MDIO_CLK_25_20 6 -#define MDIO_CLK_25_28 7 -#define MDIO_BUSY 0x8000000 -#define MDIO_AP_EN 0x10000000 -#define MDIO_WAIT_TIMES 10 - -/* MII PHY Status Register */ -#define REG_PHY_STATUS 0x1418 -#define PHY_GENERAL_STATUS_MASK 0xFFFF -#define PHY_STATUS_RECV_ENABLE 0x0001 -#define PHY_OE_PWSP_STATUS_MASK 0x07FF -#define PHY_OE_PWSP_STATUS_SHIFT 16 -#define PHY_STATUS_LPW_STATE 0x80000000 +#define MDIO_CTRL_MODE_EXT BIT(30) +#define MDIO_CTRL_POST_READ BIT(29) +#define MDIO_CTRL_AP_EN BIT(28) +#define MDIO_CTRL_BUSY BIT(27) +#define MDIO_CTRL_CLK_SEL_MASK 0x7UL +#define MDIO_CTRL_CLK_SEL_SHIFT 24 +#define MDIO_CTRL_CLK_25_4 0 /* 25MHz divide 4 */ +#define MDIO_CTRL_CLK_25_6 2 +#define MDIO_CTRL_CLK_25_8 3 +#define MDIO_CTRL_CLK_25_10 4 +#define MDIO_CTRL_CLK_25_32 5 +#define MDIO_CTRL_CLK_25_64 6 +#define MDIO_CTRL_CLK_25_128 7 +#define MDIO_CTRL_START BIT(23) +#define MDIO_CTRL_SPRES_PRMBL BIT(22) +#define MDIO_CTRL_OP_READ BIT(21) /* 1:read, 0:write */ +#define MDIO_CTRL_REG_MASK 0x1FUL +#define MDIO_CTRL_REG_SHIFT 16 +#define MDIO_CTRL_DATA_MASK 0xFFFFUL +#define MDIO_CTRL_DATA_SHIFT 0 +#define MDIO_MAX_AC_TO 120 /* 1.2ms timeout for slow clk */ + +/* for extension reg access */ +#define REG_MDIO_EXTN 0x1448 +#define MDIO_EXTN_PORTAD_MASK 0x1FUL +#define MDIO_EXTN_PORTAD_SHIFT 21 +#define MDIO_EXTN_DEVAD_MASK 0x1FUL +#define MDIO_EXTN_DEVAD_SHIFT 16 +#define MDIO_EXTN_REG_MASK 0xFFFFUL +#define MDIO_EXTN_REG_SHIFT 0 + /* BIST Control and Status Register0 (for the Packet Memory) */ #define REG_BIST0_CTRL 0x141c #define BIST0_NOW 0x1 @@ -299,50 +325,81 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw); #define BIST1_FUSE_FLAG 0x4 /* SerDes Lock Detect Control and Status Register */ -#define REG_SERDES_LOCK 0x1424 -#define SERDES_LOCK_DETECT 0x1 /* SerDes lock detected. 
This signal - * comes from Analog SerDes */ -#define SERDES_LOCK_DETECT_EN 0x2 /* 1: Enable SerDes Lock detect function */ -#define SERDES_LOCK_STS_SELFB_PLL_SHIFT 0xE -#define SERDES_LOCK_STS_SELFB_PLL_MASK 0x3 -#define SERDES_OVCLK_18_25 0x0 -#define SERDES_OVCLK_12_18 0x1 -#define SERDES_OVCLK_0_4 0x2 -#define SERDES_OVCLK_4_12 0x3 -#define SERDES_MAC_CLK_SLOWDOWN 0x20000 -#define SERDES_PYH_CLK_SLOWDOWN 0x40000 +#define REG_SERDES 0x1424 +#define SERDES_PHY_CLK_SLOWDOWN BIT(18) +#define SERDES_MAC_CLK_SLOWDOWN BIT(17) +#define SERDES_SELFB_PLL_MASK 0x3UL +#define SERDES_SELFB_PLL_SHIFT 14 +#define SERDES_PHYCLK_SEL_GTX BIT(13) /* 1:gtx_clk, 0:25M */ +#define SERDES_PCIECLK_SEL_SRDS BIT(12) /* 1:serdes,0:25M */ +#define SERDES_BUFS_RX_EN BIT(11) +#define SERDES_PD_RX BIT(10) +#define SERDES_PLL_EN BIT(9) +#define SERDES_EN BIT(8) +#define SERDES_SELFB_PLL_SEL_CSR BIT(6) /* 0:state-machine,1:csr */ +#define SERDES_SELFB_PLL_CSR_MASK 0x3UL +#define SERDES_SELFB_PLL_CSR_SHIFT 4 +#define SERDES_SELFB_PLL_CSR_4 3 /* 4-12% OV-CLK */ +#define SERDES_SELFB_PLL_CSR_0 2 /* 0-4% OV-CLK */ +#define SERDES_SELFB_PLL_CSR_12 1 /* 12-18% OV-CLK */ +#define SERDES_SELFB_PLL_CSR_18 0 /* 18-25% OV-CLK */ +#define SERDES_VCO_SLOW BIT(3) +#define SERDES_VCO_FAST BIT(2) +#define SERDES_LOCK_DETECT_EN BIT(1) +#define SERDES_LOCK_DETECT BIT(0) + +#define REG_LPI_DECISN_TIMER 0x143C +#define L2CB_LPI_DESISN_TIMER 0x7D00 + +#define REG_LPI_CTRL 0x1440 +#define LPI_CTRL_CHK_DA BIT(31) +#define LPI_CTRL_ENH_TO_MASK 0x1FFFUL +#define LPI_CTRL_ENH_TO_SHIFT 12 +#define LPI_CTRL_ENH_TH_MASK 0x1FUL +#define LPI_CTRL_ENH_TH_SHIFT 6 +#define LPI_CTRL_ENH_EN BIT(5) +#define LPI_CTRL_CHK_RX BIT(4) +#define LPI_CTRL_CHK_STATE BIT(3) +#define LPI_CTRL_GMII BIT(2) +#define LPI_CTRL_TO_PHY BIT(1) +#define LPI_CTRL_EN BIT(0) + +#define REG_LPI_WAIT 0x1444 +#define LPI_WAIT_TIMER_MASK 0xFFFFUL +#define LPI_WAIT_TIMER_SHIFT 0 /* MAC Control Register */ #define REG_MAC_CTRL 0x1480 -#define MAC_CTRL_TX_EN 0x1 -#define MAC_CTRL_RX_EN 0x2 -#define MAC_CTRL_TX_FLOW 0x4 -#define MAC_CTRL_RX_FLOW 0x8 -#define MAC_CTRL_LOOPBACK 0x10 -#define MAC_CTRL_DUPLX 0x20 -#define MAC_CTRL_ADD_CRC 0x40 -#define MAC_CTRL_PAD 0x80 -#define MAC_CTRL_LENCHK 0x100 -#define MAC_CTRL_HUGE_EN 0x200 -#define MAC_CTRL_PRMLEN_SHIFT 10 -#define MAC_CTRL_PRMLEN_MASK 0xf -#define MAC_CTRL_RMV_VLAN 0x4000 -#define MAC_CTRL_PROMIS_EN 0x8000 -#define MAC_CTRL_TX_PAUSE 0x10000 -#define MAC_CTRL_SCNT 0x20000 -#define MAC_CTRL_SRST_TX 0x40000 -#define MAC_CTRL_TX_SIMURST 0x80000 -#define MAC_CTRL_SPEED_SHIFT 20 -#define MAC_CTRL_SPEED_MASK 0x3 -#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000 -#define MAC_CTRL_TX_HUGE 0x800000 -#define MAC_CTRL_RX_CHKSUM_EN 0x1000000 -#define MAC_CTRL_MC_ALL_EN 0x2000000 -#define MAC_CTRL_BC_EN 0x4000000 -#define MAC_CTRL_DBG 0x8000000 -#define MAC_CTRL_SINGLE_PAUSE_EN 0x10000000 -#define MAC_CTRL_HASH_ALG_CRC32 0x20000000 -#define MAC_CTRL_SPEED_MODE_SW 0x40000000 +#define MAC_CTRL_SPEED_MODE_SW BIT(30) /* 0:phy,1:sw */ +#define MAC_CTRL_HASH_ALG_CRC32 BIT(29) /* 1:legacy,0:lw_5b */ +#define MAC_CTRL_SINGLE_PAUSE_EN BIT(28) +#define MAC_CTRL_DBG BIT(27) +#define MAC_CTRL_BC_EN BIT(26) +#define MAC_CTRL_MC_ALL_EN BIT(25) +#define MAC_CTRL_RX_CHKSUM_EN BIT(24) +#define MAC_CTRL_TX_HUGE BIT(23) +#define MAC_CTRL_DBG_TX_BKPRESURE BIT(22) +#define MAC_CTRL_SPEED_MASK 3UL +#define MAC_CTRL_SPEED_SHIFT 20 +#define MAC_CTRL_SPEED_10_100 1 +#define MAC_CTRL_SPEED_1000 2 +#define MAC_CTRL_TX_SIMURST BIT(19) +#define MAC_CTRL_SCNT BIT(17) +#define 
MAC_CTRL_TX_PAUSE BIT(16) +#define MAC_CTRL_PROMIS_EN BIT(15) +#define MAC_CTRL_RMV_VLAN BIT(14) +#define MAC_CTRL_PRMLEN_MASK 0xFUL +#define MAC_CTRL_PRMLEN_SHIFT 10 +#define MAC_CTRL_HUGE_EN BIT(9) +#define MAC_CTRL_LENCHK BIT(8) +#define MAC_CTRL_PAD BIT(7) +#define MAC_CTRL_ADD_CRC BIT(6) +#define MAC_CTRL_DUPLX BIT(5) +#define MAC_CTRL_LOOPBACK BIT(4) +#define MAC_CTRL_RX_FLOW BIT(3) +#define MAC_CTRL_TX_FLOW BIT(2) +#define MAC_CTRL_RX_EN BIT(1) +#define MAC_CTRL_TX_EN BIT(0) /* MAC IPG/IFG Control Register */ #define REG_MAC_IPG_IFG 0x1484 @@ -386,34 +443,53 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw); /* Wake-On-Lan control register */ #define REG_WOL_CTRL 0x14a0 -#define WOL_PATTERN_EN 0x00000001 -#define WOL_PATTERN_PME_EN 0x00000002 -#define WOL_MAGIC_EN 0x00000004 -#define WOL_MAGIC_PME_EN 0x00000008 -#define WOL_LINK_CHG_EN 0x00000010 -#define WOL_LINK_CHG_PME_EN 0x00000020 -#define WOL_PATTERN_ST 0x00000100 -#define WOL_MAGIC_ST 0x00000200 -#define WOL_LINKCHG_ST 0x00000400 -#define WOL_CLK_SWITCH_EN 0x00008000 -#define WOL_PT0_EN 0x00010000 -#define WOL_PT1_EN 0x00020000 -#define WOL_PT2_EN 0x00040000 -#define WOL_PT3_EN 0x00080000 -#define WOL_PT4_EN 0x00100000 -#define WOL_PT5_EN 0x00200000 -#define WOL_PT6_EN 0x00400000 +#define WOL_PT7_MATCH BIT(31) +#define WOL_PT6_MATCH BIT(30) +#define WOL_PT5_MATCH BIT(29) +#define WOL_PT4_MATCH BIT(28) +#define WOL_PT3_MATCH BIT(27) +#define WOL_PT2_MATCH BIT(26) +#define WOL_PT1_MATCH BIT(25) +#define WOL_PT0_MATCH BIT(24) +#define WOL_PT7_EN BIT(23) +#define WOL_PT6_EN BIT(22) +#define WOL_PT5_EN BIT(21) +#define WOL_PT4_EN BIT(20) +#define WOL_PT3_EN BIT(19) +#define WOL_PT2_EN BIT(18) +#define WOL_PT1_EN BIT(17) +#define WOL_PT0_EN BIT(16) +#define WOL_LNKCHG_ST BIT(10) +#define WOL_MAGIC_ST BIT(9) +#define WOL_PATTERN_ST BIT(8) +#define WOL_OOB_EN BIT(6) +#define WOL_LINK_CHG_PME_EN BIT(5) +#define WOL_LINK_CHG_EN BIT(4) +#define WOL_MAGIC_PME_EN BIT(3) +#define WOL_MAGIC_EN BIT(2) +#define WOL_PATTERN_PME_EN BIT(1) +#define WOL_PATTERN_EN BIT(0) /* WOL Length ( 2 DWORD ) */ -#define REG_WOL_PATTERN_LEN 0x14a4 -#define WOL_PT_LEN_MASK 0x7f -#define WOL_PT0_LEN_SHIFT 0 -#define WOL_PT1_LEN_SHIFT 8 -#define WOL_PT2_LEN_SHIFT 16 -#define WOL_PT3_LEN_SHIFT 24 -#define WOL_PT4_LEN_SHIFT 0 -#define WOL_PT5_LEN_SHIFT 8 -#define WOL_PT6_LEN_SHIFT 16 +#define REG_WOL_PTLEN1 0x14A4 +#define WOL_PTLEN1_3_MASK 0xFFUL +#define WOL_PTLEN1_3_SHIFT 24 +#define WOL_PTLEN1_2_MASK 0xFFUL +#define WOL_PTLEN1_2_SHIFT 16 +#define WOL_PTLEN1_1_MASK 0xFFUL +#define WOL_PTLEN1_1_SHIFT 8 +#define WOL_PTLEN1_0_MASK 0xFFUL +#define WOL_PTLEN1_0_SHIFT 0 + +#define REG_WOL_PTLEN2 0x14A8 +#define WOL_PTLEN2_7_MASK 0xFFUL +#define WOL_PTLEN2_7_SHIFT 24 +#define WOL_PTLEN2_6_MASK 0xFFUL +#define WOL_PTLEN2_6_SHIFT 16 +#define WOL_PTLEN2_5_MASK 0xFFUL +#define WOL_PTLEN2_5_SHIFT 8 +#define WOL_PTLEN2_4_MASK 0xFFUL +#define WOL_PTLEN2_4_SHIFT 0 /* Internal SRAM Partition Register */ #define RFDX_HEAD_ADDR_MASK 0x03FF @@ -458,66 +534,50 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw); */ #define REG_RX_BASE_ADDR_HI 0x1540 #define REG_TX_BASE_ADDR_HI 0x1544 -#define REG_SMB_BASE_ADDR_HI 0x1548 -#define REG_SMB_BASE_ADDR_LO 0x154C #define REG_RFD0_HEAD_ADDR_LO 0x1550 -#define REG_RFD1_HEAD_ADDR_LO 0x1554 -#define REG_RFD2_HEAD_ADDR_LO 0x1558 -#define REG_RFD3_HEAD_ADDR_LO 0x155C #define REG_RFD_RING_SIZE 0x1560 #define RFD_RING_SIZE_MASK 0x0FFF #define REG_RX_BUF_SIZE 0x1564 #define RX_BUF_SIZE_MASK 0xFFFF #define REG_RRD0_HEAD_ADDR_LO 0x1568 -#define 
REG_RRD1_HEAD_ADDR_LO 0x156C -#define REG_RRD2_HEAD_ADDR_LO 0x1570 -#define REG_RRD3_HEAD_ADDR_LO 0x1574 #define REG_RRD_RING_SIZE 0x1578 #define RRD_RING_SIZE_MASK 0x0FFF -#define REG_HTPD_HEAD_ADDR_LO 0x157C -#define REG_NTPD_HEAD_ADDR_LO 0x1580 +#define REG_TPD_PRI1_ADDR_LO 0x157C +#define REG_TPD_PRI0_ADDR_LO 0x1580 #define REG_TPD_RING_SIZE 0x1584 #define TPD_RING_SIZE_MASK 0xFFFF -#define REG_CMB_BASE_ADDR_LO 0x1588 - -/* RSS about */ -#define REG_RSS_KEY0 0x14B0 -#define REG_RSS_KEY1 0x14B4 -#define REG_RSS_KEY2 0x14B8 -#define REG_RSS_KEY3 0x14BC -#define REG_RSS_KEY4 0x14C0 -#define REG_RSS_KEY5 0x14C4 -#define REG_RSS_KEY6 0x14C8 -#define REG_RSS_KEY7 0x14CC -#define REG_RSS_KEY8 0x14D0 -#define REG_RSS_KEY9 0x14D4 -#define REG_IDT_TABLE0 0x14E0 -#define REG_IDT_TABLE1 0x14E4 -#define REG_IDT_TABLE2 0x14E8 -#define REG_IDT_TABLE3 0x14EC -#define REG_IDT_TABLE4 0x14F0 -#define REG_IDT_TABLE5 0x14F4 -#define REG_IDT_TABLE6 0x14F8 -#define REG_IDT_TABLE7 0x14FC -#define REG_IDT_TABLE REG_IDT_TABLE0 -#define REG_RSS_HASH_VALUE 0x15B0 -#define REG_RSS_HASH_FLAG 0x15B4 -#define REG_BASE_CPU_NUMBER 0x15B8 /* TXQ Control Register */ -#define REG_TXQ_CTRL 0x1590 -#define TXQ_NUM_TPD_BURST_MASK 0xF -#define TXQ_NUM_TPD_BURST_SHIFT 0 -#define TXQ_CTRL_IP_OPTION_EN 0x10 -#define TXQ_CTRL_EN 0x20 -#define TXQ_CTRL_ENH_MODE 0x40 -#define TXQ_CTRL_LS_8023_EN 0x80 -#define TXQ_TXF_BURST_NUM_SHIFT 16 -#define TXQ_TXF_BURST_NUM_MASK 0xFFFF +#define REG_TXQ_CTRL 0x1590 +#define TXQ_TXF_BURST_NUM_MASK 0xFFFFUL +#define TXQ_TXF_BURST_NUM_SHIFT 16 +#define L1C_TXQ_TXF_BURST_PREF 0x200 +#define L2CB_TXQ_TXF_BURST_PREF 0x40 +#define TXQ_CTRL_PEDING_CLR BIT(8) +#define TXQ_CTRL_LS_8023_EN BIT(7) +#define TXQ_CTRL_ENH_MODE BIT(6) +#define TXQ_CTRL_EN BIT(5) +#define TXQ_CTRL_IP_OPTION_EN BIT(4) +#define TXQ_NUM_TPD_BURST_MASK 0xFUL +#define TXQ_NUM_TPD_BURST_SHIFT 0 +#define TXQ_NUM_TPD_BURST_DEF 5 +#define TXQ_CFGV (\ + FIELDX(TXQ_NUM_TPD_BURST, TXQ_NUM_TPD_BURST_DEF) |\ + TXQ_CTRL_ENH_MODE |\ + TXQ_CTRL_LS_8023_EN |\ + TXQ_CTRL_IP_OPTION_EN) +#define L1C_TXQ_CFGV (\ + TXQ_CFGV |\ + FIELDX(TXQ_TXF_BURST_NUM, L1C_TXQ_TXF_BURST_PREF)) +#define L2CB_TXQ_CFGV (\ + TXQ_CFGV |\ + FIELDX(TXQ_TXF_BURST_NUM, L2CB_TXQ_TXF_BURST_PREF)) + /* Jumbo packet Threshold for task offload */ #define REG_TX_TSO_OFFLOAD_THRESH 0x1594 /* In 8-bytes */ #define TX_TSO_OFFLOAD_THRESH_MASK 0x07FF +#define MAX_TSO_FRAME_SIZE (7*1024) #define REG_TXF_WATER_MARK 0x1598 /* In 8-bytes */ #define TXF_WATER_MARK_MASK 0x0FFF @@ -537,26 +597,21 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw); #define ASPM_THRUPUT_LIMIT_NO 0x00 #define ASPM_THRUPUT_LIMIT_1M 0x01 #define ASPM_THRUPUT_LIMIT_10M 0x02 -#define ASPM_THRUPUT_LIMIT_100M 0x04 -#define RXQ1_CTRL_EN 0x10 -#define RXQ2_CTRL_EN 0x20 -#define RXQ3_CTRL_EN 0x40 -#define IPV6_CHKSUM_CTRL_EN 0x80 -#define RSS_HASH_BITS_MASK 0x00FF -#define RSS_HASH_BITS_SHIFT 8 -#define RSS_HASH_IPV4 0x10000 -#define RSS_HASH_IPV4_TCP 0x20000 -#define RSS_HASH_IPV6 0x40000 -#define RSS_HASH_IPV6_TCP 0x80000 +#define ASPM_THRUPUT_LIMIT_100M 0x03 +#define IPV6_CHKSUM_CTRL_EN BIT(7) #define RXQ_RFD_BURST_NUM_MASK 0x003F #define RXQ_RFD_BURST_NUM_SHIFT 20 -#define RSS_MODE_MASK 0x0003 +#define RXQ_NUM_RFD_PREF_DEF 8 +#define RSS_MODE_MASK 3UL #define RSS_MODE_SHIFT 26 -#define RSS_NIP_QUEUE_SEL_MASK 0x1 -#define RSS_NIP_QUEUE_SEL_SHIFT 28 -#define RRS_HASH_CTRL_EN 0x20000000 -#define RX_CUT_THRU_EN 0x40000000 -#define RXQ_CTRL_EN 0x80000000 +#define RSS_MODE_DIS 0 +#define RSS_MODE_SQSI 1 +#define 
RSS_MODE_MQSI 2 +#define RSS_MODE_MQMI 3 +#define RSS_NIP_QUEUE_SEL BIT(28) /* 0:q0, 1:table */ +#define RRS_HASH_CTRL_EN BIT(29) +#define RX_CUT_THRU_EN BIT(30) +#define RXQ_CTRL_EN BIT(31) #define REG_RFD_FREE_THRESH 0x15A4 #define RFD_FREE_THRESH_MASK 0x003F @@ -577,57 +632,45 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw); #define RXD_DMA_DOWN_TIMER_SHIFT 16 /* DMA Engine Control Register */ -#define REG_DMA_CTRL 0x15C0 -#define DMA_CTRL_DMAR_IN_ORDER 0x1 -#define DMA_CTRL_DMAR_ENH_ORDER 0x2 -#define DMA_CTRL_DMAR_OUT_ORDER 0x4 -#define DMA_CTRL_RCB_VALUE 0x8 -#define DMA_CTRL_DMAR_BURST_LEN_MASK 0x0007 -#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4 -#define DMA_CTRL_DMAW_BURST_LEN_MASK 0x0007 -#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7 -#define DMA_CTRL_DMAR_REQ_PRI 0x400 -#define DMA_CTRL_DMAR_DLY_CNT_MASK 0x001F -#define DMA_CTRL_DMAR_DLY_CNT_SHIFT 11 -#define DMA_CTRL_DMAW_DLY_CNT_MASK 0x000F -#define DMA_CTRL_DMAW_DLY_CNT_SHIFT 16 -#define DMA_CTRL_CMB_EN 0x100000 -#define DMA_CTRL_SMB_EN 0x200000 -#define DMA_CTRL_CMB_NOW 0x400000 -#define MAC_CTRL_SMB_DIS 0x1000000 -#define DMA_CTRL_SMB_NOW 0x80000000 - -/* CMB/SMB Control Register */ +#define REG_DMA_CTRL 0x15C0 +#define DMA_CTRL_SMB_NOW BIT(31) +#define DMA_CTRL_WPEND_CLR BIT(30) +#define DMA_CTRL_RPEND_CLR BIT(29) +#define DMA_CTRL_WDLY_CNT_MASK 0xFUL +#define DMA_CTRL_WDLY_CNT_SHIFT 16 +#define DMA_CTRL_WDLY_CNT_DEF 4 +#define DMA_CTRL_RDLY_CNT_MASK 0x1FUL +#define DMA_CTRL_RDLY_CNT_SHIFT 11 +#define DMA_CTRL_RDLY_CNT_DEF 15 +#define DMA_CTRL_RREQ_PRI_DATA BIT(10) /* 0:tpd, 1:data */ +#define DMA_CTRL_WREQ_BLEN_MASK 7UL +#define DMA_CTRL_WREQ_BLEN_SHIFT 7 +#define DMA_CTRL_RREQ_BLEN_MASK 7UL +#define DMA_CTRL_RREQ_BLEN_SHIFT 4 +#define L1C_CTRL_DMA_RCB_LEN128 BIT(3) /* 0:64bytes,1:128bytes */ +#define DMA_CTRL_RORDER_MODE_MASK 7UL +#define DMA_CTRL_RORDER_MODE_SHIFT 0 +#define DMA_CTRL_RORDER_MODE_OUT 4 +#define DMA_CTRL_RORDER_MODE_ENHANCE 2 +#define DMA_CTRL_RORDER_MODE_IN 1 + +/* INT-triggle/SMB Control Register */ #define REG_SMB_STAT_TIMER 0x15C4 /* 2us resolution */ #define SMB_STAT_TIMER_MASK 0xFFFFFF -#define REG_CMB_TPD_THRESH 0x15C8 -#define CMB_TPD_THRESH_MASK 0xFFFF -#define REG_CMB_TX_TIMER 0x15CC /* 2us resolution */ -#define CMB_TX_TIMER_MASK 0xFFFF +#define REG_TINT_TPD_THRESH 0x15C8 /* tpd th to trig intrrupt */ /* Mail box */ #define MB_RFDX_PROD_IDX_MASK 0xFFFF #define REG_MB_RFD0_PROD_IDX 0x15E0 -#define REG_MB_RFD1_PROD_IDX 0x15E4 -#define REG_MB_RFD2_PROD_IDX 0x15E8 -#define REG_MB_RFD3_PROD_IDX 0x15EC -#define MB_PRIO_PROD_IDX_MASK 0xFFFF -#define REG_MB_PRIO_PROD_IDX 0x15F0 -#define MB_HTPD_PROD_IDX_SHIFT 0 -#define MB_NTPD_PROD_IDX_SHIFT 16 - -#define MB_PRIO_CONS_IDX_MASK 0xFFFF -#define REG_MB_PRIO_CONS_IDX 0x15F4 -#define MB_HTPD_CONS_IDX_SHIFT 0 -#define MB_NTPD_CONS_IDX_SHIFT 16 +#define REG_TPD_PRI1_PIDX 0x15F0 /* 16bit,hi-tpd producer idx */ +#define REG_TPD_PRI0_PIDX 0x15F2 /* 16bit,lo-tpd producer idx */ +#define REG_TPD_PRI1_CIDX 0x15F4 /* 16bit,hi-tpd consumer idx */ +#define REG_TPD_PRI0_CIDX 0x15F6 /* 16bit,lo-tpd consumer idx */ #define REG_MB_RFD01_CONS_IDX 0x15F8 #define MB_RFD0_CONS_IDX_MASK 0x0000FFFF #define MB_RFD1_CONS_IDX_MASK 0xFFFF0000 -#define REG_MB_RFD23_CONS_IDX 0x15FC -#define MB_RFD2_CONS_IDX_MASK 0x0000FFFF -#define MB_RFD3_CONS_IDX_MASK 0xFFFF0000 /* Interrupt Status Register */ #define REG_ISR 0x1600 @@ -705,13 +748,6 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw); #define REG_INT_RETRIG_TIMER 0x1608 #define INT_RETRIG_TIMER_MASK 0xFFFF -#define REG_HDS_CTRL 0x160C 
-#define HDS_CTRL_EN 0x0001 -#define HDS_CTRL_BACKFILLSIZE_SHIFT 8 -#define HDS_CTRL_BACKFILLSIZE_MASK 0x0FFF -#define HDS_CTRL_MAX_HDRSIZE_SHIFT 20 -#define HDS_CTRL_MAC_HDRSIZE_MASK 0x0FFF - #define REG_MAC_RX_STATUS_BIN 0x1700 #define REG_MAC_RX_STATUS_END 0x175c #define REG_MAC_TX_STATUS_BIN 0x1760 @@ -796,73 +832,188 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw); #define MII_DBG_ADDR 0x1D #define MII_DBG_DATA 0x1E -#define MII_ANA_CTRL_0 0x0 -#define ANA_RESTART_CAL 0x0001 -#define ANA_MANUL_SWICH_ON_SHIFT 0x1 -#define ANA_MANUL_SWICH_ON_MASK 0xF -#define ANA_MAN_ENABLE 0x0020 -#define ANA_SEL_HSP 0x0040 -#define ANA_EN_HB 0x0080 -#define ANA_EN_HBIAS 0x0100 -#define ANA_OEN_125M 0x0200 -#define ANA_EN_LCKDT 0x0400 -#define ANA_LCKDT_PHY 0x0800 -#define ANA_AFE_MODE 0x1000 -#define ANA_VCO_SLOW 0x2000 -#define ANA_VCO_FAST 0x4000 -#define ANA_SEL_CLK125M_DSP 0x8000 - -#define MII_ANA_CTRL_4 0x4 -#define ANA_IECHO_ADJ_MASK 0xF -#define ANA_IECHO_ADJ_3_SHIFT 0 -#define ANA_IECHO_ADJ_2_SHIFT 4 -#define ANA_IECHO_ADJ_1_SHIFT 8 -#define ANA_IECHO_ADJ_0_SHIFT 12 - -#define MII_ANA_CTRL_5 0x5 -#define ANA_SERDES_CDR_BW_SHIFT 0 -#define ANA_SERDES_CDR_BW_MASK 0x3 -#define ANA_MS_PAD_DBG 0x0004 -#define ANA_SPEEDUP_DBG 0x0008 -#define ANA_SERDES_TH_LOS_SHIFT 4 -#define ANA_SERDES_TH_LOS_MASK 0x3 -#define ANA_SERDES_EN_DEEM 0x0040 -#define ANA_SERDES_TXELECIDLE 0x0080 -#define ANA_SERDES_BEACON 0x0100 -#define ANA_SERDES_HALFTXDR 0x0200 -#define ANA_SERDES_SEL_HSP 0x0400 -#define ANA_SERDES_EN_PLL 0x0800 -#define ANA_SERDES_EN 0x1000 -#define ANA_SERDES_EN_LCKDT 0x2000 - -#define MII_ANA_CTRL_11 0xB -#define ANA_PS_HIB_EN 0x8000 - -#define MII_ANA_CTRL_18 0x12 -#define ANA_TEST_MODE_10BT_01SHIFT 0 -#define ANA_TEST_MODE_10BT_01MASK 0x3 -#define ANA_LOOP_SEL_10BT 0x0004 -#define ANA_RGMII_MODE_SW 0x0008 -#define ANA_EN_LONGECABLE 0x0010 -#define ANA_TEST_MODE_10BT_2 0x0020 -#define ANA_EN_10BT_IDLE 0x0400 -#define ANA_EN_MASK_TB 0x0800 -#define ANA_TRIGGER_SEL_TIMER_SHIFT 12 -#define ANA_TRIGGER_SEL_TIMER_MASK 0x3 -#define ANA_INTERVAL_SEL_TIMER_SHIFT 14 -#define ANA_INTERVAL_SEL_TIMER_MASK 0x3 - -#define MII_ANA_CTRL_41 0x29 -#define ANA_TOP_PS_EN 0x8000 - -#define MII_ANA_CTRL_54 0x36 -#define ANA_LONG_CABLE_TH_100_SHIFT 0 -#define ANA_LONG_CABLE_TH_100_MASK 0x3F -#define ANA_DESERVED 0x0040 -#define ANA_EN_LIT_CH 0x0080 -#define ANA_SHORT_CABLE_TH_100_SHIFT 8 -#define ANA_SHORT_CABLE_TH_100_MASK 0x3F -#define ANA_BP_BAD_LINK_ACCUM 0x4000 -#define ANA_BP_SMALL_BW 0x8000 +/***************************** debug port *************************************/ + +#define MIIDBG_ANACTRL 0x00 +#define ANACTRL_CLK125M_DELAY_EN 0x8000 +#define ANACTRL_VCO_FAST 0x4000 +#define ANACTRL_VCO_SLOW 0x2000 +#define ANACTRL_AFE_MODE_EN 0x1000 +#define ANACTRL_LCKDET_PHY 0x800 +#define ANACTRL_LCKDET_EN 0x400 +#define ANACTRL_OEN_125M 0x200 +#define ANACTRL_HBIAS_EN 0x100 +#define ANACTRL_HB_EN 0x80 +#define ANACTRL_SEL_HSP 0x40 +#define ANACTRL_CLASSA_EN 0x20 +#define ANACTRL_MANUSWON_SWR_MASK 3U +#define ANACTRL_MANUSWON_SWR_SHIFT 2 +#define ANACTRL_MANUSWON_SWR_2V 0 +#define ANACTRL_MANUSWON_SWR_1P9V 1 +#define ANACTRL_MANUSWON_SWR_1P8V 2 +#define ANACTRL_MANUSWON_SWR_1P7V 3 +#define ANACTRL_MANUSWON_BW3_4M 0x2 +#define ANACTRL_RESTART_CAL 0x1 +#define ANACTRL_DEF 0x02EF + +#define MIIDBG_SYSMODCTRL 0x04 +#define SYSMODCTRL_IECHOADJ_PFMH_PHY 0x8000 +#define SYSMODCTRL_IECHOADJ_BIASGEN 0x4000 +#define SYSMODCTRL_IECHOADJ_PFML_PHY 0x2000 +#define SYSMODCTRL_IECHOADJ_PS_MASK 3U +#define 
SYSMODCTRL_IECHOADJ_PS_SHIFT 10 +#define SYSMODCTRL_IECHOADJ_PS_40 3 +#define SYSMODCTRL_IECHOADJ_PS_20 2 +#define SYSMODCTRL_IECHOADJ_PS_0 1 +#define SYSMODCTRL_IECHOADJ_10BT_100MV 0x40 /* 1:100mv, 0:200mv */ +#define SYSMODCTRL_IECHOADJ_HLFAP_MASK 3U +#define SYSMODCTRL_IECHOADJ_HLFAP_SHIFT 4 +#define SYSMODCTRL_IECHOADJ_VDFULBW 0x8 +#define SYSMODCTRL_IECHOADJ_VDBIASHLF 0x4 +#define SYSMODCTRL_IECHOADJ_VDAMPHLF 0x2 +#define SYSMODCTRL_IECHOADJ_VDLANSW 0x1 +#define SYSMODCTRL_IECHOADJ_DEF 0x88BB /* ???? */ + +/* for l1d & l2cb */ +#define SYSMODCTRL_IECHOADJ_CUR_ADD 0x8000 +#define SYSMODCTRL_IECHOADJ_CUR_MASK 7U +#define SYSMODCTRL_IECHOADJ_CUR_SHIFT 12 +#define SYSMODCTRL_IECHOADJ_VOL_MASK 0xFU +#define SYSMODCTRL_IECHOADJ_VOL_SHIFT 8 +#define SYSMODCTRL_IECHOADJ_VOL_17ALL 3 +#define SYSMODCTRL_IECHOADJ_VOL_100M15 1 +#define SYSMODCTRL_IECHOADJ_VOL_10M17 0 +#define SYSMODCTRL_IECHOADJ_BIAS1_MASK 0xFU +#define SYSMODCTRL_IECHOADJ_BIAS1_SHIFT 4 +#define SYSMODCTRL_IECHOADJ_BIAS2_MASK 0xFU +#define SYSMODCTRL_IECHOADJ_BIAS2_SHIFT 0 +#define L1D_SYSMODCTRL_IECHOADJ_DEF 0x4FBB + +#define MIIDBG_SRDSYSMOD 0x05 +#define SRDSYSMOD_LCKDET_EN 0x2000 +#define SRDSYSMOD_PLL_EN 0x800 +#define SRDSYSMOD_SEL_HSP 0x400 +#define SRDSYSMOD_HLFTXDR 0x200 +#define SRDSYSMOD_TXCLK_DELAY_EN 0x100 +#define SRDSYSMOD_TXELECIDLE 0x80 +#define SRDSYSMOD_DEEMP_EN 0x40 +#define SRDSYSMOD_MS_PAD 0x4 +#define SRDSYSMOD_CDR_ADC_VLTG 0x2 +#define SRDSYSMOD_CDR_DAC_1MA 0x1 +#define SRDSYSMOD_DEF 0x2C46 + +#define MIIDBG_CFGLPSPD 0x0A +#define CFGLPSPD_RSTCNT_MASK 3U +#define CFGLPSPD_RSTCNT_SHIFT 14 +#define CFGLPSPD_RSTCNT_CLK125SW 0x2000 + +#define MIIDBG_HIBNEG 0x0B +#define HIBNEG_PSHIB_EN 0x8000 +#define HIBNEG_WAKE_BOTH 0x4000 +#define HIBNEG_ONOFF_ANACHG_SUDEN 0x2000 +#define HIBNEG_HIB_PULSE 0x1000 +#define HIBNEG_GATE_25M_EN 0x800 +#define HIBNEG_RST_80U 0x400 +#define HIBNEG_RST_TIMER_MASK 3U +#define HIBNEG_RST_TIMER_SHIFT 8 +#define HIBNEG_GTX_CLK_DELAY_MASK 3U +#define HIBNEG_GTX_CLK_DELAY_SHIFT 5 +#define HIBNEG_BYPSS_BRKTIMER 0x10 +#define HIBNEG_DEF 0xBC40 + +#define MIIDBG_TST10BTCFG 0x12 +#define TST10BTCFG_INTV_TIMER_MASK 3U +#define TST10BTCFG_INTV_TIMER_SHIFT 14 +#define TST10BTCFG_TRIGER_TIMER_MASK 3U +#define TST10BTCFG_TRIGER_TIMER_SHIFT 12 +#define TST10BTCFG_DIV_MAN_MLT3_EN 0x800 +#define TST10BTCFG_OFF_DAC_IDLE 0x400 +#define TST10BTCFG_LPBK_DEEP 0x4 /* 1:deep,0:shallow */ +#define TST10BTCFG_DEF 0x4C04 + +#define MIIDBG_AZ_ANADECT 0x15 +#define AZ_ANADECT_10BTRX_TH 0x8000 +#define AZ_ANADECT_BOTH_01CHNL 0x4000 +#define AZ_ANADECT_INTV_MASK 0x3FU +#define AZ_ANADECT_INTV_SHIFT 8 +#define AZ_ANADECT_THRESH_MASK 0xFU +#define AZ_ANADECT_THRESH_SHIFT 4 +#define AZ_ANADECT_CHNL_MASK 0xFU +#define AZ_ANADECT_CHNL_SHIFT 0 +#define AZ_ANADECT_DEF 0x3220 +#define AZ_ANADECT_LONG 0xb210 + +#define MIIDBG_MSE16DB 0x18 /* l1d */ +#define L1D_MSE16DB_UP 0x05EA +#define L1D_MSE16DB_DOWN 0x02EA + +#define MIIDBG_LEGCYPS 0x29 +#define LEGCYPS_EN 0x8000 +#define LEGCYPS_DAC_AMP1000_MASK 7U +#define LEGCYPS_DAC_AMP1000_SHIFT 12 +#define LEGCYPS_DAC_AMP100_MASK 7U +#define LEGCYPS_DAC_AMP100_SHIFT 9 +#define LEGCYPS_DAC_AMP10_MASK 7U +#define LEGCYPS_DAC_AMP10_SHIFT 6 +#define LEGCYPS_UNPLUG_TIMER_MASK 7U +#define LEGCYPS_UNPLUG_TIMER_SHIFT 3 +#define LEGCYPS_UNPLUG_DECT_EN 0x4 +#define LEGCYPS_ECNC_PS_EN 0x1 +#define L1D_LEGCYPS_DEF 0x129D +#define L1C_LEGCYPS_DEF 0x36DD + +#define MIIDBG_TST100BTCFG 0x36 +#define TST100BTCFG_NORMAL_BW_EN 0x8000 +#define TST100BTCFG_BADLNK_BYPASS 0x4000 +#define 
TST100BTCFG_SHORTCABL_TH_MASK 0x3FU +#define TST100BTCFG_SHORTCABL_TH_SHIFT 8 +#define TST100BTCFG_LITCH_EN 0x80 +#define TST100BTCFG_VLT_SW 0x40 +#define TST100BTCFG_LONGCABL_TH_MASK 0x3FU +#define TST100BTCFG_LONGCABL_TH_SHIFT 0 +#define TST100BTCFG_DEF 0xE12C + +#define MIIDBG_VOLT_CTRL 0x3B /* only for l2cb 1 & 2 */ +#define VOLT_CTRL_CABLE1TH_MASK 0x1FFU +#define VOLT_CTRL_CABLE1TH_SHIFT 7 +#define VOLT_CTRL_AMPCTRL_MASK 3U +#define VOLT_CTRL_AMPCTRL_SHIFT 5 +#define VOLT_CTRL_SW_BYPASS 0x10 +#define VOLT_CTRL_SWLOWEST 0x8 +#define VOLT_CTRL_DACAMP10_MASK 7U +#define VOLT_CTRL_DACAMP10_SHIFT 0 + +#define MIIDBG_CABLE1TH_DET 0x3E +#define CABLE1TH_DET_EN 0x8000 + + +/******* dev 3 *********/ +#define MIIEXT_PCS 3 + +#define MIIEXT_CLDCTRL3 0x8003 +#define CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000 +#define CLDCTRL3_AZ_DISAMP 0x1000 +#define L2CB_CLDCTRL3 0x4D19 +#define L1D_CLDCTRL3 0xDD19 + +#define MIIEXT_CLDCTRL6 0x8006 +#define CLDCTRL6_CAB_LEN_MASK 0x1FFU +#define CLDCTRL6_CAB_LEN_SHIFT 0 +#define CLDCTRL6_CAB_LEN_SHORT 0x50 + +/********* dev 7 **********/ +#define MIIEXT_ANEG 7 + +#define MIIEXT_LOCAL_EEEADV 0x3C +#define LOCAL_EEEADV_1000BT 0x4 +#define LOCAL_EEEADV_100BT 0x2 + +#define MIIEXT_REMOTE_EEEADV 0x3D +#define REMOTE_EEEADV_1000BT 0x4 +#define REMOTE_EEEADV_100BT 0x2 + +#define MIIEXT_EEE_ANEG 0x8000 +#define EEE_ANEG_1000M 0x4 +#define EEE_ANEG_100M 0x2 #endif /*_ATL1C_HW_H_*/ diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 1ef0c9275de..9cc15701101 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -24,14 +24,6 @@ #define ATL1C_DRV_VERSION "1.0.1.0-NAPI" char atl1c_driver_name[] = "atl1c"; char atl1c_driver_version[] = ATL1C_DRV_VERSION; -#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062 -#define PCI_DEVICE_ID_ATTANSIC_L1C 0x1063 -#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */ -#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */ -#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */ -#define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */ -#define L2CB_V10 0xc0 -#define L2CB_V11 0xc1 /* * atl1c_pci_tbl - PCI Device ID Table @@ -54,70 +46,72 @@ static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = { }; MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl); -MODULE_AUTHOR("Jie Yang <jie.yang@atheros.com>"); -MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver"); +MODULE_AUTHOR("Jie Yang"); +MODULE_AUTHOR("Qualcomm Atheros Inc., <nic-devel@qualcomm.com>"); +MODULE_DESCRIPTION("Qualcom Atheros 100/1000M Ethernet Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(ATL1C_DRV_VERSION); static int atl1c_stop_mac(struct atl1c_hw *hw); -static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw); -static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw); static void atl1c_disable_l0s_l1(struct atl1c_hw *hw); -static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup); -static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter); -static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que, +static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed); +static void atl1c_start_mac(struct atl1c_adapter *adapter); +static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, int *work_done, int work_to_do); static int atl1c_up(struct atl1c_adapter *adapter); static void atl1c_down(struct atl1c_adapter *adapter); +static int atl1c_reset_mac(struct atl1c_hw *hw); +static void 
atl1c_reset_dma_ring(struct atl1c_adapter *adapter); +static int atl1c_configure(struct atl1c_adapter *adapter); +static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter); static const u16 atl1c_pay_load_size[] = { 128, 256, 512, 1024, 2048, 4096, }; -static const u16 atl1c_rfd_prod_idx_regs[AT_MAX_RECEIVE_QUEUE] = -{ - REG_MB_RFD0_PROD_IDX, - REG_MB_RFD1_PROD_IDX, - REG_MB_RFD2_PROD_IDX, - REG_MB_RFD3_PROD_IDX -}; - -static const u16 atl1c_rfd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] = -{ - REG_RFD0_HEAD_ADDR_LO, - REG_RFD1_HEAD_ADDR_LO, - REG_RFD2_HEAD_ADDR_LO, - REG_RFD3_HEAD_ADDR_LO -}; - -static const u16 atl1c_rrd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] = -{ - REG_RRD0_HEAD_ADDR_LO, - REG_RRD1_HEAD_ADDR_LO, - REG_RRD2_HEAD_ADDR_LO, - REG_RRD3_HEAD_ADDR_LO -}; static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; static void atl1c_pcie_patch(struct atl1c_hw *hw) { - u32 data; + u32 mst_data, data; - AT_READ_REG(hw, REG_PCIE_PHYMISC, &data); - data |= PCIE_PHYMISC_FORCE_RCV_DET; - AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data); + /* pclk sel could switch to 25M */ + AT_READ_REG(hw, REG_MASTER_CTRL, &mst_data); + mst_data &= ~MASTER_CTRL_CLK_SEL_DIS; + AT_WRITE_REG(hw, REG_MASTER_CTRL, mst_data); + /* WoL/PCIE related settings */ + if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) { + AT_READ_REG(hw, REG_PCIE_PHYMISC, &data); + data |= PCIE_PHYMISC_FORCE_RCV_DET; + AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data); + } else { /* new dev set bit5 of MASTER */ + if (!(mst_data & MASTER_CTRL_WAKEN_25M)) + AT_WRITE_REG(hw, REG_MASTER_CTRL, + mst_data | MASTER_CTRL_WAKEN_25M); + } + /* aspm/PCIE setting only for l2cb 1.0 */ if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) { AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data); - - data &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK << - PCIE_PHYMISC2_SERDES_CDR_SHIFT); - data |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT; - data &= ~(PCIE_PHYMISC2_SERDES_TH_MASK << - PCIE_PHYMISC2_SERDES_TH_SHIFT); - data |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT; + data = FIELD_SETX(data, PCIE_PHYMISC2_CDR_BW, + L2CB1_PCIE_PHYMISC2_CDR_BW); + data = FIELD_SETX(data, PCIE_PHYMISC2_L0S_TH, + L2CB1_PCIE_PHYMISC2_L0S_TH); AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data); + /* extend L1 sync timer */ + AT_READ_REG(hw, REG_LINK_CTRL, &data); + data |= LINK_CTRL_EXT_SYNC; + AT_WRITE_REG(hw, REG_LINK_CTRL, data); + } + /* l2cb 1.x & l1d 1.x */ + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d) { + AT_READ_REG(hw, REG_PM_CTRL, &data); + data |= PM_CTRL_L0S_BUFSRX_EN; + AT_WRITE_REG(hw, REG_PM_CTRL, data); + /* clear vendor msg */ + AT_READ_REG(hw, REG_DMA_DBG, &data); + AT_WRITE_REG(hw, REG_DMA_DBG, data & ~DMA_DBG_VENDOR_MSG); } } @@ -130,6 +124,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag) u32 data; u32 pci_cmd; struct pci_dev *pdev = hw->adapter->pdev; + int pos; AT_READ_REG(hw, PCI_COMMAND, &pci_cmd); pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; @@ -142,14 +137,23 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag) */ pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); + /* wol sts read-clear */ + AT_READ_REG(hw, REG_WOL_CTRL, &data); + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); /* * Mask some pcie error bits */ - AT_READ_REG(hw, REG_PCIE_UC_SEVERITY, &data); - data &= ~PCIE_UC_SERVRITY_DLP; - data &= ~PCIE_UC_SERVRITY_FCP; - AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data); + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + pci_read_config_dword(pdev, pos + 
PCI_ERR_UNCOR_SEVER, &data); + data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); + pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); + /* clear error status */ + pci_write_config_word(pdev, pci_pcie_cap(pdev) + PCI_EXP_DEVSTA, + PCI_EXP_DEVSTA_NFED | + PCI_EXP_DEVSTA_FED | + PCI_EXP_DEVSTA_CED | + PCI_EXP_DEVSTA_URD); AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data); data &= ~LTSSM_ID_EN_WRO; @@ -158,11 +162,6 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag) atl1c_pcie_patch(hw); if (flag & ATL1C_PCIE_L0S_L1_DISABLE) atl1c_disable_l0s_l1(hw); - if (flag & ATL1C_PCIE_PHY_RESET) - AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT); - else - AT_WRITE_REG(hw, REG_GPHY_CTRL, - GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET); msleep(5); } @@ -207,14 +206,14 @@ static inline void atl1c_irq_reset(struct atl1c_adapter *adapter) * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads * of the idle status register until the device is actually idle */ -static u32 atl1c_wait_until_idle(struct atl1c_hw *hw) +static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl) { int timeout; u32 data; for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) { AT_READ_REG(hw, REG_IDLE_STATUS, &data); - if ((data & IDLE_STATUS_MASK) == 0) + if ((data & modu_ctrl) == 0) return 0; msleep(1); } @@ -261,15 +260,16 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter) if ((phy_data & BMSR_LSTATUS) == 0) { /* link down */ - hw->hibernate = true; - if (atl1c_stop_mac(hw) != 0) - if (netif_msg_hw(adapter)) - dev_warn(&pdev->dev, "stop mac failed\n"); - atl1c_set_aspm(hw, false); netif_carrier_off(netdev); netif_stop_queue(netdev); - atl1c_phy_reset(hw); - atl1c_phy_init(&adapter->hw); + hw->hibernate = true; + if (atl1c_reset_mac(hw) != 0) + if (netif_msg_hw(adapter)) + dev_warn(&pdev->dev, "reset mac failed\n"); + atl1c_set_aspm(hw, SPEED_0); + atl1c_post_phy_linkchg(hw, SPEED_0); + atl1c_reset_dma_ring(adapter); + atl1c_configure(adapter); } else { /* Link Up */ hw->hibernate = false; @@ -283,10 +283,9 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter) adapter->link_duplex != duplex) { adapter->link_speed = speed; adapter->link_duplex = duplex; - atl1c_set_aspm(hw, true); - atl1c_enable_tx_ctrl(hw); - atl1c_enable_rx_ctrl(hw); - atl1c_setup_mac_ctrl(adapter); + atl1c_set_aspm(hw, speed); + atl1c_post_phy_linkchg(hw, speed); + atl1c_start_mac(adapter); if (netif_msg_link(adapter)) dev_info(&pdev->dev, "%s: %s NIC Link is Up<%d Mbps %s>\n", @@ -337,6 +336,9 @@ static void atl1c_common_task(struct work_struct *work) adapter = container_of(work, struct atl1c_adapter, common_task); netdev = adapter->netdev; + if (test_bit(__AT_DOWN, &adapter->flags)) + return; + if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) { netif_device_detach(netdev); atl1c_down(adapter); @@ -345,8 +347,11 @@ static void atl1c_common_task(struct work_struct *work) } if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE, - &adapter->work_event)) + &adapter->work_event)) { + atl1c_irq_disable(adapter); atl1c_check_link_status(adapter); + atl1c_irq_enable(adapter); + } } @@ -470,7 +475,7 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p) memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); netdev->addr_assign_type &= ~NET_ADDR_RANDOM; - atl1c_hw_set_mac_addr(&adapter->hw); + atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr); return 0; } @@ -523,11 +528,16 @@ static int atl1c_set_features(struct net_device *netdev, static int 
atl1c_change_mtu(struct net_device *netdev, int new_mtu) { struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; int old_mtu = netdev->mtu; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || - (max_frame > MAX_JUMBO_FRAME_SIZE)) { + /* Fast Ethernet controller doesn't support jumbo packet */ + if (((hw->nic_type == athr_l2c || + hw->nic_type == athr_l2c_b || + hw->nic_type == athr_l2c_b2) && new_mtu > ETH_DATA_LEN) || + max_frame < ETH_ZLEN + ETH_FCS_LEN || + max_frame > MAX_JUMBO_FRAME_SIZE) { if (netif_msg_link(adapter)) dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); return -EINVAL; @@ -543,14 +553,6 @@ static int atl1c_change_mtu(struct net_device *netdev, int new_mtu) netdev_update_features(netdev); atl1c_up(adapter); clear_bit(__AT_RESETTING, &adapter->flags); - if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) { - u32 phy_data; - - AT_READ_REG(&adapter->hw, 0x1414, &phy_data); - phy_data |= 0x10000000; - AT_WRITE_REG(&adapter->hw, 0x1414, phy_data); - } - } return 0; } @@ -563,7 +565,7 @@ static int atl1c_mdio_read(struct net_device *netdev, int phy_id, int reg_num) struct atl1c_adapter *adapter = netdev_priv(netdev); u16 result; - atl1c_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result); + atl1c_read_phy_reg(&adapter->hw, reg_num, &result); return result; } @@ -572,7 +574,7 @@ static void atl1c_mdio_write(struct net_device *netdev, int phy_id, { struct atl1c_adapter *adapter = netdev_priv(netdev); - atl1c_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val); + atl1c_write_phy_reg(&adapter->hw, reg_num, val); } /* @@ -687,21 +689,15 @@ static void atl1c_set_mac_type(struct atl1c_hw *hw) static int atl1c_setup_mac_funcs(struct atl1c_hw *hw) { - u32 phy_status_data; u32 link_ctrl_data; atl1c_set_mac_type(hw); - AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data); AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE | ATL1C_TXQ_MODE_ENHANCE; - if (link_ctrl_data & LINK_CTRL_L0S_EN) - hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT; - if (link_ctrl_data & LINK_CTRL_L1_EN) - hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT; - if (link_ctrl_data & LINK_CTRL_EXT_SYNC) - hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC; + hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT | + ATL1C_ASPM_L1_SUPPORT; hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON; if (hw->nic_type == athr_l1c || @@ -710,6 +706,55 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw) hw->link_cap_flags |= ATL1C_LINK_CAP_1000M; return 0; } + +struct atl1c_platform_patch { + u16 pci_did; + u8 pci_revid; + u16 subsystem_vid; + u16 subsystem_did; + u32 patch_flag; +#define ATL1C_LINK_PATCH 0x1 +}; +static const struct atl1c_platform_patch plats[] __devinitdata = { +{0x2060, 0xC1, 0x1019, 0x8152, 0x1}, +{0x2060, 0xC1, 0x1019, 0x2060, 0x1}, +{0x2060, 0xC1, 0x1019, 0xE000, 0x1}, +{0x2062, 0xC0, 0x1019, 0x8152, 0x1}, +{0x2062, 0xC0, 0x1019, 0x2062, 0x1}, +{0x2062, 0xC0, 0x1458, 0xE000, 0x1}, +{0x2062, 0xC1, 0x1019, 0x8152, 0x1}, +{0x2062, 0xC1, 0x1019, 0x2062, 0x1}, +{0x2062, 0xC1, 0x1458, 0xE000, 0x1}, +{0x2062, 0xC1, 0x1565, 0x2802, 0x1}, +{0x2062, 0xC1, 0x1565, 0x2801, 0x1}, +{0x1073, 0xC0, 0x1019, 0x8151, 0x1}, +{0x1073, 0xC0, 0x1019, 0x1073, 0x1}, +{0x1073, 0xC0, 0x1458, 0xE000, 0x1}, +{0x1083, 0xC0, 0x1458, 0xE000, 0x1}, +{0x1083, 0xC0, 0x1019, 0x8151, 0x1}, +{0x1083, 0xC0, 0x1019, 0x1083, 0x1}, +{0x1083, 0xC0, 0x1462, 0x7680, 0x1}, +{0x1083, 0xC0, 0x1565, 0x2803, 0x1}, +{0}, +}; + +static void __devinit 
atl1c_patch_assign(struct atl1c_hw *hw) +{ + int i = 0; + + hw->msi_lnkpatch = false; + + while (plats[i].pci_did != 0) { + if (plats[i].pci_did == hw->device_id && + plats[i].pci_revid == hw->revision_id && + plats[i].subsystem_vid == hw->subsystem_vendor_id && + plats[i].subsystem_did == hw->subsystem_id) { + if (plats[i].patch_flag & ATL1C_LINK_PATCH) + hw->msi_lnkpatch = true; + } + i++; + } +} /* * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter) * @adapter: board private structure to initialize @@ -729,9 +774,8 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter) device_set_wakeup_enable(&pdev->dev, false); adapter->link_speed = SPEED_0; adapter->link_duplex = FULL_DUPLEX; - adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE; adapter->tpd_ring[0].count = 1024; - adapter->rfd_ring[0].count = 512; + adapter->rfd_ring.count = 512; hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; @@ -746,26 +790,18 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter) dev_err(&pdev->dev, "set mac function pointers failed\n"); return -1; } + atl1c_patch_assign(hw); + hw->intr_mask = IMR_NORMAL_MASK; hw->phy_configured = false; hw->preamble_len = 7; hw->max_frame_size = adapter->netdev->mtu; - if (adapter->num_rx_queues < 2) { - hw->rss_type = atl1c_rss_disable; - hw->rss_mode = atl1c_rss_mode_disable; - } else { - hw->rss_type = atl1c_rss_ipv4; - hw->rss_mode = atl1c_rss_mul_que_mul_int; - hw->rss_hash_bits = 16; - } hw->autoneg_advertised = ADVERTISED_Autoneg; hw->indirect_tab = 0xE4E4E4E4; hw->base_cpu = 0; hw->ict = 50000; /* 100ms */ hw->smb_timer = 200000; /* 400ms */ - hw->cmb_tpd = 4; - hw->cmb_tx_timer = 1; /* 2 us */ hw->rx_imt = 200; hw->tx_imt = 1000; @@ -773,9 +809,6 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter) hw->rfd_burst = 8; hw->dma_order = atl1c_dma_ord_out; hw->dmar_block = atl1c_dma_req_1024; - hw->dmaw_block = atl1c_dma_req_1024; - hw->dmar_dly_cnt = 15; - hw->dmaw_dly_cnt = 4; if (atl1c_alloc_queues(adapter)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); @@ -851,24 +884,22 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter, */ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter) { - struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; - struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring; struct atl1c_buffer *buffer_info; struct pci_dev *pdev = adapter->pdev; - int i, j; + int j; - for (i = 0; i < adapter->num_rx_queues; i++) { - for (j = 0; j < rfd_ring[i].count; j++) { - buffer_info = &rfd_ring[i].buffer_info[j]; - atl1c_clean_buffer(pdev, buffer_info, 0); - } - /* zero out the descriptor ring */ - memset(rfd_ring[i].desc, 0, rfd_ring[i].size); - rfd_ring[i].next_to_clean = 0; - rfd_ring[i].next_to_use = 0; - rrd_ring[i].next_to_use = 0; - rrd_ring[i].next_to_clean = 0; + for (j = 0; j < rfd_ring->count; j++) { + buffer_info = &rfd_ring->buffer_info[j]; + atl1c_clean_buffer(pdev, buffer_info, 0); } + /* zero out the descriptor ring */ + memset(rfd_ring->desc, 0, rfd_ring->size); + rfd_ring->next_to_clean = 0; + rfd_ring->next_to_use = 0; + rrd_ring->next_to_use = 0; + rrd_ring->next_to_clean = 0; } /* @@ -877,8 +908,8 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter) static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter) { struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; - struct atl1c_rfd_ring *rfd_ring = 
adapter->rfd_ring; - struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring; struct atl1c_buffer *buffer_info; int i, j; @@ -890,15 +921,13 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter) ATL1C_SET_BUFFER_STATE(&buffer_info[i], ATL1C_BUFFER_FREE); } - for (i = 0; i < adapter->num_rx_queues; i++) { - rfd_ring[i].next_to_use = 0; - rfd_ring[i].next_to_clean = 0; - rrd_ring[i].next_to_use = 0; - rrd_ring[i].next_to_clean = 0; - for (j = 0; j < rfd_ring[i].count; j++) { - buffer_info = &rfd_ring[i].buffer_info[j]; - ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); - } + rfd_ring->next_to_use = 0; + rfd_ring->next_to_clean = 0; + rrd_ring->next_to_use = 0; + rrd_ring->next_to_clean = 0; + for (j = 0; j < rfd_ring->count; j++) { + buffer_info = &rfd_ring->buffer_info[j]; + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); } } @@ -935,27 +964,23 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; - struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; - struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring; struct atl1c_ring_header *ring_header = &adapter->ring_header; - int num_rx_queues = adapter->num_rx_queues; int size; int i; int count = 0; int rx_desc_count = 0; u32 offset = 0; - rrd_ring[0].count = rfd_ring[0].count; + rrd_ring->count = rfd_ring->count; for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++) tpd_ring[i].count = tpd_ring[0].count; - for (i = 1; i < adapter->num_rx_queues; i++) - rfd_ring[i].count = rrd_ring[i].count = rfd_ring[0].count; - /* 2 tpd queue, one high priority queue, * another normal priority queue */ size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 + - rfd_ring->count * num_rx_queues); + rfd_ring->count); tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); if (unlikely(!tpd_ring->buffer_info)) { dev_err(&pdev->dev, "kzalloc failed, size = %d\n", @@ -968,12 +993,11 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter) count += tpd_ring[i].count; } - for (i = 0; i < num_rx_queues; i++) { - rfd_ring[i].buffer_info = - (struct atl1c_buffer *) (tpd_ring->buffer_info + count); - count += rfd_ring[i].count; - rx_desc_count += rfd_ring[i].count; - } + rfd_ring->buffer_info = + (struct atl1c_buffer *) (tpd_ring->buffer_info + count); + count += rfd_ring->count; + rx_desc_count += rfd_ring->count; + /* * real ring DMA buffer * each ring/block may need up to 8 bytes for alignment, hence the @@ -983,8 +1007,7 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter) sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 + sizeof(struct atl1c_rx_free_desc) * rx_desc_count + sizeof(struct atl1c_recv_ret_status) * rx_desc_count + - sizeof(struct atl1c_hw_stats) + - 8 * 4 + 8 * 2 * num_rx_queues; + 8 * 4; ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, &ring_header->dma); @@ -1005,25 +1028,18 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter) offset += roundup(tpd_ring[i].size, 8); } /* init RFD ring */ - for (i = 0; i < num_rx_queues; i++) { - rfd_ring[i].dma = ring_header->dma + offset; - rfd_ring[i].desc = (u8 *) ring_header->desc + offset; - rfd_ring[i].size = sizeof(struct atl1c_rx_free_desc) * - rfd_ring[i].count; - offset += roundup(rfd_ring[i].size, 
8); - } + rfd_ring->dma = ring_header->dma + offset; + rfd_ring->desc = (u8 *) ring_header->desc + offset; + rfd_ring->size = sizeof(struct atl1c_rx_free_desc) * rfd_ring->count; + offset += roundup(rfd_ring->size, 8); /* init RRD ring */ - for (i = 0; i < num_rx_queues; i++) { - rrd_ring[i].dma = ring_header->dma + offset; - rrd_ring[i].desc = (u8 *) ring_header->desc + offset; - rrd_ring[i].size = sizeof(struct atl1c_recv_ret_status) * - rrd_ring[i].count; - offset += roundup(rrd_ring[i].size, 8); - } + rrd_ring->dma = ring_header->dma + offset; + rrd_ring->desc = (u8 *) ring_header->desc + offset; + rrd_ring->size = sizeof(struct atl1c_recv_ret_status) * + rrd_ring->count; + offset += roundup(rrd_ring->size, 8); - adapter->smb.dma = ring_header->dma + offset; - adapter->smb.smb = (u8 *)ring_header->desc + offset; return 0; err_nomem: @@ -1034,26 +1050,20 @@ err_nomem: static void atl1c_configure_des_ring(struct atl1c_adapter *adapter) { struct atl1c_hw *hw = &adapter->hw; - struct atl1c_rfd_ring *rfd_ring = (struct atl1c_rfd_ring *) - adapter->rfd_ring; - struct atl1c_rrd_ring *rrd_ring = (struct atl1c_rrd_ring *) - adapter->rrd_ring; + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring; struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *) adapter->tpd_ring; - struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb; - struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb; - int i; - u32 data; /* TPD */ AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI, (u32)((tpd_ring[atl1c_trans_normal].dma & AT_DMA_HI_ADDR_MASK) >> 32)); /* just enable normal priority TX queue */ - AT_WRITE_REG(hw, REG_NTPD_HEAD_ADDR_LO, + AT_WRITE_REG(hw, REG_TPD_PRI0_ADDR_LO, (u32)(tpd_ring[atl1c_trans_normal].dma & AT_DMA_LO_ADDR_MASK)); - AT_WRITE_REG(hw, REG_HTPD_HEAD_ADDR_LO, + AT_WRITE_REG(hw, REG_TPD_PRI1_ADDR_LO, (u32)(tpd_ring[atl1c_trans_high].dma & AT_DMA_LO_ADDR_MASK)); AT_WRITE_REG(hw, REG_TPD_RING_SIZE, @@ -1062,31 +1072,21 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter) /* RFD */ AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI, - (u32)((rfd_ring[0].dma & AT_DMA_HI_ADDR_MASK) >> 32)); - for (i = 0; i < adapter->num_rx_queues; i++) - AT_WRITE_REG(hw, atl1c_rfd_addr_lo_regs[i], - (u32)(rfd_ring[i].dma & AT_DMA_LO_ADDR_MASK)); + (u32)((rfd_ring->dma & AT_DMA_HI_ADDR_MASK) >> 32)); + AT_WRITE_REG(hw, REG_RFD0_HEAD_ADDR_LO, + (u32)(rfd_ring->dma & AT_DMA_LO_ADDR_MASK)); AT_WRITE_REG(hw, REG_RFD_RING_SIZE, - rfd_ring[0].count & RFD_RING_SIZE_MASK); + rfd_ring->count & RFD_RING_SIZE_MASK); AT_WRITE_REG(hw, REG_RX_BUF_SIZE, adapter->rx_buffer_len & RX_BUF_SIZE_MASK); /* RRD */ - for (i = 0; i < adapter->num_rx_queues; i++) - AT_WRITE_REG(hw, atl1c_rrd_addr_lo_regs[i], - (u32)(rrd_ring[i].dma & AT_DMA_LO_ADDR_MASK)); + AT_WRITE_REG(hw, REG_RRD0_HEAD_ADDR_LO, + (u32)(rrd_ring->dma & AT_DMA_LO_ADDR_MASK)); AT_WRITE_REG(hw, REG_RRD_RING_SIZE, - (rrd_ring[0].count & RRD_RING_SIZE_MASK)); + (rrd_ring->count & RRD_RING_SIZE_MASK)); - /* CMB */ - AT_WRITE_REG(hw, REG_CMB_BASE_ADDR_LO, cmb->dma & AT_DMA_LO_ADDR_MASK); - - /* SMB */ - AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_HI, - (u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32)); - AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO, - (u32)(smb->dma & AT_DMA_LO_ADDR_MASK)); if (hw->nic_type == athr_l2c_b) { AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L); AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L); @@ -1097,13 +1097,6 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter) AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 
0); /* TX watermark, to enter l1 state.*/ AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0); /* RXD threshold.*/ } - if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d_2) { - /* Power Saving for L2c_B */ - AT_READ_REG(hw, REG_SERDES_LOCK, &data); - data |= SERDES_MAC_CLK_SLOWDOWN; - data |= SERDES_PYH_CLK_SLOWDOWN; - AT_WRITE_REG(hw, REG_SERDES_LOCK, data); - } /* Load all of base address above */ AT_WRITE_REG(hw, REG_LOAD_PTR, 1); } @@ -1111,32 +1104,26 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter) static void atl1c_configure_tx(struct atl1c_adapter *adapter) { struct atl1c_hw *hw = &adapter->hw; - u32 dev_ctrl_data; - u32 max_pay_load; + int max_pay_load; u16 tx_offload_thresh; u32 txq_ctrl_data; - u32 max_pay_load_data; - tx_offload_thresh = MAX_TX_OFFLOAD_THRESH; + tx_offload_thresh = MAX_TSO_FRAME_SIZE; AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH, (tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK); - AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data); - max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) & - DEVICE_CTRL_MAX_PAYLOAD_MASK; - hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block); - max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) & - DEVICE_CTRL_MAX_RREQ_SZ_MASK; + max_pay_load = pcie_get_readrq(adapter->pdev) >> 8; hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block); - - txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) << - TXQ_NUM_TPD_BURST_SHIFT; - if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE) - txq_ctrl_data |= TXQ_CTRL_ENH_MODE; - max_pay_load_data = (atl1c_pay_load_size[hw->dmar_block] & - TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT; - if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) - max_pay_load_data >>= 1; - txq_ctrl_data |= max_pay_load_data; + /* + * if BIOS had changed the dam-read-max-length to an invalid value, + * restore it to default value + */ + if (hw->dmar_block < DEVICE_CTRL_MAXRRS_MIN) { + pcie_set_readrq(adapter->pdev, 128 << DEVICE_CTRL_MAXRRS_MIN); + hw->dmar_block = DEVICE_CTRL_MAXRRS_MIN; + } + txq_ctrl_data = + hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 ? 
+ L2CB_TXQ_CFGV : L1C_TXQ_CFGV; AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data); } @@ -1151,34 +1138,13 @@ static void atl1c_configure_rx(struct atl1c_adapter *adapter) if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM) rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN; - if (hw->rss_type == atl1c_rss_ipv4) - rxq_ctrl_data |= RSS_HASH_IPV4; - if (hw->rss_type == atl1c_rss_ipv4_tcp) - rxq_ctrl_data |= RSS_HASH_IPV4_TCP; - if (hw->rss_type == atl1c_rss_ipv6) - rxq_ctrl_data |= RSS_HASH_IPV6; - if (hw->rss_type == atl1c_rss_ipv6_tcp) - rxq_ctrl_data |= RSS_HASH_IPV6_TCP; - if (hw->rss_type != atl1c_rss_disable) - rxq_ctrl_data |= RRS_HASH_CTRL_EN; - - rxq_ctrl_data |= (hw->rss_mode & RSS_MODE_MASK) << - RSS_MODE_SHIFT; - rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) << - RSS_HASH_BITS_SHIFT; - if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON) - rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_1M & - ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT; - AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); -} - -static void atl1c_configure_rss(struct atl1c_adapter *adapter) -{ - struct atl1c_hw *hw = &adapter->hw; + /* aspm for gigabit */ + if (hw->nic_type != athr_l1d_2 && (hw->device_id & 1) != 0) + rxq_ctrl_data = FIELD_SETX(rxq_ctrl_data, ASPM_THRUPUT_LIMIT, + ASPM_THRUPUT_LIMIT_100M); - AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab); - AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu); + AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); } static void atl1c_configure_dma(struct atl1c_adapter *adapter) @@ -1186,36 +1152,11 @@ static void atl1c_configure_dma(struct atl1c_adapter *adapter) struct atl1c_hw *hw = &adapter->hw; u32 dma_ctrl_data; - dma_ctrl_data = DMA_CTRL_DMAR_REQ_PRI; - if (hw->ctrl_flags & ATL1C_CMB_ENABLE) - dma_ctrl_data |= DMA_CTRL_CMB_EN; - if (hw->ctrl_flags & ATL1C_SMB_ENABLE) - dma_ctrl_data |= DMA_CTRL_SMB_EN; - else - dma_ctrl_data |= MAC_CTRL_SMB_DIS; - - switch (hw->dma_order) { - case atl1c_dma_ord_in: - dma_ctrl_data |= DMA_CTRL_DMAR_IN_ORDER; - break; - case atl1c_dma_ord_enh: - dma_ctrl_data |= DMA_CTRL_DMAR_ENH_ORDER; - break; - case atl1c_dma_ord_out: - dma_ctrl_data |= DMA_CTRL_DMAR_OUT_ORDER; - break; - default: - break; - } - - dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) - << DMA_CTRL_DMAR_BURST_LEN_SHIFT; - dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK) - << DMA_CTRL_DMAW_BURST_LEN_SHIFT; - dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK) - << DMA_CTRL_DMAR_DLY_CNT_SHIFT; - dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK) - << DMA_CTRL_DMAW_DLY_CNT_SHIFT; + dma_ctrl_data = FIELDX(DMA_CTRL_RORDER_MODE, DMA_CTRL_RORDER_MODE_OUT) | + DMA_CTRL_RREQ_PRI_DATA | + FIELDX(DMA_CTRL_RREQ_BLEN, hw->dmar_block) | + FIELDX(DMA_CTRL_WDLY_CNT, DMA_CTRL_WDLY_CNT_DEF) | + FIELDX(DMA_CTRL_RDLY_CNT, DMA_CTRL_RDLY_CNT_DEF); AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data); } @@ -1230,52 +1171,53 @@ static int atl1c_stop_mac(struct atl1c_hw *hw) u32 data; AT_READ_REG(hw, REG_RXQ_CTRL, &data); - data &= ~(RXQ1_CTRL_EN | RXQ2_CTRL_EN | - RXQ3_CTRL_EN | RXQ_CTRL_EN); + data &= ~RXQ_CTRL_EN; AT_WRITE_REG(hw, REG_RXQ_CTRL, data); AT_READ_REG(hw, REG_TXQ_CTRL, &data); data &= ~TXQ_CTRL_EN; - AT_WRITE_REG(hw, REG_TWSI_CTRL, data); + AT_WRITE_REG(hw, REG_TXQ_CTRL, data); - atl1c_wait_until_idle(hw); + atl1c_wait_until_idle(hw, IDLE_STATUS_RXQ_BUSY | IDLE_STATUS_TXQ_BUSY); AT_READ_REG(hw, REG_MAC_CTRL, &data); data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN); AT_WRITE_REG(hw, REG_MAC_CTRL, data); - return 
(int)atl1c_wait_until_idle(hw); -} - -static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw) -{ - u32 data; - - AT_READ_REG(hw, REG_RXQ_CTRL, &data); - switch (hw->adapter->num_rx_queues) { - case 4: - data |= (RXQ3_CTRL_EN | RXQ2_CTRL_EN | RXQ1_CTRL_EN); - break; - case 3: - data |= (RXQ2_CTRL_EN | RXQ1_CTRL_EN); - break; - case 2: - data |= RXQ1_CTRL_EN; - break; - default: - break; - } - data |= RXQ_CTRL_EN; - AT_WRITE_REG(hw, REG_RXQ_CTRL, data); + return (int)atl1c_wait_until_idle(hw, + IDLE_STATUS_TXMAC_BUSY | IDLE_STATUS_RXMAC_BUSY); } -static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw) +static void atl1c_start_mac(struct atl1c_adapter *adapter) { - u32 data; + struct atl1c_hw *hw = &adapter->hw; + u32 mac, txq, rxq; + + hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX ? true : false; + hw->mac_speed = adapter->link_speed == SPEED_1000 ? + atl1c_mac_speed_1000 : atl1c_mac_speed_10_100; + + AT_READ_REG(hw, REG_TXQ_CTRL, &txq); + AT_READ_REG(hw, REG_RXQ_CTRL, &rxq); + AT_READ_REG(hw, REG_MAC_CTRL, &mac); + + txq |= TXQ_CTRL_EN; + rxq |= RXQ_CTRL_EN; + mac |= MAC_CTRL_TX_EN | MAC_CTRL_TX_FLOW | + MAC_CTRL_RX_EN | MAC_CTRL_RX_FLOW | + MAC_CTRL_ADD_CRC | MAC_CTRL_PAD | + MAC_CTRL_BC_EN | MAC_CTRL_SINGLE_PAUSE_EN | + MAC_CTRL_HASH_ALG_CRC32; + if (hw->mac_duplex) + mac |= MAC_CTRL_DUPLX; + else + mac &= ~MAC_CTRL_DUPLX; + mac = FIELD_SETX(mac, MAC_CTRL_SPEED, hw->mac_speed); + mac = FIELD_SETX(mac, MAC_CTRL_PRMLEN, hw->preamble_len); - AT_READ_REG(hw, REG_TXQ_CTRL, &data); - data |= TXQ_CTRL_EN; - AT_WRITE_REG(hw, REG_TXQ_CTRL, data); + AT_WRITE_REG(hw, REG_TXQ_CTRL, txq); + AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq); + AT_WRITE_REG(hw, REG_MAC_CTRL, mac); } /* @@ -1287,10 +1229,7 @@ static int atl1c_reset_mac(struct atl1c_hw *hw) { struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; struct pci_dev *pdev = adapter->pdev; - u32 master_ctrl_data = 0; - - AT_WRITE_REG(hw, REG_IMR, 0); - AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT); + u32 ctrl_data = 0; atl1c_stop_mac(hw); /* @@ -1299,194 +1238,148 @@ static int atl1c_reset_mac(struct atl1c_hw *hw) * the current PCI configuration. The global reset bit is self- * clearing, and should clear within a microsecond. 
*/ - AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); - master_ctrl_data |= MASTER_CTRL_OOB_DIS_OFF; - AT_WRITE_REGW(hw, REG_MASTER_CTRL, ((master_ctrl_data | MASTER_CTRL_SOFT_RST) - & 0xFFFF)); + AT_READ_REG(hw, REG_MASTER_CTRL, &ctrl_data); + ctrl_data |= MASTER_CTRL_OOB_DIS; + AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data | MASTER_CTRL_SOFT_RST); AT_WRITE_FLUSH(hw); msleep(10); /* Wait at least 10ms for All module to be Idle */ - if (atl1c_wait_until_idle(hw)) { + if (atl1c_wait_until_idle(hw, IDLE_STATUS_MASK)) { dev_err(&pdev->dev, "MAC state machine can't be idle since" " disabled for 10ms second\n"); return -1; } + AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data); + + /* driver control speed/duplex */ + AT_READ_REG(hw, REG_MAC_CTRL, &ctrl_data); + AT_WRITE_REG(hw, REG_MAC_CTRL, ctrl_data | MAC_CTRL_SPEED_MODE_SW); + + /* clk switch setting */ + AT_READ_REG(hw, REG_SERDES, &ctrl_data); + switch (hw->nic_type) { + case athr_l2c_b: + ctrl_data &= ~(SERDES_PHY_CLK_SLOWDOWN | + SERDES_MAC_CLK_SLOWDOWN); + AT_WRITE_REG(hw, REG_SERDES, ctrl_data); + break; + case athr_l2c_b2: + case athr_l1d_2: + ctrl_data |= SERDES_PHY_CLK_SLOWDOWN | SERDES_MAC_CLK_SLOWDOWN; + AT_WRITE_REG(hw, REG_SERDES, ctrl_data); + break; + default: + break; + } + return 0; } static void atl1c_disable_l0s_l1(struct atl1c_hw *hw) { - u32 pm_ctrl_data; + u16 ctrl_flags = hw->ctrl_flags; - AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); - pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << - PM_CTRL_L1_ENTRY_TIMER_SHIFT); - pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1; - pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; - pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; - pm_ctrl_data &= ~PM_CTRL_MAC_ASPM_CHK; - pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1; - - pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN; - pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN; - pm_ctrl_data |= PM_CTRL_SERDES_L1_EN; - AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data); + hw->ctrl_flags &= ~(ATL1C_ASPM_L0S_SUPPORT | ATL1C_ASPM_L1_SUPPORT); + atl1c_set_aspm(hw, SPEED_0); + hw->ctrl_flags = ctrl_flags; } /* * Set ASPM state. * Enable/disable L0s/L1 depend on link state. */ -static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup) +static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed) { u32 pm_ctrl_data; - u32 link_ctrl_data; - u32 link_l1_timer = 0xF; + u32 link_l1_timer; AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); - AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); + pm_ctrl_data &= ~(PM_CTRL_ASPM_L1_EN | + PM_CTRL_ASPM_L0S_EN | + PM_CTRL_MAC_ASPM_CHK); + /* L1 timer */ + if (hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { + pm_ctrl_data &= ~PMCTRL_TXL1_AFTER_L0S; + link_l1_timer = + link_speed == SPEED_1000 || link_speed == SPEED_100 ? + L1D_PMCTRL_L1_ENTRY_TM_16US : 1; + pm_ctrl_data = FIELD_SETX(pm_ctrl_data, + L1D_PMCTRL_L1_ENTRY_TM, link_l1_timer); + } else { + link_l1_timer = hw->nic_type == athr_l2c_b ? 
+ L2CB1_PM_CTRL_L1_ENTRY_TM : L1C_PM_CTRL_L1_ENTRY_TM; + if (link_speed != SPEED_1000 && link_speed != SPEED_100) + link_l1_timer = 1; + pm_ctrl_data = FIELD_SETX(pm_ctrl_data, + PM_CTRL_L1_ENTRY_TIMER, link_l1_timer); + } - pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1; - pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << - PM_CTRL_L1_ENTRY_TIMER_SHIFT); - pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK << - PM_CTRL_LCKDET_TIMER_SHIFT); - pm_ctrl_data |= AT_LCKDET_TIMER << PM_CTRL_LCKDET_TIMER_SHIFT; + /* L0S/L1 enable */ + if ((hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) && link_speed != SPEED_0) + pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN | PM_CTRL_MAC_ASPM_CHK; + if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT) + pm_ctrl_data |= PM_CTRL_ASPM_L1_EN | PM_CTRL_MAC_ASPM_CHK; + /* l2cb & l1d & l2cb2 & l1d2 */ if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d || - hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { - link_ctrl_data &= ~LINK_CTRL_EXT_SYNC; - if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) { - if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) - link_ctrl_data |= LINK_CTRL_EXT_SYNC; - } - - AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data); - - pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER; - pm_ctrl_data &= ~(PM_CTRL_PM_REQ_TIMER_MASK << - PM_CTRL_PM_REQ_TIMER_SHIFT); - pm_ctrl_data |= AT_ASPM_L1_TIMER << - PM_CTRL_PM_REQ_TIMER_SHIFT; - pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN; - pm_ctrl_data &= ~PM_CTRL_HOTRST; - pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT; - pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1; - } - pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK; - if (linkup) { - pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; - pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; - if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT) - pm_ctrl_data |= PM_CTRL_ASPM_L1_EN; - if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) - pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN; - - if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d || - hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { - if (hw->nic_type == athr_l2c_b) - if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) - pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; - pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN; - pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN; - pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN; - pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; - if (hw->adapter->link_speed == SPEED_100 || - hw->adapter->link_speed == SPEED_1000) { - pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << - PM_CTRL_L1_ENTRY_TIMER_SHIFT); - if (hw->nic_type == athr_l2c_b) - link_l1_timer = 7; - else if (hw->nic_type == athr_l2c_b2 || - hw->nic_type == athr_l1d_2) - link_l1_timer = 4; - pm_ctrl_data |= link_l1_timer << - PM_CTRL_L1_ENTRY_TIMER_SHIFT; - } - } else { - pm_ctrl_data |= PM_CTRL_SERDES_L1_EN; - pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN; - pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN; - pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1; + hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { + pm_ctrl_data = FIELD_SETX(pm_ctrl_data, + PM_CTRL_PM_REQ_TIMER, PM_CTRL_PM_REQ_TO_DEF); + pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER | + PM_CTRL_SERDES_PD_EX_L1 | + PM_CTRL_CLK_SWH_L1; + pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN | + PM_CTRL_SERDES_PLL_L1_EN | + PM_CTRL_SERDES_BUFS_RX_L1_EN | + PM_CTRL_SA_DLY_EN | + PM_CTRL_HOTRST); + /* disable l0s if link down or l2cb */ + if (link_speed == SPEED_0 || hw->nic_type == athr_l2c_b) pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; - pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; - + } else { /* l1c */ + pm_ctrl_data = + FIELD_SETX(pm_ctrl_data, PM_CTRL_L1_ENTRY_TIMER, 0); + if (link_speed != SPEED_0) { + pm_ctrl_data |= 
PM_CTRL_SERDES_L1_EN | + PM_CTRL_SERDES_PLL_L1_EN | + PM_CTRL_SERDES_BUFS_RX_L1_EN; + pm_ctrl_data &= ~(PM_CTRL_SERDES_PD_EX_L1 | + PM_CTRL_CLK_SWH_L1 | + PM_CTRL_ASPM_L0S_EN | + PM_CTRL_ASPM_L1_EN); + } else { /* link down */ + pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; + pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN | + PM_CTRL_SERDES_PLL_L1_EN | + PM_CTRL_SERDES_BUFS_RX_L1_EN | + PM_CTRL_ASPM_L0S_EN); } - } else { - pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN; - pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; - pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN; - pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; - - if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT) - pm_ctrl_data |= PM_CTRL_ASPM_L1_EN; - else - pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; } AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data); return; } -static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter) -{ - struct atl1c_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - u32 mac_ctrl_data; - - mac_ctrl_data = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN; - mac_ctrl_data |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); - - if (adapter->link_duplex == FULL_DUPLEX) { - hw->mac_duplex = true; - mac_ctrl_data |= MAC_CTRL_DUPLX; - } - - if (adapter->link_speed == SPEED_1000) - hw->mac_speed = atl1c_mac_speed_1000; - else - hw->mac_speed = atl1c_mac_speed_10_100; - - mac_ctrl_data |= (hw->mac_speed & MAC_CTRL_SPEED_MASK) << - MAC_CTRL_SPEED_SHIFT; - - mac_ctrl_data |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); - mac_ctrl_data |= ((hw->preamble_len & MAC_CTRL_PRMLEN_MASK) << - MAC_CTRL_PRMLEN_SHIFT); - - __atl1c_vlan_mode(netdev->features, &mac_ctrl_data); - - mac_ctrl_data |= MAC_CTRL_BC_EN; - if (netdev->flags & IFF_PROMISC) - mac_ctrl_data |= MAC_CTRL_PROMIS_EN; - if (netdev->flags & IFF_ALLMULTI) - mac_ctrl_data |= MAC_CTRL_MC_ALL_EN; - - mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN; - if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2 || - hw->nic_type == athr_l1d_2) { - mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW; - mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32; - } - AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); -} - /* * atl1c_configure - Configure Transmit&Receive Unit after Reset * @adapter: board private structure * * Configure the Tx /Rx unit of the MAC after a reset. 
*/ -static int atl1c_configure(struct atl1c_adapter *adapter) +static int atl1c_configure_mac(struct atl1c_adapter *adapter) { struct atl1c_hw *hw = &adapter->hw; u32 master_ctrl_data = 0; u32 intr_modrt_data; u32 data; + AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); + master_ctrl_data &= ~(MASTER_CTRL_TX_ITIMER_EN | + MASTER_CTRL_RX_ITIMER_EN | + MASTER_CTRL_INT_RDCLR); /* clear interrupt status */ AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF); /* Clear any WOL status */ @@ -1525,30 +1418,39 @@ static int atl1c_configure(struct atl1c_adapter *adapter) master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN; AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); - if (hw->ctrl_flags & ATL1C_CMB_ENABLE) { - AT_WRITE_REG(hw, REG_CMB_TPD_THRESH, - hw->cmb_tpd & CMB_TPD_THRESH_MASK); - AT_WRITE_REG(hw, REG_CMB_TX_TIMER, - hw->cmb_tx_timer & CMB_TX_TIMER_MASK); - } + AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, + hw->smb_timer & SMB_STAT_TIMER_MASK); - if (hw->ctrl_flags & ATL1C_SMB_ENABLE) - AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, - hw->smb_timer & SMB_STAT_TIMER_MASK); /* set MTU */ AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); - /* HDS, disable */ - AT_WRITE_REG(hw, REG_HDS_CTRL, 0); atl1c_configure_tx(adapter); atl1c_configure_rx(adapter); - atl1c_configure_rss(adapter); atl1c_configure_dma(adapter); return 0; } +static int atl1c_configure(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int num; + + atl1c_init_ring_ptrs(adapter); + atl1c_set_multi(netdev); + atl1c_restore_vlan(adapter); + + num = atl1c_alloc_rx_buffer(adapter); + if (unlikely(num == 0)) + return -ENOMEM; + + if (atl1c_configure_mac(adapter)) + return -EIO; + + return 0; +} + static void atl1c_update_hw_stats(struct atl1c_adapter *adapter) { u16 hw_reg_addr = 0; @@ -1635,16 +1537,11 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter, struct pci_dev *pdev = adapter->pdev; u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); u16 hw_next_to_clean; - u16 shift; - u32 data; + u16 reg; - if (type == atl1c_trans_high) - shift = MB_HTPD_CONS_IDX_SHIFT; - else - shift = MB_NTPD_CONS_IDX_SHIFT; + reg = type == atl1c_trans_high ? 
REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX; - AT_READ_REG(&adapter->hw, REG_MB_PRIO_CONS_IDX, &data); - hw_next_to_clean = (data >> shift) & MB_PRIO_PROD_IDX_MASK; + AT_READ_REGW(&adapter->hw, reg, &hw_next_to_clean); while (next_to_clean != hw_next_to_clean) { buffer_info = &tpd_ring->buffer_info[next_to_clean]; @@ -1746,9 +1643,9 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter, skb_checksum_none_assert(skb); } -static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid) +static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter) { - struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[ringid]; + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; struct pci_dev *pdev = adapter->pdev; struct atl1c_buffer *buffer_info, *next_info; struct sk_buff *skb; @@ -1800,7 +1697,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid /* TODO: update mailbox here */ wmb(); rfd_ring->next_to_use = rfd_next_to_use; - AT_WRITE_REG(&adapter->hw, atl1c_rfd_prod_idx_regs[ringid], + AT_WRITE_REG(&adapter->hw, REG_MB_RFD0_PROD_IDX, rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK); } @@ -1839,7 +1736,7 @@ static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring, rfd_ring->next_to_clean = rfd_index; } -static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que, +static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, int *work_done, int work_to_do) { u16 rfd_num, rfd_index; @@ -1847,8 +1744,8 @@ static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que, u16 length; struct pci_dev *pdev = adapter->pdev; struct net_device *netdev = adapter->netdev; - struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[que]; - struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[que]; + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring; struct sk_buff *skb; struct atl1c_recv_ret_status *rrs; struct atl1c_buffer *buffer_info; @@ -1914,7 +1811,7 @@ rrs_checked: count++; } if (count) - atl1c_alloc_rx_buffer(adapter, que); + atl1c_alloc_rx_buffer(adapter); } /* @@ -1931,7 +1828,7 @@ static int atl1c_clean(struct napi_struct *napi, int budget) if (!netif_carrier_ok(adapter->netdev)) goto quit_polling; /* just enable one RXQ */ - atl1c_clean_rx_irq(adapter, 0, &work_done, budget); + atl1c_clean_rx_irq(adapter, &work_done, budget); if (work_done < budget) { quit_polling: @@ -2206,23 +2103,10 @@ static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb, struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type) { struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; - u32 prod_data; + u16 reg; - AT_READ_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, &prod_data); - switch (type) { - case atl1c_trans_high: - prod_data &= 0xFFFF0000; - prod_data |= tpd_ring->next_to_use & 0xFFFF; - break; - case atl1c_trans_normal: - prod_data &= 0x0000FFFF; - prod_data |= (tpd_ring->next_to_use & 0xFFFF) << 16; - break; - default: - break; - } - wmb(); - AT_WRITE_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, prod_data); + reg = type == atl1c_trans_high ? 
REG_TPD_PRI1_PIDX : REG_TPD_PRI0_PIDX; + AT_WRITE_REGW(&adapter->hw, reg, tpd_ring->next_to_use); } static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, @@ -2307,8 +2191,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter) "Unable to allocate MSI interrupt Error: %d\n", err); adapter->have_msi = false; - } else - netdev->irq = pdev->irq; + } if (!adapter->have_msi) flags |= IRQF_SHARED; @@ -2328,44 +2211,38 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter) return err; } + +static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter) +{ + /* release tx-pending skbs and reset tx/rx ring index */ + atl1c_clean_tx_ring(adapter, atl1c_trans_normal); + atl1c_clean_tx_ring(adapter, atl1c_trans_high); + atl1c_clean_rx_ring(adapter); +} + static int atl1c_up(struct atl1c_adapter *adapter) { struct net_device *netdev = adapter->netdev; - int num; int err; - int i; netif_carrier_off(netdev); - atl1c_init_ring_ptrs(adapter); - atl1c_set_multi(netdev); - atl1c_restore_vlan(adapter); - for (i = 0; i < adapter->num_rx_queues; i++) { - num = atl1c_alloc_rx_buffer(adapter, i); - if (unlikely(num == 0)) { - err = -ENOMEM; - goto err_alloc_rx; - } - } - - if (atl1c_configure(adapter)) { - err = -EIO; + err = atl1c_configure(adapter); + if (unlikely(err)) goto err_up; - } err = atl1c_request_irq(adapter); if (unlikely(err)) goto err_up; + atl1c_check_link_status(adapter); clear_bit(__AT_DOWN, &adapter->flags); napi_enable(&adapter->napi); atl1c_irq_enable(adapter); - atl1c_check_link_status(adapter); netif_start_queue(netdev); return err; err_up: -err_alloc_rx: atl1c_clean_rx_ring(adapter); return err; } @@ -2383,15 +2260,15 @@ static void atl1c_down(struct atl1c_adapter *adapter) napi_disable(&adapter->napi); atl1c_irq_disable(adapter); atl1c_free_irq(adapter); + /* disable ASPM if device inactive */ + atl1c_disable_l0s_l1(&adapter->hw); /* reset MAC to disable all RX/TX */ atl1c_reset_mac(&adapter->hw); msleep(1); adapter->link_speed = SPEED_0; adapter->link_duplex = -1; - atl1c_clean_tx_ring(adapter, atl1c_trans_normal); - atl1c_clean_tx_ring(adapter, atl1c_trans_high); - atl1c_clean_rx_ring(adapter); + atl1c_reset_dma_ring(adapter); } /* @@ -2424,13 +2301,6 @@ static int atl1c_open(struct net_device *netdev) if (unlikely(err)) goto err_up; - if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) { - u32 phy_data; - - AT_READ_REG(&adapter->hw, REG_MDIO_CTRL, &phy_data); - phy_data |= MDIO_AP_EN; - AT_WRITE_REG(&adapter->hw, REG_MDIO_CTRL, phy_data); - } return 0; err_up: @@ -2456,6 +2326,8 @@ static int atl1c_close(struct net_device *netdev) struct atl1c_adapter *adapter = netdev_priv(netdev); WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); + set_bit(__AT_DOWN, &adapter->flags); + cancel_work_sync(&adapter->common_task); atl1c_down(adapter); atl1c_free_ring_resources(adapter); return 0; @@ -2467,10 +2339,6 @@ static int atl1c_suspend(struct device *dev) struct net_device *netdev = pci_get_drvdata(pdev); struct atl1c_adapter *adapter = netdev_priv(netdev); struct atl1c_hw *hw = &adapter->hw; - u32 mac_ctrl_data = 0; - u32 master_ctrl_data = 0; - u32 wol_ctrl_data = 0; - u16 mii_intr_status_data = 0; u32 wufc = adapter->wol; atl1c_disable_l0s_l1(hw); @@ -2481,75 +2349,10 @@ static int atl1c_suspend(struct device *dev) netif_device_detach(netdev); if (wufc) - if (atl1c_phy_power_saving(hw) != 0) + if (atl1c_phy_to_ps_link(hw) != 0) dev_dbg(&pdev->dev, "phy power saving failed"); - AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); - AT_READ_REG(hw, REG_MAC_CTRL, 
&mac_ctrl_data); - - master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS; - mac_ctrl_data &= ~(MAC_CTRL_PRMLEN_MASK << MAC_CTRL_PRMLEN_SHIFT); - mac_ctrl_data |= (((u32)adapter->hw.preamble_len & - MAC_CTRL_PRMLEN_MASK) << - MAC_CTRL_PRMLEN_SHIFT); - mac_ctrl_data &= ~(MAC_CTRL_SPEED_MASK << MAC_CTRL_SPEED_SHIFT); - mac_ctrl_data &= ~MAC_CTRL_DUPLX; - - if (wufc) { - mac_ctrl_data |= MAC_CTRL_RX_EN; - if (adapter->link_speed == SPEED_1000 || - adapter->link_speed == SPEED_0) { - mac_ctrl_data |= atl1c_mac_speed_1000 << - MAC_CTRL_SPEED_SHIFT; - mac_ctrl_data |= MAC_CTRL_DUPLX; - } else - mac_ctrl_data |= atl1c_mac_speed_10_100 << - MAC_CTRL_SPEED_SHIFT; - - if (adapter->link_duplex == DUPLEX_FULL) - mac_ctrl_data |= MAC_CTRL_DUPLX; - - /* turn on magic packet wol */ - if (wufc & AT_WUFC_MAG) - wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN; - - if (wufc & AT_WUFC_LNKC) { - wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; - /* only link up can wake up */ - if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) { - dev_dbg(&pdev->dev, "%s: read write phy " - "register failed.\n", - atl1c_driver_name); - } - } - /* clear phy interrupt */ - atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data); - /* Config MAC Ctrl register */ - __atl1c_vlan_mode(netdev->features, &mac_ctrl_data); - - /* magic packet maybe Broadcast&multicast&Unicast frame */ - if (wufc & AT_WUFC_MAG) - mac_ctrl_data |= MAC_CTRL_BC_EN; - - dev_dbg(&pdev->dev, - "%s: suspend MAC=0x%x\n", - atl1c_driver_name, mac_ctrl_data); - AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); - AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data); - AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); - - AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT | - GPHY_CTRL_EXT_RESET); - } else { - AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING); - master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS; - mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT; - mac_ctrl_data |= MAC_CTRL_DUPLX; - AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); - AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); - AT_WRITE_REG(hw, REG_WOL_CTRL, 0); - hw->phy_configured = false; /* re-init PHY when resume */ - } + atl1c_power_saving(hw, wufc); return 0; } @@ -2562,8 +2365,7 @@ static int atl1c_resume(struct device *dev) struct atl1c_adapter *adapter = netdev_priv(netdev); AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); - atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE | - ATL1C_PCIE_PHY_RESET); + atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE); atl1c_phy_reset(&adapter->hw); atl1c_reset_mac(&adapter->hw); @@ -2616,7 +2418,6 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev) SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); - netdev->irq = pdev->irq; netdev->netdev_ops = &atl1c_netdev_ops; netdev->watchdog_timeo = AT_TX_WATCHDOG; atl1c_set_ethtool_ops(netdev); @@ -2706,14 +2507,13 @@ static int __devinit atl1c_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "cannot map device registers\n"); goto err_ioremap; } - netdev->base_addr = (unsigned long)adapter->hw.hw_addr; /* init mii data */ adapter->mii.dev = netdev; adapter->mii.mdio_read = atl1c_mdio_read; adapter->mii.mdio_write = atl1c_mdio_write; adapter->mii.phy_id_mask = 0x1f; - adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK; + adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK; netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64); setup_timer(&adapter->phy_config_timer, atl1c_phy_config, (unsigned long)adapter); @@ -2723,8 +2523,7 @@ static int __devinit 
atl1c_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "net device private data init failed\n"); goto err_sw_init; } - atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE | - ATL1C_PCIE_PHY_RESET); + atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE); /* Init GPHY as early as possible due to power saving issue */ atl1c_phy_reset(&adapter->hw); @@ -2752,7 +2551,7 @@ static int __devinit atl1c_probe(struct pci_dev *pdev, dev_dbg(&pdev->dev, "mac address : %pM\n", adapter->hw.mac_addr); - atl1c_hw_set_mac_addr(&adapter->hw); + atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr); INIT_WORK(&adapter->common_task, atl1c_common_task); adapter->work_event = 0; err = register_netdev(netdev); @@ -2796,6 +2595,8 @@ static void __devexit atl1c_remove(struct pci_dev *pdev) struct atl1c_adapter *adapter = netdev_priv(netdev); unregister_netdev(netdev); + /* restore permanent address */ + atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.perm_mac_addr); atl1c_phy_disable(&adapter->hw); iounmap(adapter->hw.hw_addr); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 93ff2b23128..1220e511ced 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1883,27 +1883,24 @@ static int atl1e_request_irq(struct atl1e_adapter *adapter) int err = 0; adapter->have_msi = true; - err = pci_enable_msi(adapter->pdev); + err = pci_enable_msi(pdev); if (err) { - netdev_dbg(adapter->netdev, + netdev_dbg(netdev, "Unable to allocate MSI interrupt Error: %d\n", err); adapter->have_msi = false; - } else - netdev->irq = pdev->irq; - + } if (!adapter->have_msi) flags |= IRQF_SHARED; - err = request_irq(adapter->pdev->irq, atl1e_intr, flags, - netdev->name, netdev); + err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev); if (err) { netdev_dbg(adapter->netdev, "Unable to allocate interrupt Error: %d\n", err); if (adapter->have_msi) - pci_disable_msi(adapter->pdev); + pci_disable_msi(pdev); return err; } - netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n"); + netdev_dbg(netdev, "atl1e_request_irq OK\n"); return err; } @@ -2233,7 +2230,6 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev) SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); - netdev->irq = pdev->irq; netdev->netdev_ops = &atl1e_netdev_ops; netdev->watchdog_timeo = AT_TX_WATCHDOG; @@ -2319,7 +2315,6 @@ static int __devinit atl1e_probe(struct pci_dev *pdev, netdev_err(netdev, "cannot map device registers\n"); goto err_ioremap; } - netdev->base_addr = (unsigned long)adapter->hw.hw_addr; /* init mii data */ adapter->mii.dev = netdev; diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index c926857e820..5d10884e508 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -266,7 +266,7 @@ static s32 atl1_reset_hw(struct atl1_hw *hw) * interrupts & Clear any pending interrupt events */ /* - * iowrite32(0, hw->hw_addr + REG_IMR); + * atlx_irq_disable(adapter); * iowrite32(0xffffffff, hw->hw_addr + REG_ISR); */ @@ -1917,7 +1917,7 @@ next: return num_alloc; } -static void atl1_intr_rx(struct atl1_adapter *adapter) +static int atl1_intr_rx(struct atl1_adapter *adapter, int budget) { int i, count; u16 length; @@ -1933,7 +1933,7 @@ static void atl1_intr_rx(struct atl1_adapter *adapter) rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); - while (1) { + while (count < budget) { rrd 
= ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); i = 1; if (likely(rrd->xsz.valid)) { /* packet valid */ @@ -2032,7 +2032,7 @@ rrd_ok: __vlan_hwaccel_put_tag(skb, vlan_tag); } - netif_rx(skb); + netif_receive_skb(skb); /* let protocol layer free skb */ buffer_info->skb = NULL; @@ -2065,14 +2065,17 @@ rrd_ok: iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); spin_unlock(&adapter->mb_lock); } + + return count; } -static void atl1_intr_tx(struct atl1_adapter *adapter) +static int atl1_intr_tx(struct atl1_adapter *adapter) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_buffer *buffer_info; u16 sw_tpd_next_to_clean; u16 cmb_tpd_next_to_clean; + int count = 0; sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); @@ -2092,12 +2095,16 @@ static void atl1_intr_tx(struct atl1_adapter *adapter) if (++sw_tpd_next_to_clean == tpd_ring->count) sw_tpd_next_to_clean = 0; + + count++; } atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); if (netif_queue_stopped(adapter->netdev) && netif_carrier_ok(adapter->netdev)) netif_wake_queue(adapter->netdev); + + return count; } static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring) @@ -2439,6 +2446,49 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; } +static int atl1_rings_clean(struct napi_struct *napi, int budget) +{ + struct atl1_adapter *adapter = container_of(napi, struct atl1_adapter, napi); + int work_done = atl1_intr_rx(adapter, budget); + + if (atl1_intr_tx(adapter)) + work_done = budget; + + /* Let's come again to process some more packets */ + if (work_done >= budget) + return work_done; + + napi_complete(napi); + /* re-enable Interrupt */ + if (likely(adapter->int_enabled)) + atlx_imr_set(adapter, IMR_NORMAL_MASK); + return work_done; +} + +static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter) +{ + if (!napi_schedule_prep(&adapter->napi)) + /* It is possible in case even the RX/TX ints are disabled via IMR + * register the ISR bits are set anyway (but do not produce IRQ). + * To handle such situation the napi functions used to check is + * something scheduled or not. + */ + return 0; + + __napi_schedule(&adapter->napi); + + /* + * Disable RX/TX ints via IMR register if it is + * allowed. NAPI handler must reenable them in same + * way. + */ + if (!adapter->int_enabled) + return 1; + + atlx_imr_set(adapter, IMR_NORXTX_MASK); + return 1; +} + /* * atl1_intr - Interrupt Handler * @irq: interrupt number @@ -2449,78 +2499,74 @@ static irqreturn_t atl1_intr(int irq, void *data) { struct atl1_adapter *adapter = netdev_priv(data); u32 status; - int max_ints = 10; status = adapter->cmb.cmb->int_stats; if (!status) return IRQ_NONE; - do { - /* clear CMB interrupt status at once */ - adapter->cmb.cmb->int_stats = 0; - - if (status & ISR_GPHY) /* clear phy status */ - atlx_clear_phy_int(adapter); + /* clear CMB interrupt status at once, + * but leave rx/tx interrupt status in case it should be dropped + * only if rx/tx processing queued. In other case interrupt + * can be lost. 
+ */ + adapter->cmb.cmb->int_stats = status & (ISR_CMB_TX | ISR_CMB_RX); - /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ - iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); + if (status & ISR_GPHY) /* clear phy status */ + atlx_clear_phy_int(adapter); - /* check if SMB intr */ - if (status & ISR_SMB) - atl1_inc_smb(adapter); + /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ + iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); - /* check if PCIE PHY Link down */ - if (status & ISR_PHY_LINKDOWN) { - if (netif_msg_intr(adapter)) - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "pcie phy link down %x\n", status); - if (netif_running(adapter->netdev)) { /* reset MAC */ - iowrite32(0, adapter->hw.hw_addr + REG_IMR); - schedule_work(&adapter->reset_dev_task); - return IRQ_HANDLED; - } - } + /* check if SMB intr */ + if (status & ISR_SMB) + atl1_inc_smb(adapter); - /* check if DMA read/write error ? */ - if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { - if (netif_msg_intr(adapter)) - dev_printk(KERN_DEBUG, &adapter->pdev->dev, - "pcie DMA r/w error (status = 0x%x)\n", - status); - iowrite32(0, adapter->hw.hw_addr + REG_IMR); + /* check if PCIE PHY Link down */ + if (status & ISR_PHY_LINKDOWN) { + if (netif_msg_intr(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "pcie phy link down %x\n", status); + if (netif_running(adapter->netdev)) { /* reset MAC */ + atlx_irq_disable(adapter); schedule_work(&adapter->reset_dev_task); return IRQ_HANDLED; } + } - /* link event */ - if (status & ISR_GPHY) { - adapter->soft_stats.tx_carrier_errors++; - atl1_check_for_link(adapter); - } + /* check if DMA read/write error ? */ + if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { + if (netif_msg_intr(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "pcie DMA r/w error (status = 0x%x)\n", + status); + atlx_irq_disable(adapter); + schedule_work(&adapter->reset_dev_task); + return IRQ_HANDLED; + } - /* transmit event */ - if (status & ISR_CMB_TX) - atl1_intr_tx(adapter); - - /* rx exception */ - if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | - ISR_RRD_OV | ISR_HOST_RFD_UNRUN | - ISR_HOST_RRD_OV | ISR_CMB_RX))) { - if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | - ISR_RRD_OV | ISR_HOST_RFD_UNRUN | - ISR_HOST_RRD_OV)) - if (netif_msg_intr(adapter)) - dev_printk(KERN_DEBUG, - &adapter->pdev->dev, - "rx exception, ISR = 0x%x\n", - status); - atl1_intr_rx(adapter); - } + /* link event */ + if (status & ISR_GPHY) { + adapter->soft_stats.tx_carrier_errors++; + atl1_check_for_link(adapter); + } - if (--max_ints < 0) - break; + /* transmit or receive event */ + if (status & (ISR_CMB_TX | ISR_CMB_RX) && + atl1_sched_rings_clean(adapter)) + adapter->cmb.cmb->int_stats = adapter->cmb.cmb->int_stats & + ~(ISR_CMB_TX | ISR_CMB_RX); - } while ((status = adapter->cmb.cmb->int_stats)); + /* rx exception */ + if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | + ISR_RRD_OV | ISR_HOST_RFD_UNRUN | + ISR_HOST_RRD_OV))) { + if (netif_msg_intr(adapter)) + dev_printk(KERN_DEBUG, + &adapter->pdev->dev, + "rx exception, ISR = 0x%x\n", + status); + atl1_sched_rings_clean(adapter); + } /* re-enable Interrupt */ iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); @@ -2599,6 +2645,7 @@ static s32 atl1_up(struct atl1_adapter *adapter) if (unlikely(err)) goto err_up; + napi_enable(&adapter->napi); atlx_irq_enable(adapter); atl1_check_link(adapter); netif_start_queue(netdev); @@ -2615,6 +2662,7 @@ static void atl1_down(struct atl1_adapter *adapter) { struct 
net_device *netdev = adapter->netdev; + napi_disable(&adapter->napi); netif_stop_queue(netdev); del_timer_sync(&adapter->phy_config_timer); adapter->phy_timer_pending = false; @@ -2971,6 +3019,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev, netdev->netdev_ops = &atl1_netdev_ops; netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, atl1_rings_clean, 64); netdev->ethtool_ops = &atl1_ethtool_ops; adapter->bd_number = cards_found; diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h index e04bf4d71e4..3bf79a56220 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.h +++ b/drivers/net/ethernet/atheros/atlx/atl1.h @@ -275,13 +275,17 @@ static u32 atl1_check_link(struct atl1_adapter *adapter); #define ISR_DIS_SMB 0x20000000 #define ISR_DIS_DMA 0x40000000 -/* Normal Interrupt mask */ -#define IMR_NORMAL_MASK (\ +/* Normal Interrupt mask without RX/TX enabled */ +#define IMR_NORXTX_MASK (\ ISR_SMB |\ ISR_GPHY |\ ISR_PHY_LINKDOWN|\ ISR_DMAR_TO_RST |\ - ISR_DMAW_TO_RST |\ + ISR_DMAW_TO_RST) + +/* Normal Interrupt mask */ +#define IMR_NORMAL_MASK (\ + IMR_NORXTX_MASK |\ ISR_CMB_TX |\ ISR_CMB_RX) @@ -758,6 +762,7 @@ struct atl1_adapter { u16 link_speed; u16 link_duplex; spinlock_t lock; + struct napi_struct napi; struct work_struct reset_dev_task; struct work_struct link_chg_task; @@ -781,6 +786,12 @@ struct atl1_adapter { u16 ict; /* interrupt clear timer (2us resolution */ struct mii_if_info mii; /* MII interface info */ + /* + * Use this value to check is napi handler allowed to + * enable ints or not + */ + bool int_enabled; + u32 bd_number; /* board number */ bool pci_using_64; struct atl1_hw hw; diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c index c9e9dc57986..b4f3aa49a7f 100644 --- a/drivers/net/ethernet/atheros/atlx/atlx.c +++ b/drivers/net/ethernet/atheros/atlx/atlx.c @@ -155,14 +155,21 @@ static void atlx_set_multi(struct net_device *netdev) } } +static inline void atlx_imr_set(struct atlx_adapter *adapter, + unsigned int imr) +{ + iowrite32(imr, adapter->hw.hw_addr + REG_IMR); + ioread32(adapter->hw.hw_addr + REG_IMR); +} + /* * atlx_irq_enable - Enable default interrupt generation settings * @adapter: board private structure */ static void atlx_irq_enable(struct atlx_adapter *adapter) { - iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR); - ioread32(adapter->hw.hw_addr + REG_IMR); + atlx_imr_set(adapter, IMR_NORMAL_MASK); + adapter->int_enabled = true; } /* @@ -171,8 +178,8 @@ static void atlx_irq_enable(struct atlx_adapter *adapter) */ static void atlx_irq_disable(struct atlx_adapter *adapter) { - iowrite32(0, adapter->hw.hw_addr + REG_IMR); - ioread32(adapter->hw.hw_addr + REG_IMR); + adapter->int_enabled = false; + atlx_imr_set(adapter, 0); synchronize_irq(adapter->pdev->irq); } diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 8297e286873..ac7b7448853 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -3006,7 +3006,7 @@ error: dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); - skb = build_skb(data); + skb = build_skb(data, 0); if (!skb) { kfree(data); goto error; @@ -7343,8 +7343,7 @@ static struct { { "rx_fw_discards" }, }; -#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\ - sizeof(bnx2_stats_str_arr[0])) +#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr) #define STATS_OFFSET32(offset_name) (offsetof(struct 
statistics_block, offset_name) / 4) @@ -7976,7 +7975,6 @@ static int __devinit bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) { struct bnx2 *bp; - unsigned long mem_len; int rc, i, j; u32 reg; u64 dma_mask, persist_dma_mask; @@ -8036,13 +8034,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) #endif INIT_WORK(&bp->reset_task, bnx2_reset_task); - dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); - mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1); - dev->mem_end = dev->mem_start + mem_len; - dev->irq = pdev->irq; - - bp->regview = ioremap_nocache(dev->base_addr, mem_len); - + bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID + + TX_MAX_TSS_RINGS + 1)); if (!bp->regview) { dev_err(&pdev->dev, "Cannot map register space, aborting\n"); rc = -ENOMEM; @@ -8346,10 +8339,8 @@ err_out_unmap: bp->flags &= ~BNX2_FLAG_AER_ENABLED; } - if (bp->regview) { - iounmap(bp->regview); - bp->regview = NULL; - } + pci_iounmap(pdev, bp->regview); + bp->regview = NULL; err_out_release: pci_release_regions(pdev); @@ -8432,7 +8423,7 @@ static int __devinit bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int version_printed = 0; - struct net_device *dev = NULL; + struct net_device *dev; struct bnx2 *bp; int rc; char str[40]; @@ -8442,15 +8433,12 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* dev zeroed in init_etherdev */ dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS); - if (!dev) return -ENOMEM; rc = bnx2_init_board(pdev, dev); - if (rc < 0) { - free_netdev(dev); - return rc; - } + if (rc < 0) + goto err_free; dev->netdev_ops = &bnx2_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; @@ -8480,22 +8468,21 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto error; } - netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n", - board_info[ent->driver_data].name, + netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, " + "node addr %pM\n", board_info[ent->driver_data].name, ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', ((CHIP_ID(bp) & 0x0ff0) >> 4), - bnx2_bus_string(bp, str), - dev->base_addr, - bp->pdev->irq, dev->dev_addr); + bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0), + pdev->irq, dev->dev_addr); return 0; error: - if (bp->regview) - iounmap(bp->regview); + iounmap(bp->regview); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); +err_free: free_netdev(dev); return rc; } @@ -8511,8 +8498,7 @@ bnx2_remove_one(struct pci_dev *pdev) del_timer_sync(&bp->timer); cancel_work_sync(&bp->reset_task); - if (bp->regview) - iounmap(bp->regview); + pci_iounmap(bp->pdev, bp->regview); kfree(bp->temp_stats_blk); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 2c9ee552dff..e30e2a2f354 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -23,13 +23,17 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.72.10-0" -#define DRV_MODULE_RELDATE "2012/02/20" +#define DRV_MODULE_VERSION "1.72.50-0" +#define DRV_MODULE_RELDATE "2012/04/23" #define BNX2X_BC_VER 0x040200 #if defined(CONFIG_DCB) #define BCM_DCBNL #endif + + +#include "bnx2x_hsi.h" + #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) #define BCM_CNIC 1 #include "../cnic_if.h" @@ -345,7 +349,6 @@ union db_prod { #define SGE_PAGE_SIZE PAGE_SIZE #define SGE_PAGE_SHIFT PAGE_SHIFT #define 
SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) -#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE) /* SGE ring related macros */ #define NUM_RX_SGE_PAGES 2 @@ -815,6 +818,8 @@ struct bnx2x_common { #define CHIP_NUM_57800_MF 0x16a5 #define CHIP_NUM_57810 0x168e #define CHIP_NUM_57810_MF 0x16ae +#define CHIP_NUM_57811 0x163d +#define CHIP_NUM_57811_MF 0x163e #define CHIP_NUM_57840 0x168d #define CHIP_NUM_57840_MF 0x16ab #define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) @@ -826,6 +831,8 @@ struct bnx2x_common { #define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF) #define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810) #define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF) +#define CHIP_IS_57811(bp) (CHIP_NUM(bp) == CHIP_NUM_57811) +#define CHIP_IS_57811_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_MF) #define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840) #define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF) #define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ @@ -836,6 +843,8 @@ struct bnx2x_common { CHIP_IS_57800_MF(bp) || \ CHIP_IS_57810(bp) || \ CHIP_IS_57810_MF(bp) || \ + CHIP_IS_57811(bp) || \ + CHIP_IS_57811_MF(bp) || \ CHIP_IS_57840(bp) || \ CHIP_IS_57840_MF(bp)) #define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) @@ -1053,6 +1062,13 @@ struct bnx2x_slowpath { struct flow_control_configuration pfc_config; } func_rdata; + /* afex ramrod can not be a part of func_rdata union because these + * events might arrive in parallel to other events from func_rdata. + * Therefore, if they would have been defined in the same union, + * data can get corrupted. + */ + struct afex_vif_list_ramrod_data func_afex_rdata; + /* used by dmae command executer */ struct dmae_command dmae[MAX_DMAE_C]; @@ -1169,6 +1185,7 @@ struct bnx2x_fw_stats_data { enum { BNX2X_SP_RTNL_SETUP_TC, BNX2X_SP_RTNL_TX_TIMEOUT, + BNX2X_SP_RTNL_AFEX_F_UPDATE, BNX2X_SP_RTNL_FAN_FAILURE, }; @@ -1222,7 +1239,6 @@ struct bnx2x { #define ETH_MAX_JUMBO_PACKET_SIZE 9600 /* TCP with Timestamp Option (32) + IPv6 (40) */ #define ETH_MAX_TPA_HEADER_SIZE 72 -#define ETH_MIN_TPA_HEADER_SIZE 40 /* Max supported alignment is 256 (8 shift) */ #define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT) @@ -1300,6 +1316,7 @@ struct bnx2x { #define NO_ISCSI_FLAG (1 << 14) #define NO_FCOE_FLAG (1 << 15) #define BC_SUPPORTS_PFC_STATS (1 << 17) +#define USING_SINGLE_MSIX_FLAG (1 << 20) #define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) #define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) @@ -1329,21 +1346,20 @@ struct bnx2x { struct bnx2x_common common; struct bnx2x_port port; - struct cmng_struct_per_port cmng; - u32 vn_weight_sum; + struct cmng_init cmng; + u32 mf_config[E1HVN_MAX]; - u32 mf2_config[E2_FUNC_MAX]; + u32 mf_ext_config; u32 path_has_ovlan; /* E3 */ u16 mf_ov; u8 mf_mode; #define IS_MF(bp) (bp->mf_mode != 0) #define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI) #define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD) +#define IS_MF_AFEX(bp) (bp->mf_mode == MULTI_FUNCTION_AFEX) u8 wol; - bool gro_check; - int rx_ring_size; u16 tx_quick_cons_trip_int; @@ -1371,7 +1387,6 @@ struct bnx2x { #define BNX2X_STATE_DIAG 0xe000 #define BNX2X_STATE_ERROR 0xf000 - int multi_mode; #define BNX2X_MAX_PRIORITY 8 #define BNX2X_MAX_ENTRIES_PER_PRI 16 #define BNX2X_MAX_COS 3 @@ -1582,6 +1597,9 @@ struct bnx2x { struct dcbx_features dcbx_remote_feat; u32 dcbx_remote_flags; #endif + /* AFEX: store default vlan used */ + int afex_def_vlan_tag; + enum mf_cfg_afex_vlan_mode afex_vlan_mode; u32 pending_max; 
/* multiple tx classes of service */ @@ -2138,9 +2156,16 @@ void bnx2x_notify_link_changed(struct bnx2x *bp); #define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) #define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) +#define BNX2X_MF_EXT_PROTOCOL_FCOE(bp) ((bp)->mf_ext_config & \ + MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) + +#define IS_MF_FCOE_AFEX(bp) (IS_MF_AFEX(bp) && BNX2X_MF_EXT_PROTOCOL_FCOE(bp)) #define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \ (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \ BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) +#else +#define IS_MF_FCOE_AFEX(bp) false #endif + #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 4b054812713..ad0743bf4bd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -23,7 +23,6 @@ #include <linux/ip.h> #include <net/ipv6.h> #include <net/ip6_checksum.h> -#include <linux/firmware.h> #include <linux/prefetch.h> #include "bnx2x_cmn.h" #include "bnx2x_init.h" @@ -329,16 +328,6 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len); tpa_info->full_page = SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size; - /* - * FW 7.2.16 BUG workaround: - * if SGE size is (exactly) multiple gro_size - * fw will place one less frag on SGE. - * the calculation is done only for potentially - * dangerous MTUs. - */ - if (unlikely(bp->gro_check)) - if (!(SGE_PAGE_SIZE * PAGES_PER_SGE % gro_size)) - tpa_info->full_page -= gro_size; tpa_info->gro_size = gro_size; } @@ -369,8 +358,8 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, * Approximate value of the MSS for this aggregation calculated using * the first packet of it. 
*/ -static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, - u16 len_on_bd) +static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, + u16 len_on_bd) { /* * TPA aggregation won't have either IP options or TCP options @@ -396,6 +385,36 @@ static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, return len_on_bd - hdrs_len; } +static int bnx2x_alloc_rx_sge(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) +{ + struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT); + struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; + struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; + dma_addr_t mapping; + + if (unlikely(page == NULL)) { + BNX2X_ERR("Can't alloc sge\n"); + return -ENOMEM; + } + + mapping = dma_map_page(&bp->pdev->dev, page, 0, + SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + __free_pages(page, PAGES_PER_SGE_SHIFT); + BNX2X_ERR("Can't map sge\n"); + return -ENOMEM; + } + + sw_buf->page = page; + dma_unmap_addr_set(sw_buf, mapping, mapping); + + sge->addr_hi = cpu_to_le32(U64_HI(mapping)); + sge->addr_lo = cpu_to_le32(U64_LO(mapping)); + + return 0; +} + static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_agg_info *tpa_info, u16 pages, @@ -494,11 +513,11 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, return 0; } -static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, - struct bnx2x_agg_info *tpa_info, - u16 pages, - struct eth_end_agg_rx_cqe *cqe, - u16 cqe_idx) +static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, + struct bnx2x_agg_info *tpa_info, + u16 pages, + struct eth_end_agg_rx_cqe *cqe, + u16 cqe_idx) { struct sw_rx_bd *rx_buf = &tpa_info->first_buf; u8 pad = tpa_info->placement_offset; @@ -524,7 +543,7 @@ static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), fp->rx_buf_size, DMA_FROM_DEVICE); if (likely(new_data)) - skb = build_skb(data); + skb = build_skb(data, 0); if (likely(skb)) { #ifdef BNX2X_STOP_ON_ERROR @@ -568,6 +587,36 @@ drop: fp->eth_q_stats.rx_skb_alloc_failed++; } +static int bnx2x_alloc_rx_data(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) +{ + u8 *data; + struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; + struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; + dma_addr_t mapping; + + data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC); + if (unlikely(data == NULL)) + return -ENOMEM; + + mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD, + fp->rx_buf_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + kfree(data); + BNX2X_ERR("Can't map rx data\n"); + return -ENOMEM; + } + + rx_buf->data = data; + dma_unmap_addr_set(rx_buf, mapping, mapping); + + rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + + return 0; +} + int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) { @@ -732,7 +781,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) dma_unmap_addr(rx_buf, mapping), fp->rx_buf_size, DMA_FROM_DEVICE); - skb = build_skb(data); + skb = build_skb(data, 0); if (unlikely(!skb)) { kfree(data); fp->eth_q_stats.rx_skb_alloc_failed++; @@ -881,8 +930,8 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp) * * It uses non-atomic bit operations because it is called under the mutex. 
*/ -static inline void bnx2x_fill_report_data(struct bnx2x *bp, - struct bnx2x_link_report_data *data) +static void bnx2x_fill_report_data(struct bnx2x *bp, + struct bnx2x_link_report_data *data) { u16 line_speed = bnx2x_get_mf_speed(bp); @@ -1000,6 +1049,47 @@ void __bnx2x_link_report(struct bnx2x *bp) } } +static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp) +{ + int i; + + for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { + struct eth_rx_sge *sge; + + sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2]; + sge->addr_hi = + cpu_to_le32(U64_HI(fp->rx_sge_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); + + sge->addr_lo = + cpu_to_le32(U64_LO(fp->rx_sge_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); + } +} + +static void bnx2x_free_tpa_pool(struct bnx2x *bp, + struct bnx2x_fastpath *fp, int last) +{ + int i; + + for (i = 0; i < last; i++) { + struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; + struct sw_rx_bd *first_buf = &tpa_info->first_buf; + u8 *data = first_buf->data; + + if (data == NULL) { + DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); + continue; + } + if (tpa_info->tpa_state == BNX2X_TPA_START) + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(first_buf, mapping), + fp->rx_buf_size, DMA_FROM_DEVICE); + kfree(data); + first_buf->data = NULL; + } +} + void bnx2x_init_rx_rings(struct bnx2x *bp) { int func = BP_FUNC(bp); @@ -1212,16 +1302,15 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) void bnx2x_free_irq(struct bnx2x *bp) { - if (bp->flags & USING_MSIX_FLAG) + if (bp->flags & USING_MSIX_FLAG && + !(bp->flags & USING_SINGLE_MSIX_FLAG)) bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1); - else if (bp->flags & USING_MSI_FLAG) - free_irq(bp->pdev->irq, bp->dev); else - free_irq(bp->pdev->irq, bp->dev); + free_irq(bp->dev->irq, bp->dev); } -int bnx2x_enable_msix(struct bnx2x *bp) +int __devinit bnx2x_enable_msix(struct bnx2x *bp) { int msix_vec = 0, i, rc, req_cnt; @@ -1261,8 +1350,8 @@ int bnx2x_enable_msix(struct bnx2x *bp) rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc); if (rc) { - BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); - return rc; + BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); + goto no_msix; } /* * decrease number of queues by number of unallocated entries @@ -1270,18 +1359,34 @@ int bnx2x_enable_msix(struct bnx2x *bp) bp->num_queues -= diff; BNX2X_DEV_INFO("New queue configuration set: %d\n", - bp->num_queues); - } else if (rc) { - /* fall to INTx if not enough memory */ - if (rc == -ENOMEM) - bp->flags |= DISABLE_MSI_FLAG; + bp->num_queues); + } else if (rc > 0) { + /* Get by with single vector */ + rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1); + if (rc) { + BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n", + rc); + goto no_msix; + } + + BNX2X_DEV_INFO("Using single MSI-X vector\n"); + bp->flags |= USING_SINGLE_MSIX_FLAG; + + } else if (rc < 0) { BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); - return rc; + goto no_msix; } bp->flags |= USING_MSIX_FLAG; return 0; + +no_msix: + /* fall to INTx if not enough memory */ + if (rc == -ENOMEM) + bp->flags |= DISABLE_MSI_FLAG; + + return rc; } static int bnx2x_req_msix_irqs(struct bnx2x *bp) @@ -1343,22 +1448,26 @@ int bnx2x_enable_msi(struct bnx2x *bp) static int bnx2x_req_irq(struct bnx2x *bp) { unsigned long flags; - int rc; + unsigned int irq; - if (bp->flags & USING_MSI_FLAG) + if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG)) flags = 0; else flags = IRQF_SHARED; - rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, - 
bp->dev->name, bp->dev); - return rc; + if (bp->flags & USING_MSIX_FLAG) + irq = bp->msix_table[0].vector; + else + irq = bp->pdev->irq; + + return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev); } -static inline int bnx2x_setup_irqs(struct bnx2x *bp) +static int bnx2x_setup_irqs(struct bnx2x *bp) { int rc = 0; - if (bp->flags & USING_MSIX_FLAG) { + if (bp->flags & USING_MSIX_FLAG && + !(bp->flags & USING_SINGLE_MSIX_FLAG)) { rc = bnx2x_req_msix_irqs(bp); if (rc) return rc; @@ -1371,15 +1480,20 @@ static inline int bnx2x_setup_irqs(struct bnx2x *bp) } if (bp->flags & USING_MSI_FLAG) { bp->dev->irq = bp->pdev->irq; - netdev_info(bp->dev, "using MSI IRQ %d\n", - bp->pdev->irq); + netdev_info(bp->dev, "using MSI IRQ %d\n", + bp->dev->irq); + } + if (bp->flags & USING_MSIX_FLAG) { + bp->dev->irq = bp->msix_table[0].vector; + netdev_info(bp->dev, "using MSIX IRQ %d\n", + bp->dev->irq); } } return 0; } -static inline void bnx2x_napi_enable(struct bnx2x *bp) +static void bnx2x_napi_enable(struct bnx2x *bp) { int i; @@ -1387,7 +1501,7 @@ static inline void bnx2x_napi_enable(struct bnx2x *bp) napi_enable(&bnx2x_fp(bp, i, napi)); } -static inline void bnx2x_napi_disable(struct bnx2x *bp) +static void bnx2x_napi_disable(struct bnx2x *bp) { int i; @@ -1437,24 +1551,15 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); } + void bnx2x_set_num_queues(struct bnx2x *bp) { - switch (bp->multi_mode) { - case ETH_RSS_MODE_DISABLED: - bp->num_queues = 1; - break; - case ETH_RSS_MODE_REGULAR: - bp->num_queues = bnx2x_calc_num_queues(bp); - break; - - default: - bp->num_queues = 1; - break; - } + /* RSS queues */ + bp->num_queues = bnx2x_calc_num_queues(bp); #ifdef BCM_CNIC - /* override in STORAGE SD mode */ - if (IS_MF_STORAGE_SD(bp)) + /* override in STORAGE SD modes */ + if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) bp->num_queues = 1; #endif /* Add special queues */ @@ -1483,7 +1588,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp) * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash() * will return a proper Tx index if TC is enabled (netdev->num_tc > 0). */ -static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) +static int bnx2x_set_real_num_queues(struct bnx2x *bp) { int rc, tx, rx; @@ -1515,7 +1620,7 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) return rc; } -static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp) +static void bnx2x_set_rx_buf_size(struct bnx2x *bp) { int i; @@ -1543,22 +1648,19 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp) } } -static inline int bnx2x_init_rss_pf(struct bnx2x *bp) +static int bnx2x_init_rss_pf(struct bnx2x *bp) { int i; u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); - /* - * Prepare the inital contents fo the indirection table if RSS is + /* Prepare the initial contents fo the indirection table if RSS is * enabled */ - if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { - for (i = 0; i < sizeof(ind_table); i++) - ind_table[i] = - bp->fp->cl_id + - ethtool_rxfh_indir_default(i, num_eth_queues); - } + for (i = 0; i < sizeof(ind_table); i++) + ind_table[i] = + bp->fp->cl_id + + ethtool_rxfh_indir_default(i, num_eth_queues); /* * For 57710 and 57711 SEARCHER configuration (rss_keys) is @@ -1568,11 +1670,12 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp) * For 57712 and newer on the other hand it's a per-function * configuration. 
*/ - return bnx2x_config_rss_pf(bp, ind_table, - bp->port.pmf || !CHIP_IS_E1x(bp)); + return bnx2x_config_rss_eth(bp, ind_table, + bp->port.pmf || !CHIP_IS_E1x(bp)); } -int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash) +int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, + u8 *ind_table, bool config_hash) { struct bnx2x_config_rss_params params = {NULL}; int i; @@ -1584,58 +1687,35 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash) * bp->multi_mode = ETH_RSS_MODE_DISABLED; */ - params.rss_obj = &bp->rss_conf_obj; + params.rss_obj = rss_obj; __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); - /* RSS mode */ - switch (bp->multi_mode) { - case ETH_RSS_MODE_DISABLED: - __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); - break; - case ETH_RSS_MODE_REGULAR: - __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags); - break; - case ETH_RSS_MODE_VLAN_PRI: - __set_bit(BNX2X_RSS_MODE_VLAN_PRI, ¶ms.rss_flags); - break; - case ETH_RSS_MODE_E1HOV_PRI: - __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, ¶ms.rss_flags); - break; - case ETH_RSS_MODE_IP_DSCP: - __set_bit(BNX2X_RSS_MODE_IP_DSCP, ¶ms.rss_flags); - break; - default: - BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode); - return -EINVAL; - } + __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags); - /* If RSS is enabled */ - if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { - /* RSS configuration */ - __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags); - __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); - __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); - __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); + /* RSS configuration */ + __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); - /* Hash bits */ - params.rss_result_mask = MULTI_MASK; + /* Hash bits */ + params.rss_result_mask = MULTI_MASK; - memcpy(params.ind_table, ind_table, sizeof(params.ind_table)); + memcpy(params.ind_table, ind_table, sizeof(params.ind_table)); - if (config_hash) { - /* RSS keys */ - for (i = 0; i < sizeof(params.rss_key) / 4; i++) - params.rss_key[i] = random32(); + if (config_hash) { + /* RSS keys */ + for (i = 0; i < sizeof(params.rss_key) / 4; i++) + params.rss_key[i] = random32(); - __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags); - } + __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags); } return bnx2x_config_rss(bp, ¶ms); } -static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) +static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) { struct bnx2x_func_state_params func_params = {NULL}; @@ -1744,6 +1824,87 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err) return true; } +/** + * bnx2x_bz_fp - zero content of the fastpath structure. + * + * @bp: driver handle + * @index: fastpath index to be zeroed + * + * Makes sure the contents of the bp->fp[index].napi is kept + * intact. 
+ */ +static void bnx2x_bz_fp(struct bnx2x *bp, int index) +{ + struct bnx2x_fastpath *fp = &bp->fp[index]; + struct napi_struct orig_napi = fp->napi; + /* bzero bnx2x_fastpath contents */ + if (bp->stats_init) + memset(fp, 0, sizeof(*fp)); + else { + /* Keep Queue statistics */ + struct bnx2x_eth_q_stats *tmp_eth_q_stats; + struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old; + + tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats), + GFP_KERNEL); + if (tmp_eth_q_stats) + memcpy(tmp_eth_q_stats, &fp->eth_q_stats, + sizeof(struct bnx2x_eth_q_stats)); + + tmp_eth_q_stats_old = + kzalloc(sizeof(struct bnx2x_eth_q_stats_old), + GFP_KERNEL); + if (tmp_eth_q_stats_old) + memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old, + sizeof(struct bnx2x_eth_q_stats_old)); + + memset(fp, 0, sizeof(*fp)); + + if (tmp_eth_q_stats) { + memcpy(&fp->eth_q_stats, tmp_eth_q_stats, + sizeof(struct bnx2x_eth_q_stats)); + kfree(tmp_eth_q_stats); + } + + if (tmp_eth_q_stats_old) { + memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old, + sizeof(struct bnx2x_eth_q_stats_old)); + kfree(tmp_eth_q_stats_old); + } + + } + + /* Restore the NAPI object as it has been already initialized */ + fp->napi = orig_napi; + + fp->bp = bp; + fp->index = index; + if (IS_ETH_FP(fp)) + fp->max_cos = bp->max_cos; + else + /* Special queues support only one CoS */ + fp->max_cos = 1; + + /* + * set the tpa flag for each queue. The tpa flag determines the queue + * minimal size so it must be set prior to queue memory allocation + */ + fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG || + (bp->flags & GRO_ENABLE_FLAG && + bnx2x_mtu_allows_gro(bp->dev->mtu))); + if (bp->flags & TPA_ENABLE_FLAG) + fp->mode = TPA_MODE_LRO; + else if (bp->flags & GRO_ENABLE_FLAG) + fp->mode = TPA_MODE_GRO; + +#ifdef BCM_CNIC + /* We don't want TPA on an FCoE L2 ring */ + if (IS_FCOE_FP(fp)) + fp->disable_tpa = 1; +#endif +} + + /* must be called with rtnl_lock */ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) { @@ -1911,8 +2072,14 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) SHMEM2_WR(bp, dcc_support, (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); + if (SHMEM2_HAS(bp, afex_driver_support)) + SHMEM2_WR(bp, afex_driver_support, + SHMEM_AFEX_SUPPORTED_VERSION_ONE); } + /* Set AFEX default VLAN tag to an invalid value */ + bp->afex_def_vlan_tag = -1; + bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; rc = bnx2x_func_start(bp); if (rc) { @@ -2968,6 +3135,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(txq, skb->len); + skb_tx_timestamp(skb); + txdata->tx_pkt_prod++; /* * Make sure that the BD data is updated before updating the producer @@ -3084,7 +3253,8 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p) } #ifdef BCM_CNIC - if (IS_MF_STORAGE_SD(bp) && !is_zero_ether_addr(addr->sa_data)) { + if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) && + !is_zero_ether_addr(addr->sa_data)) { BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n"); return -EINVAL; } @@ -3181,7 +3351,7 @@ void bnx2x_free_fp_mem(struct bnx2x *bp) bnx2x_free_fp_mem_at(bp, i); } -static inline void set_sb_shortcuts(struct bnx2x *bp, int index) +static void set_sb_shortcuts(struct bnx2x *bp, int index) { union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); if (!CHIP_IS_E1x(bp)) { @@ -3197,6 +3367,63 @@ static inline void set_sb_shortcuts(struct bnx2x *bp, int index) } } +/* Returns the number of actually allocated BDs */ +static int 
bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, + int rx_ring_size) +{ + struct bnx2x *bp = fp->bp; + u16 ring_prod, cqe_ring_prod; + int i, failure_cnt = 0; + + fp->rx_comp_cons = 0; + cqe_ring_prod = ring_prod = 0; + + /* This routine is called only during fo init so + * fp->eth_q_stats.rx_skb_alloc_failed = 0 + */ + for (i = 0; i < rx_ring_size; i++) { + if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) { + failure_cnt++; + continue; + } + ring_prod = NEXT_RX_IDX(ring_prod); + cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); + WARN_ON(ring_prod <= (i - failure_cnt)); + } + + if (failure_cnt) + BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n", + i - failure_cnt, fp->index); + + fp->rx_bd_prod = ring_prod; + /* Limit the CQE producer by the CQE ring size */ + fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, + cqe_ring_prod); + fp->rx_pkt = fp->rx_calls = 0; + + fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt; + + return i - failure_cnt; +} + +static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp) +{ + int i; + + for (i = 1; i <= NUM_RCQ_RINGS; i++) { + struct eth_rx_cqe_next_page *nextpg; + + nextpg = (struct eth_rx_cqe_next_page *) + &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; + nextpg->addr_hi = + cpu_to_le32(U64_HI(fp->rx_comp_mapping + + BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); + nextpg->addr_lo = + cpu_to_le32(U64_LO(fp->rx_comp_mapping + + BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); + } +} + static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) { union host_hc_status_block *sb; @@ -3206,7 +3433,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) int rx_ring_size = 0; #ifdef BCM_CNIC - if (!bp->rx_ring_size && IS_MF_STORAGE_SD(bp)) { + if (!bp->rx_ring_size && + (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { rx_ring_size = MIN_RX_SIZE_NONTPA; bp->rx_ring_size = rx_ring_size; } else @@ -3528,8 +3756,6 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu) */ dev->mtu = new_mtu; - bp->gro_check = bnx2x_need_gro_check(new_mtu); - return bnx2x_reload_if_running(dev); } @@ -3687,9 +3913,9 @@ void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); } -static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, - u8 fw_sb_id, u8 sb_index, - u8 ticks) +static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, + u8 fw_sb_id, u8 sb_index, + u8 ticks) { u32 addr = BAR_CSTRORM_INTMEM + @@ -3700,9 +3926,9 @@ static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, port, fw_sb_id, sb_index, ticks); } -static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port, - u16 fw_sb_id, u8 sb_index, - u8 disable) +static void storm_memset_hc_disable(struct bnx2x *bp, u8 port, + u16 fw_sb_id, u8 sb_index, + u8 disable) { u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); u32 addr = BAR_CSTRORM_INTMEM + diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 5c27454d2ec..7cd99b75347 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -86,13 +86,15 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode); void bnx2x_send_unload_done(struct bnx2x *bp); /** - * bnx2x_config_rss_pf - configure RSS parameters. + * bnx2x_config_rss_pf - configure RSS parameters in a PF. 
* * @bp: driver handle + * @rss_obj RSS object to use * @ind_table: indirection table to configure * @config_hash: re-configure RSS hash keys configuration */ -int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash); +int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, + u8 *ind_table, bool config_hash); /** * bnx2x__init_func_obj - init function object @@ -485,7 +487,7 @@ void bnx2x_netif_start(struct bnx2x *bp); * fills msix_table, requests vectors, updates num_queues * according to number of available vectors. */ -int bnx2x_enable_msix(struct bnx2x *bp); +int __devinit bnx2x_enable_msix(struct bnx2x *bp); /** * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly @@ -610,53 +612,6 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, barrier(); } -static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, - u8 idu_sb_id, bool is_Pf) -{ - u32 data, ctl, cnt = 100; - u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; - u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; - u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; - u32 sb_bit = 1 << (idu_sb_id%32); - u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; - u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; - - /* Not supported in BC mode */ - if (CHIP_INT_MODE_IS_BC(bp)) - return; - - data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup - << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | - IGU_REGULAR_CLEANUP_SET | - IGU_REGULAR_BCLEANUP; - - ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | - func_encode << IGU_CTRL_REG_FID_SHIFT | - IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; - - DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", - data, igu_addr_data); - REG_WR(bp, igu_addr_data, data); - mmiowb(); - barrier(); - DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", - ctl, igu_addr_ctl); - REG_WR(bp, igu_addr_ctl, ctl); - mmiowb(); - barrier(); - - /* wait for clean up to finish */ - while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) - msleep(20); - - - if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { - DP(NETIF_MSG_HW, - "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n", - idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); - } -} - static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, u8 storm, u16 index, u8 op, u8 update) { @@ -843,7 +798,7 @@ static inline void bnx2x_disable_msi(struct bnx2x *bp) { if (bp->flags & USING_MSIX_FLAG) { pci_disable_msix(bp->pdev); - bp->flags &= ~USING_MSIX_FLAG; + bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG); } else if (bp->flags & USING_MSI_FLAG) { pci_disable_msi(bp->pdev); bp->flags &= ~USING_MSI_FLAG; @@ -883,66 +838,6 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) bnx2x_clear_sge_mask_next_elems(fp); } -static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, - struct bnx2x_fastpath *fp, u16 index) -{ - struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT); - struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; - struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; - dma_addr_t mapping; - - if (unlikely(page == NULL)) { - BNX2X_ERR("Can't alloc sge\n"); - return -ENOMEM; - } - - mapping = dma_map_page(&bp->pdev->dev, page, 0, - SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - __free_pages(page, PAGES_PER_SGE_SHIFT); - BNX2X_ERR("Can't map sge\n"); - return -ENOMEM; - } - - sw_buf->page = page; - 
dma_unmap_addr_set(sw_buf, mapping, mapping); - - sge->addr_hi = cpu_to_le32(U64_HI(mapping)); - sge->addr_lo = cpu_to_le32(U64_LO(mapping)); - - return 0; -} - -static inline int bnx2x_alloc_rx_data(struct bnx2x *bp, - struct bnx2x_fastpath *fp, u16 index) -{ - u8 *data; - struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; - struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; - dma_addr_t mapping; - - data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC); - if (unlikely(data == NULL)) - return -ENOMEM; - - mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD, - fp->rx_buf_size, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - kfree(data); - BNX2X_ERR("Can't map rx data\n"); - return -ENOMEM; - } - - rx_buf->data = data; - dma_unmap_addr_set(rx_buf, mapping, mapping); - - rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); - rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - - return 0; -} - /* note that we are not allocating a new buffer, * we are just moving one from cons to prod * we are not creating a new mapping, @@ -964,6 +859,19 @@ static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp, /************************* Init ******************************************/ +/* returns func by VN for current port */ +static inline int func_by_vn(struct bnx2x *bp, int vn) +{ + return 2 * vn + BP_PORT(bp); +} + +static inline int bnx2x_config_rss_eth(struct bnx2x *bp, u8 *ind_table, + bool config_hash) +{ + return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, ind_table, + config_hash); +} + /** * bnx2x_func_start - init function * @@ -1027,66 +935,6 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, bnx2x_free_rx_sge(bp, fp, i); } -static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, - struct bnx2x_fastpath *fp, int last) -{ - int i; - - for (i = 0; i < last; i++) { - struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; - struct sw_rx_bd *first_buf = &tpa_info->first_buf; - u8 *data = first_buf->data; - - if (data == NULL) { - DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); - continue; - } - if (tpa_info->tpa_state == BNX2X_TPA_START) - dma_unmap_single(&bp->pdev->dev, - dma_unmap_addr(first_buf, mapping), - fp->rx_buf_size, DMA_FROM_DEVICE); - kfree(data); - first_buf->data = NULL; - } -} - -static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata) -{ - int i; - - for (i = 1; i <= NUM_TX_RINGS; i++) { - struct eth_tx_next_bd *tx_next_bd = - &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; - - tx_next_bd->addr_hi = - cpu_to_le32(U64_HI(txdata->tx_desc_mapping + - BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); - tx_next_bd->addr_lo = - cpu_to_le32(U64_LO(txdata->tx_desc_mapping + - BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); - } - - SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); - txdata->tx_db.data.zero_fill1 = 0; - txdata->tx_db.data.prod = 0; - - txdata->tx_pkt_prod = 0; - txdata->tx_pkt_cons = 0; - txdata->tx_bd_prod = 0; - txdata->tx_bd_cons = 0; - txdata->tx_pkt = 0; -} - -static inline void bnx2x_init_tx_rings(struct bnx2x *bp) -{ - int i; - u8 cos; - - for_each_tx_queue(bp, i) - for_each_cos_in_tx_queue(&bp->fp[i], cos) - bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]); -} - static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp) { int i; @@ -1104,80 +952,6 @@ static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp) } } -static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp) -{ - int i; - - for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { - struct eth_rx_sge *sge; 
- - sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2]; - sge->addr_hi = - cpu_to_le32(U64_HI(fp->rx_sge_mapping + - BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); - - sge->addr_lo = - cpu_to_le32(U64_LO(fp->rx_sge_mapping + - BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); - } -} - -static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp) -{ - int i; - for (i = 1; i <= NUM_RCQ_RINGS; i++) { - struct eth_rx_cqe_next_page *nextpg; - - nextpg = (struct eth_rx_cqe_next_page *) - &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; - nextpg->addr_hi = - cpu_to_le32(U64_HI(fp->rx_comp_mapping + - BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); - nextpg->addr_lo = - cpu_to_le32(U64_LO(fp->rx_comp_mapping + - BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); - } -} - -/* Returns the number of actually allocated BDs */ -static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, - int rx_ring_size) -{ - struct bnx2x *bp = fp->bp; - u16 ring_prod, cqe_ring_prod; - int i, failure_cnt = 0; - - fp->rx_comp_cons = 0; - cqe_ring_prod = ring_prod = 0; - - /* This routine is called only during fo init so - * fp->eth_q_stats.rx_skb_alloc_failed = 0 - */ - for (i = 0; i < rx_ring_size; i++) { - if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) { - failure_cnt++; - continue; - } - ring_prod = NEXT_RX_IDX(ring_prod); - cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); - WARN_ON(ring_prod <= (i - failure_cnt)); - } - - if (failure_cnt) - BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n", - i - failure_cnt, fp->index); - - fp->rx_bd_prod = ring_prod; - /* Limit the CQE producer by the CQE ring size */ - fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, - cqe_ring_prod); - fp->rx_pkt = fp->rx_calls = 0; - - fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt; - - return i - failure_cnt; -} - /* Statistics ID are global per chip/path, while Client IDs for E1x are per * port. */ @@ -1406,30 +1180,6 @@ static inline void __storm_memset_struct(struct bnx2x *bp, REG_WR(bp, addr + (i * 4), data[i]); } -static inline void storm_memset_func_cfg(struct bnx2x *bp, - struct tstorm_eth_function_common_config *tcfg, - u16 abs_fid) -{ - size_t size = sizeof(struct tstorm_eth_function_common_config); - - u32 addr = BAR_TSTRORM_INTMEM + - TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); - - __storm_memset_struct(bp, addr, size, (u32 *)tcfg); -} - -static inline void storm_memset_cmng(struct bnx2x *bp, - struct cmng_struct_per_port *cmng, - u8 port) -{ - size_t size = sizeof(struct cmng_struct_per_port); - - u32 addr = BAR_XSTRORM_INTMEM + - XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); - - __storm_memset_struct(bp, addr, size, (u32 *)cmng); -} - /** * bnx2x_wait_sp_comp - wait for the outstanding SP commands. * @@ -1512,93 +1262,6 @@ static inline bool bnx2x_mtu_allows_gro(int mtu) */ return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS; } - -static inline bool bnx2x_need_gro_check(int mtu) -{ - return (SGE_PAGES / (mtu - ETH_MAX_TPA_HEADER_SIZE - 1)) != - (SGE_PAGES / (mtu - ETH_MIN_TPA_HEADER_SIZE + 1)); -} - -/** - * bnx2x_bz_fp - zero content of the fastpath structure. - * - * @bp: driver handle - * @index: fastpath index to be zeroed - * - * Makes sure the contents of the bp->fp[index].napi is kept - * intact. 
- */ -static inline void bnx2x_bz_fp(struct bnx2x *bp, int index) -{ - struct bnx2x_fastpath *fp = &bp->fp[index]; - struct napi_struct orig_napi = fp->napi; - /* bzero bnx2x_fastpath contents */ - if (bp->stats_init) - memset(fp, 0, sizeof(*fp)); - else { - /* Keep Queue statistics */ - struct bnx2x_eth_q_stats *tmp_eth_q_stats; - struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old; - - tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats), - GFP_KERNEL); - if (tmp_eth_q_stats) - memcpy(tmp_eth_q_stats, &fp->eth_q_stats, - sizeof(struct bnx2x_eth_q_stats)); - - tmp_eth_q_stats_old = - kzalloc(sizeof(struct bnx2x_eth_q_stats_old), - GFP_KERNEL); - if (tmp_eth_q_stats_old) - memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old, - sizeof(struct bnx2x_eth_q_stats_old)); - - memset(fp, 0, sizeof(*fp)); - - if (tmp_eth_q_stats) { - memcpy(&fp->eth_q_stats, tmp_eth_q_stats, - sizeof(struct bnx2x_eth_q_stats)); - kfree(tmp_eth_q_stats); - } - - if (tmp_eth_q_stats_old) { - memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old, - sizeof(struct bnx2x_eth_q_stats_old)); - kfree(tmp_eth_q_stats_old); - } - - } - - /* Restore the NAPI object as it has been already initialized */ - fp->napi = orig_napi; - - fp->bp = bp; - fp->index = index; - if (IS_ETH_FP(fp)) - fp->max_cos = bp->max_cos; - else - /* Special queues support only one CoS */ - fp->max_cos = 1; - - /* - * set the tpa flag for each queue. The tpa flag determines the queue - * minimal size so it must be set prior to queue memory allocation - */ - fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG || - (bp->flags & GRO_ENABLE_FLAG && - bnx2x_mtu_allows_gro(bp->dev->mtu))); - if (bp->flags & TPA_ENABLE_FLAG) - fp->mode = TPA_MODE_LRO; - else if (bp->flags & GRO_ENABLE_FLAG) - fp->mode = TPA_MODE_GRO; - -#ifdef BCM_CNIC - /* We don't want TPA on an FCoE L2 ring */ - if (IS_FCOE_FP(fp)) - fp->disable_tpa = 1; -#endif -} - #ifdef BCM_CNIC /** * bnx2x_get_iscsi_info - update iSCSI params according to licensing info. @@ -1608,11 +1271,6 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index) */ void bnx2x_get_iscsi_info(struct bnx2x *bp); #endif -/* returns func by VN for current port */ -static inline int func_by_vn(struct bnx2x *bp, int vn) -{ - return 2 * vn + BP_PORT(bp); -} /** * bnx2x_link_sync_notify - send notification to other functions. @@ -1667,7 +1325,8 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr) if (is_valid_ether_addr(addr)) return true; #ifdef BCM_CNIC - if (is_zero_ether_addr(addr) && IS_MF_STORAGE_SD(bp)) + if (is_zero_ether_addr(addr) && + (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) return true; #endif return false; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 2cc0a170397..ddc18ee5c5a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -22,13 +22,10 @@ #include <linux/types.h> #include <linux/sched.h> #include <linux/crc32.h> - - #include "bnx2x.h" #include "bnx2x_cmn.h" #include "bnx2x_dump.h" #include "bnx2x_init.h" -#include "bnx2x_sp.h" /* Note: in the format strings below %s is replaced by the queue-name which is * either its index or 'fcoe' for the fcoe queue. 
Make sure the format string @@ -595,8 +592,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) #define IS_E3_ONLINE(info) (((info) & RI_E3_ONLINE) == RI_E3_ONLINE) #define IS_E3B0_ONLINE(info) (((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE) -static inline bool bnx2x_is_reg_online(struct bnx2x *bp, - const struct reg_addr *reg_info) +static bool bnx2x_is_reg_online(struct bnx2x *bp, + const struct reg_addr *reg_info) { if (CHIP_IS_E1(bp)) return IS_E1_ONLINE(reg_info->info); @@ -613,7 +610,7 @@ static inline bool bnx2x_is_reg_online(struct bnx2x *bp, } /******* Paged registers info selectors ********/ -static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp) +static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp) { if (CHIP_IS_E2(bp)) return page_vals_e2; @@ -623,7 +620,7 @@ static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp) return NULL; } -static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp) +static u32 __bnx2x_get_page_reg_num(struct bnx2x *bp) { if (CHIP_IS_E2(bp)) return PAGE_MODE_VALUES_E2; @@ -633,7 +630,7 @@ static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp) return 0; } -static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp) +static const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp) { if (CHIP_IS_E2(bp)) return page_write_regs_e2; @@ -643,7 +640,7 @@ static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp) return NULL; } -static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp) +static u32 __bnx2x_get_page_write_num(struct bnx2x *bp) { if (CHIP_IS_E2(bp)) return PAGE_WRITE_REGS_E2; @@ -653,7 +650,7 @@ static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp) return 0; } -static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp) +static const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp) { if (CHIP_IS_E2(bp)) return page_read_regs_e2; @@ -663,7 +660,7 @@ static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp) return NULL; } -static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp) +static u32 __bnx2x_get_page_read_num(struct bnx2x *bp) { if (CHIP_IS_E2(bp)) return PAGE_READ_REGS_E2; @@ -673,7 +670,7 @@ static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp) return 0; } -static inline int __bnx2x_get_regs_len(struct bnx2x *bp) +static int __bnx2x_get_regs_len(struct bnx2x *bp) { int num_pages = __bnx2x_get_page_reg_num(bp); int page_write_num = __bnx2x_get_page_write_num(bp); @@ -718,7 +715,7 @@ static int bnx2x_get_regs_len(struct net_device *dev) * ("read address"). There may be more than one write address per "page" and * more than one read address per write address. */ -static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p) +static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p) { u32 i, j, k, n; /* addresses of the paged registers */ @@ -747,7 +744,7 @@ static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p) } } -static inline void __bnx2x_get_regs(struct bnx2x *bp, u32 *p) +static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p) { u32 i, j; @@ -1433,7 +1430,7 @@ static void bnx2x_get_ringparam(struct net_device *dev, else ering->rx_pending = MAX_RX_AVAIL; - ering->tx_max_pending = MAX_TX_AVAIL; + ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL; ering->tx_pending = bp->tx_ring_size; } @@ -1451,7 +1448,7 @@ static int bnx2x_set_ringparam(struct net_device *dev, if ((ering->rx_pending > MAX_RX_AVAIL) || (ering->rx_pending < (bp->disable_tpa ? 
MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) || - (ering->tx_pending > MAX_TX_AVAIL) || + (ering->tx_pending > (IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL)) || (ering->tx_pending <= MAX_SKB_FRAGS + 4)) { DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); return -EINVAL; @@ -2212,7 +2209,7 @@ static void bnx2x_self_test(struct net_device *dev, /* ethtool statistics are displayed for all regular ethernet queues and the * fcoe L2 queue if not disabled */ -static inline int bnx2x_num_stat_queues(struct bnx2x *bp) +static int bnx2x_num_stat_queues(struct bnx2x *bp) { return BNX2X_NUM_ETH_QUEUES(bp); } @@ -2396,10 +2393,7 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev) { - struct bnx2x *bp = netdev_priv(dev); - - return (bp->multi_mode == ETH_RSS_MODE_DISABLED ? - 0 : T_ETH_INDIRECTION_TABLE_SIZE); + return T_ETH_INDIRECTION_TABLE_SIZE; } static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir) @@ -2445,7 +2439,7 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir) ind_table[i] = indir[i] + bp->fp->cl_id; } - return bnx2x_config_rss_pf(bp, ind_table, false); + return bnx2x_config_rss_eth(bp, ind_table, false); } static const struct ethtool_ops bnx2x_ethtool_ops = { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index b9b26332343..426f77aa721 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -387,7 +387,7 @@ #define STATS_QUERY_CMD_COUNT 16 -#define NIV_LIST_TABLE_SIZE 4096 +#define AFEX_LIST_TABLE_SIZE 4096 #define INVALID_VNIC_ID 0xFF diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index dbff5915b81..a440a8ba85f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -833,6 +833,7 @@ struct shared_feat_cfg { /* NVRAM Offset */ #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400 /* The interval in seconds between sending LLDP packets. 
Set to zero to disable the feature */ @@ -1235,6 +1236,8 @@ struct drv_func_mb { #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006 #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 + #define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED 0xa2000000 + #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002 #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014 #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201 @@ -1242,6 +1245,13 @@ struct drv_func_mb { #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 + + #define DRV_MSG_CODE_AFEX_DRIVER_SETMAC 0xd0000000 + #define DRV_MSG_CODE_AFEX_LISTGET_ACK 0xd1000000 + #define DRV_MSG_CODE_AFEX_LISTSET_ACK 0xd2000000 + #define DRV_MSG_CODE_AFEX_STATSGET_ACK 0xd3000000 + #define DRV_MSG_CODE_AFEX_VIFSET_ACK 0xd4000000 + #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000 #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000 @@ -1299,6 +1309,14 @@ struct drv_func_mb { #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000 #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000 #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 + #define FW_MSG_CODE_HW_SET_INVALID_IMAGE 0xb0100000 + + #define FW_MSG_CODE_AFEX_DRIVER_SETMAC_DONE 0xd0100000 + #define FW_MSG_CODE_AFEX_LISTGET_ACK 0xd1100000 + #define FW_MSG_CODE_AFEX_LISTSET_ACK 0xd2100000 + #define FW_MSG_CODE_AFEX_STATSGET_ACK 0xd3100000 + #define FW_MSG_CODE_AFEX_VIFSET_ACK 0xd4100000 + #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000 #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000 @@ -1357,6 +1375,12 @@ struct drv_func_mb { #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000 #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000 + #define DRV_STATUS_AFEX_EVENT_MASK 0x03f00000 + #define DRV_STATUS_AFEX_LISTGET_REQ 0x00100000 + #define DRV_STATUS_AFEX_LISTSET_REQ 0x00200000 + #define DRV_STATUS_AFEX_STATSGET_REQ 0x00400000 + #define DRV_STATUS_AFEX_VIFSET_REQ 0x00800000 + #define DRV_STATUS_DRV_INFO_REQ 0x04000000 u32 virt_mac_upper; @@ -1448,7 +1472,26 @@ struct func_mf_cfg { #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0 #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK - u32 reserved[2]; + /* afex default VLAN ID - 12 bits */ + #define FUNC_MF_CFG_AFEX_VLAN_MASK 0x0fff0000 + #define FUNC_MF_CFG_AFEX_VLAN_SHIFT 16 + + u32 afex_config; + #define FUNC_MF_CFG_AFEX_COS_FILTER_MASK 0x000000ff + #define FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT 0 + #define FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK 0x0000ff00 + #define FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT 8 + #define FUNC_MF_CFG_AFEX_MBA_ENABLED_VAL 0x00000100 + #define FUNC_MF_CFG_AFEX_VLAN_MODE_MASK 0x000f0000 + #define FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT 16 + + u32 reserved; +}; + +enum mf_cfg_afex_vlan_mode { + FUNC_MF_CFG_AFEX_VLAN_TRUNK_MODE = 0, + FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE, + FUNC_MF_CFG_AFEX_VLAN_TRUNK_TAG_NATIVE_MODE }; /* This structure is not applicable and should not be accessed on 57711 */ @@ -1945,18 +1988,29 @@ struct shmem2_region { u32 nvm_retain_bitmap_addr; /* 0x0070 */ - u32 reserved1; /* 0x0074 */ + /* afex support of that driver */ + u32 afex_driver_support; /* 0x0074 */ + #define SHMEM_AFEX_VERSION_MASK 0x100f + #define SHMEM_AFEX_SUPPORTED_VERSION_ONE 0x1001 + #define SHMEM_AFEX_REDUCED_DRV_LOADED 0x8000 - u32 reserved2[E2_FUNC_MAX]; + /* driver receives addr in scratchpad to which it should respond */ + u32 afex_scratchpad_addr_to_write[E2_FUNC_MAX]; - u32 reserved3[E2_FUNC_MAX];/* 0x0088 */ - u32 reserved4[E2_FUNC_MAX];/* 0x0098 */ + /* generic params from MCP to driver 
(value depends on the msg sent + * to driver + */ + u32 afex_param1_to_driver[E2_FUNC_MAX]; /* 0x0088 */ + u32 afex_param2_to_driver[E2_FUNC_MAX]; /* 0x0098 */ u32 swim_base_addr; /* 0x0108 */ u32 swim_funcs; u32 swim_main_cb; - u32 reserved5[2]; + /* bitmap notifying which VIF profiles stored in nvram are enabled by + * switch + */ + u32 afex_profiles_enabled[2]; /* generic flags controlled by the driver */ u32 drv_flags; @@ -2696,10 +2750,51 @@ union drv_info_to_mcp { struct fcoe_stats_info fcoe_stat; struct iscsi_stats_info iscsi_stat; }; + +/* stats collected for afex. + * NOTE: structure is exactly as expected to be received by the switch. + * order must remain exactly as is unless protocol changes ! + */ +struct afex_stats { + u32 tx_unicast_frames_hi; + u32 tx_unicast_frames_lo; + u32 tx_unicast_bytes_hi; + u32 tx_unicast_bytes_lo; + u32 tx_multicast_frames_hi; + u32 tx_multicast_frames_lo; + u32 tx_multicast_bytes_hi; + u32 tx_multicast_bytes_lo; + u32 tx_broadcast_frames_hi; + u32 tx_broadcast_frames_lo; + u32 tx_broadcast_bytes_hi; + u32 tx_broadcast_bytes_lo; + u32 tx_frames_discarded_hi; + u32 tx_frames_discarded_lo; + u32 tx_frames_dropped_hi; + u32 tx_frames_dropped_lo; + + u32 rx_unicast_frames_hi; + u32 rx_unicast_frames_lo; + u32 rx_unicast_bytes_hi; + u32 rx_unicast_bytes_lo; + u32 rx_multicast_frames_hi; + u32 rx_multicast_frames_lo; + u32 rx_multicast_bytes_hi; + u32 rx_multicast_bytes_lo; + u32 rx_broadcast_frames_hi; + u32 rx_broadcast_frames_lo; + u32 rx_broadcast_bytes_hi; + u32 rx_broadcast_bytes_lo; + u32 rx_frames_discarded_hi; + u32 rx_frames_discarded_lo; + u32 rx_frames_dropped_hi; + u32 rx_frames_dropped_lo; +}; + #define BCM_5710_FW_MAJOR_VERSION 7 #define BCM_5710_FW_MINOR_VERSION 2 -#define BCM_5710_FW_REVISION_VERSION 16 -#define BCM_5710_FW_ENGINEERING_VERSION 0 +#define BCM_5710_FW_REVISION_VERSION 51 +#define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_COMPILE_FLAGS 1 @@ -3389,7 +3484,7 @@ struct client_init_tx_data { #define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4) #define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4 u8 default_vlan_flg; - u8 reserved2; + u8 force_default_pri_flg; __le32 reserved3; }; @@ -4375,8 +4470,21 @@ struct fcoe_statistics_params { /* + * The data afex vif list ramrod need + */ +struct afex_vif_list_ramrod_data { + u8 afex_vif_list_command; + u8 func_bit_map; + __le16 vif_list_index; + u8 func_to_clear; + u8 echo; + __le16 reserved1; +}; + + +/* * cfc delete event data -*/ + */ struct cfc_del_event_data { u32 cid; u32 reserved0; @@ -4448,6 +4556,65 @@ struct cmng_struct_per_port { struct cmng_flags_per_port flags; }; +/* + * a single rate shaping counter. 
can be used as protocol or vnic counter + */ +struct rate_shaping_counter { + u32 quota; +#if defined(__BIG_ENDIAN) + u16 __reserved0; + u16 rate; +#elif defined(__LITTLE_ENDIAN) + u16 rate; + u16 __reserved0; +#endif +}; + +/* + * per-vnic rate shaping variables + */ +struct rate_shaping_vars_per_vn { + struct rate_shaping_counter vn_counter; +}; + +/* + * per-vnic fairness variables + */ +struct fairness_vars_per_vn { + u32 cos_credit_delta[MAX_COS_NUMBER]; + u32 vn_credit_delta; + u32 __reserved0; +}; + +/* + * cmng port init state + */ +struct cmng_vnic { + struct rate_shaping_vars_per_vn vnic_max_rate[4]; + struct fairness_vars_per_vn vnic_min_rate[4]; +}; + +/* + * cmng port init state + */ +struct cmng_init { + struct cmng_struct_per_port port; + struct cmng_vnic vnic; +}; + + +/* + * driver parameters for congestion management init, all rates are in Mbps + */ +struct cmng_init_input { + u32 port_rate; + u16 vnic_min_rate[4]; + u16 vnic_max_rate[4]; + u16 cos_min_rate[MAX_COS_NUMBER]; + u16 cos_to_pause_mask[MAX_COS_NUMBER]; + struct cmng_flags_per_port flags; +}; + /* * Protocol-common command ID for slow path elements @@ -4462,7 +4629,7 @@ enum common_spqe_cmd_id { RAMROD_CMD_ID_COMMON_STAT_QUERY, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, RAMROD_CMD_ID_COMMON_START_TRAFFIC, - RAMROD_CMD_ID_COMMON_RESERVED1, + RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, MAX_COMMON_SPQE_CMD_ID }; @@ -4670,6 +4837,17 @@ struct malicious_vf_event_data { }; /* + * vif list event data + */ +struct vif_list_event_data { + u8 func_bit_map; + u8 echo; + __le16 reserved0; + __le32 reserved1; + __le32 reserved2; +}; + +/* * union for all event ring message types */ union event_data { @@ -4678,6 +4856,7 @@ union event_data { struct cfc_del_event_data cfc_del_event; struct vf_flr_event_data vf_flr_event; struct malicious_vf_event_data malicious_vf_event; + struct vif_list_event_data vif_list_event; }; @@ -4743,7 +4922,7 @@ enum event_ring_opcode { EVENT_RING_OPCODE_FORWARD_SETUP, EVENT_RING_OPCODE_RSS_UPDATE_RULES, EVENT_RING_OPCODE_FUNCTION_UPDATE, - EVENT_RING_OPCODE_RESERVED1, + EVENT_RING_OPCODE_AFEX_VIF_LISTS, EVENT_RING_OPCODE_SET_MAC, EVENT_RING_OPCODE_CLASSIFICATION_RULES, EVENT_RING_OPCODE_FILTERS_RULES, @@ -4763,16 +4942,6 @@ enum fairness_mode { /* - * per-vnic fairness variables - */ -struct fairness_vars_per_vn { - u32 cos_credit_delta[MAX_COS_NUMBER]; - u32 vn_credit_delta; - u32 __reserved0; -}; - - -/* * Priority and cos */ struct priority_cos { @@ -4800,12 +4969,27 @@ struct flow_control_configuration { struct function_start_data { __le16 function_mode; __le16 sd_vlan_tag; - u16 reserved; + __le16 vif_id; u8 path_id; u8 network_cos_mode; }; +struct function_update_data { + u8 vif_id_change_flg; + u8 afex_default_vlan_change_flg; + u8 allowed_priorities_change_flg; + u8 network_cos_mode_change_flg; + __le16 vif_id; + __le16 afex_default_vlan; + u8 allowed_priorities; + u8 network_cos_mode; + u8 lb_mode_en; + u8 reserved0; + __le32 reserved1; +}; + + /* * FW version stored in the Xstorm RAM */ @@ -5003,7 +5187,7 @@ enum mf_mode { SINGLE_FUNCTION, MULTI_FUNCTION_SD, MULTI_FUNCTION_SI, - MULTI_FUNCTION_RESERVED, + MULTI_FUNCTION_AFEX, MAX_MF_MODE }; @@ -5128,6 +5312,7 @@ union protocol_common_specific_data { u8 protocol_data[8]; struct regpair phy_address; struct regpair mac_config_addr; + struct afex_vif_list_ramrod_data afex_vif_list_data; }; /* @@ -5140,29 +5325,6 @@ struct protocol_common_spe { /* - * a single rate shaping counter. 
can be used as protocol or vnic counter - */ -struct rate_shaping_counter { - u32 quota; -#if defined(__BIG_ENDIAN) - u16 __reserved0; - u16 rate; -#elif defined(__LITTLE_ENDIAN) - u16 rate; - u16 __reserved0; -#endif -}; - - -/* - * per-vnic rate shaping variables - */ -struct rate_shaping_vars_per_vn { - struct rate_shaping_counter vn_counter; -}; - - -/* * The send queue element */ struct slow_path_element { @@ -5330,6 +5492,18 @@ enum vf_pf_channel_state { /* + * vif_list_rule_kind + */ +enum vif_list_rule_kind { + VIF_LIST_RULE_SET, + VIF_LIST_RULE_GET, + VIF_LIST_RULE_CLEAR_ALL, + VIF_LIST_RULE_CLEAR_FUNC, + MAX_VIF_LIST_RULE_KIND +}; + + /* * zone A per-queue data */ struct xstorm_queue_zone_data { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h index 29f5c3cca31..559c396d45c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h @@ -125,7 +125,7 @@ enum { MODE_MF = 0x00000100, MODE_MF_SD = 0x00000200, MODE_MF_SI = 0x00000400, - MODE_MF_NIV = 0x00000800, + MODE_MF_AFEX = 0x00000800, MODE_E3_A0 = 0x00001000, MODE_E3_B0 = 0x00002000, MODE_COS3 = 0x00004000, @@ -241,7 +241,8 @@ static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos) REG_WR(bp, reg_addr, reg_bit_map | q_bit_map); /* set/clear queue bit in command-queue bit map - (E2/E3A0 only, valid COS values are 0/1) */ + * (E2/E3A0 only, valid COS values are 0/1) + */ if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) { reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num); reg_bit_map = REG_RD(bp, reg_addr); @@ -277,7 +278,215 @@ static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode, } -/* Returns the index of start or end of a specific block stage in ops array*/ +/* congestion management port init api description + * the api works as follows: + * the driver should pass the cmng_init_input struct, the port_init function + * will prepare the required internal ram structure which will be passed back + * to the driver (cmng_init) that will write it into the internal ram. + * + * IMPORTANT REMARKS: + * 1. the cmng_init struct does not represent the contiguous internal ram + * structure. the driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET + * offset in order to write the port sub struct and the + * PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other + * words - don't use memcpy!). + * 2. although the cmng_init struct is filled for the maximal vnic number + * possible, the driver should only write the valid vnics into the internal + * ram according to the appropriate port mode. 
+ */ +#define BITS_TO_BYTES(x) ((x)/8) + +/* CMNG constants, as derived from system spec calculations */ + +/* default MIN rate in case VNIC min rate is configured to zero- 100Mbps */ +#define DEF_MIN_RATE 100 + +/* resolution of the rate shaping timer - 400 usec */ +#define RS_PERIODIC_TIMEOUT_USEC 400 + +/* number of bytes in single QM arbitration cycle - + * coefficient for calculating the fairness timer + */ +#define QM_ARB_BYTES 160000 + +/* resolution of Min algorithm 1:100 */ +#define MIN_RES 100 + +/* how many bytes above threshold for + * the minimal credit of Min algorithm + */ +#define MIN_ABOVE_THRESH 32768 + +/* Fairness algorithm integration time coefficient - + * for calculating the actual Tfair + */ +#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) + +/* Memory of fairness algorithm - 2 cycles */ +#define FAIR_MEM 2 +#define SAFC_TIMEOUT_USEC 52 + +#define SDM_TICKS 4 + + +static inline void bnx2x_init_max(const struct cmng_init_input *input_data, + u32 r_param, struct cmng_init *ram_data) +{ + u32 vnic; + struct cmng_vnic *vdata = &ram_data->vnic; + struct cmng_struct_per_port *pdata = &ram_data->port; + /* rate shaping per-port variables + * 100 micro seconds in SDM ticks = 25 + * since each tick is 4 microSeconds + */ + + pdata->rs_vars.rs_periodic_timeout = + RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS; + + /* this is the threshold below which no timer arming will occur. + * 1.25 coefficient is for the threshold to be a little bigger + * then the real time to compensate for timer in-accuracy + */ + pdata->rs_vars.rs_threshold = + (5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4; + + /* rate shaping per-vnic variables */ + for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) { + /* global vnic counter */ + vdata->vnic_max_rate[vnic].vn_counter.rate = + input_data->vnic_max_rate[vnic]; + /* maximal Mbps for this vnic + * the quota in each timer period - number of bytes + * transmitted in this period + */ + vdata->vnic_max_rate[vnic].vn_counter.quota = + RS_PERIODIC_TIMEOUT_USEC * + (u32)vdata->vnic_max_rate[vnic].vn_counter.rate / 8; + } + +} + +static inline void bnx2x_init_min(const struct cmng_init_input *input_data, + u32 r_param, struct cmng_init *ram_data) +{ + u32 vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair; + struct cmng_vnic *vdata = &ram_data->vnic; + struct cmng_struct_per_port *pdata = &ram_data->port; + + /* this is the resolution of the fairness timer */ + fair_periodic_timeout_usec = QM_ARB_BYTES / r_param; + + /* fairness per-port variables + * for 10G it is 1000usec. for 1G it is 10000usec. + */ + tFair = T_FAIR_COEF / input_data->port_rate; + + /* this is the threshold below which we won't arm the timer anymore */ + pdata->fair_vars.fair_threshold = QM_ARB_BYTES; + + /* we multiply by 1e3/8 to get bytes/msec. 
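The max-rate init above turns each vNIC's ceiling into a byte quota per rate-shaping period (quota = period_usec * rate_Mbps / 8) and derives the arming threshold from the port byte rate. A small, hedged worked example of that arithmetic for a 10 Gbps port, as a standalone check rather than driver code:

	/* Worked example of the quota/threshold arithmetic above for a
	 * 10 Gbps port; values only, not driver code.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned int port_rate = 10000;			/* Mbps */
		unsigned int r_param = port_rate / 8;		/* bytes per usec = 1250 */
		unsigned int vnic_max_rate = 10000;		/* Mbps */

		/* bytes allowed per 400 usec rate-shaping period */
		unsigned int quota = 400u * vnic_max_rate / 8;	/* = 500000 */

		/* arming threshold: 1.25 * period * byte rate */
		unsigned int rs_threshold = (5u * 400u * r_param) / 4; /* = 625000 */

		printf("quota=%u bytes, rs_threshold=%u\n", quota, rs_threshold);
		return 0;
	}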
We don't want the credits + * to pass a credit of the T_FAIR*FAIR_MEM (algorithm resolution) + */ + pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM; + + /* since each tick is 4 microSeconds */ + pdata->fair_vars.fairness_timeout = + fair_periodic_timeout_usec / SDM_TICKS; + + /* calculate sum of weights */ + vnicWeightSum = 0; + + for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) + vnicWeightSum += input_data->vnic_min_rate[vnic]; + + /* global vnic counter */ + if (vnicWeightSum > 0) { + /* fairness per-vnic variables */ + for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) { + /* this is the credit for each period of the fairness + * algorithm - number of bytes in T_FAIR (this vnic + * share of the port rate) + */ + vdata->vnic_min_rate[vnic].vn_credit_delta = + (u32)input_data->vnic_min_rate[vnic] * 100 * + (T_FAIR_COEF / (8 * 100 * vnicWeightSum)); + if (vdata->vnic_min_rate[vnic].vn_credit_delta < + pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH) { + vdata->vnic_min_rate[vnic].vn_credit_delta = + pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH; + } + } + } +} + +static inline void bnx2x_init_fw_wrr(const struct cmng_init_input *input_data, + u32 r_param, struct cmng_init *ram_data) +{ + u32 vnic, cos; + u32 cosWeightSum = 0; + struct cmng_vnic *vdata = &ram_data->vnic; + struct cmng_struct_per_port *pdata = &ram_data->port; + + for (cos = 0; cos < MAX_COS_NUMBER; cos++) + cosWeightSum += input_data->cos_min_rate[cos]; + + if (cosWeightSum > 0) { + + for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) { + /* Since cos and vnic shouldn't work together the rate + * to divide between the coses is the port rate. + */ + u32 *ccd = vdata->vnic_min_rate[vnic].cos_credit_delta; + for (cos = 0; cos < MAX_COS_NUMBER; cos++) { + /* this is the credit for each period of + * the fairness algorithm - number of bytes + * in T_FAIR (this cos share of the vnic rate) + */ + ccd[cos] = + (u32)input_data->cos_min_rate[cos] * 100 * + (T_FAIR_COEF / (8 * 100 * cosWeightSum)); + if (ccd[cos] < pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH) { + ccd[cos] = + pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH; + } + } + } + } +} + +static inline void bnx2x_init_safc(const struct cmng_init_input *input_data, + struct cmng_init *ram_data) +{ + /* in microSeconds */ + ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC; +} + +/* Congestion management port init */ +static inline void bnx2x_init_cmng(const struct cmng_init_input *input_data, + struct cmng_init *ram_data) +{ + u32 r_param; + memset(ram_data, 0, sizeof(struct cmng_init)); + + ram_data->port.flags = input_data->flags; + + /* number of bytes transmitted in a rate of 10Gbps + * in one usec = 1.25KB. + */ + r_param = BITS_TO_BYTES(input_data->port_rate); + bnx2x_init_max(input_data, r_param, ram_data); + bnx2x_init_min(input_data, r_param, ram_data); + bnx2x_init_fw_wrr(input_data, r_param, ram_data); + bnx2x_init_safc(input_data, ram_data); +} + + + +/* Returns the index of start or end of a specific block stage in ops array */ #define BLOCK_OPS_IDX(block, stage, end) \ (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) @@ -499,9 +708,7 @@ static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp) bnx2x_set_mcp_parity(bp, false); } -/** - * Clear the parity error status registers. - */ +/* Clear the parity error status registers. 
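The entry point above zeroes the output, converts the port rate to a byte rate, and then runs the max-rate, min-rate (fairness), WRR and SAFC sub-inits in turn. A hedged usage sketch follows; only bnx2x_init_cmng() and the two struct types are taken from the code above, the surrounding function and the rates are illustrative.

	/* Hedged usage sketch: prepare an input (as sketched earlier), run the
	 * port init, then write the result into internal RAM per the remarks
	 * above.
	 */
	static void example_setup_cmng(void)
	{
		struct cmng_init_input input = { 0 };
		struct cmng_init ram_data;

		input.port_rate = 10000;		/* Mbps */
		input.vnic_min_rate[0] = 2500;		/* guaranteed share, Mbps */
		input.vnic_max_rate[0] = 10000;		/* ceiling, Mbps */

		bnx2x_init_cmng(&input, &ram_data);

		/* ram_data.port and ram_data.vnic are now ready to be written
		 * into XSTORM internal RAM at their respective offsets.
		 */
	}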
*/ static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp) { int i; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 64392ec410a..a3fb7215cd8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -138,7 +138,6 @@ -/* */ #define SFP_EEPROM_CON_TYPE_ADDR 0x2 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 @@ -404,8 +403,7 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params) DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n"); - /* - * mapping between entry priority to client number (0,1,2 -debug and + /* mapping between entry priority to client number (0,1,2 -debug and * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) * 3bits client num. * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 @@ -413,8 +411,7 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params) */ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688); - /* - * Bitmap of 5bits length. Each bit specifies whether the entry behaves + /* Bitmap of 5bits length. Each bit specifies whether the entry behaves * as strict. Bits 0,1,2 - debug and management entries, 3 - * COS0 entry, 4 - COS1 entry. * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT @@ -425,13 +422,11 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params) REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); /* defines which entries (clients) are subjected to WFQ arbitration */ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); - /* - * For strict priority entries defines the number of consecutive + /* For strict priority entries defines the number of consecutive * slots for the highest priority. */ REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); - /* - * mapping between the CREDIT_WEIGHT registers and actual client + /* mapping between the CREDIT_WEIGHT registers and actual client * numbers */ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0); @@ -443,8 +438,7 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params) REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0); /* ETS mode disable */ REG_WR(bp, PBF_REG_ETS_ENABLED, 0); - /* - * If ETS mode is enabled (there is no strict priority) defines a WFQ + /* If ETS mode is enabled (there is no strict priority) defines a WFQ * weight for COS0/COS1. */ REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710); @@ -471,10 +465,9 @@ static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars) min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS; } else min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS; - /** - * If the link isn't up (static configuration for example ) The - * link will be according to 20GBPS. - */ + /* If the link isn't up (static configuration for example ) The + * link will be according to 20GBPS. + */ return min_w_val; } /****************************************************************************** @@ -538,8 +531,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, struct bnx2x *bp = params->bp; const u8 port = params->port; const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars); - /** - * mapping between entry priority to client number (0,1,2 -debug and + /* Mapping between entry priority to client number (0,1,2 -debug and * management clients, 3 - COS0 client, 4 - COS1, ... 
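The value 0x4688 written to NIG_REG_P0_TX_ARB_PRIORITY_CLIENT in the ETS-disabled hunk above packs one 3-bit client number per priority, PRI0 in the lowest bits. A hedged decode confirming it maps PRI0..PRI4 to clients 0..4 (debug/management, COS0, COS1):

	/* Decode of the 0x4688 priority->client mapping used above: five
	 * 3-bit fields, PRI0 in bits [2:0] ... PRI4 in bits [14:12].
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned int map = 0x4688;
		int pri;

		for (pri = 0; pri <= 4; pri++)
			printf("PRI%d -> client %u\n",
			       pri, (map >> (3 * pri)) & 0x7);
		/* prints clients 0,1,2 (debug/mgmt), 3 (COS0), 4 (COS1) */
		return 0;
	}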
8 - * COS5)(HIGHEST) 4bits client num.TODO_ETS - Should be done by * reset value or init tool @@ -551,18 +543,14 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210); REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8); } - /** - * For strict priority entries defines the number of consecutive - * slots for the highest priority. - */ - /* TODO_ETS - Should be done by reset value or init tool */ + /* For strict priority entries defines the number of consecutive + * slots for the highest priority. + */ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS : NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); - /** - * mapping between the CREDIT_WEIGHT registers and actual client + /* Mapping between the CREDIT_WEIGHT registers and actual client * numbers */ - /* TODO_ETS - Should be done by reset value or init tool */ if (port) { /*Port 1 has 6 COS*/ REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543); @@ -574,8 +562,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5); } - /** - * Bitmap of 5bits length. Each bit specifies whether the entry behaves + /* Bitmap of 5bits length. Each bit specifies whether the entry behaves * as strict. Bits 0,1,2 - debug and management entries, 3 - * COS0 entry, 4 - COS1 entry. * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT @@ -590,13 +577,12 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ : NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); - /** - * Please notice the register address are note continuous and a - * for here is note appropriate.In 2 port mode port0 only COS0-5 - * can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ* In 4 - * port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT - * are never used for WFQ - */ + /* Please notice the register address are note continuous and a + * for here is note appropriate.In 2 port mode port0 only COS0-5 + * can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ* In 4 + * port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT + * are never used for WFQ + */ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 : NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0); REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 : @@ -633,10 +619,9 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf( u32 base_upper_bound = 0; u8 max_cos = 0; u8 i = 0; - /** - * In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4 - * port mode port1 has COS0-2 that can be used for WFQ. - */ + /* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4 + * port mode port1 has COS0-2 that can be used for WFQ. + */ if (!port) { base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0; max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; @@ -666,8 +651,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params) u32 base_weight = 0; u8 max_cos = 0; - /** - * mapping between entry priority to client number 0 - COS0 + /* Mapping between entry priority to client number 0 - COS0 * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num. * TODO_ETS - Should be done by reset value or init tool */ @@ -695,10 +679,9 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params) REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 : PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0); - /** - * In 2 port mode port0 has COS0-5 that can be used for WFQ. 
- * In 4 port mode port1 has COS0-2 that can be used for WFQ. - */ + /* In 2 port mode port0 has COS0-5 that can be used for WFQ. + * In 4 port mode port1 has COS0-2 that can be used for WFQ. + */ if (!port) { base_weight = PBF_REG_COS0_WEIGHT_P0; max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; @@ -738,7 +721,7 @@ static int bnx2x_ets_e3b0_disabled(const struct link_params *params, /****************************************************************************** * Description: * Disable will return basicly the values to init values. -*. +* ******************************************************************************/ int bnx2x_ets_disabled(struct link_params *params, struct link_vars *vars) @@ -867,7 +850,7 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp, /****************************************************************************** * Description: * Calculate the total BW.A value of 0 isn't legal. -*. +* ******************************************************************************/ static int bnx2x_ets_e3b0_get_total_bw( const struct link_params *params, @@ -879,7 +862,6 @@ static int bnx2x_ets_e3b0_get_total_bw( u8 is_bw_cos_exist = 0; *total_bw = 0 ; - /* Calculate total BW requested */ for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) { @@ -887,10 +869,9 @@ static int bnx2x_ets_e3b0_get_total_bw( if (!ets_params->cos[cos_idx].params.bw_params.bw) { DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW" "was set to 0\n"); - /* - * This is to prevent a state when ramrods + /* This is to prevent a state when ramrods * can't be sent - */ + */ ets_params->cos[cos_idx].params.bw_params.bw = 1; } @@ -908,8 +889,7 @@ static int bnx2x_ets_e3b0_get_total_bw( } DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW should be 100\n"); - /* - * We can handle a case whre the BW isn't 100 this can happen + /* We can handle a case whre the BW isn't 100 this can happen * if the TC are joined. */ } @@ -919,7 +899,7 @@ static int bnx2x_ets_e3b0_get_total_bw( /****************************************************************************** * Description: * Invalidate all the sp_pri_to_cos. -*. +* ******************************************************************************/ static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos) { @@ -931,7 +911,7 @@ static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos) * Description: * Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers * according to sp_pri_to_cos. -*. +* ******************************************************************************/ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, u8 *sp_pri_to_cos, const u8 pri, @@ -964,7 +944,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, * Description: * Returns the correct value according to COS and priority in * the sp_pri_cli register. -*. +* ******************************************************************************/ static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset, const u8 pri_set, @@ -981,7 +961,7 @@ static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset, * Description: * Returns the correct value according to COS and priority in the * sp_pri_cli register for NIG. -*. 
+* ******************************************************************************/ static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set) { @@ -997,7 +977,7 @@ static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set) * Description: * Returns the correct value according to COS and priority in the * sp_pri_cli register for PBF. -*. +* ******************************************************************************/ static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set) { @@ -1013,7 +993,7 @@ static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set) * Description: * Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers * according to sp_pri_to_cos.(which COS has higher priority) -*. +* ******************************************************************************/ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params, u8 *sp_pri_to_cos) @@ -1149,8 +1129,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params, return -EINVAL; } - /* - * Upper bound is set according to current link speed (min_w_val + /* Upper bound is set according to current link speed (min_w_val * should be the same for upper bound and COS credit val). */ bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig); @@ -1160,8 +1139,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params, for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) { if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) { cos_bw_bitmap |= (1 << cos_entry); - /* - * The function also sets the BW in HW(not the mappin + /* The function also sets the BW in HW(not the mappin * yet) */ bnx2x_status = bnx2x_ets_e3b0_set_cos_bw( @@ -1217,14 +1195,12 @@ static void bnx2x_ets_bw_limit_common(const struct link_params *params) /* ETS disabled configuration */ struct bnx2x *bp = params->bp; DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n"); - /* - * defines which entries (clients) are subjected to WFQ arbitration + /* Defines which entries (clients) are subjected to WFQ arbitration * COS0 0x8 * COS1 0x10 */ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18); - /* - * mapping between the ARB_CREDIT_WEIGHT registers and actual + /* Mapping between the ARB_CREDIT_WEIGHT registers and actual * client numbers (WEIGHT_0 does not actually have to represent * client 0) * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 @@ -1242,8 +1218,7 @@ static void bnx2x_ets_bw_limit_common(const struct link_params *params) /* Defines the number of consecutive slots for the strict priority */ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); - /* - * Bitmap of 5bits length. Each bit specifies whether the entry behaves + /* Bitmap of 5bits length. Each bit specifies whether the entry behaves * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0 * entry, 4 - COS1 entry. * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT @@ -1298,8 +1273,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) u32 val = 0; DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n"); - /* - * Bitmap of 5bits length. Each bit specifies whether the entry behaves + /* Bitmap of 5bits length. Each bit specifies whether the entry behaves * as strict. Bits 0,1,2 - debug and management entries, * 3 - COS0 entry, 4 - COS1 entry. 
* COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT @@ -1307,8 +1281,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) * MCP and debug are strict */ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F); - /* - * For strict priority entries defines the number of consecutive slots + /* For strict priority entries defines the number of consecutive slots * for the highest priority. */ REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); @@ -1320,8 +1293,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) /* Defines the number of consecutive slots for the strict priority */ REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos); - /* - * mapping between entry priority to client number (0,1,2 -debug and + /* Mapping between entry priority to client number (0,1,2 -debug and * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) * 3bits client num. * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 @@ -1356,15 +1328,12 @@ static void bnx2x_update_pfc_xmac(struct link_params *params, if (!(params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)) { - /* - * RX flow control - Process pause frame in receive direction + /* RX flow control - Process pause frame in receive direction */ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN; - /* - * TX flow control - Send pause packet when buffer is full - */ + /* TX flow control - Send pause packet when buffer is full */ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN; } else {/* PFC support */ @@ -1457,8 +1426,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port) { u32 mode, emac_base; - /** - * Set clause 45 mode, slow down the MDIO clock to 2.5MHz + /* Set clause 45 mode, slow down the MDIO clock to 2.5MHz * (a value of 49==0x31) and make sure that the AUTO poll is off */ @@ -1578,15 +1546,6 @@ static void bnx2x_umac_enable(struct link_params *params, DP(NETIF_MSG_LINK, "enabling UMAC\n"); - /** - * This register determines on which events the MAC will assert - * error on the i/f to the NIG along w/ EOP. - */ - - /** - * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK + - * params->port*0x14, 0xfffff. - */ /* This register opens the gate for the UMAC despite its name */ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); @@ -1649,8 +1608,7 @@ static void bnx2x_umac_enable(struct link_params *params, val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA; REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); - /* - * Maximum Frame Length (RW). Defines a 14-Bit maximum frame + /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame * length used by the MAC receive logic to check frames. 
*/ REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); @@ -1666,8 +1624,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed) struct bnx2x *bp = params->bp; u32 is_port4mode = bnx2x_is_4_port_mode(bp); - /* - * In 4-port mode, need to set the mode only once, so if XMAC is + /* In 4-port mode, need to set the mode only once, so if XMAC is * already out of reset, it means the mode has already been set, * and it must not* reset the XMAC again, since it controls both * ports of the path @@ -1691,13 +1648,13 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed) if (is_port4mode) { DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n"); - /* Set the number of ports on the system side to up to 2 */ + /* Set the number of ports on the system side to up to 2 */ REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1); /* Set the number of ports on the Warp Core to 10G */ REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); } else { - /* Set the number of ports on the system side to 1 */ + /* Set the number of ports on the system side to 1 */ REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0); if (max_speed == SPEED_10000) { DP(NETIF_MSG_LINK, @@ -1729,8 +1686,7 @@ static void bnx2x_xmac_disable(struct link_params *params) if (REG_RD(bp, MISC_REG_RESET_REG_2) & MISC_REGISTERS_RESET_REG_2_XMAC) { - /* - * Send an indication to change the state in the NIG back to XON + /* Send an indication to change the state in the NIG back to XON * Clearing this bit enables the next set of this bit to get * rising edge */ @@ -1755,13 +1711,11 @@ static int bnx2x_xmac_enable(struct link_params *params, bnx2x_xmac_init(params, vars->line_speed); - /* - * This register determines on which events the MAC will assert + /* This register determines on which events the MAC will assert * error on the i/f to the NIG along w/ EOP. */ - /* - * This register tells the NIG whether to send traffic to UMAC + /* This register tells the NIG whether to send traffic to UMAC * or XMAC */ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0); @@ -1863,8 +1817,7 @@ static int bnx2x_emac_enable(struct link_params *params, val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; - /* - * Setting this bit causes MAC control frames (except for pause + /* Setting this bit causes MAC control frames (except for pause * frames) to be passed on for processing. This setting has no * affect on the operation of the pause frames. This bit effects * all packets regardless of RX Parser packet sorting logic. @@ -1963,8 +1916,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params, struct link_vars *vars, u8 is_lb) { - /* - * Set rx control: Strip CRC and enable BigMAC to relay + /* Set rx control: Strip CRC and enable BigMAC to relay * control packets to the system as well */ u32 wb_data[2]; @@ -2016,8 +1968,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params, REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2); - /* - * Set Time (based unit is 512 bit time) between automatic + /* Set Time (based unit is 512 bit time) between automatic * re-sending of PP packets amd enable automatic re-send of * Per-Priroity Packet as long as pp_gen is asserted and * pp_disable is low. 
@@ -2086,7 +2037,7 @@ static int bnx2x_pfc_brb_get_config_params( config_val->default_class1.full_xon = 0; if (CHIP_IS_E2(bp)) { - /* class0 defaults */ + /* Class0 defaults */ config_val->default_class0.pause_xoff = DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR; config_val->default_class0.pause_xon = @@ -2095,7 +2046,7 @@ static int bnx2x_pfc_brb_get_config_params( DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR; config_val->default_class0.full_xon = DEFAULT0_E2_BRB_MAC_FULL_XON_THR; - /* pause able*/ + /* Pause able*/ config_val->pauseable_th.pause_xoff = PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE; config_val->pauseable_th.pause_xon = @@ -2114,7 +2065,7 @@ static int bnx2x_pfc_brb_get_config_params( config_val->non_pauseable_th.full_xon = PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE; } else if (CHIP_IS_E3A0(bp)) { - /* class0 defaults */ + /* Class0 defaults */ config_val->default_class0.pause_xoff = DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR; config_val->default_class0.pause_xon = @@ -2123,7 +2074,7 @@ static int bnx2x_pfc_brb_get_config_params( DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR; config_val->default_class0.full_xon = DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR; - /* pause able */ + /* Pause able */ config_val->pauseable_th.pause_xoff = PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE; config_val->pauseable_th.pause_xon = @@ -2142,7 +2093,7 @@ static int bnx2x_pfc_brb_get_config_params( config_val->non_pauseable_th.full_xon = PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE; } else if (CHIP_IS_E3B0(bp)) { - /* class0 defaults */ + /* Class0 defaults */ config_val->default_class0.pause_xoff = DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR; config_val->default_class0.pause_xon = @@ -2305,27 +2256,23 @@ static int bnx2x_update_pfc_brb(struct link_params *params, reg_th_config = &config_val.non_pauseable_th; } else reg_th_config = &config_val.default_class0; - /* - * The number of free blocks below which the pause signal to class 0 + /* The number of free blocks below which the pause signal to class 0 * of MAC #n is asserted. n=0,1 */ REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 : BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , reg_th_config->pause_xoff); - /* - * The number of free blocks above which the pause signal to class 0 + /* The number of free blocks above which the pause signal to class 0 * of MAC #n is de-asserted. n=0,1 */ REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 : BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon); - /* - * The number of free blocks below which the full signal to class 0 + /* The number of free blocks below which the full signal to class 0 * of MAC #n is asserted. n=0,1 */ REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 : BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff); - /* - * The number of free blocks above which the full signal to class 0 + /* The number of free blocks above which the full signal to class 0 * of MAC #n is de-asserted. n=0,1 */ REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 : @@ -2339,30 +2286,26 @@ static int bnx2x_update_pfc_brb(struct link_params *params, reg_th_config = &config_val.non_pauseable_th; } else reg_th_config = &config_val.default_class1; - /* - * The number of free blocks below which the pause signal to + /* The number of free blocks below which the pause signal to * class 1 of MAC #n is asserted. n=0,1 */ REG_WR(bp, (port) ? 
BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 : BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, reg_th_config->pause_xoff); - /* - * The number of free blocks above which the pause signal to + /* The number of free blocks above which the pause signal to * class 1 of MAC #n is de-asserted. n=0,1 */ REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 : BRB1_REG_PAUSE_1_XON_THRESHOLD_0, reg_th_config->pause_xon); - /* - * The number of free blocks below which the full signal to + /* The number of free blocks below which the full signal to * class 1 of MAC #n is asserted. n=0,1 */ REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 : BRB1_REG_FULL_1_XOFF_THRESHOLD_0, reg_th_config->full_xoff); - /* - * The number of free blocks above which the full signal to + /* The number of free blocks above which the full signal to * class 1 of MAC #n is de-asserted. n=0,1 */ REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 : @@ -2379,49 +2322,41 @@ static int bnx2x_update_pfc_brb(struct link_params *params, REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE, e3b0_val.per_class_guaranty_mode); - /* - * The hysteresis on the guarantied buffer space for the Lb + /* The hysteresis on the guarantied buffer space for the Lb * port before signaling XON. */ REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, e3b0_val.lb_guarantied_hyst); - /* - * The number of free blocks below which the full signal to the + /* The number of free blocks below which the full signal to the * LB port is asserted. */ REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, e3b0_val.full_lb_xoff_th); - /* - * The number of free blocks above which the full signal to the + /* The number of free blocks above which the full signal to the * LB port is de-asserted. */ REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, e3b0_val.full_lb_xon_threshold); - /* - * The number of blocks guarantied for the MAC #n port. n=0,1 + /* The number of blocks guarantied for the MAC #n port. n=0,1 */ - /* The number of blocks guarantied for the LB port.*/ + /* The number of blocks guarantied for the LB port. */ REG_WR(bp, BRB1_REG_LB_GUARANTIED, e3b0_val.lb_guarantied); - /* - * The number of blocks guarantied for the MAC #n port. - */ + /* The number of blocks guarantied for the MAC #n port. */ REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0, 2 * e3b0_val.mac_0_class_t_guarantied); REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1, 2 * e3b0_val.mac_1_class_t_guarantied); - /* - * The number of blocks guarantied for class #t in MAC0. t=0,1 + /* The number of blocks guarantied for class #t in MAC0. t=0,1 */ REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED, e3b0_val.mac_0_class_t_guarantied); REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED, e3b0_val.mac_0_class_t_guarantied); - /* - * The hysteresis on the guarantied buffer space for class in + /* The hysteresis on the guarantied buffer space for class in * MAC0. t=0,1 */ REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST, @@ -2429,15 +2364,13 @@ static int bnx2x_update_pfc_brb(struct link_params *params, REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST, e3b0_val.mac_0_class_t_guarantied_hyst); - /* - * The number of blocks guarantied for class #t in MAC1.t=0,1 + /* The number of blocks guarantied for class #t in MAC1.t=0,1 */ REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED, e3b0_val.mac_1_class_t_guarantied); REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED, e3b0_val.mac_1_class_t_guarantied); - /* - * The hysteresis on the guarantied buffer space for class #t + /* The hysteresis on the guarantied buffer space for class #t * in MAC1. 
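The BRB threshold comments above describe a classic XOFF/XON hysteresis: pause (or full) is asserted when the number of free blocks drops below the XOFF threshold and de-asserted only once it rises back above the XON threshold. A tiny hedged illustration of that state machine, independent of the register programming itself:

	/* Hedged illustration of the XOFF/XON hysteresis described above. */
	static int example_pause_state(unsigned int free_blocks,
				       unsigned int xoff_thr,
				       unsigned int xon_thr, int paused)
	{
		if (free_blocks < xoff_thr)
			return 1;		/* assert pause */
		if (free_blocks > xon_thr)
			return 0;		/* de-assert pause */
		return paused;			/* in between: keep state */
	}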
t=0,1 */ REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST, @@ -2520,15 +2453,13 @@ static void bnx2x_update_pfc_nig(struct link_params *params, FEATURE_CONFIG_PFC_ENABLED; DP(NETIF_MSG_LINK, "updating pfc nig parameters\n"); - /* - * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set + /* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set * MAC control frames (that are not pause packets) * will be forwarded to the XCM. */ xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK : NIG_REG_LLH0_XCM_MASK); - /* - * nig params will override non PFC params, since it's possible to + /* NIG params will override non PFC params, since it's possible to * do transition from PFC to SAFC */ if (set_pfc) { @@ -2548,7 +2479,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params, llfc_out_en = nig_params->llfc_out_en; llfc_enable = nig_params->llfc_enable; pause_enable = nig_params->pause_enable; - } else /*defaul non PFC mode - PAUSE */ + } else /* Default non PFC mode - PAUSE */ pause_enable = 1; xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : @@ -2608,8 +2539,7 @@ int bnx2x_update_pfc(struct link_params *params, struct link_vars *vars, struct bnx2x_nig_brb_pfc_port_params *pfc_params) { - /* - * The PFC and pause are orthogonal to one another, meaning when + /* The PFC and pause are orthogonal to one another, meaning when * PFC is enabled, the pause are disabled, and when PFC is * disabled, pause are set according to the pause result. */ @@ -3148,7 +3078,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, EMAC_MDIO_STATUS_10MB); /* address */ - tmp = ((phy->addr << 21) | (devad << 16) | reg | EMAC_MDIO_COMM_COMMAND_ADDRESS | EMAC_MDIO_COMM_START_BUSY); @@ -3337,8 +3266,7 @@ int bnx2x_phy_read(struct link_params *params, u8 phy_addr, u8 devad, u16 reg, u16 *ret_val) { u8 phy_index; - /* - * Probe for the phy according to the given phy_addr, and execute + /* Probe for the phy according to the given phy_addr, and execute * the read request on it */ for (phy_index = 0; phy_index < params->num_phys; phy_index++) { @@ -3355,8 +3283,7 @@ int bnx2x_phy_write(struct link_params *params, u8 phy_addr, u8 devad, u16 reg, u16 val) { u8 phy_index; - /* - * Probe for the phy according to the given phy_addr, and execute + /* Probe for the phy according to the given phy_addr, and execute * the write request on it */ for (phy_index = 0; phy_index < params->num_phys; phy_index++) { @@ -3382,7 +3309,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy, if (bnx2x_is_4_port_mode(bp)) { u32 port_swap, port_swap_ovr; - /*figure out path swap value */ + /* Figure out path swap value */ path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR); if (path_swap_ovr & 0x1) path_swap = (path_swap_ovr & 0x2); @@ -3392,7 +3319,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy, if (path_swap) path = path ^ 1; - /*figure out port swap value */ + /* Figure out port swap value */ port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR); if (port_swap_ovr & 0x1) port_swap = (port_swap_ovr & 0x2); @@ -3405,7 +3332,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy, lane = (port<<1) + path; } else { /* two port mode - no port swap */ - /*figure out path swap value */ + /* Figure out path swap value */ path_swap_ovr = REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR); if (path_swap_ovr & 0x1) { @@ -3437,8 +3364,7 @@ static void bnx2x_set_aer_mmd(struct link_params *params, if (USES_WARPCORE(bp)) { aer_val = bnx2x_get_warpcore_lane(phy, params); - /* - * In 
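In the 4-port case the warpcore-lane hunk above computes the lane from the (possibly swapped) path and port as (port << 1) + path. A hedged illustration of that packing, with the path/port swap overrides omitted:

	/* Hedged illustration of the 4-port lane selection above: the
	 * (post-swap) port picks the lane pair, the path picks the lane
	 * within it.
	 */
	static unsigned int example_warpcore_lane_4port(unsigned int path,
							unsigned int port)
	{
		return (port << 1) + path;	/* port 0/1, path 0/1 -> lane 0..3 */
	}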
Dual-lane mode, two lanes are joined together, + /* In Dual-lane mode, two lanes are joined together, * so in order to configure them, the AER broadcast method is * used here. * 0x200 is the broadcast address for lanes 0,1 @@ -3518,8 +3444,7 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; - /** - * resolve pause mode and advertisement Please refer to Table + /* Resolve pause mode and advertisement Please refer to Table * 28B-3 of the 802.3ab-1999 spec */ @@ -3642,6 +3567,7 @@ static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE; if (pause_result & (1<<1)) vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE; + } static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy, @@ -3698,6 +3624,7 @@ static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy, bnx2x_pause_resolve(vars, pause_result); } + static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@ -3819,9 +3746,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, /* Advertise pause */ bnx2x_ext_phy_set_pause(params, phy, vars); - - /* - * Set KR Autoneg Work-Around flag for Warpcore version older than D108 + /* Set KR Autoneg Work-Around flag for Warpcore version older than D108 */ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_UC_INFO_B1_VERSION, &val16); @@ -3829,7 +3754,6 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Enable AN KR work-around\n"); vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; } - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, &val16); @@ -3903,7 +3827,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB); - /*Enable encoded forced speed */ + /* Enable encoded forced speed */ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30); @@ -4265,8 +4189,7 @@ static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp, PORT_HW_CFG_E3_MOD_ABS_MASK) >> PORT_HW_CFG_E3_MOD_ABS_SHIFT; - /* - * Should not happen. This function called upon interrupt + /* Should not happen. This function called upon interrupt * triggered by GPIO ( since EPIO can only generate interrupts * to MCP). 
* So if this function was called and none of the GPIOs was set, @@ -4366,7 +4289,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy, "link up, rx_tx_asic_rst 0x%x\n", vars->rx_tx_asic_rst); } else { - /*reset the lane to see if link comes up.*/ + /* Reset the lane to see if link comes up.*/ bnx2x_warpcore_reset_lane(bp, phy, 1); bnx2x_warpcore_reset_lane(bp, phy, 0); @@ -4387,7 +4310,6 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy, } /*params->rx_tx_asic_rst*/ } - static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@ -4545,7 +4467,7 @@ static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy, /* Update those 1-copy registers */ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, MDIO_AER_BLOCK_AER_REG, 0); - /* Enable 1G MDIO (1-copy) */ + /* Enable 1G MDIO (1-copy) */ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, &val16); @@ -4624,43 +4546,43 @@ void bnx2x_sync_link(struct link_params *params, vars->duplex = DUPLEX_FULL; switch (vars->link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) { - case LINK_10THD: - vars->duplex = DUPLEX_HALF; - /* fall thru */ - case LINK_10TFD: - vars->line_speed = SPEED_10; - break; + case LINK_10THD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case LINK_10TFD: + vars->line_speed = SPEED_10; + break; - case LINK_100TXHD: - vars->duplex = DUPLEX_HALF; - /* fall thru */ - case LINK_100T4: - case LINK_100TXFD: - vars->line_speed = SPEED_100; - break; + case LINK_100TXHD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case LINK_100T4: + case LINK_100TXFD: + vars->line_speed = SPEED_100; + break; - case LINK_1000THD: - vars->duplex = DUPLEX_HALF; - /* fall thru */ - case LINK_1000TFD: - vars->line_speed = SPEED_1000; - break; + case LINK_1000THD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case LINK_1000TFD: + vars->line_speed = SPEED_1000; + break; - case LINK_2500THD: - vars->duplex = DUPLEX_HALF; - /* fall thru */ - case LINK_2500TFD: - vars->line_speed = SPEED_2500; - break; + case LINK_2500THD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case LINK_2500TFD: + vars->line_speed = SPEED_2500; + break; - case LINK_10GTFD: - vars->line_speed = SPEED_10000; - break; - case LINK_20GTFD: - vars->line_speed = SPEED_20000; - break; - default: - break; + case LINK_10GTFD: + vars->line_speed = SPEED_10000; + break; + case LINK_20GTFD: + vars->line_speed = SPEED_20000; + break; + default: + break; } vars->flow_ctrl = 0; if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED) @@ -4835,9 +4757,8 @@ static void bnx2x_set_swap_lanes(struct link_params *params, struct bnx2x_phy *phy) { struct bnx2x *bp = params->bp; - /* - * Each two bits represents a lane number: - * No swap is 0123 => 0x1b no need to enable the swap + /* Each two bits represents a lane number: + * No swap is 0123 => 0x1b no need to enable the swap */ u16 rx_lane_swap, tx_lane_swap; @@ -5051,8 +4972,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy, MDIO_REG_BANK_COMBO_IEEE0, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); - /* - * program speed + /* Program speed * - needed only if the speed is greater than 1G (2.5G or 10G) */ CL22_RD_OVER_CL45(bp, phy, @@ -5087,8 +5007,6 @@ static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 val = 0; - /* configure the 48 bits for BAM AN */ - /* set extended capabilities */ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) val |= MDIO_OVER_1G_UP1_2_5G; @@ 
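The lane-swap comment in the hunk above notes that with no swap the four 2-bit lane fields encode 0-1-2-3, which packs to 0x1b. A hedged decode confirming that reading the fields from bit 0 upward gives 3,2,1,0, i.e. the identity order when read from the top field down:

	/* Decode of the "no swap" lane map 0x1b noted above. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int swap = 0x1b;
		int field;

		for (field = 0; field < 4; field++)
			printf("field %d = lane %u\n",
			       field, (swap >> (2 * field)) & 0x3);
		return 0;
	}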
-5234,11 +5152,8 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, } } - -/* - * link management +/* Link management */ - static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy, struct link_params *params) { @@ -5383,8 +5298,7 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy, "ustat_val(0x8371) = 0x%x\n", ustat_val); return; } - /* - * Step 3: Check CL37 Message Pages received to indicate LP + /* Step 3: Check CL37 Message Pages received to indicate LP * supports only CL37 */ CL22_RD_OVER_CL45(bp, phy, @@ -5401,8 +5315,7 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy, cl37_fsm_received); return; } - /* - * The combined cl37/cl73 fsm state information indicating that + /* The combined cl37/cl73 fsm state information indicating that * we are connected to a device which does not support cl73, but * does support cl37 BAM. In this case we disable cl73 and * restart cl37 auto-neg @@ -5973,8 +5886,7 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port, { u32 latch_status = 0; - /* - * Disable the MI INT ( external phy int ) by writing 1 to the + /* Disable the MI INT ( external phy int ) by writing 1 to the * status register. Link down indication is high-active-signal, * so in this case we need to write the status to clear the XOR */ @@ -6009,8 +5921,7 @@ static void bnx2x_link_int_ack(struct link_params *params, struct bnx2x *bp = params->bp; u8 port = params->port; u32 mask; - /* - * First reset all status we assume only one line will be + /* First reset all status we assume only one line will be * change at a time */ bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, @@ -6024,8 +5935,7 @@ static void bnx2x_link_int_ack(struct link_params *params, if (is_10g_plus) mask = NIG_STATUS_XGXS0_LINK10G; else if (params->switch_cfg == SWITCH_CFG_10G) { - /* - * Disable the link interrupt by writing 1 to + /* Disable the link interrupt by writing 1 to * the relevant lane in the status register */ u32 ser_lane = @@ -6227,8 +6137,7 @@ int bnx2x_set_led(struct link_params *params, break; case LED_MODE_OPER: - /* - * For all other phys, OPER mode is same as ON, so in case + /* For all other phys, OPER mode is same as ON, so in case * link is down, do nothing */ if (!vars->link_up) @@ -6239,9 +6148,7 @@ int bnx2x_set_led(struct link_params *params, (params->phy[EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) && CHIP_IS_E2(bp) && params->num_phys == 2) { - /* - * This is a work-around for E2+8727 Configurations - */ + /* This is a work-around for E2+8727 Configurations */ if (mode == LED_MODE_ON || speed == SPEED_10000){ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); @@ -6250,8 +6157,7 @@ int bnx2x_set_led(struct link_params *params, tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE)); - /* - * return here without enabling traffic + /* Return here without enabling traffic * LED blink and setting rate in ON mode. * In oper mode, enabling LED blink * and setting rate is needed. 
@@ -6260,8 +6166,7 @@ int bnx2x_set_led(struct link_params *params, return rc; } } else if (SINGLE_MEDIA_DIRECT(params)) { - /* - * This is a work-around for HW issue found when link + /* This is a work-around for HW issue found when link * is up in CL73 */ if ((!CHIP_IS_E3(bp)) || @@ -6310,10 +6215,7 @@ int bnx2x_set_led(struct link_params *params, (speed == SPEED_1000) || (speed == SPEED_100) || (speed == SPEED_10))) { - /* - * On Everest 1 Ax chip versions for speeds less than - * 10G LED scheme is different - */ + /* For speeds less than 10G LED scheme is different */ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 1); REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + @@ -6333,8 +6235,7 @@ int bnx2x_set_led(struct link_params *params, } -/* - * This function comes to reflect the actual link state read DIRECTLY from the +/* This function comes to reflect the actual link state read DIRECTLY from the * HW */ int bnx2x_test_link(struct link_params *params, struct link_vars *vars, @@ -6422,16 +6323,14 @@ static int bnx2x_link_initialize(struct link_params *params, int rc = 0; u8 phy_index, non_ext_phy; struct bnx2x *bp = params->bp; - /* - * In case of external phy existence, the line speed would be the + /* In case of external phy existence, the line speed would be the * line speed linked up by the external phy. In case it is direct * only, then the line_speed during initialization will be * equal to the req_line_speed */ vars->line_speed = params->phy[INT_PHY].req_line_speed; - /* - * Initialize the internal phy in case this is a direct board + /* Initialize the internal phy in case this is a direct board * (no external phys), or this board has external phy which requires * to first. */ @@ -6463,8 +6362,7 @@ static int bnx2x_link_initialize(struct link_params *params, } else { for (phy_index = EXT_PHY1; phy_index < params->num_phys; phy_index++) { - /* - * No need to initialize second phy in case of first + /* No need to initialize second phy in case of first * phy only selection. In case of second phy, we do * need to initialize the first phy, since they are * connected. @@ -6492,7 +6390,6 @@ static int bnx2x_link_initialize(struct link_params *params, NIG_STATUS_XGXS0_LINK_STATUS | NIG_STATUS_SERDES0_LINK_STATUS | NIG_MASK_MI_INT)); - bnx2x_update_mng(params, vars->link_status); return rc; } @@ -6577,7 +6474,7 @@ static int bnx2x_update_link_up(struct link_params *params, u8 link_10g) { struct bnx2x *bp = params->bp; - u8 port = params->port; + u8 phy_idx, port = params->port; int rc = 0; vars->link_status |= (LINK_STATUS_LINK_UP | @@ -6641,11 +6538,18 @@ static int bnx2x_update_link_up(struct link_params *params, /* update shared memory */ bnx2x_update_mng(params, vars->link_status); + + /* Check remote fault */ + for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { + if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) { + bnx2x_check_half_open_conn(params, vars, 0); + break; + } + } msleep(20); return rc; } -/* - * The bnx2x_link_update function should be called upon link +/* The bnx2x_link_update function should be called upon link * interrupt. * Link is considered up as follows: * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs @@ -6702,8 +6606,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) if (!CHIP_IS_E3(bp)) REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); - /* - * Step 1: + /* Step 1: * Check external link change only for external phys, and apply * priority selection between them in case the link on both phys * is up. 
Note that instead of the common vars, a temporary @@ -6734,23 +6637,20 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) switch (bnx2x_phy_selection(params)) { case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: - /* - * In this option, the first PHY makes sure to pass the + /* In this option, the first PHY makes sure to pass the * traffic through itself only. * Its not clear how to reset the link on the second phy */ active_external_phy = EXT_PHY1; break; case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: - /* - * In this option, the first PHY makes sure to pass the + /* In this option, the first PHY makes sure to pass the * traffic through the second PHY. */ active_external_phy = EXT_PHY2; break; default: - /* - * Link indication on both PHYs with the following cases + /* Link indication on both PHYs with the following cases * is invalid: * - FIRST_PHY means that second phy wasn't initialized, * hence its link is expected to be down @@ -6767,8 +6667,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) } } prev_line_speed = vars->line_speed; - /* - * Step 2: + /* Step 2: * Read the status of the internal phy. In case of * DIRECT_SINGLE_MEDIA board, this link is the external link, * otherwise this is the link between the 577xx and the first @@ -6778,8 +6677,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) params->phy[INT_PHY].read_status( ¶ms->phy[INT_PHY], params, vars); - /* - * The INT_PHY flow control reside in the vars. This include the + /* The INT_PHY flow control reside in the vars. This include the * case where the speed or flow control are not set to AUTO. * Otherwise, the active external phy flow control result is set * to the vars. The ext_phy_line_speed is needed to check if the @@ -6788,14 +6686,12 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) */ if (active_external_phy > INT_PHY) { vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl; - /* - * Link speed is taken from the XGXS. AN and FC result from + /* Link speed is taken from the XGXS. AN and FC result from * the external phy. */ vars->link_status |= phy_vars[active_external_phy].link_status; - /* - * if active_external_phy is first PHY and link is up - disable + /* if active_external_phy is first PHY and link is up - disable * disable TX on second external PHY */ if (active_external_phy == EXT_PHY1) { @@ -6832,8 +6728,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x," " ext_phy_line_speed = %d\n", vars->flow_ctrl, vars->link_status, ext_phy_line_speed); - /* - * Upon link speed change set the NIG into drain mode. Comes to + /* Upon link speed change set the NIG into drain mode. Comes to * deals with possible FIFO glitch due to clk change when speed * is decreased without link down indicator */ @@ -6858,8 +6753,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) bnx2x_link_int_ack(params, vars, link_10g_plus); - /* - * In case external phy link is up, and internal link is down + /* In case external phy link is up, and internal link is down * (not initialized yet probably after link initialization, it * needs to be initialized. 
* Note that after link down-up as result of cable plug, the xgxs @@ -6887,8 +6781,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) vars); } } - /* - * Link is up only if both local phy and external phy (in case of + /* Link is up only if both local phy and external phy (in case of * non-direct board) are up and no fault detected on active PHY. */ vars->link_up = (vars->phy_link_up && @@ -6907,6 +6800,10 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) else rc = bnx2x_update_link_down(params, vars); + /* Update MCP link status was changed */ + if (params->feature_config_flags & FEATURE_CONFIG_BC_SUPPORTS_AFEX) + bnx2x_fw_command(bp, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0); + return rc; } @@ -7120,8 +7017,7 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) } /* XAUI workaround in 8073 A0: */ - /* - * After loading the boot ROM and restarting Autoneg, poll + /* After loading the boot ROM and restarting Autoneg, poll * Dev1, Reg $C820: */ @@ -7130,8 +7026,7 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &val); - /* - * If bit [14] = 0 or bit [13] = 0, continue on with + /* If bit [14] = 0 or bit [13] = 0, continue on with * system initialization (XAUI work-around not required, as * these bits indicate 2.5G or 1G link up). */ @@ -7140,8 +7035,7 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) return 0; } else if (!(val & (1<<15))) { DP(NETIF_MSG_LINK, "bit 15 went off\n"); - /* - * If bit 15 is 0, then poll Dev1, Reg $C841 until it's + /* If bit 15 is 0, then poll Dev1, Reg $C841 until it's * MSB (bit15) goes to 1 (indicating that the XAUI * workaround has completed), then continue on with * system initialization. @@ -7291,8 +7185,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy, val = (1<<7); } else if (phy->req_line_speed == SPEED_2500) { val = (1<<5); - /* - * Note that 2.5G works only when used with 1G + /* Note that 2.5G works only when used with 1G * advertisement */ } else @@ -7343,8 +7236,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy, /* Add support for CL37 (passive mode) III */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); - /* - * The SNR will improve about 2db by changing BW and FEE main + /* The SNR will improve about 2db by changing BW and FEE main * tap. Rest commands are executed after link is up * Change FFE main cursor to 5 in EDC register */ @@ -7431,8 +7323,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1))); if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) { - /* - * The SNR will improve about 2dbby changing the BW and FEE main + /* The SNR will improve about 2dbby changing the BW and FEE main * tap. The 1st write to change FFE main tap is set before * restart AN. Change PLL Bandwidth in EDC register */ @@ -7479,8 +7370,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1); - /* - * Set bit 3 to invert Rx in 1G mode and clear this bit + /* Set bit 3 to invert Rx in 1G mode and clear this bit * when it`s in 10G mode. 
*/ if (vars->line_speed == SPEED_1000) { @@ -7602,8 +7492,7 @@ static void bnx2x_set_disable_pmd_transmit(struct link_params *params, u8 pmd_dis) { struct bnx2x *bp = params->bp; - /* - * Disable transmitter only for bootcodes which can enable it afterwards + /* Disable transmitter only for bootcodes which can enable it afterwards * (for D3 link) */ if (pmd_dis) { @@ -7780,9 +7669,6 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, u32 data_array[4]; u16 addr32; struct bnx2x *bp = params->bp; - /*DP(NETIF_MSG_LINK, "bnx2x_direct_read_sfp_module_eeprom:" - " addr %d, cnt %d\n", - addr, byte_cnt);*/ if (byte_cnt > 16) { DP(NETIF_MSG_LINK, "Reading from eeprom is limited to 16 bytes\n"); @@ -7847,8 +7733,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 0x8002); - /* - * Wait appropriate time for two-wire command to finish before + /* Wait appropriate time for two-wire command to finish before * polling the status register */ msleep(1); @@ -7941,8 +7826,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, { u8 copper_module_type; phy->media_type = ETH_PHY_DA_TWINAX; - /* - * Check if its active cable (includes SFP+ module) + /* Check if its active cable (includes SFP+ module) * of passive cable */ if (bnx2x_read_sfp_module_eeprom(phy, @@ -8019,8 +7903,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode); return 0; } -/* - * This function read the relevant field from the module (SFP+), and verify it +/* This function read the relevant field from the module (SFP+), and verify it * is compliant with this board */ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy, @@ -8102,8 +7985,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, u8 val; struct bnx2x *bp = params->bp; u16 timeout; - /* - * Initialization time after hot-plug may take up to 300ms for + /* Initialization time after hot-plug may take up to 300ms for * some phys type ( e.g. JDSU ) */ @@ -8125,8 +8007,7 @@ static void bnx2x_8727_power_module(struct bnx2x *bp, u8 is_power_up) { /* Make sure GPIOs are not using for LED mode */ u16 val; - /* - * In the GPIO register, bit 4 is use to determine if the GPIOs are + /* In the GPIO register, bit 4 is use to determine if the GPIOs are * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for * output * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0 @@ -8142,8 +8023,7 @@ static void bnx2x_8727_power_module(struct bnx2x *bp, if (is_power_up) val = (1<<4); else - /* - * Set GPIO control to OUTPUT, and set the power bit + /* Set GPIO control to OUTPUT, and set the power bit * to according to the is_power_up */ val = (1<<1); @@ -8177,8 +8057,7 @@ static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp, DP(NETIF_MSG_LINK, "Setting LRM MODE\n"); - /* - * Changing to LRM mode takes quite few seconds. So do it only + /* Changing to LRM mode takes quite few seconds. 
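The warpcore EEPROM helper above rejects reads larger than 16 bytes, so bigger transfers have to be chunked by the caller. A hedged sketch of such a chunking loop; read_chunk() is a placeholder for whichever per-PHY EEPROM read routine applies and is purely illustrative.

	/* Hedged sketch of chunking a larger SFP+ EEPROM read into <=16 byte
	 * pieces; read_chunk() is a placeholder callback.
	 */
	static int example_read_sfp_eeprom(u16 addr, u16 len, u8 *buf,
					   int (*read_chunk)(u16 addr, u8 cnt,
							     u8 *out))
	{
		while (len) {
			u8 cnt = len > 16 ? 16 : len;
			int rc = read_chunk(addr, cnt, buf);

			if (rc)
				return rc;
			addr += cnt;
			buf += cnt;
			len -= cnt;
		}
		return 0;
	}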
So do it only * if current mode is limiting (default is LRM) */ if (cur_limiting_mode != EDC_MODE_LIMITING) @@ -8313,8 +8192,7 @@ static void bnx2x_set_sfp_module_fault_led(struct link_params *params, struct bnx2x *bp = params->bp; DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode); if (CHIP_IS_E3(bp)) { - /* - * Low ==> if SFP+ module is supported otherwise + /* Low ==> if SFP+ module is supported otherwise * High ==> if SFP+ module is not on the approved vendor list */ bnx2x_set_e3_module_fault_led(params, gpio_mode); @@ -8339,8 +8217,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params, return; DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n", power, pin_cfg); - /* - * Low ==> corresponding SFP+ module is powered + /* Low ==> corresponding SFP+ module is powered * high ==> the SFP+ module is powered down */ bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1); @@ -8474,14 +8351,12 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW); } - /* - * Check and set limiting mode / LRM mode on 8726. On 8727 it + /* Check and set limiting mode / LRM mode on 8726. On 8727 it * is done automatically */ bnx2x_set_limiting_mode(params, phy, edc_mode); - /* - * Enable transmit for this module if the module is approved, or + /* Enable transmit for this module if the module is approved, or * if unapproved modules should also enable the Tx laser */ if (rc == 0 || @@ -8536,8 +8411,7 @@ void bnx2x_handle_module_detect_int(struct link_params *params) bnx2x_set_gpio_int(bp, gpio_num, MISC_REGISTERS_GPIO_INT_OUTPUT_SET, gpio_port); - /* - * Module was plugged out. + /* Module was plugged out. * Disable transmit for this module */ phy->media_type = ETH_PHY_NOT_PRESENT; @@ -8607,8 +8481,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps" " link_status 0x%x\n", rx_sd, pcs_status, val2); - /* - * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status + /* Link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status * are set, or if the autoneg bit 1 is set */ link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1))); @@ -8722,8 +8595,7 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, } bnx2x_save_bcm_spirom_ver(bp, phy, params->port); - /* - * If TX Laser is controlled by GPIO_0, do not let PHY go into low + /* If TX Laser is controlled by GPIO_0, do not let PHY go into low * power mode, if TX Laser is disabled */ @@ -8833,8 +8705,7 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy, bnx2x_8726_external_rom_boot(phy, params); - /* - * Need to call module detected on initialization since the module + /* Need to call module detected on initialization since the module * detection triggered by actual module insertion might occur before * driver is loaded, and when driver is loaded, it reset all * registers, including the transmitter @@ -8871,8 +8742,7 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); - /* - * Enable RX-ALARM control to receive interrupt for 1G speed + /* Enable RX-ALARM control to receive interrupt for 1G speed * change */ bnx2x_cl45_write(bp, phy, @@ -8973,8 +8843,7 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy, struct link_params *params) { u32 swap_val, swap_override; u8 port; - /* - * The PHY reset is controlled by GPIO 1. 
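The 8706/8726 status-read hunk above derives link state from three MDIO reads. A compact restatement of just that check, with the register values passed in directly instead of being read over CL45 (that indirection is omitted in this sketch):

#include <stdint.h>
#include <stdbool.h>

/* Link is up if bit 0 of both the PMD rx_sd and the PCS status registers is
 * set, or if bit 1 of the 1G autoneg status is set, as the comment describes.
 */
static bool phy_8706_8726_link_up(uint16_t rx_sd, uint16_t pcs_status, uint16_t an_1g_status)
{
    return ((rx_sd & pcs_status & 0x1) != 0) || ((an_1g_status & (1 << 1)) != 0);
}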
Fake the port number + /* The PHY reset is controlled by GPIO 1. Fake the port number * to cancel the swap done in set_gpio() */ struct bnx2x *bp = params->bp; @@ -9012,14 +8881,12 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val); - /* - * Initially configure MOD_ABS to interrupt when module is + /* Initially configure MOD_ABS to interrupt when module is * presence( bit 8) */ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); - /* - * Set EDC off by setting OPTXLOS signal input to low (bit 9). + /* Set EDC off by setting OPTXLOS signal input to low (bit 9). * When the EDC is off it locks onto a reference clock and avoids * becoming 'lost' */ @@ -9040,8 +8907,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, if (phy->flags & FLAGS_NOC) val |= (3<<5); - /* - * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0 + /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0 * status which reflect SFP+ module over-current */ if (!(phy->flags & FLAGS_NOC)) @@ -9067,8 +8933,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1); DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1); - /* - * Power down the XAUI until link is up in case of dual-media + /* Power down the XAUI until link is up in case of dual-media * and 1G */ if (DUAL_MEDIA(params)) { @@ -9093,8 +8958,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300); } else { - /* - * Since the 8727 has only single reset pin, need to set the 10G + /* Since the 8727 has only single reset pin, need to set the 10G * registers although it is default */ bnx2x_cl45_write(bp, phy, @@ -9109,8 +8973,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, 0x0008); } - /* - * Set 2-wire transfer rate of SFP+ module EEPROM + /* Set 2-wire transfer rate of SFP+ module EEPROM * to 100Khz since some DACs(direct attached cables) do * not work at 400Khz. */ @@ -9133,8 +8996,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, phy->tx_preemphasis[1]); } - /* - * If TX Laser is controlled by GPIO_0, do not let PHY go into low + /* If TX Laser is controlled by GPIO_0, do not let PHY go into low * power mode, if TX Laser is disabled */ tx_en_mode = REG_RD(bp, params->shmem_base + @@ -9180,8 +9042,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "MOD_ABS indication show module is absent\n"); phy->media_type = ETH_PHY_NOT_PRESENT; - /* - * 1. Set mod_abs to detect next module + /* 1. Set mod_abs to detect next module * presence event * 2. Set EDC off by setting OPTXLOS signal input to low * (bit 9). @@ -9195,8 +9056,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); - /* - * Clear RX alarm since it stays up as long as + /* Clear RX alarm since it stays up as long as * the mod_abs wasn't changed */ bnx2x_cl45_read(bp, phy, @@ -9207,8 +9067,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, /* Module is present */ DP(NETIF_MSG_LINK, "MOD_ABS indication show module is present\n"); - /* - * First disable transmitter, and if the module is ok, the + /* First disable transmitter, and if the module is ok, the * module_detection will enable it * 1. Set mod_abs to detect next module absent event ( bit 8) * 2. 
Restore the default polarity of the OPRXLOS signal and @@ -9222,8 +9081,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); - /* - * Clear RX alarm since it stays up as long as the mod_abs + /* Clear RX alarm since it stays up as long as the mod_abs * wasn't changed. This is need to be done before calling the * module detection, otherwise it will clear* the link update * alarm @@ -9284,8 +9142,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); - /* - * If a module is present and there is need to check + /* If a module is present and there is need to check * for over current */ if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) { @@ -9350,8 +9207,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status); - /* - * Bits 0..2 --> speed detected, + /* Bits 0..2 --> speed detected, * Bits 13..15--> link is down */ if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) { @@ -9394,8 +9250,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_GP, &val1); - /* - * In case of dual-media board and 1G, power up the XAUI side, + /* In case of dual-media board and 1G, power up the XAUI side, * otherwise power it down. For 10G it is done automatically */ if (link_up) @@ -9561,8 +9416,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, /* Save spirom version */ bnx2x_save_848xx_spirom_version(phy, bp, params->port); } - /* - * This phy uses the NIG latch mechanism since link indication + /* This phy uses the NIG latch mechanism since link indication * arrives through its LED4 and not via its LASI signal, so we * get steady signal instead of clear on read */ @@ -9667,8 +9521,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, if (phy->req_duplex == DUPLEX_FULL) autoneg_val |= (1<<8); - /* - * Always write this if this is not 84833. + /* Always write this if this is not 84833. * For 84833, write it only when it's a forced speed. */ if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || @@ -9916,8 +9769,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, /* Wait for GPHY to come out of reset */ msleep(50); if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { - /* - * BCM84823 requires that XGXS links up first @ 10G for normal + /* BCM84823 requires that XGXS links up first @ 10G for normal * behavior. */ u16 temp; @@ -10393,8 +10245,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, break; } - /* - * This is a workaround for E3+84833 until autoneg + /* This is a workaround for E3+84833 until autoneg * restart is fixed in f/w */ if (CHIP_IS_E3(bp)) { @@ -10418,8 +10269,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "54618SE cfg init\n"); usleep_range(1000, 1000); - /* - * This works with E3 only, no need to check the chip + /* This works with E3 only, no need to check the chip * before determining the port. */ port = params->port; @@ -10441,7 +10291,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, MDIO_PMA_REG_CTRL, 0x8000); bnx2x_wait_reset_complete(bp, phy, params); - /*wait for GPHY to reset */ + /* Wait for GPHY to reset */ msleep(50); /* Configure LED4: set to INTR (0x6). 
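The 8727 read-status hunk above decodes MDIO_PMA_REG_8073_SPEED_LINK_STATUS as described in the reworked comment: bits 0..2 carry the detected speed and bits 13..15 carry link-down indications. The 10G case restated as a standalone predicate (the helper name exists only for this sketch):

#include <stdint.h>
#include <stdbool.h>

/* 10G link is usable when the 10G-detected bit (2) is set and the
 * corresponding link-down bit (15) is clear.
 */
static bool phy_8727_10g_link_up(uint16_t link_status)
{
    return (link_status & (1 << 2)) && !(link_status & (1 << 15));
}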
*/ @@ -10647,13 +10497,11 @@ static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy, u32 cfg_pin; u8 port; - /* - * In case of no EPIO routed to reset the GPHY, put it + /* In case of no EPIO routed to reset the GPHY, put it * in low power mode. */ bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800); - /* - * This works with E3 only, no need to check the chip + /* This works with E3 only, no need to check the chip * before determining the port. */ port = params->port; @@ -10762,7 +10610,7 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy, bnx2x_ext_phy_resolve_fc(phy, params, vars); if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { - /* report LP advertised speeds */ + /* Report LP advertised speeds */ bnx2x_cl22_read(bp, phy, 0x5, &val); if (val & (1<<5)) @@ -10827,8 +10675,7 @@ static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy, /* This register opens the gate for the UMAC despite its name */ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); - /* - * Maximum Frame Length (RW). Defines a 14-Bit maximum frame + /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame * length used by the MAC receive logic to check frames. */ REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); @@ -11101,22 +10948,23 @@ static struct bnx2x_phy phy_warpcore = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, .addr = 0xff, .def_md_devad = 0, - .flags = FLAGS_HW_LOCK_REQUIRED, + .flags = (FLAGS_HW_LOCK_REQUIRED | + FLAGS_TX_ERROR_CHECK), .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, .supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_10000baseT_Full | - SUPPORTED_20000baseKR2_Full | - SUPPORTED_20000baseMLD2_Full | - SUPPORTED_FIBRE | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause), + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_20000baseKR2_Full | + SUPPORTED_20000baseMLD2_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), .media_type = ETH_PHY_UNSPECIFIED, .ver_addr = 0, .req_flow_ctrl = 0, @@ -11258,7 +11106,8 @@ static struct bnx2x_phy phy_8726 = { .addr = 0xff, .def_md_devad = 0, .flags = (FLAGS_HW_LOCK_REQUIRED | - FLAGS_INIT_XGXS_FIRST), + FLAGS_INIT_XGXS_FIRST | + FLAGS_TX_ERROR_CHECK), .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -11289,7 +11138,8 @@ static struct bnx2x_phy phy_8727 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, .addr = 0xff, .def_md_devad = 0, - .flags = FLAGS_FAN_FAILURE_DET_REQ, + .flags = (FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_TX_ERROR_CHECK), .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -11354,8 +11204,9 @@ static struct bnx2x_phy phy_84823 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823, .addr = 0xff, .def_md_devad = 0, - .flags = FLAGS_FAN_FAILURE_DET_REQ | - FLAGS_REARM_LATCH_SIGNAL, + .flags = (FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_REARM_LATCH_SIGNAL | + FLAGS_TX_ERROR_CHECK), .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -11390,8 +11241,9 @@ static struct bnx2x_phy phy_84833 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833, .addr = 0xff, 
.def_md_devad = 0, - .flags = FLAGS_FAN_FAILURE_DET_REQ | - FLAGS_REARM_LATCH_SIGNAL, + .flags = (FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_REARM_LATCH_SIGNAL | + FLAGS_TX_ERROR_CHECK), .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -11466,9 +11318,8 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base, /* Get the 4 lanes xgxs config rx and tx */ u32 rx = 0, tx = 0, i; for (i = 0; i < 2; i++) { - /* - * INT_PHY and EXT_PHY1 share the same value location in the - * shmem. When num_phys is greater than 1, than this value + /* INT_PHY and EXT_PHY1 share the same value location in + * the shmem. When num_phys is greater than 1, than this value * applies only to EXT_PHY1 */ if (phy_index == INT_PHY || phy_index == EXT_PHY1) { @@ -11546,8 +11397,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, offsetof(struct shmem_region, dev_info. port_hw_config[port].default_cfg)) & PORT_HW_CFG_NET_SERDES_IF_MASK); - /* - * Set the appropriate supported and flags indications per + /* Set the appropriate supported and flags indications per * interface type of the chip */ switch (serdes_net_if) { @@ -11605,8 +11455,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, break; } - /* - * Enable MDC/MDIO work-around for E3 A0 since free running MDC + /* Enable MDC/MDIO work-around for E3 A0 since free running MDC * was not set as expected. For B0, ECO will be enabled so there * won't be an issue there */ @@ -11719,8 +11568,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config); bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index); - /* - * The shmem address of the phy version is located on different + /* The shmem address of the phy version is located on different * structures. In case this structure is too old, do not set * the address */ @@ -11754,8 +11602,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && (phy->ver_addr)) { - /* - * Remove 100Mb link supported for BCM84833 when phy fw + /* Remove 100Mb link supported for BCM84833 when phy fw * version lower than or equal to 1.39 */ u32 raw_ver = REG_RD(bp, phy->ver_addr); @@ -11765,8 +11612,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, SUPPORTED_100baseT_Full); } - /* - * In case mdc/mdio_access of the external phy is different than the + /* In case mdc/mdio_access of the external phy is different than the * mdc/mdio access of the XGXS, a HW lock must be taken in each access * to prevent one port interfere with another port's CL45 operations. 
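The bnx2x_populate_ext_phy() hunk above notes that 100Mb support is removed for the BCM84833 when the PHY firmware is 1.39 or older. The decode of raw_ver into a version number is not visible in this hunk, so the sketch below takes an already-parsed major/minor pair; the SUPPORTED_* values mirror linux/ethtool.h.

#include <stdint.h>

#define SUPPORTED_100baseT_Half  (1 << 2)   /* as in linux/ethtool.h */
#define SUPPORTED_100baseT_Full  (1 << 3)

/* Strip 100Mb modes from the supported-modes mask for old 84833 firmware. */
static uint32_t bcm84833_trim_supported(uint32_t supported, unsigned fw_major, unsigned fw_minor)
{
    if (fw_major < 1 || (fw_major == 1 && fw_minor <= 39))
        supported &= ~(SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full);
    return supported;
}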
*/ @@ -11936,13 +11782,16 @@ int bnx2x_phy_probe(struct link_params *params) if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) break; + if (params->feature_config_flags & + FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET) + phy->flags &= ~FLAGS_TX_ERROR_CHECK; + sync_offset = params->shmem_base + offsetof(struct shmem_region, dev_info.port_hw_config[params->port].media_type); media_types = REG_RD(bp, sync_offset); - /* - * Update media type for non-PMF sync only for the first time + /* Update media type for non-PMF sync only for the first time * In case the media type changes afterwards, it will be updated * using the update_status function */ @@ -12016,8 +11865,7 @@ void bnx2x_init_xmac_loopback(struct link_params *params, vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; vars->mac_type = MAC_TYPE_XMAC; vars->phy_flags = PHY_XGXS_FLAG; - /* - * Set WC to loopback mode since link is required to provide clock + /* Set WC to loopback mode since link is required to provide clock * to the XMAC in 20G mode */ bnx2x_set_aer_mmd(params, ¶ms->phy[0]); @@ -12162,6 +12010,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) bnx2x_link_int_enable(params); break; } + bnx2x_update_mng(params, vars->link_status); return 0; } @@ -12302,7 +12151,8 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp, NIG_MASK_MI_INT)); /* Need to take the phy out of low power mode in order - to write to access its registers */ + * to write to access its registers + */ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); @@ -12350,8 +12200,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp, (val | 1<<10)); } - /* - * Toggle Transmitter: Power down and then up with 600ms delay + /* Toggle Transmitter: Power down and then up with 600ms delay * between */ msleep(600); @@ -12494,8 +12343,7 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp, reset_gpio = MISC_REGISTERS_GPIO_1; port = 1; - /* - * Retrieve the reset gpio/port which control the reset. + /* Retrieve the reset gpio/port which control the reset. * Default is GPIO1, PORT1 */ bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0], @@ -12670,8 +12518,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: - /* - * GPIO1 affects both ports, so there's need to pull + /* GPIO1 affects both ports, so there's need to pull * it for single port alone */ rc = bnx2x_8726_common_init_phy(bp, shmem_base_path, @@ -12679,8 +12526,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], phy_index, chip_id); break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: - /* - * GPIO3's are linked, and so both need to be toggled + /* GPIO3's are linked, and so both need to be toggled * to obtain required 2us pulse. */ rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, @@ -12779,7 +12625,8 @@ static void bnx2x_check_over_curr(struct link_params *params, } static void bnx2x_analyze_link_error(struct link_params *params, - struct link_vars *vars, u32 lss_status) + struct link_vars *vars, u32 lss_status, + u8 notify) { struct bnx2x *bp = params->bp; /* Compare new value with previous value */ @@ -12793,8 +12640,7 @@ static void bnx2x_analyze_link_error(struct link_params *params, DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up, half_open_conn, lss_status); - /* - * a. Update shmem->link_status accordingly + /* a. Update shmem->link_status accordingly * b. 
Update link_vars->link_up */ if (lss_status) { @@ -12802,8 +12648,10 @@ static void bnx2x_analyze_link_error(struct link_params *params, vars->link_status &= ~LINK_STATUS_LINK_UP; vars->link_up = 0; vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; - /* - * Set LED mode to off since the PHY doesn't know about these + + /* activate nig drain */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1); + /* Set LED mode to off since the PHY doesn't know about these * errors */ led_mode = LED_MODE_OFF; @@ -12813,7 +12661,11 @@ static void bnx2x_analyze_link_error(struct link_params *params, vars->link_up = 1; vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; led_mode = LED_MODE_OPER; + + /* Clear nig drain */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); } + bnx2x_sync_link(params, vars); /* Update the LED according to the link state */ bnx2x_set_led(params, vars, led_mode, SPEED_10000); @@ -12822,7 +12674,8 @@ static void bnx2x_analyze_link_error(struct link_params *params, /* C. Trigger General Attention */ vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT; - bnx2x_notify_link_changed(bp); + if (notify) + bnx2x_notify_link_changed(bp); } /****************************************************************************** @@ -12834,22 +12687,23 @@ static void bnx2x_analyze_link_error(struct link_params *params, * a fault, for example, due to break in the TX side of fiber. * ******************************************************************************/ -static void bnx2x_check_half_open_conn(struct link_params *params, - struct link_vars *vars) +int bnx2x_check_half_open_conn(struct link_params *params, + struct link_vars *vars, + u8 notify) { struct bnx2x *bp = params->bp; u32 lss_status = 0; u32 mac_base; /* In case link status is physically up @ 10G do */ - if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) - return; + if (((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) || + (REG_RD(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4))) + return 0; if (CHIP_IS_E3(bp) && (REG_RD(bp, MISC_REG_RESET_REG_2) & (MISC_REGISTERS_RESET_REG_2_XMAC))) { /* Check E3 XMAC */ - /* - * Note that link speed cannot be queried here, since it may be + /* Note that link speed cannot be queried here, since it may be * zero while link is down. 
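The reworked bnx2x_analyze_link_error() above now also toggles the egress drain and raises a link-changed notification only when the new notify argument asks for it. A simplified state model of that path, where the boolean fields stand in for the NIG_REG_EGRESS_DRAIN0_MODE write, the LED call and the shmem link status:

#include <stdbool.h>

struct link_state {
    bool link_up;      /* mirrors vars->link_up / LINK_STATUS_LINK_UP      */
    bool half_open;    /* mirrors PHY_HALF_OPEN_CONN_FLAG                  */
    bool nig_drain;    /* mirrors NIG_REG_EGRESS_DRAIN0_MODE for this port */
    bool led_on;       /* LED_MODE_OPER vs LED_MODE_OFF                    */
};

/* Returns true when the caller should raise a link-changed event. */
static bool handle_lss_change(struct link_state *s, bool lss, bool notify)
{
    if (s->half_open == lss)          /* fault state unchanged: nothing to do     */
        return false;

    s->half_open = lss;
    s->link_up   = !lss;
    s->nig_drain = lss;               /* drain egress while the fault is present  */
    s->led_on    = !lss;              /* PHY does not see these errors, force LED */

    return notify;
}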
In case UMAC is active, LSS will * simply not be set */ @@ -12863,7 +12717,7 @@ static void bnx2x_check_half_open_conn(struct link_params *params, if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS)) lss_status = 1; - bnx2x_analyze_link_error(params, vars, lss_status); + bnx2x_analyze_link_error(params, vars, lss_status, notify); } else if (REG_RD(bp, MISC_REG_RESET_REG_2) & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) { /* Check E1X / E2 BMAC */ @@ -12880,18 +12734,21 @@ static void bnx2x_check_half_open_conn(struct link_params *params, REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2); lss_status = (wb_data[0] > 0); - bnx2x_analyze_link_error(params, vars, lss_status); + bnx2x_analyze_link_error(params, vars, lss_status, notify); } + return 0; } void bnx2x_period_func(struct link_params *params, struct link_vars *vars) { - struct bnx2x *bp = params->bp; u16 phy_idx; + struct bnx2x *bp = params->bp; for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) { bnx2x_set_aer_mmd(params, ¶ms->phy[phy_idx]); - bnx2x_check_half_open_conn(params, vars); + if (bnx2x_check_half_open_conn(params, vars, 1) != + 0) + DP(NETIF_MSG_LINK, "Fault detection failed\n"); break; } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index 763535ee483..ea4371f4335 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -254,8 +254,10 @@ struct link_params { #define FEATURE_CONFIG_PFC_ENABLED (1<<1) #define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) #define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3) +#define FEATURE_CONFIG_BC_SUPPORTS_AFEX (1<<8) #define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9) #define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10) +#define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1<<11) /* Will be populated during common init */ struct bnx2x_phy phy[MAX_PHYS]; @@ -495,4 +497,6 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, void bnx2x_period_func(struct link_params *params, struct link_vars *vars); +int bnx2x_check_half_open_conn(struct link_params *params, + struct link_vars *vars, u8 notify); #endif /* BNX2X_LINK_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 6af310195ba..f755a665dab 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -39,7 +39,6 @@ #include <linux/time.h> #include <linux/ethtool.h> #include <linux/mii.h> -#include <linux/if.h> #include <linux/if_vlan.h> #include <net/ip.h> #include <net/ipv6.h> @@ -93,15 +92,11 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1); MODULE_FIRMWARE(FW_FILE_NAME_E1H); MODULE_FIRMWARE(FW_FILE_NAME_E2); -static int multi_mode = 1; -module_param(multi_mode, int, 0); -MODULE_PARM_DESC(multi_mode, " Multi queue mode " - "(0 Disable; 1 Enable (default))"); int num_queues; module_param(num_queues, int, 0); -MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1" - " (default is as a number of CPUs)"); +MODULE_PARM_DESC(num_queues, + " Set number of queues (default is as a number of CPUs)"); static int disable_tpa; module_param(disable_tpa, int, 0); @@ -141,7 +136,9 @@ enum bnx2x_board_type { BCM57810, BCM57810_MF, BCM57840, - BCM57840_MF + BCM57840_MF, + BCM57811, + BCM57811_MF }; /* indexed by board_type, above */ @@ -158,8 +155,9 @@ static struct { { "Broadcom NetXtreme II BCM57810 10 Gigabit 
Ethernet" }, { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, - { "Broadcom NetXtreme II BCM57840 10/20 Gigabit " - "Ethernet Multi Function"} + { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"}, + { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"}, + { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"}, }; #ifndef PCI_DEVICE_ID_NX2_57710 @@ -195,6 +193,12 @@ static struct { #ifndef PCI_DEVICE_ID_NX2_57840_MF #define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF #endif +#ifndef PCI_DEVICE_ID_NX2_57811 +#define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811 +#endif +#ifndef PCI_DEVICE_ID_NX2_57811_MF +#define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF +#endif static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, @@ -207,6 +211,8 @@ static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF }, { 0 } }; @@ -220,15 +226,15 @@ static LIST_HEAD(bnx2x_prev_list); * General service functions ****************************************************************************/ -static inline void __storm_memset_dma_mapping(struct bnx2x *bp, +static void __storm_memset_dma_mapping(struct bnx2x *bp, u32 addr, dma_addr_t mapping) { REG_WR(bp, addr, U64_LO(mapping)); REG_WR(bp, addr + 4, U64_HI(mapping)); } -static inline void storm_memset_spq_addr(struct bnx2x *bp, - dma_addr_t mapping, u16 abs_fid) +static void storm_memset_spq_addr(struct bnx2x *bp, + dma_addr_t mapping, u16 abs_fid) { u32 addr = XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid); @@ -236,8 +242,8 @@ static inline void storm_memset_spq_addr(struct bnx2x *bp, __storm_memset_dma_mapping(bp, addr, mapping); } -static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, - u16 pf_id) +static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, + u16 pf_id) { REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), pf_id); @@ -249,8 +255,8 @@ static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, pf_id); } -static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, - u8 enable) +static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, + u8 enable) { REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), enable); @@ -262,8 +268,8 @@ static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, enable); } -static inline void storm_memset_eq_data(struct bnx2x *bp, - struct event_ring_data *eq_data, +static void storm_memset_eq_data(struct bnx2x *bp, + struct event_ring_data *eq_data, u16 pfid) { size_t size = sizeof(struct event_ring_data); @@ -273,8 +279,8 @@ static inline void storm_memset_eq_data(struct bnx2x *bp, __storm_memset_struct(bp, addr, size, (u32 *)eq_data); } -static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod, - u16 pfid) +static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod, + u16 pfid) { u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid); REG_WR16(bp, addr, eq_prod); @@ -309,67 +315,6 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr) 
#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" #define DMAE_DP_DST_NONE "dst_addr [none]" -static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, - int msglvl) -{ - u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; - - switch (dmae->opcode & DMAE_COMMAND_DST) { - case DMAE_CMD_DST_PCI: - if (src_type == DMAE_CMD_SRC_PCI) - DP(msglvl, "DMAE: opcode 0x%08x\n" - "src [%x:%08x], len [%d*4], dst [%x:%08x]\n" - "comp_addr [%x:%08x], comp_val 0x%08x\n", - dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, - dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, - dmae->comp_addr_hi, dmae->comp_addr_lo, - dmae->comp_val); - else - DP(msglvl, "DMAE: opcode 0x%08x\n" - "src [%08x], len [%d*4], dst [%x:%08x]\n" - "comp_addr [%x:%08x], comp_val 0x%08x\n", - dmae->opcode, dmae->src_addr_lo >> 2, - dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, - dmae->comp_addr_hi, dmae->comp_addr_lo, - dmae->comp_val); - break; - case DMAE_CMD_DST_GRC: - if (src_type == DMAE_CMD_SRC_PCI) - DP(msglvl, "DMAE: opcode 0x%08x\n" - "src [%x:%08x], len [%d*4], dst_addr [%08x]\n" - "comp_addr [%x:%08x], comp_val 0x%08x\n", - dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, - dmae->len, dmae->dst_addr_lo >> 2, - dmae->comp_addr_hi, dmae->comp_addr_lo, - dmae->comp_val); - else - DP(msglvl, "DMAE: opcode 0x%08x\n" - "src [%08x], len [%d*4], dst [%08x]\n" - "comp_addr [%x:%08x], comp_val 0x%08x\n", - dmae->opcode, dmae->src_addr_lo >> 2, - dmae->len, dmae->dst_addr_lo >> 2, - dmae->comp_addr_hi, dmae->comp_addr_lo, - dmae->comp_val); - break; - default: - if (src_type == DMAE_CMD_SRC_PCI) - DP(msglvl, "DMAE: opcode 0x%08x\n" - "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n" - "comp_addr [%x:%08x] comp_val 0x%08x\n", - dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, - dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, - dmae->comp_val); - else - DP(msglvl, "DMAE: opcode 0x%08x\n" - "src_addr [%08x] len [%d * 4] dst_addr [none]\n" - "comp_addr [%x:%08x] comp_val 0x%08x\n", - dmae->opcode, dmae->src_addr_lo >> 2, - dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, - dmae->comp_val); - break; - } - -} /* copy command into DMAE command memory and set DMAE command go */ void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) @@ -506,8 +451,6 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, dmae.dst_addr_hi = 0; dmae.len = len32; - bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF); - /* issue the command and wait for completion */ bnx2x_issue_dmae_with_comp(bp, &dmae); } @@ -540,8 +483,6 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data)); dmae.len = len32; - bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF); - /* issue the command and wait for completion */ bnx2x_issue_dmae_with_comp(bp, &dmae); } @@ -562,27 +503,6 @@ static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len); } -/* used only for slowpath so not inlined */ -static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo) -{ - u32 wb_write[2]; - - wb_write[0] = val_hi; - wb_write[1] = val_lo; - REG_WR_DMAE(bp, reg, wb_write, 2); -} - -#ifdef USE_WB_RD -static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg) -{ - u32 wb_data[2]; - - REG_RD_DMAE(bp, reg, wb_data, 2); - - return HILO_U64(wb_data[0], wb_data[1]); -} -#endif - static int bnx2x_mc_assert(struct bnx2x *bp) { char last_idx; @@ -756,7 +676,7 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) 
printk("%s" "end of fw dump\n", lvl); } -static inline void bnx2x_fw_dump(struct bnx2x *bp) +static void bnx2x_fw_dump(struct bnx2x *bp) { bnx2x_fw_dump_lvl(bp, KERN_ERR); } @@ -1076,8 +996,8 @@ static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp, poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); } -static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, - u32 expected, u32 poll_count) +static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, + u32 expected, u32 poll_count) { u32 cur_cnt = poll_count; u32 val; @@ -1088,8 +1008,8 @@ static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, return val; } -static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, - char *msg, u32 poll_cnt) +static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, + char *msg, u32 poll_cnt) { u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt); if (val != 0) { @@ -1186,7 +1106,7 @@ static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) -static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, +static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) { struct sdm_op_gen op_gen = {0}; @@ -1220,7 +1140,7 @@ static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, return ret; } -static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev) +static u8 bnx2x_is_pcie_pending(struct pci_dev *dev) { int pos; u16 status; @@ -1361,14 +1281,17 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp) int port = BP_PORT(bp); u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; u32 val = REG_RD(bp, addr); - int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; - int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0; + bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; + bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; + bool msi = (bp->flags & USING_MSI_FLAG) ? true : false; if (msix) { val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | HC_CONFIG_0_REG_INT_LINE_EN_0); val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); + if (single_msix) + val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; } else if (msi) { val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | @@ -1425,8 +1348,9 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp) static void bnx2x_igu_int_enable(struct bnx2x *bp) { u32 val; - int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; - int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0; + bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; + bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; + bool msi = (bp->flags & USING_MSI_FLAG) ? true : false; val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); @@ -1436,6 +1360,9 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp) val |= (IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_ATTN_BIT_EN); + + if (single_msix) + val |= IGU_PF_CONF_SINGLE_ISR_EN; } else if (msi) { val &= ~IGU_PF_CONF_INT_LINE_EN; val |= (IGU_PF_CONF_FUNC_EN | @@ -1455,6 +1382,9 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp) REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); + if (val & IGU_PF_CONF_INT_LINE_EN) + pci_intx(bp->pdev, true); + barrier(); /* init leading/trailing edge */ @@ -1623,7 +1553,7 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) * Returns the recovery leader resource id according to the engine this function * belongs to. Currently only only 2 engines is supported. 
*/ -static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp) +static int bnx2x_get_leader_lock_resource(struct bnx2x *bp) { if (BP_PATH(bp)) return HW_LOCK_RESOURCE_RECOVERY_LEADER_1; @@ -1636,9 +1566,9 @@ static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp) * * @bp: driver handle * - * Tries to aquire a leader lock for cuurent engine. + * Tries to aquire a leader lock for current engine. */ -static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp) +static bool bnx2x_trylock_leader_lock(struct bnx2x *bp) { return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); } @@ -1719,6 +1649,27 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); + if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) && + (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) { + /* if Q update ramrod is completed for last Q in AFEX vif set + * flow, then ACK MCP at the end + * + * mark pending ACK to MCP bit. + * prevent case that both bits are cleared. + * At the end of load/unload driver checks that + * sp_state is cleaerd, and this order prevents + * races + */ + smp_mb__before_clear_bit(); + set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state); + wmb(); + clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); + smp_mb__after_clear_bit(); + + /* schedule workqueue to send ack to MCP */ + queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); + } + return; } @@ -2229,40 +2180,6 @@ u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) return rc; } -static void bnx2x_init_port_minmax(struct bnx2x *bp) -{ - u32 r_param = bp->link_vars.line_speed / 8; - u32 fair_periodic_timeout_usec; - u32 t_fair; - - memset(&(bp->cmng.rs_vars), 0, - sizeof(struct rate_shaping_vars_per_port)); - memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port)); - - /* 100 usec in SDM ticks = 25 since each tick is 4 usec */ - bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4; - - /* this is the threshold below which no timer arming will occur - 1.25 coefficient is for the threshold to be a little bigger - than the real time, to compensate for timer in-accuracy */ - bp->cmng.rs_vars.rs_threshold = - (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4; - - /* resolution of fairness timer */ - fair_periodic_timeout_usec = QM_ARB_BYTES / r_param; - /* for 10G it is 1000usec. for 1G it is 10000usec. */ - t_fair = T_FAIR_COEF / bp->link_vars.line_speed; - - /* this is the threshold below which we won't arm the timer anymore */ - bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES; - - /* we multiply by 1e3/8 to get bytes/msec. - We don't want the credits to pass a credit - of the t_fair*FAIR_MEM (algorithm resolution) */ - bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM; - /* since each tick is 4 usec */ - bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4; -} /* Calculates the sum of vn_min_rates. It's needed for further normalizing of the min_rates. @@ -2273,12 +2190,12 @@ static void bnx2x_init_port_minmax(struct bnx2x *bp) In the later case fainess algorithm should be deactivated. If not all min_rates are zero then those that are zeroes will be set to 1. 
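The comment above describes how the per-VN minimum rates feed the fairness algorithm; the new bnx2x_calc_vn_min() that follows implements it against the cmng_init_input structure. As a standalone restatement (DEF_MIN_RATE's value is an assumption taken from the driver headers, and the ETS special case is left to the caller):

#include <stdint.h>

#define DEF_MIN_RATE 100   /* assumed stand-in for the driver's DEF_MIN_RATE */

/* Hidden VNs contribute nothing, a configured minimum of zero is bumped to
 * DEF_MIN_RATE, and fairness stays enabled only if some VN had a real minimum.
 * Returns non-zero when fairness should remain enabled.
 */
static int normalize_vn_min_rates(uint32_t *min_rate, const int *hidden, int nvn)
{
    int all_zero = 1, vn;

    for (vn = 0; vn < nvn; vn++) {
        if (hidden[vn])
            min_rate[vn] = 0;
        else if (min_rate[vn] == 0)
            min_rate[vn] = DEF_MIN_RATE;
        else
            all_zero = 0;
    }
    return !all_zero;
}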
*/ -static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) +static void bnx2x_calc_vn_min(struct bnx2x *bp, + struct cmng_init_input *input) { int all_zero = 1; int vn; - bp->vn_weight_sum = 0; for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { u32 vn_cfg = bp->mf_config[vn]; u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> @@ -2286,106 +2203,56 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) /* Skip hidden vns */ if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) - continue; - + vn_min_rate = 0; /* If min rate is zero - set it to 1 */ - if (!vn_min_rate) + else if (!vn_min_rate) vn_min_rate = DEF_MIN_RATE; else all_zero = 0; - bp->vn_weight_sum += vn_min_rate; + input->vnic_min_rate[vn] = vn_min_rate; } /* if ETS or all min rates are zeros - disable fairness */ if (BNX2X_IS_ETS_ENABLED(bp)) { - bp->cmng.flags.cmng_enables &= + input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n"); } else if (all_zero) { - bp->cmng.flags.cmng_enables &= + input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; - DP(NETIF_MSG_IFUP, "All MIN values are zeroes" - " fairness will be disabled\n"); + DP(NETIF_MSG_IFUP, + "All MIN values are zeroes fairness will be disabled\n"); } else - bp->cmng.flags.cmng_enables |= + input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN; } -static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) +static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn, + struct cmng_init_input *input) { - struct rate_shaping_vars_per_vn m_rs_vn; - struct fairness_vars_per_vn m_fair_vn; + u16 vn_max_rate; u32 vn_cfg = bp->mf_config[vn]; - int func = func_by_vn(bp, vn); - u16 vn_min_rate, vn_max_rate; - int i; - /* If function is hidden - set min and max to zeroes */ - if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { - vn_min_rate = 0; + if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) vn_max_rate = 0; - - } else { + else { u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); - vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> - FUNC_MF_CFG_MIN_BW_SHIFT) * 100; - /* If fairness is enabled (not all min rates are zeroes) and - if current min rate is zero - set it to 1. - This is a requirement of the algorithm. */ - if (bp->vn_weight_sum && (vn_min_rate == 0)) - vn_min_rate = DEF_MIN_RATE; - - if (IS_MF_SI(bp)) + if (IS_MF_SI(bp)) { /* maxCfg in percents of linkspeed */ vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; - else + } else /* SD modes */ /* maxCfg is absolute in 100Mb units */ vn_max_rate = maxCfg * 100; } - DP(NETIF_MSG_IFUP, - "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n", - func, vn_min_rate, vn_max_rate, bp->vn_weight_sum); - - memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn)); - memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn)); - - /* global vn counter - maximal Mbps for this vn */ - m_rs_vn.vn_counter.rate = vn_max_rate; - - /* quota - number of bytes transmitted in this period */ - m_rs_vn.vn_counter.quota = - (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8; - - if (bp->vn_weight_sum) { - /* credit for each period of the fairness algorithm: - number of bytes in T_FAIR (the vn share the port rate). 
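bnx2x_calc_vn_max() above distinguishes switch-independent (SI) multi-function mode, where maxCfg is a percentage of the negotiated line speed, from switch-dependent modes, where it is an absolute value in 100 Mb/s units. A standalone restatement of that split:

#include <stdint.h>

/* Returns the per-VN maximum rate in Mb/s; a hidden function gets 0. */
static uint32_t vn_max_rate(int hidden, int is_mf_si, uint32_t line_speed_mbps, uint32_t max_cfg)
{
    if (hidden)
        return 0;
    if (is_mf_si)
        return (line_speed_mbps * max_cfg) / 100;   /* maxCfg is a percentage   */
    return max_cfg * 100;                           /* maxCfg in 100 Mb/s units */
}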
- vn_weight_sum should not be larger than 10000, thus - T_FAIR_COEF / (8 * vn_weight_sum) will always be greater - than zero */ - m_fair_vn.vn_credit_delta = - max_t(u32, (vn_min_rate * (T_FAIR_COEF / - (8 * bp->vn_weight_sum))), - (bp->cmng.fair_vars.fair_threshold + - MIN_ABOVE_THRESH)); - DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", - m_fair_vn.vn_credit_delta); - } - - /* Store it to internal memory */ - for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++) - REG_WR(bp, BAR_XSTRORM_INTMEM + - XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4, - ((u32 *)(&m_rs_vn))[i]); - - for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++) - REG_WR(bp, BAR_XSTRORM_INTMEM + - XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4, - ((u32 *)(&m_fair_vn))[i]); + DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate); + + input->vnic_max_rate[vn] = vn_max_rate; } + static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) { if (CHIP_REV_IS_SLOW(bp)) @@ -2423,38 +2290,42 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp) bp->mf_config[vn] = MF_CFG_RD(bp, func_mf_config[func].config); } + if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { + DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); + bp->flags |= MF_FUNC_DIS; + } else { + DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); + bp->flags &= ~MF_FUNC_DIS; + } } static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) { + struct cmng_init_input input; + memset(&input, 0, sizeof(struct cmng_init_input)); + + input.port_rate = bp->link_vars.line_speed; if (cmng_type == CMNG_FNS_MINMAX) { int vn; - /* clear cmng_enables */ - bp->cmng.flags.cmng_enables = 0; - /* read mf conf from shmem */ if (read_cfg) bnx2x_read_mf_cfg(bp); - /* Init rate shaping and fairness contexts */ - bnx2x_init_port_minmax(bp); - /* vn_weight_sum and enable fairness if not 0 */ - bnx2x_calc_vn_weight_sum(bp); + bnx2x_calc_vn_min(bp, &input); /* calculate and set min-max rate for each vn */ if (bp->port.pmf) for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) - bnx2x_init_vn_minmax(bp, vn); + bnx2x_calc_vn_max(bp, vn, &input); /* always enable rate shaping and fairness */ - bp->cmng.flags.cmng_enables |= + input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; - if (!bp->vn_weight_sum) - DP(NETIF_MSG_IFUP, "All MIN values are zeroes" - " fairness will be disabled\n"); + + bnx2x_init_cmng(&input, &bp->cmng); return; } @@ -2463,6 +2334,35 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) "rate shaping and fairness are disabled\n"); } +static void storm_memset_cmng(struct bnx2x *bp, + struct cmng_init *cmng, + u8 port) +{ + int vn; + size_t size = sizeof(struct cmng_struct_per_port); + + u32 addr = BAR_XSTRORM_INTMEM + + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); + + __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port); + + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { + int func = func_by_vn(bp, vn); + + addr = BAR_XSTRORM_INTMEM + + XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func); + size = sizeof(struct rate_shaping_vars_per_vn); + __storm_memset_struct(bp, addr, size, + (u32 *)&cmng->vnic.vnic_max_rate[vn]); + + addr = BAR_XSTRORM_INTMEM + + XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func); + size = sizeof(struct fairness_vars_per_vn); + __storm_memset_struct(bp, addr, size, + (u32 *)&cmng->vnic.vnic_min_rate[vn]); + } +} + /* This function is called upon link interrupt */ static void bnx2x_link_attn(struct bnx2x *bp) { @@ -2535,6 +2435,190 @@ void bnx2x__link_status_update(struct bnx2x *bp) 
bnx2x_link_report(bp); } +static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid, + u16 vlan_val, u8 allowed_prio) +{ + struct bnx2x_func_state_params func_params = {0}; + struct bnx2x_func_afex_update_params *f_update_params = + &func_params.params.afex_update; + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE; + + /* no need to wait for RAMROD completion, so don't + * set RAMROD_COMP_WAIT flag + */ + + f_update_params->vif_id = vifid; + f_update_params->afex_default_vlan = vlan_val; + f_update_params->allowed_priorities = allowed_prio; + + /* if ramrod can not be sent, response to MCP immediately */ + if (bnx2x_func_state_change(bp, &func_params) < 0) + bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); + + return 0; +} + +static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type, + u16 vif_index, u8 func_bit_map) +{ + struct bnx2x_func_state_params func_params = {0}; + struct bnx2x_func_afex_viflists_params *update_params = + &func_params.params.afex_viflists; + int rc; + u32 drv_msg_code; + + /* validate only LIST_SET and LIST_GET are received from switch */ + if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET)) + BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n", + cmd_type); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS; + + /* set parameters according to cmd_type */ + update_params->afex_vif_list_command = cmd_type; + update_params->vif_list_index = cpu_to_le16(vif_index); + update_params->func_bit_map = + (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map; + update_params->func_to_clear = 0; + drv_msg_code = + (cmd_type == VIF_LIST_RULE_GET) ? + DRV_MSG_CODE_AFEX_LISTGET_ACK : + DRV_MSG_CODE_AFEX_LISTSET_ACK; + + /* if ramrod can not be sent, respond to MCP immediately for + * SET and GET requests (other are not triggered from MCP) + */ + rc = bnx2x_func_state_change(bp, &func_params); + if (rc < 0) + bnx2x_fw_command(bp, drv_msg_code, 0); + + return 0; +} + +static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd) +{ + struct afex_stats afex_stats; + u32 func = BP_ABS_FUNC(bp); + u32 mf_config; + u16 vlan_val; + u32 vlan_prio; + u16 vif_id; + u8 allowed_prio; + u8 vlan_mode; + u32 addr_to_write, vifid, addrs, stats_type, i; + + if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) { + vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); + DP(BNX2X_MSG_MCP, + "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid); + bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0); + } + + if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) { + vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); + addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]); + DP(BNX2X_MSG_MCP, + "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n", + vifid, addrs); + bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid, + addrs); + } + + if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) { + addr_to_write = SHMEM2_RD(bp, + afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]); + stats_type = SHMEM2_RD(bp, + afex_param1_to_driver[BP_FW_MB_IDX(bp)]); + + DP(BNX2X_MSG_MCP, + "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n", + addr_to_write); + + bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type); + + /* write response to scratchpad, for MCP */ + for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++) + REG_WR(bp, addr_to_write + i*sizeof(u32), + *(((u32 *)(&afex_stats))+i)); + + /* send ack message to MCP */ + bnx2x_fw_command(bp, 
DRV_MSG_CODE_AFEX_STATSGET_ACK, 0); + } + + if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) { + mf_config = MF_CFG_RD(bp, func_mf_config[func].config); + bp->mf_config[BP_VN(bp)] = mf_config; + DP(BNX2X_MSG_MCP, + "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n", + mf_config); + + /* if VIF_SET is "enabled" */ + if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) { + /* set rate limit directly to internal RAM */ + struct cmng_init_input cmng_input; + struct rate_shaping_vars_per_vn m_rs_vn; + size_t size = sizeof(struct rate_shaping_vars_per_vn); + u32 addr = BAR_XSTRORM_INTMEM + + XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp)); + + bp->mf_config[BP_VN(bp)] = mf_config; + + bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input); + m_rs_vn.vn_counter.rate = + cmng_input.vnic_max_rate[BP_VN(bp)]; + m_rs_vn.vn_counter.quota = + (m_rs_vn.vn_counter.rate * + RS_PERIODIC_TIMEOUT_USEC) / 8; + + __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn); + + /* read relevant values from mf_cfg struct in shmem */ + vif_id = + (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & + FUNC_MF_CFG_E1HOV_TAG_MASK) >> + FUNC_MF_CFG_E1HOV_TAG_SHIFT; + vlan_val = + (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & + FUNC_MF_CFG_AFEX_VLAN_MASK) >> + FUNC_MF_CFG_AFEX_VLAN_SHIFT; + vlan_prio = (mf_config & + FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> + FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT; + vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT); + vlan_mode = + (MF_CFG_RD(bp, + func_mf_config[func].afex_config) & + FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> + FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT; + allowed_prio = + (MF_CFG_RD(bp, + func_mf_config[func].afex_config) & + FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> + FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT; + + /* send ramrod to FW, return in case of failure */ + if (bnx2x_afex_func_update(bp, vif_id, vlan_val, + allowed_prio)) + return; + + bp->afex_def_vlan_tag = vlan_val; + bp->afex_vlan_mode = vlan_mode; + } else { + /* notify link down because BP->flags is disabled */ + bnx2x_link_report(bp); + + /* send INVALID VIF ramrod to FW */ + bnx2x_afex_func_update(bp, 0xFFFF, 0, 0); + + /* Reset the default afex VLAN */ + bp->afex_def_vlan_tag = -1; + } + } +} + static void bnx2x_pmf_update(struct bnx2x *bp) { int port = BP_PORT(bp); @@ -2619,6 +2703,18 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) } +static void storm_memset_func_cfg(struct bnx2x *bp, + struct tstorm_eth_function_common_config *tcfg, + u16 abs_fid) +{ + size_t size = sizeof(struct tstorm_eth_function_common_config); + + u32 addr = BAR_TSTRORM_INTMEM + + TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); + + __storm_memset_struct(bp, addr, size, (u32 *)tcfg); +} + void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) { if (CHIP_IS_E1x(bp)) { @@ -2648,9 +2744,9 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) * * Return the flags that are common for the Tx-only and not normal connections. 
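The VIFSET branch of bnx2x_handle_afex_cmd() above combines the 12-bit default VLAN ID taken from the e1hov tag field with the configured transmit priority shifted into the 802.1p bits. That composition on its own, with VLAN_PRIO_SHIFT as in linux/if_vlan.h and the masking added for safety in this sketch:

#include <stdint.h>

#define VLAN_PRIO_SHIFT 13   /* PCP field occupies bits 15..13 of the tag */

static uint16_t afex_default_vlan_tag(uint16_t vlan_id, uint8_t tx_prio)
{
    return (uint16_t)((vlan_id & 0x0fff) | ((uint16_t)(tx_prio & 0x7) << VLAN_PRIO_SHIFT));
}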
*/ -static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp, - struct bnx2x_fastpath *fp, - bool zero_stats) +static unsigned long bnx2x_get_common_flags(struct bnx2x *bp, + struct bnx2x_fastpath *fp, + bool zero_stats) { unsigned long flags = 0; @@ -2670,9 +2766,9 @@ static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp, return flags; } -static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp, - struct bnx2x_fastpath *fp, - bool leading) +static unsigned long bnx2x_get_q_flags(struct bnx2x *bp, + struct bnx2x_fastpath *fp, + bool leading) { unsigned long flags = 0; @@ -2680,8 +2776,11 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp, if (IS_MF_SD(bp)) __set_bit(BNX2X_Q_FLG_OV, &flags); - if (IS_FCOE_FP(fp)) + if (IS_FCOE_FP(fp)) { __set_bit(BNX2X_Q_FLG_FCOE, &flags); + /* For FCoE - force usage of default priority (for afex) */ + __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags); + } if (!fp->disable_tpa) { __set_bit(BNX2X_Q_FLG_TPA, &flags); @@ -2698,6 +2797,10 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp, /* Always set HW VLAN stripping */ __set_bit(BNX2X_Q_FLG_VLAN, &flags); + /* configure silent vlan removal */ + if (IS_MF_AFEX(bp)) + __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags); + return flags | bnx2x_get_common_flags(bp, fp, true); } @@ -2800,6 +2903,13 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; else rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; + /* configure silent vlan removal + * if multi function mode is afex, then mask default vlan + */ + if (IS_MF_AFEX(bp)) { + rxq_init->silent_removal_value = bp->afex_def_vlan_tag; + rxq_init->silent_removal_mask = VLAN_VID_MASK; + } } static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, @@ -3051,7 +3161,7 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) * configure FW * notify others function about the change */ -static inline void bnx2x_config_mf_bw(struct bnx2x *bp) +static void bnx2x_config_mf_bw(struct bnx2x *bp) { if (bp->link_vars.link_up) { bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); @@ -3060,7 +3170,7 @@ static inline void bnx2x_config_mf_bw(struct bnx2x *bp) storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); } -static inline void bnx2x_set_mf_bw(struct bnx2x *bp) +static void bnx2x_set_mf_bw(struct bnx2x *bp) { bnx2x_config_mf_bw(bp); bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); @@ -3147,7 +3257,7 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) } /* must be called under the spq lock */ -static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) +static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) { struct eth_spe *next_spe = bp->spq_prod_bd; @@ -3163,7 +3273,7 @@ static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) } /* must be called under the spq lock */ -static inline void bnx2x_sp_prod_update(struct bnx2x *bp) +static void bnx2x_sp_prod_update(struct bnx2x *bp) { int func = BP_FUNC(bp); @@ -3185,7 +3295,7 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp) * @cmd: command to check * @cmd_type: command type */ -static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) +static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) { if ((cmd_type == NONE_CONNECTION_TYPE) || (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || @@ -3319,7 +3429,7 @@ static void bnx2x_release_alr(struct bnx2x *bp) #define BNX2X_DEF_SB_ATT_IDX 0x0001 #define BNX2X_DEF_SB_IDX 0x0002 -static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) 
+static u16 bnx2x_update_dsb_idx(struct bnx2x *bp) { struct host_sp_status_block *def_sb = bp->def_status_blk; u16 rc = 0; @@ -3451,7 +3561,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) } } -static inline void bnx2x_fan_failure(struct bnx2x *bp) +static void bnx2x_fan_failure(struct bnx2x *bp) { int port = BP_PORT(bp); u32 ext_phy_config; @@ -3481,7 +3591,7 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp) } -static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) +static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) { int port = BP_PORT(bp); int reg_offset; @@ -3521,7 +3631,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) } } -static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) +static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) { u32 val; @@ -3552,7 +3662,7 @@ static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) } } -static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) +static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) { u32 val; @@ -3596,7 +3706,7 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) } } -static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) +static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) { u32 val; @@ -3606,6 +3716,7 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) int func = BP_FUNC(bp); REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); + bnx2x_read_mf_cfg(bp); bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, func_mf_config[BP_ABS_FUNC(bp)].config); val = SHMEM_RD(bp, @@ -3628,6 +3739,9 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) /* start dcbx state machine */ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_NEG_RECEIVED); + if (val & DRV_STATUS_AFEX_EVENT_MASK) + bnx2x_handle_afex_cmd(bp, + val & DRV_STATUS_AFEX_EVENT_MASK); if (bp->link_vars.periodic_flags & PERIODIC_FLAGS_LINK_EVENT) { /* sync with link */ @@ -3722,7 +3836,7 @@ void bnx2x_set_reset_global(struct bnx2x *bp) * * Should be run under rtnl lock */ -static inline void bnx2x_clear_reset_global(struct bnx2x *bp) +static void bnx2x_clear_reset_global(struct bnx2x *bp) { u32 val; bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); @@ -3736,7 +3850,7 @@ static inline void bnx2x_clear_reset_global(struct bnx2x *bp) * * should be run under rtnl lock */ -static inline bool bnx2x_reset_is_global(struct bnx2x *bp) +static bool bnx2x_reset_is_global(struct bnx2x *bp) { u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); @@ -3749,7 +3863,7 @@ static inline bool bnx2x_reset_is_global(struct bnx2x *bp) * * Should be run under rtnl lock */ -static inline void bnx2x_set_reset_done(struct bnx2x *bp) +static void bnx2x_set_reset_done(struct bnx2x *bp) { u32 val; u32 bit = BP_PATH(bp) ? @@ -3874,7 +3988,7 @@ bool bnx2x_clear_pf_load(struct bnx2x *bp) * * should be run under rtnl lock */ -static inline bool bnx2x_get_load_status(struct bnx2x *bp, int engine) +static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) { u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK : BNX2X_PATH0_LOAD_CNT_MASK); @@ -3895,7 +4009,7 @@ static inline bool bnx2x_get_load_status(struct bnx2x *bp, int engine) /* * Reset the load status for the current engine. */ -static inline void bnx2x_clear_load_status(struct bnx2x *bp) +static void bnx2x_clear_load_status(struct bnx2x *bp) { u32 val; u32 mask = (BP_PATH(bp) ? 
BNX2X_PATH1_LOAD_CNT_MASK : @@ -3906,13 +4020,13 @@ static inline void bnx2x_clear_load_status(struct bnx2x *bp) bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); } -static inline void _print_next_block(int idx, const char *blk) +static void _print_next_block(int idx, const char *blk) { pr_cont("%s%s", idx ? ", " : "", blk); } -static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, - bool print) +static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, + bool print) { int i = 0; u32 cur_bit = 0; @@ -3959,8 +4073,8 @@ static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, return par_num; } -static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, - bool *global, bool print) +static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, + bool *global, bool print) { int i = 0; u32 cur_bit = 0; @@ -4045,8 +4159,8 @@ static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, return par_num; } -static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, - bool print) +static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, + bool print) { int i = 0; u32 cur_bit = 0; @@ -4097,8 +4211,8 @@ static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, return par_num; } -static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, - bool *global, bool print) +static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, + bool *global, bool print) { int i = 0; u32 cur_bit = 0; @@ -4139,8 +4253,8 @@ static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, return par_num; } -static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, - bool print) +static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, + bool print) { int i = 0; u32 cur_bit = 0; @@ -4166,8 +4280,8 @@ static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, return par_num; } -static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, - u32 *sig) +static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, + u32 *sig) { if ((sig[0] & HW_PRTY_ASSERT_SET_0) || (sig[1] & HW_PRTY_ASSERT_SET_1) || @@ -4238,7 +4352,7 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) } -static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) +static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) { u32 val; if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { @@ -4430,7 +4544,7 @@ void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, igu_addr); } -static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) +static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) { /* No memory barriers */ storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); @@ -4461,7 +4575,7 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, } #endif -static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp) +static void bnx2x_handle_mcast_eqe(struct bnx2x *bp) { struct bnx2x_mcast_ramrod_params rparam; int rc; @@ -4486,8 +4600,8 @@ static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp) netif_addr_unlock_bh(bp->dev); } -static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp, - union event_ring_elem *elem) +static void bnx2x_handle_classification_eqe(struct bnx2x *bp, + union event_ring_elem *elem) { unsigned long ramrod_flags = 0; int rc = 0; @@ -4534,7 +4648,7 @@ static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp, static void 
bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); #endif -static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) +static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) { netif_addr_lock_bh(bp->dev); @@ -4555,7 +4669,94 @@ static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) netif_addr_unlock_bh(bp->dev); } -static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( +static void bnx2x_after_afex_vif_lists(struct bnx2x *bp, + union event_ring_elem *elem) +{ + if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) { + DP(BNX2X_MSG_SP, + "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n", + elem->message.data.vif_list_event.func_bit_map); + bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK, + elem->message.data.vif_list_event.func_bit_map); + } else if (elem->message.data.vif_list_event.echo == + VIF_LIST_RULE_SET) { + DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0); + } +} + +/* called with rtnl_lock */ +static void bnx2x_after_function_update(struct bnx2x *bp) +{ + int q, rc; + struct bnx2x_fastpath *fp; + struct bnx2x_queue_state_params queue_params = {NULL}; + struct bnx2x_queue_update_params *q_update_params = + &queue_params.params.update; + + /* Send Q update command with afex vlan removal values for all Qs */ + queue_params.cmd = BNX2X_Q_CMD_UPDATE; + + /* set silent vlan removal values according to vlan mode */ + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, + &q_update_params->update_flags); + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, + &q_update_params->update_flags); + __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); + + /* in access mode mark mask and value are 0 to strip all vlans */ + if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) { + q_update_params->silent_removal_value = 0; + q_update_params->silent_removal_mask = 0; + } else { + q_update_params->silent_removal_value = + (bp->afex_def_vlan_tag & VLAN_VID_MASK); + q_update_params->silent_removal_mask = VLAN_VID_MASK; + } + + for_each_eth_queue(bp, q) { + /* Set the appropriate Queue object */ + fp = &bp->fp[q]; + queue_params.q_obj = &fp->q_obj; + + /* send the ramrod */ + rc = bnx2x_queue_state_change(bp, &queue_params); + if (rc < 0) + BNX2X_ERR("Failed to config silent vlan rem for Q %d\n", + q); + } + +#ifdef BCM_CNIC + if (!NO_FCOE(bp)) { + fp = &bp->fp[FCOE_IDX]; + queue_params.q_obj = &fp->q_obj; + + /* clear pending completion bit */ + __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); + + /* mark latest Q bit */ + smp_mb__before_clear_bit(); + set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); + smp_mb__after_clear_bit(); + + /* send Q update ramrod for FCoE Q */ + rc = bnx2x_queue_state_change(bp, &queue_params); + if (rc < 0) + BNX2X_ERR("Failed to config silent vlan rem for Q %d\n", + q); + } else { + /* If no FCoE ring - ACK MCP now */ + bnx2x_link_report(bp); + bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); + } +#else + /* If no FCoE ring - ACK MCP now */ + bnx2x_link_report(bp); + bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); +#endif /* BCM_CNIC */ +} + +static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( struct bnx2x *bp, u32 cid) { DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); @@ -4653,6 +4854,28 @@ static void bnx2x_eq_int(struct bnx2x *bp) break; bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); goto next_spqe; + case EVENT_RING_OPCODE_FUNCTION_UPDATE: + DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, + "AFEX: ramrod completed 
FUNCTION_UPDATE\n"); + f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_AFEX_UPDATE); + + /* We will perform the Queues update from sp_rtnl task + * as all Queue SP operations should run under + * rtnl_lock. + */ + smp_mb__before_clear_bit(); + set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, + &bp->sp_rtnl_state); + smp_mb__after_clear_bit(); + + schedule_delayed_work(&bp->sp_rtnl_task, 0); + goto next_spqe; + + case EVENT_RING_OPCODE_AFEX_VIF_LISTS: + f_obj->complete_cmd(bp, f_obj, + BNX2X_F_CMD_AFEX_VIFLISTS); + bnx2x_after_afex_vif_lists(bp, elem); + goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_START: DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, "got FUNC_START ramrod\n"); @@ -4784,6 +5007,13 @@ static void bnx2x_sp_task(struct work_struct *work) bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); + + /* afex - poll to check if VIFSET_ACK should be sent to MFW */ + if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, + &bp->sp_state)) { + bnx2x_link_report(bp); + bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); + } } irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) @@ -4870,7 +5100,7 @@ static void bnx2x_timer(unsigned long data) * nic init service functions */ -static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) +static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) { u32 i; if (!(len%4) && !(addr%4)) @@ -4883,10 +5113,10 @@ static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) } /* helper: writes FP SP data to FW - data_size in dwords */ -static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp, - int fw_sb_id, - u32 *sb_data_p, - u32 data_size) +static void bnx2x_wr_fp_sb_data(struct bnx2x *bp, + int fw_sb_id, + u32 *sb_data_p, + u32 data_size) { int index; for (index = 0; index < data_size; index++) @@ -4896,7 +5126,7 @@ static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp, *(sb_data_p + index)); } -static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) +static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) { u32 *sb_data_p; u32 data_size = 0; @@ -4929,7 +5159,7 @@ static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) } /* helper: writes SP SB data to FW */ -static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp, +static void bnx2x_wr_sp_sb_data(struct bnx2x *bp, struct hc_sp_status_block_data *sp_sb_data) { int func = BP_FUNC(bp); @@ -4941,7 +5171,7 @@ static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp, *((u32 *)sp_sb_data + i)); } -static inline void bnx2x_zero_sp_sb(struct bnx2x *bp) +static void bnx2x_zero_sp_sb(struct bnx2x *bp) { int func = BP_FUNC(bp); struct hc_sp_status_block_data sp_sb_data; @@ -4962,8 +5192,7 @@ static inline void bnx2x_zero_sp_sb(struct bnx2x *bp) } -static inline -void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, +static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, int igu_sb_id, int igu_seg_id) { hc_sm->igu_sb_id = igu_sb_id; @@ -4974,8 +5203,7 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, /* allocates state machine ids. 
*/ -static inline -void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) +static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) { /* zero out state machine indices */ /* rx indices */ @@ -5383,7 +5611,7 @@ static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp) return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT; } -static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) +static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) { if (CHIP_IS_E1x(fp->bp)) return BP_L_ID(fp->bp) + fp->index; @@ -5444,6 +5672,43 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) bnx2x_update_fpsb_idx(fp); } +static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata) +{ + int i; + + for (i = 1; i <= NUM_TX_RINGS; i++) { + struct eth_tx_next_bd *tx_next_bd = + &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; + + tx_next_bd->addr_hi = + cpu_to_le32(U64_HI(txdata->tx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); + tx_next_bd->addr_lo = + cpu_to_le32(U64_LO(txdata->tx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); + } + + SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); + txdata->tx_db.data.zero_fill1 = 0; + txdata->tx_db.data.prod = 0; + + txdata->tx_pkt_prod = 0; + txdata->tx_pkt_cons = 0; + txdata->tx_bd_prod = 0; + txdata->tx_bd_cons = 0; + txdata->tx_pkt = 0; +} + +static void bnx2x_init_tx_rings(struct bnx2x *bp) +{ + int i; + u8 cos; + + for_each_tx_queue(bp, i) + for_each_cos_in_tx_queue(&bp->fp[i], cos) + bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]); +} + void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) { int i; @@ -5968,7 +6233,7 @@ void bnx2x_pf_disable(struct bnx2x *bp) REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); } -static inline void bnx2x__common_init_phy(struct bnx2x *bp) +static void bnx2x__common_init_phy(struct bnx2x *bp) { u32 shmem_base[2], shmem2_base[2]; shmem_base[0] = bp->common.shmem_base; @@ -6255,12 +6520,24 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) if (!CHIP_IS_E1(bp)) REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); - if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) - /* Bit-map indicating which L2 hdrs may appear - * after the basic Ethernet header - */ - REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, - bp->path_has_ovlan ? 7 : 6); + if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) { + if (IS_MF_AFEX(bp)) { + /* configure that VNTag and VLAN headers must be + * received in afex mode + */ + REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE); + REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA); + REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6); + REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926); + REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4); + } else { + /* Bit-map indicating which L2 hdrs may appear + * after the basic Ethernet header + */ + REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, + bp->path_has_ovlan ? 7 : 6); + } + } bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); @@ -6294,9 +6571,21 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); - if (!CHIP_IS_E1x(bp)) - REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, - bp->path_has_ovlan ? 
7 : 6); + if (!CHIP_IS_E1x(bp)) { + if (IS_MF_AFEX(bp)) { + /* configure that VNTag and VLAN headers must be + * sent in afex mode + */ + REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE); + REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA); + REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6); + REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926); + REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4); + } else { + REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, + bp->path_has_ovlan ? 7 : 6); + } + } REG_WR(bp, SRC_REG_SOFT_RST, 1); @@ -6514,15 +6803,29 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_PRS, init_phase); - if (CHIP_IS_E3B0(bp)) - /* Ovlan exists only if we are in multi-function + - * switch-dependent mode, in switch-independent there - * is no ovlan headers - */ - REG_WR(bp, BP_PORT(bp) ? - PRS_REG_HDRS_AFTER_BASIC_PORT_1 : - PRS_REG_HDRS_AFTER_BASIC_PORT_0, - (bp->path_has_ovlan ? 7 : 6)); + if (CHIP_IS_E3B0(bp)) { + if (IS_MF_AFEX(bp)) { + /* configure headers for AFEX mode */ + REG_WR(bp, BP_PORT(bp) ? + PRS_REG_HDRS_AFTER_BASIC_PORT_1 : + PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); + REG_WR(bp, BP_PORT(bp) ? + PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : + PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); + REG_WR(bp, BP_PORT(bp) ? + PRS_REG_MUST_HAVE_HDRS_PORT_1 : + PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); + } else { + /* Ovlan exists only if we are in multi-function + + * switch-dependent mode, in switch-independent there + * is no ovlan headers + */ + REG_WR(bp, BP_PORT(bp) ? + PRS_REG_HDRS_AFTER_BASIC_PORT_1 : + PRS_REG_HDRS_AFTER_BASIC_PORT_0, + (bp->path_has_ovlan ? 7 : 6)); + } + } bnx2x_init_block(bp, BLOCK_TSDM, init_phase); bnx2x_init_block(bp, BLOCK_CSDM, init_phase); @@ -6584,10 +6887,15 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) /* Bit-map indicating which L2 hdrs may appear after the * basic Ethernet header */ - REG_WR(bp, BP_PORT(bp) ? - NIG_REG_P1_HDRS_AFTER_BASIC : - NIG_REG_P0_HDRS_AFTER_BASIC, - IS_MF_SD(bp) ? 7 : 6); + if (IS_MF_AFEX(bp)) + REG_WR(bp, BP_PORT(bp) ? + NIG_REG_P1_HDRS_AFTER_BASIC : + NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); + else + REG_WR(bp, BP_PORT(bp) ? + NIG_REG_P1_HDRS_AFTER_BASIC : + NIG_REG_P0_HDRS_AFTER_BASIC, + IS_MF_SD(bp) ? 7 : 6); if (CHIP_IS_E3(bp)) REG_WR(bp, BP_PORT(bp) ? @@ -6609,6 +6917,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) val = 1; break; case MULTI_FUNCTION_SI: + case MULTI_FUNCTION_AFEX: val = 2; break; } @@ -6640,21 +6949,71 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) { int reg; + u32 wb_write[2]; if (CHIP_IS_E1(bp)) reg = PXP2_REG_RQ_ONCHIP_AT + index*8; else reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; - bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); + wb_write[0] = ONCHIP_ADDR1(addr); + wb_write[1] = ONCHIP_ADDR2(addr); + REG_WR_DMAE(bp, reg, wb_write, 2); } -static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) +static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, + u8 idu_sb_id, bool is_Pf) +{ + u32 data, ctl, cnt = 100; + u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; + u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; + u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; + u32 sb_bit = 1 << (idu_sb_id%32); + u32 func_encode = func | (is_Pf ? 
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; + u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; + + /* Not supported in BC mode */ + if (CHIP_INT_MODE_IS_BC(bp)) + return; + + data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup + << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | + IGU_REGULAR_CLEANUP_SET | + IGU_REGULAR_BCLEANUP; + + ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | + func_encode << IGU_CTRL_REG_FID_SHIFT | + IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; + + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + data, igu_addr_data); + REG_WR(bp, igu_addr_data, data); + mmiowb(); + barrier(); + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + ctl, igu_addr_ctl); + REG_WR(bp, igu_addr_ctl, ctl); + mmiowb(); + barrier(); + + /* wait for clean up to finish */ + while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) + msleep(20); + + + if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { + DP(NETIF_MSG_HW, + "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n", + idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); + } +} + +static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) { bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); } -static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) +static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) { u32 i, base = FUNC_ILT_BASE(func); for (i = base; i < base + ILT_PER_FUNC; i++) @@ -7005,7 +7364,7 @@ void bnx2x_free_mem(struct bnx2x *bp) BCM_PAGE_SIZE * NUM_EQ_PAGES); } -static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) +static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) { int num_groups; int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1; @@ -7192,7 +7551,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) unsigned long ramrod_flags = 0; #ifdef BCM_CNIC - if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_STORAGE_SD(bp)) { + if (is_zero_ether_addr(bp->dev->dev_addr) && + (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN, "Ignoring Zero MAC for STORAGE SD mode\n"); return 0; @@ -7230,7 +7590,7 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp) BNX2X_DEV_INFO("set number of queues to 1\n"); break; default: - /* Set number of queues according to bp->multi_mode value */ + /* Set number of queues for MSI-X mode */ bnx2x_set_num_queues(bp); BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); @@ -7239,15 +7599,17 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp) * so try to enable MSI-X with the requested number of fp's * and fallback to MSI or legacy INTx with one fp */ - if (bnx2x_enable_msix(bp)) { - /* failed to enable MSI-X */ - BNX2X_DEV_INFO("Failed to enable MSI-X (%d), set number of queues to %d\n", + if (bnx2x_enable_msix(bp) || + bp->flags & USING_SINGLE_MSIX_FLAG) { + /* failed to enable multiple MSI-X */ + BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n", bp->num_queues, 1 + NON_ETH_CONTEXT_USE); bp->num_queues = 1 + NON_ETH_CONTEXT_USE; /* Try to enable MSI */ - if (!(bp->flags & DISABLE_MSI_FLAG)) + if (!(bp->flags & USING_SINGLE_MSIX_FLAG) && + !(bp->flags & DISABLE_MSI_FLAG)) bnx2x_enable_msi(bp); } break; @@ -7368,7 +7730,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) * - HC configuration * - Queue's CDU context */ -static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp, +static void bnx2x_pf_q_prep_init(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) { @@ -7718,7 +8080,7 @@ static void bnx2x_reset_port(struct bnx2x 
*bp) /* TODO: Close Doorbell port? */ } -static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) +static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) { struct bnx2x_func_state_params func_params = {NULL}; @@ -7733,7 +8095,7 @@ static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) return bnx2x_func_state_change(bp, &func_params); } -static inline int bnx2x_func_stop(struct bnx2x *bp) +static int bnx2x_func_stop(struct bnx2x *bp) { struct bnx2x_func_state_params func_params = {NULL}; int rc; @@ -7848,7 +8210,7 @@ void bnx2x_send_unload_done(struct bnx2x *bp) bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); } -static inline int bnx2x_func_wait_started(struct bnx2x *bp) +static int bnx2x_func_wait_started(struct bnx2x *bp) { int tout = 50; int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; @@ -8158,7 +8520,7 @@ static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) * * @bp: driver handle */ -static inline void bnx2x_mcp_wait_one(struct bnx2x *bp) +static void bnx2x_mcp_wait_one(struct bnx2x *bp) { /* special handling for emulation and FPGA, wait 10 times longer */ @@ -8494,7 +8856,7 @@ exit_leader_reset: return rc; } -static inline void bnx2x_recovery_failed(struct bnx2x *bp) +static void bnx2x_recovery_failed(struct bnx2x *bp) { netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n"); @@ -8727,7 +9089,8 @@ sp_rtnl_not_reset: #endif if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); - + if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state)) + bnx2x_after_function_update(bp); /* * in case of fan failure we need to reset id if the "stop on error" * debug flag is set, since we trying to prevent permanent overheating @@ -9222,6 +9585,17 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) id |= (val & 0xf); bp->common.chip_id = id; + /* force 57811 according to MISC register */ + if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { + if (CHIP_IS_57810(bp)) + bp->common.chip_id = (CHIP_NUM_57811 << 16) | + (bp->common.chip_id & 0x0000FFFF); + else if (CHIP_IS_57810_MF(bp)) + bp->common.chip_id = (CHIP_NUM_57811_MF << 16) | + (bp->common.chip_id & 0x0000FFFF); + bp->common.chip_id |= 0x1; + } + /* Set doorbell size */ bp->db_size = (1 << BNX2X_DB_SHIFT); @@ -9314,7 +9688,9 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) bp->link_params.feature_config_flags |= (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; - + bp->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ? + FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0; bp->link_params.feature_config_flags |= (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? 
FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; @@ -9946,6 +10322,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) } else bp->flags |= NO_FCOE_FLAG; + + bp->mf_ext_config = cfg; + } else { /* SD MODE */ if (IS_MF_STORAGE_SD(bp)) { if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { @@ -9967,6 +10346,11 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) memset(bp->dev->dev_addr, 0, ETH_ALEN); } } + + if (IS_MF_FCOE_AFEX(bp)) + /* use FIP MAC as primary MAC */ + memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); + #endif } else { /* in SF read MACs from port configuration */ @@ -10139,6 +10523,19 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) } else BNX2X_DEV_INFO("illegal MAC address for SI\n"); break; + case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: + if ((!CHIP_IS_E1x(bp)) && + (MF_CFG_RD(bp, func_mf_config[func]. + mac_upper) != 0xffff) && + (SHMEM2_HAS(bp, + afex_driver_support))) { + bp->mf_mode = MULTI_FUNCTION_AFEX; + bp->mf_config[vn] = MF_CFG_RD(bp, + func_mf_config[func].config); + } else { + BNX2X_DEV_INFO("can not configure afex mode\n"); + } + break; case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: /* get OV configuration */ val = MF_CFG_RD(bp, @@ -10179,6 +10576,9 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) return -EPERM; } break; + case MULTI_FUNCTION_AFEX: + BNX2X_DEV_INFO("func %d is in MF afex mode\n", func); + break; case MULTI_FUNCTION_SI: BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n", func); @@ -10346,6 +10746,9 @@ static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp) case MULTI_FUNCTION_SI: SET_FLAGS(flags, MODE_MF_SI); break; + case MULTI_FUNCTION_AFEX: + SET_FLAGS(flags, MODE_MF_AFEX); + break; } } else SET_FLAGS(flags, MODE_SF); @@ -10405,12 +10808,10 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) if (BP_NOMCP(bp) && (func == 0)) dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); - bp->multi_mode = multi_mode; - bp->disable_tpa = disable_tpa; #ifdef BCM_CNIC - bp->disable_tpa |= IS_MF_STORAGE_SD(bp); + bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp); #endif /* Set TPA flags */ @@ -10429,7 +10830,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) bp->mrrs = mrrs; - bp->tx_ring_size = MAX_TX_AVAIL; + bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL; /* make sure that the numbers are in the right granularity */ bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; @@ -10460,8 +10861,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) if (CHIP_IS_E3B0(bp)) bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; - bp->gro_check = bnx2x_need_gro_check(bp->dev->mtu); - return rc; } @@ -10551,8 +10950,8 @@ static int bnx2x_close(struct net_device *dev) return 0; } -static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p) +static int bnx2x_init_mcast_macs_list(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p) { int mc_count = netdev_mc_count(bp->dev); struct bnx2x_mcast_list_elem *mc_mac = @@ -10575,7 +10974,7 @@ static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp, return 0; } -static inline void bnx2x_free_mcast_macs_list( +static void bnx2x_free_mcast_macs_list( struct bnx2x_mcast_ramrod_params *p) { struct bnx2x_mcast_list_elem *mc_mac = @@ -10593,7 +10992,7 @@ static inline void bnx2x_free_mcast_macs_list( * * We will use zero (0) as a MAC type for these MACs. 
*/ -static inline int bnx2x_set_uc_list(struct bnx2x *bp) +static int bnx2x_set_uc_list(struct bnx2x *bp) { int rc; struct net_device *dev = bp->dev; @@ -10624,7 +11023,7 @@ static inline int bnx2x_set_uc_list(struct bnx2x *bp) BNX2X_UC_LIST_MAC, &ramrod_flags); } -static inline int bnx2x_set_mc_list(struct bnx2x *bp) +static int bnx2x_set_mc_list(struct bnx2x *bp) { struct net_device *dev = bp->dev; struct bnx2x_mcast_ramrod_params rparam = {NULL}; @@ -10810,7 +11209,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { #endif }; -static inline int bnx2x_set_coherency_mask(struct bnx2x *bp) +static int bnx2x_set_coherency_mask(struct bnx2x *bp) { struct device *dev = &bp->pdev->dev; @@ -11076,7 +11475,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp) return 0; } -static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n) +static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n) { const __be32 *source = (const __be32 *)_source; u32 *target = (u32 *)_target; @@ -11090,7 +11489,7 @@ static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n) Ops array is stored in the following format: {op(8bit), offset(24bit, big endian), data(32bit, big endian)} */ -static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) +static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) { const __be32 *source = (const __be32 *)_source; struct raw_op *target = (struct raw_op *)_target; @@ -11108,7 +11507,7 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) * IRO array is stored in the following format: * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) } */ -static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) +static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) { const __be32 *source = (const __be32 *)_source; struct iro *target = (struct iro *)_target; @@ -11128,7 +11527,7 @@ static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) } } -static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) +static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) { const __be16 *source = (const __be16 *)_source; u16 *target = (u16 *)_target; @@ -11265,11 +11664,13 @@ void bnx2x__init_func_obj(struct bnx2x *bp) bnx2x_init_func_obj(bp, &bp->func_obj, bnx2x_sp(bp, func_rdata), bnx2x_sp_mapping(bp, func_rdata), + bnx2x_sp(bp, func_afex_rdata), + bnx2x_sp_mapping(bp, func_afex_rdata), &bnx2x_func_sp_drv); } /* must be called after sriov-enable */ -static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp) +static int bnx2x_set_qm_cid_count(struct bnx2x *bp) { int cid_count = BNX2X_L2_CID_COUNT(bp); @@ -11285,7 +11686,7 @@ static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp) * @dev: pci device * */ -static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev) +static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev) { int pos; u16 control; @@ -11346,6 +11747,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, case BCM57810_MF: case BCM57840: case BCM57840_MF: + case BCM57811: + case BCM57811_MF: max_cos_est = BNX2X_MULTI_TX_COS_E3B0; break; @@ -11759,7 +12162,7 @@ module_exit(bnx2x_cleanup); * This function will wait until the ramdord completion returns. * Return 0 if success, -ENODEV if ramrod doesn't return. 
*/ -static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) +static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) { unsigned long ramrod_flags = 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index c25803b9c0c..bbd387492a8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -1483,6 +1483,11 @@ starts at 0x0 for the A0 tape-out and increments by one for each all-layer tape-out. */ #define MISC_REG_CHIP_REV 0xa40c +/* [R 14] otp_misc_do[100:0] spare bits collection: 13:11- + * otp_misc_do[100:98]; 10:7 - otp_misc_do[87:84]; 6:3 - otp_misc_do[75:72]; + * 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */ +#define MISC_REG_CHIP_TYPE 0xac60 +#define MISC_REG_CHIP_TYPE_57811_MASK (1<<1) /* [RW 32] The following driver registers(1...16) represent 16 drivers and 32 clients. Each client can be controlled by one driver only. One in each bit represent that this driver control the appropriate client (Ex: bit 5 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 51357332162..6c14b4a4e82 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -633,14 +633,17 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) } -static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp, - bool add, unsigned char *dev_addr, int index) +void bnx2x_set_mac_in_nig(struct bnx2x *bp, + bool add, unsigned char *dev_addr, int index) { u32 wb_data[2]; u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : NIG_REG_LLH0_FUNC_MEM; - if (!IS_MF_SI(bp) || index > BNX2X_LLH_CAM_MAX_PF_LINE) + if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp)) + return; + + if (index > BNX2X_LLH_CAM_MAX_PF_LINE) return; DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n", @@ -4090,12 +4093,6 @@ static int bnx2x_setup_rss(struct bnx2x *bp, rss_mode = ETH_RSS_MODE_DISABLED; else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags)) rss_mode = ETH_RSS_MODE_REGULAR; - else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags)) - rss_mode = ETH_RSS_MODE_VLAN_PRI; - else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags)) - rss_mode = ETH_RSS_MODE_E1HOV_PRI; - else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags)) - rss_mode = ETH_RSS_MODE_IP_DSCP; data->rss_mode = rss_mode; @@ -4404,6 +4401,9 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o, test_bit(BNX2X_Q_FLG_TX_SWITCH, flags); tx_data->anti_spoofing_flg = test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags); + tx_data->force_default_pri_flg = + test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags); + tx_data->tx_status_block_id = params->fw_sb_id; tx_data->tx_sb_index_number = params->sb_cq_index; tx_data->tss_leading_client_id = params->tss_leading_cl_id; @@ -5331,6 +5331,17 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, case BNX2X_F_STATE_STARTED: if (cmd == BNX2X_F_CMD_STOP) next_state = BNX2X_F_STATE_INITIALIZED; + /* afex ramrods can be sent only in started mode, and only + * if not pending for function_stop ramrod completion + * for these events - next state remained STARTED. 
+ */ + else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) && + (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) + next_state = BNX2X_F_STATE_STARTED; + + else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) && + (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) + next_state = BNX2X_F_STATE_STARTED; else if (cmd == BNX2X_F_CMD_TX_STOP) next_state = BNX2X_F_STATE_TX_STOPPED; @@ -5618,6 +5629,83 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp, U64_LO(data_mapping), NONE_CONNECTION_TYPE); } +static inline int bnx2x_func_send_afex_update(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + struct bnx2x_func_sp_obj *o = params->f_obj; + struct function_update_data *rdata = + (struct function_update_data *)o->afex_rdata; + dma_addr_t data_mapping = o->afex_rdata_mapping; + struct bnx2x_func_afex_update_params *afex_update_params = + ¶ms->params.afex_update; + + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->vif_id_change_flg = 1; + rdata->vif_id = cpu_to_le16(afex_update_params->vif_id); + rdata->afex_default_vlan_change_flg = 1; + rdata->afex_default_vlan = + cpu_to_le16(afex_update_params->afex_default_vlan); + rdata->allowed_priorities_change_flg = 1; + rdata->allowed_priorities = afex_update_params->allowed_priorities; + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). + */ + DP(BNX2X_MSG_SP, + "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n", + rdata->vif_id, + rdata->afex_default_vlan, rdata->allowed_priorities); + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, + U64_HI(data_mapping), + U64_LO(data_mapping), NONE_CONNECTION_TYPE); +} + +static +inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + struct bnx2x_func_sp_obj *o = params->f_obj; + struct afex_vif_list_ramrod_data *rdata = + (struct afex_vif_list_ramrod_data *)o->afex_rdata; + struct bnx2x_func_afex_viflists_params *afex_viflist_params = + ¶ms->params.afex_viflists; + u64 *p_rdata = (u64 *)rdata; + + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->vif_list_index = afex_viflist_params->vif_list_index; + rdata->func_bit_map = afex_viflist_params->func_bit_map; + rdata->afex_vif_list_command = + afex_viflist_params->afex_vif_list_command; + rdata->func_to_clear = afex_viflist_params->func_to_clear; + + /* send in echo type of sub command */ + rdata->echo = afex_viflist_params->afex_vif_list_command; + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). 
+ */ + + DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n", + rdata->afex_vif_list_command, rdata->vif_list_index, + rdata->func_bit_map, rdata->func_to_clear); + + /* this ramrod sends data directly and not through DMA mapping */ + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0, + U64_HI(*p_rdata), U64_LO(*p_rdata), + NONE_CONNECTION_TYPE); +} + static inline int bnx2x_func_send_stop(struct bnx2x *bp, struct bnx2x_func_state_params *params) { @@ -5669,6 +5757,10 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp, return bnx2x_func_send_stop(bp, params); case BNX2X_F_CMD_HW_RESET: return bnx2x_func_hw_reset(bp, params); + case BNX2X_F_CMD_AFEX_UPDATE: + return bnx2x_func_send_afex_update(bp, params); + case BNX2X_F_CMD_AFEX_VIFLISTS: + return bnx2x_func_send_afex_viflists(bp, params); case BNX2X_F_CMD_TX_STOP: return bnx2x_func_send_tx_stop(bp, params); case BNX2X_F_CMD_TX_START: @@ -5682,6 +5774,7 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp, void bnx2x_init_func_obj(struct bnx2x *bp, struct bnx2x_func_sp_obj *obj, void *rdata, dma_addr_t rdata_mapping, + void *afex_rdata, dma_addr_t afex_rdata_mapping, struct bnx2x_func_sp_drv_ops *drv_iface) { memset(obj, 0, sizeof(*obj)); @@ -5690,7 +5783,8 @@ void bnx2x_init_func_obj(struct bnx2x *bp, obj->rdata = rdata; obj->rdata_mapping = rdata_mapping; - + obj->afex_rdata = afex_rdata; + obj->afex_rdata_mapping = afex_rdata_mapping; obj->send_cmd = bnx2x_func_send_cmd; obj->check_transition = bnx2x_func_chk_transition; obj->complete_cmd = bnx2x_func_comp_cmd; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 61a7670adfc..efd80bdd0df 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -62,6 +62,8 @@ enum { BNX2X_FILTER_MCAST_PENDING, BNX2X_FILTER_MCAST_SCHED, BNX2X_FILTER_RSS_CONF_PENDING, + BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, + BNX2X_AFEX_PENDING_VIFSET_MCP_ACK }; struct bnx2x_raw_obj { @@ -432,6 +434,8 @@ enum { BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2 }; +void bnx2x_set_mac_in_nig(struct bnx2x *bp, + bool add, unsigned char *dev_addr, int index); /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ @@ -685,9 +689,6 @@ enum { /* RSS_MODE bits are mutually exclusive */ BNX2X_RSS_MODE_DISABLED, BNX2X_RSS_MODE_REGULAR, - BNX2X_RSS_MODE_VLAN_PRI, - BNX2X_RSS_MODE_E1HOV_PRI, - BNX2X_RSS_MODE_IP_DSCP, BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */ @@ -801,7 +802,8 @@ enum { BNX2X_Q_FLG_TX_SWITCH, BNX2X_Q_FLG_TX_SEC, BNX2X_Q_FLG_ANTI_SPOOF, - BNX2X_Q_FLG_SILENT_VLAN_REM + BNX2X_Q_FLG_SILENT_VLAN_REM, + BNX2X_Q_FLG_FORCE_DEFAULT_PRI }; /* Queue type options: queue type may be a compination of below. 
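/*
 * Sketch, not part of the patch: bnx2x_func_send_afex_viflists() above does
 * not DMA-map its ramrod data; the afex_vif_list_ramrod_data buffer is read
 * back as a single u64 and its two 32-bit halves are placed directly in the
 * address words of the SPQ element. The split it relies on is simply:
 */
#include <stdint.h>

static inline uint32_t u64_hi(uint64_t x)
{
	return (uint32_t)(x >> 32);		/* upper 32 bits */
}

static inline uint32_t u64_lo(uint64_t x)
{
	return (uint32_t)(x & 0xffffffffu);	/* lower 32 bits */
}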
*/ @@ -963,6 +965,11 @@ struct bnx2x_queue_state_params { } params; }; +struct bnx2x_viflist_params { + u8 echo_res; + u8 func_bit_map_res; +}; + struct bnx2x_queue_sp_obj { u32 cids[BNX2X_MULTI_TX_COS]; u8 cl_id; @@ -1045,6 +1052,8 @@ enum bnx2x_func_cmd { BNX2X_F_CMD_START, BNX2X_F_CMD_STOP, BNX2X_F_CMD_HW_RESET, + BNX2X_F_CMD_AFEX_UPDATE, + BNX2X_F_CMD_AFEX_VIFLISTS, BNX2X_F_CMD_TX_STOP, BNX2X_F_CMD_TX_START, BNX2X_F_CMD_MAX, @@ -1089,6 +1098,18 @@ struct bnx2x_func_start_params { u8 network_cos_mode; }; +struct bnx2x_func_afex_update_params { + u16 vif_id; + u16 afex_default_vlan; + u8 allowed_priorities; +}; + +struct bnx2x_func_afex_viflists_params { + u16 vif_list_index; + u8 func_bit_map; + u8 afex_vif_list_command; + u8 func_to_clear; +}; struct bnx2x_func_tx_start_params { struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; u8 dcb_enabled; @@ -1110,6 +1131,8 @@ struct bnx2x_func_state_params { struct bnx2x_func_hw_init_params hw_init; struct bnx2x_func_hw_reset_params hw_reset; struct bnx2x_func_start_params start; + struct bnx2x_func_afex_update_params afex_update; + struct bnx2x_func_afex_viflists_params afex_viflists; struct bnx2x_func_tx_start_params tx_start; } params; }; @@ -1154,6 +1177,13 @@ struct bnx2x_func_sp_obj { void *rdata; dma_addr_t rdata_mapping; + /* Buffer to use as a afex ramrod data and its mapping. + * This can't be same rdata as above because afex ramrod requests + * can arrive to the object in parallel to other ramrod requests. + */ + void *afex_rdata; + dma_addr_t afex_rdata_mapping; + /* this mutex validates that when pending flag is taken, the next * ramrod to be sent will be the one set the pending bit */ @@ -1197,6 +1227,7 @@ union bnx2x_qable_obj { void bnx2x_init_func_obj(struct bnx2x *bp, struct bnx2x_func_sp_obj *obj, void *rdata, dma_addr_t rdata_mapping, + void *afex_rdata, dma_addr_t afex_rdata_mapping, struct bnx2x_func_sp_drv_ops *drv_iface); int bnx2x_func_state_change(struct bnx2x *bp, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index e1c9310fb07..1e2785cd11d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -1316,7 +1316,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp) * * @param bp */ -static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) +static void bnx2x_prep_fw_stats_req(struct bnx2x *bp) { int i; int first_queue_query_index; @@ -1561,3 +1561,274 @@ void bnx2x_save_statistics(struct bnx2x *bp) UPDATE_FW_STAT_OLD(mac_discard); } } + +void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, + u32 stats_type) +{ + int i; + struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats; + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct per_queue_stats *fcoe_q_stats = + &bp->fw_stats_data->queue_stats[FCOE_IDX]; + + struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = + &fcoe_q_stats->tstorm_queue_statistics; + + struct ustorm_per_queue_stats *fcoe_q_ustorm_stats = + &fcoe_q_stats->ustorm_queue_statistics; + + struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = + &fcoe_q_stats->xstorm_queue_statistics; + + struct fcoe_statistics_params *fw_fcoe_stat = + &bp->fw_stats_data->fcoe; + + memset(afex_stats, 0, sizeof(struct afex_stats)); + + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + + ADD_64(afex_stats->rx_unicast_bytes_hi, + 
qstats->total_unicast_bytes_received_hi, + afex_stats->rx_unicast_bytes_lo, + qstats->total_unicast_bytes_received_lo); + + ADD_64(afex_stats->rx_broadcast_bytes_hi, + qstats->total_broadcast_bytes_received_hi, + afex_stats->rx_broadcast_bytes_lo, + qstats->total_broadcast_bytes_received_lo); + + ADD_64(afex_stats->rx_multicast_bytes_hi, + qstats->total_multicast_bytes_received_hi, + afex_stats->rx_multicast_bytes_lo, + qstats->total_multicast_bytes_received_lo); + + ADD_64(afex_stats->rx_unicast_frames_hi, + qstats->total_unicast_packets_received_hi, + afex_stats->rx_unicast_frames_lo, + qstats->total_unicast_packets_received_lo); + + ADD_64(afex_stats->rx_broadcast_frames_hi, + qstats->total_broadcast_packets_received_hi, + afex_stats->rx_broadcast_frames_lo, + qstats->total_broadcast_packets_received_lo); + + ADD_64(afex_stats->rx_multicast_frames_hi, + qstats->total_multicast_packets_received_hi, + afex_stats->rx_multicast_frames_lo, + qstats->total_multicast_packets_received_lo); + + /* sum to rx_frames_discarded all discraded + * packets due to size, ttl0 and checksum + */ + ADD_64(afex_stats->rx_frames_discarded_hi, + qstats->total_packets_received_checksum_discarded_hi, + afex_stats->rx_frames_discarded_lo, + qstats->total_packets_received_checksum_discarded_lo); + + ADD_64(afex_stats->rx_frames_discarded_hi, + qstats->total_packets_received_ttl0_discarded_hi, + afex_stats->rx_frames_discarded_lo, + qstats->total_packets_received_ttl0_discarded_lo); + + ADD_64(afex_stats->rx_frames_discarded_hi, + qstats->etherstatsoverrsizepkts_hi, + afex_stats->rx_frames_discarded_lo, + qstats->etherstatsoverrsizepkts_lo); + + ADD_64(afex_stats->rx_frames_dropped_hi, + qstats->no_buff_discard_hi, + afex_stats->rx_frames_dropped_lo, + qstats->no_buff_discard_lo); + + ADD_64(afex_stats->tx_unicast_bytes_hi, + qstats->total_unicast_bytes_transmitted_hi, + afex_stats->tx_unicast_bytes_lo, + qstats->total_unicast_bytes_transmitted_lo); + + ADD_64(afex_stats->tx_broadcast_bytes_hi, + qstats->total_broadcast_bytes_transmitted_hi, + afex_stats->tx_broadcast_bytes_lo, + qstats->total_broadcast_bytes_transmitted_lo); + + ADD_64(afex_stats->tx_multicast_bytes_hi, + qstats->total_multicast_bytes_transmitted_hi, + afex_stats->tx_multicast_bytes_lo, + qstats->total_multicast_bytes_transmitted_lo); + + ADD_64(afex_stats->tx_unicast_frames_hi, + qstats->total_unicast_packets_transmitted_hi, + afex_stats->tx_unicast_frames_lo, + qstats->total_unicast_packets_transmitted_lo); + + ADD_64(afex_stats->tx_broadcast_frames_hi, + qstats->total_broadcast_packets_transmitted_hi, + afex_stats->tx_broadcast_frames_lo, + qstats->total_broadcast_packets_transmitted_lo); + + ADD_64(afex_stats->tx_multicast_frames_hi, + qstats->total_multicast_packets_transmitted_hi, + afex_stats->tx_multicast_frames_lo, + qstats->total_multicast_packets_transmitted_lo); + + ADD_64(afex_stats->tx_frames_dropped_hi, + qstats->total_transmitted_dropped_packets_error_hi, + afex_stats->tx_frames_dropped_lo, + qstats->total_transmitted_dropped_packets_error_lo); + } + + /* now add FCoE statistics which are collected separately + * (both offloaded and non offloaded) + */ + if (!NO_FCOE(bp)) { + ADD_64_LE(afex_stats->rx_unicast_bytes_hi, + LE32_0, + afex_stats->rx_unicast_bytes_lo, + fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); + + ADD_64_LE(afex_stats->rx_unicast_bytes_hi, + fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, + afex_stats->rx_unicast_bytes_lo, + fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); + + ADD_64_LE(afex_stats->rx_broadcast_bytes_hi, + 
fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, + afex_stats->rx_broadcast_bytes_lo, + fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); + + ADD_64_LE(afex_stats->rx_multicast_bytes_hi, + fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, + afex_stats->rx_multicast_bytes_lo, + fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); + + ADD_64_LE(afex_stats->rx_unicast_frames_hi, + LE32_0, + afex_stats->rx_unicast_frames_lo, + fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); + + ADD_64_LE(afex_stats->rx_unicast_frames_hi, + LE32_0, + afex_stats->rx_unicast_frames_lo, + fcoe_q_tstorm_stats->rcv_ucast_pkts); + + ADD_64_LE(afex_stats->rx_broadcast_frames_hi, + LE32_0, + afex_stats->rx_broadcast_frames_lo, + fcoe_q_tstorm_stats->rcv_bcast_pkts); + + ADD_64_LE(afex_stats->rx_multicast_frames_hi, + LE32_0, + afex_stats->rx_multicast_frames_lo, + fcoe_q_tstorm_stats->rcv_ucast_pkts); + + ADD_64_LE(afex_stats->rx_frames_discarded_hi, + LE32_0, + afex_stats->rx_frames_discarded_lo, + fcoe_q_tstorm_stats->checksum_discard); + + ADD_64_LE(afex_stats->rx_frames_discarded_hi, + LE32_0, + afex_stats->rx_frames_discarded_lo, + fcoe_q_tstorm_stats->pkts_too_big_discard); + + ADD_64_LE(afex_stats->rx_frames_discarded_hi, + LE32_0, + afex_stats->rx_frames_discarded_lo, + fcoe_q_tstorm_stats->ttl0_discard); + + ADD_64_LE16(afex_stats->rx_frames_dropped_hi, + LE16_0, + afex_stats->rx_frames_dropped_lo, + fcoe_q_tstorm_stats->no_buff_discard); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fcoe_q_ustorm_stats->ucast_no_buff_pkts); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fcoe_q_ustorm_stats->mcast_no_buff_pkts); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fcoe_q_ustorm_stats->bcast_no_buff_pkts); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt); + + ADD_64_LE(afex_stats->tx_unicast_bytes_hi, + LE32_0, + afex_stats->tx_unicast_bytes_lo, + fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); + + ADD_64_LE(afex_stats->tx_unicast_bytes_hi, + fcoe_q_xstorm_stats->ucast_bytes_sent.hi, + afex_stats->tx_unicast_bytes_lo, + fcoe_q_xstorm_stats->ucast_bytes_sent.lo); + + ADD_64_LE(afex_stats->tx_broadcast_bytes_hi, + fcoe_q_xstorm_stats->bcast_bytes_sent.hi, + afex_stats->tx_broadcast_bytes_lo, + fcoe_q_xstorm_stats->bcast_bytes_sent.lo); + + ADD_64_LE(afex_stats->tx_multicast_bytes_hi, + fcoe_q_xstorm_stats->mcast_bytes_sent.hi, + afex_stats->tx_multicast_bytes_lo, + fcoe_q_xstorm_stats->mcast_bytes_sent.lo); + + ADD_64_LE(afex_stats->tx_unicast_frames_hi, + LE32_0, + afex_stats->tx_unicast_frames_lo, + fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); + + ADD_64_LE(afex_stats->tx_unicast_frames_hi, + LE32_0, + afex_stats->tx_unicast_frames_lo, + fcoe_q_xstorm_stats->ucast_pkts_sent); + + ADD_64_LE(afex_stats->tx_broadcast_frames_hi, + LE32_0, + afex_stats->tx_broadcast_frames_lo, + fcoe_q_xstorm_stats->bcast_pkts_sent); + + ADD_64_LE(afex_stats->tx_multicast_frames_hi, + LE32_0, + afex_stats->tx_multicast_frames_lo, + fcoe_q_xstorm_stats->mcast_pkts_sent); + + ADD_64_LE(afex_stats->tx_frames_dropped_hi, + LE32_0, + afex_stats->tx_frames_dropped_lo, + fcoe_q_xstorm_stats->error_drop_pkts); + } + + /* if port stats are requested, add them to the PMF + * stats, as anyway they will be accumulated by the + * 
MCP before sent to the switch + */ + if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) { + ADD_64(afex_stats->rx_frames_dropped_hi, + 0, + afex_stats->rx_frames_dropped_lo, + estats->mac_filter_discard); + ADD_64(afex_stats->rx_frames_dropped_hi, + 0, + afex_stats->rx_frames_dropped_lo, + estats->brb_truncate_discard); + ADD_64(afex_stats->rx_frames_discarded_hi, + 0, + afex_stats->rx_frames_discarded_lo, + estats->mac_discard); + } +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 2b46e1eb7fd..93e689fdfed 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -338,6 +338,18 @@ struct bnx2x_fw_port_stats_old { s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \ } while (0) +#define LE32_0 ((__force __le32) 0) +#define LE16_0 ((__force __le16) 0) + +/* The _force is for cases where high value is 0 */ +#define ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le) \ + ADD_64(s_hi, le32_to_cpu(a_hi_le), \ + s_lo, le32_to_cpu(a_lo_le)) + +#define ADD_64_LE16(s_hi, a_hi_le, s_lo, a_lo_le) \ + ADD_64(s_hi, le16_to_cpu(a_hi_le), \ + s_lo, le16_to_cpu(a_lo_le)) + /* difference = minuend - subtrahend */ #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ do { \ @@ -529,4 +541,7 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); * @bp: driver handle */ void bnx2x_save_statistics(struct bnx2x *bp); + +void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, + u32 stats_type); #endif /* BNX2X_STATS_H */ diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ceeab8e852e..d55df329017 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -5622,17 +5622,29 @@ static void tg3_tx(struct tg3_napi *tnapi) } } +static void tg3_frag_free(bool is_frag, void *data) +{ + if (is_frag) + put_page(virt_to_head_page(data)); + else + kfree(data); +} + static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) { + unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + if (!ri->data) return; pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), map_sz, PCI_DMA_FROMDEVICE); - kfree(ri->data); + tg3_frag_free(skb_size <= PAGE_SIZE, ri->data); ri->data = NULL; } + /* Returns size of skb allocated or < 0 on error. * * We only need to fill in the address because the other members @@ -5645,7 +5657,8 @@ static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 
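/*
 * Sketch, not part of the patch: tg3_rx_data_free()/tg3_frag_free() above and
 * tg3_alloc_rx_data() below decide between the page-fragment allocator and
 * kmalloc() by comparing the aligned buffer size (headroom + payload plus the
 * skb_shared_info footer) against PAGE_SIZE. The constants below are
 * illustrative stand-ins for SKB_DATA_ALIGN(), PAGE_SIZE and
 * sizeof(struct skb_shared_info).
 */
#include <stdbool.h>
#include <stddef.h>

#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((size_t)(a) - 1))
#define CACHE_ALIGN(x)		ALIGN_UP((x), 64)	/* assumed cache line */
#define EXAMPLE_PAGE_SIZE	4096
#define EXAMPLE_SHINFO_SIZE	320	/* hypothetical shared-info size */

static bool rx_buf_uses_page_frag(size_t rx_offset, size_t data_size)
{
	size_t skb_size = CACHE_ALIGN(rx_offset + data_size) +
			  CACHE_ALIGN(EXAMPLE_SHINFO_SIZE);

	return skb_size <= EXAMPLE_PAGE_SIZE;
}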
*/ static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, - u32 opaque_key, u32 dest_idx_unmasked) + u32 opaque_key, u32 dest_idx_unmasked, + unsigned int *frag_size) { struct tg3_rx_buffer_desc *desc; struct ring_info *map; @@ -5680,7 +5693,13 @@ static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, */ skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - data = kmalloc(skb_size, GFP_ATOMIC); + if (skb_size <= PAGE_SIZE) { + data = netdev_alloc_frag(skb_size); + *frag_size = skb_size; + } else { + data = kmalloc(skb_size, GFP_ATOMIC); + *frag_size = 0; + } if (!data) return -ENOMEM; @@ -5688,8 +5707,8 @@ static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, data + TG3_RX_OFFSET(tp), data_size, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(tp->pdev, mapping)) { - kfree(data); + if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) { + tg3_frag_free(skb_size <= PAGE_SIZE, data); return -EIO; } @@ -5840,18 +5859,19 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) if (len > TG3_RX_COPY_THRESH(tp)) { int skb_size; + unsigned int frag_size; skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key, - *post_ptr); + *post_ptr, &frag_size); if (skb_size < 0) goto drop_it; pci_unmap_single(tp->pdev, dma_addr, skb_size, PCI_DMA_FROMDEVICE); - skb = build_skb(data); + skb = build_skb(data, frag_size); if (!skb) { - kfree(data); + tg3_frag_free(frag_size != 0, data); goto drop_it_no_recycle; } skb_reserve(skb, TG3_RX_OFFSET(tp)); @@ -7287,7 +7307,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, /* Now allocate fresh SKBs for each rx ring. */ for (i = 0; i < tp->rx_pending; i++) { - if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { + unsigned int frag_size; + + if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i, + &frag_size) < 0) { netdev_warn(tp->dev, "Using a smaller RX standard ring. Only " "%d out of %d buffers were allocated " @@ -7319,7 +7342,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, } for (i = 0; i < tp->rx_jumbo_pending; i++) { - if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) { + unsigned int frag_size; + + if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i, + &frag_size) < 0) { netdev_warn(tp->dev, "Using a smaller RX jumbo ring. 
Only %d " "out of %d buffers were allocated " @@ -12248,6 +12274,7 @@ static const struct ethtool_ops tg3_ethtool_ops = { .get_rxfh_indir_size = tg3_get_rxfh_indir_size, .get_rxfh_indir = tg3_get_rxfh_indir, .set_rxfh_indir = tg3_set_rxfh_indir, + .get_ts_info = ethtool_op_get_ts_info, }; static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 77977d735dd..0b640fafbda 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -70,7 +70,6 @@ static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force); static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc); static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc); static void bfa_ioc_recover(struct bfa_ioc *ioc); -static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc); static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event); static void bfa_ioc_disable_comp(struct bfa_ioc *ioc); static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc); @@ -346,8 +345,6 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) switch (event) { case IOC_E_FWRSP_GETATTR: del_timer(&ioc->ioc_timer); - bfa_ioc_check_attr_wwns(ioc); - bfa_ioc_hb_monitor(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_op); break; @@ -380,6 +377,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc *ioc) { ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED); + bfa_ioc_hb_monitor(ioc); } static void @@ -1207,27 +1205,62 @@ bfa_nw_ioc_sem_release(void __iomem *sem_reg) writel(1, sem_reg); } +/* Clear fwver hdr */ +static void +bfa_ioc_fwver_clear(struct bfa_ioc *ioc) +{ + u32 pgnum, pgoff, loff = 0; + int i; + + pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); + pgoff = PSS_SMEM_PGOFF(loff); + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + + for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) { + writel(0, ioc->ioc_regs.smem_page_start + loff); + loff += sizeof(u32); + } +} + + static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc) { struct bfi_ioc_image_hdr fwhdr; - u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate); + u32 fwstate, r32; - if (fwstate == BFI_IOC_UNINIT) + /* Spin on init semaphore to serialize. */ + r32 = readl(ioc->ioc_regs.ioc_init_sem_reg); + while (r32 & 0x1) { + udelay(20); + r32 = readl(ioc->ioc_regs.ioc_init_sem_reg); + } + + fwstate = readl(ioc->ioc_regs.ioc_fwstate); + if (fwstate == BFI_IOC_UNINIT) { + writel(1, ioc->ioc_regs.ioc_init_sem_reg); return; + } bfa_nw_ioc_fwver_get(ioc, &fwhdr); - if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) + if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) { + writel(1, ioc->ioc_regs.ioc_init_sem_reg); return; + } + bfa_ioc_fwver_clear(ioc); writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); + writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); /* * Try to lock and then unlock the semaphore. 
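/*
 * Sketch, not part of the patch: bfa_ioc_hw_sem_init() above serializes the
 * one-time firmware-state cleanup between PCI functions by spinning on a
 * hardware init semaphore register (bit 0 busy) and writing 1 to release it
 * on every exit path. The mock register below is hypothetical and only shows
 * the wait/release shape; the real driver uses readl()/writel() and udelay().
 */
#include <stdint.h>

static void short_delay(void)
{
	/* stand-in for udelay(20) between polls */
}

/* busy-wait until the semaphore register no longer reads back as busy */
static void init_sem_wait(volatile uint32_t *sem_reg)
{
	while (*sem_reg & 0x1)
		short_delay();
}

/* writing 1 releases the semaphore, as done on each return path above */
static void init_sem_release(volatile uint32_t *sem_reg)
{
	*sem_reg = 1;
}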
*/ readl(ioc->ioc_regs.ioc_sem_reg); writel(1, ioc->ioc_regs.ioc_sem_reg); + + /* Unlock init semaphore */ + writel(1, ioc->ioc_regs.ioc_init_sem_reg); } static void @@ -1585,11 +1618,6 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, u32 i; u32 asicmode; - /** - * Initialize LMEM first before code download - */ - bfa_ioc_lmem_init(ioc); - fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno); pgnum = bfa_ioc_smem_pgnum(ioc, loff); @@ -1914,6 +1942,10 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc) bfa_ioc_pll_init_asic(ioc); ioc->pllinit = true; + + /* Initialize LMEM */ + bfa_ioc_lmem_init(ioc); + /* * release semaphore. */ @@ -2513,13 +2545,6 @@ bfa_ioc_recover(struct bfa_ioc *ioc) bfa_fsm_send_event(ioc, IOC_E_HBFAIL); } -static void -bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc) -{ - if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) - return; -} - /** * @dg hal_iocpf_pvt BFA IOC PF private functions * @{ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c index 348479bbfa3..b6b036a143a 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c @@ -199,9 +199,9 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc) * Host to LPU mailbox message addresses */ static const struct { - u32 hfn_mbox; - u32 lpu_mbox; - u32 hfn_pgn; + u32 hfn_mbox; + u32 lpu_mbox; + u32 hfn_pgn; } ct_fnreg[] = { { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, @@ -803,17 +803,72 @@ bfa_ioc_ct2_mac_reset(void __iomem *rb) } #define CT2_NFC_MAX_DELAY 1000 +#define CT2_NFC_VER_VALID 0x143 +#define BFA_IOC_PLL_POLL 1000000 + +static bool +bfa_ioc_ct2_nfc_halted(void __iomem *rb) +{ + volatile u32 r32; + + r32 = readl(rb + CT2_NFC_CSR_SET_REG); + if (r32 & __NFC_CONTROLLER_HALTED) + return true; + + return false; +} + +static void +bfa_ioc_ct2_nfc_resume(void __iomem *rb) +{ + volatile u32 r32; + int i; + + writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG); + for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { + r32 = readl(rb + CT2_NFC_CSR_SET_REG); + if (!(r32 & __NFC_CONTROLLER_HALTED)) + return; + udelay(1000); + } + BUG_ON(1); +} + static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) { volatile u32 wgn, r32; - int i; + u32 nfc_ver, i; - /* - * Initialize PLL if not already done by NFC - */ wgn = readl(rb + CT2_WGN_STATUS); - if (!(wgn & __GLBL_PF_VF_CFG_RDY)) { + + nfc_ver = readl(rb + CT2_RSC_GPR15_REG); + + if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) && + (nfc_ver >= CT2_NFC_VER_VALID)) { + if (bfa_ioc_ct2_nfc_halted(rb)) + bfa_ioc_ct2_nfc_resume(rb); + writel(__RESET_AND_START_SCLK_LCLK_PLLS, + rb + CT2_CSI_FW_CTL_SET_REG); + + for (i = 0; i < BFA_IOC_PLL_POLL; i++) { + r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); + if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS) + break; + } + BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)); + + for (i = 0; i < BFA_IOC_PLL_POLL; i++) { + r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); + if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)) + break; + } + BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); + udelay(1000); + + r32 = readl(rb + CT2_CSI_FW_CTL_REG); + BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); + } else { writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG)); for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { r32 = readl(rb + CT2_NFC_CSR_SET_REG); @@ -821,53 +876,48 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) break; udelay(1000); } + 
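The bfa_ioc_hw_sem_init() change above serializes the uninit/fwver-clear sequence between the two PCI functions by spinning on a dedicated init-semaphore register and releasing it on every exit path. A minimal sketch of that pattern, assuming a semaphore register where bit 0 set means "busy" and writing 1 releases it (the names below are illustrative placeholders, not the driver's actual register map):

	static void hw_init_serialized(void __iomem *init_sem_reg)
	{
		u32 r32;

		/* Busy-wait until the hardware semaphore reads back as free. */
		r32 = readl(init_sem_reg);
		while (r32 & 0x1) {
			udelay(20);
			r32 = readl(init_sem_reg);
		}

		/* ... one-time init done under the semaphore, e.g. clearing
		 * the firmware version header and forcing BFI_IOC_UNINIT ... */

		/* Release so the peer function's init can proceed. */
		writel(1, init_sem_reg);
	}

Note that in the hunk above every early return also writes 1 to the init semaphore before bailing out, so a firmware already in the expected state never leaves the other function blocked.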
+ bfa_ioc_ct2_mac_reset(rb); + bfa_ioc_ct2_sclk_init(rb); + bfa_ioc_ct2_lclk_init(rb); + + /* release soft reset on s_clk & l_clk */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, + rb + CT2_APP_PLL_SCLK_CTL_REG); + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, + rb + CT2_APP_PLL_LCLK_CTL_REG); + } + + /* Announce flash device presence, if flash was corrupted. */ + if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { + r32 = readl((rb + PSS_GPIO_OUT_REG)); + writel(r32 & ~1, rb + PSS_GPIO_OUT_REG); + r32 = readl((rb + PSS_GPIO_OE_REG)); + writel(r32 | 1, rb + PSS_GPIO_OE_REG); } /* * Mask the interrupts and clear any * pending interrupts left by BIOS/EFI */ - writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); - r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); - if (r32 == 1) { - writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT)); - readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); - } - r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); - if (r32 == 1) { - writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT)); - readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); - } - - bfa_ioc_ct2_mac_reset(rb); - bfa_ioc_ct2_sclk_init(rb); - bfa_ioc_ct2_lclk_init(rb); - - /* - * release soft reset on s_clk & l_clk - */ - r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); - writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET), - (rb + CT2_APP_PLL_SCLK_CTL_REG)); - - /* - * release soft reset on s_clk & l_clk - */ - r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); - writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, - (rb + CT2_APP_PLL_LCLK_CTL_REG)); - - /* - * Announce flash device presence, if flash was corrupted. - */ - if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { - r32 = readl((rb + PSS_GPIO_OUT_REG)); - writel((r32 & ~1), (rb + PSS_GPIO_OUT_REG)); - r32 = readl((rb + PSS_GPIO_OE_REG)); - writel((r32 | 1), (rb + PSS_GPIO_OE_REG)); + /* For first time initialization, no need to clear interrupts */ + r32 = readl(rb + HOST_SEM5_REG); + if (r32 & 0x1) { + r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); + if (r32 == 1) { + writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT)); + readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); + } + r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); + if (r32 == 1) { + writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT)); + readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); + } } bfa_ioc_ct2_mem_init(rb); diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h index efacff3ab51..0e094fe46df 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_reg.h +++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h @@ -339,10 +339,16 @@ enum { #define __A2T_AHB_LOAD 0x00000800 #define __WGN_READY 0x00000400 #define __GLBL_PF_VF_CFG_RDY 0x00000200 +#define CT2_NFC_CSR_CLR_REG 0x00027420 #define CT2_NFC_CSR_SET_REG 0x00027424 #define __HALT_NFC_CONTROLLER 0x00000002 #define __NFC_CONTROLLER_HALTED 0x00001000 +#define CT2_RSC_GPR15_REG 0x0002765c +#define CT2_CSI_FW_CTL_REG 0x00027080 +#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000 +#define CT2_CSI_FW_CTL_SET_REG 0x00027088 + #define CT2_CSI_MAC0_CONTROL_REG 0x000270d0 #define __CSI_MAC_RESET 0x00000010 #define __CSI_MAC_AHB_RESET 0x00000008 diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index ff78f770dec..25c4e7f2a09 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -80,8 +80,6 @@ do { \ (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \ } while (0) -#define 
BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */ - static void bnad_add_to_list(struct bnad *bnad) { @@ -103,7 +101,7 @@ bnad_remove_from_list(struct bnad *bnad) * Reinitialize completions in CQ, once Rx is taken down */ static void -bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb) +bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb) { struct bna_cq_entry *cmpl, *next_cmpl; unsigned int wi_range, wis = 0, ccb_prod = 0; @@ -141,7 +139,8 @@ bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array, for (j = 0; j < frag; j++) { dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr), - skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE); + skb_frag_size(&skb_shinfo(skb)->frags[j]), + DMA_TO_DEVICE); dma_unmap_addr_set(&array[index], dma_addr, 0); BNA_QE_INDX_ADD(index, 1, depth); } @@ -155,7 +154,7 @@ bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array, * so DMA unmap & freeing is fine. */ static void -bnad_free_all_txbufs(struct bnad *bnad, +bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb) { u32 unmap_cons; @@ -183,13 +182,12 @@ bnad_free_all_txbufs(struct bnad *bnad, /* Data Path Handlers */ /* - * bnad_free_txbufs : Frees the Tx bufs on Tx completion + * bnad_txcmpl_process : Frees the Tx bufs on Tx completion * Can be called in a) Interrupt context * b) Sending context - * c) Tasklet context */ static u32 -bnad_free_txbufs(struct bnad *bnad, +bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb) { u32 unmap_cons, sent_packets = 0, sent_bytes = 0; @@ -198,13 +196,7 @@ bnad_free_txbufs(struct bnad *bnad, struct bnad_skb_unmap *unmap_array; struct sk_buff *skb; - /* - * Just return if TX is stopped. This check is useful - * when bnad_free_txbufs() runs out of a tasklet scheduled - * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit - * but this routine runs actually after the cleanup has been - * executed. - */ + /* Just return if TX is stopped */ if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) return 0; @@ -243,57 +235,8 @@ bnad_free_txbufs(struct bnad *bnad, return sent_packets; } -/* Tx Free Tasklet function */ -/* Frees for all the tcb's in all the Tx's */ -/* - * Scheduled from sending context, so that - * the fat Tx lock is not held for too long - * in the sending context. - */ -static void -bnad_tx_free_tasklet(unsigned long bnad_ptr) -{ - struct bnad *bnad = (struct bnad *)bnad_ptr; - struct bna_tcb *tcb; - u32 acked = 0; - int i, j; - - for (i = 0; i < bnad->num_tx; i++) { - for (j = 0; j < bnad->num_txq_per_tx; j++) { - tcb = bnad->tx_info[i].tcb[j]; - if (!tcb) - continue; - if (((u16) (*tcb->hw_consumer_index) != - tcb->consumer_index) && - (!test_and_set_bit(BNAD_TXQ_FREE_SENT, - &tcb->flags))) { - acked = bnad_free_txbufs(bnad, tcb); - if (likely(test_bit(BNAD_TXQ_TX_STARTED, - &tcb->flags))) - bna_ib_ack(tcb->i_dbell, acked); - smp_mb__before_clear_bit(); - clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); - } - if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, - &tcb->flags))) - continue; - if (netif_queue_stopped(bnad->netdev)) { - if (acked && netif_carrier_ok(bnad->netdev) && - BNA_QE_FREE_CNT(tcb, tcb->q_depth) >= - BNAD_NETIF_WAKE_THRESHOLD) { - netif_wake_queue(bnad->netdev); - /* TODO */ - /* Counters for individual TxQs? 
*/ - BNAD_UPDATE_CTR(bnad, - netif_queue_wakeup); - } - } - } - } -} - static u32 -bnad_tx(struct bnad *bnad, struct bna_tcb *tcb) +bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb) { struct net_device *netdev = bnad->netdev; u32 sent = 0; @@ -301,7 +244,7 @@ bnad_tx(struct bnad *bnad, struct bna_tcb *tcb) if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) return 0; - sent = bnad_free_txbufs(bnad, tcb); + sent = bnad_txcmpl_process(bnad, tcb); if (sent) { if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) && @@ -330,13 +273,13 @@ bnad_msix_tx(int irq, void *data) struct bna_tcb *tcb = (struct bna_tcb *)data; struct bnad *bnad = tcb->bnad; - bnad_tx(bnad, tcb); + bnad_tx_complete(bnad, tcb); return IRQ_HANDLED; } static void -bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb) +bnad_rcb_cleanup(struct bnad *bnad, struct bna_rcb *rcb) { struct bnad_unmap_q *unmap_q = rcb->unmap_q; @@ -348,7 +291,7 @@ bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb) } static void -bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) +bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb) { struct bnad_unmap_q *unmap_q; struct bnad_skb_unmap *unmap_array; @@ -369,11 +312,11 @@ bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) DMA_FROM_DEVICE); dev_kfree_skb(skb); } - bnad_reset_rcb(bnad, rcb); + bnad_rcb_cleanup(bnad, rcb); } static void -bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) +bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb) { u16 to_alloc, alloced, unmap_prod, wi_range; struct bnad_unmap_q *unmap_q = rcb->unmap_q; @@ -434,14 +377,14 @@ bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb) if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) { if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth) >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT) - bnad_alloc_n_post_rxbufs(bnad, rcb); + bnad_rxq_post(bnad, rcb); smp_mb__before_clear_bit(); clear_bit(BNAD_RXQ_REFILL, &rcb->flags); } } static u32 -bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget) +bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) { struct bna_cq_entry *cmpl, *next_cmpl; struct bna_rcb *rcb = NULL; @@ -453,12 +396,8 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget) struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); - set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags); - - if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) { - clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags); + if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) return 0; - } prefetch(bnad->netdev); BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl, @@ -533,9 +472,8 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget) if (skb->ip_summed == CHECKSUM_UNNECESSARY) napi_gro_receive(&rx_ctrl->napi, skb); - else { + else netif_receive_skb(skb); - } next: cmpl->valid = 0; @@ -646,7 +584,7 @@ bnad_isr(int irq, void *data) for (j = 0; j < bnad->num_txq_per_tx; j++) { tcb = bnad->tx_info[i].tcb[j]; if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) - bnad_tx(bnad, bnad->tx_info[i].tcb[j]); + bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]); } } /* Rx processing */ @@ -839,20 +777,9 @@ bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb) { struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tcb->txq->tx->priv; - struct bnad_unmap_q *unmap_q = tcb->unmap_q; - - while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) - cpu_relax(); - - bnad_free_all_txbufs(bnad, tcb); - - 
unmap_q->producer_index = 0; - unmap_q->consumer_index = 0; - - smp_mb__before_clear_bit(); - clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); tx_info->tcb[tcb->id] = NULL; + tcb->priv = NULL; } static void @@ -866,12 +793,6 @@ bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb) } static void -bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb) -{ - bnad_free_all_rxbufs(bnad, rcb); -} - -static void bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb) { struct bnad_rx_info *rx_info = @@ -916,7 +837,6 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx) { struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv; struct bna_tcb *tcb; - struct bnad_unmap_q *unmap_q; u32 txq_id; int i; @@ -926,23 +846,9 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx) continue; txq_id = tcb->id; - unmap_q = tcb->unmap_q; - - if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) - continue; - - while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) - cpu_relax(); - - bnad_free_all_txbufs(bnad, tcb); - - unmap_q->producer_index = 0; - unmap_q->consumer_index = 0; - - smp_mb__before_clear_bit(); - clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); - + BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)); set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); + BUG_ON(*(tcb->hw_consumer_index) != 0); if (netif_carrier_ok(bnad->netdev)) { printk(KERN_INFO "bna: %s %d TXQ_STARTED\n", @@ -963,6 +869,54 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx) } } +/* + * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm. + */ +static void +bnad_tx_cleanup(struct delayed_work *work) +{ + struct bnad_tx_info *tx_info = + container_of(work, struct bnad_tx_info, tx_cleanup_work); + struct bnad *bnad = NULL; + struct bnad_unmap_q *unmap_q; + struct bna_tcb *tcb; + unsigned long flags; + uint32_t i, pending = 0; + + for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { + tcb = tx_info->tcb[i]; + if (!tcb) + continue; + + bnad = tcb->bnad; + + if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { + pending++; + continue; + } + + bnad_txq_cleanup(bnad, tcb); + + unmap_q = tcb->unmap_q; + unmap_q->producer_index = 0; + unmap_q->consumer_index = 0; + + smp_mb__before_clear_bit(); + clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); + } + + if (pending) { + queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, + msecs_to_jiffies(1)); + return; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_tx_cleanup_complete(tx_info->tx); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + + static void bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx) { @@ -976,8 +930,7 @@ bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx) continue; } - mdelay(BNAD_TXRX_SYNC_MDELAY); - bna_tx_cleanup_complete(tx); + queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0); } static void @@ -1001,6 +954,44 @@ bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx) } } +/* + * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm. 
+ */ +static void +bnad_rx_cleanup(void *work) +{ + struct bnad_rx_info *rx_info = + container_of(work, struct bnad_rx_info, rx_cleanup_work); + struct bnad_rx_ctrl *rx_ctrl; + struct bnad *bnad = NULL; + unsigned long flags; + uint32_t i; + + for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { + rx_ctrl = &rx_info->rx_ctrl[i]; + + if (!rx_ctrl->ccb) + continue; + + bnad = rx_ctrl->ccb->bnad; + + /* + * Wait till the poll handler has exited + * and nothing can be scheduled anymore + */ + napi_disable(&rx_ctrl->napi); + + bnad_cq_cleanup(bnad, rx_ctrl->ccb); + bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]); + if (rx_ctrl->ccb->rcb[1]) + bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]); + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_rx_cleanup_complete(rx_info->rx); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + static void bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx) { @@ -1009,8 +1000,6 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx) struct bnad_rx_ctrl *rx_ctrl; int i; - mdelay(BNAD_TXRX_SYNC_MDELAY); - for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { rx_ctrl = &rx_info->rx_ctrl[i]; ccb = rx_ctrl->ccb; @@ -1021,12 +1010,9 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx) if (ccb->rcb[1]) clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags); - - while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags)) - cpu_relax(); } - bna_rx_cleanup_complete(rx); + queue_work(bnad->work_q, &rx_info->rx_cleanup_work); } static void @@ -1046,13 +1032,12 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx) if (!ccb) continue; - bnad_cq_cmpl_init(bnad, ccb); + napi_enable(&rx_ctrl->napi); for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) { rcb = ccb->rcb[j]; if (!rcb) continue; - bnad_free_all_rxbufs(bnad, rcb); set_bit(BNAD_RXQ_STARTED, &rcb->flags); set_bit(BNAD_RXQ_POST_OK, &rcb->flags); @@ -1063,7 +1048,7 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx) if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) { if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth) >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT) - bnad_alloc_n_post_rxbufs(bnad, rcb); + bnad_rxq_post(bnad, rcb); smp_mb__before_clear_bit(); clear_bit(BNAD_RXQ_REFILL, &rcb->flags); } @@ -1687,7 +1672,7 @@ bnad_napi_poll_rx(struct napi_struct *napi, int budget) if (!netif_carrier_ok(bnad->netdev)) goto poll_exit; - rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget); + rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget); if (rcvd >= budget) return rcvd; @@ -1704,7 +1689,7 @@ poll_exit: #define BNAD_NAPI_POLL_QUOTA 64 static void -bnad_napi_init(struct bnad *bnad, u32 rx_id) +bnad_napi_add(struct bnad *bnad, u32 rx_id) { struct bnad_rx_ctrl *rx_ctrl; int i; @@ -1718,34 +1703,18 @@ bnad_napi_init(struct bnad *bnad, u32 rx_id) } static void -bnad_napi_enable(struct bnad *bnad, u32 rx_id) -{ - struct bnad_rx_ctrl *rx_ctrl; - int i; - - /* Initialize & enable NAPI */ - for (i = 0; i < bnad->num_rxp_per_rx; i++) { - rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; - - napi_enable(&rx_ctrl->napi); - } -} - -static void -bnad_napi_disable(struct bnad *bnad, u32 rx_id) +bnad_napi_delete(struct bnad *bnad, u32 rx_id) { int i; /* First disable and then clean up */ - for (i = 0; i < bnad->num_rxp_per_rx; i++) { - napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi); + for (i = 0; i < bnad->num_rxp_per_rx; i++) netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi); - } } /* Should be held with conf_lock held */ void -bnad_cleanup_tx(struct bnad *bnad, u32 tx_id) +bnad_destroy_tx(struct bnad *bnad, u32 tx_id) { struct bnad_tx_info *tx_info = 
&bnad->tx_info[tx_id]; struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; @@ -1764,9 +1733,6 @@ bnad_cleanup_tx(struct bnad *bnad, u32 tx_id) bnad_tx_msix_unregister(bnad, tx_info, bnad->num_txq_per_tx); - if (0 == tx_id) - tasklet_kill(&bnad->tx_free_tasklet); - spin_lock_irqsave(&bnad->bna_lock, flags); bna_tx_destroy(tx_info->tx); spin_unlock_irqrestore(&bnad->bna_lock, flags); @@ -1832,6 +1798,9 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id) goto err_return; tx_info->tx = tx; + INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, + (work_func_t)bnad_tx_cleanup); + /* Register ISR for the Tx object */ if (intr_info->intr_type == BNA_INTR_T_MSIX) { err = bnad_tx_msix_register(bnad, tx_info, @@ -1896,7 +1865,7 @@ bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id) /* Called with mutex_lock(&bnad->conf_mutex) held */ void -bnad_cleanup_rx(struct bnad *bnad, u32 rx_id) +bnad_destroy_rx(struct bnad *bnad, u32 rx_id) { struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; @@ -1928,7 +1897,7 @@ bnad_cleanup_rx(struct bnad *bnad, u32 rx_id) if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX) bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths); - bnad_napi_disable(bnad, rx_id); + bnad_napi_delete(bnad, rx_id); spin_lock_irqsave(&bnad->bna_lock, flags); bna_rx_destroy(rx_info->rx); @@ -1952,7 +1921,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id) struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; static const struct bna_rx_event_cbfn rx_cbfn = { .rcb_setup_cbfn = bnad_cb_rcb_setup, - .rcb_destroy_cbfn = bnad_cb_rcb_destroy, + .rcb_destroy_cbfn = NULL, .ccb_setup_cbfn = bnad_cb_ccb_setup, .ccb_destroy_cbfn = bnad_cb_ccb_destroy, .rx_stall_cbfn = bnad_cb_rx_stall, @@ -1998,11 +1967,14 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id) rx_info->rx = rx; spin_unlock_irqrestore(&bnad->bna_lock, flags); + INIT_WORK(&rx_info->rx_cleanup_work, + (work_func_t)(bnad_rx_cleanup)); + /* * Init NAPI, so that state is set to NAPI_STATE_SCHED, * so that IRQ handler cannot schedule NAPI at this point. 
*/ - bnad_napi_init(bnad, rx_id); + bnad_napi_add(bnad, rx_id); /* Register ISR for the Rx object */ if (intr_info->intr_type == BNA_INTR_T_MSIX) { @@ -2028,13 +2000,10 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id) bna_rx_enable(rx); spin_unlock_irqrestore(&bnad->bna_lock, flags); - /* Enable scheduling of NAPI */ - bnad_napi_enable(bnad, rx_id); - return 0; err_return: - bnad_cleanup_rx(bnad, rx_id); + bnad_destroy_rx(bnad, rx_id); return err; } @@ -2519,7 +2488,7 @@ bnad_open(struct net_device *netdev) return 0; cleanup_tx: - bnad_cleanup_tx(bnad, 0); + bnad_destroy_tx(bnad, 0); err_return: mutex_unlock(&bnad->conf_mutex); @@ -2546,8 +2515,8 @@ bnad_stop(struct net_device *netdev) wait_for_completion(&bnad->bnad_completions.enet_comp); - bnad_cleanup_tx(bnad, 0); - bnad_cleanup_rx(bnad, 0); + bnad_destroy_tx(bnad, 0); + bnad_destroy_rx(bnad, 0); /* Synchronize mailbox IRQ */ bnad_mbox_irq_sync(bnad); @@ -2620,7 +2589,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index && !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { - acked = bnad_free_txbufs(bnad, tcb); + acked = bnad_txcmpl_process(bnad, tcb); if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) bna_ib_ack(tcb->i_dbell, acked); smp_mb__before_clear_bit(); @@ -2843,9 +2812,6 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) bna_txq_prod_indx_doorbell(tcb); smp_mb(); - if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index) - tasklet_schedule(&bnad->tx_free_tasklet); - return NETDEV_TX_OK; } @@ -3127,8 +3093,8 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac) /* * 1. Initialize the bnad structure * 2. Setup netdev pointer in pci_dev - * 3. Initialze Tx free tasklet - * 4. Initialize no. of TxQ & CQs & MSIX vectors + * 3. Initialize no. of TxQ & CQs & MSIX vectors + * 4. Initialize work queue. 
*/ static int bnad_init(struct bnad *bnad, @@ -3171,8 +3137,11 @@ bnad_init(struct bnad *bnad, bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO; bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO; - tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet, - (unsigned long)bnad); + sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id); + bnad->work_q = create_singlethread_workqueue(bnad->wq_name); + + if (!bnad->work_q) + return -ENOMEM; return 0; } @@ -3185,6 +3154,12 @@ bnad_init(struct bnad *bnad, static void bnad_uninit(struct bnad *bnad) { + if (bnad->work_q) { + flush_workqueue(bnad->work_q); + destroy_workqueue(bnad->work_q); + bnad->work_q = NULL; + } + if (bnad->bar0) iounmap(bnad->bar0); pci_set_drvdata(bnad->pcidev, NULL); @@ -3304,7 +3279,6 @@ bnad_pci_probe(struct pci_dev *pdev, /* * Initialize bnad structure * Setup relation between pci_dev & netdev - * Init Tx free tasklet */ err = bnad_init(bnad, pdev, netdev); if (err) diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index 55824d92699..72742be1127 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -71,7 +71,7 @@ struct bnad_rx_ctrl { #define BNAD_NAME "bna" #define BNAD_NAME_LEN 64 -#define BNAD_VERSION "3.0.2.2" +#define BNAD_VERSION "3.0.23.0" #define BNAD_MAILBOX_MSIX_INDEX 0 #define BNAD_MAILBOX_MSIX_VECTORS 1 @@ -210,6 +210,7 @@ struct bnad_tx_info { struct bna_tx *tx; /* 1:1 between tx_info & tx */ struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX]; u32 tx_id; + struct delayed_work tx_cleanup_work; } ____cacheline_aligned; struct bnad_rx_info { @@ -217,6 +218,7 @@ struct bnad_rx_info { struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX]; u32 rx_id; + struct work_struct rx_cleanup_work; } ____cacheline_aligned; /* Unmap queues for Tx / Rx cleanup */ @@ -318,7 +320,7 @@ struct bnad { /* Burnt in MAC address */ mac_t perm_addr; - struct tasklet_struct tx_free_tasklet; + struct workqueue_struct *work_q; /* Statistics */ struct bnad_stats stats; @@ -328,6 +330,7 @@ struct bnad { char adapter_name[BNAD_NAME_LEN]; char port_name[BNAD_NAME_LEN]; char mbox_irq_name[BNAD_NAME_LEN]; + char wq_name[BNAD_NAME_LEN]; /* debugfs specific data */ char *regdata; @@ -370,8 +373,8 @@ extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad); extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id); extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id); -extern void bnad_cleanup_tx(struct bnad *bnad, u32 tx_id); -extern void bnad_cleanup_rx(struct bnad *bnad, u32 rx_id); +extern void bnad_destroy_tx(struct bnad *bnad, u32 tx_id); +extern void bnad_destroy_rx(struct bnad *bnad, u32 rx_id); /* Timer start/stop protos */ extern void bnad_dim_timer_start(struct bnad *bnad); diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index ab753d7334a..40e1e84f498 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -464,7 +464,7 @@ bnad_set_ringparam(struct net_device *netdev, for (i = 0; i < bnad->num_rx; i++) { if (!bnad->rx_info[i].rx) continue; - bnad_cleanup_rx(bnad, i); + bnad_destroy_rx(bnad, i); current_err = bnad_setup_rx(bnad, i); if (current_err && !err) err = current_err; @@ -492,7 +492,7 @@ bnad_set_ringparam(struct net_device *netdev, for (i = 0; i < bnad->num_tx; i++) { if (!bnad->tx_info[i].tx) continue; - bnad_cleanup_tx(bnad, i); + bnad_destroy_tx(bnad, i); current_err = bnad_setup_tx(bnad, i); if (current_err && !err) 
err = current_err; @@ -539,7 +539,7 @@ bnad_set_pauseparam(struct net_device *netdev, } static void -bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string) +bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string) { struct bnad *bnad = netdev_priv(netdev); int i, j, q_num; diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c index 906117016fc..77884191a8c 100644 --- a/drivers/net/ethernet/cadence/at91_ether.c +++ b/drivers/net/ethernet/cadence/at91_ether.c @@ -30,6 +30,7 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/gfp.h> +#include <linux/phy.h> #include <asm/io.h> #include <asm/uaccess.h> @@ -51,21 +52,17 @@ /* * Read from a EMAC register. */ -static inline unsigned long at91_emac_read(unsigned int reg) +static inline unsigned long at91_emac_read(struct at91_private *lp, unsigned int reg) { - void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC; - - return __raw_readl(emac_base + reg); + return __raw_readl(lp->emac_base + reg); } /* * Write to a EMAC register. */ -static inline void at91_emac_write(unsigned int reg, unsigned long value) +static inline void at91_emac_write(struct at91_private *lp, unsigned int reg, unsigned long value) { - void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC; - - __raw_writel(value, emac_base + reg); + __raw_writel(value, lp->emac_base + reg); } /* ........................... PHY INTERFACE ........................... */ @@ -75,32 +72,33 @@ static inline void at91_emac_write(unsigned int reg, unsigned long value) * When not called from an interrupt-handler, access to the PHY must be * protected by a spinlock. */ -static void enable_mdi(void) +static void enable_mdi(struct at91_private *lp) { unsigned long ctl; - ctl = at91_emac_read(AT91_EMAC_CTL); - at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_MPE); /* enable management port */ + ctl = at91_emac_read(lp, AT91_EMAC_CTL); + at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_MPE); /* enable management port */ } /* * Disable the MDIO bit in the MAC control register */ -static void disable_mdi(void) +static void disable_mdi(struct at91_private *lp) { unsigned long ctl; - ctl = at91_emac_read(AT91_EMAC_CTL); - at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE); /* disable management port */ + ctl = at91_emac_read(lp, AT91_EMAC_CTL); + at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE); /* disable management port */ } /* * Wait until the PHY operation is complete. */ -static inline void at91_phy_wait(void) { +static inline void at91_phy_wait(struct at91_private *lp) +{ unsigned long timeout = jiffies + 2; - while (!(at91_emac_read(AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) { + while (!(at91_emac_read(lp, AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) { if (time_after(jiffies, timeout)) { printk("at91_ether: MIO timeout\n"); break; @@ -113,28 +111,28 @@ static inline void at91_phy_wait(void) { * Write value to the a PHY register * Note: MDI interface is assumed to already have been enabled. 
*/ -static void write_phy(unsigned char phy_addr, unsigned char address, unsigned int value) +static void write_phy(struct at91_private *lp, unsigned char phy_addr, unsigned char address, unsigned int value) { - at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W + at91_emac_write(lp, AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W | ((phy_addr & 0x1f) << 23) | (address << 18) | (value & AT91_EMAC_DATA)); /* Wait until IDLE bit in Network Status register is cleared */ - at91_phy_wait(); + at91_phy_wait(lp); } /* * Read value stored in a PHY register. * Note: MDI interface is assumed to already have been enabled. */ -static void read_phy(unsigned char phy_addr, unsigned char address, unsigned int *value) +static void read_phy(struct at91_private *lp, unsigned char phy_addr, unsigned char address, unsigned int *value) { - at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R + at91_emac_write(lp, AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R | ((phy_addr & 0x1f) << 23) | (address << 18)); /* Wait until IDLE bit in Network Status register is cleared */ - at91_phy_wait(); + at91_phy_wait(lp); - *value = at91_emac_read(AT91_EMAC_MAN) & AT91_EMAC_DATA; + *value = at91_emac_read(lp, AT91_EMAC_MAN) & AT91_EMAC_DATA; } /* ........................... PHY MANAGEMENT .......................... */ @@ -158,13 +156,13 @@ static void update_linkspeed(struct net_device *dev, int silent) } /* Link up, or auto-negotiation still in progress */ - read_phy(lp->phy_address, MII_BMSR, &bmsr); - read_phy(lp->phy_address, MII_BMCR, &bmcr); + read_phy(lp, lp->phy_address, MII_BMSR, &bmsr); + read_phy(lp, lp->phy_address, MII_BMCR, &bmcr); if (bmcr & BMCR_ANENABLE) { /* AutoNegotiation is enabled */ if (!(bmsr & BMSR_ANEGCOMPLETE)) return; /* Do nothing - another interrupt generated when negotiation complete */ - read_phy(lp->phy_address, MII_LPA, &lpa); + read_phy(lp, lp->phy_address, MII_LPA, &lpa); if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100; else speed = SPEED_10; if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL; @@ -175,7 +173,7 @@ static void update_linkspeed(struct net_device *dev, int silent) } /* Update the MAC */ - mac_cfg = at91_emac_read(AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD); + mac_cfg = at91_emac_read(lp, AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD); if (speed == SPEED_100) { if (duplex == DUPLEX_FULL) /* 100 Full Duplex */ mac_cfg |= AT91_EMAC_SPD | AT91_EMAC_FD; @@ -186,7 +184,7 @@ static void update_linkspeed(struct net_device *dev, int silent) mac_cfg |= AT91_EMAC_FD; else {} /* 10 Half Duplex */ } - at91_emac_write(AT91_EMAC_CFG, mac_cfg); + at91_emac_write(lp, AT91_EMAC_CFG, mac_cfg); if (!silent) printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex"); @@ -207,34 +205,34 @@ static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id) * level-triggering. We therefore have to check if the PHY actually has * an IRQ pending. 
*/ - enable_mdi(); + enable_mdi(lp); if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { - read_phy(lp->phy_address, MII_DSINTR_REG, &phy); /* ack interrupt in Davicom PHY */ + read_phy(lp, lp->phy_address, MII_DSINTR_REG, &phy); /* ack interrupt in Davicom PHY */ if (!(phy & (1 << 0))) goto done; } else if (lp->phy_type == MII_LXT971A_ID) { - read_phy(lp->phy_address, MII_ISINTS_REG, &phy); /* ack interrupt in Intel PHY */ + read_phy(lp, lp->phy_address, MII_ISINTS_REG, &phy); /* ack interrupt in Intel PHY */ if (!(phy & (1 << 2))) goto done; } else if (lp->phy_type == MII_BCM5221_ID) { - read_phy(lp->phy_address, MII_BCMINTR_REG, &phy); /* ack interrupt in Broadcom PHY */ + read_phy(lp, lp->phy_address, MII_BCMINTR_REG, &phy); /* ack interrupt in Broadcom PHY */ if (!(phy & (1 << 0))) goto done; } else if (lp->phy_type == MII_KS8721_ID) { - read_phy(lp->phy_address, MII_TPISTATUS, &phy); /* ack interrupt in Micrel PHY */ + read_phy(lp, lp->phy_address, MII_TPISTATUS, &phy); /* ack interrupt in Micrel PHY */ if (!(phy & ((1 << 2) | 1))) goto done; } - else if (lp->phy_type == MII_T78Q21x3_ID) { /* ack interrupt in Teridian PHY */ - read_phy(lp->phy_address, MII_T78Q21INT_REG, &phy); + else if (lp->phy_type == MII_T78Q21x3_ID) { /* ack interrupt in Teridian PHY */ + read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &phy); if (!(phy & ((1 << 2) | 1))) goto done; } else if (lp->phy_type == MII_DP83848_ID) { - read_phy(lp->phy_address, MII_DPPHYSTS_REG, &phy); /* ack interrupt in DP83848 PHY */ + read_phy(lp, lp->phy_address, MII_DPPHYSTS_REG, &phy); /* ack interrupt in DP83848 PHY */ if (!(phy & (1 << 7))) goto done; } @@ -242,7 +240,7 @@ static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id) update_linkspeed(dev, 0); done: - disable_mdi(); + disable_mdi(lp); return IRQ_HANDLED; } @@ -265,7 +263,7 @@ static void enable_phyirq(struct net_device *dev) return; } - irq_number = lp->board_data.phy_irq_pin; + irq_number = gpio_to_irq(lp->board_data.phy_irq_pin); status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev); if (status) { printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status); @@ -273,41 +271,41 @@ static void enable_phyirq(struct net_device *dev) } spin_lock_irq(&lp->lock); - enable_mdi(); + enable_mdi(lp); if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */ - read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr); + read_phy(lp, lp->phy_address, MII_DSINTR_REG, &dsintr); dsintr = dsintr & ~0xf00; /* clear bits 8..11 */ - write_phy(lp->phy_address, MII_DSINTR_REG, dsintr); + write_phy(lp, lp->phy_address, MII_DSINTR_REG, dsintr); } else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */ - read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr); + read_phy(lp, lp->phy_address, MII_ISINTE_REG, &dsintr); dsintr = dsintr | 0xf2; /* set bits 1, 4..7 */ - write_phy(lp->phy_address, MII_ISINTE_REG, dsintr); + write_phy(lp, lp->phy_address, MII_ISINTE_REG, dsintr); } else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */ dsintr = (1 << 15) | ( 1 << 14); - write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr); + write_phy(lp, lp->phy_address, MII_BCMINTR_REG, dsintr); } else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */ dsintr = (1 << 10) | ( 1 << 8); - write_phy(lp->phy_address, MII_TPISTATUS, dsintr); + write_phy(lp, lp->phy_address, MII_TPISTATUS, dsintr); } else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */ - 
read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr); + read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &dsintr); dsintr = dsintr | 0x500; /* set bits 8, 10 */ - write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr); + write_phy(lp, lp->phy_address, MII_T78Q21INT_REG, dsintr); } else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */ - read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr); + read_phy(lp, lp->phy_address, MII_DPMISR_REG, &dsintr); dsintr = dsintr | 0x3c; /* set bits 2..5 */ - write_phy(lp->phy_address, MII_DPMISR_REG, dsintr); - read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr); + write_phy(lp, lp->phy_address, MII_DPMISR_REG, dsintr); + read_phy(lp, lp->phy_address, MII_DPMICR_REG, &dsintr); dsintr = dsintr | 0x3; /* set bits 0,1 */ - write_phy(lp->phy_address, MII_DPMICR_REG, dsintr); + write_phy(lp, lp->phy_address, MII_DPMICR_REG, dsintr); } - disable_mdi(); + disable_mdi(lp); spin_unlock_irq(&lp->lock); } @@ -326,46 +324,46 @@ static void disable_phyirq(struct net_device *dev) } spin_lock_irq(&lp->lock); - enable_mdi(); + enable_mdi(lp); if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */ - read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr); + read_phy(lp, lp->phy_address, MII_DSINTR_REG, &dsintr); dsintr = dsintr | 0xf00; /* set bits 8..11 */ - write_phy(lp->phy_address, MII_DSINTR_REG, dsintr); + write_phy(lp, lp->phy_address, MII_DSINTR_REG, dsintr); } else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */ - read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr); + read_phy(lp, lp->phy_address, MII_ISINTE_REG, &dsintr); dsintr = dsintr & ~0xf2; /* clear bits 1, 4..7 */ - write_phy(lp->phy_address, MII_ISINTE_REG, dsintr); + write_phy(lp, lp->phy_address, MII_ISINTE_REG, dsintr); } else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */ - read_phy(lp->phy_address, MII_BCMINTR_REG, &dsintr); + read_phy(lp, lp->phy_address, MII_BCMINTR_REG, &dsintr); dsintr = ~(1 << 14); - write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr); + write_phy(lp, lp->phy_address, MII_BCMINTR_REG, dsintr); } else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */ - read_phy(lp->phy_address, MII_TPISTATUS, &dsintr); + read_phy(lp, lp->phy_address, MII_TPISTATUS, &dsintr); dsintr = ~((1 << 10) | (1 << 8)); - write_phy(lp->phy_address, MII_TPISTATUS, dsintr); + write_phy(lp, lp->phy_address, MII_TPISTATUS, dsintr); } else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */ - read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr); + read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &dsintr); dsintr = dsintr & ~0x500; /* clear bits 8, 10 */ - write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr); + write_phy(lp, lp->phy_address, MII_T78Q21INT_REG, dsintr); } else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */ - read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr); + read_phy(lp, lp->phy_address, MII_DPMICR_REG, &dsintr); dsintr = dsintr & ~0x3; /* clear bits 0, 1 */ - write_phy(lp->phy_address, MII_DPMICR_REG, dsintr); - read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr); + write_phy(lp, lp->phy_address, MII_DPMICR_REG, dsintr); + read_phy(lp, lp->phy_address, MII_DPMISR_REG, &dsintr); dsintr = dsintr & ~0x3c; /* clear bits 2..5 */ - write_phy(lp->phy_address, MII_DPMISR_REG, dsintr); + write_phy(lp, lp->phy_address, MII_DPMISR_REG, dsintr); } - disable_mdi(); + disable_mdi(lp); spin_unlock_irq(&lp->lock); - irq_number = lp->board_data.phy_irq_pin; + irq_number = 
gpio_to_irq(lp->board_data.phy_irq_pin); free_irq(irq_number, dev); /* Free interrupt handler */ } @@ -379,17 +377,17 @@ static void reset_phy(struct net_device *dev) unsigned int bmcr; spin_lock_irq(&lp->lock); - enable_mdi(); + enable_mdi(lp); /* Perform PHY reset */ - write_phy(lp->phy_address, MII_BMCR, BMCR_RESET); + write_phy(lp, lp->phy_address, MII_BMCR, BMCR_RESET); /* Wait until PHY reset is complete */ do { - read_phy(lp->phy_address, MII_BMCR, &bmcr); + read_phy(lp, lp->phy_address, MII_BMCR, &bmcr); } while (!(bmcr & BMCR_RESET)); - disable_mdi(); + disable_mdi(lp); spin_unlock_irq(&lp->lock); } #endif @@ -399,13 +397,37 @@ static void at91ether_check_link(unsigned long dev_id) struct net_device *dev = (struct net_device *) dev_id; struct at91_private *lp = netdev_priv(dev); - enable_mdi(); + enable_mdi(lp); update_linkspeed(dev, 1); - disable_mdi(); + disable_mdi(lp); mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL); } +/* + * Perform any PHY-specific initialization. + */ +static void __init initialize_phy(struct at91_private *lp) +{ + unsigned int val; + + spin_lock_irq(&lp->lock); + enable_mdi(lp); + + if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { + read_phy(lp, lp->phy_address, MII_DSCR_REG, &val); + if ((val & (1 << 10)) == 0) /* DSCR bit 10 is 0 -- fiber mode */ + lp->phy_media = PORT_FIBRE; + } else if (machine_is_csb337()) { + /* mix link activity status into LED2 link state */ + write_phy(lp, lp->phy_address, MII_LEDCTRL_REG, 0x0d22); + } else if (machine_is_ecbat91()) + write_phy(lp, lp->phy_address, MII_LEDCTRL_REG, 0x156A); + + disable_mdi(lp); + spin_unlock_irq(&lp->lock); +} + /* ......................... ADDRESS MANAGEMENT ........................ */ /* @@ -454,17 +476,19 @@ static short __init unpack_mac_address(struct net_device *dev, unsigned int hi, */ static void __init get_mac_address(struct net_device *dev) { + struct at91_private *lp = netdev_priv(dev); + /* Check Specific-Address 1 */ - if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA1H), at91_emac_read(AT91_EMAC_SA1L))) + if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA1H), at91_emac_read(lp, AT91_EMAC_SA1L))) return; /* Check Specific-Address 2 */ - if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA2H), at91_emac_read(AT91_EMAC_SA2L))) + if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA2H), at91_emac_read(lp, AT91_EMAC_SA2L))) return; /* Check Specific-Address 3 */ - if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA3H), at91_emac_read(AT91_EMAC_SA3L))) + if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA3H), at91_emac_read(lp, AT91_EMAC_SA3L))) return; /* Check Specific-Address 4 */ - if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA4H), at91_emac_read(AT91_EMAC_SA4L))) + if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA4H), at91_emac_read(lp, AT91_EMAC_SA4L))) return; printk(KERN_ERR "at91_ether: Your bootloader did not configure a MAC address.\n"); @@ -475,11 +499,13 @@ static void __init get_mac_address(struct net_device *dev) */ static void update_mac_address(struct net_device *dev) { - at91_emac_write(AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0])); - at91_emac_write(AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4])); + struct at91_private *lp = netdev_priv(dev); - at91_emac_write(AT91_EMAC_SA2L, 0); - at91_emac_write(AT91_EMAC_SA2H, 0); + at91_emac_write(lp, AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] 
<< 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0])); + at91_emac_write(lp, AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4])); + + at91_emac_write(lp, AT91_EMAC_SA2L, 0); + at91_emac_write(lp, AT91_EMAC_SA2H, 0); } /* @@ -559,6 +585,7 @@ static int hash_get_index(__u8 *addr) */ static void at91ether_sethashtable(struct net_device *dev) { + struct at91_private *lp = netdev_priv(dev); struct netdev_hw_addr *ha; unsigned long mc_filter[2]; unsigned int bitnr; @@ -570,8 +597,8 @@ static void at91ether_sethashtable(struct net_device *dev) mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); } - at91_emac_write(AT91_EMAC_HSL, mc_filter[0]); - at91_emac_write(AT91_EMAC_HSH, mc_filter[1]); + at91_emac_write(lp, AT91_EMAC_HSL, mc_filter[0]); + at91_emac_write(lp, AT91_EMAC_HSH, mc_filter[1]); } /* @@ -579,9 +606,10 @@ static void at91ether_sethashtable(struct net_device *dev) */ static void at91ether_set_multicast_list(struct net_device *dev) { + struct at91_private *lp = netdev_priv(dev); unsigned long cfg; - cfg = at91_emac_read(AT91_EMAC_CFG); + cfg = at91_emac_read(lp, AT91_EMAC_CFG); if (dev->flags & IFF_PROMISC) /* Enable promiscuous mode */ cfg |= AT91_EMAC_CAF; @@ -589,34 +617,37 @@ static void at91ether_set_multicast_list(struct net_device *dev) cfg &= ~AT91_EMAC_CAF; if (dev->flags & IFF_ALLMULTI) { /* Enable all multicast mode */ - at91_emac_write(AT91_EMAC_HSH, -1); - at91_emac_write(AT91_EMAC_HSL, -1); + at91_emac_write(lp, AT91_EMAC_HSH, -1); + at91_emac_write(lp, AT91_EMAC_HSL, -1); cfg |= AT91_EMAC_MTI; } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */ at91ether_sethashtable(dev); cfg |= AT91_EMAC_MTI; } else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */ - at91_emac_write(AT91_EMAC_HSH, 0); - at91_emac_write(AT91_EMAC_HSL, 0); + at91_emac_write(lp, AT91_EMAC_HSH, 0); + at91_emac_write(lp, AT91_EMAC_HSL, 0); cfg &= ~AT91_EMAC_MTI; } - at91_emac_write(AT91_EMAC_CFG, cfg); + at91_emac_write(lp, AT91_EMAC_CFG, cfg); } /* ......................... ETHTOOL SUPPORT ........................... 
*/ static int mdio_read(struct net_device *dev, int phy_id, int location) { + struct at91_private *lp = netdev_priv(dev); unsigned int value; - read_phy(phy_id, location, &value); + read_phy(lp, phy_id, location, &value); return value; } static void mdio_write(struct net_device *dev, int phy_id, int location, int value) { - write_phy(phy_id, location, value); + struct at91_private *lp = netdev_priv(dev); + + write_phy(lp, phy_id, location, value); } static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) @@ -625,11 +656,11 @@ static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cm int ret; spin_lock_irq(&lp->lock); - enable_mdi(); + enable_mdi(lp); ret = mii_ethtool_gset(&lp->mii, cmd); - disable_mdi(); + disable_mdi(lp); spin_unlock_irq(&lp->lock); if (lp->phy_media == PORT_FIBRE) { /* override media type since mii.c doesn't know */ @@ -646,11 +677,11 @@ static int at91ether_set_settings(struct net_device *dev, struct ethtool_cmd *cm int ret; spin_lock_irq(&lp->lock); - enable_mdi(); + enable_mdi(lp); ret = mii_ethtool_sset(&lp->mii, cmd); - disable_mdi(); + disable_mdi(lp); spin_unlock_irq(&lp->lock); return ret; @@ -662,11 +693,11 @@ static int at91ether_nwayreset(struct net_device *dev) int ret; spin_lock_irq(&lp->lock); - enable_mdi(); + enable_mdi(lp); ret = mii_nway_restart(&lp->mii); - disable_mdi(); + disable_mdi(lp); spin_unlock_irq(&lp->lock); return ret; @@ -696,9 +727,9 @@ static int at91ether_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return -EINVAL; spin_lock_irq(&lp->lock); - enable_mdi(); + enable_mdi(lp); res = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL); - disable_mdi(); + disable_mdi(lp); spin_unlock_irq(&lp->lock); return res; @@ -731,11 +762,11 @@ static void at91ether_start(struct net_device *dev) lp->rxBuffIndex = 0; /* Program address of descriptor list in Rx Buffer Queue register */ - at91_emac_write(AT91_EMAC_RBQP, (unsigned long) dlist_phys); + at91_emac_write(lp, AT91_EMAC_RBQP, (unsigned long) dlist_phys); /* Enable Receive and Transmit */ - ctl = at91_emac_read(AT91_EMAC_CTL); - at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE); + ctl = at91_emac_read(lp, AT91_EMAC_CTL); + at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE); } /* @@ -752,8 +783,8 @@ static int at91ether_open(struct net_device *dev) clk_enable(lp->ether_clk); /* Re-enable Peripheral clock */ /* Clear internal statistics */ - ctl = at91_emac_read(AT91_EMAC_CTL); - at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_CSR); + ctl = at91_emac_read(lp, AT91_EMAC_CTL); + at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_CSR); /* Update the MAC address (incase user has changed it) */ update_mac_address(dev); @@ -762,15 +793,15 @@ static int at91ether_open(struct net_device *dev) enable_phyirq(dev); /* Enable MAC interrupts */ - at91_emac_write(AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA + at91_emac_write(lp, AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM | AT91_EMAC_ROVR | AT91_EMAC_ABT); /* Determine current link speed */ spin_lock_irq(&lp->lock); - enable_mdi(); + enable_mdi(lp); update_linkspeed(dev, 0); - disable_mdi(); + disable_mdi(lp); spin_unlock_irq(&lp->lock); at91ether_start(dev); @@ -787,14 +818,14 @@ static int at91ether_close(struct net_device *dev) unsigned long ctl; /* Disable Receiver and Transmitter */ - ctl = at91_emac_read(AT91_EMAC_CTL); - at91_emac_write(AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE)); + ctl = 
at91_emac_read(lp, AT91_EMAC_CTL); + at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE)); /* Disable PHY interrupt */ disable_phyirq(dev); /* Disable MAC interrupts */ - at91_emac_write(AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA + at91_emac_write(lp, AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM | AT91_EMAC_ROVR | AT91_EMAC_ABT); @@ -812,7 +843,7 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct at91_private *lp = netdev_priv(dev); - if (at91_emac_read(AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) { + if (at91_emac_read(lp, AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) { netif_stop_queue(dev); /* Store packet information (to free when Tx completed) */ @@ -822,9 +853,9 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_bytes += skb->len; /* Set address of the data in the Transmit Address register */ - at91_emac_write(AT91_EMAC_TAR, lp->skb_physaddr); + at91_emac_write(lp, AT91_EMAC_TAR, lp->skb_physaddr); /* Set length of the packet in the Transmit Control register */ - at91_emac_write(AT91_EMAC_TCR, skb->len); + at91_emac_write(lp, AT91_EMAC_TCR, skb->len); } else { printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n"); @@ -841,31 +872,32 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) */ static struct net_device_stats *at91ether_stats(struct net_device *dev) { + struct at91_private *lp = netdev_priv(dev); int ale, lenerr, seqe, lcol, ecol; if (netif_running(dev)) { - dev->stats.rx_packets += at91_emac_read(AT91_EMAC_OK); /* Good frames received */ - ale = at91_emac_read(AT91_EMAC_ALE); + dev->stats.rx_packets += at91_emac_read(lp, AT91_EMAC_OK); /* Good frames received */ + ale = at91_emac_read(lp, AT91_EMAC_ALE); dev->stats.rx_frame_errors += ale; /* Alignment errors */ - lenerr = at91_emac_read(AT91_EMAC_ELR) + at91_emac_read(AT91_EMAC_USF); + lenerr = at91_emac_read(lp, AT91_EMAC_ELR) + at91_emac_read(lp, AT91_EMAC_USF); dev->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */ - seqe = at91_emac_read(AT91_EMAC_SEQE); + seqe = at91_emac_read(lp, AT91_EMAC_SEQE); dev->stats.rx_crc_errors += seqe; /* CRC error */ - dev->stats.rx_fifo_errors += at91_emac_read(AT91_EMAC_DRFC); /* Receive buffer not available */ + dev->stats.rx_fifo_errors += at91_emac_read(lp, AT91_EMAC_DRFC);/* Receive buffer not available */ dev->stats.rx_errors += (ale + lenerr + seqe - + at91_emac_read(AT91_EMAC_CDE) + at91_emac_read(AT91_EMAC_RJB)); + + at91_emac_read(lp, AT91_EMAC_CDE) + at91_emac_read(lp, AT91_EMAC_RJB)); - dev->stats.tx_packets += at91_emac_read(AT91_EMAC_FRA); /* Frames successfully transmitted */ - dev->stats.tx_fifo_errors += at91_emac_read(AT91_EMAC_TUE); /* Transmit FIFO underruns */ - dev->stats.tx_carrier_errors += at91_emac_read(AT91_EMAC_CSE); /* Carrier Sense errors */ - dev->stats.tx_heartbeat_errors += at91_emac_read(AT91_EMAC_SQEE);/* Heartbeat error */ + dev->stats.tx_packets += at91_emac_read(lp, AT91_EMAC_FRA); /* Frames successfully transmitted */ + dev->stats.tx_fifo_errors += at91_emac_read(lp, AT91_EMAC_TUE); /* Transmit FIFO underruns */ + dev->stats.tx_carrier_errors += at91_emac_read(lp, AT91_EMAC_CSE); /* Carrier Sense errors */ + dev->stats.tx_heartbeat_errors += at91_emac_read(lp, AT91_EMAC_SQEE);/* Heartbeat error */ - lcol = at91_emac_read(AT91_EMAC_LCOL); - ecol = at91_emac_read(AT91_EMAC_ECOL); + lcol = at91_emac_read(lp, AT91_EMAC_LCOL); + 
ecol = at91_emac_read(lp, AT91_EMAC_ECOL); dev->stats.tx_window_errors += lcol; /* Late collisions */ dev->stats.tx_aborted_errors += ecol; /* 16 collisions */ - dev->stats.collisions += (at91_emac_read(AT91_EMAC_SCOL) + at91_emac_read(AT91_EMAC_MCOL) + lcol + ecol); + dev->stats.collisions += (at91_emac_read(lp, AT91_EMAC_SCOL) + at91_emac_read(lp, AT91_EMAC_MCOL) + lcol + ecol); } return &dev->stats; } @@ -922,7 +954,7 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id) /* MAC Interrupt Status register indicates what interrupts are pending. It is automatically cleared once read. */ - intstatus = at91_emac_read(AT91_EMAC_ISR); + intstatus = at91_emac_read(lp, AT91_EMAC_ISR); if (intstatus & AT91_EMAC_RCOM) /* Receive complete */ at91ether_rx(dev); @@ -942,9 +974,9 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id) /* Work-around for Errata #11 */ if (intstatus & AT91_EMAC_RBNA) { - ctl = at91_emac_read(AT91_EMAC_CTL); - at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE); - at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE); + ctl = at91_emac_read(lp, AT91_EMAC_CTL); + at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE); + at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_RE); } if (intstatus & AT91_EMAC_ROVR) @@ -980,189 +1012,199 @@ static const struct net_device_ops at91ether_netdev_ops = { }; /* - * Initialize the ethernet interface + * Detect the PHY type, and its address. */ -static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address, - struct platform_device *pdev, struct clk *ether_clk) +static int __init at91ether_phy_detect(struct at91_private *lp) +{ + unsigned int phyid1, phyid2; + unsigned long phy_id; + unsigned short phy_address = 0; + + while (phy_address < PHY_MAX_ADDR) { + /* Read the PHY ID registers */ + enable_mdi(lp); + read_phy(lp, phy_address, MII_PHYSID1, &phyid1); + read_phy(lp, phy_address, MII_PHYSID2, &phyid2); + disable_mdi(lp); + + phy_id = (phyid1 << 16) | (phyid2 & 0xfff0); + switch (phy_id) { + case MII_DM9161_ID: /* Davicom 9161: PHY_ID1 = 0x181, PHY_ID2 = B881 */ + case MII_DM9161A_ID: /* Davicom 9161A: PHY_ID1 = 0x181, PHY_ID2 = B8A0 */ + case MII_LXT971A_ID: /* Intel LXT971A: PHY_ID1 = 0x13, PHY_ID2 = 78E0 */ + case MII_RTL8201_ID: /* Realtek RTL8201: PHY_ID1 = 0, PHY_ID2 = 0x8201 */ + case MII_BCM5221_ID: /* Broadcom BCM5221: PHY_ID1 = 0x40, PHY_ID2 = 0x61e0 */ + case MII_DP83847_ID: /* National Semiconductor DP83847: */ + case MII_DP83848_ID: /* National Semiconductor DP83848: */ + case MII_AC101L_ID: /* Altima AC101L: PHY_ID1 = 0x22, PHY_ID2 = 0x5520 */ + case MII_KS8721_ID: /* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */ + case MII_T78Q21x3_ID: /* Teridian 78Q21x3: PHY_ID1 = 0x0E, PHY_ID2 = 7237 */ + case MII_LAN83C185_ID: /* SMSC LAN83C185: PHY_ID1 = 0x0007, PHY_ID2 = 0xC0A1 */ + /* store detected values */ + lp->phy_type = phy_id; /* Type of PHY connected */ + lp->phy_address = phy_address; /* MDI address of PHY */ + return 1; + } + + phy_address++; + } + + return 0; /* not detected */ +} + + +/* + * Detect MAC & PHY and perform ethernet interface initialization + */ +static int __init at91ether_probe(struct platform_device *pdev) { struct macb_platform_data *board_data = pdev->dev.platform_data; + struct resource *regs; struct net_device *dev; struct at91_private *lp; - unsigned int val; int res; + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) + return -ENOENT; + dev = alloc_etherdev(sizeof(struct at91_private)); if (!dev) return -ENOMEM; - dev->base_addr = 
AT91_VA_BASE_EMAC; - dev->irq = AT91RM9200_ID_EMAC; + lp = netdev_priv(dev); + lp->board_data = *board_data; + spin_lock_init(&lp->lock); + + dev->base_addr = regs->start; /* physical base address */ + lp->emac_base = ioremap(regs->start, regs->end - regs->start + 1); + if (!lp->emac_base) { + res = -ENOMEM; + goto err_free_dev; + } + + /* Clock */ + lp->ether_clk = clk_get(&pdev->dev, "ether_clk"); + if (IS_ERR(lp->ether_clk)) { + res = -ENODEV; + goto err_ioumap; + } + clk_enable(lp->ether_clk); /* Install the interrupt handler */ + dev->irq = platform_get_irq(pdev, 0); if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) { - free_netdev(dev); - return -EBUSY; + res = -EBUSY; + goto err_disable_clock; } /* Allocate memory for DMA Receive descriptors */ - lp = netdev_priv(dev); lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL); if (lp->dlist == NULL) { - free_irq(dev->irq, dev); - free_netdev(dev); - return -ENOMEM; + res = -ENOMEM; + goto err_free_irq; } - lp->board_data = *board_data; - lp->ether_clk = ether_clk; - platform_set_drvdata(pdev, dev); - - spin_lock_init(&lp->lock); ether_setup(dev); dev->netdev_ops = &at91ether_netdev_ops; dev->ethtool_ops = &at91ether_ethtool_ops; - + platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); get_mac_address(dev); /* Get ethernet address and store it in dev->dev_addr */ update_mac_address(dev); /* Program ethernet address into MAC */ - at91_emac_write(AT91_EMAC_CTL, 0); + at91_emac_write(lp, AT91_EMAC_CTL, 0); - if (lp->board_data.is_rmii) - at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII); + if (board_data->is_rmii) + at91_emac_write(lp, AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII); else - at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG); + at91_emac_write(lp, AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG); - /* Perform PHY-specific initialization */ - spin_lock_irq(&lp->lock); - enable_mdi(); - if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { - read_phy(phy_address, MII_DSCR_REG, &val); - if ((val & (1 << 10)) == 0) /* DSCR bit 10 is 0 -- fiber mode */ - lp->phy_media = PORT_FIBRE; - } else if (machine_is_csb337()) { - /* mix link activity status into LED2 link state */ - write_phy(phy_address, MII_LEDCTRL_REG, 0x0d22); - } else if (machine_is_ecbat91()) - write_phy(phy_address, MII_LEDCTRL_REG, 0x156A); + /* Detect PHY */ + if (!at91ether_phy_detect(lp)) { + printk(KERN_ERR "at91_ether: Could not detect ethernet PHY\n"); + res = -ENODEV; + goto err_free_dmamem; + } - disable_mdi(); - spin_unlock_irq(&lp->lock); + initialize_phy(lp); lp->mii.dev = dev; /* Support for ethtool */ lp->mii.mdio_read = mdio_read; lp->mii.mdio_write = mdio_write; - lp->mii.phy_id = phy_address; + lp->mii.phy_id = lp->phy_address; lp->mii.phy_id_mask = 0x1f; lp->mii.reg_num_mask = 0x1f; - lp->phy_type = phy_type; /* Type of PHY connected */ - lp->phy_address = phy_address; /* MDI address of PHY */ - /* Register the network interface */ res = register_netdev(dev); - if (res) { - free_irq(dev->irq, dev); - free_netdev(dev); - dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys); - return res; - } + if (res) + goto err_free_dmamem; /* Determine current link speed */ spin_lock_irq(&lp->lock); - enable_mdi(); + enable_mdi(lp); update_linkspeed(dev, 0); - disable_mdi(); + disable_mdi(lp); spin_unlock_irq(&lp->lock); 
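(The probe rework above is built around per-device register accessors: every at91_emac_read()/at91_emac_write() call now takes the driver's private data, and the register window is ioremap()ed into lp->emac_base from the platform resource instead of relying on the fixed AT91_VA_BASE_EMAC mapping. As a rough illustrative sketch of that accessor pattern only, with hypothetical names and a structure reduced to the single field involved, not the driver's exact code:

	#include <linux/io.h>
	#include <linux/types.h>

	/* Illustrative only: the usual shape of per-device MMIO accessors once
	 * the register base comes from ioremap() at probe time rather than a
	 * fixed virtual address.
	 */
	struct emac_priv_sketch {
		void __iomem *emac_base;	/* set from ioremap(regs->start, ...) in probe */
	};

	static inline u32 emac_read_sketch(struct emac_priv_sketch *lp,
					   unsigned int reg)
	{
		/* reg is a byte offset into the remapped register window */
		return readl(lp->emac_base + reg);
	}

	static inline void emac_write_sketch(struct emac_priv_sketch *lp,
					     unsigned int reg, u32 value)
	{
		writel(value, lp->emac_base + reg);
	}

Threading lp through every accessor is also what allows the error labels added at the end of at91ether_probe(), err_free_dmamem through err_free_dev, to unwind the ioremap(), clock, IRQ and DMA setup in the reverse order of acquisition.)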
netif_carrier_off(dev); /* will be enabled in open() */ /* If board has no PHY IRQ, use a timer to poll the PHY */ - if (!gpio_is_valid(lp->board_data.phy_irq_pin)) { + if (gpio_is_valid(lp->board_data.phy_irq_pin)) { + gpio_request(board_data->phy_irq_pin, "ethernet_phy"); + } else { + /* If board has no PHY IRQ, use a timer to poll the PHY */ init_timer(&lp->check_timer); lp->check_timer.data = (unsigned long)dev; lp->check_timer.function = at91ether_check_link; - } else if (lp->board_data.phy_irq_pin >= 32) - gpio_request(lp->board_data.phy_irq_pin, "ethernet_phy"); + } /* Display ethernet banner */ printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n", dev->name, (uint) dev->base_addr, dev->irq, - at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-", - at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex", + at91_emac_read(lp, AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-", + at91_emac_read(lp, AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex", dev->dev_addr); - if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) + if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)"); - else if (phy_type == MII_LXT971A_ID) + else if (lp->phy_type == MII_LXT971A_ID) printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name); - else if (phy_type == MII_RTL8201_ID) + else if (lp->phy_type == MII_RTL8201_ID) printk(KERN_INFO "%s: Realtek RTL8201(B)L PHY\n", dev->name); - else if (phy_type == MII_BCM5221_ID) + else if (lp->phy_type == MII_BCM5221_ID) printk(KERN_INFO "%s: Broadcom BCM5221 PHY\n", dev->name); - else if (phy_type == MII_DP83847_ID) + else if (lp->phy_type == MII_DP83847_ID) printk(KERN_INFO "%s: National Semiconductor DP83847 PHY\n", dev->name); - else if (phy_type == MII_DP83848_ID) + else if (lp->phy_type == MII_DP83848_ID) printk(KERN_INFO "%s: National Semiconductor DP83848 PHY\n", dev->name); - else if (phy_type == MII_AC101L_ID) + else if (lp->phy_type == MII_AC101L_ID) printk(KERN_INFO "%s: Altima AC101L PHY\n", dev->name); - else if (phy_type == MII_KS8721_ID) + else if (lp->phy_type == MII_KS8721_ID) printk(KERN_INFO "%s: Micrel KS8721 PHY\n", dev->name); - else if (phy_type == MII_T78Q21x3_ID) + else if (lp->phy_type == MII_T78Q21x3_ID) printk(KERN_INFO "%s: Teridian 78Q21x3 PHY\n", dev->name); - else if (phy_type == MII_LAN83C185_ID) + else if (lp->phy_type == MII_LAN83C185_ID) printk(KERN_INFO "%s: SMSC LAN83C185 PHY\n", dev->name); - return 0; -} - -/* - * Detect MAC and PHY and perform initialization - */ -static int __init at91ether_probe(struct platform_device *pdev) -{ - unsigned int phyid1, phyid2; - int detected = -1; - unsigned long phy_id; - unsigned short phy_address = 0; - struct clk *ether_clk; - - ether_clk = clk_get(&pdev->dev, "ether_clk"); - if (IS_ERR(ether_clk)) { - printk(KERN_ERR "at91_ether: no clock defined\n"); - return -ENODEV; - } - clk_enable(ether_clk); /* Enable Peripheral clock */ - - while ((detected != 0) && (phy_address < 32)) { - /* Read the PHY ID registers */ - enable_mdi(); - read_phy(phy_address, MII_PHYSID1, &phyid1); - read_phy(phy_address, MII_PHYSID2, &phyid2); - disable_mdi(); - - phy_id = (phyid1 << 16) | (phyid2 & 0xfff0); - switch (phy_id) { - case MII_DM9161_ID: /* Davicom 9161: PHY_ID1 = 0x181, PHY_ID2 = B881 */ - case MII_DM9161A_ID: /* Davicom 9161A: PHY_ID1 = 0x181, PHY_ID2 = B8A0 */ - case MII_LXT971A_ID: /* Intel LXT971A: PHY_ID1 = 
0x13, PHY_ID2 = 78E0 */ - case MII_RTL8201_ID: /* Realtek RTL8201: PHY_ID1 = 0, PHY_ID2 = 0x8201 */ - case MII_BCM5221_ID: /* Broadcom BCM5221: PHY_ID1 = 0x40, PHY_ID2 = 0x61e0 */ - case MII_DP83847_ID: /* National Semiconductor DP83847: */ - case MII_DP83848_ID: /* National Semiconductor DP83848: */ - case MII_AC101L_ID: /* Altima AC101L: PHY_ID1 = 0x22, PHY_ID2 = 0x5520 */ - case MII_KS8721_ID: /* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */ - case MII_T78Q21x3_ID: /* Teridian 78Q21x3: PHY_ID1 = 0x0E, PHY_ID2 = 7237 */ - case MII_LAN83C185_ID: /* SMSC LAN83C185: PHY_ID1 = 0x0007, PHY_ID2 = 0xC0A1 */ - detected = at91ether_setup(phy_id, phy_address, pdev, ether_clk); - break; - } + clk_disable(lp->ether_clk); /* Disable Peripheral clock */ - phy_address++; - } + return 0; - clk_disable(ether_clk); /* Disable Peripheral clock */ - return detected; +err_free_dmamem: + platform_set_drvdata(pdev, NULL); + dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys); +err_free_irq: + free_irq(dev->irq, dev); +err_disable_clock: + clk_disable(lp->ether_clk); + clk_put(lp->ether_clk); +err_ioumap: + iounmap(lp->emac_base); +err_free_dev: + free_netdev(dev); + return res; } static int __devexit at91ether_remove(struct platform_device *pdev) @@ -1170,8 +1212,7 @@ static int __devexit at91ether_remove(struct platform_device *pdev) struct net_device *dev = platform_get_drvdata(pdev); struct at91_private *lp = netdev_priv(dev); - if (gpio_is_valid(lp->board_data.phy_irq_pin) && - lp->board_data.phy_irq_pin >= 32) + if (gpio_is_valid(lp->board_data.phy_irq_pin)) gpio_free(lp->board_data.phy_irq_pin); unregister_netdev(dev); @@ -1193,7 +1234,7 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg) if (netif_running(net_dev)) { if (gpio_is_valid(lp->board_data.phy_irq_pin)) { - int phy_irq = lp->board_data.phy_irq_pin; + int phy_irq = gpio_to_irq(lp->board_data.phy_irq_pin); disable_irq(phy_irq); } @@ -1217,7 +1258,7 @@ static int at91ether_resume(struct platform_device *pdev) netif_start_queue(net_dev); if (gpio_is_valid(lp->board_data.phy_irq_pin)) { - int phy_irq = lp->board_data.phy_irq_pin; + int phy_irq = gpio_to_irq(lp->board_data.phy_irq_pin); enable_irq(phy_irq); } } diff --git a/drivers/net/ethernet/cadence/at91_ether.h b/drivers/net/ethernet/cadence/at91_ether.h index 3725fbb0def..0ef6328fa7f 100644 --- a/drivers/net/ethernet/cadence/at91_ether.h +++ b/drivers/net/ethernet/cadence/at91_ether.h @@ -88,6 +88,7 @@ struct at91_private struct macb_platform_data board_data; /* board-specific * configuration (shared with * macb for common data */ + void __iomem *emac_base; /* base register address */ struct clk *ether_clk; /* clock */ /* PHY */ diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index c4834c23be3..1466bc4e3dd 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -1213,6 +1213,7 @@ static const struct ethtool_ops macb_ethtool_ops = { .set_settings = macb_set_settings, .get_drvinfo = macb_get_drvinfo, .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, }; static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index b9406cbfc18..845b2020f29 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -1,105 +1,27 @@ /* cs89x0.c: A Crystal Semiconductor (Now Cirrus Logic) CS89[02]0 - * 
driver for linux. + * driver for linux. + * Written 1996 by Russell Nelson, with reference to skeleton.c + * written 1993-1994 by Donald Becker. + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + * The author may be reached at nelson@crynwr.com, Crynwr + * Software, 521 Pleasant Valley Rd., Potsdam, NY 13676 + * + * Other contributors: + * Mike Cruse : mcruse@cti-ltd.com + * Russ Nelson + * Melody Lee : ethernet@crystal.cirrus.com + * Alan Cox + * Andrew Morton + * Oskar Schirmer : oskar@scara.com + * Deepak Saxena : dsaxena@plexity.net + * Dmitry Pervushin : dpervushin@ru.mvista.com + * Deepak Saxena : dsaxena@plexity.net + * Domenico Andreoli : cavokz@gmail.com */ -/* - Written 1996 by Russell Nelson, with reference to skeleton.c - written 1993-1994 by Donald Becker. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - The author may be reached at nelson@crynwr.com, Crynwr - Software, 521 Pleasant Valley Rd., Potsdam, NY 13676 - - Changelog: - - Mike Cruse : mcruse@cti-ltd.com - : Changes for Linux 2.0 compatibility. - : Added dev_id parameter in net_interrupt(), - : request_irq() and free_irq(). Just NULL for now. - - Mike Cruse : Added MOD_INC_USE_COUNT and MOD_DEC_USE_COUNT macros - : in net_open() and net_close() so kerneld would know - : that the module is in use and wouldn't eject the - : driver prematurely. - - Mike Cruse : Rewrote init_module() and cleanup_module using 8390.c - : as an example. Disabled autoprobing in init_module(), - : not a good thing to do to other devices while Linux - : is running from all accounts. - - Russ Nelson : Jul 13 1998. Added RxOnly DMA support. - - Melody Lee : Aug 10 1999. Changes for Linux 2.2.5 compatibility. - : email: ethernet@crystal.cirrus.com - - Alan Cox : Removed 1.2 support, added 2.1 extra counters. - - Andrew Morton : Kernel 2.3.48 - : Handle kmalloc() failures - : Other resource allocation fixes - : Add SMP locks - : Integrate Russ Nelson's ALLOW_DMA functionality back in. - : If ALLOW_DMA is true, make DMA runtime selectable - : Folded in changes from Cirrus (Melody Lee - : <klee@crystal.cirrus.com>) - : Don't call netif_wake_queue() in net_send_packet() - : Fixed an out-of-mem bug in dma_rx() - : Updated Documentation/networking/cs89x0.txt - - Andrew Morton : Kernel 2.3.99-pre1 - : Use skb_reserve to longword align IP header (two places) - : Remove a delay loop from dma_rx() - : Replace '100' with HZ - : Clean up a couple of skb API abuses - : Added 'cs89x0_dma=N' kernel boot option - : Correctly initialise lp->lock in non-module compile - - Andrew Morton : Kernel 2.3.99-pre4-1 - : MOD_INC/DEC race fix (see - : http://www.uwsg.indiana.edu/hypermail/linux/kernel/0003.3/1532.html) - - Andrew Morton : Kernel 2.4.0-test7-pre2 - : Enhanced EEPROM support to cover more devices, - : abstracted IRQ mapping to support CONFIG_ARCH_CLPS7500 arch - : (Jason Gunthorpe <jgg@ualberta.ca>) - - Andrew Morton : Kernel 2.4.0-test11-pre4 - : Use dev->name in request_*() (Andrey Panin) - : Fix an error-path memleak in init_module() - : Preserve return value from request_irq() - : Fix type of `media' module parm (Keith Owens) - : Use SET_MODULE_OWNER() - : Tidied up strange request_irq() abuse in net_open(). 
- - Andrew Morton : Kernel 2.4.3-pre1 - : Request correct number of pages for DMA (Hugh Dickens) - : Select PP_ChipID _after_ unregister_netdev in cleanup_module() - : because unregister_netdev() calls get_stats. - : Make `version[]' __initdata - : Uninlined the read/write reg/word functions. - - Oskar Schirmer : oskar@scara.com - : HiCO.SH4 (superh) support added (irq#1, cs89x0_media=) - - Deepak Saxena : dsaxena@plexity.net - : Intel IXDP2x01 (XScale ixp2x00 NPU) platform support - - Dmitry Pervushin : dpervushin@ru.mvista.com - : PNX010X platform support - - Deepak Saxena : dsaxena@plexity.net - : Intel IXDP2351 platform support - - Dmitry Pervushin : dpervushin@ru.mvista.com - : PNX010X platform support - - Domenico Andreoli : cavokz@gmail.com - : QQ2440 platform support - -*/ - /* * Set this to zero to disable DMA code @@ -119,14 +41,12 @@ */ #define DEBUGGING 1 -/* - Sources: - - Crynwr packet driver epktisa. - - Crystal Semiconductor data sheets. +/* Sources: + * Crynwr packet driver epktisa. + * Crystal Semiconductor data sheets. + */ -*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/printk.h> @@ -147,8 +67,8 @@ #include <linux/bitops.h> #include <linux/delay.h> #include <linux/gfp.h> +#include <linux/io.h> -#include <asm/io.h> #include <asm/irq.h> #include <linux/atomic.h> #if ALLOW_DMA @@ -157,35 +77,55 @@ #include "cs89x0.h" +#define cs89_dbg(val, level, fmt, ...) \ +do { \ + if (val <= net_debug) \ + pr_##level(fmt, ##__VA_ARGS__); \ +} while (0) + static char version[] __initdata = -"cs89x0.c: v2.4.3-pre1 Russell Nelson <nelson@crynwr.com>, Andrew Morton\n"; + "v2.4.3-pre1 Russell Nelson <nelson@crynwr.com>, Andrew Morton"; #define DRV_NAME "cs89x0" /* First, a few definitions that the brave might change. - A zero-terminated list of I/O addresses to be probed. Some special flags.. - Addr & 1 = Read back the address port, look for signature and reset - the page window before probing - Addr & 3 = Reset the page window and probe - The CLPS eval board has the Cirrus chip at 0x80090300, in ARM IO space, - but it is possible that a Cirrus board could be plugged into the ISA - slots. */ + * A zero-terminated list of I/O addresses to be probed. Some special flags.. + * Addr & 1 = Read back the address port, look for signature and reset + * the page window before probing + * Addr & 3 = Reset the page window and probe + * The CLPS eval board has the Cirrus chip at 0x80090300, in ARM IO space, + * but it is possible that a Cirrus board could be plugged into the ISA + * slots. + */ /* The cs8900 has 4 IRQ pins, software selectable. cs8900_irq_map maps - them to system IRQ numbers. This mapping is card specific and is set to - the configuration of the Cirrus Eval board for this chip. */ + * them to system IRQ numbers. This mapping is card specific and is set to + * the configuration of the Cirrus Eval board for this chip. 
+ */ #if defined(CONFIG_MACH_IXDP2351) #define CS89x0_NONISA_IRQ -static unsigned int netcard_portlist[] __used __initdata = {IXDP2351_VIRT_CS8900_BASE, 0}; -static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0}; +static unsigned int netcard_portlist[] __used __initdata = { + IXDP2351_VIRT_CS8900_BASE, 0 +}; +static unsigned int cs8900_irq_map[] = { + IRQ_IXDP2351_CS8900, 0, 0, 0 +}; #elif defined(CONFIG_ARCH_IXDP2X01) #define CS89x0_NONISA_IRQ -static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0}; -static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0}; +static unsigned int netcard_portlist[] __used __initdata = { + IXDP2X01_CS8900_VIRT_BASE, 0 +}; +static unsigned int cs8900_irq_map[] = { + IRQ_IXDP2X01_CS8900, 0, 0, 0 +}; #else #ifndef CONFIG_CS89x0_PLATFORM -static unsigned int netcard_portlist[] __used __initdata = - { 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0}; -static unsigned int cs8900_irq_map[] = {10,11,12,5}; +static unsigned int netcard_portlist[] __used __initdata = { + 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, + 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0 +}; +static unsigned int cs8900_irq_map[] = { + 10, 11, 12, 5 +}; #endif #endif @@ -222,6 +162,8 @@ struct net_local { int send_underrun; /* keep track of how many underruns in a row we get */ int force; /* force various values; see FORCE* above. */ spinlock_t lock; + void __iomem *virt_addr;/* CS89x0 virtual address. */ + unsigned long size; /* Length of CS89x0 memory region. */ #if ALLOW_DMA int use_dma; /* Flag: we're using dma */ int dma; /* DMA channel */ @@ -230,119 +172,42 @@ struct net_local { unsigned char *end_dma_buff; /* points to the end of the buffer */ unsigned char *rx_dma_ptr; /* points to the next packet */ #endif -#ifdef CONFIG_CS89x0_PLATFORM - void __iomem *virt_addr;/* Virtual address for accessing the CS89x0. */ - unsigned long phys_addr;/* Physical address for accessing the CS89x0. */ - unsigned long size; /* Length of CS89x0 memory region. */ -#endif }; -/* Index to functions, as function prototypes. */ - -static int cs89x0_probe1(struct net_device *dev, unsigned long ioaddr, int modular); -static int net_open(struct net_device *dev); -static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev); -static irqreturn_t net_interrupt(int irq, void *dev_id); -static void set_multicast_list(struct net_device *dev); -static void net_timeout(struct net_device *dev); -static void net_rx(struct net_device *dev); -static int net_close(struct net_device *dev); -static struct net_device_stats *net_get_stats(struct net_device *dev); -static void reset_chip(struct net_device *dev); -static int get_eeprom_data(struct net_device *dev, int off, int len, int *buffer); -static int get_eeprom_cksum(int off, int len, int *buffer); -static int set_mac_address(struct net_device *dev, void *addr); -static void count_rx_errors(int status, struct net_device *dev); -#ifdef CONFIG_NET_POLL_CONTROLLER -static void net_poll_controller(struct net_device *dev); -#endif -#if ALLOW_DMA -static void get_dma_channel(struct net_device *dev); -static void release_dma_buff(struct net_local *lp); -#endif - /* Example routines you must write ;->. 
*/ #define tx_done(dev) 1 /* * Permit 'cs89x0_dma=N' in the kernel boot environment */ -#if !defined(MODULE) && (ALLOW_DMA != 0) +#if !defined(MODULE) +#if ALLOW_DMA static int g_cs89x0_dma; static int __init dma_fn(char *str) { - g_cs89x0_dma = simple_strtol(str,NULL,0); + g_cs89x0_dma = simple_strtol(str, NULL, 0); return 1; } __setup("cs89x0_dma=", dma_fn); -#endif /* !defined(MODULE) && (ALLOW_DMA != 0) */ +#endif /* ALLOW_DMA */ -#ifndef MODULE static int g_cs89x0_media__force; static int __init media_fn(char *str) { - if (!strcmp(str, "rj45")) g_cs89x0_media__force = FORCE_RJ45; - else if (!strcmp(str, "aui")) g_cs89x0_media__force = FORCE_AUI; - else if (!strcmp(str, "bnc")) g_cs89x0_media__force = FORCE_BNC; + if (!strcmp(str, "rj45")) + g_cs89x0_media__force = FORCE_RJ45; + else if (!strcmp(str, "aui")) + g_cs89x0_media__force = FORCE_AUI; + else if (!strcmp(str, "bnc")) + g_cs89x0_media__force = FORCE_BNC; + return 1; } __setup("cs89x0_media=", media_fn); - - -#ifndef CONFIG_CS89x0_PLATFORM -/* Check for a network adaptor of this type, and return '0' iff one exists. - If dev->base_addr == 0, probe all likely locations. - If dev->base_addr == 1, always return failure. - If dev->base_addr == 2, allocate space for the device and return success - (detachable devices only). - Return 0 on success. - */ - -struct net_device * __init cs89x0_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); - unsigned *port; - int err = 0; - int irq; - int io; - - if (!dev) - return ERR_PTR(-ENODEV); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - io = dev->base_addr; - irq = dev->irq; - - if (net_debug) - printk("cs89x0:cs89x0_probe(0x%x)\n", io); - - if (io > 0x1ff) { /* Check a single specified location. */ - err = cs89x0_probe1(dev, io, 0); - } else if (io != 0) { /* Don't probe at all. */ - err = -ENXIO; - } else { - for (port = netcard_portlist; *port; port++) { - if (cs89x0_probe1(dev, *port, 0) == 0) - break; - dev->irq = irq; - } - if (!*port) - err = -ENODEV; - } - if (err) - goto out; - return dev; -out: - free_netdev(dev); - printk(KERN_WARNING "cs89x0: no cs8900 or cs8920 detected. 
Be sure to disable PnP with SETUP\n"); - return ERR_PTR(err); -} -#endif #endif #if defined(CONFIG_MACH_IXDP2351) @@ -369,36 +234,22 @@ writeword(unsigned long base_addr, int portno, u16 value) { __raw_writel(value, base_addr + (portno << 1)); } -#else -static u16 -readword(unsigned long base_addr, int portno) -{ - return inw(base_addr + portno); -} - -static void -writeword(unsigned long base_addr, int portno, u16 value) -{ - outw(value, base_addr + portno); -} #endif -static void -readwords(unsigned long base_addr, int portno, void *buf, int length) +static void readwords(struct net_local *lp, int portno, void *buf, int length) { u8 *buf8 = (u8 *)buf; do { u16 tmp16; - tmp16 = readword(base_addr, portno); + tmp16 = ioread16(lp->virt_addr + portno); *buf8++ = (u8)tmp16; *buf8++ = (u8)(tmp16 >> 8); } while (--length); } -static void -writewords(unsigned long base_addr, int portno, void *buf, int length) +static void writewords(struct net_local *lp, int portno, void *buf, int length) { u8 *buf8 = (u8 *)buf; @@ -407,32 +258,37 @@ writewords(unsigned long base_addr, int portno, void *buf, int length) tmp16 = *buf8++; tmp16 |= (*buf8++) << 8; - writeword(base_addr, portno, tmp16); + iowrite16(tmp16, lp->virt_addr + portno); } while (--length); } static u16 readreg(struct net_device *dev, u16 regno) { - writeword(dev->base_addr, ADD_PORT, regno); - return readword(dev->base_addr, DATA_PORT); + struct net_local *lp = netdev_priv(dev); + + iowrite16(regno, lp->virt_addr + ADD_PORT); + return ioread16(lp->virt_addr + DATA_PORT); } static void writereg(struct net_device *dev, u16 regno, u16 value) { - writeword(dev->base_addr, ADD_PORT, regno); - writeword(dev->base_addr, DATA_PORT, value); + struct net_local *lp = netdev_priv(dev); + + iowrite16(regno, lp->virt_addr + ADD_PORT); + iowrite16(value, lp->virt_addr + DATA_PORT); } static int __init wait_eeprom_ready(struct net_device *dev) { int timeout = jiffies; - /* check to see if the EEPROM is ready, a timeout is used - - just in case EEPROM is ready when SI_BUSY in the - PP_SelfST is clear */ - while(readreg(dev, PP_SelfST) & SI_BUSY) + /* check to see if the EEPROM is ready, + * a timeout is used just in case EEPROM is ready when + * SI_BUSY in the PP_SelfST is clear + */ + while (readreg(dev, PP_SelfST) & SI_BUSY) if (jiffies - timeout >= 40) return -1; return 0; @@ -443,17 +299,19 @@ get_eeprom_data(struct net_device *dev, int off, int len, int *buffer) { int i; - if (net_debug > 3) printk("EEPROM data from %x for %x:\n",off,len); + cs89_dbg(3, info, "EEPROM data from %x for %x:", off, len); for (i = 0; i < len; i++) { - if (wait_eeprom_ready(dev) < 0) return -1; + if (wait_eeprom_ready(dev) < 0) + return -1; /* Now send the EEPROM read command and EEPROM location to read */ writereg(dev, PP_EECMD, (off + i) | EEPROM_READ_CMD); - if (wait_eeprom_ready(dev) < 0) return -1; + if (wait_eeprom_ready(dev) < 0) + return -1; buffer[i] = readreg(dev, PP_EEData); - if (net_debug > 3) printk("%04x ", buffer[i]); + cs89_dbg(3, cont, " %04x", buffer[i]); } - if (net_debug > 3) printk("\n"); - return 0; + cs89_dbg(3, cont, "\n"); + return 0; } static int __init @@ -470,341 +328,52 @@ get_eeprom_cksum(int off, int len, int *buffer) return -1; } -#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling receive - used by netconsole and other diagnostic tools - * to allow network i/o with interrupts disabled. 
- */ -static void net_poll_controller(struct net_device *dev) -{ - disable_irq(dev->irq); - net_interrupt(dev->irq, dev); - enable_irq(dev->irq); -} -#endif - -static const struct net_device_ops net_ops = { - .ndo_open = net_open, - .ndo_stop = net_close, - .ndo_tx_timeout = net_timeout, - .ndo_start_xmit = net_send_packet, - .ndo_get_stats = net_get_stats, - .ndo_set_rx_mode = set_multicast_list, - .ndo_set_mac_address = set_mac_address, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = net_poll_controller, -#endif - .ndo_change_mtu = eth_change_mtu, - .ndo_validate_addr = eth_validate_addr, -}; - -/* This is the real probe routine. Linux has a history of friendly device - probes on the ISA bus. A good device probes avoids doing writes, and - verifies that the correct device exists and functions. - Return 0 on success. - */ - -static int __init -cs89x0_probe1(struct net_device *dev, unsigned long ioaddr, int modular) +static void +write_irq(struct net_device *dev, int chip_type, int irq) { - struct net_local *lp = netdev_priv(dev); - static unsigned version_printed; int i; - int tmp; - unsigned rev_type = 0; - int eeprom_buff[CHKSUM_LEN]; - int retval; - - /* Initialize the device structure. */ - if (!modular) { - memset(lp, 0, sizeof(*lp)); - spin_lock_init(&lp->lock); -#ifndef MODULE -#if ALLOW_DMA - if (g_cs89x0_dma) { - lp->use_dma = 1; - lp->dma = g_cs89x0_dma; - lp->dmasize = 16; /* Could make this an option... */ - } -#endif - lp->force = g_cs89x0_media__force; -#endif - - } - - /* Grab the region so we can find another board if autoIRQ fails. */ - /* WTF is going on here? */ - if (!request_region(ioaddr & ~3, NETCARD_IO_EXTENT, DRV_NAME)) { - printk(KERN_ERR "%s: request_region(0x%lx, 0x%x) failed\n", - DRV_NAME, ioaddr, NETCARD_IO_EXTENT); - retval = -EBUSY; - goto out1; - } - - /* if they give us an odd I/O address, then do ONE write to - the address port, to get it back to address zero, where we - expect to find the EISA signature word. An IO with a base of 0x3 - will skip the test for the ADD_PORT. */ - if (ioaddr & 1) { - if (net_debug > 1) - printk(KERN_INFO "%s: odd ioaddr 0x%lx\n", dev->name, ioaddr); - if ((ioaddr & 2) != 2) - if ((readword(ioaddr & ~3, ADD_PORT) & ADD_MASK) != ADD_SIG) { - printk(KERN_ERR "%s: bad signature 0x%x\n", - dev->name, readword(ioaddr & ~3, ADD_PORT)); - retval = -ENODEV; - goto out2; - } - } - - ioaddr &= ~3; - printk(KERN_DEBUG "PP_addr at %lx[%x]: 0x%x\n", - ioaddr, ADD_PORT, readword(ioaddr, ADD_PORT)); - writeword(ioaddr, ADD_PORT, PP_ChipID); - - tmp = readword(ioaddr, DATA_PORT); - if (tmp != CHIP_EISA_ID_SIG) { - printk(KERN_DEBUG "%s: incorrect signature at %lx[%x]: 0x%x!=" - CHIP_EISA_ID_SIG_STR "\n", - dev->name, ioaddr, DATA_PORT, tmp); - retval = -ENODEV; - goto out2; - } - - /* Fill in the 'dev' fields. */ - dev->base_addr = ioaddr; - - /* get the chip type */ - rev_type = readreg(dev, PRODUCT_ID_ADD); - lp->chip_type = rev_type &~ REVISON_BITS; - lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A'; - - /* Check the chip type and revision in order to set the correct send command - CS8920 revision C and CS8900 revision F can use the faster send. 
*/ - lp->send_cmd = TX_AFTER_381; - if (lp->chip_type == CS8900 && lp->chip_revision >= 'F') - lp->send_cmd = TX_NOW; - if (lp->chip_type != CS8900 && lp->chip_revision >= 'C') - lp->send_cmd = TX_NOW; - - if (net_debug && version_printed++ == 0) - printk(version); - - printk(KERN_INFO "%s: cs89%c0%s rev %c found at %#3lx ", - dev->name, - lp->chip_type==CS8900?'0':'2', - lp->chip_type==CS8920M?"M":"", - lp->chip_revision, - dev->base_addr); - - reset_chip(dev); - - /* Here we read the current configuration of the chip. If there - is no Extended EEPROM then the idea is to not disturb the chip - configuration, it should have been correctly setup by automatic - EEPROM read on reset. So, if the chip says it read the EEPROM - the driver will always do *something* instead of complain that - adapter_cnf is 0. */ - - - if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) == - (EEPROM_OK|EEPROM_PRESENT)) { - /* Load the MAC. */ - for (i=0; i < ETH_ALEN/2; i++) { - unsigned int Addr; - Addr = readreg(dev, PP_IA+i*2); - dev->dev_addr[i*2] = Addr & 0xFF; - dev->dev_addr[i*2+1] = Addr >> 8; - } - - /* Load the Adapter Configuration. - Note: Barring any more specific information from some - other source (ie EEPROM+Schematics), we would not know - how to operate a 10Base2 interface on the AUI port. - However, since we do read the status of HCB1 and use - settings that always result in calls to control_dc_dc(dev,0) - a BNC interface should work if the enable pin - (dc/dc converter) is on HCB1. It will be called AUI - however. */ - - lp->adapter_cnf = 0; - i = readreg(dev, PP_LineCTL); - /* Preserve the setting of the HCB1 pin. */ - if ((i & (HCB1 | HCB1_ENBL)) == (HCB1 | HCB1_ENBL)) - lp->adapter_cnf |= A_CNF_DC_DC_POLARITY; - /* Save the sqelch bit */ - if ((i & LOW_RX_SQUELCH) == LOW_RX_SQUELCH) - lp->adapter_cnf |= A_CNF_EXTND_10B_2 | A_CNF_LOW_RX_SQUELCH; - /* Check if the card is in 10Base-t only mode */ - if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == 0) - lp->adapter_cnf |= A_CNF_10B_T | A_CNF_MEDIA_10B_T; - /* Check if the card is in AUI only mode */ - if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUI_ONLY) - lp->adapter_cnf |= A_CNF_AUI | A_CNF_MEDIA_AUI; - /* Check if the card is in Auto mode. */ - if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUTO_AUI_10BASET) - lp->adapter_cnf |= A_CNF_AUI | A_CNF_10B_T | - A_CNF_MEDIA_AUI | A_CNF_MEDIA_10B_T | A_CNF_MEDIA_AUTO; - - if (net_debug > 1) - printk(KERN_INFO "%s: PP_LineCTL=0x%x, adapter_cnf=0x%x\n", - dev->name, i, lp->adapter_cnf); - - /* IRQ. Other chips already probe, see below. */ - if (lp->chip_type == CS8900) - lp->isa_config = readreg(dev, PP_CS8900_ISAINT) & INT_NO_MASK; - - printk( "[Cirrus EEPROM] "); - } - - printk("\n"); - - /* First check to see if an EEPROM is attached. */ - - if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0) - printk(KERN_WARNING "cs89x0: No EEPROM, relying on command line....\n"); - else if (get_eeprom_data(dev, START_EEPROM_DATA,CHKSUM_LEN,eeprom_buff) < 0) { - printk(KERN_WARNING "\ncs89x0: EEPROM read failed, relying on command line.\n"); - } else if (get_eeprom_cksum(START_EEPROM_DATA,CHKSUM_LEN,eeprom_buff) < 0) { - /* Check if the chip was able to read its own configuration starting - at 0 in the EEPROM*/ - if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) != - (EEPROM_OK|EEPROM_PRESENT)) - printk(KERN_WARNING "cs89x0: Extended EEPROM checksum bad and no Cirrus EEPROM, relying on command line\n"); - - } else { - /* This reads an extended EEPROM that is not documented - in the CS8900 datasheet. 
*/ - - /* get transmission control word but keep the autonegotiation bits */ - if (!lp->auto_neg_cnf) lp->auto_neg_cnf = eeprom_buff[AUTO_NEG_CNF_OFFSET/2]; - /* Store adapter configuration */ - if (!lp->adapter_cnf) lp->adapter_cnf = eeprom_buff[ADAPTER_CNF_OFFSET/2]; - /* Store ISA configuration */ - lp->isa_config = eeprom_buff[ISA_CNF_OFFSET/2]; - dev->mem_start = eeprom_buff[PACKET_PAGE_OFFSET/2] << 8; - - /* eeprom_buff has 32-bit ints, so we can't just memcpy it */ - /* store the initial memory base address */ - for (i = 0; i < ETH_ALEN/2; i++) { - dev->dev_addr[i*2] = eeprom_buff[i]; - dev->dev_addr[i*2+1] = eeprom_buff[i] >> 8; - } - if (net_debug > 1) - printk(KERN_DEBUG "%s: new adapter_cnf: 0x%x\n", - dev->name, lp->adapter_cnf); - } - - /* allow them to force multiple transceivers. If they force multiple, autosense */ - { - int count = 0; - if (lp->force & FORCE_RJ45) {lp->adapter_cnf |= A_CNF_10B_T; count++; } - if (lp->force & FORCE_AUI) {lp->adapter_cnf |= A_CNF_AUI; count++; } - if (lp->force & FORCE_BNC) {lp->adapter_cnf |= A_CNF_10B_2; count++; } - if (count > 1) {lp->adapter_cnf |= A_CNF_MEDIA_AUTO; } - else if (lp->force & FORCE_RJ45){lp->adapter_cnf |= A_CNF_MEDIA_10B_T; } - else if (lp->force & FORCE_AUI) {lp->adapter_cnf |= A_CNF_MEDIA_AUI; } - else if (lp->force & FORCE_BNC) {lp->adapter_cnf |= A_CNF_MEDIA_10B_2; } - } - - if (net_debug > 1) - printk(KERN_DEBUG "%s: after force 0x%x, adapter_cnf=0x%x\n", - dev->name, lp->force, lp->adapter_cnf); - - /* FIXME: We don't let you set dc-dc polarity or low RX squelch from the command line: add it here */ - - /* FIXME: We don't let you set the IMM bit from the command line: add it to lp->auto_neg_cnf here */ - - /* FIXME: we don't set the Ethernet address on the command line. Use - ifconfig IFACE hw ether AABBCCDDEEFF */ - - printk(KERN_INFO "cs89x0 media %s%s%s", - (lp->adapter_cnf & A_CNF_10B_T)?"RJ-45,":"", - (lp->adapter_cnf & A_CNF_AUI)?"AUI,":"", - (lp->adapter_cnf & A_CNF_10B_2)?"BNC,":""); - - lp->irq_map = 0xffff; - /* If this is a CS8900 then no pnp soft */ - if (lp->chip_type != CS8900 && - /* Check if the ISA IRQ has been set */ - (i = readreg(dev, PP_CS8920_ISAINT) & 0xff, - (i != 0 && i < CS8920_NO_INTS))) { - if (!dev->irq) - dev->irq = i; - } else { - i = lp->isa_config & INT_NO_MASK; + if (chip_type == CS8900) { #ifndef CONFIG_CS89x0_PLATFORM - if (lp->chip_type == CS8900) { -#ifdef CS89x0_NONISA_IRQ - i = cs8900_irq_map[0]; + /* Search the mapping table for the corresponding IRQ pin. */ + for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++) + if (cs8900_irq_map[i] == irq) + break; + /* Not found */ + if (i == ARRAY_SIZE(cs8900_irq_map)) + i = 3; #else - /* Translate the IRQ using the IRQ mapping table. */ - if (i >= ARRAY_SIZE(cs8900_irq_map)) - printk("\ncs89x0: invalid ISA interrupt number %d\n", i); - else - i = cs8900_irq_map[i]; - - lp->irq_map = CS8900_IRQ_MAP; /* fixed IRQ map for CS8900 */ - } else { - int irq_map_buff[IRQ_MAP_LEN/2]; - - if (get_eeprom_data(dev, IRQ_MAP_EEPROM_DATA, - IRQ_MAP_LEN/2, - irq_map_buff) >= 0) { - if ((irq_map_buff[0] & 0xff) == PNP_IRQ_FRMT) - lp->irq_map = (irq_map_buff[0]>>8) | (irq_map_buff[1] << 8); - } -#endif - } -#endif - if (!dev->irq) - dev->irq = i; - } - - printk(" IRQ %d", dev->irq); - -#if ALLOW_DMA - if (lp->use_dma) { - get_dma_channel(dev); - printk(", DMA %d", dev->dma); - } - else + /* INTRQ0 pin is used for interrupt generation. 
*/ + i = 0; #endif - { - printk(", programmed I/O"); + writereg(dev, PP_CS8900_ISAINT, i); + } else { + writereg(dev, PP_CS8920_ISAINT, irq); } - - /* print the ethernet address. */ - printk(", MAC %pM", dev->dev_addr); - - dev->netdev_ops = &net_ops; - dev->watchdog_timeo = HZ; - - printk("\n"); - if (net_debug) - printk("cs89x0_probe1() successful\n"); - - retval = register_netdev(dev); - if (retval) - goto out3; - return 0; -out3: - writeword(dev->base_addr, ADD_PORT, PP_ChipID); -out2: - release_region(ioaddr & ~3, NETCARD_IO_EXTENT); -out1: - return retval; } +static void +count_rx_errors(int status, struct net_device *dev) +{ + dev->stats.rx_errors++; + if (status & RX_RUNT) + dev->stats.rx_length_errors++; + if (status & RX_EXTRA_DATA) + dev->stats.rx_length_errors++; + if ((status & RX_CRC_ERROR) && !(status & (RX_EXTRA_DATA | RX_RUNT))) + /* per str 172 */ + dev->stats.rx_crc_errors++; + if (status & RX_DRIBBLE) + dev->stats.rx_frame_errors++; +} /********************************* * This page contains DMA routines -**********************************/ + *********************************/ #if ALLOW_DMA -#define dma_page_eq(ptr1, ptr2) ((long)(ptr1)>>17 == (long)(ptr2)>>17) +#define dma_page_eq(ptr1, ptr2) ((long)(ptr1) >> 17 == (long)(ptr2) >> 17) static void get_dma_channel(struct net_device *dev) @@ -833,11 +402,10 @@ write_dma(struct net_device *dev, int chip_type, int dma) struct net_local *lp = netdev_priv(dev); if ((lp->isa_config & ANY_ISA_DMA) == 0) return; - if (chip_type == CS8900) { - writereg(dev, PP_CS8900_ISADMA, dma-5); - } else { + if (chip_type == CS8900) + writereg(dev, PP_CS8900_ISADMA, dma - 5); + else writereg(dev, PP_CS8920_ISADMA, dma); - } } static void @@ -847,18 +415,15 @@ set_dma_cfg(struct net_device *dev) if (lp->use_dma) { if ((lp->isa_config & ANY_ISA_DMA) == 0) { - if (net_debug > 3) - printk("set_dma_cfg(): no DMA\n"); + cs89_dbg(3, err, "set_dma_cfg(): no DMA\n"); return; } if (lp->isa_config & ISA_RxDMA) { lp->curr_rx_cfg |= RX_DMA_ONLY; - if (net_debug > 3) - printk("set_dma_cfg(): RX_DMA_ONLY\n"); + cs89_dbg(3, info, "set_dma_cfg(): RX_DMA_ONLY\n"); } else { lp->curr_rx_cfg |= AUTO_RX_DMA; /* not that we support it... */ - if (net_debug > 3) - printk("set_dma_cfg(): AUTO_RX_DMA\n"); + cs89_dbg(3, info, "set_dma_cfg(): AUTO_RX_DMA\n"); } } } @@ -868,7 +433,7 @@ dma_bufcfg(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); if (lp->use_dma) - return (lp->isa_config & ANY_ISA_DMA)? RX_DMA_ENBL : 0; + return (lp->isa_config & ANY_ISA_DMA) ? RX_DMA_ENBL : 0; else return 0; } @@ -898,13 +463,13 @@ dma_rx(struct net_device *dev) int status, length; unsigned char *bp = lp->rx_dma_ptr; - status = bp[0] + (bp[1]<<8); - length = bp[2] + (bp[3]<<8); + status = bp[0] + (bp[1] << 8); + length = bp[2] + (bp[3] << 8); bp += 4; - if (net_debug > 5) { - printk( "%s: receiving DMA packet at %lx, status %x, length %x\n", - dev->name, (unsigned long)bp, status, length); - } + + cs89_dbg(5, debug, "%s: receiving DMA packet at %lx, status %x, length %x\n", + dev->name, (unsigned long)bp, status, length); + if ((status & RX_OK) == 0) { count_rx_errors(status, dev); goto skip_this_frame; @@ -913,14 +478,16 @@ dma_rx(struct net_device *dev) /* Malloc up new buffer. 
*/ skb = netdev_alloc_skb(dev, length + 2); if (skb == NULL) { - if (net_debug) /* I don't think we want to do this to a stressed system */ - printk("%s: Memory squeeze, dropping packet.\n", dev->name); + /* I don't think we want to do this to a stressed system */ + cs89_dbg(0, err, "%s: Memory squeeze, dropping packet\n", + dev->name); dev->stats.rx_dropped++; /* AKPM: advance bp to the next frame */ skip_this_frame: bp += (length + 3) & ~3; - if (bp >= lp->end_dma_buff) bp -= lp->dmasize*1024; + if (bp >= lp->end_dma_buff) + bp -= lp->dmasize * 1024; lp->rx_dma_ptr = bp; return; } @@ -928,63 +495,38 @@ skip_this_frame: if (bp + length > lp->end_dma_buff) { int semi_cnt = lp->end_dma_buff - bp; - memcpy(skb_put(skb,semi_cnt), bp, semi_cnt); - memcpy(skb_put(skb,length - semi_cnt), lp->dma_buff, + memcpy(skb_put(skb, semi_cnt), bp, semi_cnt); + memcpy(skb_put(skb, length - semi_cnt), lp->dma_buff, length - semi_cnt); } else { - memcpy(skb_put(skb,length), bp, length); + memcpy(skb_put(skb, length), bp, length); } bp += (length + 3) & ~3; - if (bp >= lp->end_dma_buff) bp -= lp->dmasize*1024; + if (bp >= lp->end_dma_buff) + bp -= lp->dmasize*1024; lp->rx_dma_ptr = bp; - if (net_debug > 3) { - printk( "%s: received %d byte DMA packet of type %x\n", - dev->name, length, - (skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]); - } - skb->protocol=eth_type_trans(skb,dev); + cs89_dbg(3, info, "%s: received %d byte DMA packet of type %x\n", + dev->name, length, + ((skb->data[ETH_ALEN + ETH_ALEN] << 8) | + skb->data[ETH_ALEN + ETH_ALEN + 1])); + + skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += length; } -#endif /* ALLOW_DMA */ - -static void __init reset_chip(struct net_device *dev) +static void release_dma_buff(struct net_local *lp) { -#if !defined(CONFIG_MACH_MX31ADS) -#if !defined(CS89x0_NONISA_IRQ) - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; -#endif /* CS89x0_NONISA_IRQ */ - int reset_start_time; - - writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET); - - /* wait 30 ms */ - msleep(30); - -#if !defined(CS89x0_NONISA_IRQ) - if (lp->chip_type != CS8900) { - /* Hardware problem requires PNP registers to be reconfigured after a reset */ - writeword(ioaddr, ADD_PORT, PP_CS8920_ISAINT); - outb(dev->irq, ioaddr + DATA_PORT); - outb(0, ioaddr + DATA_PORT + 1); - - writeword(ioaddr, ADD_PORT, PP_CS8920_ISAMemB); - outb((dev->mem_start >> 16) & 0xff, ioaddr + DATA_PORT); - outb((dev->mem_start >> 8) & 0xff, ioaddr + DATA_PORT + 1); + if (lp->dma_buff) { + free_pages((unsigned long)(lp->dma_buff), + get_order(lp->dmasize * 1024)); + lp->dma_buff = NULL; } -#endif /* CS89x0_NONISA_IRQ */ - - /* Wait until the chip is reset */ - reset_start_time = jiffies; - while( (readreg(dev, PP_SelfST) & INIT_DONE) == 0 && jiffies - reset_start_time < 2) - ; -#endif /* !CONFIG_MACH_MX31ADS */ } +#endif /* ALLOW_DMA */ static void control_dc_dc(struct net_device *dev, int on_not_off) @@ -993,8 +535,9 @@ control_dc_dc(struct net_device *dev, int on_not_off) unsigned int selfcontrol; int timenow = jiffies; /* control the DC to DC convertor in the SelfControl register. - Note: This is hooked up to a general purpose pin, might not - always be a DC to DC convertor. */ + * Note: This is hooked up to a general purpose pin, might not + * always be a DC to DC convertor. 
+ */ selfcontrol = HCB1_ENBL; /* Enable the HCB1 bit as an output */ if (((lp->adapter_cnf & A_CNF_DC_DC_POLARITY) != 0) ^ on_not_off) @@ -1008,6 +551,49 @@ control_dc_dc(struct net_device *dev, int on_not_off) ; } +/* send a test packet - return true if carrier bits are ok */ +static int +send_test_pkt(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + char test_packet[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 46, /* A 46 in network order */ + 0, 0, /* DSAP=0 & SSAP=0 fields */ + 0xf3, 0 /* Control (Test Req + P bit set) */ + }; + long timenow = jiffies; + + writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON); + + memcpy(test_packet, dev->dev_addr, ETH_ALEN); + memcpy(test_packet + ETH_ALEN, dev->dev_addr, ETH_ALEN); + + iowrite16(TX_AFTER_ALL, lp->virt_addr + TX_CMD_PORT); + iowrite16(ETH_ZLEN, lp->virt_addr + TX_LEN_PORT); + + /* Test to see if the chip has allocated memory for the packet */ + while (jiffies - timenow < 5) + if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW) + break; + if (jiffies - timenow >= 5) + return 0; /* this shouldn't happen */ + + /* Write the contents of the packet */ + writewords(lp, TX_FRAME_PORT, test_packet, (ETH_ZLEN + 1) >> 1); + + cs89_dbg(1, debug, "Sending test packet "); + /* wait a couple of jiffies for packet to be received */ + for (timenow = jiffies; jiffies - timenow < 3;) + ; + if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) { + cs89_dbg(1, cont, "succeeded\n"); + return 1; + } + cs89_dbg(1, cont, "failed\n"); + return 0; +} + #define DETECTED_NONE 0 #define DETECTED_RJ45H 1 #define DETECTED_RJ45F 2 @@ -1021,40 +607,46 @@ detect_tp(struct net_device *dev) int timenow = jiffies; int fdx; - if (net_debug > 1) printk("%s: Attempting TP\n", dev->name); + cs89_dbg(1, debug, "%s: Attempting TP\n", dev->name); - /* If connected to another full duplex capable 10-Base-T card the link pulses - seem to be lost when the auto detect bit in the LineCTL is set. - To overcome this the auto detect bit will be cleared whilst testing the - 10-Base-T interface. This would not be necessary for the sparrow chip but - is simpler to do it anyway. */ - writereg(dev, PP_LineCTL, lp->linectl &~ AUI_ONLY); + /* If connected to another full duplex capable 10-Base-T card + * the link pulses seem to be lost when the auto detect bit in + * the LineCTL is set. To overcome this the auto detect bit will + * be cleared whilst testing the 10-Base-T interface. This would + * not be necessary for the sparrow chip but is simpler to do it + * anyway. 
+ */ + writereg(dev, PP_LineCTL, lp->linectl & ~AUI_ONLY); control_dc_dc(dev, 0); - /* Delay for the hardware to work out if the TP cable is present - 150ms */ - for (timenow = jiffies; jiffies - timenow < 15; ) - ; + /* Delay for the hardware to work out if the TP cable is present + * - 150ms + */ + for (timenow = jiffies; jiffies - timenow < 15;) + ; if ((readreg(dev, PP_LineST) & LINK_OK) == 0) return DETECTED_NONE; if (lp->chip_type == CS8900) { - switch (lp->force & 0xf0) { + switch (lp->force & 0xf0) { #if 0 - case FORCE_AUTO: - printk("%s: cs8900 doesn't autonegotiate\n",dev->name); - return DETECTED_NONE; + case FORCE_AUTO: + pr_info("%s: cs8900 doesn't autonegotiate\n", + dev->name); + return DETECTED_NONE; #endif - /* CS8900 doesn't support AUTO, change to HALF*/ - case FORCE_AUTO: + /* CS8900 doesn't support AUTO, change to HALF*/ + case FORCE_AUTO: lp->force &= ~FORCE_AUTO; - lp->force |= FORCE_HALF; + lp->force |= FORCE_HALF; break; case FORCE_HALF: break; - case FORCE_FULL: - writereg(dev, PP_TestCTL, readreg(dev, PP_TestCTL) | FDX_8900); + case FORCE_FULL: + writereg(dev, PP_TestCTL, + readreg(dev, PP_TestCTL) | FDX_8900); break; - } + } fdx = readreg(dev, PP_TestCTL) & FDX_8900; } else { switch (lp->force & 0xf0) { @@ -1067,15 +659,15 @@ detect_tp(struct net_device *dev) case FORCE_FULL: lp->auto_neg_cnf = RE_NEG_NOW | ALLOW_FDX; break; - } + } writereg(dev, PP_AutoNegCTL, lp->auto_neg_cnf & AUTO_NEG_MASK); if ((lp->auto_neg_cnf & AUTO_NEG_BITS) == AUTO_NEG_ENABLE) { - printk(KERN_INFO "%s: negotiating duplex...\n",dev->name); + pr_info("%s: negotiating duplex...\n", dev->name); while (readreg(dev, PP_AutoNegST) & AUTO_NEG_BUSY) { if (jiffies - timenow > 4000) { - printk(KERN_ERR "**** Full / half duplex auto-negotiation timed out ****\n"); + pr_err("**** Full / half duplex auto-negotiation timed out ****\n"); break; } } @@ -1088,56 +680,31 @@ detect_tp(struct net_device *dev) return DETECTED_RJ45H; } -/* send a test packet - return true if carrier bits are ok */ static int -send_test_pkt(struct net_device *dev) +detect_bnc(struct net_device *dev) { - char test_packet[] = { 0,0,0,0,0,0, 0,0,0,0,0,0, - 0, 46, /* A 46 in network order */ - 0, 0, /* DSAP=0 & SSAP=0 fields */ - 0xf3, 0 /* Control (Test Req + P bit set) */ }; - long timenow = jiffies; - - writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON); - - memcpy(test_packet, dev->dev_addr, ETH_ALEN); - memcpy(test_packet+ETH_ALEN, dev->dev_addr, ETH_ALEN); - - writeword(dev->base_addr, TX_CMD_PORT, TX_AFTER_ALL); - writeword(dev->base_addr, TX_LEN_PORT, ETH_ZLEN); + struct net_local *lp = netdev_priv(dev); - /* Test to see if the chip has allocated memory for the packet */ - while (jiffies - timenow < 5) - if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW) - break; - if (jiffies - timenow >= 5) - return 0; /* this shouldn't happen */ + cs89_dbg(1, debug, "%s: Attempting BNC\n", dev->name); + control_dc_dc(dev, 1); - /* Write the contents of the packet */ - writewords(dev->base_addr, TX_FRAME_PORT,test_packet,(ETH_ZLEN+1) >>1); + writereg(dev, PP_LineCTL, (lp->linectl & ~AUTO_AUI_10BASET) | AUI_ONLY); - if (net_debug > 1) printk("Sending test packet "); - /* wait a couple of jiffies for packet to be received */ - for (timenow = jiffies; jiffies - timenow < 3; ) - ; - if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) { - if (net_debug > 1) printk("succeeded\n"); - return 1; - } - if (net_debug > 1) printk("failed\n"); - return 0; + if (send_test_pkt(dev)) + return DETECTED_BNC; + else + return 
DETECTED_NONE; } - static int detect_aui(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); - if (net_debug > 1) printk("%s: Attempting AUI\n", dev->name); + cs89_dbg(1, debug, "%s: Attempting AUI\n", dev->name); control_dc_dc(dev, 0); - writereg(dev, PP_LineCTL, (lp->linectl &~ AUTO_AUI_10BASET) | AUI_ONLY); + writereg(dev, PP_LineCTL, (lp->linectl & ~AUTO_AUI_10BASET) | AUI_ONLY); if (send_test_pkt(dev)) return DETECTED_AUI; @@ -1145,45 +712,154 @@ detect_aui(struct net_device *dev) return DETECTED_NONE; } -static int -detect_bnc(struct net_device *dev) +/* We have a good packet(s), get it/them out of the buffers. */ +static void +net_rx(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); + struct sk_buff *skb; + int status, length; - if (net_debug > 1) printk("%s: Attempting BNC\n", dev->name); - control_dc_dc(dev, 1); + status = ioread16(lp->virt_addr + RX_FRAME_PORT); + length = ioread16(lp->virt_addr + RX_FRAME_PORT); - writereg(dev, PP_LineCTL, (lp->linectl &~ AUTO_AUI_10BASET) | AUI_ONLY); + if ((status & RX_OK) == 0) { + count_rx_errors(status, dev); + return; + } - if (send_test_pkt(dev)) - return DETECTED_BNC; - else - return DETECTED_NONE; + /* Malloc up new buffer. */ + skb = netdev_alloc_skb(dev, length + 2); + if (skb == NULL) { +#if 0 /* Again, this seems a cruel thing to do */ + pr_warn("%s: Memory squeeze, dropping packet\n", dev->name); +#endif + dev->stats.rx_dropped++; + return; + } + skb_reserve(skb, 2); /* longword align L3 header */ + + readwords(lp, RX_FRAME_PORT, skb_put(skb, length), length >> 1); + if (length & 1) + skb->data[length-1] = ioread16(lp->virt_addr + RX_FRAME_PORT); + + cs89_dbg(3, debug, "%s: received %d byte packet of type %x\n", + dev->name, length, + (skb->data[ETH_ALEN + ETH_ALEN] << 8) | + skb->data[ETH_ALEN + ETH_ALEN + 1]); + + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += length; } +/* The typical workload of the driver: + * Handle the network interface interrupts. + */ -static void -write_irq(struct net_device *dev, int chip_type, int irq) +static irqreturn_t net_interrupt(int irq, void *dev_id) { - int i; + struct net_device *dev = dev_id; + struct net_local *lp; + int status; + int handled = 0; - if (chip_type == CS8900) { -#ifndef CONFIG_CS89x0_PLATFORM - /* Search the mapping table for the corresponding IRQ pin. */ - for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++) - if (cs8900_irq_map[i] == irq) - break; - /* Not found */ - if (i == ARRAY_SIZE(cs8900_irq_map)) - i = 3; -#else - /* INTRQ0 pin is used for interrupt generation. */ - i = 0; + lp = netdev_priv(dev); + + /* we MUST read all the events out of the ISQ, otherwise we'll never + * get interrupted again. As a consequence, we can't have any limit + * on the number of times we loop in the interrupt handler. The + * hardware guarantees that eventually we'll run out of events. Of + * course, if you're on a slow machine, and packets are arriving + * faster than you can read them off, you're screwed. Hasta la + * vista, baby! + */ + while ((status = ioread16(lp->virt_addr + ISQ_PORT))) { + cs89_dbg(4, debug, "%s: event=%04x\n", dev->name, status); + handled = 1; + switch (status & ISQ_EVENT_MASK) { + case ISQ_RECEIVER_EVENT: + /* Got a packet(s). */ + net_rx(dev); + break; + case ISQ_TRANSMITTER_EVENT: + dev->stats.tx_packets++; + netif_wake_queue(dev); /* Inform upper layers. 
*/ + if ((status & (TX_OK | + TX_LOST_CRS | + TX_SQE_ERROR | + TX_LATE_COL | + TX_16_COL)) != TX_OK) { + if ((status & TX_OK) == 0) + dev->stats.tx_errors++; + if (status & TX_LOST_CRS) + dev->stats.tx_carrier_errors++; + if (status & TX_SQE_ERROR) + dev->stats.tx_heartbeat_errors++; + if (status & TX_LATE_COL) + dev->stats.tx_window_errors++; + if (status & TX_16_COL) + dev->stats.tx_aborted_errors++; + } + break; + case ISQ_BUFFER_EVENT: + if (status & READY_FOR_TX) { + /* we tried to transmit a packet earlier, + * but inexplicably ran out of buffers. + * That shouldn't happen since we only ever + * load one packet. Shrug. Do the right + * thing anyway. + */ + netif_wake_queue(dev); /* Inform upper layers. */ + } + if (status & TX_UNDERRUN) { + cs89_dbg(0, err, "%s: transmit underrun\n", + dev->name); + lp->send_underrun++; + if (lp->send_underrun == 3) + lp->send_cmd = TX_AFTER_381; + else if (lp->send_underrun == 6) + lp->send_cmd = TX_AFTER_ALL; + /* transmit cycle is done, although + * frame wasn't transmitted - this + * avoids having to wait for the upper + * layers to timeout on us, in the + * event of a tx underrun + */ + netif_wake_queue(dev); /* Inform upper layers. */ + } +#if ALLOW_DMA + if (lp->use_dma && (status & RX_DMA)) { + int count = readreg(dev, PP_DmaFrameCnt); + while (count) { + cs89_dbg(5, debug, + "%s: receiving %d DMA frames\n", + dev->name, count); + if (count > 1) + cs89_dbg(2, debug, + "%s: receiving %d DMA frames\n", + dev->name, count); + dma_rx(dev); + if (--count == 0) + count = readreg(dev, PP_DmaFrameCnt); + if (count > 0) + cs89_dbg(2, debug, + "%s: continuing with %d DMA frames\n", + dev->name, count); + } + } #endif - writereg(dev, PP_CS8900_ISAINT, i); - } else { - writereg(dev, PP_CS8920_ISAINT, irq); + break; + case ISQ_RX_MISS_EVENT: + dev->stats.rx_missed_errors += (status >> 6); + break; + case ISQ_TX_COL_EVENT: + dev->stats.collisions += (status >> 6); + break; + } } + return IRQ_RETVAL(handled); } /* Open/initialize the board. This is called (in the current kernel) @@ -1192,7 +868,7 @@ write_irq(struct net_device *dev, int chip_type, int irq) This routine should set everything up anew at each open, even registers that "should" only need to be set once at boot, so that there is non-reboot way to recover if something goes wrong. - */ +*/ /* AKPM: do we need to do any locking here? */ @@ -1208,14 +884,15 @@ net_open(struct net_device *dev) /* Allow interrupts to be generated by the chip */ /* Cirrus' release had this: */ #if 0 - writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ ); + writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) | ENABLE_IRQ); #endif /* And 2.3.47 had this: */ writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON); for (i = 2; i < CS8920_NO_INTS; i++) { if ((1 << i) & lp->irq_map) { - if (request_irq(i, net_interrupt, 0, dev->name, dev) == 0) { + if (request_irq(i, net_interrupt, 0, dev->name, + dev) == 0) { dev->irq = i; write_irq(dev, lp->chip_type, i); /* writereg(dev, PP_BufCFG, GENERATE_SW_INTERRUPT); */ @@ -1226,23 +903,21 @@ net_open(struct net_device *dev) if (i >= CS8920_NO_INTS) { writereg(dev, PP_BusCTL, 0); /* disable interrupts. 
*/ - printk(KERN_ERR "cs89x0: can't get an interrupt\n"); + pr_err("can't get an interrupt\n"); ret = -EAGAIN; goto bad_out; } - } - else - { + } else { #if !defined(CS89x0_NONISA_IRQ) && !defined(CONFIG_CS89x0_PLATFORM) if (((1 << dev->irq) & lp->irq_map) == 0) { - printk(KERN_ERR "%s: IRQ %d is not in our map of allowable IRQs, which is %x\n", - dev->name, dev->irq, lp->irq_map); + pr_err("%s: IRQ %d is not in our map of allowable IRQs, which is %x\n", + dev->name, dev->irq, lp->irq_map); ret = -EAGAIN; goto bad_out; } #endif /* FIXME: Cirrus' release had this: */ - writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ ); + writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ); /* And 2.3.47 had this: */ #if 0 writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON); @@ -1250,147 +925,168 @@ net_open(struct net_device *dev) write_irq(dev, lp->chip_type, dev->irq); ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev); if (ret) { - printk(KERN_ERR "cs89x0: request_irq(%d) failed\n", dev->irq); + pr_err("request_irq(%d) failed\n", dev->irq); goto bad_out; } } #if ALLOW_DMA - if (lp->use_dma) { - if (lp->isa_config & ANY_ISA_DMA) { - unsigned long flags; - lp->dma_buff = (unsigned char *)__get_dma_pages(GFP_KERNEL, - get_order(lp->dmasize * 1024)); - - if (!lp->dma_buff) { - printk(KERN_ERR "%s: cannot get %dK memory for DMA\n", dev->name, lp->dmasize); - goto release_irq; - } - if (net_debug > 1) { - printk( "%s: dma %lx %lx\n", - dev->name, - (unsigned long)lp->dma_buff, - (unsigned long)isa_virt_to_bus(lp->dma_buff)); - } - if ((unsigned long) lp->dma_buff >= MAX_DMA_ADDRESS || - !dma_page_eq(lp->dma_buff, lp->dma_buff+lp->dmasize*1024-1)) { - printk(KERN_ERR "%s: not usable as DMA buffer\n", dev->name); - goto release_irq; - } - memset(lp->dma_buff, 0, lp->dmasize * 1024); /* Why? */ - if (request_dma(dev->dma, dev->name)) { - printk(KERN_ERR "%s: cannot get dma channel %d\n", dev->name, dev->dma); - goto release_irq; - } - write_dma(dev, lp->chip_type, dev->dma); - lp->rx_dma_ptr = lp->dma_buff; - lp->end_dma_buff = lp->dma_buff + lp->dmasize*1024; - spin_lock_irqsave(&lp->lock, flags); - disable_dma(dev->dma); - clear_dma_ff(dev->dma); - set_dma_mode(dev->dma, DMA_RX_MODE); /* auto_init as well */ - set_dma_addr(dev->dma, isa_virt_to_bus(lp->dma_buff)); - set_dma_count(dev->dma, lp->dmasize*1024); - enable_dma(dev->dma); - spin_unlock_irqrestore(&lp->lock, flags); + if (lp->use_dma && (lp->isa_config & ANY_ISA_DMA)) { + unsigned long flags; + lp->dma_buff = (unsigned char *)__get_dma_pages(GFP_KERNEL, + get_order(lp->dmasize * 1024)); + if (!lp->dma_buff) { + pr_err("%s: cannot get %dK memory for DMA\n", + dev->name, lp->dmasize); + goto release_irq; + } + cs89_dbg(1, debug, "%s: dma %lx %lx\n", + dev->name, + (unsigned long)lp->dma_buff, + (unsigned long)isa_virt_to_bus(lp->dma_buff)); + if ((unsigned long)lp->dma_buff >= MAX_DMA_ADDRESS || + !dma_page_eq(lp->dma_buff, + lp->dma_buff + lp->dmasize * 1024 - 1)) { + pr_err("%s: not usable as DMA buffer\n", dev->name); + goto release_irq; } + memset(lp->dma_buff, 0, lp->dmasize * 1024); /* Why? 
*/ + if (request_dma(dev->dma, dev->name)) { + pr_err("%s: cannot get dma channel %d\n", + dev->name, dev->dma); + goto release_irq; + } + write_dma(dev, lp->chip_type, dev->dma); + lp->rx_dma_ptr = lp->dma_buff; + lp->end_dma_buff = lp->dma_buff + lp->dmasize * 1024; + spin_lock_irqsave(&lp->lock, flags); + disable_dma(dev->dma); + clear_dma_ff(dev->dma); + set_dma_mode(dev->dma, DMA_RX_MODE); /* auto_init as well */ + set_dma_addr(dev->dma, isa_virt_to_bus(lp->dma_buff)); + set_dma_count(dev->dma, lp->dmasize * 1024); + enable_dma(dev->dma); + spin_unlock_irqrestore(&lp->lock, flags); } #endif /* ALLOW_DMA */ /* set the Ethernet address */ - for (i=0; i < ETH_ALEN/2; i++) - writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8)); + for (i = 0; i < ETH_ALEN / 2; i++) + writereg(dev, PP_IA + i * 2, + (dev->dev_addr[i * 2] | + (dev->dev_addr[i * 2 + 1] << 8))); /* while we're testing the interface, leave interrupts disabled */ writereg(dev, PP_BusCTL, MEMORY_ON); /* Set the LineCTL quintuplet based on adapter configuration read from EEPROM */ - if ((lp->adapter_cnf & A_CNF_EXTND_10B_2) && (lp->adapter_cnf & A_CNF_LOW_RX_SQUELCH)) - lp->linectl = LOW_RX_SQUELCH; + if ((lp->adapter_cnf & A_CNF_EXTND_10B_2) && + (lp->adapter_cnf & A_CNF_LOW_RX_SQUELCH)) + lp->linectl = LOW_RX_SQUELCH; else - lp->linectl = 0; - - /* check to make sure that they have the "right" hardware available */ - switch(lp->adapter_cnf & A_CNF_MEDIA_TYPE) { - case A_CNF_MEDIA_10B_T: result = lp->adapter_cnf & A_CNF_10B_T; break; - case A_CNF_MEDIA_AUI: result = lp->adapter_cnf & A_CNF_AUI; break; - case A_CNF_MEDIA_10B_2: result = lp->adapter_cnf & A_CNF_10B_2; break; - default: result = lp->adapter_cnf & (A_CNF_10B_T | A_CNF_AUI | A_CNF_10B_2); - } - if (!result) { - printk(KERN_ERR "%s: EEPROM is configured for unavailable media\n", dev->name); + lp->linectl = 0; + + /* check to make sure that they have the "right" hardware available */ + switch (lp->adapter_cnf & A_CNF_MEDIA_TYPE) { + case A_CNF_MEDIA_10B_T: + result = lp->adapter_cnf & A_CNF_10B_T; + break; + case A_CNF_MEDIA_AUI: + result = lp->adapter_cnf & A_CNF_AUI; + break; + case A_CNF_MEDIA_10B_2: + result = lp->adapter_cnf & A_CNF_10B_2; + break; + default: + result = lp->adapter_cnf & (A_CNF_10B_T | + A_CNF_AUI | + A_CNF_10B_2); + } + if (!result) { + pr_err("%s: EEPROM is configured for unavailable media\n", + dev->name); release_dma: #if ALLOW_DMA free_dma(dev->dma); release_irq: release_dma_buff(lp); #endif - writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON)); - free_irq(dev->irq, dev); + writereg(dev, PP_LineCTL, + readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON)); + free_irq(dev->irq, dev); ret = -EAGAIN; goto bad_out; } - /* set the hardware to the configured choice */ - switch(lp->adapter_cnf & A_CNF_MEDIA_TYPE) { + /* set the hardware to the configured choice */ + switch (lp->adapter_cnf & A_CNF_MEDIA_TYPE) { case A_CNF_MEDIA_10B_T: - result = detect_tp(dev); - if (result==DETECTED_NONE) { - printk(KERN_WARNING "%s: 10Base-T (RJ-45) has no cable\n", dev->name); - if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */ - result = DETECTED_RJ45H; /* Yes! I don't care if I see a link pulse */ - } + result = detect_tp(dev); + if (result == DETECTED_NONE) { + pr_warn("%s: 10Base-T (RJ-45) has no cable\n", + dev->name); + if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */ + result = DETECTED_RJ45H; /* Yes! 
I don't care if I see a link pulse */ + } break; case A_CNF_MEDIA_AUI: - result = detect_aui(dev); - if (result==DETECTED_NONE) { - printk(KERN_WARNING "%s: 10Base-5 (AUI) has no cable\n", dev->name); - if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */ - result = DETECTED_AUI; /* Yes! I don't care if I see a carrrier */ - } + result = detect_aui(dev); + if (result == DETECTED_NONE) { + pr_warn("%s: 10Base-5 (AUI) has no cable\n", dev->name); + if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */ + result = DETECTED_AUI; /* Yes! I don't care if I see a carrrier */ + } break; case A_CNF_MEDIA_10B_2: - result = detect_bnc(dev); - if (result==DETECTED_NONE) { - printk(KERN_WARNING "%s: 10Base-2 (BNC) has no cable\n", dev->name); - if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */ - result = DETECTED_BNC; /* Yes! I don't care if I can xmit a packet */ - } + result = detect_bnc(dev); + if (result == DETECTED_NONE) { + pr_warn("%s: 10Base-2 (BNC) has no cable\n", dev->name); + if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */ + result = DETECTED_BNC; /* Yes! I don't care if I can xmit a packet */ + } break; case A_CNF_MEDIA_AUTO: writereg(dev, PP_LineCTL, lp->linectl | AUTO_AUI_10BASET); - if (lp->adapter_cnf & A_CNF_10B_T) - if ((result = detect_tp(dev)) != DETECTED_NONE) + if (lp->adapter_cnf & A_CNF_10B_T) { + result = detect_tp(dev); + if (result != DETECTED_NONE) break; - if (lp->adapter_cnf & A_CNF_AUI) - if ((result = detect_aui(dev)) != DETECTED_NONE) + } + if (lp->adapter_cnf & A_CNF_AUI) { + result = detect_aui(dev); + if (result != DETECTED_NONE) break; - if (lp->adapter_cnf & A_CNF_10B_2) - if ((result = detect_bnc(dev)) != DETECTED_NONE) + } + if (lp->adapter_cnf & A_CNF_10B_2) { + result = detect_bnc(dev); + if (result != DETECTED_NONE) break; - printk(KERN_ERR "%s: no media detected\n", dev->name); + } + pr_err("%s: no media detected\n", dev->name); goto release_dma; } - switch(result) { + switch (result) { case DETECTED_NONE: - printk(KERN_ERR "%s: no network cable attached to configured media\n", dev->name); + pr_err("%s: no network cable attached to configured media\n", + dev->name); goto release_dma; case DETECTED_RJ45H: - printk(KERN_INFO "%s: using half-duplex 10Base-T (RJ-45)\n", dev->name); + pr_info("%s: using half-duplex 10Base-T (RJ-45)\n", dev->name); break; case DETECTED_RJ45F: - printk(KERN_INFO "%s: using full-duplex 10Base-T (RJ-45)\n", dev->name); + pr_info("%s: using full-duplex 10Base-T (RJ-45)\n", dev->name); break; case DETECTED_AUI: - printk(KERN_INFO "%s: using 10Base-5 (AUI)\n", dev->name); + pr_info("%s: using 10Base-5 (AUI)\n", dev->name); break; case DETECTED_BNC: - printk(KERN_INFO "%s: using 10Base-2 (BNC)\n", dev->name); + pr_info("%s: using 10Base-2 (BNC)\n", dev->name); break; } /* Turn on both receive and transmit operations */ - writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON); + writereg(dev, PP_LineCTL, + readreg(dev, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON); /* Receive only error free packets addressed to this card */ lp->rx_mode = 0; @@ -1405,358 +1101,653 @@ release_irq: #endif writereg(dev, PP_RxCFG, lp->curr_rx_cfg); - writereg(dev, PP_TxCFG, TX_LOST_CRS_ENBL | TX_SQE_ERROR_ENBL | TX_OK_ENBL | - TX_LATE_COL_ENBL | TX_JBR_ENBL | TX_ANY_COL_ENBL | TX_16_COL_ENBL); + writereg(dev, PP_TxCFG, (TX_LOST_CRS_ENBL | + TX_SQE_ERROR_ENBL | + TX_OK_ENBL | + TX_LATE_COL_ENBL | + TX_JBR_ENBL | + TX_ANY_COL_ENBL | + TX_16_COL_ENBL)); - 
writereg(dev, PP_BufCFG, READY_FOR_TX_ENBL | RX_MISS_COUNT_OVRFLOW_ENBL | + writereg(dev, PP_BufCFG, (READY_FOR_TX_ENBL | + RX_MISS_COUNT_OVRFLOW_ENBL | #if ALLOW_DMA - dma_bufcfg(dev) | + dma_bufcfg(dev) | #endif - TX_COL_COUNT_OVRFLOW_ENBL | TX_UNDERRUN_ENBL); + TX_COL_COUNT_OVRFLOW_ENBL | + TX_UNDERRUN_ENBL)); /* now that we've got our act together, enable everything */ - writereg(dev, PP_BusCTL, ENABLE_IRQ - | (dev->mem_start?MEMORY_ON : 0) /* turn memory on */ + writereg(dev, PP_BusCTL, (ENABLE_IRQ + | (dev->mem_start ? MEMORY_ON : 0) /* turn memory on */ #if ALLOW_DMA - | dma_busctl(dev) + | dma_busctl(dev) #endif - ); - netif_start_queue(dev); - if (net_debug > 1) - printk("cs89x0: net_open() succeeded\n"); + )); + netif_start_queue(dev); + cs89_dbg(1, debug, "net_open() succeeded\n"); return 0; bad_out: return ret; } +/* The inverse routine to net_open(). */ +static int +net_close(struct net_device *dev) +{ +#if ALLOW_DMA + struct net_local *lp = netdev_priv(dev); +#endif + + netif_stop_queue(dev); + + writereg(dev, PP_RxCFG, 0); + writereg(dev, PP_TxCFG, 0); + writereg(dev, PP_BufCFG, 0); + writereg(dev, PP_BusCTL, 0); + + free_irq(dev->irq, dev); + +#if ALLOW_DMA + if (lp->use_dma && lp->dma) { + free_dma(dev->dma); + release_dma_buff(lp); + } +#endif + + /* Update the statistics here. */ + return 0; +} + +/* Get the current statistics. + * This may be called with the card open or closed. + */ +static struct net_device_stats * +net_get_stats(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&lp->lock, flags); + /* Update the statistics from the device registers. */ + dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6); + dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6); + spin_unlock_irqrestore(&lp->lock, flags); + + return &dev->stats; +} + static void net_timeout(struct net_device *dev) { /* If we get here, some higher level has decided we are broken. There should really be a "kick me" function call instead. */ - if (net_debug > 0) printk("%s: transmit timed out, %s?\n", dev->name, - tx_done(dev) ? "IRQ conflict ?" : "network cable problem"); + cs89_dbg(0, err, "%s: transmit timed out, %s?\n", + dev->name, + tx_done(dev) ? "IRQ conflict" : "network cable problem"); /* Try to restart the adaptor. */ netif_wake_queue(dev); } -static netdev_tx_t net_send_packet(struct sk_buff *skb,struct net_device *dev) +static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev) { struct net_local *lp = netdev_priv(dev); unsigned long flags; - if (net_debug > 3) { - printk("%s: sent %d byte packet of type %x\n", - dev->name, skb->len, - (skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]); - } + cs89_dbg(3, debug, "%s: sent %d byte packet of type %x\n", + dev->name, skb->len, + ((skb->data[ETH_ALEN + ETH_ALEN] << 8) | + skb->data[ETH_ALEN + ETH_ALEN + 1])); /* keep the upload from being interrupted, since we - ask the chip to start transmitting before the - whole packet has been completely uploaded. */ + * ask the chip to start transmitting before the + * whole packet has been completely uploaded. 
+ */ spin_lock_irqsave(&lp->lock, flags); netif_stop_queue(dev); /* initiate a transmit sequence */ - writeword(dev->base_addr, TX_CMD_PORT, lp->send_cmd); - writeword(dev->base_addr, TX_LEN_PORT, skb->len); + iowrite16(lp->send_cmd, lp->virt_addr + TX_CMD_PORT); + iowrite16(skb->len, lp->virt_addr + TX_LEN_PORT); /* Test to see if the chip has allocated memory for the packet */ if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) { - /* - * Gasp! It hasn't. But that shouldn't happen since + /* Gasp! It hasn't. But that shouldn't happen since * we're waiting for TxOk, so return 1 and requeue this packet. */ spin_unlock_irqrestore(&lp->lock, flags); - if (net_debug) printk("cs89x0: Tx buffer not free!\n"); + cs89_dbg(0, err, "Tx buffer not free!\n"); return NETDEV_TX_BUSY; } /* Write the contents of the packet */ - writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1); + writewords(lp, TX_FRAME_PORT, skb->data, (skb->len + 1) >> 1); spin_unlock_irqrestore(&lp->lock, flags); dev->stats.tx_bytes += skb->len; - dev_kfree_skb (skb); + dev_kfree_skb(skb); - /* - * We DO NOT call netif_wake_queue() here. + /* We DO NOT call netif_wake_queue() here. * We also DO NOT call netif_start_queue(). * * Either of these would cause another bottom half run through - * net_send_packet() before this packet has fully gone out. That causes - * us to hit the "Gasp!" above and the send is rescheduled. it runs like - * a dog. We just return and wait for the Tx completion interrupt handler - * to restart the netdevice layer + * net_send_packet() before this packet has fully gone out. + * That causes us to hit the "Gasp!" above and the send is rescheduled. + * it runs like a dog. We just return and wait for the Tx completion + * interrupt handler to restart the netdevice layer */ return NETDEV_TX_OK; } -/* The typical workload of the driver: - Handle the network interface interrupts. */ +static void set_multicast_list(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + unsigned long flags; -static irqreturn_t net_interrupt(int irq, void *dev_id) + spin_lock_irqsave(&lp->lock, flags); + if (dev->flags & IFF_PROMISC) + lp->rx_mode = RX_ALL_ACCEPT; + else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) + /* The multicast-accept list is initialized to accept-all, + * and we rely on higher-level filtering for now. + */ + lp->rx_mode = RX_MULTCAST_ACCEPT; + else + lp->rx_mode = 0; + + writereg(dev, PP_RxCTL, DEF_RX_ACCEPT | lp->rx_mode); + + /* in promiscuous mode, we accept errored packets, + * so we have to enable interrupts on them also + */ + writereg(dev, PP_RxCFG, + (lp->curr_rx_cfg | + (lp->rx_mode == RX_ALL_ACCEPT) + ? (RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL) + : 0)); + spin_unlock_irqrestore(&lp->lock, flags); +} + +static int set_mac_address(struct net_device *dev, void *p) { - struct net_device *dev = dev_id; - struct net_local *lp; - int ioaddr, status; - int handled = 0; + int i; + struct sockaddr *addr = p; - ioaddr = dev->base_addr; - lp = netdev_priv(dev); + if (netif_running(dev)) + return -EBUSY; - /* we MUST read all the events out of the ISQ, otherwise we'll never - get interrupted again. As a consequence, we can't have any limit - on the number of times we loop in the interrupt handler. The - hardware guarantees that eventually we'll run out of events. Of - course, if you're on a slow machine, and packets are arriving - faster than you can read them off, you're screwed. Hasta la - vista, baby! 
*/ - while ((status = readword(dev->base_addr, ISQ_PORT))) { - if (net_debug > 4)printk("%s: event=%04x\n", dev->name, status); - handled = 1; - switch(status & ISQ_EVENT_MASK) { - case ISQ_RECEIVER_EVENT: - /* Got a packet(s). */ - net_rx(dev); - break; - case ISQ_TRANSMITTER_EVENT: - dev->stats.tx_packets++; - netif_wake_queue(dev); /* Inform upper layers. */ - if ((status & ( TX_OK | - TX_LOST_CRS | - TX_SQE_ERROR | - TX_LATE_COL | - TX_16_COL)) != TX_OK) { - if ((status & TX_OK) == 0) - dev->stats.tx_errors++; - if (status & TX_LOST_CRS) - dev->stats.tx_carrier_errors++; - if (status & TX_SQE_ERROR) - dev->stats.tx_heartbeat_errors++; - if (status & TX_LATE_COL) - dev->stats.tx_window_errors++; - if (status & TX_16_COL) - dev->stats.tx_aborted_errors++; - } - break; - case ISQ_BUFFER_EVENT: - if (status & READY_FOR_TX) { - /* we tried to transmit a packet earlier, - but inexplicably ran out of buffers. - That shouldn't happen since we only ever - load one packet. Shrug. Do the right - thing anyway. */ - netif_wake_queue(dev); /* Inform upper layers. */ - } - if (status & TX_UNDERRUN) { - if (net_debug > 0) printk("%s: transmit underrun\n", dev->name); - lp->send_underrun++; - if (lp->send_underrun == 3) lp->send_cmd = TX_AFTER_381; - else if (lp->send_underrun == 6) lp->send_cmd = TX_AFTER_ALL; - /* transmit cycle is done, although - frame wasn't transmitted - this - avoids having to wait for the upper - layers to timeout on us, in the - event of a tx underrun */ - netif_wake_queue(dev); /* Inform upper layers. */ - } -#if ALLOW_DMA - if (lp->use_dma && (status & RX_DMA)) { - int count = readreg(dev, PP_DmaFrameCnt); - while(count) { - if (net_debug > 5) - printk("%s: receiving %d DMA frames\n", dev->name, count); - if (net_debug > 2 && count >1) - printk("%s: receiving %d DMA frames\n", dev->name, count); - dma_rx(dev); - if (--count == 0) - count = readreg(dev, PP_DmaFrameCnt); - if (net_debug > 2 && count > 0) - printk("%s: continuing with %d DMA frames\n", dev->name, count); - } - } -#endif - break; - case ISQ_RX_MISS_EVENT: - dev->stats.rx_missed_errors += (status >> 6); - break; - case ISQ_TX_COL_EVENT: - dev->stats.collisions += (status >> 6); - break; - } - } - return IRQ_RETVAL(handled); + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + cs89_dbg(0, debug, "%s: Setting MAC address to %pM\n", + dev->name, dev->dev_addr); + + /* set the Ethernet address */ + for (i = 0; i < ETH_ALEN / 2; i++) + writereg(dev, PP_IA + i * 2, + (dev->dev_addr[i * 2] | + (dev->dev_addr[i * 2 + 1] << 8))); + + return 0; } -static void -count_rx_errors(int status, struct net_device *dev) +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling receive - used by netconsole and other diagnostic tools + * to allow network i/o with interrupts disabled. + */ +static void net_poll_controller(struct net_device *dev) { - dev->stats.rx_errors++; - if (status & RX_RUNT) - dev->stats.rx_length_errors++; - if (status & RX_EXTRA_DATA) - dev->stats.rx_length_errors++; - if ((status & RX_CRC_ERROR) && !(status & (RX_EXTRA_DATA|RX_RUNT))) - /* per str 172 */ - dev->stats.rx_crc_errors++; - if (status & RX_DRIBBLE) - dev->stats.rx_frame_errors++; + disable_irq(dev->irq); + net_interrupt(dev->irq, dev); + enable_irq(dev->irq); } +#endif -/* We have a good packet(s), get it/them out of the buffers. 
*/ -static void -net_rx(struct net_device *dev) +static const struct net_device_ops net_ops = { + .ndo_open = net_open, + .ndo_stop = net_close, + .ndo_tx_timeout = net_timeout, + .ndo_start_xmit = net_send_packet, + .ndo_get_stats = net_get_stats, + .ndo_set_rx_mode = set_multicast_list, + .ndo_set_mac_address = set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = net_poll_controller, +#endif + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + +static void __init reset_chip(struct net_device *dev) { - struct sk_buff *skb; - int status, length; +#if !defined(CONFIG_MACH_MX31ADS) +#if !defined(CS89x0_NONISA_IRQ) + struct net_local *lp = netdev_priv(dev); +#endif /* CS89x0_NONISA_IRQ */ + int reset_start_time; - int ioaddr = dev->base_addr; - status = readword(ioaddr, RX_FRAME_PORT); - length = readword(ioaddr, RX_FRAME_PORT); + writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET); - if ((status & RX_OK) == 0) { - count_rx_errors(status, dev); - return; + /* wait 30 ms */ + msleep(30); + +#if !defined(CS89x0_NONISA_IRQ) + if (lp->chip_type != CS8900) { + /* Hardware problem requires PNP registers to be reconfigured after a reset */ + iowrite16(PP_CS8920_ISAINT, lp->virt_addr + ADD_PORT); + iowrite8(dev->irq, lp->virt_addr + DATA_PORT); + iowrite8(0, lp->virt_addr + DATA_PORT + 1); + + iowrite16(PP_CS8920_ISAMemB, lp->virt_addr + ADD_PORT); + iowrite8((dev->mem_start >> 16) & 0xff, + lp->virt_addr + DATA_PORT); + iowrite8((dev->mem_start >> 8) & 0xff, + lp->virt_addr + DATA_PORT + 1); } +#endif /* CS89x0_NONISA_IRQ */ - /* Malloc up new buffer. */ - skb = netdev_alloc_skb(dev, length + 2); - if (skb == NULL) { -#if 0 /* Again, this seems a cruel thing to do */ - printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); + /* Wait until the chip is reset */ + reset_start_time = jiffies; + while ((readreg(dev, PP_SelfST) & INIT_DONE) == 0 && + jiffies - reset_start_time < 2) + ; +#endif /* !CONFIG_MACH_MX31ADS */ +} + +/* This is the real probe routine. + * Linux has a history of friendly device probes on the ISA bus. + * A good device probes avoids doing writes, and + * verifies that the correct device exists and functions. + * Return 0 on success. + */ +static int __init +cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular) +{ + struct net_local *lp = netdev_priv(dev); + int i; + int tmp; + unsigned rev_type = 0; + int eeprom_buff[CHKSUM_LEN]; + int retval; + + /* Initialize the device structure. */ + if (!modular) { + memset(lp, 0, sizeof(*lp)); + spin_lock_init(&lp->lock); +#ifndef MODULE +#if ALLOW_DMA + if (g_cs89x0_dma) { + lp->use_dma = 1; + lp->dma = g_cs89x0_dma; + lp->dmasize = 16; /* Could make this an option... 
*/ + } +#endif + lp->force = g_cs89x0_media__force; #endif - dev->stats.rx_dropped++; - return; } - skb_reserve(skb, 2); /* longword align L3 header */ - readwords(ioaddr, RX_FRAME_PORT, skb_put(skb, length), length >> 1); - if (length & 1) - skb->data[length-1] = readword(ioaddr, RX_FRAME_PORT); + pr_debug("PP_addr at %p[%x]: 0x%x\n", + ioaddr, ADD_PORT, ioread16(ioaddr + ADD_PORT)); + iowrite16(PP_ChipID, ioaddr + ADD_PORT); - if (net_debug > 3) { - printk( "%s: received %d byte packet of type %x\n", - dev->name, length, - (skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]); + tmp = ioread16(ioaddr + DATA_PORT); + if (tmp != CHIP_EISA_ID_SIG) { + pr_debug("%s: incorrect signature at %p[%x]: 0x%x!=" + CHIP_EISA_ID_SIG_STR "\n", + dev->name, ioaddr, DATA_PORT, tmp); + retval = -ENODEV; + goto out1; } - skb->protocol=eth_type_trans(skb,dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += length; -} + lp->virt_addr = ioaddr; -#if ALLOW_DMA -static void release_dma_buff(struct net_local *lp) -{ - if (lp->dma_buff) { - free_pages((unsigned long)(lp->dma_buff), get_order(lp->dmasize * 1024)); - lp->dma_buff = NULL; + /* get the chip type */ + rev_type = readreg(dev, PRODUCT_ID_ADD); + lp->chip_type = rev_type & ~REVISON_BITS; + lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A'; + + /* Check the chip type and revision in order to set the correct + * send command. CS8920 revision C and CS8900 revision F can use + * the faster send. + */ + lp->send_cmd = TX_AFTER_381; + if (lp->chip_type == CS8900 && lp->chip_revision >= 'F') + lp->send_cmd = TX_NOW; + if (lp->chip_type != CS8900 && lp->chip_revision >= 'C') + lp->send_cmd = TX_NOW; + + pr_info_once("%s\n", version); + + pr_info("%s: cs89%c0%s rev %c found at %p ", + dev->name, + lp->chip_type == CS8900 ? '0' : '2', + lp->chip_type == CS8920M ? "M" : "", + lp->chip_revision, + lp->virt_addr); + + reset_chip(dev); + + /* Here we read the current configuration of the chip. + * If there is no Extended EEPROM then the idea is to not disturb + * the chip configuration, it should have been correctly setup by + * automatic EEPROM read on reset. So, if the chip says it read + * the EEPROM the driver will always do *something* instead of + * complain that adapter_cnf is 0. + */ + + if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) == + (EEPROM_OK | EEPROM_PRESENT)) { + /* Load the MAC. */ + for (i = 0; i < ETH_ALEN / 2; i++) { + unsigned int Addr; + Addr = readreg(dev, PP_IA + i * 2); + dev->dev_addr[i * 2] = Addr & 0xFF; + dev->dev_addr[i * 2 + 1] = Addr >> 8; + } + + /* Load the Adapter Configuration. + * Note: Barring any more specific information from some + * other source (ie EEPROM+Schematics), we would not know + * how to operate a 10Base2 interface on the AUI port. + * However, since we do read the status of HCB1 and use + * settings that always result in calls to control_dc_dc(dev,0) + * a BNC interface should work if the enable pin + * (dc/dc converter) is on HCB1. + * It will be called AUI however. + */ + + lp->adapter_cnf = 0; + i = readreg(dev, PP_LineCTL); + /* Preserve the setting of the HCB1 pin. 
*/ + if ((i & (HCB1 | HCB1_ENBL)) == (HCB1 | HCB1_ENBL)) + lp->adapter_cnf |= A_CNF_DC_DC_POLARITY; + /* Save the sqelch bit */ + if ((i & LOW_RX_SQUELCH) == LOW_RX_SQUELCH) + lp->adapter_cnf |= A_CNF_EXTND_10B_2 | A_CNF_LOW_RX_SQUELCH; + /* Check if the card is in 10Base-t only mode */ + if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == 0) + lp->adapter_cnf |= A_CNF_10B_T | A_CNF_MEDIA_10B_T; + /* Check if the card is in AUI only mode */ + if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUI_ONLY) + lp->adapter_cnf |= A_CNF_AUI | A_CNF_MEDIA_AUI; + /* Check if the card is in Auto mode. */ + if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUTO_AUI_10BASET) + lp->adapter_cnf |= A_CNF_AUI | A_CNF_10B_T | + A_CNF_MEDIA_AUI | A_CNF_MEDIA_10B_T | A_CNF_MEDIA_AUTO; + + cs89_dbg(1, info, "%s: PP_LineCTL=0x%x, adapter_cnf=0x%x\n", + dev->name, i, lp->adapter_cnf); + + /* IRQ. Other chips already probe, see below. */ + if (lp->chip_type == CS8900) + lp->isa_config = readreg(dev, PP_CS8900_ISAINT) & INT_NO_MASK; + + pr_cont("[Cirrus EEPROM] "); } -} -#endif -/* The inverse routine to net_open(). */ -static int -net_close(struct net_device *dev) -{ -#if ALLOW_DMA - struct net_local *lp = netdev_priv(dev); -#endif + pr_cont("\n"); - netif_stop_queue(dev); + /* First check to see if an EEPROM is attached. */ - writereg(dev, PP_RxCFG, 0); - writereg(dev, PP_TxCFG, 0); - writereg(dev, PP_BufCFG, 0); - writereg(dev, PP_BusCTL, 0); + if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0) + pr_warn("No EEPROM, relying on command line....\n"); + else if (get_eeprom_data(dev, START_EEPROM_DATA, CHKSUM_LEN, eeprom_buff) < 0) { + pr_warn("EEPROM read failed, relying on command line\n"); + } else if (get_eeprom_cksum(START_EEPROM_DATA, CHKSUM_LEN, eeprom_buff) < 0) { + /* Check if the chip was able to read its own configuration starting + at 0 in the EEPROM*/ + if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) != + (EEPROM_OK | EEPROM_PRESENT)) + pr_warn("Extended EEPROM checksum bad and no Cirrus EEPROM, relying on command line\n"); - free_irq(dev->irq, dev); + } else { + /* This reads an extended EEPROM that is not documented + * in the CS8900 datasheet. + */ -#if ALLOW_DMA - if (lp->use_dma && lp->dma) { - free_dma(dev->dma); - release_dma_buff(lp); + /* get transmission control word but keep the autonegotiation bits */ + if (!lp->auto_neg_cnf) + lp->auto_neg_cnf = eeprom_buff[AUTO_NEG_CNF_OFFSET / 2]; + /* Store adapter configuration */ + if (!lp->adapter_cnf) + lp->adapter_cnf = eeprom_buff[ADAPTER_CNF_OFFSET / 2]; + /* Store ISA configuration */ + lp->isa_config = eeprom_buff[ISA_CNF_OFFSET / 2]; + dev->mem_start = eeprom_buff[PACKET_PAGE_OFFSET / 2] << 8; + + /* eeprom_buff has 32-bit ints, so we can't just memcpy it */ + /* store the initial memory base address */ + for (i = 0; i < ETH_ALEN / 2; i++) { + dev->dev_addr[i * 2] = eeprom_buff[i]; + dev->dev_addr[i * 2 + 1] = eeprom_buff[i] >> 8; + } + cs89_dbg(1, debug, "%s: new adapter_cnf: 0x%x\n", + dev->name, lp->adapter_cnf); } + + /* allow them to force multiple transceivers. 
If they force multiple, autosense */ + { + int count = 0; + if (lp->force & FORCE_RJ45) { + lp->adapter_cnf |= A_CNF_10B_T; + count++; + } + if (lp->force & FORCE_AUI) { + lp->adapter_cnf |= A_CNF_AUI; + count++; + } + if (lp->force & FORCE_BNC) { + lp->adapter_cnf |= A_CNF_10B_2; + count++; + } + if (count > 1) + lp->adapter_cnf |= A_CNF_MEDIA_AUTO; + else if (lp->force & FORCE_RJ45) + lp->adapter_cnf |= A_CNF_MEDIA_10B_T; + else if (lp->force & FORCE_AUI) + lp->adapter_cnf |= A_CNF_MEDIA_AUI; + else if (lp->force & FORCE_BNC) + lp->adapter_cnf |= A_CNF_MEDIA_10B_2; + } + + cs89_dbg(1, debug, "%s: after force 0x%x, adapter_cnf=0x%x\n", + dev->name, lp->force, lp->adapter_cnf); + + /* FIXME: We don't let you set dc-dc polarity or low RX squelch from the command line: add it here */ + + /* FIXME: We don't let you set the IMM bit from the command line: add it to lp->auto_neg_cnf here */ + + /* FIXME: we don't set the Ethernet address on the command line. Use + * ifconfig IFACE hw ether AABBCCDDEEFF + */ + + pr_info("media %s%s%s", + (lp->adapter_cnf & A_CNF_10B_T) ? "RJ-45," : "", + (lp->adapter_cnf & A_CNF_AUI) ? "AUI," : "", + (lp->adapter_cnf & A_CNF_10B_2) ? "BNC," : ""); + + lp->irq_map = 0xffff; + + /* If this is a CS8900 then no pnp soft */ + if (lp->chip_type != CS8900 && + /* Check if the ISA IRQ has been set */ + (i = readreg(dev, PP_CS8920_ISAINT) & 0xff, + (i != 0 && i < CS8920_NO_INTS))) { + if (!dev->irq) + dev->irq = i; + } else { + i = lp->isa_config & INT_NO_MASK; +#ifndef CONFIG_CS89x0_PLATFORM + if (lp->chip_type == CS8900) { +#ifdef CS89x0_NONISA_IRQ + i = cs8900_irq_map[0]; +#else + /* Translate the IRQ using the IRQ mapping table. */ + if (i >= ARRAY_SIZE(cs8900_irq_map)) + pr_err("invalid ISA interrupt number %d\n", i); + else + i = cs8900_irq_map[i]; + + lp->irq_map = CS8900_IRQ_MAP; /* fixed IRQ map for CS8900 */ + } else { + int irq_map_buff[IRQ_MAP_LEN/2]; + + if (get_eeprom_data(dev, IRQ_MAP_EEPROM_DATA, + IRQ_MAP_LEN / 2, + irq_map_buff) >= 0) { + if ((irq_map_buff[0] & 0xff) == PNP_IRQ_FRMT) + lp->irq_map = ((irq_map_buff[0] >> 8) | + (irq_map_buff[1] << 8)); + } #endif + } +#endif + if (!dev->irq) + dev->irq = i; + } - /* Update the statistics here. */ - return 0; -} + pr_cont(" IRQ %d", dev->irq); -/* Get the current statistics. This may be called with the card open or - closed. */ -static struct net_device_stats * -net_get_stats(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - unsigned long flags; +#if ALLOW_DMA + if (lp->use_dma) { + get_dma_channel(dev); + pr_cont(", DMA %d", dev->dma); + } else +#endif + pr_cont(", programmed I/O"); - spin_lock_irqsave(&lp->lock, flags); - /* Update the statistics from the device registers. */ - dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6); - dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6); - spin_unlock_irqrestore(&lp->lock, flags); + /* print the ethernet address. */ + pr_cont(", MAC %pM\n", dev->dev_addr); - return &dev->stats; + dev->netdev_ops = &net_ops; + dev->watchdog_timeo = HZ; + + cs89_dbg(0, info, "cs89x0_probe1() successful\n"); + + retval = register_netdev(dev); + if (retval) + goto out2; + return 0; +out2: + iowrite16(PP_ChipID, lp->virt_addr + ADD_PORT); +out1: + return retval; } -static void set_multicast_list(struct net_device *dev) +#ifndef CONFIG_CS89x0_PLATFORM +/* + * This function converts the I/O port addres used by the cs89x0_probe() and + * init_module() functions to the I/O memory address used by the + * cs89x0_probe1() function. 
+ */ +static int __init +cs89x0_ioport_probe(struct net_device *dev, unsigned long ioport, int modular) { struct net_local *lp = netdev_priv(dev); - unsigned long flags; + int ret; + void __iomem *io_mem; - spin_lock_irqsave(&lp->lock, flags); - if(dev->flags&IFF_PROMISC) - { - lp->rx_mode = RX_ALL_ACCEPT; + if (!lp) + return -ENOMEM; + + dev->base_addr = ioport; + + if (!request_region(ioport, NETCARD_IO_EXTENT, DRV_NAME)) { + ret = -EBUSY; + goto out; } - else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) - { - /* The multicast-accept list is initialized to accept-all, and we - rely on higher-level filtering for now. */ - lp->rx_mode = RX_MULTCAST_ACCEPT; + + io_mem = ioport_map(ioport & ~3, NETCARD_IO_EXTENT); + if (!io_mem) { + ret = -ENOMEM; + goto release; } - else - lp->rx_mode = 0; - writereg(dev, PP_RxCTL, DEF_RX_ACCEPT | lp->rx_mode); + /* if they give us an odd I/O address, then do ONE write to + * the address port, to get it back to address zero, where we + * expect to find the EISA signature word. An IO with a base of 0x3 + * will skip the test for the ADD_PORT. + */ + if (ioport & 1) { + cs89_dbg(1, info, "%s: odd ioaddr 0x%lx\n", dev->name, ioport); + if ((ioport & 2) != 2) { + if ((ioread16(io_mem + ADD_PORT) & ADD_MASK) != + ADD_SIG) { + pr_err("%s: bad signature 0x%x\n", + dev->name, ioread16(io_mem + ADD_PORT)); + ret = -ENODEV; + goto unmap; + } + } + } - /* in promiscuous mode, we accept errored packets, so we have to enable interrupts on them also */ - writereg(dev, PP_RxCFG, lp->curr_rx_cfg | - (lp->rx_mode == RX_ALL_ACCEPT? (RX_CRC_ERROR_ENBL|RX_RUNT_ENBL|RX_EXTRA_DATA_ENBL) : 0)); - spin_unlock_irqrestore(&lp->lock, flags); + ret = cs89x0_probe1(dev, io_mem, modular); + if (!ret) + goto out; +unmap: + ioport_unmap(io_mem); +release: + release_region(ioport, NETCARD_IO_EXTENT); +out: + return ret; } +#ifndef MODULE +/* Check for a network adaptor of this type, and return '0' iff one exists. + * If dev->base_addr == 0, probe all likely locations. + * If dev->base_addr == 1, always return failure. + * If dev->base_addr == 2, allocate space for the device and return success + * (detachable devices only). + * Return 0 on success. + */ -static int set_mac_address(struct net_device *dev, void *p) +struct net_device * __init cs89x0_probe(int unit) { - int i; - struct sockaddr *addr = p; - - if (netif_running(dev)) - return -EBUSY; + struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); + unsigned *port; + int err = 0; + int irq; + int io; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + if (!dev) + return ERR_PTR(-ENODEV); - if (net_debug) - printk("%s: Setting MAC address to %pM.\n", - dev->name, dev->dev_addr); + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + io = dev->base_addr; + irq = dev->irq; - /* set the Ethernet address */ - for (i=0; i < ETH_ALEN/2; i++) - writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8)); + cs89_dbg(0, info, "cs89x0_probe(0x%x)\n", io); - return 0; + if (io > 0x1ff) { /* Check a single specified location. */ + err = cs89x0_ioport_probe(dev, io, 0); + } else if (io != 0) { /* Don't probe at all. */ + err = -ENXIO; + } else { + for (port = netcard_portlist; *port; port++) { + if (cs89x0_ioport_probe(dev, *port, 0) == 0) + break; + dev->irq = irq; + } + if (!*port) + err = -ENODEV; + } + if (err) + goto out; + return dev; +out: + free_netdev(dev); + pr_warn("no cs8900 or cs8920 detected. 
Be sure to disable PnP with SETUP\n"); + return ERR_PTR(err); } +#endif +#endif #if defined(MODULE) && !defined(CONFIG_CS89x0_PLATFORM) static struct net_device *dev_cs89x0; -/* - * Support the 'debug' module parm even if we're compiled for non-debug to +/* Support the 'debug' module parm even if we're compiled for non-debug to * avoid breaking someone's startup scripts */ @@ -1764,11 +1755,11 @@ static int io; static int irq; static int debug; static char media[8]; -static int duplex=-1; +static int duplex = -1; static int use_dma; /* These generate unused var warnings if ALLOW_DMA = 0 */ static int dma; -static int dmasize=16; /* or 64 */ +static int dmasize = 16; /* or 64 */ module_param(io, int, 0); module_param(irq, int, 0); @@ -1801,32 +1792,28 @@ MODULE_PARM_DESC(use_dma , "(ignored)"); MODULE_AUTHOR("Mike Cruse, Russwll Nelson <nelson@crynwr.com>, Andrew Morton"); MODULE_LICENSE("GPL"); - /* -* media=t - specify media type - or media=2 - or media=aui - or medai=auto -* duplex=0 - specify forced half/full/autonegotiate duplex -* debug=# - debug level - - -* Default Chip Configuration: - * DMA Burst = enabled - * IOCHRDY Enabled = enabled - * UseSA = enabled - * CS8900 defaults to half-duplex if not specified on command-line - * CS8920 defaults to autoneg if not specified on command-line - * Use reset defaults for other config parameters - -* Assumptions: - * media type specified is supported (circuitry is present) - * if memory address is > 1MB, then required mem decode hw is present - * if 10B-2, then agent other than driver will enable DC/DC converter - (hw or software util) - - -*/ + * media=t - specify media type + * or media=2 + * or media=aui + * or medai=auto + * duplex=0 - specify forced half/full/autonegotiate duplex + * debug=# - debug level + * + * Default Chip Configuration: + * DMA Burst = enabled + * IOCHRDY Enabled = enabled + * UseSA = enabled + * CS8900 defaults to half-duplex if not specified on command-line + * CS8920 defaults to autoneg if not specified on command-line + * Use reset defaults for other config parameters + * + * Assumptions: + * media type specified is supported (circuitry is present) + * if memory address is > 1MB, then required mem decode hw is present + * if 10B-2, then agent other than driver will enable DC/DC converter + * (hw or software util) + */ int __init init_module(void) { @@ -1856,8 +1843,8 @@ int __init init_module(void) spin_lock_init(&lp->lock); - /* boy, they'd better get these right */ - if (!strcmp(media, "rj45")) + /* boy, they'd better get these right */ + if (!strcmp(media, "rj45")) lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T; else if (!strcmp(media, "aui")) lp->adapter_cnf = A_CNF_MEDIA_AUI | A_CNF_AUI; @@ -1866,27 +1853,28 @@ int __init init_module(void) else lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T; - if (duplex==-1) + if (duplex == -1) lp->auto_neg_cnf = AUTO_NEG_ENABLE; - if (io == 0) { - printk(KERN_ERR "cs89x0.c: Module autoprobing not allowed.\n"); - printk(KERN_ERR "cs89x0.c: Append io=0xNNN\n"); - ret = -EPERM; + if (io == 0) { + pr_err("Module autoprobing not allowed\n"); + pr_err("Append io=0xNNN\n"); + ret = -EPERM; goto out; - } else if (io <= 0x1ff) { + } else if (io <= 0x1ff) { ret = -ENXIO; goto out; } #if ALLOW_DMA if (use_dma && dmasize != 16 && dmasize != 64) { - printk(KERN_ERR "cs89x0.c: dma size must be either 16K or 64K, not %dK\n", dmasize); + pr_err("dma size must be either 16K or 64K, not %dK\n", + dmasize); ret = -EPERM; goto out; } #endif - ret = cs89x0_probe1(dev, io, 1); + ret = 
cs89x0_ioport_probe(dev, io, 1); if (ret) goto out; @@ -1900,8 +1888,11 @@ out: void __exit cleanup_module(void) { + struct net_local *lp = netdev_priv(dev_cs89x0); + unregister_netdev(dev_cs89x0); - writeword(dev_cs89x0->base_addr, ADD_PORT, PP_ChipID); + iowrite16(PP_ChipID, lp->virt_addr + ADD_PORT); + ioport_unmap(lp->virt_addr); release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT); free_netdev(dev_cs89x0); } @@ -1913,6 +1904,7 @@ static int __init cs89x0_platform_probe(struct platform_device *pdev) struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); struct net_local *lp; struct resource *mem_res; + void __iomem *virt_addr; int err; if (!dev) @@ -1923,29 +1915,28 @@ static int __init cs89x0_platform_probe(struct platform_device *pdev) mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); dev->irq = platform_get_irq(pdev, 0); if (mem_res == NULL || dev->irq <= 0) { - dev_warn(&dev->dev, "memory/interrupt resource missing.\n"); + dev_warn(&dev->dev, "memory/interrupt resource missing\n"); err = -ENXIO; goto free; } - lp->phys_addr = mem_res->start; lp->size = resource_size(mem_res); - if (!request_mem_region(lp->phys_addr, lp->size, DRV_NAME)) { - dev_warn(&dev->dev, "request_mem_region() failed.\n"); + if (!request_mem_region(mem_res->start, lp->size, DRV_NAME)) { + dev_warn(&dev->dev, "request_mem_region() failed\n"); err = -EBUSY; goto free; } - lp->virt_addr = ioremap(lp->phys_addr, lp->size); - if (!lp->virt_addr) { - dev_warn(&dev->dev, "ioremap() failed.\n"); + virt_addr = ioremap(mem_res->start, lp->size); + if (!virt_addr) { + dev_warn(&dev->dev, "ioremap() failed\n"); err = -ENOMEM; goto release; } - err = cs89x0_probe1(dev, (unsigned long)lp->virt_addr, 0); + err = cs89x0_probe1(dev, virt_addr, 0); if (err) { - dev_warn(&dev->dev, "no cs8900 or cs8920 detected.\n"); + dev_warn(&dev->dev, "no cs8900 or cs8920 detected\n"); goto unmap; } @@ -1953,9 +1944,9 @@ static int __init cs89x0_platform_probe(struct platform_device *pdev) return 0; unmap: - iounmap(lp->virt_addr); + iounmap(virt_addr); release: - release_mem_region(lp->phys_addr, lp->size); + release_mem_region(mem_res->start, lp->size); free: free_netdev(dev); return err; @@ -1965,10 +1956,16 @@ static int cs89x0_platform_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct net_local *lp = netdev_priv(dev); + struct resource *mem_res; + /* This platform_get_resource() call will not return NULL, because + * the same call in cs89x0_platform_probe() has returned a non NULL + * value. 
+ */ + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); unregister_netdev(dev); iounmap(lp->virt_addr); - release_mem_region(lp->phys_addr, lp->size); + release_mem_region(mem_res->start, lp->size); free_netdev(dev); return 0; } @@ -1996,13 +1993,3 @@ static void __exit cs89x0_cleanup(void) module_exit(cs89x0_cleanup); #endif /* CONFIG_CS89x0_PLATFORM */ - -/* - * Local variables: - * version-control: t - * kept-new-versions: 5 - * c-indent-level: 8 - * tab-width: 8 - * End: - * - */ diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 77b4e873f91..8132c785cea 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -944,8 +944,7 @@ static void enic_update_multicast_addr_list(struct enic *enic) for (i = 0; i < enic->mc_count; i++) { for (j = 0; j < mc_count; j++) - if (compare_ether_addr(enic->mc_addr[i], - mc_addr[j]) == 0) + if (ether_addr_equal(enic->mc_addr[i], mc_addr[j])) break; if (j == mc_count) enic_dev_del_addr(enic, enic->mc_addr[i]); @@ -953,8 +952,7 @@ static void enic_update_multicast_addr_list(struct enic *enic) for (i = 0; i < mc_count; i++) { for (j = 0; j < enic->mc_count; j++) - if (compare_ether_addr(mc_addr[i], - enic->mc_addr[j]) == 0) + if (ether_addr_equal(mc_addr[i], enic->mc_addr[j])) break; if (j == enic->mc_count) enic_dev_add_addr(enic, mc_addr[i]); @@ -999,8 +997,7 @@ static void enic_update_unicast_addr_list(struct enic *enic) for (i = 0; i < enic->uc_count; i++) { for (j = 0; j < uc_count; j++) - if (compare_ether_addr(enic->uc_addr[i], - uc_addr[j]) == 0) + if (ether_addr_equal(enic->uc_addr[i], uc_addr[j])) break; if (j == uc_count) enic_dev_del_addr(enic, enic->uc_addr[i]); @@ -1008,8 +1005,7 @@ static void enic_update_unicast_addr_list(struct enic *enic) for (i = 0; i < uc_count; i++) { for (j = 0; j < enic->uc_count; j++) - if (compare_ether_addr(uc_addr[i], - enic->uc_addr[j]) == 0) + if (ether_addr_equal(uc_addr[i], enic->uc_addr[j])) break; if (j == enic->uc_count) enic_dev_add_addr(enic, uc_addr[i]); @@ -1193,18 +1189,16 @@ static int enic_get_vf_port(struct net_device *netdev, int vf, if (err) return err; - NLA_PUT_U16(skb, IFLA_PORT_REQUEST, pp->request); - NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response); - if (pp->set & ENIC_SET_NAME) - NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, - pp->name); - if (pp->set & ENIC_SET_INSTANCE) - NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX, - pp->instance_uuid); - if (pp->set & ENIC_SET_HOST) - NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, - pp->host_uuid); - + if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) || + nla_put_u16(skb, IFLA_PORT_RESPONSE, response) || + ((pp->set & ENIC_SET_NAME) && + nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) || + ((pp->set & ENIC_SET_INSTANCE) && + nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX, + pp->instance_uuid)) || + ((pp->set & ENIC_SET_HOST) && + nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c index dafea1ecb7b..43464f0a4f9 100644 --- a/drivers/net/ethernet/cisco/enic/enic_pp.c +++ b/drivers/net/ethernet/cisco/enic/enic_pp.c @@ -184,7 +184,7 @@ static int (*enic_pp_handlers[])(struct enic *enic, int vf, }; static const int enic_pp_handlers_count = - sizeof(enic_pp_handlers)/sizeof(*enic_pp_handlers); + ARRAY_SIZE(enic_pp_handlers); static int 
enic_pp_preassociate(struct enic *enic, int vf, struct enic_port_profile *prev_pp, int *restore_pp) diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig index 972b62b3183..9745fe5e803 100644 --- a/drivers/net/ethernet/davicom/Kconfig +++ b/drivers/net/ethernet/davicom/Kconfig @@ -4,7 +4,7 @@ config DM9000 tristate "DM9000 support" - depends on ARM || BLACKFIN || MIPS + depends on ARM || BLACKFIN || MIPS || COLDFIRE select CRC32 select NET_CORE select MII diff --git a/drivers/net/ethernet/dec/ewrk3.c b/drivers/net/ethernet/dec/ewrk3.c index 1879f84a25a..17ae8c61968 100644 --- a/drivers/net/ethernet/dec/ewrk3.c +++ b/drivers/net/ethernet/dec/ewrk3.c @@ -1016,7 +1016,8 @@ static int ewrk3_rx(struct net_device *dev) } else { lp->pktStats.multicast++; } - } else if (compare_ether_addr(p, dev->dev_addr) == 0) { + } else if (ether_addr_equal(p, + dev->dev_addr)) { lp->pktStats.unicast++; } lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */ diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 68f1c39184d..61cc0934286 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -1380,6 +1380,7 @@ static void de_free_rings (struct de_private *de) static int de_open (struct net_device *dev) { struct de_private *de = netdev_priv(dev); + const int irq = de->pdev->irq; int rc; netif_dbg(de, ifup, dev, "enabling interface\n"); @@ -1394,10 +1395,9 @@ static int de_open (struct net_device *dev) dw32(IntrMask, 0); - rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev); + rc = request_irq(irq, de_interrupt, IRQF_SHARED, dev->name, dev); if (rc) { - netdev_err(dev, "IRQ %d request failure, err=%d\n", - dev->irq, rc); + netdev_err(dev, "IRQ %d request failure, err=%d\n", irq, rc); goto err_out_free; } @@ -1413,7 +1413,7 @@ static int de_open (struct net_device *dev) return 0; err_out_free_irq: - free_irq(dev->irq, dev); + free_irq(irq, dev); err_out_free: de_free_rings(de); return rc; @@ -1434,7 +1434,7 @@ static int de_close (struct net_device *dev) netif_carrier_off(dev); spin_unlock_irqrestore(&de->lock, flags); - free_irq(dev->irq, dev); + free_irq(de->pdev->irq, dev); de_free_rings(de); de_adapter_sleep(de); @@ -1444,6 +1444,7 @@ static int de_close (struct net_device *dev) static void de_tx_timeout (struct net_device *dev) { struct de_private *de = netdev_priv(dev); + const int irq = de->pdev->irq; netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n", dr32(MacStatus), dr32(MacMode), dr32(SIAStatus), @@ -1451,7 +1452,7 @@ static void de_tx_timeout (struct net_device *dev) del_timer_sync(&de->media_timer); - disable_irq(dev->irq); + disable_irq(irq); spin_lock_irq(&de->lock); de_stop_hw(de); @@ -1459,12 +1460,12 @@ static void de_tx_timeout (struct net_device *dev) netif_carrier_off(dev); spin_unlock_irq(&de->lock); - enable_irq(dev->irq); + enable_irq(irq); /* Update the error counts. 
*/ __de_get_stats(de); - synchronize_irq(dev->irq); + synchronize_irq(irq); de_clean_rings(de); de_init_rings(de); @@ -2024,8 +2025,6 @@ static int __devinit de_init_one (struct pci_dev *pdev, goto err_out_res; } - dev->irq = pdev->irq; - /* obtain and check validity of PCI I/O address */ pciaddr = pci_resource_start(pdev, 1); if (!pciaddr) { @@ -2050,7 +2049,6 @@ static int __devinit de_init_one (struct pci_dev *pdev, pciaddr, pci_name(pdev)); goto err_out_res; } - dev->base_addr = (unsigned long) regs; de->regs = regs; de_adapter_wake(de); @@ -2078,11 +2076,9 @@ static int __devinit de_init_one (struct pci_dev *pdev, goto err_out_iomap; /* print info about board and interface just registered */ - netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n", + netdev_info(dev, "%s at %p, %pM, IRQ %d\n", de->de21040 ? "21040" : "21041", - dev->base_addr, - dev->dev_addr, - dev->irq); + regs, dev->dev_addr, pdev->irq); pci_set_drvdata(pdev, dev); @@ -2130,9 +2126,11 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state) rtnl_lock(); if (netif_running (dev)) { + const int irq = pdev->irq; + del_timer_sync(&de->media_timer); - disable_irq(dev->irq); + disable_irq(irq); spin_lock_irq(&de->lock); de_stop_hw(de); @@ -2141,12 +2139,12 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state) netif_carrier_off(dev); spin_unlock_irq(&de->lock); - enable_irq(dev->irq); + enable_irq(irq); /* Update the error counts. */ __de_get_stats(de); - synchronize_irq(dev->irq); + synchronize_irq(irq); de_clean_rings(de); de_adapter_sleep(de); diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 18b106cc6d2..d3cd489d11a 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -1874,7 +1874,7 @@ de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len) } else { lp->pktStats.multicast++; } - } else if (compare_ether_addr(buf, dev->dev_addr) == 0) { + } else if (ether_addr_equal(buf, dev->dev_addr)) { lp->pktStats.unicast++; } diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index 1eccf494548..4d6fe604fa6 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -150,6 +150,12 @@ #define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */ #define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */ +#define dw32(reg, val) iowrite32(val, ioaddr + (reg)) +#define dw16(reg, val) iowrite16(val, ioaddr + (reg)) +#define dr32(reg) ioread32(ioaddr + (reg)) +#define dr16(reg) ioread16(ioaddr + (reg)) +#define dr8(reg) ioread8(ioaddr + (reg)) + #define DMFE_DBUG(dbug_now, msg, value) \ do { \ if (dmfe_debug || (dbug_now)) \ @@ -178,14 +184,6 @@ #define SROM_V41_CODE 0x14 -#define SROM_CLK_WRITE(data, ioaddr) \ - outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \ - udelay(5); \ - outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \ - udelay(5); \ - outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \ - udelay(5); - #define __CHK_IO_SIZE(pci_id, dev_rev) \ (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? 
\ DM9102A_IO_SIZE: DM9102_IO_SIZE) @@ -213,11 +211,11 @@ struct rx_desc { struct dmfe_board_info { u32 chip_id; /* Chip vendor/Device ID */ u8 chip_revision; /* Chip revision */ - struct DEVICE *next_dev; /* next device */ + struct net_device *next_dev; /* next device */ struct pci_dev *pdev; /* PCI device */ spinlock_t lock; - long ioaddr; /* I/O base address */ + void __iomem *ioaddr; /* I/O base address */ u32 cr0_data; u32 cr5_data; u32 cr6_data; @@ -320,20 +318,20 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *); static int dmfe_stop(struct DEVICE *); static void dmfe_set_filter_mode(struct DEVICE *); static const struct ethtool_ops netdev_ethtool_ops; -static u16 read_srom_word(long ,int); +static u16 read_srom_word(void __iomem *, int); static irqreturn_t dmfe_interrupt(int , void *); #ifdef CONFIG_NET_POLL_CONTROLLER static void poll_dmfe (struct net_device *dev); #endif -static void dmfe_descriptor_init(struct net_device *, unsigned long); +static void dmfe_descriptor_init(struct net_device *); static void allocate_rx_buffer(struct net_device *); -static void update_cr6(u32, unsigned long); +static void update_cr6(u32, void __iomem *); static void send_filter_frame(struct DEVICE *); static void dm9132_id_table(struct DEVICE *); -static u16 phy_read(unsigned long, u8, u8, u32); -static void phy_write(unsigned long, u8, u8, u16, u32); -static void phy_write_1bit(unsigned long, u32); -static u16 phy_read_1bit(unsigned long); +static u16 phy_read(void __iomem *, u8, u8, u32); +static void phy_write(void __iomem *, u8, u8, u16, u32); +static void phy_write_1bit(void __iomem *, u32); +static u16 phy_read_1bit(void __iomem *); static u8 dmfe_sense_speed(struct dmfe_board_info *); static void dmfe_process_mode(struct dmfe_board_info *); static void dmfe_timer(unsigned long); @@ -462,14 +460,16 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, db->buf_pool_dma_start = db->buf_pool_dma_ptr; db->chip_id = ent->driver_data; - db->ioaddr = pci_resource_start(pdev, 0); + /* IO type range. 
*/ + db->ioaddr = pci_iomap(pdev, 0, 0); + if (!db->ioaddr) + goto err_out_free_buf; + db->chip_revision = pdev->revision; db->wol_mode = 0; db->pdev = pdev; - dev->base_addr = db->ioaddr; - dev->irq = pdev->irq; pci_set_drvdata(pdev, dev); dev->netdev_ops = &netdev_ops; dev->ethtool_ops = &netdev_ethtool_ops; @@ -484,9 +484,10 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, db->chip_type = 0; /* read 64 word srom data */ - for (i = 0; i < 64; i++) + for (i = 0; i < 64; i++) { ((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i)); + } /* Set Node address */ for (i = 0; i < 6; i++) @@ -494,16 +495,18 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, err = register_netdev (dev); if (err) - goto err_out_free_buf; + goto err_out_unmap; dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n", ent->driver_data >> 16, - pci_name(pdev), dev->dev_addr, dev->irq); + pci_name(pdev), dev->dev_addr, pdev->irq); pci_set_master(pdev); return 0; +err_out_unmap: + pci_iounmap(pdev, db->ioaddr); err_out_free_buf: pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, db->buf_pool_ptr, db->buf_pool_dma_ptr); @@ -532,7 +535,7 @@ static void __devexit dmfe_remove_one (struct pci_dev *pdev) if (dev) { unregister_netdev(dev); - + pci_iounmap(db->pdev, db->ioaddr); pci_free_consistent(db->pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, db->desc_pool_ptr, db->desc_pool_dma_ptr); @@ -555,13 +558,13 @@ static void __devexit dmfe_remove_one (struct pci_dev *pdev) static int dmfe_open(struct DEVICE *dev) { - int ret; struct dmfe_board_info *db = netdev_priv(dev); + const int irq = db->pdev->irq; + int ret; DMFE_DBUG(0, "dmfe_open", 0); - ret = request_irq(dev->irq, dmfe_interrupt, - IRQF_SHARED, dev->name, dev); + ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev); if (ret) return ret; @@ -615,14 +618,14 @@ static int dmfe_open(struct DEVICE *dev) static void dmfe_init_dm910x(struct DEVICE *dev) { struct dmfe_board_info *db = netdev_priv(dev); - unsigned long ioaddr = db->ioaddr; + void __iomem *ioaddr = db->ioaddr; DMFE_DBUG(0, "dmfe_init_dm910x()", 0); /* Reset DM910x MAC controller */ - outl(DM910X_RESET, ioaddr + DCR0); /* RESET MAC */ + dw32(DCR0, DM910X_RESET); /* RESET MAC */ udelay(100); - outl(db->cr0_data, ioaddr + DCR0); + dw32(DCR0, db->cr0_data); udelay(5); /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */ @@ -633,12 +636,12 @@ static void dmfe_init_dm910x(struct DEVICE *dev) db->media_mode = dmfe_media_mode; /* RESET Phyxcer Chip by GPR port bit 7 */ - outl(0x180, ioaddr + DCR12); /* Let bit 7 output port */ + dw32(DCR12, 0x180); /* Let bit 7 output port */ if (db->chip_id == PCI_DM9009_ID) { - outl(0x80, ioaddr + DCR12); /* Issue RESET signal */ + dw32(DCR12, 0x80); /* Issue RESET signal */ mdelay(300); /* Delay 300 ms */ } - outl(0x0, ioaddr + DCR12); /* Clear RESET signal */ + dw32(DCR12, 0x0); /* Clear RESET signal */ /* Process Phyxcer Media Mode */ if ( !(db->media_mode & 0x10) ) /* Force 1M mode */ @@ -649,7 +652,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev) db->op_mode = db->media_mode; /* Force Mode */ /* Initialize Transmit/Receive decriptor and CR3/4 */ - dmfe_descriptor_init(dev, ioaddr); + dmfe_descriptor_init(dev); /* Init CR6 to program DM910x operation */ update_cr6(db->cr6_data, ioaddr); @@ -662,10 +665,10 @@ static void dmfe_init_dm910x(struct DEVICE *dev) /* Init CR7, interrupt active bit */ db->cr7_data = CR7_DEFAULT; - outl(db->cr7_data, ioaddr + DCR7); + dw32(DCR7, db->cr7_data); /* Init CR15, Tx 
jabber and Rx watchdog timer */ - outl(db->cr15_data, ioaddr + DCR15); + dw32(DCR15, db->cr15_data); /* Enable DM910X Tx/Rx function */ db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000; @@ -682,6 +685,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev) { struct dmfe_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; struct tx_desc *txptr; unsigned long flags; @@ -707,7 +711,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb, } /* Disable NIC interrupt */ - outl(0, dev->base_addr + DCR7); + dw32(DCR7, 0); /* transmit this packet */ txptr = db->tx_insert_ptr; @@ -721,11 +725,11 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb, if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) { txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ db->tx_packet_cnt++; /* Ready to send */ - outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ + dw32(DCR1, 0x1); /* Issue Tx polling */ dev->trans_start = jiffies; /* saved time stamp */ } else { db->tx_queue_cnt++; /* queue TX packet */ - outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ + dw32(DCR1, 0x1); /* Issue Tx polling */ } /* Tx resource check */ @@ -734,7 +738,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb, /* Restore CR7 to enable interrupt */ spin_unlock_irqrestore(&db->lock, flags); - outl(db->cr7_data, dev->base_addr + DCR7); + dw32(DCR7, db->cr7_data); /* free this SKB */ dev_kfree_skb(skb); @@ -751,7 +755,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb, static int dmfe_stop(struct DEVICE *dev) { struct dmfe_board_info *db = netdev_priv(dev); - unsigned long ioaddr = dev->base_addr; + void __iomem *ioaddr = db->ioaddr; DMFE_DBUG(0, "dmfe_stop", 0); @@ -762,12 +766,12 @@ static int dmfe_stop(struct DEVICE *dev) del_timer_sync(&db->timer); /* Reset & stop DM910X board */ - outl(DM910X_RESET, ioaddr + DCR0); - udelay(5); - phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id); + dw32(DCR0, DM910X_RESET); + udelay(100); + phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id); /* free interrupt */ - free_irq(dev->irq, dev); + free_irq(db->pdev->irq, dev); /* free allocated rx buffer */ dmfe_free_rxbuffer(db); @@ -794,7 +798,7 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id) { struct DEVICE *dev = dev_id; struct dmfe_board_info *db = netdev_priv(dev); - unsigned long ioaddr = dev->base_addr; + void __iomem *ioaddr = db->ioaddr; unsigned long flags; DMFE_DBUG(0, "dmfe_interrupt()", 0); @@ -802,15 +806,15 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id) spin_lock_irqsave(&db->lock, flags); /* Got DM910X status */ - db->cr5_data = inl(ioaddr + DCR5); - outl(db->cr5_data, ioaddr + DCR5); + db->cr5_data = dr32(DCR5); + dw32(DCR5, db->cr5_data); if ( !(db->cr5_data & 0xc1) ) { spin_unlock_irqrestore(&db->lock, flags); return IRQ_HANDLED; } /* Disable all interrupt in CR7 to solve the interrupt edge problem */ - outl(0, ioaddr + DCR7); + dw32(DCR7, 0); /* Check system status */ if (db->cr5_data & 0x2000) { @@ -838,11 +842,11 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id) if (db->dm910x_chk_mode & 0x2) { db->dm910x_chk_mode = 0x4; db->cr6_data |= 0x100; - update_cr6(db->cr6_data, db->ioaddr); + update_cr6(db->cr6_data, ioaddr); } /* Restore CR7 to enable interrupt mask */ - outl(db->cr7_data, ioaddr + DCR7); + dw32(DCR7, db->cr7_data); spin_unlock_irqrestore(&db->lock, flags); return IRQ_HANDLED; @@ -858,11 +862,14 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id) static void poll_dmfe 
(struct net_device *dev) { + struct dmfe_board_info *db = netdev_priv(dev); + const int irq = db->pdev->irq; + /* disable_irq here is not very nice, but with the lockless interrupt handler we have no other choice. */ - disable_irq(dev->irq); - dmfe_interrupt (dev->irq, dev); - enable_irq(dev->irq); + disable_irq(irq); + dmfe_interrupt (irq, dev); + enable_irq(irq); } #endif @@ -873,7 +880,7 @@ static void poll_dmfe (struct net_device *dev) static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db) { struct tx_desc *txptr; - unsigned long ioaddr = dev->base_addr; + void __iomem *ioaddr = db->ioaddr; u32 tdes0; txptr = db->tx_remove_ptr; @@ -897,7 +904,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db) db->tx_fifo_underrun++; if ( !(db->cr6_data & CR6_SFT) ) { db->cr6_data = db->cr6_data | CR6_SFT; - update_cr6(db->cr6_data, db->ioaddr); + update_cr6(db->cr6_data, ioaddr); } } if (tdes0 & 0x0100) @@ -924,7 +931,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db) txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ db->tx_packet_cnt++; /* Ready to send */ db->tx_queue_cnt--; - outl(0x1, ioaddr + DCR1); /* Issue Tx polling */ + dw32(DCR1, 0x1); /* Issue Tx polling */ dev->trans_start = jiffies; /* saved time stamp */ } @@ -1087,12 +1094,7 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev, strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - if (np->pdev) - strlcpy(info->bus_info, pci_name(np->pdev), - sizeof(info->bus_info)); - else - sprintf(info->bus_info, "EISA 0x%lx %d", - dev->base_addr, dev->irq); + strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); } static int dmfe_ethtool_set_wol(struct net_device *dev, @@ -1132,10 +1134,11 @@ static const struct ethtool_ops netdev_ethtool_ops = { static void dmfe_timer(unsigned long data) { + struct net_device *dev = (struct net_device *)data; + struct dmfe_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; u32 tmp_cr8; unsigned char tmp_cr12; - struct DEVICE *dev = (struct DEVICE *) data; - struct dmfe_board_info *db = netdev_priv(dev); unsigned long flags; int link_ok, link_ok_phy; @@ -1148,11 +1151,10 @@ static void dmfe_timer(unsigned long data) db->first_in_callback = 1; if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) { db->cr6_data &= ~0x40000; - update_cr6(db->cr6_data, db->ioaddr); - phy_write(db->ioaddr, - db->phy_addr, 0, 0x1000, db->chip_id); + update_cr6(db->cr6_data, ioaddr); + phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); db->cr6_data |= 0x40000; - update_cr6(db->cr6_data, db->ioaddr); + update_cr6(db->cr6_data, ioaddr); db->timer.expires = DMFE_TIMER_WUT + HZ * 2; add_timer(&db->timer); spin_unlock_irqrestore(&db->lock, flags); @@ -1167,7 +1169,7 @@ static void dmfe_timer(unsigned long data) db->dm910x_chk_mode = 0x4; /* Dynamic reset DM910X : system error or transmit time-out */ - tmp_cr8 = inl(db->ioaddr + DCR8); + tmp_cr8 = dr32(DCR8); if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) { db->reset_cr8++; db->wait_reset = 1; @@ -1177,7 +1179,7 @@ static void dmfe_timer(unsigned long data) /* TX polling kick monitor */ if ( db->tx_packet_cnt && time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) { - outl(0x1, dev->base_addr + DCR1); /* Tx polling again */ + dw32(DCR1, 0x1); /* Tx polling again */ /* TX Timeout */ if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) { @@ -1200,9 +1202,9 @@ static void 
dmfe_timer(unsigned long data) /* Link status check, Dynamic media type change */ if (db->chip_id == PCI_DM9132_ID) - tmp_cr12 = inb(db->ioaddr + DCR9 + 3); /* DM9132 */ + tmp_cr12 = dr8(DCR9 + 3); /* DM9132 */ else - tmp_cr12 = inb(db->ioaddr + DCR12); /* DM9102/DM9102A */ + tmp_cr12 = dr8(DCR12); /* DM9102/DM9102A */ if ( ((db->chip_id == PCI_DM9102_ID) && (db->chip_revision == 0x30)) || @@ -1251,7 +1253,7 @@ static void dmfe_timer(unsigned long data) /* 10/100M link failed, used 1M Home-Net */ db->cr6_data|=0x00040000; /* bit18=1, MII */ db->cr6_data&=~0x00000200; /* bit9=0, HD mode */ - update_cr6(db->cr6_data, db->ioaddr); + update_cr6(db->cr6_data, ioaddr); } } else if (!netif_carrier_ok(dev)) { @@ -1288,17 +1290,18 @@ static void dmfe_timer(unsigned long data) * Re-initialize DM910X board */ -static void dmfe_dynamic_reset(struct DEVICE *dev) +static void dmfe_dynamic_reset(struct net_device *dev) { struct dmfe_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; DMFE_DBUG(0, "dmfe_dynamic_reset()", 0); /* Sopt MAC controller */ db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */ - update_cr6(db->cr6_data, dev->base_addr); - outl(0, dev->base_addr + DCR7); /* Disable Interrupt */ - outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5); + update_cr6(db->cr6_data, ioaddr); + dw32(DCR7, 0); /* Disable Interrupt */ + dw32(DCR5, dr32(DCR5)); /* Disable upper layer interface */ netif_stop_queue(dev); @@ -1364,9 +1367,10 @@ static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb) * Using Chain structure, and allocate Tx/Rx buffer */ -static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr) +static void dmfe_descriptor_init(struct net_device *dev) { struct dmfe_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; struct tx_desc *tmp_tx; struct rx_desc *tmp_rx; unsigned char *tmp_buf; @@ -1379,7 +1383,7 @@ static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr) /* tx descriptor start pointer */ db->tx_insert_ptr = db->first_tx_desc; db->tx_remove_ptr = db->first_tx_desc; - outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */ + dw32(DCR4, db->first_tx_desc_dma); /* TX DESC address */ /* rx descriptor start pointer */ db->first_rx_desc = (void *)db->first_tx_desc + @@ -1389,7 +1393,7 @@ static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr) sizeof(struct tx_desc) * TX_DESC_CNT; db->rx_insert_ptr = db->first_rx_desc; db->rx_ready_ptr = db->first_rx_desc; - outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */ + dw32(DCR3, db->first_rx_desc_dma); /* RX DESC address */ /* Init Transmit chain */ tmp_buf = db->buf_pool_start; @@ -1431,14 +1435,14 @@ static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr) * Firstly stop DM910X , then written value and start */ -static void update_cr6(u32 cr6_data, unsigned long ioaddr) +static void update_cr6(u32 cr6_data, void __iomem *ioaddr) { u32 cr6_tmp; cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */ - outl(cr6_tmp, ioaddr + DCR6); + dw32(DCR6, cr6_tmp); udelay(5); - outl(cr6_data, ioaddr + DCR6); + dw32(DCR6, cr6_data); udelay(5); } @@ -1448,24 +1452,19 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr) * This setup frame initialize DM910X address filter mode */ -static void dm9132_id_table(struct DEVICE *dev) +static void dm9132_id_table(struct net_device *dev) { + struct dmfe_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr + 0xc0; + u16 
*addrptr = (u16 *)dev->dev_addr; struct netdev_hw_addr *ha; - u16 * addrptr; - unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */ - u32 hash_val; u16 i, hash_table[4]; - DMFE_DBUG(0, "dm9132_id_table()", 0); - /* Node address */ - addrptr = (u16 *) dev->dev_addr; - outw(addrptr[0], ioaddr); - ioaddr += 4; - outw(addrptr[1], ioaddr); - ioaddr += 4; - outw(addrptr[2], ioaddr); - ioaddr += 4; + for (i = 0; i < 3; i++) { + dw16(0, addrptr[i]); + ioaddr += 4; + } /* Clear Hash Table */ memset(hash_table, 0, sizeof(hash_table)); @@ -1475,13 +1474,14 @@ static void dm9132_id_table(struct DEVICE *dev) /* the multicast address in Hash Table : 64 bits */ netdev_for_each_mc_addr(ha, dev) { - hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f; + u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f; + hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); } /* Write the hash table to MAC MD table */ for (i = 0; i < 4; i++, ioaddr += 4) - outw(hash_table[i], ioaddr); + dw16(0, hash_table[i]); } @@ -1490,7 +1490,7 @@ static void dm9132_id_table(struct DEVICE *dev) * This setup frame initialize DM910X address filter mode */ -static void send_filter_frame(struct DEVICE *dev) +static void send_filter_frame(struct net_device *dev) { struct dmfe_board_info *db = netdev_priv(dev); struct netdev_hw_addr *ha; @@ -1535,12 +1535,14 @@ static void send_filter_frame(struct DEVICE *dev) /* Resource Check and Send the setup packet */ if (!db->tx_packet_cnt) { + void __iomem *ioaddr = db->ioaddr; + /* Resource Empty */ db->tx_packet_cnt++; txptr->tdes0 = cpu_to_le32(0x80000000); - update_cr6(db->cr6_data | 0x2000, dev->base_addr); - outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ - update_cr6(db->cr6_data, dev->base_addr); + update_cr6(db->cr6_data | 0x2000, ioaddr); + dw32(DCR1, 0x1); /* Issue Tx polling */ + update_cr6(db->cr6_data, ioaddr); dev->trans_start = jiffies; } else db->tx_queue_cnt++; /* Put in TX queue */ @@ -1575,43 +1577,59 @@ static void allocate_rx_buffer(struct net_device *dev) db->rx_insert_ptr = rxptr; } +static void srom_clk_write(void __iomem *ioaddr, u32 data) +{ + static const u32 cmd[] = { + CR9_SROM_READ | CR9_SRCS, + CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, + CR9_SROM_READ | CR9_SRCS + }; + int i; + + for (i = 0; i < ARRAY_SIZE(cmd); i++) { + dw32(DCR9, data | cmd[i]); + udelay(5); + } +} /* * Read one word data from the serial ROM */ - -static u16 read_srom_word(long ioaddr, int offset) +static u16 read_srom_word(void __iomem *ioaddr, int offset) { + u16 srom_data; int i; - u16 srom_data = 0; - long cr9_ioaddr = ioaddr + DCR9; - outl(CR9_SROM_READ, cr9_ioaddr); - outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); + dw32(DCR9, CR9_SROM_READ); + udelay(5); + dw32(DCR9, CR9_SROM_READ | CR9_SRCS); + udelay(5); /* Send the Read Command 110b */ - SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); - SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); - SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr); + srom_clk_write(ioaddr, SROM_DATA_1); + srom_clk_write(ioaddr, SROM_DATA_1); + srom_clk_write(ioaddr, SROM_DATA_0); /* Send the offset */ for (i = 5; i >= 0; i--) { srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0; - SROM_CLK_WRITE(srom_data, cr9_ioaddr); + srom_clk_write(ioaddr, srom_data); } - outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); + dw32(DCR9, CR9_SROM_READ | CR9_SRCS); + udelay(5); for (i = 16; i > 0; i--) { - outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr); + dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK); udelay(5); srom_data = (srom_data << 1) | - ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 
1 : 0); - outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); + ((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0); + dw32(DCR9, CR9_SROM_READ | CR9_SRCS); udelay(5); } - outl(CR9_SROM_READ, cr9_ioaddr); + dw32(DCR9, CR9_SROM_READ); + udelay(5); return srom_data; } @@ -1620,13 +1638,14 @@ static u16 read_srom_word(long ioaddr, int offset) * Auto sense the media mode */ -static u8 dmfe_sense_speed(struct dmfe_board_info * db) +static u8 dmfe_sense_speed(struct dmfe_board_info *db) { + void __iomem *ioaddr = db->ioaddr; u8 ErrFlag = 0; u16 phy_mode; /* CR6 bit18=0, select 10/100M */ - update_cr6( (db->cr6_data & ~0x40000), db->ioaddr); + update_cr6(db->cr6_data & ~0x40000, ioaddr); phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); @@ -1665,11 +1684,12 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db) static void dmfe_set_phyxcer(struct dmfe_board_info *db) { + void __iomem *ioaddr = db->ioaddr; u16 phy_reg; /* Select 10/100M phyxcer */ db->cr6_data &= ~0x40000; - update_cr6(db->cr6_data, db->ioaddr); + update_cr6(db->cr6_data, ioaddr); /* DM9009 Chip: Phyxcer reg18 bit12=0 */ if (db->chip_id == PCI_DM9009_ID) { @@ -1765,18 +1785,15 @@ static void dmfe_process_mode(struct dmfe_board_info *db) * Write a word to Phy register */ -static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, +static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id) { u16 i; - unsigned long ioaddr; if (chip_id == PCI_DM9132_ID) { - ioaddr = iobase + 0x80 + offset * 4; - outw(phy_data, ioaddr); + dw16(0x80 + offset * 4, phy_data); } else { /* DM9102/DM9102A Chip */ - ioaddr = iobase + DCR9; /* Send 33 synchronization clock to Phy controller */ for (i = 0; i < 35; i++) @@ -1816,19 +1833,16 @@ static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, * Read a word data from phy register */ -static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id) +static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id) { int i; u16 phy_data; - unsigned long ioaddr; if (chip_id == PCI_DM9132_ID) { /* DM9132 Chip */ - ioaddr = iobase + 0x80 + offset * 4; - phy_data = inw(ioaddr); + phy_data = dr16(0x80 + offset * 4); } else { /* DM9102/DM9102A Chip */ - ioaddr = iobase + DCR9; /* Send 33 synchronization clock to Phy controller */ for (i = 0; i < 35; i++) @@ -1870,13 +1884,13 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id) * Write one bit data to Phy Controller */ -static void phy_write_1bit(unsigned long ioaddr, u32 phy_data) +static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data) { - outl(phy_data, ioaddr); /* MII Clock Low */ + dw32(DCR9, phy_data); /* MII Clock Low */ udelay(1); - outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */ + dw32(DCR9, phy_data | MDCLKH); /* MII Clock High */ udelay(1); - outl(phy_data, ioaddr); /* MII Clock Low */ + dw32(DCR9, phy_data); /* MII Clock Low */ udelay(1); } @@ -1885,14 +1899,14 @@ static void phy_write_1bit(unsigned long ioaddr, u32 phy_data) * Read one bit phy data from PHY controller */ -static u16 phy_read_1bit(unsigned long ioaddr) +static u16 phy_read_1bit(void __iomem *ioaddr) { u16 phy_data; - outl(0x50000, ioaddr); + dw32(DCR9, 0x50000); udelay(1); - phy_data = ( inl(ioaddr) >> 19 ) & 0x1; - outl(0x40000, ioaddr); + phy_data = (dr32(DCR9) >> 19) & 0x1; + dw32(DCR9, 0x40000); udelay(1); return phy_data; @@ -1978,7 +1992,7 @@ static void dmfe_parse_srom(struct dmfe_board_info * db) /* 
Check DM9801 or DM9802 present or not */ db->HPNA_present = 0; - update_cr6(db->cr6_data|0x40000, db->ioaddr); + update_cr6(db->cr6_data | 0x40000, db->ioaddr); tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id); if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) { /* DM9801 or DM9802 present */ @@ -2095,6 +2109,7 @@ static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state) { struct net_device *dev = pci_get_drvdata(pci_dev); struct dmfe_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; u32 tmp; /* Disable upper layer interface */ @@ -2102,11 +2117,11 @@ static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state) /* Disable Tx/Rx */ db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); - update_cr6(db->cr6_data, dev->base_addr); + update_cr6(db->cr6_data, ioaddr); /* Disable Interrupt */ - outl(0, dev->base_addr + DCR7); - outl(inl (dev->base_addr + DCR5), dev->base_addr + DCR5); + dw32(DCR7, 0); + dw32(DCR5, dr32(DCR5)); /* Fre RX buffers */ dmfe_free_rxbuffer(db); diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index fea3641d939..c4f37aca226 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -328,7 +328,7 @@ static void tulip_up(struct net_device *dev) udelay(100); if (tulip_debug > 1) - netdev_dbg(dev, "tulip_up(), irq==%d\n", dev->irq); + netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq); iowrite32(tp->rx_ring_dma, ioaddr + CSR3); iowrite32(tp->tx_ring_dma, ioaddr + CSR4); @@ -515,11 +515,13 @@ media_picked: static int tulip_open(struct net_device *dev) { + struct tulip_private *tp = netdev_priv(dev); int retval; tulip_init_ring (dev); - retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev); + retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED, + dev->name, dev); if (retval) goto free_ring; @@ -841,7 +843,7 @@ static int tulip_close (struct net_device *dev) netdev_dbg(dev, "Shutting down ethercard, status was %02x\n", ioread32 (ioaddr + CSR5)); - free_irq (dev->irq, dev); + free_irq (tp->pdev->irq, dev); tulip_free_ring (dev); @@ -1489,8 +1491,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task); - dev->base_addr = (unsigned long)ioaddr; - #ifdef CONFIG_TULIP_MWI if (!force_csr0 && (tp->flags & HAS_PCI_MWI)) tulip_mwi_config (pdev, dev); @@ -1650,7 +1650,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, for (i = 0; i < 6; i++) last_phys_addr[i] = dev->dev_addr[i]; last_irq = irq; - dev->irq = irq; /* The lower four bits are the media type. */ if (board_idx >= 0 && board_idx < MAX_UNITS) { @@ -1858,7 +1857,8 @@ static int tulip_suspend (struct pci_dev *pdev, pm_message_t state) tulip_down(dev); netif_device_detach(dev); - free_irq(dev->irq, dev); + /* FIXME: it needlessly adds an error path. 
*/ + free_irq(tp->pdev->irq, dev); save_state: pci_save_state(pdev); @@ -1900,7 +1900,9 @@ static int tulip_resume(struct pci_dev *pdev) return retval; } - if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) { + retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED, + dev->name, dev); + if (retval) { pr_err("request_irq failed in resume\n"); return retval; } @@ -1960,11 +1962,14 @@ static void __devexit tulip_remove_one (struct pci_dev *pdev) static void poll_tulip (struct net_device *dev) { + struct tulip_private *tp = netdev_priv(dev); + const int irq = tp->pdev->irq; + /* disable_irq here is not very nice, but with the lockless interrupt handler we have no other choice. */ - disable_irq(dev->irq); - tulip_interrupt (dev->irq, dev); - enable_irq(dev->irq); + disable_irq(irq); + tulip_interrupt (irq, dev); + enable_irq(irq); } #endif diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index fc4001f6a5e..75d45f8a37d 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -42,6 +42,8 @@ #include <asm/dma.h> #include <asm/uaccess.h> +#define uw32(reg, val) iowrite32(val, ioaddr + (reg)) +#define ur32(reg) ioread32(ioaddr + (reg)) /* Board/System/Debug information/definition ---------------- */ #define PCI_ULI5261_ID 0x526110B9 /* ULi M5261 ID*/ @@ -110,14 +112,6 @@ do { \ #define SROM_V41_CODE 0x14 -#define SROM_CLK_WRITE(data, ioaddr) \ - outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \ - udelay(5); \ - outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \ - udelay(5); \ - outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \ - udelay(5); - /* Structure/enum declaration ------------------------------- */ struct tx_desc { __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */ @@ -132,12 +126,15 @@ struct rx_desc { } __attribute__(( aligned(32) )); struct uli526x_board_info { - u32 chip_id; /* Chip vendor/Device ID */ + struct uli_phy_ops { + void (*write)(struct uli526x_board_info *, u8, u8, u16); + u16 (*read)(struct uli526x_board_info *, u8, u8); + } phy; struct net_device *next_dev; /* next device */ struct pci_dev *pdev; /* PCI device */ spinlock_t lock; - long ioaddr; /* I/O base address */ + void __iomem *ioaddr; /* I/O base address */ u32 cr0_data; u32 cr5_data; u32 cr6_data; @@ -227,21 +224,21 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *, static int uli526x_stop(struct net_device *); static void uli526x_set_filter_mode(struct net_device *); static const struct ethtool_ops netdev_ethtool_ops; -static u16 read_srom_word(long, int); +static u16 read_srom_word(struct uli526x_board_info *, int); static irqreturn_t uli526x_interrupt(int, void *); #ifdef CONFIG_NET_POLL_CONTROLLER static void uli526x_poll(struct net_device *dev); #endif -static void uli526x_descriptor_init(struct net_device *, unsigned long); +static void uli526x_descriptor_init(struct net_device *, void __iomem *); static void allocate_rx_buffer(struct net_device *); -static void update_cr6(u32, unsigned long); +static void update_cr6(u32, void __iomem *); static void send_filter_frame(struct net_device *, int); -static u16 phy_read(unsigned long, u8, u8, u32); -static u16 phy_readby_cr10(unsigned long, u8, u8); -static void phy_write(unsigned long, u8, u8, u16, u32); -static void phy_writeby_cr10(unsigned long, u8, u8, u16); -static void phy_write_1bit(unsigned long, u32, u32); -static u16 phy_read_1bit(unsigned long, u32); +static u16 phy_readby_cr9(struct uli526x_board_info *, u8, u8); 
+static u16 phy_readby_cr10(struct uli526x_board_info *, u8, u8); +static void phy_writeby_cr9(struct uli526x_board_info *, u8, u8, u16); +static void phy_writeby_cr10(struct uli526x_board_info *, u8, u8, u16); +static void phy_write_1bit(struct uli526x_board_info *db, u32); +static u16 phy_read_1bit(struct uli526x_board_info *db); static u8 uli526x_sense_speed(struct uli526x_board_info *); static void uli526x_process_mode(struct uli526x_board_info *); static void uli526x_timer(unsigned long); @@ -253,6 +250,18 @@ static void uli526x_free_rxbuffer(struct uli526x_board_info *); static void uli526x_init(struct net_device *); static void uli526x_set_phyxcer(struct uli526x_board_info *); +static void srom_clk_write(struct uli526x_board_info *db, u32 data) +{ + void __iomem *ioaddr = db->ioaddr; + + uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS); + udelay(5); + uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS | CR9_SRCLK); + udelay(5); + uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS); + udelay(5); +} + /* ULI526X network board routine ---------------------------- */ static const struct net_device_ops netdev_ops = { @@ -277,6 +286,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, { struct uli526x_board_info *db; /* board information structure */ struct net_device *dev; + void __iomem *ioaddr; int i, err; ULI526X_DBUG(0, "uli526x_init_one()", 0); @@ -313,9 +323,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, goto err_out_disable; } - if (pci_request_regions(pdev, DRV_NAME)) { + err = pci_request_regions(pdev, DRV_NAME); + if (err < 0) { pr_err("Failed to request PCI regions\n"); - err = -ENODEV; goto err_out_disable; } @@ -323,32 +333,41 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, db = netdev_priv(dev); /* Allocate Tx/Rx descriptor memory */ + err = -ENOMEM; + db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); - if(db->desc_pool_ptr == NULL) - { - err = -ENOMEM; - goto err_out_nomem; - } + if (!db->desc_pool_ptr) + goto err_out_release; + db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); - if(db->buf_pool_ptr == NULL) - { - err = -ENOMEM; - goto err_out_nomem; - } + if (!db->buf_pool_ptr) + goto err_out_free_tx_desc; db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; db->first_tx_desc_dma = db->desc_pool_dma_ptr; db->buf_pool_start = db->buf_pool_ptr; db->buf_pool_dma_start = db->buf_pool_dma_ptr; - db->chip_id = ent->driver_data; - db->ioaddr = pci_resource_start(pdev, 0); + switch (ent->driver_data) { + case PCI_ULI5263_ID: + db->phy.write = phy_writeby_cr10; + db->phy.read = phy_readby_cr10; + break; + default: + db->phy.write = phy_writeby_cr9; + db->phy.read = phy_readby_cr9; + break; + } + + /* IO region. 
*/ + ioaddr = pci_iomap(pdev, 0, 0); + if (!ioaddr) + goto err_out_free_tx_buf; + db->ioaddr = ioaddr; db->pdev = pdev; db->init = 1; - dev->base_addr = db->ioaddr; - dev->irq = pdev->irq; pci_set_drvdata(pdev, dev); /* Register some necessary functions */ @@ -360,24 +379,24 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, /* read 64 word srom data */ for (i = 0; i < 64; i++) - ((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i)); + ((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db, i)); /* Set Node address */ if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0) /* SROM absent, so read MAC address from ID Table */ { - outl(0x10000, db->ioaddr + DCR0); //Diagnosis mode - outl(0x1c0, db->ioaddr + DCR13); //Reset dianostic pointer port - outl(0, db->ioaddr + DCR14); //Clear reset port - outl(0x10, db->ioaddr + DCR14); //Reset ID Table pointer - outl(0, db->ioaddr + DCR14); //Clear reset port - outl(0, db->ioaddr + DCR13); //Clear CR13 - outl(0x1b0, db->ioaddr + DCR13); //Select ID Table access port + uw32(DCR0, 0x10000); //Diagnosis mode + uw32(DCR13, 0x1c0); //Reset dianostic pointer port + uw32(DCR14, 0); //Clear reset port + uw32(DCR14, 0x10); //Reset ID Table pointer + uw32(DCR14, 0); //Clear reset port + uw32(DCR13, 0); //Clear CR13 + uw32(DCR13, 0x1b0); //Select ID Table access port //Read MAC address from CR14 for (i = 0; i < 6; i++) - dev->dev_addr[i] = inl(db->ioaddr + DCR14); + dev->dev_addr[i] = ur32(DCR14); //Read end - outl(0, db->ioaddr + DCR13); //Clear CR13 - outl(0, db->ioaddr + DCR0); //Clear CR0 + uw32(DCR13, 0); //Clear CR13 + uw32(DCR0, 0); //Clear CR0 udelay(10); } else /*Exist SROM*/ @@ -387,26 +406,26 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, } err = register_netdev (dev); if (err) - goto err_out_res; + goto err_out_unmap; netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n", ent->driver_data >> 16, pci_name(pdev), - dev->dev_addr, dev->irq); + dev->dev_addr, pdev->irq); pci_set_master(pdev); return 0; -err_out_res: +err_out_unmap: + pci_iounmap(pdev, db->ioaddr); +err_out_free_tx_buf: + pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, + db->buf_pool_ptr, db->buf_pool_dma_ptr); +err_out_free_tx_desc: + pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, + db->desc_pool_ptr, db->desc_pool_dma_ptr); +err_out_release: pci_release_regions(pdev); -err_out_nomem: - if(db->desc_pool_ptr) - pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, - db->desc_pool_ptr, db->desc_pool_dma_ptr); - - if(db->buf_pool_ptr != NULL) - pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, - db->buf_pool_ptr, db->buf_pool_dma_ptr); err_out_disable: pci_disable_device(pdev); err_out_free: @@ -422,19 +441,17 @@ static void __devexit uli526x_remove_one (struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct uli526x_board_info *db = netdev_priv(dev); - ULI526X_DBUG(0, "uli526x_remove_one()", 0); - + unregister_netdev(dev); + pci_iounmap(pdev, db->ioaddr); pci_free_consistent(db->pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, db->desc_pool_ptr, db->desc_pool_dma_ptr); pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, db->buf_pool_ptr, db->buf_pool_dma_ptr); - unregister_netdev(dev); pci_release_regions(pdev); - free_netdev(dev); /* free board information */ - pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); - ULI526X_DBUG(0, "uli526x_remove_one() exit", 0); + pci_set_drvdata(pdev, NULL); + free_netdev(dev); } @@ -468,7 +485,8 @@ 
static int uli526x_open(struct net_device *dev) /* Initialize ULI526X board */ uli526x_init(dev); - ret = request_irq(dev->irq, uli526x_interrupt, IRQF_SHARED, dev->name, dev); + ret = request_irq(db->pdev->irq, uli526x_interrupt, IRQF_SHARED, + dev->name, dev); if (ret) return ret; @@ -496,57 +514,57 @@ static int uli526x_open(struct net_device *dev) static void uli526x_init(struct net_device *dev) { struct uli526x_board_info *db = netdev_priv(dev); - unsigned long ioaddr = db->ioaddr; + struct uli_phy_ops *phy = &db->phy; + void __iomem *ioaddr = db->ioaddr; u8 phy_tmp; u8 timeout; - u16 phy_value; u16 phy_reg_reset; ULI526X_DBUG(0, "uli526x_init()", 0); /* Reset M526x MAC controller */ - outl(ULI526X_RESET, ioaddr + DCR0); /* RESET MAC */ + uw32(DCR0, ULI526X_RESET); /* RESET MAC */ udelay(100); - outl(db->cr0_data, ioaddr + DCR0); + uw32(DCR0, db->cr0_data); udelay(5); /* Phy addr : In some boards,M5261/M5263 phy address != 1 */ db->phy_addr = 1; - for(phy_tmp=0;phy_tmp<32;phy_tmp++) - { - phy_value=phy_read(db->ioaddr,phy_tmp,3,db->chip_id);//peer add - if(phy_value != 0xffff&&phy_value!=0) - { + for (phy_tmp = 0; phy_tmp < 32; phy_tmp++) { + u16 phy_value; + + phy_value = phy->read(db, phy_tmp, 3); //peer add + if (phy_value != 0xffff && phy_value != 0) { db->phy_addr = phy_tmp; break; } } - if(phy_tmp == 32) + + if (phy_tmp == 32) pr_warn("Can not find the phy address!!!\n"); /* Parser SROM and media mode */ db->media_mode = uli526x_media_mode; /* phyxcer capability setting */ - phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id); + phy_reg_reset = phy->read(db, db->phy_addr, 0); phy_reg_reset = (phy_reg_reset | 0x8000); - phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id); + phy->write(db, db->phy_addr, 0, phy_reg_reset); /* See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management * functions") or phy data sheet for details on phy reset */ udelay(500); timeout = 10; - while (timeout-- && - phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id) & 0x8000) - udelay(100); + while (timeout-- && phy->read(db, db->phy_addr, 0) & 0x8000) + udelay(100); /* Process Phyxcer Media Mode */ uli526x_set_phyxcer(db); /* Media Mode Process */ if ( !(db->media_mode & ULI526X_AUTO) ) - db->op_mode = db->media_mode; /* Force Mode */ + db->op_mode = db->media_mode; /* Force Mode */ /* Initialize Transmit/Receive decriptor and CR3/4 */ uli526x_descriptor_init(dev, ioaddr); @@ -559,10 +577,10 @@ static void uli526x_init(struct net_device *dev) /* Init CR7, interrupt active bit */ db->cr7_data = CR7_DEFAULT; - outl(db->cr7_data, ioaddr + DCR7); + uw32(DCR7, db->cr7_data); /* Init CR15, Tx jabber and Rx watchdog timer */ - outl(db->cr15_data, ioaddr + DCR15); + uw32(DCR15, db->cr15_data); /* Enable ULI526X Tx/Rx function */ db->cr6_data |= CR6_RXSC | CR6_TXSC; @@ -579,6 +597,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct uli526x_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; struct tx_desc *txptr; unsigned long flags; @@ -604,7 +623,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb, } /* Disable NIC interrupt */ - outl(0, dev->base_addr + DCR7); + uw32(DCR7, 0); /* transmit this packet */ txptr = db->tx_insert_ptr; @@ -615,10 +634,10 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb, db->tx_insert_ptr = txptr->next_tx_desc; /* Transmit Packet Process */ - if ( (db->tx_packet_cnt < TX_DESC_CNT) ) { + if (db->tx_packet_cnt < TX_DESC_CNT) { txptr->tdes0 = cpu_to_le32(0x80000000); 
/* Set owner bit */ db->tx_packet_cnt++; /* Ready to send */ - outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ + uw32(DCR1, 0x1); /* Issue Tx polling */ dev->trans_start = jiffies; /* saved time stamp */ } @@ -628,7 +647,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb, /* Restore CR7 to enable interrupt */ spin_unlock_irqrestore(&db->lock, flags); - outl(db->cr7_data, dev->base_addr + DCR7); + uw32(DCR7, db->cr7_data); /* free this SKB */ dev_kfree_skb(skb); @@ -645,9 +664,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb, static int uli526x_stop(struct net_device *dev) { struct uli526x_board_info *db = netdev_priv(dev); - unsigned long ioaddr = dev->base_addr; - - ULI526X_DBUG(0, "uli526x_stop", 0); + void __iomem *ioaddr = db->ioaddr; /* disable system */ netif_stop_queue(dev); @@ -656,12 +673,12 @@ static int uli526x_stop(struct net_device *dev) del_timer_sync(&db->timer); /* Reset & stop ULI526X board */ - outl(ULI526X_RESET, ioaddr + DCR0); + uw32(DCR0, ULI526X_RESET); udelay(5); - phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id); + db->phy.write(db, db->phy_addr, 0, 0x8000); /* free interrupt */ - free_irq(dev->irq, dev); + free_irq(db->pdev->irq, dev); /* free allocated rx buffer */ uli526x_free_rxbuffer(db); @@ -679,18 +696,18 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct uli526x_board_info *db = netdev_priv(dev); - unsigned long ioaddr = dev->base_addr; + void __iomem *ioaddr = db->ioaddr; unsigned long flags; spin_lock_irqsave(&db->lock, flags); - outl(0, ioaddr + DCR7); + uw32(DCR7, 0); /* Got ULI526X status */ - db->cr5_data = inl(ioaddr + DCR5); - outl(db->cr5_data, ioaddr + DCR5); + db->cr5_data = ur32(DCR5); + uw32(DCR5, db->cr5_data); if ( !(db->cr5_data & 0x180c1) ) { /* Restore CR7 to enable interrupt mask */ - outl(db->cr7_data, ioaddr + DCR7); + uw32(DCR7, db->cr7_data); spin_unlock_irqrestore(&db->lock, flags); return IRQ_HANDLED; } @@ -718,7 +735,7 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id) uli526x_free_tx_pkt(dev, db); /* Restore CR7 to enable interrupt mask */ - outl(db->cr7_data, ioaddr + DCR7); + uw32(DCR7, db->cr7_data); spin_unlock_irqrestore(&db->lock, flags); return IRQ_HANDLED; @@ -727,8 +744,10 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id) #ifdef CONFIG_NET_POLL_CONTROLLER static void uli526x_poll(struct net_device *dev) { + struct uli526x_board_info *db = netdev_priv(dev); + /* ISR grabs the irqsave lock, so this should be safe */ - uli526x_interrupt(dev->irq, dev); + uli526x_interrupt(db->pdev->irq, dev); } #endif @@ -962,12 +981,7 @@ static void netdev_get_drvinfo(struct net_device *dev, strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - if (np->pdev) - strlcpy(info->bus_info, pci_name(np->pdev), - sizeof(info->bus_info)); - else - sprintf(info->bus_info, "EISA 0x%lx %d", - dev->base_addr, dev->irq); + strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); } static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { @@ -1007,18 +1021,20 @@ static const struct ethtool_ops netdev_ethtool_ops = { static void uli526x_timer(unsigned long data) { - u32 tmp_cr8; - unsigned char tmp_cr12=0; struct net_device *dev = (struct net_device *) data; struct uli526x_board_info *db = netdev_priv(dev); + struct uli_phy_ops *phy = &db->phy; + void __iomem *ioaddr = db->ioaddr; unsigned long flags; + u8 tmp_cr12 = 0; + u32 tmp_cr8; 
//ULI526X_DBUG(0, "uli526x_timer()", 0); spin_lock_irqsave(&db->lock, flags); /* Dynamic reset ULI526X : system error or transmit time-out */ - tmp_cr8 = inl(db->ioaddr + DCR8); + tmp_cr8 = ur32(DCR8); if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) { db->reset_cr8++; db->wait_reset = 1; @@ -1028,7 +1044,7 @@ static void uli526x_timer(unsigned long data) /* TX polling kick monitor */ if ( db->tx_packet_cnt && time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) { - outl(0x1, dev->base_addr + DCR1); // Tx polling again + uw32(DCR1, 0x1); // Tx polling again // TX Timeout if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) { @@ -1049,7 +1065,7 @@ static void uli526x_timer(unsigned long data) } /* Link status check, Dynamic media type change */ - if((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)!=0) + if ((phy->read(db, db->phy_addr, 5) & 0x01e0)!=0) tmp_cr12 = 3; if ( !(tmp_cr12 & 0x3) && !db->link_failed ) { @@ -1062,7 +1078,7 @@ static void uli526x_timer(unsigned long data) /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ /* AUTO don't need */ if ( !(db->media_mode & 0x8) ) - phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); + phy->write(db, db->phy_addr, 0, 0x1000); /* AUTO mode, if INT phyxcer link failed, select EXT device */ if (db->media_mode & ULI526X_AUTO) { @@ -1119,12 +1135,13 @@ static void uli526x_timer(unsigned long data) static void uli526x_reset_prepare(struct net_device *dev) { struct uli526x_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; /* Sopt MAC controller */ db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */ - update_cr6(db->cr6_data, dev->base_addr); - outl(0, dev->base_addr + DCR7); /* Disable Interrupt */ - outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5); + update_cr6(db->cr6_data, ioaddr); + uw32(DCR7, 0); /* Disable Interrupt */ + uw32(DCR5, ur32(DCR5)); /* Disable upper layer interface */ netif_stop_queue(dev); @@ -1289,7 +1306,7 @@ static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * sk * Using Chain structure, and allocate Tx/Rx buffer */ -static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr) +static void uli526x_descriptor_init(struct net_device *dev, void __iomem *ioaddr) { struct uli526x_board_info *db = netdev_priv(dev); struct tx_desc *tmp_tx; @@ -1304,14 +1321,14 @@ static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr /* tx descriptor start pointer */ db->tx_insert_ptr = db->first_tx_desc; db->tx_remove_ptr = db->first_tx_desc; - outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */ + uw32(DCR4, db->first_tx_desc_dma); /* TX DESC address */ /* rx descriptor start pointer */ db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT; db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT; db->rx_insert_ptr = db->first_rx_desc; db->rx_ready_ptr = db->first_rx_desc; - outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */ + uw32(DCR3, db->first_rx_desc_dma); /* RX DESC address */ /* Init Transmit chain */ tmp_buf = db->buf_pool_start; @@ -1352,11 +1369,9 @@ static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr * Update CR6 value * Firstly stop ULI526X, then written value and start */ - -static void update_cr6(u32 cr6_data, unsigned long ioaddr) +static void update_cr6(u32 cr6_data, void __iomem *ioaddr) { - - outl(cr6_data, ioaddr + DCR6); + uw32(DCR6, cr6_data); 
udelay(5); } @@ -1375,6 +1390,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr) static void send_filter_frame(struct net_device *dev, int mc_cnt) { struct uli526x_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; struct netdev_hw_addr *ha; struct tx_desc *txptr; u16 * addrptr; @@ -1420,9 +1436,9 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt) /* Resource Empty */ db->tx_packet_cnt++; txptr->tdes0 = cpu_to_le32(0x80000000); - update_cr6(db->cr6_data | 0x2000, dev->base_addr); - outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ - update_cr6(db->cr6_data, dev->base_addr); + update_cr6(db->cr6_data | 0x2000, ioaddr); + uw32(DCR1, 0x1); /* Issue Tx polling */ + update_cr6(db->cr6_data, ioaddr); dev->trans_start = jiffies; } else netdev_err(dev, "No Tx resource - Send_filter_frame!\n"); @@ -1465,37 +1481,38 @@ static void allocate_rx_buffer(struct net_device *dev) * Read one word data from the serial ROM */ -static u16 read_srom_word(long ioaddr, int offset) +static u16 read_srom_word(struct uli526x_board_info *db, int offset) { - int i; + void __iomem *ioaddr = db->ioaddr; u16 srom_data = 0; - long cr9_ioaddr = ioaddr + DCR9; + int i; - outl(CR9_SROM_READ, cr9_ioaddr); - outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); + uw32(DCR9, CR9_SROM_READ); + uw32(DCR9, CR9_SROM_READ | CR9_SRCS); /* Send the Read Command 110b */ - SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); - SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); - SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr); + srom_clk_write(db, SROM_DATA_1); + srom_clk_write(db, SROM_DATA_1); + srom_clk_write(db, SROM_DATA_0); /* Send the offset */ for (i = 5; i >= 0; i--) { srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0; - SROM_CLK_WRITE(srom_data, cr9_ioaddr); + srom_clk_write(db, srom_data); } - outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); + uw32(DCR9, CR9_SROM_READ | CR9_SRCS); for (i = 16; i > 0; i--) { - outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr); + uw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK); udelay(5); - srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0); - outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); + srom_data = (srom_data << 1) | + ((ur32(DCR9) & CR9_CRDOUT) ? 
1 : 0); + uw32(DCR9, CR9_SROM_READ | CR9_SRCS); udelay(5); } - outl(CR9_SROM_READ, cr9_ioaddr); + uw32(DCR9, CR9_SROM_READ); return srom_data; } @@ -1506,15 +1523,16 @@ static u16 read_srom_word(long ioaddr, int offset) static u8 uli526x_sense_speed(struct uli526x_board_info * db) { + struct uli_phy_ops *phy = &db->phy; u8 ErrFlag = 0; u16 phy_mode; - phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); - phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); + phy_mode = phy->read(db, db->phy_addr, 1); + phy_mode = phy->read(db, db->phy_addr, 1); if ( (phy_mode & 0x24) == 0x24 ) { - phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7); + phy_mode = ((phy->read(db, db->phy_addr, 5) & 0x01e0)<<7); if(phy_mode&0x8000) phy_mode = 0x8000; else if(phy_mode&0x4000) @@ -1549,10 +1567,11 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db) static void uli526x_set_phyxcer(struct uli526x_board_info *db) { + struct uli_phy_ops *phy = &db->phy; u16 phy_reg; /* Phyxcer capability setting */ - phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0; + phy_reg = phy->read(db, db->phy_addr, 4) & ~0x01e0; if (db->media_mode & ULI526X_AUTO) { /* AUTO Mode */ @@ -1573,10 +1592,10 @@ static void uli526x_set_phyxcer(struct uli526x_board_info *db) phy_reg|=db->PHY_reg4; db->media_mode|=ULI526X_AUTO; } - phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id); + phy->write(db, db->phy_addr, 4, phy_reg); /* Restart Auto-Negotiation */ - phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id); + phy->write(db, db->phy_addr, 0, 0x1200); udelay(50); } @@ -1590,6 +1609,7 @@ static void uli526x_set_phyxcer(struct uli526x_board_info *db) static void uli526x_process_mode(struct uli526x_board_info *db) { + struct uli_phy_ops *phy = &db->phy; u16 phy_reg; /* Full Duplex Mode Check */ @@ -1601,10 +1621,10 @@ static void uli526x_process_mode(struct uli526x_board_info *db) update_cr6(db->cr6_data, db->ioaddr); /* 10/100M phyxcer force mode need */ - if ( !(db->media_mode & 0x8)) { + if (!(db->media_mode & 0x8)) { /* Forece Mode */ - phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id); - if ( !(phy_reg & 0x1) ) { + phy_reg = phy->read(db, db->phy_addr, 6); + if (!(phy_reg & 0x1)) { /* parter without N-Way capability */ phy_reg = 0x0; switch(db->op_mode) { @@ -1613,148 +1633,126 @@ static void uli526x_process_mode(struct uli526x_board_info *db) case ULI526X_100MHF: phy_reg = 0x2000; break; case ULI526X_100MFD: phy_reg = 0x2100; break; } - phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id); + phy->write(db, db->phy_addr, 0, phy_reg); } } } -/* - * Write a word to Phy register - */ - -static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id) +/* M5261/M5263 Chip */ +static void phy_writeby_cr9(struct uli526x_board_info *db, u8 phy_addr, + u8 offset, u16 phy_data) { u16 i; - unsigned long ioaddr; - - if(chip_id == PCI_ULI5263_ID) - { - phy_writeby_cr10(iobase, phy_addr, offset, phy_data); - return; - } - /* M5261/M5263 Chip */ - ioaddr = iobase + DCR9; /* Send 33 synchronization clock to Phy controller */ for (i = 0; i < 35; i++) - phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); + phy_write_1bit(db, PHY_DATA_1); /* Send start command(01) to Phy */ - phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); - phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); + phy_write_1bit(db, PHY_DATA_0); + phy_write_1bit(db, PHY_DATA_1); /* Send write command(01) to Phy */ - phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); - 
phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); + phy_write_1bit(db, PHY_DATA_0); + phy_write_1bit(db, PHY_DATA_1); /* Send Phy address */ for (i = 0x10; i > 0; i = i >> 1) - phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); + phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); /* Send register address */ for (i = 0x10; i > 0; i = i >> 1) - phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); + phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0); /* written trasnition */ - phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); - phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); + phy_write_1bit(db, PHY_DATA_1); + phy_write_1bit(db, PHY_DATA_0); /* Write a word data to PHY controller */ - for ( i = 0x8000; i > 0; i >>= 1) - phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); - + for (i = 0x8000; i > 0; i >>= 1) + phy_write_1bit(db, phy_data & i ? PHY_DATA_1 : PHY_DATA_0); } - -/* - * Read a word data from phy register - */ - -static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id) +static u16 phy_readby_cr9(struct uli526x_board_info *db, u8 phy_addr, u8 offset) { - int i; u16 phy_data; - unsigned long ioaddr; - - if(chip_id == PCI_ULI5263_ID) - return phy_readby_cr10(iobase, phy_addr, offset); - /* M5261/M5263 Chip */ - ioaddr = iobase + DCR9; + int i; /* Send 33 synchronization clock to Phy controller */ for (i = 0; i < 35; i++) - phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); + phy_write_1bit(db, PHY_DATA_1); /* Send start command(01) to Phy */ - phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); - phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); + phy_write_1bit(db, PHY_DATA_0); + phy_write_1bit(db, PHY_DATA_1); /* Send read command(10) to Phy */ - phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); - phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); + phy_write_1bit(db, PHY_DATA_1); + phy_write_1bit(db, PHY_DATA_0); /* Send Phy address */ for (i = 0x10; i > 0; i = i >> 1) - phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); + phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); /* Send register address */ for (i = 0x10; i > 0; i = i >> 1) - phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); + phy_write_1bit(db, offset & i ? 
PHY_DATA_1 : PHY_DATA_0); /* Skip transition state */ - phy_read_1bit(ioaddr, chip_id); + phy_read_1bit(db); /* read 16bit data */ for (phy_data = 0, i = 0; i < 16; i++) { phy_data <<= 1; - phy_data |= phy_read_1bit(ioaddr, chip_id); + phy_data |= phy_read_1bit(db); } return phy_data; } -static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset) +static u16 phy_readby_cr10(struct uli526x_board_info *db, u8 phy_addr, + u8 offset) { - unsigned long ioaddr,cr10_value; + void __iomem *ioaddr = db->ioaddr; + u32 cr10_value = phy_addr; - ioaddr = iobase + DCR10; - cr10_value = phy_addr; - cr10_value = (cr10_value<<5) + offset; - cr10_value = (cr10_value<<16) + 0x08000000; - outl(cr10_value,ioaddr); + cr10_value = (cr10_value << 5) + offset; + cr10_value = (cr10_value << 16) + 0x08000000; + uw32(DCR10, cr10_value); udelay(1); - while(1) - { - cr10_value = inl(ioaddr); - if(cr10_value&0x10000000) + while (1) { + cr10_value = ur32(DCR10); + if (cr10_value & 0x10000000) break; } return cr10_value & 0x0ffff; } -static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data) +static void phy_writeby_cr10(struct uli526x_board_info *db, u8 phy_addr, + u8 offset, u16 phy_data) { - unsigned long ioaddr,cr10_value; + void __iomem *ioaddr = db->ioaddr; + u32 cr10_value = phy_addr; - ioaddr = iobase + DCR10; - cr10_value = phy_addr; - cr10_value = (cr10_value<<5) + offset; - cr10_value = (cr10_value<<16) + 0x04000000 + phy_data; - outl(cr10_value,ioaddr); + cr10_value = (cr10_value << 5) + offset; + cr10_value = (cr10_value << 16) + 0x04000000 + phy_data; + uw32(DCR10, cr10_value); udelay(1); } /* * Write one bit data to Phy Controller */ -static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id) +static void phy_write_1bit(struct uli526x_board_info *db, u32 data) { - outl(phy_data , ioaddr); /* MII Clock Low */ + void __iomem *ioaddr = db->ioaddr; + + uw32(DCR9, data); /* MII Clock Low */ udelay(1); - outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */ + uw32(DCR9, data | MDCLKH); /* MII Clock High */ udelay(1); - outl(phy_data , ioaddr); /* MII Clock Low */ + uw32(DCR9, data); /* MII Clock Low */ udelay(1); } @@ -1763,14 +1761,15 @@ static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id) * Read one bit phy data from PHY controller */ -static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id) +static u16 phy_read_1bit(struct uli526x_board_info *db) { + void __iomem *ioaddr = db->ioaddr; u16 phy_data; - outl(0x50000 , ioaddr); + uw32(DCR9, 0x50000); udelay(1); - phy_data = ( inl(ioaddr) >> 19 ) & 0x1; - outl(0x40000 , ioaddr); + phy_data = (ur32(DCR9) >> 19) & 0x1; + uw32(DCR9, 0x40000); udelay(1); return phy_data; diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 2ac6fff0363..4d1ffca83c8 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -400,9 +400,6 @@ static int __devinit w840_probe1 (struct pci_dev *pdev, No hold time required! 
*/ iowrite32(0x00000001, ioaddr + PCIBusCfg); - dev->base_addr = (unsigned long)ioaddr; - dev->irq = irq; - np = netdev_priv(dev); np->pci_dev = pdev; np->chip_id = chip_idx; @@ -635,17 +632,18 @@ static int netdev_open(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base_addr; + const int irq = np->pci_dev->irq; int i; iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */ netif_device_detach(dev); - i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); + i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev); if (i) goto out_err; if (debug > 1) - netdev_dbg(dev, "w89c840_open() irq %d\n", dev->irq); + netdev_dbg(dev, "w89c840_open() irq %d\n", irq); if((i=alloc_ringdesc(dev))) goto out_err; @@ -932,6 +930,7 @@ static void tx_timeout(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base_addr; + const int irq = np->pci_dev->irq; dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n", ioread32(ioaddr + IntrStatus)); @@ -951,7 +950,7 @@ static void tx_timeout(struct net_device *dev) np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes); printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C)); - disable_irq(dev->irq); + disable_irq(irq); spin_lock_irq(&np->lock); /* * Under high load dirty_tx and the internal tx descriptor pointer @@ -966,7 +965,7 @@ static void tx_timeout(struct net_device *dev) init_rxtx_rings(dev); init_registers(dev); spin_unlock_irq(&np->lock); - enable_irq(dev->irq); + enable_irq(irq); netif_wake_queue(dev); dev->trans_start = jiffies; /* prevent tx timeout */ @@ -1500,7 +1499,7 @@ static int netdev_close(struct net_device *dev) iowrite32(0x0000, ioaddr + IntrEnable); spin_unlock_irq(&np->lock); - free_irq(dev->irq, dev); + free_irq(np->pci_dev->irq, dev); wmb(); netif_device_attach(dev); @@ -1589,7 +1588,7 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state) iowrite32(0, ioaddr + IntrEnable); spin_unlock_irq(&np->lock); - synchronize_irq(dev->irq); + synchronize_irq(np->pci_dev->irq); netif_tx_disable(dev); np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c index fdb329fe6e8..138bf83bc98 100644 --- a/drivers/net/ethernet/dec/tulip/xircom_cb.c +++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c @@ -41,7 +41,9 @@ MODULE_DESCRIPTION("Xircom Cardbus ethernet driver"); MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>"); MODULE_LICENSE("GPL"); - +#define xw32(reg, val) iowrite32(val, ioaddr + (reg)) +#define xr32(reg) ioread32(ioaddr + (reg)) +#define xr8(reg) ioread8(ioaddr + (reg)) /* IO registers on the card, offsets */ #define CSR0 0x00 @@ -83,7 +85,7 @@ struct xircom_private { struct sk_buff *tx_skb[4]; - unsigned long io_port; + void __iomem *ioaddr; int open; /* transmit_used is the rotating counter that indicates which transmit @@ -137,7 +139,7 @@ static int link_status(struct xircom_private *card); static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = { - {0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,}, + { PCI_VDEVICE(XIRCOM, 0x0003), }, {0,}, }; MODULE_DEVICE_TABLE(pci, xircom_pci_table); @@ -146,9 +148,7 @@ static struct pci_driver xircom_ops = { .name = "xircom_cb", .id_table = xircom_pci_table, .probe = xircom_probe, - .remove = xircom_remove, - .suspend =NULL, - .resume =NULL + .remove = __devexit_p(xircom_remove), }; @@ -192,15 +192,18 @@ static const struct net_device_ops netdev_ops 
= { */ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + struct device *d = &pdev->dev; struct net_device *dev = NULL; struct xircom_private *private; unsigned long flags; unsigned short tmp16; + int rc; /* First do the PCI initialisation */ - if (pci_enable_device(pdev)) - return -ENODEV; + rc = pci_enable_device(pdev); + if (rc < 0) + goto out; /* disable all powermanagement */ pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000); @@ -211,11 +214,13 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_ pci_read_config_word (pdev,PCI_STATUS, &tmp16); pci_write_config_word (pdev, PCI_STATUS,tmp16); - if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) { + rc = pci_request_regions(pdev, "xircom_cb"); + if (rc < 0) { pr_err("%s: failed to allocate io-region\n", __func__); - return -ENODEV; + goto err_disable; } + rc = -ENOMEM; /* Before changing the hardware, allocate the memory. This way, we can fail gracefully if not enough memory @@ -223,17 +228,21 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_ */ dev = alloc_etherdev(sizeof(struct xircom_private)); if (!dev) - goto device_fail; + goto err_release; private = netdev_priv(dev); /* Allocate the send/receive buffers */ - private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle); + private->rx_buffer = dma_alloc_coherent(d, 8192, + &private->rx_dma_handle, + GFP_KERNEL); if (private->rx_buffer == NULL) { pr_err("%s: no memory for rx buffer\n", __func__); goto rx_buf_fail; } - private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle); + private->tx_buffer = dma_alloc_coherent(d, 8192, + &private->tx_dma_handle, + GFP_KERNEL); if (private->tx_buffer == NULL) { pr_err("%s: no memory for tx buffer\n", __func__); goto tx_buf_fail; @@ -244,10 +253,13 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_ private->dev = dev; private->pdev = pdev; - private->io_port = pci_resource_start(pdev, 0); + + /* IO range. 
*/ + private->ioaddr = pci_iomap(pdev, 0, 0); + if (!private->ioaddr) + goto reg_fail; + spin_lock_init(&private->lock); - dev->irq = pdev->irq; - dev->base_addr = private->io_port; initialize_card(private); read_mac_address(private); @@ -256,9 +268,10 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_ dev->netdev_ops = &netdev_ops; pci_set_drvdata(pdev, dev); - if (register_netdev(dev)) { + rc = register_netdev(dev); + if (rc < 0) { pr_err("%s: netdevice registration failed\n", __func__); - goto reg_fail; + goto err_unmap; } netdev_info(dev, "Xircom cardbus revision %i at irq %i\n", @@ -273,17 +286,23 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_ spin_unlock_irqrestore(&private->lock,flags); trigger_receive(private); +out: + return rc; - return 0; - +err_unmap: + pci_iounmap(pdev, private->ioaddr); reg_fail: - kfree(private->tx_buffer); + pci_set_drvdata(pdev, NULL); + dma_free_coherent(d, 8192, private->tx_buffer, private->tx_dma_handle); tx_buf_fail: - kfree(private->rx_buffer); + dma_free_coherent(d, 8192, private->rx_buffer, private->rx_dma_handle); rx_buf_fail: free_netdev(dev); -device_fail: - return -ENODEV; +err_release: + pci_release_regions(pdev); +err_disable: + pci_disable_device(pdev); + goto out; } @@ -297,25 +316,28 @@ static void __devexit xircom_remove(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct xircom_private *card = netdev_priv(dev); + struct device *d = &pdev->dev; - pci_free_consistent(pdev,8192,card->rx_buffer,card->rx_dma_handle); - pci_free_consistent(pdev,8192,card->tx_buffer,card->tx_dma_handle); - - release_region(dev->base_addr, 128); unregister_netdev(dev); - free_netdev(dev); + pci_iounmap(pdev, card->ioaddr); pci_set_drvdata(pdev, NULL); + dma_free_coherent(d, 8192, card->tx_buffer, card->tx_dma_handle); + dma_free_coherent(d, 8192, card->rx_buffer, card->rx_dma_handle); + free_netdev(dev); + pci_release_regions(pdev); + pci_disable_device(pdev); } static irqreturn_t xircom_interrupt(int irq, void *dev_instance) { struct net_device *dev = (struct net_device *) dev_instance; struct xircom_private *card = netdev_priv(dev); + void __iomem *ioaddr = card->ioaddr; unsigned int status; int i; spin_lock(&card->lock); - status = inl(card->io_port+CSR5); + status = xr32(CSR5); #if defined DEBUG && DEBUG > 1 print_binary(status); @@ -345,7 +367,7 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance) /* Clear all remaining interrupts */ status |= 0xffffffff; /* FIXME: make this clear only the real existing bits */ - outl(status,card->io_port+CSR5); + xw32(CSR5, status); for (i=0;i<NUMDESCRIPTORS;i++) @@ -423,11 +445,11 @@ static netdev_tx_t xircom_start_xmit(struct sk_buff *skb, static int xircom_open(struct net_device *dev) { struct xircom_private *xp = netdev_priv(dev); + const int irq = xp->pdev->irq; int retval; - netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n", - dev->irq); - retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev); + netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n", irq); + retval = request_irq(irq, xircom_interrupt, IRQF_SHARED, dev->name, dev); if (retval) return retval; @@ -459,7 +481,7 @@ static int xircom_close(struct net_device *dev) spin_unlock_irqrestore(&card->lock,flags); card->open = 0; - free_irq(dev->irq,dev); + free_irq(card->pdev->irq, dev); return 0; @@ -469,35 +491,39 @@ static int xircom_close(struct net_device *dev) #ifdef CONFIG_NET_POLL_CONTROLLER static 
void xircom_poll_controller(struct net_device *dev) { - disable_irq(dev->irq); - xircom_interrupt(dev->irq, dev); - enable_irq(dev->irq); + struct xircom_private *xp = netdev_priv(dev); + const int irq = xp->pdev->irq; + + disable_irq(irq); + xircom_interrupt(irq, dev); + enable_irq(irq); } #endif static void initialize_card(struct xircom_private *card) { - unsigned int val; + void __iomem *ioaddr = card->ioaddr; unsigned long flags; + u32 val; spin_lock_irqsave(&card->lock, flags); /* First: reset the card */ - val = inl(card->io_port + CSR0); + val = xr32(CSR0); val |= 0x01; /* Software reset */ - outl(val, card->io_port + CSR0); + xw32(CSR0, val); udelay(100); /* give the card some time to reset */ - val = inl(card->io_port + CSR0); + val = xr32(CSR0); val &= ~0x01; /* disable Software reset */ - outl(val, card->io_port + CSR0); + xw32(CSR0, val); val = 0; /* Value 0x00 is a safe and conservative value for the PCI configuration settings */ - outl(val, card->io_port + CSR0); + xw32(CSR0, val); disable_all_interrupts(card); @@ -515,10 +541,9 @@ ignored; I chose zero. */ static void trigger_transmit(struct xircom_private *card) { - unsigned int val; + void __iomem *ioaddr = card->ioaddr; - val = 0; - outl(val, card->io_port + CSR1); + xw32(CSR1, 0); } /* @@ -530,10 +555,9 @@ ignored; I chose zero. */ static void trigger_receive(struct xircom_private *card) { - unsigned int val; + void __iomem *ioaddr = card->ioaddr; - val = 0; - outl(val, card->io_port + CSR2); + xw32(CSR2, 0); } /* @@ -542,6 +566,7 @@ descriptors and programs the addresses into the card. */ static void setup_descriptors(struct xircom_private *card) { + void __iomem *ioaddr = card->ioaddr; u32 address; int i; @@ -571,7 +596,7 @@ static void setup_descriptors(struct xircom_private *card) wmb(); /* Write the receive descriptor ring address to the card */ address = card->rx_dma_handle; - outl(address, card->io_port + CSR3); /* Receive descr list address */ + xw32(CSR3, address); /* Receive descr list address */ /* transmit descriptors */ @@ -596,7 +621,7 @@ static void setup_descriptors(struct xircom_private *card) wmb(); /* wite the transmit descriptor ring to the card */ address = card->tx_dma_handle; - outl(address, card->io_port + CSR4); /* xmit descr list address */ + xw32(CSR4, address); /* xmit descr list address */ } /* @@ -605,11 +630,12 @@ valid by setting the address in the card to 0x00. */ static void remove_descriptors(struct xircom_private *card) { + void __iomem *ioaddr = card->ioaddr; unsigned int val; val = 0; - outl(val, card->io_port + CSR3); /* Receive descriptor address */ - outl(val, card->io_port + CSR4); /* Send descriptor address */ + xw32(CSR3, val); /* Receive descriptor address */ + xw32(CSR4, val); /* Send descriptor address */ } /* @@ -620,17 +646,17 @@ This function also clears the status-bit. */ static int link_status_changed(struct xircom_private *card) { + void __iomem *ioaddr = card->ioaddr; unsigned int val; - val = inl(card->io_port + CSR5); /* Status register */ - - if ((val & (1 << 27)) == 0) /* no change */ + val = xr32(CSR5); /* Status register */ + if (!(val & (1 << 27))) /* no change */ return 0; /* clear the event by writing a 1 to the bit in the status register. */ val = (1 << 27); - outl(val, card->io_port + CSR5); + xw32(CSR5, val); return 1; } @@ -642,11 +668,9 @@ in a non-stopped state. 
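The xr32()/xw32()/xr8() helpers used throughout these hunks are defined elsewhere in xircom_cb.c and are not visible in this excerpt. Judging from the dr32()/dw32() macros the same series adds to dl2k.c further below, their likely shape (an assumption, not quoted from the source) is:

    #include <linux/io.h>

    /* Presumed accessors; every caller first declares a local
     * `void __iomem *ioaddr = card->ioaddr;`, as the hunks show. */
    #define xw32(reg, val)  iowrite32(val, ioaddr + (reg))
    #define xr32(reg)       ioread32(ioaddr + (reg))
    #define xr8(reg)        ioread8(ioaddr + (reg))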
*/ static int transmit_active(struct xircom_private *card) { - unsigned int val; - - val = inl(card->io_port + CSR5); /* Status register */ + void __iomem *ioaddr = card->ioaddr; - if ((val & (7 << 20)) == 0) /* transmitter disabled */ + if (!(xr32(CSR5) & (7 << 20))) /* transmitter disabled */ return 0; return 1; @@ -658,11 +682,9 @@ in a non-stopped state. */ static int receive_active(struct xircom_private *card) { - unsigned int val; - - val = inl(card->io_port + CSR5); /* Status register */ + void __iomem *ioaddr = card->ioaddr; - if ((val & (7 << 17)) == 0) /* receiver disabled */ + if (!(xr32(CSR5) & (7 << 17))) /* receiver disabled */ return 0; return 1; @@ -680,10 +702,11 @@ must be called with the lock held and interrupts disabled. */ static void activate_receiver(struct xircom_private *card) { + void __iomem *ioaddr = card->ioaddr; unsigned int val; int counter; - val = inl(card->io_port + CSR6); /* Operation mode */ + val = xr32(CSR6); /* Operation mode */ /* If the "active" bit is set and the receiver is already active, no need to do the expensive thing */ @@ -692,7 +715,7 @@ static void activate_receiver(struct xircom_private *card) val = val & ~2; /* disable the receiver */ - outl(val, card->io_port + CSR6); + xw32(CSR6, val); counter = 10; while (counter > 0) { @@ -706,9 +729,9 @@ static void activate_receiver(struct xircom_private *card) } /* enable the receiver */ - val = inl(card->io_port + CSR6); /* Operation mode */ - val = val | 2; /* enable the receiver */ - outl(val, card->io_port + CSR6); + val = xr32(CSR6); /* Operation mode */ + val = val | 2; /* enable the receiver */ + xw32(CSR6, val); /* now wait for the card to activate again */ counter = 10; @@ -733,12 +756,13 @@ must be called with the lock held and interrupts disabled. */ static void deactivate_receiver(struct xircom_private *card) { + void __iomem *ioaddr = card->ioaddr; unsigned int val; int counter; - val = inl(card->io_port + CSR6); /* Operation mode */ - val = val & ~2; /* disable the receiver */ - outl(val, card->io_port + CSR6); + val = xr32(CSR6); /* Operation mode */ + val = val & ~2; /* disable the receiver */ + xw32(CSR6, val); counter = 10; while (counter > 0) { @@ -765,10 +789,11 @@ must be called with the lock held and interrupts disabled. */ static void activate_transmitter(struct xircom_private *card) { + void __iomem *ioaddr = card->ioaddr; unsigned int val; int counter; - val = inl(card->io_port + CSR6); /* Operation mode */ + val = xr32(CSR6); /* Operation mode */ /* If the "active" bit is set and the receiver is already active, no need to do the expensive thing */ @@ -776,7 +801,7 @@ static void activate_transmitter(struct xircom_private *card) return; val = val & ~(1 << 13); /* disable the transmitter */ - outl(val, card->io_port + CSR6); + xw32(CSR6, val); counter = 10; while (counter > 0) { @@ -791,9 +816,9 @@ static void activate_transmitter(struct xircom_private *card) } /* enable the transmitter */ - val = inl(card->io_port + CSR6); /* Operation mode */ + val = xr32(CSR6); /* Operation mode */ val = val | (1 << 13); /* enable the transmitter */ - outl(val, card->io_port + CSR6); + xw32(CSR6, val); /* now wait for the card to activate again */ counter = 10; @@ -818,12 +843,13 @@ must be called with the lock held and interrupts disabled. 
*/ static void deactivate_transmitter(struct xircom_private *card) { + void __iomem *ioaddr = card->ioaddr; unsigned int val; int counter; - val = inl(card->io_port + CSR6); /* Operation mode */ + val = xr32(CSR6); /* Operation mode */ val = val & ~2; /* disable the transmitter */ - outl(val, card->io_port + CSR6); + xw32(CSR6, val); counter = 20; while (counter > 0) { @@ -846,11 +872,12 @@ must be called with the lock held and interrupts disabled. */ static void enable_transmit_interrupt(struct xircom_private *card) { + void __iomem *ioaddr = card->ioaddr; unsigned int val; - val = inl(card->io_port + CSR7); /* Interrupt enable register */ - val |= 1; /* enable the transmit interrupt */ - outl(val, card->io_port + CSR7); + val = xr32(CSR7); /* Interrupt enable register */ + val |= 1; /* enable the transmit interrupt */ + xw32(CSR7, val); } @@ -861,11 +888,12 @@ must be called with the lock held and interrupts disabled. */ static void enable_receive_interrupt(struct xircom_private *card) { + void __iomem *ioaddr = card->ioaddr; unsigned int val; - val = inl(card->io_port + CSR7); /* Interrupt enable register */ - val = val | (1 << 6); /* enable the receive interrupt */ - outl(val, card->io_port + CSR7); + val = xr32(CSR7); /* Interrupt enable register */ + val = val | (1 << 6); /* enable the receive interrupt */ + xw32(CSR7, val); } /* @@ -875,11 +903,12 @@ must be called with the lock held and interrupts disabled. */ static void enable_link_interrupt(struct xircom_private *card) { + void __iomem *ioaddr = card->ioaddr; unsigned int val; - val = inl(card->io_port + CSR7); /* Interrupt enable register */ - val = val | (1 << 27); /* enable the link status chage interrupt */ - outl(val, card->io_port + CSR7); + val = xr32(CSR7); /* Interrupt enable register */ + val = val | (1 << 27); /* enable the link status chage interrupt */ + xw32(CSR7, val); } @@ -891,10 +920,9 @@ must be called with the lock held and interrupts disabled. */ static void disable_all_interrupts(struct xircom_private *card) { - unsigned int val; + void __iomem *ioaddr = card->ioaddr; - val = 0; /* disable all interrupts */ - outl(val, card->io_port + CSR7); + xw32(CSR7, 0); } /* @@ -904,9 +932,10 @@ must be called with the lock held and interrupts disabled. */ static void enable_common_interrupts(struct xircom_private *card) { + void __iomem *ioaddr = card->ioaddr; unsigned int val; - val = inl(card->io_port + CSR7); /* Interrupt enable register */ + val = xr32(CSR7); /* Interrupt enable register */ val |= (1<<16); /* Normal Interrupt Summary */ val |= (1<<15); /* Abnormal Interrupt Summary */ val |= (1<<13); /* Fatal bus error */ @@ -915,7 +944,7 @@ static void enable_common_interrupts(struct xircom_private *card) val |= (1<<5); /* Transmit Underflow */ val |= (1<<2); /* Transmit Buffer Unavailable */ val |= (1<<1); /* Transmit Process Stopped */ - outl(val, card->io_port + CSR7); + xw32(CSR7, val); } /* @@ -925,11 +954,12 @@ must be called with the lock held and interrupts disabled. 
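Each CSR7 update above is the same read-modify-write sequence on the interrupt enable register. Purely as an illustration (no such helper exists in the driver or the patch), the pattern could be captured once as:

    /* Hypothetical helper, not part of the driver; the caller holds
     * card->lock with interrupts disabled, as the surrounding comments
     * require. */
    static void csr7_set_bits(struct xircom_private *card, u32 bits)
    {
            void __iomem *ioaddr = card->ioaddr;

            xw32(CSR7, xr32(CSR7) | bits);
    }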
*/ static int enable_promisc(struct xircom_private *card) { + void __iomem *ioaddr = card->ioaddr; unsigned int val; - val = inl(card->io_port + CSR6); + val = xr32(CSR6); val = val | (1 << 6); - outl(val, card->io_port + CSR6); + xw32(CSR6, val); return 1; } @@ -944,13 +974,16 @@ Must be called in locked state with interrupts disabled */ static int link_status(struct xircom_private *card) { - unsigned int val; + void __iomem *ioaddr = card->ioaddr; + u8 val; - val = inb(card->io_port + CSR12); + val = xr8(CSR12); - if (!(val&(1<<2))) /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */ + /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */ + if (!(val & (1 << 2))) return 10; - if (!(val&(1<<1))) /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */ + /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */ + if (!(val & (1 << 1))) return 100; /* If we get here -> no link at all */ @@ -969,29 +1002,31 @@ static int link_status(struct xircom_private *card) */ static void read_mac_address(struct xircom_private *card) { - unsigned char j, tuple, link, data_id, data_count; + void __iomem *ioaddr = card->ioaddr; unsigned long flags; + u8 link; int i; spin_lock_irqsave(&card->lock, flags); - outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */ + xw32(CSR9, 1 << 12); /* enable boot rom access */ for (i = 0x100; i < 0x1f7; i += link + 2) { - outl(i, card->io_port + CSR10); - tuple = inl(card->io_port + CSR9) & 0xff; - outl(i + 1, card->io_port + CSR10); - link = inl(card->io_port + CSR9) & 0xff; - outl(i + 2, card->io_port + CSR10); - data_id = inl(card->io_port + CSR9) & 0xff; - outl(i + 3, card->io_port + CSR10); - data_count = inl(card->io_port + CSR9) & 0xff; + u8 tuple, data_id, data_count; + + xw32(CSR10, i); + tuple = xr32(CSR9); + xw32(CSR10, i + 1); + link = xr32(CSR9); + xw32(CSR10, i + 2); + data_id = xr32(CSR9); + xw32(CSR10, i + 3); + data_count = xr32(CSR9); if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) { - /* - * This is it. We have the data we want. 
- */ + int j; + for (j = 0; j < 6; j++) { - outl(i + j + 4, card->io_port + CSR10); - card->dev->dev_addr[j] = inl(card->io_port + CSR9) & 0xff; + xw32(CSR10, i + j + 4); + card->dev->dev_addr[j] = xr32(CSR9) & 0xff; } break; } else if (link == 0) { @@ -1010,6 +1045,7 @@ static void read_mac_address(struct xircom_private *card) */ static void transceiver_voodoo(struct xircom_private *card) { + void __iomem *ioaddr = card->ioaddr; unsigned long flags; /* disable all powermanagement */ @@ -1019,14 +1055,14 @@ static void transceiver_voodoo(struct xircom_private *card) spin_lock_irqsave(&card->lock, flags); - outl(0x0008, card->io_port + CSR15); - udelay(25); - outl(0xa8050000, card->io_port + CSR15); - udelay(25); - outl(0xa00f0000, card->io_port + CSR15); - udelay(25); + xw32(CSR15, 0x0008); + udelay(25); + xw32(CSR15, 0xa8050000); + udelay(25); + xw32(CSR15, 0xa00f0000); + udelay(25); - spin_unlock_irqrestore(&card->lock, flags); + spin_unlock_irqrestore(&card->lock, flags); netif_start_queue(card->dev); } diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 2e09edb9cdf..a059f0c27e2 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -16,6 +16,13 @@ #include "dl2k.h" #include <linux/dma-mapping.h> +#define dw32(reg, val) iowrite32(val, ioaddr + (reg)) +#define dw16(reg, val) iowrite16(val, ioaddr + (reg)) +#define dw8(reg, val) iowrite8(val, ioaddr + (reg)) +#define dr32(reg) ioread32(ioaddr + (reg)) +#define dr16(reg) ioread16(ioaddr + (reg)) +#define dr8(reg) ioread8(ioaddr + (reg)) + static char version[] __devinitdata = KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n"; #define MAX_UNITS 8 @@ -49,8 +56,13 @@ module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */ /* Enable the default interrupts */ #define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \ UpdateStats | LinkEvent) -#define EnableInt() \ -writew(DEFAULT_INTR, ioaddr + IntEnable) + +static void dl2k_enable_int(struct netdev_private *np) +{ + void __iomem *ioaddr = np->ioaddr; + + dw16(IntEnable, DEFAULT_INTR); +} static const int max_intrloop = 50; static const int multicast_filter_limit = 0x40; @@ -73,7 +85,7 @@ static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); static int rio_close (struct net_device *dev); static int find_miiphy (struct net_device *dev); static int parse_eeprom (struct net_device *dev); -static int read_eeprom (long ioaddr, int eep_addr); +static int read_eeprom (struct netdev_private *, int eep_addr); static int mii_wait_link (struct net_device *dev, int wait); static int mii_set_media (struct net_device *dev); static int mii_get_media (struct net_device *dev); @@ -106,7 +118,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) static int card_idx; int chip_idx = ent->driver_data; int err, irq; - long ioaddr; + void __iomem *ioaddr; static int version_printed; void *ring_space; dma_addr_t ring_dma; @@ -124,26 +136,29 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_disable; pci_set_master (pdev); + + err = -ENOMEM; + dev = alloc_etherdev (sizeof (*np)); - if (!dev) { - err = -ENOMEM; + if (!dev) goto err_out_res; - } SET_NETDEV_DEV(dev, &pdev->dev); -#ifdef MEM_MAPPING - ioaddr = pci_resource_start (pdev, 1); - ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE); - if (!ioaddr) { - err = -ENOMEM; + np = netdev_priv(dev); + + /* IO registers range. 
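dl2k ends up holding two mapping cookies: BAR 0 (the I/O-port window, kept for EEPROM access) and, when MEM_MAPPING is defined, BAR 1 (the memory-mapped register file) for the dr*/dw* macros above. Because pci_iomap() returns a cookie that ioread*/iowrite* accept for either address space, the readb/inb redefinition block removed from dl2k.h later in this patch is no longer needed. Condensed from the hunk that follows (error labels abbreviated):

        /* BAR 0 is always mapped and doubles as the EEPROM window. */
        ioaddr = pci_iomap(pdev, 0, 0);
        if (!ioaddr)
                goto err_out_dev;
        np->eeprom_addr = ioaddr;

    #ifdef MEM_MAPPING
        /* BAR 1 carries the memory-mapped registers when enabled. */
        ioaddr = pci_iomap(pdev, 1, 0);
        if (!ioaddr)
                goto err_out_iounmap;
    #endif
        np->ioaddr = ioaddr;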
*/ + ioaddr = pci_iomap(pdev, 0, 0); + if (!ioaddr) goto err_out_dev; - } -#else - ioaddr = pci_resource_start (pdev, 0); + np->eeprom_addr = ioaddr; + +#ifdef MEM_MAPPING + /* MM registers range. */ + ioaddr = pci_iomap(pdev, 1, 0); + if (!ioaddr) + goto err_out_iounmap; #endif - dev->base_addr = ioaddr; - dev->irq = irq; - np = netdev_priv(dev); + np->ioaddr = ioaddr; np->chip_id = chip_idx; np->pdev = pdev; spin_lock_init (&np->tx_lock); @@ -239,7 +254,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_unmap_rx; /* Fiber device? */ - np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0; + np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0; np->link_status = 0; /* Set media and reset PHY */ if (np->phy_media) { @@ -276,22 +291,20 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) printk(KERN_INFO "vlan(id):\t%d\n", np->vlan); return 0; - err_out_unmap_rx: +err_out_unmap_rx: pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); - err_out_unmap_tx: +err_out_unmap_tx: pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); - err_out_iounmap: +err_out_iounmap: #ifdef MEM_MAPPING - iounmap ((void *) ioaddr); - - err_out_dev: + pci_iounmap(pdev, np->ioaddr); #endif + pci_iounmap(pdev, np->eeprom_addr); +err_out_dev: free_netdev (dev); - - err_out_res: +err_out_res: pci_release_regions (pdev); - - err_out_disable: +err_out_disable: pci_disable_device (pdev); return err; } @@ -299,11 +312,9 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) static int find_miiphy (struct net_device *dev) { + struct netdev_private *np = netdev_priv(dev); int i, phy_found = 0; - struct netdev_private *np; - long ioaddr; np = netdev_priv(dev); - ioaddr = dev->base_addr; np->phy_addr = 1; for (i = 31; i >= 0; i--) { @@ -323,26 +334,19 @@ find_miiphy (struct net_device *dev) static int parse_eeprom (struct net_device *dev) { + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; int i, j; - long ioaddr = dev->base_addr; u8 sromdata[256]; u8 *psib; u32 crc; PSROM_t psrom = (PSROM_t) sromdata; - struct netdev_private *np = netdev_priv(dev); int cid, next; -#ifdef MEM_MAPPING - ioaddr = pci_resource_start (np->pdev, 0); -#endif - /* Read eeprom */ - for (i = 0; i < 128; i++) { - ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom (ioaddr, i)); - } -#ifdef MEM_MAPPING - ioaddr = dev->base_addr; -#endif + for (i = 0; i < 128; i++) + ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i)); + if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */ /* Check CRC */ crc = ~ether_crc_le (256 - 4, sromdata); @@ -378,8 +382,7 @@ parse_eeprom (struct net_device *dev) return 0; case 2: /* Duplex Polarity */ np->duplex_polarity = psib[i]; - writeb (readb (ioaddr + PhyCtrl) | psib[i], - ioaddr + PhyCtrl); + dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]); break; case 3: /* Wake Polarity */ np->wake_polarity = psib[i]; @@ -407,59 +410,57 @@ static int rio_open (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); - long ioaddr = dev->base_addr; + void __iomem *ioaddr = np->ioaddr; + const int irq = np->pdev->irq; int i; u16 macctrl; - i = request_irq (dev->irq, rio_interrupt, IRQF_SHARED, dev->name, dev); + i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev); if (i) return i; /* Reset all logic functions */ - writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset, - ioaddr + ASICCtrl + 2); + dw16(ASICCtrl + 2, + GlobalReset | DMAReset | 
FIFOReset | NetworkReset | HostReset); mdelay(10); /* DebugCtrl bit 4, 5, 9 must set */ - writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl); + dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230); /* Jumbo frame */ if (np->jumbo != 0) - writew (MAX_JUMBO+14, ioaddr + MaxFrameSize); + dw16(MaxFrameSize, MAX_JUMBO+14); alloc_list (dev); /* Get station address */ for (i = 0; i < 6; i++) - writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i); + dw8(StationAddr0 + i, dev->dev_addr[i]); set_multicast (dev); if (np->coalesce) { - writel (np->rx_coalesce | np->rx_timeout << 16, - ioaddr + RxDMAIntCtrl); + dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16); } /* Set RIO to poll every N*320nsec. */ - writeb (0x20, ioaddr + RxDMAPollPeriod); - writeb (0xff, ioaddr + TxDMAPollPeriod); - writeb (0x30, ioaddr + RxDMABurstThresh); - writeb (0x30, ioaddr + RxDMAUrgentThresh); - writel (0x0007ffff, ioaddr + RmonStatMask); + dw8(RxDMAPollPeriod, 0x20); + dw8(TxDMAPollPeriod, 0xff); + dw8(RxDMABurstThresh, 0x30); + dw8(RxDMAUrgentThresh, 0x30); + dw32(RmonStatMask, 0x0007ffff); /* clear statistics */ clear_stats (dev); /* VLAN supported */ if (np->vlan) { /* priority field in RxDMAIntCtrl */ - writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10, - ioaddr + RxDMAIntCtrl); + dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10); /* VLANId */ - writew (np->vlan, ioaddr + VLANId); + dw16(VLANId, np->vlan); /* Length/Type should be 0x8100 */ - writel (0x8100 << 16 | np->vlan, ioaddr + VLANTag); + dw32(VLANTag, 0x8100 << 16 | np->vlan); /* Enable AutoVLANuntagging, but disable AutoVLANtagging. VLAN information tagged by TFC' VID, CFI fields. */ - writel (readl (ioaddr + MACCtrl) | AutoVLANuntagging, - ioaddr + MACCtrl); + dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging); } init_timer (&np->timer); @@ -469,20 +470,18 @@ rio_open (struct net_device *dev) add_timer (&np->timer); /* Start Tx/Rx */ - writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable, - ioaddr + MACCtrl); + dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable); macctrl = 0; macctrl |= (np->vlan) ? AutoVLANuntagging : 0; macctrl |= (np->full_duplex) ? DuplexSelect : 0; macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0; macctrl |= (np->rx_flow) ? 
RxFlowControlEnable : 0; - writew(macctrl, ioaddr + MACCtrl); + dw16(MACCtrl, macctrl); netif_start_queue (dev); - /* Enable default interrupts */ - EnableInt (); + dl2k_enable_int(np); return 0; } @@ -533,10 +532,11 @@ rio_timer (unsigned long data) static void rio_tx_timeout (struct net_device *dev) { - long ioaddr = dev->base_addr; + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n", - dev->name, readl (ioaddr + TxStatus)); + dev->name, dr32(TxStatus)); rio_free_tx(dev, 0); dev->if_port = 0; dev->trans_start = jiffies; /* prevent tx timeout */ @@ -547,6 +547,7 @@ static void alloc_list (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; int i; np->cur_rx = np->cur_tx = 0; @@ -594,24 +595,23 @@ alloc_list (struct net_device *dev) } /* Set RFDListPtr */ - writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0); - writel (0, dev->base_addr + RFDListPtr1); + dw32(RFDListPtr0, np->rx_ring_dma); + dw32(RFDListPtr1, 0); } static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; struct netdev_desc *txdesc; unsigned entry; - u32 ioaddr; u64 tfc_vlan_tag = 0; if (np->link_status == 0) { /* Link Down */ dev_kfree_skb(skb); return NETDEV_TX_OK; } - ioaddr = dev->base_addr; entry = np->cur_tx % TX_RING_SIZE; np->tx_skbuff[entry] = skb; txdesc = &np->tx_ring[entry]; @@ -646,9 +646,9 @@ start_xmit (struct sk_buff *skb, struct net_device *dev) (1 << FragCountShift)); /* TxDMAPollNow */ - writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl); + dw32(DMACtrl, dr32(DMACtrl) | 0x00001000); /* Schedule ISR */ - writel(10000, ioaddr + CountDown); + dw32(CountDown, 10000); np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE; if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE < TX_QUEUE_LEN - 1 && np->speed != 10) { @@ -658,10 +658,10 @@ start_xmit (struct sk_buff *skb, struct net_device *dev) } /* The first TFDListPtr */ - if (readl (dev->base_addr + TFDListPtr0) == 0) { - writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc), - dev->base_addr + TFDListPtr0); - writel (0, dev->base_addr + TFDListPtr1); + if (!dr32(TFDListPtr0)) { + dw32(TFDListPtr0, np->tx_ring_dma + + entry * sizeof (struct netdev_desc)); + dw32(TFDListPtr1, 0); } return NETDEV_TX_OK; @@ -671,17 +671,15 @@ static irqreturn_t rio_interrupt (int irq, void *dev_instance) { struct net_device *dev = dev_instance; - struct netdev_private *np; + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; unsigned int_status; - long ioaddr; int cnt = max_intrloop; int handled = 0; - ioaddr = dev->base_addr; - np = netdev_priv(dev); while (1) { - int_status = readw (ioaddr + IntStatus); - writew (int_status, ioaddr + IntStatus); + int_status = dr16(IntStatus); + dw16(IntStatus, int_status); int_status &= DEFAULT_INTR; if (int_status == 0 || --cnt < 0) break; @@ -692,7 +690,7 @@ rio_interrupt (int irq, void *dev_instance) /* TxDMAComplete interrupt */ if ((int_status & (TxDMAComplete|IntRequested))) { int tx_status; - tx_status = readl (ioaddr + TxStatus); + tx_status = dr32(TxStatus); if (tx_status & 0x01) tx_error (dev, tx_status); /* Free used tx skbuffs */ @@ -705,7 +703,7 @@ rio_interrupt (int irq, void *dev_instance) rio_error (dev, int_status); } if (np->cur_tx != np->old_tx) - writel (100, ioaddr + CountDown); + dw32(CountDown, 100); return 
IRQ_RETVAL(handled); } @@ -765,13 +763,11 @@ rio_free_tx (struct net_device *dev, int irq) static void tx_error (struct net_device *dev, int tx_status) { - struct netdev_private *np; - long ioaddr = dev->base_addr; + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; int frame_id; int i; - np = netdev_priv(dev); - frame_id = (tx_status & 0xffff0000); printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n", dev->name, tx_status, frame_id); @@ -779,23 +775,21 @@ tx_error (struct net_device *dev, int tx_status) /* Ttransmit Underrun */ if (tx_status & 0x10) { np->stats.tx_fifo_errors++; - writew (readw (ioaddr + TxStartThresh) + 0x10, - ioaddr + TxStartThresh); + dw16(TxStartThresh, dr16(TxStartThresh) + 0x10); /* Transmit Underrun need to set TxReset, DMARest, FIFOReset */ - writew (TxReset | DMAReset | FIFOReset | NetworkReset, - ioaddr + ASICCtrl + 2); + dw16(ASICCtrl + 2, + TxReset | DMAReset | FIFOReset | NetworkReset); /* Wait for ResetBusy bit clear */ for (i = 50; i > 0; i--) { - if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0) + if (!(dr16(ASICCtrl + 2) & ResetBusy)) break; mdelay (1); } rio_free_tx (dev, 1); /* Reset TFDListPtr */ - writel (np->tx_ring_dma + - np->old_tx * sizeof (struct netdev_desc), - dev->base_addr + TFDListPtr0); - writel (0, dev->base_addr + TFDListPtr1); + dw32(TFDListPtr0, np->tx_ring_dma + + np->old_tx * sizeof (struct netdev_desc)); + dw32(TFDListPtr1, 0); /* Let TxStartThresh stay default value */ } @@ -803,10 +797,10 @@ tx_error (struct net_device *dev, int tx_status) if (tx_status & 0x04) { np->stats.tx_fifo_errors++; /* TxReset and clear FIFO */ - writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2); + dw16(ASICCtrl + 2, TxReset | FIFOReset); /* Wait reset done */ for (i = 50; i > 0; i--) { - if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0) + if (!(dr16(ASICCtrl + 2) & ResetBusy)) break; mdelay (1); } @@ -821,7 +815,7 @@ tx_error (struct net_device *dev, int tx_status) np->stats.collisions++; #endif /* Restart the Tx */ - writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl); + dw32(MACCtrl, dr16(MACCtrl) | TxEnable); } static int @@ -931,8 +925,8 @@ receive_packet (struct net_device *dev) static void rio_error (struct net_device *dev, int int_status) { - long ioaddr = dev->base_addr; struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; u16 macctrl; /* Link change event */ @@ -954,7 +948,7 @@ rio_error (struct net_device *dev, int int_status) TxFlowControlEnable : 0; macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0; - writew(macctrl, ioaddr + MACCtrl); + dw16(MACCtrl, macctrl); np->link_status = 1; netif_carrier_on(dev); } else { @@ -974,7 +968,7 @@ rio_error (struct net_device *dev, int int_status) if (int_status & HostError) { printk (KERN_ERR "%s: HostError! 
IntStatus %4.4x.\n", dev->name, int_status); - writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2); + dw16(ASICCtrl + 2, GlobalReset | HostReset); mdelay (500); } } @@ -982,8 +976,8 @@ rio_error (struct net_device *dev, int int_status) static struct net_device_stats * get_stats (struct net_device *dev) { - long ioaddr = dev->base_addr; struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; #ifdef MEM_MAPPING int i; #endif @@ -992,106 +986,107 @@ get_stats (struct net_device *dev) /* All statistics registers need to be acknowledged, else statistic overflow could cause problems */ - np->stats.rx_packets += readl (ioaddr + FramesRcvOk); - np->stats.tx_packets += readl (ioaddr + FramesXmtOk); - np->stats.rx_bytes += readl (ioaddr + OctetRcvOk); - np->stats.tx_bytes += readl (ioaddr + OctetXmtOk); + np->stats.rx_packets += dr32(FramesRcvOk); + np->stats.tx_packets += dr32(FramesXmtOk); + np->stats.rx_bytes += dr32(OctetRcvOk); + np->stats.tx_bytes += dr32(OctetXmtOk); - np->stats.multicast = readl (ioaddr + McstFramesRcvdOk); - np->stats.collisions += readl (ioaddr + SingleColFrames) - + readl (ioaddr + MultiColFrames); + np->stats.multicast = dr32(McstFramesRcvdOk); + np->stats.collisions += dr32(SingleColFrames) + + dr32(MultiColFrames); /* detailed tx errors */ - stat_reg = readw (ioaddr + FramesAbortXSColls); + stat_reg = dr16(FramesAbortXSColls); np->stats.tx_aborted_errors += stat_reg; np->stats.tx_errors += stat_reg; - stat_reg = readw (ioaddr + CarrierSenseErrors); + stat_reg = dr16(CarrierSenseErrors); np->stats.tx_carrier_errors += stat_reg; np->stats.tx_errors += stat_reg; /* Clear all other statistic register. */ - readl (ioaddr + McstOctetXmtOk); - readw (ioaddr + BcstFramesXmtdOk); - readl (ioaddr + McstFramesXmtdOk); - readw (ioaddr + BcstFramesRcvdOk); - readw (ioaddr + MacControlFramesRcvd); - readw (ioaddr + FrameTooLongErrors); - readw (ioaddr + InRangeLengthErrors); - readw (ioaddr + FramesCheckSeqErrors); - readw (ioaddr + FramesLostRxErrors); - readl (ioaddr + McstOctetXmtOk); - readl (ioaddr + BcstOctetXmtOk); - readl (ioaddr + McstFramesXmtdOk); - readl (ioaddr + FramesWDeferredXmt); - readl (ioaddr + LateCollisions); - readw (ioaddr + BcstFramesXmtdOk); - readw (ioaddr + MacControlFramesXmtd); - readw (ioaddr + FramesWEXDeferal); + dr32(McstOctetXmtOk); + dr16(BcstFramesXmtdOk); + dr32(McstFramesXmtdOk); + dr16(BcstFramesRcvdOk); + dr16(MacControlFramesRcvd); + dr16(FrameTooLongErrors); + dr16(InRangeLengthErrors); + dr16(FramesCheckSeqErrors); + dr16(FramesLostRxErrors); + dr32(McstOctetXmtOk); + dr32(BcstOctetXmtOk); + dr32(McstFramesXmtdOk); + dr32(FramesWDeferredXmt); + dr32(LateCollisions); + dr16(BcstFramesXmtdOk); + dr16(MacControlFramesXmtd); + dr16(FramesWEXDeferal); #ifdef MEM_MAPPING for (i = 0x100; i <= 0x150; i += 4) - readl (ioaddr + i); + dr32(i); #endif - readw (ioaddr + TxJumboFrames); - readw (ioaddr + RxJumboFrames); - readw (ioaddr + TCPCheckSumErrors); - readw (ioaddr + UDPCheckSumErrors); - readw (ioaddr + IPCheckSumErrors); + dr16(TxJumboFrames); + dr16(RxJumboFrames); + dr16(TCPCheckSumErrors); + dr16(UDPCheckSumErrors); + dr16(IPCheckSumErrors); return &np->stats; } static int clear_stats (struct net_device *dev) { - long ioaddr = dev->base_addr; + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; #ifdef MEM_MAPPING int i; #endif /* All statistics registers need to be acknowledged, else statistic overflow could cause problems */ - readl (ioaddr + FramesRcvOk); - readl (ioaddr + 
FramesXmtOk); - readl (ioaddr + OctetRcvOk); - readl (ioaddr + OctetXmtOk); - - readl (ioaddr + McstFramesRcvdOk); - readl (ioaddr + SingleColFrames); - readl (ioaddr + MultiColFrames); - readl (ioaddr + LateCollisions); + dr32(FramesRcvOk); + dr32(FramesXmtOk); + dr32(OctetRcvOk); + dr32(OctetXmtOk); + + dr32(McstFramesRcvdOk); + dr32(SingleColFrames); + dr32(MultiColFrames); + dr32(LateCollisions); /* detailed rx errors */ - readw (ioaddr + FrameTooLongErrors); - readw (ioaddr + InRangeLengthErrors); - readw (ioaddr + FramesCheckSeqErrors); - readw (ioaddr + FramesLostRxErrors); + dr16(FrameTooLongErrors); + dr16(InRangeLengthErrors); + dr16(FramesCheckSeqErrors); + dr16(FramesLostRxErrors); /* detailed tx errors */ - readw (ioaddr + FramesAbortXSColls); - readw (ioaddr + CarrierSenseErrors); + dr16(FramesAbortXSColls); + dr16(CarrierSenseErrors); /* Clear all other statistic register. */ - readl (ioaddr + McstOctetXmtOk); - readw (ioaddr + BcstFramesXmtdOk); - readl (ioaddr + McstFramesXmtdOk); - readw (ioaddr + BcstFramesRcvdOk); - readw (ioaddr + MacControlFramesRcvd); - readl (ioaddr + McstOctetXmtOk); - readl (ioaddr + BcstOctetXmtOk); - readl (ioaddr + McstFramesXmtdOk); - readl (ioaddr + FramesWDeferredXmt); - readw (ioaddr + BcstFramesXmtdOk); - readw (ioaddr + MacControlFramesXmtd); - readw (ioaddr + FramesWEXDeferal); + dr32(McstOctetXmtOk); + dr16(BcstFramesXmtdOk); + dr32(McstFramesXmtdOk); + dr16(BcstFramesRcvdOk); + dr16(MacControlFramesRcvd); + dr32(McstOctetXmtOk); + dr32(BcstOctetXmtOk); + dr32(McstFramesXmtdOk); + dr32(FramesWDeferredXmt); + dr16(BcstFramesXmtdOk); + dr16(MacControlFramesXmtd); + dr16(FramesWEXDeferal); #ifdef MEM_MAPPING for (i = 0x100; i <= 0x150; i += 4) - readl (ioaddr + i); + dr32(i); #endif - readw (ioaddr + TxJumboFrames); - readw (ioaddr + RxJumboFrames); - readw (ioaddr + TCPCheckSumErrors); - readw (ioaddr + UDPCheckSumErrors); - readw (ioaddr + IPCheckSumErrors); + dr16(TxJumboFrames); + dr16(RxJumboFrames); + dr16(TCPCheckSumErrors); + dr16(UDPCheckSumErrors); + dr16(IPCheckSumErrors); return 0; } @@ -1114,10 +1109,10 @@ change_mtu (struct net_device *dev, int new_mtu) static void set_multicast (struct net_device *dev) { - long ioaddr = dev->base_addr; + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; u32 hash_table[2]; u16 rx_mode = 0; - struct netdev_private *np = netdev_priv(dev); hash_table[0] = hash_table[1] = 0; /* RxFlowcontrol DA: 01-80-C2-00-00-01. 
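As the comment in get_stats()/clear_stats() notes, every statistics register has to be acknowledged by reading it or the counters can overflow, which is why both functions touch each register even when the value is thrown away. The accumulation step therefore reduces to reads through the new accessors, for example:

    /* Excerpt in the style of the hunks above; unused counters are read
     * solely to acknowledge them. */
    np->stats.rx_packets += dr32(FramesRcvOk);
    np->stats.tx_packets += dr32(FramesXmtOk);
    np->stats.rx_bytes   += dr32(OctetRcvOk);
    np->stats.tx_bytes   += dr32(OctetXmtOk);
    dr16(TxJumboFrames);            /* acknowledge only */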
Hash index=0x39 */ @@ -1153,9 +1148,9 @@ set_multicast (struct net_device *dev) rx_mode |= ReceiveVLANMatch; } - writel (hash_table[0], ioaddr + HashTable0); - writel (hash_table[1], ioaddr + HashTable1); - writew (rx_mode, ioaddr + ReceiveMode); + dw32(HashTable0, hash_table[0]); + dw32(HashTable1, hash_table[1]); + dw16(ReceiveMode, rx_mode); } static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) @@ -1284,15 +1279,15 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) #define EEP_BUSY 0x8000 /* Read the EEPROM word */ /* We use I/O instruction to read/write eeprom to avoid fail on some machines */ -static int -read_eeprom (long ioaddr, int eep_addr) +static int read_eeprom(struct netdev_private *np, int eep_addr) { + void __iomem *ioaddr = np->eeprom_addr; int i = 1000; - outw (EEP_READ | (eep_addr & 0xff), ioaddr + EepromCtrl); + + dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff)); while (i-- > 0) { - if (!(inw (ioaddr + EepromCtrl) & EEP_BUSY)) { - return inw (ioaddr + EepromData); - } + if (!(dr16(EepromCtrl) & EEP_BUSY)) + return dr16(EepromData); } return 0; } @@ -1302,38 +1297,40 @@ enum phy_ctrl_bits { MII_DUPLEX = 0x08, }; -#define mii_delay() readb(ioaddr) +#define mii_delay() dr8(PhyCtrl) static void mii_sendbit (struct net_device *dev, u32 data) { - long ioaddr = dev->base_addr + PhyCtrl; - data = (data) ? MII_DATA1 : 0; - data |= MII_WRITE; - data |= (readb (ioaddr) & 0xf8) | MII_WRITE; - writeb (data, ioaddr); + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; + + data = ((data) ? MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE; + dw8(PhyCtrl, data); mii_delay (); - writeb (data | MII_CLK, ioaddr); + dw8(PhyCtrl, data | MII_CLK); mii_delay (); } static int mii_getbit (struct net_device *dev) { - long ioaddr = dev->base_addr + PhyCtrl; + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; u8 data; - data = (readb (ioaddr) & 0xf8) | MII_READ; - writeb (data, ioaddr); + data = (dr8(PhyCtrl) & 0xf8) | MII_READ; + dw8(PhyCtrl, data); mii_delay (); - writeb (data | MII_CLK, ioaddr); + dw8(PhyCtrl, data | MII_CLK); mii_delay (); - return ((readb (ioaddr) >> 1) & 1); + return (dr8(PhyCtrl) >> 1) & 1; } static void mii_send_bits (struct net_device *dev, u32 data, int len) { int i; + for (i = len - 1; i >= 0; i--) { mii_sendbit (dev, data & (1 << i)); } @@ -1687,28 +1684,29 @@ mii_set_media_pcs (struct net_device *dev) static int rio_close (struct net_device *dev) { - long ioaddr = dev->base_addr; struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; + + struct pci_dev *pdev = np->pdev; struct sk_buff *skb; int i; netif_stop_queue (dev); /* Disable interrupts */ - writew (0, ioaddr + IntEnable); + dw16(IntEnable, 0); /* Stop Tx and Rx logics */ - writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl); + dw32(MACCtrl, TxDisable | RxDisable | StatsDisable); - free_irq (dev->irq, dev); + free_irq(pdev->irq, dev); del_timer_sync (&np->timer); /* Free all the skbuffs in the queue. 
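Stripped of the interleaved +/- lines, the reworked read_eeprom() above now takes the private structure and polls the EEPROM through the eeprom_addr window:

    /* Consolidated view of the new read_eeprom() from the hunk above. */
    static int read_eeprom(struct netdev_private *np, int eep_addr)
    {
            void __iomem *ioaddr = np->eeprom_addr;
            int i = 1000;

            dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff));
            while (i-- > 0) {
                    if (!(dr16(EepromCtrl) & EEP_BUSY))
                            return dr16(EepromData);
            }
            return 0;
    }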
*/ for (i = 0; i < RX_RING_SIZE; i++) { skb = np->rx_skbuff[i]; if (skb) { - pci_unmap_single(np->pdev, - desc_to_dma(&np->rx_ring[i]), + pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]), skb->len, PCI_DMA_FROMDEVICE); dev_kfree_skb (skb); np->rx_skbuff[i] = NULL; @@ -1719,8 +1717,7 @@ rio_close (struct net_device *dev) for (i = 0; i < TX_RING_SIZE; i++) { skb = np->tx_skbuff[i]; if (skb) { - pci_unmap_single(np->pdev, - desc_to_dma(&np->tx_ring[i]), + pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]), skb->len, PCI_DMA_TODEVICE); dev_kfree_skb (skb); np->tx_skbuff[i] = NULL; @@ -1744,8 +1741,9 @@ rio_remove1 (struct pci_dev *pdev) pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); #ifdef MEM_MAPPING - iounmap ((char *) (dev->base_addr)); + pci_iounmap(pdev, np->ioaddr); #endif + pci_iounmap(pdev, np->eeprom_addr); free_netdev (dev); pci_release_regions (pdev); pci_disable_device (pdev); diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h index 30c2da3de54..3699565704c 100644 --- a/drivers/net/ethernet/dlink/dl2k.h +++ b/drivers/net/ethernet/dlink/dl2k.h @@ -42,23 +42,6 @@ #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc) #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc) -/* This driver was written to use PCI memory space, however x86-oriented - hardware often uses I/O space accesses. */ -#ifndef MEM_MAPPING -#undef readb -#undef readw -#undef readl -#undef writeb -#undef writew -#undef writel -#define readb inb -#define readw inw -#define readl inl -#define writeb outb -#define writew outw -#define writel outl -#endif - /* Offsets to the device registers. Unlike software-only systems, device drivers interact with complex hardware. It's not useful to define symbolic names for every register bit in the @@ -384,6 +367,8 @@ struct netdev_private { dma_addr_t tx_ring_dma; dma_addr_t rx_ring_dma; struct pci_dev *pdev; + void __iomem *ioaddr; + void __iomem *eeprom_addr; spinlock_t tx_lock; spinlock_t rx_lock; struct net_device_stats stats; diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index d783f4f96ec..d7bb52a7bda 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -522,9 +522,6 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev, cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET)); memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - dev->base_addr = (unsigned long)ioaddr; - dev->irq = irq; - np = netdev_priv(dev); np->base = ioaddr; np->pci_dev = pdev; @@ -828,18 +825,19 @@ static int netdev_open(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; + const int irq = np->pci_dev->irq; unsigned long flags; int i; /* Do we need to reset the chip??? 
*/ - i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); + i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev); if (i) return i; if (netif_msg_ifup(np)) - printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", - dev->name, dev->irq); + printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq); + init_ring(dev); iowrite32(np->rx_ring_dma, ioaddr + RxListPtr); @@ -1814,7 +1812,7 @@ static int netdev_close(struct net_device *dev) } #endif /* __i386__ debugging only */ - free_irq(dev->irq, dev); + free_irq(np->pci_dev->irq, dev); del_timer_sync(&np->timer); diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index b276469f74e..290b26f868c 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -815,6 +815,7 @@ static const struct ethtool_ops dnet_ethtool_ops = { .set_settings = dnet_set_settings, .get_drvinfo = dnet_get_drvinfo, .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, }; static const struct net_device_ops dnet_netdev_ops = { diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 9576ac002c2..ff4eb8fe25d 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -33,7 +33,7 @@ #include "be_hw.h" -#define DRV_VER "4.2.116u" +#define DRV_VER "4.2.220u" #define DRV_NAME "be2net" #define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" #define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" @@ -162,6 +162,11 @@ static inline void queue_head_inc(struct be_queue_info *q) index_inc(&q->head, q->len); } +static inline void index_dec(u16 *index, u16 limit) +{ + *index = MODULO((*index - 1), limit); +} + static inline void queue_tail_inc(struct be_queue_info *q) { index_inc(&q->tail, q->len); @@ -308,11 +313,33 @@ struct be_vf_cfg { u32 tx_rate; }; +enum vf_state { + ENABLED = 0, + ASSIGNED = 1 +}; + #define BE_FLAGS_LINK_STATUS_INIT 1 #define BE_FLAGS_WORKER_SCHEDULED (1 << 3) #define BE_UC_PMAC_COUNT 30 #define BE_VF_UC_PMAC_COUNT 2 +struct phy_info { + u8 transceiver; + u8 autoneg; + u8 fc_autoneg; + u8 port_type; + u16 phy_type; + u16 interface_type; + u32 misc_params; + u16 auto_speeds_supported; + u16 fixed_speeds_supported; + int link_speed; + int forced_port_speed; + u32 dac_cable_len; + u32 advertising; + u32 supported; +}; + struct be_adapter { struct pci_dev *pdev; struct net_device *netdev; @@ -377,29 +404,30 @@ struct be_adapter { u32 rx_fc; /* Rx flow control */ u32 tx_fc; /* Tx flow control */ bool stats_cmd_sent; - int link_speed; - u8 port_type; - u8 transceiver; - u8 autoneg; u8 generation; /* BladeEngine ASIC generation */ u32 flash_status; struct completion flash_compl; - u32 num_vfs; - u8 is_virtfn; + u32 num_vfs; /* Number of VFs provisioned by PF driver */ + u32 dev_num_vfs; /* Number of VFs supported by HW */ + u8 virtfn; struct be_vf_cfg *vf_cfg; bool be3_native; u32 sli_family; u8 hba_port_num; u16 pvid; + struct phy_info phy; u8 wol_cap; bool wol; u32 max_pmac_cnt; /* Max secondary UC MACs programmable */ u32 uc_macs; /* Count of secondary UC MAC programmed */ + u32 msg_enable; }; -#define be_physfn(adapter) (!adapter->is_virtfn) +#define be_physfn(adapter) (!adapter->virtfn) #define sriov_enabled(adapter) (adapter->num_vfs > 0) +#define sriov_want(adapter) (adapter->dev_num_vfs && num_vfs && \ + be_physfn(adapter)) #define for_all_vfs(adapter, vf_cfg, i) \ for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \ i++, vf_cfg++) @@ -528,14 +556,6 @@ static inline u8 is_udp_pkt(struct 
sk_buff *skb) return val; } -static inline void be_check_sriov_fn_type(struct be_adapter *adapter) -{ - u32 sli_intf; - - pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf); - adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0; -} - static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac) { u32 addr; @@ -583,4 +603,7 @@ extern void be_link_status_update(struct be_adapter *adapter, u8 link_status); extern void be_parse_stats(struct be_adapter *adapter); extern int be_load_fw(struct be_adapter *adapter, u8 *func); extern bool be_is_wol_supported(struct be_adapter *adapter); +extern bool be_pause_supported(struct be_adapter *adapter); +extern u32 be_get_fw_log_level(struct be_adapter *adapter); + #endif /* BE_H */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 67b030d72df..b24623cce07 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -61,10 +61,21 @@ static inline void be_mcc_compl_use(struct be_mcc_compl *compl) compl->flags = 0; } +static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1) +{ + unsigned long addr; + + addr = tag1; + addr = ((addr << 16) << 16) | tag0; + return (void *)addr; +} + static int be_mcc_compl_process(struct be_adapter *adapter, - struct be_mcc_compl *compl) + struct be_mcc_compl *compl) { u16 compl_status, extd_status; + struct be_cmd_resp_hdr *resp_hdr; + u8 opcode = 0, subsystem = 0; /* Just swap the status to host endian; mcc tag is opaquely copied * from mcc_wrb */ @@ -73,32 +84,36 @@ static int be_mcc_compl_process(struct be_adapter *adapter, compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & CQE_STATUS_COMPL_MASK; - if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) || - (compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) && - (compl->tag1 == CMD_SUBSYSTEM_COMMON)) { + resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1); + + if (resp_hdr) { + opcode = resp_hdr->opcode; + subsystem = resp_hdr->subsystem; + } + + if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || + (opcode == OPCODE_COMMON_WRITE_OBJECT)) && + (subsystem == CMD_SUBSYSTEM_COMMON)) { adapter->flash_status = compl_status; complete(&adapter->flash_compl); } if (compl_status == MCC_STATUS_SUCCESS) { - if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) || - (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) && - (compl->tag1 == CMD_SUBSYSTEM_ETH)) { + if (((opcode == OPCODE_ETH_GET_STATISTICS) || + (opcode == OPCODE_ETH_GET_PPORT_STATS)) && + (subsystem == CMD_SUBSYSTEM_ETH)) { be_parse_stats(adapter); adapter->stats_cmd_sent = false; } - if (compl->tag0 == - OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) { - struct be_mcc_wrb *mcc_wrb = - queue_index_node(&adapter->mcc_obj.q, - compl->tag1); + if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES && + subsystem == CMD_SUBSYSTEM_COMMON) { struct be_cmd_resp_get_cntl_addnl_attribs *resp = - embedded_payload(mcc_wrb); + (void *)resp_hdr; adapter->drv_stats.be_on_die_temperature = resp->on_die_temperature; } } else { - if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) + if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) be_get_temp_freq = 0; if (compl_status == MCC_STATUS_NOT_SUPPORTED || @@ -108,13 +123,13 @@ static int be_mcc_compl_process(struct be_adapter *adapter, if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { dev_warn(&adapter->pdev->dev, "This domain(VM) is not " "permitted to execute this cmd (opcode %d)\n", - compl->tag0); + opcode); } 
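/*
 * Note on the tag scheme used in this function: tag0/tag1 no longer carry
 * the opcode and subsystem.  be_wrb_cmd_hdr_prepare(), further down in this
 * patch, stores the address of the command buffer instead (tag0 = low 32
 * bits, tag1 = upper_32_bits()), and be_decode_resp_hdr() above rebuilds it
 * as ((addr << 16) << 16) | tag0.  Splitting the shift into two 16-bit steps
 * keeps the expression well defined on 32-bit kernels, where unsigned long
 * is only 32 bits wide and a single shift by 32 would be undefined; tag1 is
 * zero there, so the high half simply drops out.  Because the response is
 * written back into that same buffer, the completion handler can read the
 * opcode and subsystem from the response header for any command instead of
 * special-casing particular tag values.
 */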
else { extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & CQE_STATUS_EXTD_MASK; dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:" "status %d, extd-status %d\n", - compl->tag0, compl_status, extd_status); + opcode, compl_status, extd_status); } } done: @@ -126,7 +141,7 @@ static void be_async_link_state_process(struct be_adapter *adapter, struct be_async_event_link_state *evt) { /* When link status changes, link speed must be re-queried from FW */ - adapter->link_speed = -1; + adapter->phy.link_speed = -1; /* For the initial link status do not rely on the ASYNC event as * it may not be received in some cases. @@ -153,7 +168,7 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, { if (evt->physical_port == adapter->port_num) { /* qos_link_speed is in units of 10 Mbps */ - adapter->link_speed = evt->qos_link_speed * 10; + adapter->phy.link_speed = evt->qos_link_speed * 10; } } @@ -286,7 +301,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) if (i == mcc_timeout) { dev_err(&adapter->pdev->dev, "FW not responding\n"); adapter->fw_timeout = true; - return -1; + return -EIO; } return status; } @@ -294,8 +309,26 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) /* Notify MCC requests and wait for completion */ static int be_mcc_notify_wait(struct be_adapter *adapter) { + int status; + struct be_mcc_wrb *wrb; + struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; + u16 index = mcc_obj->q.head; + struct be_cmd_resp_hdr *resp; + + index_dec(&index, mcc_obj->q.len); + wrb = queue_index_node(&mcc_obj->q, index); + + resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1); + be_mcc_notify(adapter); - return be_mcc_wait_compl(adapter); + + status = be_mcc_wait_compl(adapter); + if (status == -EIO) + goto out; + + status = resp->status; +out: + return status; } static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) @@ -435,14 +468,17 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, struct be_mcc_wrb *wrb, struct be_dma_mem *mem) { struct be_sge *sge; + unsigned long addr = (unsigned long)req_hdr; + u64 req_addr = addr; req_hdr->opcode = opcode; req_hdr->subsystem = subsystem; req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); req_hdr->version = 0; - wrb->tag0 = opcode; - wrb->tag1 = subsystem; + wrb->tag0 = req_addr & 0xFFFFFFFF; + wrb->tag1 = upper_32_bits(req_addr); + wrb->payload_length = cmd_len; if (mem) { wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) << @@ -1221,7 +1257,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb, nonemb_cmd); - req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num); + req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num); req->cmd_params.params.reset_stats = 0; be_mcc_notify(adapter); @@ -1283,13 +1319,10 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_cntl_addnl_attribs *req; - u16 mccq_index; int status; spin_lock_bh(&adapter->mcc_lock); - mccq_index = adapter->mcc_obj.q.head; - wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; @@ -1301,8 +1334,6 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter) OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req), wrb, NULL); - wrb->tag1 = mccq_index; - be_mcc_notify(adapter); err: @@ -1824,18 +1855,16 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, spin_unlock_bh(&adapter->mcc_lock); if 
(!wait_for_completion_timeout(&adapter->flash_compl, - msecs_to_jiffies(12000))) + msecs_to_jiffies(30000))) status = -1; else status = adapter->flash_status; resp = embedded_payload(wrb); - if (!status) { + if (!status) *data_written = le32_to_cpu(resp->actual_write_len); - } else { + else *addn_status = resp->additional_status; - status = resp->status; - } return status; @@ -1950,7 +1979,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL); - req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT); + req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT); req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); req->params.offset = cpu_to_le32(offset); req->params.data_buf_size = cpu_to_le32(0x4); @@ -2136,8 +2165,7 @@ err: return status; } -int be_cmd_get_phy_info(struct be_adapter *adapter, - struct be_phy_info *phy_info) +int be_cmd_get_phy_info(struct be_adapter *adapter) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_phy_info *req; @@ -2170,9 +2198,15 @@ int be_cmd_get_phy_info(struct be_adapter *adapter, if (!status) { struct be_phy_info *resp_phy_info = cmd.va + sizeof(struct be_cmd_req_hdr); - phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type); - phy_info->interface_type = + adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type); + adapter->phy.interface_type = le16_to_cpu(resp_phy_info->interface_type); + adapter->phy.auto_speeds_supported = + le16_to_cpu(resp_phy_info->auto_speeds_supported); + adapter->phy.fixed_speeds_supported = + le16_to_cpu(resp_phy_info->fixed_speeds_supported); + adapter->phy.misc_params = + le32_to_cpu(resp_phy_info->misc_params); } pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); @@ -2555,4 +2589,60 @@ err: mutex_unlock(&adapter->mbox_lock); pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); return status; + +} +int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, + struct be_dma_mem *cmd) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_get_ext_fat_caps *req; + int status; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = wrb_from_mbox(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = cmd->va; + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_EXT_FAT_CAPABILITES, + cmd->size, wrb, cmd); + req->parameter_type = cpu_to_le32(1); + + status = be_mbox_notify_wait(adapter); +err: + mutex_unlock(&adapter->mbox_lock); + return status; +} + +int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, + struct be_dma_mem *cmd, + struct be_fat_conf_params *configs) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_set_ext_fat_caps *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = cmd->va; + memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params)); + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_EXT_FAT_CAPABILITES, + cmd->size, wrb, cmd); + + status = be_mcc_notify_wait(adapter); +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; } diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index d5b680c56af..0b1029b60f6 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -189,6 +189,8 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_GET_PHY_DETAILS 102 #define 
OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103 #define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121 +#define OPCODE_COMMON_GET_EXT_FAT_CAPABILITES 125 +#define OPCODE_COMMON_SET_EXT_FAT_CAPABILITES 126 #define OPCODE_COMMON_GET_MAC_LIST 147 #define OPCODE_COMMON_SET_MAC_LIST 148 #define OPCODE_COMMON_GET_HSW_CONFIG 152 @@ -225,8 +227,12 @@ struct be_cmd_req_hdr { #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ #define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */ struct be_cmd_resp_hdr { - u32 info; /* dword 0 */ - u32 status; /* dword 1 */ + u8 opcode; /* dword 0 */ + u8 subsystem; /* dword 0 */ + u8 rsvd[2]; /* dword 0 */ + u8 status; /* dword 1 */ + u8 add_status; /* dword 1 */ + u8 rsvd1[2]; /* dword 1 */ u32 response_length; /* dword 2 */ u32 actual_resp_len; /* dword 3 */ }; @@ -1309,9 +1315,36 @@ enum { PHY_TYPE_KX4_10GB, PHY_TYPE_BASET_10GB, PHY_TYPE_BASET_1GB, + PHY_TYPE_BASEX_1GB, + PHY_TYPE_SGMII, PHY_TYPE_DISABLED = 255 }; +#define BE_SUPPORTED_SPEED_NONE 0 +#define BE_SUPPORTED_SPEED_10MBPS 1 +#define BE_SUPPORTED_SPEED_100MBPS 2 +#define BE_SUPPORTED_SPEED_1GBPS 4 +#define BE_SUPPORTED_SPEED_10GBPS 8 + +#define BE_AN_EN 0x2 +#define BE_PAUSE_SYM_EN 0x80 + +/* MAC speed valid values */ +#define SPEED_DEFAULT 0x0 +#define SPEED_FORCED_10GB 0x1 +#define SPEED_FORCED_1GB 0x2 +#define SPEED_AUTONEG_10GB 0x3 +#define SPEED_AUTONEG_1GB 0x4 +#define SPEED_AUTONEG_100MB 0x5 +#define SPEED_AUTONEG_10GB_1GB 0x6 +#define SPEED_AUTONEG_10GB_1GB_100MB 0x7 +#define SPEED_AUTONEG_1GB_100MB 0x8 +#define SPEED_AUTONEG_10MB 0x9 +#define SPEED_AUTONEG_1GB_100MB_10MB 0xa +#define SPEED_AUTONEG_100MB_10MB 0xb +#define SPEED_FORCED_100MB 0xc +#define SPEED_FORCED_10MB 0xd + struct be_cmd_req_get_phy_info { struct be_cmd_req_hdr hdr; u8 rsvd0[24]; @@ -1321,7 +1354,11 @@ struct be_phy_info { u16 phy_type; u16 interface_type; u32 misc_params; - u32 future_use[4]; + u16 ext_phy_details; + u16 rsvd; + u16 auto_speeds_supported; + u16 fixed_speeds_supported; + u32 future_use[2]; }; struct be_cmd_resp_get_phy_info { @@ -1567,6 +1604,56 @@ static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter) } } + +/************** get fat capabilites *******************/ +#define MAX_MODULES 27 +#define MAX_MODES 4 +#define MODE_UART 0 +#define FW_LOG_LEVEL_DEFAULT 48 +#define FW_LOG_LEVEL_FATAL 64 + +struct ext_fat_mode { + u8 mode; + u8 rsvd0; + u16 port_mask; + u32 dbg_lvl; + u64 fun_mask; +} __packed; + +struct ext_fat_modules { + u8 modules_str[32]; + u32 modules_id; + u32 num_modes; + struct ext_fat_mode trace_lvl[MAX_MODES]; +} __packed; + +struct be_fat_conf_params { + u32 max_log_entries; + u32 log_entry_size; + u8 log_type; + u8 max_log_funs; + u8 max_log_ports; + u8 rsvd0; + u32 supp_modes; + u32 num_modules; + struct ext_fat_modules module[MAX_MODULES]; +} __packed; + +struct be_cmd_req_get_ext_fat_caps { + struct be_cmd_req_hdr hdr; + u32 parameter_type; +}; + +struct be_cmd_resp_get_ext_fat_caps { + struct be_cmd_resp_hdr hdr; + struct be_fat_conf_params get_params; +}; + +struct be_cmd_req_set_ext_fat_caps { + struct be_cmd_req_hdr hdr; + struct be_fat_conf_params set_params; +}; + extern int be_pci_fnum_get(struct be_adapter *adapter); extern int be_cmd_POST(struct be_adapter *adapter); extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, @@ -1655,8 +1742,7 @@ extern int be_cmd_get_seeprom_data(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd); extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, u8 loopback_type, u8 enable); -extern 
int be_cmd_get_phy_info(struct be_adapter *adapter, - struct be_phy_info *phy_info); +extern int be_cmd_get_phy_info(struct be_adapter *adapter); extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); extern void be_detect_dump_ue(struct be_adapter *adapter); extern int be_cmd_get_die_temperature(struct be_adapter *adapter); @@ -1673,4 +1759,9 @@ extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain, u16 intf_id); extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter); +extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, + struct be_dma_mem *cmd); +extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, + struct be_dma_mem *cmd, + struct be_fat_conf_params *cfgs); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index c1ff73cb0e6..63e51d47690 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -433,102 +433,193 @@ static int be_get_sset_count(struct net_device *netdev, int stringset) } } +static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len) +{ + u32 port; + + switch (phy_type) { + case PHY_TYPE_BASET_1GB: + case PHY_TYPE_BASEX_1GB: + case PHY_TYPE_SGMII: + port = PORT_TP; + break; + case PHY_TYPE_SFP_PLUS_10GB: + port = dac_cable_len ? PORT_DA : PORT_FIBRE; + break; + case PHY_TYPE_XFP_10GB: + case PHY_TYPE_SFP_1GB: + port = PORT_FIBRE; + break; + case PHY_TYPE_BASET_10GB: + port = PORT_TP; + break; + default: + port = PORT_OTHER; + } + + return port; +} + +static u32 convert_to_et_setting(u32 if_type, u32 if_speeds) +{ + u32 val = 0; + + switch (if_type) { + case PHY_TYPE_BASET_1GB: + case PHY_TYPE_BASEX_1GB: + case PHY_TYPE_SGMII: + val |= SUPPORTED_TP; + if (if_speeds & BE_SUPPORTED_SPEED_1GBPS) + val |= SUPPORTED_1000baseT_Full; + if (if_speeds & BE_SUPPORTED_SPEED_100MBPS) + val |= SUPPORTED_100baseT_Full; + if (if_speeds & BE_SUPPORTED_SPEED_10MBPS) + val |= SUPPORTED_10baseT_Full; + break; + case PHY_TYPE_KX4_10GB: + val |= SUPPORTED_Backplane; + if (if_speeds & BE_SUPPORTED_SPEED_1GBPS) + val |= SUPPORTED_1000baseKX_Full; + if (if_speeds & BE_SUPPORTED_SPEED_10GBPS) + val |= SUPPORTED_10000baseKX4_Full; + break; + case PHY_TYPE_KR_10GB: + val |= SUPPORTED_Backplane | + SUPPORTED_10000baseKR_Full; + break; + case PHY_TYPE_SFP_PLUS_10GB: + case PHY_TYPE_XFP_10GB: + case PHY_TYPE_SFP_1GB: + val |= SUPPORTED_FIBRE; + if (if_speeds & BE_SUPPORTED_SPEED_10GBPS) + val |= SUPPORTED_10000baseT_Full; + if (if_speeds & BE_SUPPORTED_SPEED_1GBPS) + val |= SUPPORTED_1000baseT_Full; + break; + case PHY_TYPE_BASET_10GB: + val |= SUPPORTED_TP; + if (if_speeds & BE_SUPPORTED_SPEED_10GBPS) + val |= SUPPORTED_10000baseT_Full; + if (if_speeds & BE_SUPPORTED_SPEED_1GBPS) + val |= SUPPORTED_1000baseT_Full; + if (if_speeds & BE_SUPPORTED_SPEED_100MBPS) + val |= SUPPORTED_100baseT_Full; + break; + default: + val |= SUPPORTED_TP; + } + + return val; +} + +static int convert_to_et_speed(u32 be_speed) +{ + int et_speed = SPEED_10000; + + switch (be_speed) { + case PHY_LINK_SPEED_10MBPS: + et_speed = SPEED_10; + break; + case PHY_LINK_SPEED_100MBPS: + et_speed = SPEED_100; + break; + case PHY_LINK_SPEED_1GBPS: + et_speed = SPEED_1000; + break; + case PHY_LINK_SPEED_10GBPS: + et_speed = SPEED_10000; + break; + } + + return et_speed; +} + +bool be_pause_supported(struct be_adapter *adapter) +{ + return 
(adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB || + adapter->phy.interface_type == PHY_TYPE_XFP_10GB) ? + false : true; +} + static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct be_adapter *adapter = netdev_priv(netdev); - struct be_phy_info phy_info; - u8 mac_speed = 0; + u8 port_speed = 0; u16 link_speed = 0; u8 link_status; + u32 et_speed = 0; int status; - if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) { - status = be_cmd_link_status_query(adapter, &mac_speed, - &link_speed, &link_status, 0); - if (!status) - be_link_status_update(adapter, link_status); - - /* link_speed is in units of 10 Mbps */ - if (link_speed) { - ethtool_cmd_speed_set(ecmd, link_speed*10); + if (adapter->phy.link_speed < 0 || !(netdev->flags & IFF_UP)) { + if (adapter->phy.forced_port_speed < 0) { + status = be_cmd_link_status_query(adapter, &port_speed, + &link_speed, &link_status, 0); + if (!status) + be_link_status_update(adapter, link_status); + if (link_speed) + et_speed = link_speed * 10; + else if (link_status) + et_speed = convert_to_et_speed(port_speed); } else { - switch (mac_speed) { - case PHY_LINK_SPEED_10MBPS: - ethtool_cmd_speed_set(ecmd, SPEED_10); - break; - case PHY_LINK_SPEED_100MBPS: - ethtool_cmd_speed_set(ecmd, SPEED_100); - break; - case PHY_LINK_SPEED_1GBPS: - ethtool_cmd_speed_set(ecmd, SPEED_1000); - break; - case PHY_LINK_SPEED_10GBPS: - ethtool_cmd_speed_set(ecmd, SPEED_10000); - break; - case PHY_LINK_SPEED_ZERO: - ethtool_cmd_speed_set(ecmd, 0); - break; - } + et_speed = adapter->phy.forced_port_speed; } - status = be_cmd_get_phy_info(adapter, &phy_info); - if (!status) { - switch (phy_info.interface_type) { - case PHY_TYPE_XFP_10GB: - case PHY_TYPE_SFP_1GB: - case PHY_TYPE_SFP_PLUS_10GB: - ecmd->port = PORT_FIBRE; - break; - default: - ecmd->port = PORT_TP; - break; - } + ethtool_cmd_speed_set(ecmd, et_speed); + + status = be_cmd_get_phy_info(adapter); + if (status) + return status; + + ecmd->supported = + convert_to_et_setting(adapter->phy.interface_type, + adapter->phy.auto_speeds_supported | + adapter->phy.fixed_speeds_supported); + ecmd->advertising = + convert_to_et_setting(adapter->phy.interface_type, + adapter->phy.auto_speeds_supported); + + ecmd->port = be_get_port_type(adapter->phy.interface_type, + adapter->phy.dac_cable_len); + + if (adapter->phy.auto_speeds_supported) { + ecmd->supported |= SUPPORTED_Autoneg; + ecmd->autoneg = AUTONEG_ENABLE; + ecmd->advertising |= ADVERTISED_Autoneg; + } - switch (phy_info.interface_type) { - case PHY_TYPE_KR_10GB: - case PHY_TYPE_KX4_10GB: - ecmd->autoneg = AUTONEG_ENABLE; + if (be_pause_supported(adapter)) { + ecmd->supported |= SUPPORTED_Pause; + ecmd->advertising |= ADVERTISED_Pause; + } + + switch (adapter->phy.interface_type) { + case PHY_TYPE_KR_10GB: + case PHY_TYPE_KX4_10GB: ecmd->transceiver = XCVR_INTERNAL; - break; - default: - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->transceiver = XCVR_EXTERNAL; - break; - } + break; + default: + ecmd->transceiver = XCVR_EXTERNAL; + break; } /* Save for future use */ - adapter->link_speed = ethtool_cmd_speed(ecmd); - adapter->port_type = ecmd->port; - adapter->transceiver = ecmd->transceiver; - adapter->autoneg = ecmd->autoneg; + adapter->phy.link_speed = ethtool_cmd_speed(ecmd); + adapter->phy.port_type = ecmd->port; + adapter->phy.transceiver = ecmd->transceiver; + adapter->phy.autoneg = ecmd->autoneg; + adapter->phy.advertising = ecmd->advertising; + adapter->phy.supported = ecmd->supported; } else { - ethtool_cmd_speed_set(ecmd, 
adapter->link_speed); - ecmd->port = adapter->port_type; - ecmd->transceiver = adapter->transceiver; - ecmd->autoneg = adapter->autoneg; + ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed); + ecmd->port = adapter->phy.port_type; + ecmd->transceiver = adapter->phy.transceiver; + ecmd->autoneg = adapter->phy.autoneg; + ecmd->advertising = adapter->phy.advertising; + ecmd->supported = adapter->phy.supported; } - ecmd->duplex = DUPLEX_FULL; + ecmd->duplex = netif_carrier_ok(netdev) ? DUPLEX_FULL : DUPLEX_UNKNOWN; ecmd->phy_address = adapter->port_num; - switch (ecmd->port) { - case PORT_FIBRE: - ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); - break; - case PORT_TP: - ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP); - break; - case PORT_AUI: - ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI); - break; - } - - if (ecmd->autoneg) { - ecmd->supported |= SUPPORTED_1000baseT_Full; - ecmd->supported |= SUPPORTED_Autoneg; - ecmd->advertising |= (ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full); - } return 0; } @@ -548,7 +639,7 @@ be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) struct be_adapter *adapter = netdev_priv(netdev); be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause); - ecmd->autoneg = 0; + ecmd->autoneg = adapter->phy.fc_autoneg; } static int @@ -702,7 +793,7 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) } } - if (be_test_ddr_dma(adapter) != 0) { + if (!lancer_chip(adapter) && be_test_ddr_dma(adapter) != 0) { data[3] = 1; test->flags |= ETH_TEST_FL_FAILED; } @@ -787,6 +878,81 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, return status; } +static u32 be_get_msg_level(struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + if (lancer_chip(adapter)) { + dev_err(&adapter->pdev->dev, "Operation not supported\n"); + return -EOPNOTSUPP; + } + + return adapter->msg_enable; +} + +static void be_set_fw_log_level(struct be_adapter *adapter, u32 level) +{ + struct be_dma_mem extfat_cmd; + struct be_fat_conf_params *cfgs; + int status; + int i, j; + + memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); + extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); + extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, + &extfat_cmd.dma); + if (!extfat_cmd.va) { + dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", + __func__); + goto err; + } + status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); + if (!status) { + cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + + sizeof(struct be_cmd_resp_hdr)); + for (i = 0; i < cfgs->num_modules; i++) { + for (j = 0; j < cfgs->module[i].num_modes; j++) { + if (cfgs->module[i].trace_lvl[j].mode == + MODE_UART) + cfgs->module[i].trace_lvl[j].dbg_lvl = + cpu_to_le32(level); + } + } + status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, + cfgs); + if (status) + dev_err(&adapter->pdev->dev, + "Message level set failed\n"); + } else { + dev_err(&adapter->pdev->dev, "Message level get failed\n"); + } + + pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, + extfat_cmd.dma); +err: + return; +} + +static void be_set_msg_level(struct net_device *netdev, u32 level) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + if (lancer_chip(adapter)) { + dev_err(&adapter->pdev->dev, "Operation not supported\n"); + return; + } + + if (adapter->msg_enable == level) + return; + + if ((level & NETIF_MSG_HW) != 
(adapter->msg_enable & NETIF_MSG_HW)) + be_set_fw_log_level(adapter, level & NETIF_MSG_HW ? + FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL); + adapter->msg_enable = level; + + return; +} + const struct ethtool_ops be_ethtool_ops = { .get_settings = be_get_settings, .get_drvinfo = be_get_drvinfo, @@ -802,6 +968,8 @@ const struct ethtool_ops be_ethtool_ops = { .set_pauseparam = be_set_pauseparam, .get_strings = be_get_stat_strings, .set_phys_id = be_set_phys_id, + .get_msglevel = be_get_msg_level, + .set_msglevel = be_set_msg_level, .get_sset_count = be_get_sset_count, .get_ethtool_stats = be_get_ethtool_stats, .get_regs_len = be_get_reg_len, diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index f2c89e3ccab..f38b58c8dbb 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h @@ -58,6 +58,8 @@ #define SLI_PORT_CONTROL_IP_MASK 0x08000000 +#define PCICFG_CUST_SCRATCHPAD_CSR 0x1EC + /********* Memory BAR register ************/ #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt @@ -162,22 +164,23 @@ #define QUERY_FAT 1 /* Flashrom related descriptors */ +#define MAX_FLASH_COMP 32 #define IMAGE_TYPE_FIRMWARE 160 #define IMAGE_TYPE_BOOTCODE 224 #define IMAGE_TYPE_OPTIONROM 32 #define NUM_FLASHDIR_ENTRIES 32 -#define IMG_TYPE_ISCSI_ACTIVE 0 -#define IMG_TYPE_REDBOOT 1 -#define IMG_TYPE_BIOS 2 -#define IMG_TYPE_PXE_BIOS 3 -#define IMG_TYPE_FCOE_BIOS 8 -#define IMG_TYPE_ISCSI_BACKUP 9 -#define IMG_TYPE_FCOE_FW_ACTIVE 10 -#define IMG_TYPE_FCOE_FW_BACKUP 11 -#define IMG_TYPE_NCSI_FW 13 -#define IMG_TYPE_PHY_FW 99 +#define OPTYPE_ISCSI_ACTIVE 0 +#define OPTYPE_REDBOOT 1 +#define OPTYPE_BIOS 2 +#define OPTYPE_PXE_BIOS 3 +#define OPTYPE_FCOE_BIOS 8 +#define OPTYPE_ISCSI_BACKUP 9 +#define OPTYPE_FCOE_FW_ACTIVE 10 +#define OPTYPE_FCOE_FW_BACKUP 11 +#define OPTYPE_NCSI_FW 13 +#define OPTYPE_PHY_FW 99 #define TN_8022 13 #define ILLEGAL_IOCTL_REQ 2 @@ -223,6 +226,24 @@ #define FLASH_REDBOOT_START_g3 (262144) #define FLASH_PHY_FW_START_g3 1310720 +#define IMAGE_NCSI 16 +#define IMAGE_OPTION_ROM_PXE 32 +#define IMAGE_OPTION_ROM_FCoE 33 +#define IMAGE_OPTION_ROM_ISCSI 34 +#define IMAGE_FLASHISM_JUMPVECTOR 48 +#define IMAGE_FLASH_ISM 49 +#define IMAGE_JUMP_VECTOR 50 +#define IMAGE_FIRMWARE_iSCSI 160 +#define IMAGE_FIRMWARE_COMP_iSCSI 161 +#define IMAGE_FIRMWARE_FCoE 162 +#define IMAGE_FIRMWARE_COMP_FCoE 163 +#define IMAGE_FIRMWARE_BACKUP_iSCSI 176 +#define IMAGE_FIRMWARE_BACKUP_COMP_iSCSI 177 +#define IMAGE_FIRMWARE_BACKUP_FCoE 178 +#define IMAGE_FIRMWARE_BACKUP_COMP_FCoE 179 +#define IMAGE_FIRMWARE_PHY 192 +#define IMAGE_BOOT_CODE 224 + /************* Rx Packet Type Encoding **************/ #define BE_UNICAST_PACKET 0 #define BE_MULTICAST_PACKET 1 @@ -445,6 +466,7 @@ struct flash_comp { unsigned long offset; int optype; int size; + int img_type; }; struct image_hdr { @@ -481,17 +503,19 @@ struct flash_section_hdr { u32 format_rev; u32 cksum; u32 antidote; - u32 build_no; - u8 id_string[64]; - u32 active_entry_mask; - u32 valid_entry_mask; - u32 org_content_mask; - u32 rsvd0; - u32 rsvd1; - u32 rsvd2; - u32 rsvd3; - u32 rsvd4; -}; + u32 num_images; + u8 id_string[128]; + u32 rsvd[4]; +} __packed; + +struct flash_section_hdr_g2 { + u32 format_rev; + u32 cksum; + u32 antidote; + u32 build_num; + u8 id_string[128]; + u32 rsvd[8]; +} __packed; struct flash_section_entry { u32 type; @@ -503,10 +527,16 @@ struct flash_section_entry { u32 rsvd0; u32 
rsvd1; u8 ver_data[32]; -}; +} __packed; struct flash_section_info { u8 cookie[32]; struct flash_section_hdr fsec_hdr; struct flash_section_entry fsec_entry[32]; -}; +} __packed; + +struct flash_section_info_g2 { + u8 cookie[32]; + struct flash_section_hdr_g2 fsec_hdr; + struct flash_section_entry fsec_entry[32]; +} __packed; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 528a886bc2c..081c7770116 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -421,6 +421,9 @@ void be_parse_stats(struct be_adapter *adapter) populate_be2_stats(adapter); } + if (lancer_chip(adapter)) + goto done; + /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */ for_all_rx_queues(adapter, rxo, i) { /* below erx HW counter can actually wrap around after @@ -429,6 +432,8 @@ void be_parse_stats(struct be_adapter *adapter) accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags, (u16)erx->rx_drops_no_fragments[rxo->q.id]); } +done: + return; } static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, @@ -797,22 +802,30 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num) if (adapter->promiscuous) return 0; - if (adapter->vlans_added <= adapter->max_vlans) { - /* Construct VLAN Table to give to HW */ - for (i = 0; i < VLAN_N_VID; i++) { - if (adapter->vlan_tag[i]) { - vtag[ntags] = cpu_to_le16(i); - ntags++; - } - } - status = be_cmd_vlan_config(adapter, adapter->if_handle, - vtag, ntags, 1, 0); - } else { - status = be_cmd_vlan_config(adapter, adapter->if_handle, - NULL, 0, 1, 1); + if (adapter->vlans_added > adapter->max_vlans) + goto set_vlan_promisc; + + /* Construct VLAN Table to give to HW */ + for (i = 0; i < VLAN_N_VID; i++) + if (adapter->vlan_tag[i]) + vtag[ntags++] = cpu_to_le16(i); + + status = be_cmd_vlan_config(adapter, adapter->if_handle, + vtag, ntags, 1, 0); + + /* Set to VLAN promisc mode as setting VLAN filter failed */ + if (status) { + dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n"); + dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n"); + goto set_vlan_promisc; } return status; + +set_vlan_promisc: + status = be_cmd_vlan_config(adapter, adapter->if_handle, + NULL, 0, 1, 1); + return status; } static int be_vlan_add_vid(struct net_device *netdev, u16 vid) @@ -862,6 +875,7 @@ ret: static void be_set_rx_mode(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); + int status; if (netdev->flags & IFF_PROMISC) { be_cmd_rx_filter(adapter, IFF_PROMISC, ON); @@ -908,7 +922,14 @@ static void be_set_rx_mode(struct net_device *netdev) } } - be_cmd_rx_filter(adapter, IFF_MULTICAST, ON); + status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON); + + /* Set to MCAST promisc mode if setting MULTICAST address fails */ + if (status) { + dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n"); + dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n"); + be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON); + } done: return; } @@ -1028,6 +1049,29 @@ static int be_set_vf_tx_rate(struct net_device *netdev, return status; } +static int be_find_vfs(struct be_adapter *adapter, int vf_state) +{ + struct pci_dev *dev, *pdev = adapter->pdev; + int vfs = 0, assigned_vfs = 0, pos, vf_fn; + u16 offset, stride; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset); + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, 
&stride); + + dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL); + while (dev) { + vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF; + if (dev->is_virtfn && dev->devfn == vf_fn) { + vfs++; + if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) + assigned_vfs++; + } + dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev); + } + return (vf_state == ASSIGNED) ? assigned_vfs : vfs; +} + static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo) { struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]); @@ -1238,6 +1282,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, netdev); + skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]); if (netdev->features & NETIF_F_RXHASH) skb->rxhash = rxcp->rss_hash; @@ -1294,6 +1339,7 @@ void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi, skb->len = rxcp->pkt_size; skb->data_len = rxcp->pkt_size; skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]); if (adapter->netdev->features & NETIF_F_RXHASH) skb->rxhash = rxcp->rss_hash; @@ -1555,7 +1601,9 @@ static int event_handle(struct be_eq_obj *eqo) if (!num) rearm = true; - be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num); + if (num || msix_enabled(eqo->adapter)) + be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num); + if (num) napi_schedule(&eqo->napi); @@ -1764,9 +1812,9 @@ static void be_tx_queues_destroy(struct be_adapter *adapter) static int be_num_txqs_want(struct be_adapter *adapter) { - if (sriov_enabled(adapter) || be_is_mc(adapter) || - lancer_chip(adapter) || !be_physfn(adapter) || - adapter->generation == BE_GEN2) + if (sriov_want(adapter) || be_is_mc(adapter) || + lancer_chip(adapter) || !be_physfn(adapter) || + adapter->generation == BE_GEN2) return 1; else return MAX_TX_QS; @@ -2093,7 +2141,7 @@ static void be_msix_disable(struct be_adapter *adapter) static uint be_num_rss_want(struct be_adapter *adapter) { if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && - adapter->num_vfs == 0 && be_physfn(adapter) && + !sriov_want(adapter) && be_physfn(adapter) && !be_is_mc(adapter)) return (adapter->be3_native) ? 
BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; else @@ -2127,53 +2175,6 @@ done: return; } -static int be_sriov_enable(struct be_adapter *adapter) -{ - be_check_sriov_fn_type(adapter); - -#ifdef CONFIG_PCI_IOV - if (be_physfn(adapter) && num_vfs) { - int status, pos; - u16 dev_vfs; - - pos = pci_find_ext_capability(adapter->pdev, - PCI_EXT_CAP_ID_SRIOV); - pci_read_config_word(adapter->pdev, - pos + PCI_SRIOV_TOTAL_VF, &dev_vfs); - - adapter->num_vfs = min_t(u16, num_vfs, dev_vfs); - if (adapter->num_vfs != num_vfs) - dev_info(&adapter->pdev->dev, - "Device supports %d VFs and not %d\n", - adapter->num_vfs, num_vfs); - - status = pci_enable_sriov(adapter->pdev, adapter->num_vfs); - if (status) - adapter->num_vfs = 0; - - if (adapter->num_vfs) { - adapter->vf_cfg = kcalloc(num_vfs, - sizeof(struct be_vf_cfg), - GFP_KERNEL); - if (!adapter->vf_cfg) - return -ENOMEM; - } - } -#endif - return 0; -} - -static void be_sriov_disable(struct be_adapter *adapter) -{ -#ifdef CONFIG_PCI_IOV - if (sriov_enabled(adapter)) { - pci_disable_sriov(adapter->pdev); - kfree(adapter->vf_cfg); - adapter->num_vfs = 0; - } -#endif -} - static inline int be_msix_vec_get(struct be_adapter *adapter, struct be_eq_obj *eqo) { @@ -2475,6 +2476,11 @@ static void be_vf_clear(struct be_adapter *adapter) struct be_vf_cfg *vf_cfg; u32 vf; + if (be_find_vfs(adapter, ASSIGNED)) { + dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n"); + goto done; + } + for_all_vfs(adapter, vf_cfg, vf) { if (lancer_chip(adapter)) be_cmd_set_mac_list(adapter, NULL, 0, vf + 1); @@ -2484,6 +2490,10 @@ static void be_vf_clear(struct be_adapter *adapter) be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1); } + pci_disable_sriov(adapter->pdev); +done: + kfree(adapter->vf_cfg); + adapter->num_vfs = 0; } static int be_clear(struct be_adapter *adapter) @@ -2513,29 +2523,60 @@ static int be_clear(struct be_adapter *adapter) be_cmd_fw_clean(adapter); be_msix_disable(adapter); - kfree(adapter->pmac_id); + pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0); return 0; } -static void be_vf_setup_init(struct be_adapter *adapter) +static int be_vf_setup_init(struct be_adapter *adapter) { struct be_vf_cfg *vf_cfg; int vf; + adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg), + GFP_KERNEL); + if (!adapter->vf_cfg) + return -ENOMEM; + for_all_vfs(adapter, vf_cfg, vf) { vf_cfg->if_handle = -1; vf_cfg->pmac_id = -1; } + return 0; } static int be_vf_setup(struct be_adapter *adapter) { struct be_vf_cfg *vf_cfg; + struct device *dev = &adapter->pdev->dev; u32 cap_flags, en_flags, vf; u16 def_vlan, lnk_speed; - int status; + int status, enabled_vfs; - be_vf_setup_init(adapter); + enabled_vfs = be_find_vfs(adapter, ENABLED); + if (enabled_vfs) { + dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs); + dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs); + return 0; + } + + if (num_vfs > adapter->dev_num_vfs) { + dev_warn(dev, "Device supports %d VFs and not %d\n", + adapter->dev_num_vfs, num_vfs); + num_vfs = adapter->dev_num_vfs; + } + + status = pci_enable_sriov(adapter->pdev, num_vfs); + if (!status) { + adapter->num_vfs = num_vfs; + } else { + /* Platform doesn't support SRIOV though device supports it */ + dev_warn(dev, "SRIOV enable failed\n"); + return 0; + } + + status = be_vf_setup_init(adapter); + if (status) + goto err; cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST; @@ -2546,9 +2587,11 @@ static int be_vf_setup(struct be_adapter *adapter) goto err; } - status = 
be_vf_eth_addr_config(adapter); - if (status) - goto err; + if (!enabled_vfs) { + status = be_vf_eth_addr_config(adapter); + if (status) + goto err; + } for_all_vfs(adapter, vf_cfg, vf) { status = be_cmd_link_status_query(adapter, NULL, &lnk_speed, @@ -2571,11 +2614,12 @@ err: static void be_setup_init(struct be_adapter *adapter) { adapter->vlan_prio_bmap = 0xff; - adapter->link_speed = -1; + adapter->phy.link_speed = -1; adapter->if_handle = -1; adapter->be3_native = false; adapter->promiscuous = false; adapter->eq_next_idx = 0; + adapter->phy.forced_port_speed = -1; } static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac) @@ -2604,9 +2648,25 @@ do_none: return status; } +/* Routine to query per function resource limits */ +static int be_get_config(struct be_adapter *adapter) +{ + int pos; + u16 dev_num_vfs; + + pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV); + if (pos) { + pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF, + &dev_num_vfs); + adapter->dev_num_vfs = dev_num_vfs; + } + return 0; +} + static int be_setup(struct be_adapter *adapter) { struct net_device *netdev = adapter->netdev; + struct device *dev = &adapter->pdev->dev; u32 cap_flags, en_flags; u32 tx_fc, rx_fc; int status; @@ -2614,6 +2674,8 @@ static int be_setup(struct be_adapter *adapter) be_setup_init(adapter); + be_get_config(adapter); + be_cmd_req_native_mode(adapter); be_msix_enable(adapter); @@ -2680,36 +2742,33 @@ static int be_setup(struct be_adapter *adapter) be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL); - status = be_vid_config(adapter, false, 0); - if (status) - goto err; + be_vid_config(adapter, false, 0); be_set_rx_mode(adapter->netdev); - status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc); - /* For Lancer: It is legal for this cmd to fail on VF */ - if (status && (be_physfn(adapter) || !lancer_chip(adapter))) - goto err; + be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc); - if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) { - status = be_cmd_set_flow_control(adapter, adapter->tx_fc, + if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) + be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); - /* For Lancer: It is legal for this cmd to fail on VF */ - if (status && (be_physfn(adapter) || !lancer_chip(adapter))) - goto err; - } pcie_set_readrq(adapter->pdev, 4096); - if (sriov_enabled(adapter)) { - status = be_vf_setup(adapter); - if (status) - goto err; + if (be_physfn(adapter) && num_vfs) { + if (adapter->dev_num_vfs) + be_vf_setup(adapter); + else + dev_warn(dev, "device doesn't support SRIOV\n"); } + be_cmd_get_phy_info(adapter); + if (be_pause_supported(adapter)) + adapter->phy.fc_autoneg = 1; + schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; + pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1); return 0; err: be_clear(adapter); @@ -2731,6 +2790,8 @@ static void be_netpoll(struct net_device *netdev) #endif #define FW_FILE_HDR_SIGN "ServerEngines Corp. 
" +char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "}; + static bool be_flash_redboot(struct be_adapter *adapter, const u8 *p, u32 img_start, int image_size, int hdr_size) @@ -2760,71 +2821,105 @@ static bool be_flash_redboot(struct be_adapter *adapter, static bool phy_flashing_required(struct be_adapter *adapter) { - int status = 0; - struct be_phy_info phy_info; + return (adapter->phy.phy_type == TN_8022 && + adapter->phy.interface_type == PHY_TYPE_BASET_10GB); +} - status = be_cmd_get_phy_info(adapter, &phy_info); - if (status) - return false; - if ((phy_info.phy_type == TN_8022) && - (phy_info.interface_type == PHY_TYPE_BASET_10GB)) { - return true; +static bool is_comp_in_ufi(struct be_adapter *adapter, + struct flash_section_info *fsec, int type) +{ + int i = 0, img_type = 0; + struct flash_section_info_g2 *fsec_g2 = NULL; + + if (adapter->generation != BE_GEN3) + fsec_g2 = (struct flash_section_info_g2 *)fsec; + + for (i = 0; i < MAX_FLASH_COMP; i++) { + if (fsec_g2) + img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type); + else + img_type = le32_to_cpu(fsec->fsec_entry[i].type); + + if (img_type == type) + return true; } return false; + +} + +struct flash_section_info *get_fsec_info(struct be_adapter *adapter, + int header_size, + const struct firmware *fw) +{ + struct flash_section_info *fsec = NULL; + const u8 *p = fw->data; + + p += header_size; + while (p < (fw->data + fw->size)) { + fsec = (struct flash_section_info *)p; + if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) + return fsec; + p += 32; + } + return NULL; } static int be_flash_data(struct be_adapter *adapter, - const struct firmware *fw, - struct be_dma_mem *flash_cmd, int num_of_images) + const struct firmware *fw, + struct be_dma_mem *flash_cmd, + int num_of_images) { int status = 0, i, filehdr_size = 0; + int img_hdrs_size = (num_of_images * sizeof(struct image_hdr)); u32 total_bytes = 0, flash_op; int num_bytes; const u8 *p = fw->data; struct be_cmd_write_flashrom *req = flash_cmd->va; const struct flash_comp *pflashcomp; - int num_comp; - - static const struct flash_comp gen3_flash_types[10] = { - { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE, - FLASH_IMAGE_MAX_SIZE_g3}, - { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT, - FLASH_REDBOOT_IMAGE_MAX_SIZE_g3}, - { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS, - FLASH_BIOS_IMAGE_MAX_SIZE_g3}, - { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS, - FLASH_BIOS_IMAGE_MAX_SIZE_g3}, - { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS, - FLASH_BIOS_IMAGE_MAX_SIZE_g3}, - { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP, - FLASH_IMAGE_MAX_SIZE_g3}, - { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE, - FLASH_IMAGE_MAX_SIZE_g3}, - { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP, - FLASH_IMAGE_MAX_SIZE_g3}, - { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW, - FLASH_NCSI_IMAGE_MAX_SIZE_g3}, - { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW, - FLASH_PHY_FW_IMAGE_MAX_SIZE_g3} + int num_comp, hdr_size; + struct flash_section_info *fsec = NULL; + + struct flash_comp gen3_flash_types[] = { + { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE, + FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI}, + { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT, + FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE}, + { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS, + FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI}, + { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS, + FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE}, + { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS, + 
FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE}, + { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP, + FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI}, + { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE, + FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE}, + { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP, + FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE}, + { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW, + FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI}, + { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW, + FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY} }; - static const struct flash_comp gen2_flash_types[8] = { - { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE, - FLASH_IMAGE_MAX_SIZE_g2}, - { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT, - FLASH_REDBOOT_IMAGE_MAX_SIZE_g2}, - { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS, - FLASH_BIOS_IMAGE_MAX_SIZE_g2}, - { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS, - FLASH_BIOS_IMAGE_MAX_SIZE_g2}, - { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS, - FLASH_BIOS_IMAGE_MAX_SIZE_g2}, - { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP, - FLASH_IMAGE_MAX_SIZE_g2}, - { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE, - FLASH_IMAGE_MAX_SIZE_g2}, - { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP, - FLASH_IMAGE_MAX_SIZE_g2} + + struct flash_comp gen2_flash_types[] = { + { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE, + FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI}, + { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT, + FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE}, + { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS, + FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI}, + { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS, + FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE}, + { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS, + FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE}, + { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP, + FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI}, + { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE, + FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE}, + { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP, + FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE} }; if (adapter->generation == BE_GEN3) { @@ -2836,22 +2931,37 @@ static int be_flash_data(struct be_adapter *adapter, filehdr_size = sizeof(struct flash_file_hdr_g2); num_comp = ARRAY_SIZE(gen2_flash_types); } + /* Get flash section info*/ + fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); + if (!fsec) { + dev_err(&adapter->pdev->dev, + "Invalid Cookie. 
UFI corrupted ?\n"); + return -1; + } for (i = 0; i < num_comp; i++) { - if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) && - memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0) + if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type)) + continue; + + if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) && + memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0) continue; - if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) { + + if (pflashcomp[i].optype == OPTYPE_PHY_FW) { if (!phy_flashing_required(adapter)) continue; } - if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) && - (!be_flash_redboot(adapter, fw->data, - pflashcomp[i].offset, pflashcomp[i].size, filehdr_size + - (num_of_images * sizeof(struct image_hdr))))) + + hdr_size = filehdr_size + + (num_of_images * sizeof(struct image_hdr)); + + if ((pflashcomp[i].optype == OPTYPE_REDBOOT) && + (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset, + pflashcomp[i].size, hdr_size))) continue; + + /* Flash the component */ p = fw->data; - p += filehdr_size + pflashcomp[i].offset - + (num_of_images * sizeof(struct image_hdr)); + p += filehdr_size + pflashcomp[i].offset + img_hdrs_size; if (p + pflashcomp[i].size > fw->data + fw->size) return -1; total_bytes = pflashcomp[i].size; @@ -2862,12 +2972,12 @@ static int be_flash_data(struct be_adapter *adapter, num_bytes = total_bytes; total_bytes -= num_bytes; if (!total_bytes) { - if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) + if (pflashcomp[i].optype == OPTYPE_PHY_FW) flash_op = FLASHROM_OPER_PHY_FLASH; else flash_op = FLASHROM_OPER_FLASH; } else { - if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) + if (pflashcomp[i].optype == OPTYPE_PHY_FW) flash_op = FLASHROM_OPER_PHY_SAVE; else flash_op = FLASHROM_OPER_SAVE; @@ -2879,7 +2989,7 @@ static int be_flash_data(struct be_adapter *adapter, if (status) { if ((status == ILLEGAL_IOCTL_REQ) && (pflashcomp[i].optype == - IMG_TYPE_PHY_FW)) + OPTYPE_PHY_FW)) break; dev_err(&adapter->pdev->dev, "cmd to write to flash rom failed.\n"); @@ -3280,8 +3390,6 @@ static void __devexit be_remove(struct pci_dev *pdev) be_ctrl_cleanup(adapter); - be_sriov_disable(adapter); - pci_set_drvdata(pdev, NULL); pci_release_regions(pdev); pci_disable_device(pdev); @@ -3295,9 +3403,43 @@ bool be_is_wol_supported(struct be_adapter *adapter) !be_is_wol_excluded(adapter)) ? 
true : false; } -static int be_get_config(struct be_adapter *adapter) +u32 be_get_fw_log_level(struct be_adapter *adapter) { + struct be_dma_mem extfat_cmd; + struct be_fat_conf_params *cfgs; int status; + u32 level = 0; + int j; + + memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); + extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); + extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, + &extfat_cmd.dma); + + if (!extfat_cmd.va) { + dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", + __func__); + goto err; + } + + status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); + if (!status) { + cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + + sizeof(struct be_cmd_resp_hdr)); + for (j = 0; j < cfgs->module[0].num_modes; j++) { + if (cfgs->module[0].trace_lvl[j].mode == MODE_UART) + level = cfgs->module[0].trace_lvl[j].dbg_lvl; + } + } + pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, + extfat_cmd.dma); +err: + return level; +} +static int be_get_initial_config(struct be_adapter *adapter) +{ + int status; + u32 level; status = be_cmd_query_fw_cfg(adapter, &adapter->port_num, &adapter->function_mode, &adapter->function_caps); @@ -3335,10 +3477,13 @@ static int be_get_config(struct be_adapter *adapter) if (be_is_wol_supported(adapter)) adapter->wol = true; + level = be_get_fw_log_level(adapter); + adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0; + return 0; } -static int be_dev_family_check(struct be_adapter *adapter) +static int be_dev_type_check(struct be_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; u32 sli_intf = 0, if_type; @@ -3371,6 +3516,9 @@ static int be_dev_family_check(struct be_adapter *adapter) default: adapter->generation = 0; } + + pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf); + adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0; return 0; } @@ -3514,6 +3662,14 @@ reschedule: schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); } +static bool be_reset_required(struct be_adapter *adapter) +{ + u32 reg; + + pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, ®); + return reg; +} + static int __devinit be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) { @@ -3539,7 +3695,7 @@ static int __devinit be_probe(struct pci_dev *pdev, adapter->pdev = pdev; pci_set_drvdata(pdev, adapter); - status = be_dev_family_check(adapter); + status = be_dev_type_check(adapter); if (status) goto free_netdev; @@ -3557,13 +3713,9 @@ static int __devinit be_probe(struct pci_dev *pdev, } } - status = be_sriov_enable(adapter); - if (status) - goto free_netdev; - status = be_ctrl_init(adapter); if (status) - goto disable_sriov; + goto free_netdev; if (lancer_chip(adapter)) { status = lancer_wait_ready(adapter); @@ -3590,9 +3742,11 @@ static int __devinit be_probe(struct pci_dev *pdev, if (status) goto ctrl_clean; - status = be_cmd_reset_function(adapter); - if (status) - goto ctrl_clean; + if (be_reset_required(adapter)) { + status = be_cmd_reset_function(adapter); + if (status) + goto ctrl_clean; + } /* The INTR bit may be set in the card when probed by a kdump kernel * after a crash. 
@@ -3604,7 +3758,7 @@ static int __devinit be_probe(struct pci_dev *pdev, if (status) goto ctrl_clean; - status = be_get_config(adapter); + status = be_get_initial_config(adapter); if (status) goto stats_clean; @@ -3633,8 +3787,6 @@ stats_clean: be_stats_cleanup(adapter); ctrl_clean: be_ctrl_cleanup(adapter); -disable_sriov: - be_sriov_disable(adapter); free_netdev: free_netdev(netdev); pci_set_drvdata(pdev, NULL); @@ -3749,6 +3901,11 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, pci_disable_device(pdev); + /* The error could cause the FW to trigger a flash debug dump. + * Resetting the card while flash dump is in progress + * can cause it not to recover; wait for it to finish + */ + ssleep(30); return PCI_ERS_RESULT_NEED_RESET; } diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index 1637b986229..9d71c9cc300 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -545,9 +545,6 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev, /* Reset the chip to erase previous misconfiguration. */ iowrite32(0x00000001, ioaddr + BCR); - dev->base_addr = (unsigned long)ioaddr; - dev->irq = irq; - /* Make certain the descriptor lists are aligned. */ np = netdev_priv(dev); np->mem = ioaddr; @@ -832,11 +829,13 @@ static int netdev_open(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->mem; - int i; + const int irq = np->pci_dev->irq; + int rc, i; iowrite32(0x00000001, ioaddr + BCR); /* Reset */ - if (request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev)) + rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev); + if (rc) return -EAGAIN; for (i = 0; i < 3; i++) @@ -924,8 +923,7 @@ static int netdev_open(struct net_device *dev) np->reset_timer.data = (unsigned long) dev; np->reset_timer.function = reset_timer; np->reset_timer_armed = 0; - - return 0; + return rc; } @@ -1910,7 +1908,7 @@ static int netdev_close(struct net_device *dev) del_timer_sync(&np->timer); del_timer_sync(&np->reset_timer); - free_irq(dev->irq, dev); + free_irq(np->pci_dev->irq, dev); /* Free all the skbuffs in the Rx queue. 
*/ for (i = 0; i < RX_RING_SIZE; i++) { diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c index a12b3f5bc02..7fa0227c9c0 100644 --- a/drivers/net/ethernet/freescale/fec.c +++ b/drivers/net/ethernet/freescale/fec.c @@ -1161,6 +1161,7 @@ static const struct ethtool_ops fec_enet_ethtool_ops = { .set_settings = fec_enet_set_settings, .get_drvinfo = fec_enet_get_drvinfo, .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, }; static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 7b34d8c698d..97f947b3d94 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -811,6 +811,7 @@ static const struct ethtool_ops mpc52xx_fec_ethtool_ops = { .get_link = ethtool_op_get_link, .get_msglevel = mpc52xx_fec_get_msglevel, .set_msglevel = mpc52xx_fec_set_msglevel, + .get_ts_info = ethtool_op_get_ts_info, }; diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index e4e6cd2c5f8..2b7633f766d 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -963,6 +963,7 @@ static const struct ethtool_ops fs_ethtool_ops = { .get_msglevel = fs_get_msglevel, .set_msglevel = fs_set_msglevel, .get_regs = fs_get_regs, + .get_ts_info = ethtool_op_get_ts_info, }; static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index e7bed530399..1adb0245b9d 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -136,7 +136,7 @@ static void gfar_netpoll(struct net_device *dev); int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, - int amount_pull); + int amount_pull, struct napi_struct *napi); void gfar_halt(struct net_device *dev); static void gfar_halt_nodisable(struct net_device *dev); void gfar_start(struct net_device *dev); @@ -2675,12 +2675,12 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) /* gfar_process_frame() -- handle one incoming packet if skb * isn't NULL. 
*/ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, - int amount_pull) + int amount_pull, struct napi_struct *napi) { struct gfar_private *priv = netdev_priv(dev); struct rxfcb *fcb = NULL; - int ret; + gro_result_t ret; /* fcb is at the beginning if exists */ fcb = (struct rxfcb *)skb->data; @@ -2719,9 +2719,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, __vlan_hwaccel_put_tag(skb, fcb->vlctl); /* Send the packet up the stack */ - ret = netif_receive_skb(skb); + ret = napi_gro_receive(napi, skb); - if (NET_RX_DROP == ret) + if (GRO_DROP == ret) priv->extra_stats.kernel_dropped++; return 0; @@ -2783,7 +2783,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) skb_put(skb, pkt_len); rx_queue->stats.rx_bytes += pkt_len; skb_record_rx_queue(skb, rx_queue->qindex); - gfar_process_frame(dev, skb, amount_pull); + gfar_process_frame(dev, skb, amount_pull, + &rx_queue->grp->napi); } else { netif_warn(priv, rx_err, dev, "Missing skb!\n"); diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 4c9f8d487db..2136c7ff5e6 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -1210,4 +1210,7 @@ struct filer_table { struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20]; }; +/* The gianfar_ptp module will set this variable */ +extern int gfar_phc_index; + #endif /* __GIANFAR_H */ diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 8d74efd04bb..8a025570d97 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -26,6 +26,7 @@ #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> +#include <linux/net_tstamp.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/mm.h> @@ -1739,6 +1740,34 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } +int gfar_phc_index = -1; + +static int gfar_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct gfar_private *priv = netdev_priv(dev); + + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) { + info->so_timestamping = + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + return 0; + } + info->so_timestamping = + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->phc_index = gfar_phc_index; + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + return 0; +} + const struct ethtool_ops gfar_ethtool_ops = { .get_settings = gfar_gsettings, .set_settings = gfar_ssettings, @@ -1761,4 +1790,5 @@ const struct ethtool_ops gfar_ethtool_ops = { #endif .set_rxnfc = gfar_set_nfc, .get_rxnfc = gfar_get_nfc, + .get_ts_info = gfar_get_ts_info, }; diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 5fd620bec15..c08e5d40fec 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -515,6 +515,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) err = PTR_ERR(etsects->clock); goto no_clock; } + gfar_phc_clock = ptp_clock_index(etsects->clock); dev_set_drvdata(&dev->dev, etsects); @@ -538,6 +539,7 @@ static int gianfar_ptp_remove(struct platform_device *dev) 
gfar_write(&etsects->regs->tmr_temask, 0); gfar_write(&etsects->regs->tmr_ctrl, 0); + gfar_phc_clock = -1; ptp_clock_unregister(etsects->clock); iounmap(etsects->regs); release_resource(etsects->rsrc); diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c index a97257f91a3..37b03530601 100644 --- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c +++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c @@ -415,6 +415,7 @@ static const struct ethtool_ops uec_ethtool_ops = { .get_ethtool_stats = uec_get_ethtool_stats, .get_wol = uec_get_wol, .set_wol = uec_set_wol, + .get_ts_info = ethtool_op_get_ts_info, }; void uec_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/fujitsu/at1700.c b/drivers/net/ethernet/fujitsu/at1700.c index 3d94797c8f9..4b80dc4531a 100644 --- a/drivers/net/ethernet/fujitsu/at1700.c +++ b/drivers/net/ethernet/fujitsu/at1700.c @@ -27,7 +27,7 @@ ATI provided their EEPROM configuration code header file. Thanks to NIIBE Yutaka <gniibe@mri.co.jp> for bug fixes. - MCA bus (AT1720) support by Rene Schmit <rene@bss.lu> + MCA bus (AT1720) support (now deleted) by Rene Schmit <rene@bss.lu> Bugs: The MB86965 has a design flaw that makes all probes unreliable. Not @@ -38,7 +38,6 @@ #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> -#include <linux/mca-legacy.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> @@ -79,24 +78,6 @@ static unsigned at1700_probe_list[] __initdata = { 0x260, 0x280, 0x2a0, 0x240, 0x340, 0x320, 0x380, 0x300, 0 }; -/* - * MCA - */ -#ifdef CONFIG_MCA_LEGACY -static int at1700_ioaddr_pattern[] __initdata = { - 0x00, 0x04, 0x01, 0x05, 0x02, 0x06, 0x03, 0x07 -}; - -static int at1700_mca_probe_list[] __initdata = { - 0x400, 0x1400, 0x2400, 0x3400, 0x4400, 0x5400, 0x6400, 0x7400, 0 -}; - -static int at1700_irq_pattern[] __initdata = { - 0x00, 0x00, 0x00, 0x30, 0x70, 0xb0, 0x00, 0x00, - 0x00, 0xf0, 0x34, 0x74, 0xb4, 0x00, 0x00, 0xf4, 0x00 -}; -#endif - /* use 0 for production, 1 for verification, >2 for debug */ #ifndef NET_DEBUG #define NET_DEBUG 1 @@ -114,7 +95,6 @@ struct net_local { uint tx_queue_ready:1; /* Tx queue is ready to be sent. */ uint rx_started:1; /* Packets are Rxing. */ uchar tx_queue; /* Number of packet on the Tx queue. */ - char mca_slot; /* -1 means ISA */ ushort tx_queue_len; /* Current length of the Tx queue. */ }; @@ -166,21 +146,6 @@ static void set_rx_mode(struct net_device *dev); static void net_tx_timeout (struct net_device *dev); -#ifdef CONFIG_MCA_LEGACY -struct at1720_mca_adapters_struct { - char* name; - int id; -}; -/* rEnE : maybe there are others I don't know off... */ - -static struct at1720_mca_adapters_struct at1720_mca_adapters[] __initdata = { - { "Allied Telesys AT1720AT", 0x6410 }, - { "Allied Telesys AT1720BT", 0x6413 }, - { "Allied Telesys AT1720T", 0x6416 }, - { NULL, 0 }, -}; -#endif - /* Check for a network adaptor of this type, and return '0' iff one exists. If dev->base_addr == 0, probe all likely locations. If dev->base_addr == 1, always return failure. 
@@ -194,11 +159,6 @@ static int irq; static void cleanup_card(struct net_device *dev) { -#ifdef CONFIG_MCA_LEGACY - struct net_local *lp = netdev_priv(dev); - if (lp->mca_slot >= 0) - mca_mark_as_unused(lp->mca_slot); -#endif free_irq(dev->irq, NULL); release_region(dev->base_addr, AT1700_IO_EXTENT); } @@ -273,7 +233,7 @@ static int __init at1700_probe1(struct net_device *dev, int ioaddr) static const char fmv_irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15}; static const char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15}; unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0; - int slot, ret = -ENODEV; + int ret = -ENODEV; struct net_local *lp = netdev_priv(dev); if (!request_region(ioaddr, AT1700_IO_EXTENT, DRV_NAME)) @@ -288,64 +248,6 @@ static int __init at1700_probe1(struct net_device *dev, int ioaddr) ioaddr, read_eeprom(ioaddr, 4), read_eeprom(ioaddr, 5), read_eeprom(ioaddr, 6), inw(ioaddr + EEPROM_Ctrl)); #endif - -#ifdef CONFIG_MCA_LEGACY - /* rEnE (rene@bss.lu): got this from 3c509 driver source , adapted for AT1720 */ - - /* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch, heavily - modified by Chris Beauregard (cpbeaure@csclub.uwaterloo.ca) - to support standard MCA probing. */ - - /* redone for multi-card detection by ZP Gu (zpg@castle.net) */ - /* now works as a module */ - - if (MCA_bus) { - int j; - int l_i; - u_char pos3, pos4; - - for (j = 0; at1720_mca_adapters[j].name != NULL; j ++) { - slot = 0; - while (slot != MCA_NOTFOUND) { - - slot = mca_find_unused_adapter( at1720_mca_adapters[j].id, slot ); - if (slot == MCA_NOTFOUND) break; - - /* if we get this far, an adapter has been detected and is - enabled */ - - pos3 = mca_read_stored_pos( slot, 3 ); - pos4 = mca_read_stored_pos( slot, 4 ); - - for (l_i = 0; l_i < 8; l_i++) - if (( pos3 & 0x07) == at1700_ioaddr_pattern[l_i]) - break; - ioaddr = at1700_mca_probe_list[l_i]; - - for (irq = 0; irq < 0x10; irq++) - if (((((pos4>>4) & 0x0f) | (pos3 & 0xf0)) & 0xff) == at1700_irq_pattern[irq]) - break; - - /* probing for a card at a particular IO/IRQ */ - if ((dev->irq && dev->irq != irq) || - (dev->base_addr && dev->base_addr != ioaddr)) { - slot++; /* probing next slot */ - continue; - } - - dev->irq = irq; - - /* claim the slot */ - mca_set_adapter_name( slot, at1720_mca_adapters[j].name ); - mca_mark_as_used(slot); - - goto found; - } - } - /* if we get here, we didn't find an MCA adapter - try ISA */ - } -#endif - slot = -1; /* We must check for the EEPROM-config boards first, else accessing IOCONFIG0 will move the board! */ if (at1700_probe_list[inb(ioaddr + IOCONFIG1) & 0x07] == ioaddr && @@ -360,11 +262,7 @@ static int __init at1700_probe1(struct net_device *dev, int ioaddr) goto err_out; } -#ifdef CONFIG_MCA_LEGACY -found: -#endif - - /* Reset the internal state machines. */ + /* Reset the internal state machines. */ outb(0, ioaddr + RESET); if (is_at1700) { @@ -380,11 +278,11 @@ found: break; } if (i == 8) { - goto err_mca; + goto err_out; } } else { if (fmv18x_probe_list[inb(ioaddr + IOCONFIG) & 0x07] != ioaddr) - goto err_mca; + goto err_out; irq = fmv_irqmap[(inb(ioaddr + IOCONFIG)>>6) & 0x03]; } } @@ -464,23 +362,17 @@ found: spin_lock_init(&lp->lock); lp->jumpered = is_fmv18x; - lp->mca_slot = slot; /* Snarf the interrupt vector now. 
*/ ret = request_irq(irq, net_interrupt, 0, DRV_NAME, dev); if (ret) { printk(KERN_ERR "AT1700 at %#3x is unusable due to a " "conflict on IRQ %d.\n", ioaddr, irq); - goto err_mca; + goto err_out; } return 0; -err_mca: -#ifdef CONFIG_MCA_LEGACY - if (slot >= 0) - mca_mark_as_unused(slot); -#endif err_out: release_region(ioaddr, AT1700_IO_EXTENT); return ret; diff --git a/drivers/net/ethernet/i825xx/3c523.c b/drivers/net/ethernet/i825xx/3c523.c deleted file mode 100644 index 8451ecd4c1e..00000000000 --- a/drivers/net/ethernet/i825xx/3c523.c +++ /dev/null @@ -1,1312 +0,0 @@ -/* - net-3-driver for the 3c523 Etherlink/MC card (i82586 Ethernet chip) - - - This is an extension to the Linux operating system, and is covered by the - same GNU General Public License that covers that work. - - Copyright 1995, 1996 by Chris Beauregard (cpbeaure@undergrad.math.uwaterloo.ca) - - This is basically Michael Hipp's ni52 driver, with a new probing - algorithm and some minor changes to the 82586 CA and reset routines. - Thanks a lot Michael for a really clean i82586 implementation! Unless - otherwise documented in ni52.c, any bugs are mine. - - Contrary to the Ethernet-HOWTO, this isn't based on the 3c507 driver in - any way. The ni52 is a lot easier to modify. - - sources: - ni52.c - - Crynwr packet driver collection was a great reference for my first - attempt at this sucker. The 3c507 driver also helped, until I noticed - that ni52.c was a lot nicer. - - EtherLink/MC: Micro Channel Ethernet Adapter Technical Reference - Manual, courtesy of 3Com CardFacts, documents the 3c523-specific - stuff. Information on CardFacts is found in the Ethernet HOWTO. - Also see <a href="http://www.3com.com/"> - - Microprocessor Communications Support Chips, T.J. Byers, ISBN - 0-444-01224-9, has a section on the i82586. It tells you just enough - to know that you really don't want to learn how to program the chip. - - The original device probe code was stolen from ps2esdi.c - - Known Problems: - Since most of the code was stolen from ni52.c, you'll run across the - same bugs in the 0.62 version of ni52.c, plus maybe a few because of - the 3c523 idiosynchacies. The 3c523 has 16K of RAM though, so there - shouldn't be the overrun problem that the 8K ni52 has. - - This driver is for a 16K adapter. It should work fine on the 64K - adapters, but it will only use one of the 4 banks of RAM. Modifying - this for the 64K version would require a lot of heinous bank - switching, which I'm sure not interested in doing. If you try to - implement a bank switching version, you'll basically have to remember - what bank is enabled and do a switch every time you access a memory - location that's not current. You'll also have to remap pointers on - the driver side, because it only knows about 16K of the memory. - Anyone desperate or masochistic enough to try? - - It seems to be stable now when multiple transmit buffers are used. I - can't see any performance difference, but then I'm working on a 386SX. - - Multicast doesn't work. It doesn't even pretend to work. Don't use - it. Don't compile your kernel with multicast support. I don't know - why. - - Features: - This driver is useable as a loadable module. If you try to specify an - IRQ or a IO address (via insmod 3c523.o irq=xx io=0xyyy), it will - search the MCA slots until it finds a 3c523 with the specified - parameters. 
- - This driver does support multiple ethernet cards when used as a module - (up to MAX_3C523_CARDS, the default being 4) - - This has been tested with both BNC and TP versions, internal and - external transceivers. Haven't tested with the 64K version (that I - know of). - - History: - Jan 1st, 1996 - first public release - Feb 4th, 1996 - update to 1.3.59, incorporated multicast diffs from ni52.c - Feb 15th, 1996 - added shared irq support - Apr 1999 - added support for multiple cards when used as a module - added option to disable multicast as is causes problems - Ganesh Sittampalam <ganesh.sittampalam@magdalen.oxford.ac.uk> - Stuart Adamson <stuart.adamson@compsoc.net> - Nov 2001 - added support for ethtool (jgarzik) - - $Header: /fsys2/home/chrisb/linux-1.3.59-MCA/drivers/net/RCS/3c523.c,v 1.1 1996/02/05 01:53:46 chrisb Exp chrisb $ - */ - -#define DRV_NAME "3c523" -#define DRV_VERSION "17-Nov-2001" - -#include <linux/init.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/skbuff.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/mca-legacy.h> -#include <linux/ethtool.h> -#include <linux/bitops.h> -#include <linux/jiffies.h> - -#include <asm/uaccess.h> -#include <asm/processor.h> -#include <asm/io.h> - -#include "3c523.h" - -/*************************************************************************/ -#define DEBUG /* debug on */ -#define SYSBUSVAL 0 /* 1 = 8 Bit, 0 = 16 bit - 3c523 only does 16 bit */ -#undef ELMC_MULTICAST /* Disable multicast support as it is somewhat seriously broken at the moment */ - -#define make32(ptr16) (p->memtop + (short) (ptr16) ) -#define make24(ptr32) ((char *) (ptr32) - p->base) -#define make16(ptr32) ((unsigned short) ((unsigned long) (ptr32) - (unsigned long) p->memtop )) - -/*************************************************************************/ -/* - Tables to which we can map values in the configuration registers. - */ -static int irq_table[] __initdata = { - 12, 7, 3, 9 -}; - -static int csr_table[] __initdata = { - 0x300, 0x1300, 0x2300, 0x3300 -}; - -static int shm_table[] __initdata = { - 0x0c0000, 0x0c8000, 0x0d0000, 0x0d8000 -}; - -/******************* how to calculate the buffers ***************************** - - - * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works - * --------------- in a different (more stable?) mode. 
Only in this mode it's - * possible to configure the driver with 'NO_NOPCOMMANDS' - -sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8; -sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT -sizeof(rfd) = 24; sizeof(rbd) = 12; -sizeof(tbd) = 8; sizeof(transmit_cmd) = 16; -sizeof(nop_cmd) = 8; - - * if you don't know the driver, better do not change this values: */ - -#define RECV_BUFF_SIZE 1524 /* slightly oversized */ -#define XMIT_BUFF_SIZE 1524 /* slightly oversized */ -#define NUM_XMIT_BUFFS 1 /* config for both, 8K and 16K shmem */ -#define NUM_RECV_BUFFS_8 4 /* config for 8K shared mem */ -#define NUM_RECV_BUFFS_16 9 /* config for 16K shared mem */ - -#if (NUM_XMIT_BUFFS == 1) -#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */ -#endif - -/**************************************************************************/ - -#define DELAY(x) { mdelay(32 * x); } - -/* a much shorter delay: */ -#define DELAY_16(); { udelay(16) ; } - -/* wait for command with timeout: */ -#define WAIT_4_SCB_CMD() { int i; \ - for(i=0;i<1024;i++) { \ - if(!p->scb->cmd) break; \ - DELAY_16(); \ - if(i == 1023) { \ - pr_warning("%s:%d: scb_cmd timed out .. resetting i82586\n",\ - dev->name,__LINE__); \ - elmc_id_reset586(); } } } - -static irqreturn_t elmc_interrupt(int irq, void *dev_id); -static int elmc_open(struct net_device *dev); -static int elmc_close(struct net_device *dev); -static netdev_tx_t elmc_send_packet(struct sk_buff *, struct net_device *); -static struct net_device_stats *elmc_get_stats(struct net_device *dev); -static void elmc_timeout(struct net_device *dev); -#ifdef ELMC_MULTICAST -static void set_multicast_list(struct net_device *dev); -#endif -static const struct ethtool_ops netdev_ethtool_ops; - -/* helper-functions */ -static int init586(struct net_device *dev); -static int check586(struct net_device *dev, unsigned long where, unsigned size); -static void alloc586(struct net_device *dev); -static void startrecv586(struct net_device *dev); -static void *alloc_rfa(struct net_device *dev, void *ptr); -static void elmc_rcv_int(struct net_device *dev); -static void elmc_xmt_int(struct net_device *dev); -static void elmc_rnr_int(struct net_device *dev); - -struct priv { - unsigned long base; - char *memtop; - unsigned long mapped_start; /* Start of ioremap */ - volatile struct rfd_struct *rfd_last, *rfd_top, *rfd_first; - volatile struct scp_struct *scp; /* volatile is important */ - volatile struct iscp_struct *iscp; /* volatile is important */ - volatile struct scb_struct *scb; /* volatile is important */ - volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS]; -#if (NUM_XMIT_BUFFS == 1) - volatile struct transmit_cmd_struct *xmit_cmds[2]; - volatile struct nop_cmd_struct *nop_cmds[2]; -#else - volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS]; - volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS]; -#endif - volatile int nop_point, num_recv_buffs; - volatile char *xmit_cbuffs[NUM_XMIT_BUFFS]; - volatile int xmit_count, xmit_last; - volatile int slot; -}; - -#define elmc_attn586() {elmc_do_attn586(dev->base_addr,ELMC_CTRL_INTE);} -#define elmc_reset586() {elmc_do_reset586(dev->base_addr,ELMC_CTRL_INTE);} - -/* with interrupts disabled - this will clear the interrupt bit in the - 3c523 control register, and won't put it back. This effectively - disables interrupts on the card. 
*/ -#define elmc_id_attn586() {elmc_do_attn586(dev->base_addr,0);} -#define elmc_id_reset586() {elmc_do_reset586(dev->base_addr,0);} - -/*************************************************************************/ -/* - Do a Channel Attention on the 3c523. This is extremely board dependent. - */ -static void elmc_do_attn586(int ioaddr, int ints) -{ - /* the 3c523 requires a minimum of 500 ns. The delays here might be - a little too large, and hence they may cut the performance of the - card slightly. If someone who knows a little more about Linux - timing would care to play with these, I'd appreciate it. */ - - /* this bit masking stuff is crap. I'd rather have separate - registers with strobe triggers for each of these functions. <sigh> - Ya take what ya got. */ - - outb(ELMC_CTRL_RST | 0x3 | ELMC_CTRL_CA | ints, ioaddr + ELMC_CTRL); - DELAY_16(); /* > 500 ns */ - outb(ELMC_CTRL_RST | 0x3 | ints, ioaddr + ELMC_CTRL); -} - -/*************************************************************************/ -/* - Reset the 82586 on the 3c523. Also very board dependent. - */ -static void elmc_do_reset586(int ioaddr, int ints) -{ - /* toggle the RST bit low then high */ - outb(0x3 | ELMC_CTRL_LBK, ioaddr + ELMC_CTRL); - DELAY_16(); /* > 500 ns */ - outb(ELMC_CTRL_RST | ELMC_CTRL_LBK | 0x3, ioaddr + ELMC_CTRL); - - elmc_do_attn586(ioaddr, ints); -} - -/********************************************** - * close device - */ - -static int elmc_close(struct net_device *dev) -{ - netif_stop_queue(dev); - elmc_id_reset586(); /* the hard way to stop the receiver */ - free_irq(dev->irq, dev); - return 0; -} - -/********************************************** - * open device - */ - -static int elmc_open(struct net_device *dev) -{ - int ret; - - elmc_id_attn586(); /* disable interrupts */ - - ret = request_irq(dev->irq, elmc_interrupt, IRQF_SHARED, - dev->name, dev); - if (ret) { - pr_err("%s: couldn't get irq %d\n", dev->name, dev->irq); - elmc_id_reset586(); - return ret; - } - alloc586(dev); - init586(dev); - startrecv586(dev); - netif_start_queue(dev); - return 0; /* most done by init */ -} - -/********************************************** - * Check to see if there's an 82586 out there. - */ - -static int __init check586(struct net_device *dev, unsigned long where, unsigned size) -{ - struct priv *p = netdev_priv(dev); - char *iscp_addrs[2]; - int i = 0; - - p->base = (unsigned long) isa_bus_to_virt((unsigned long)where) + size - 0x01000000; - p->memtop = isa_bus_to_virt((unsigned long)where) + size; - p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS); - memset((char *) p->scp, 0, sizeof(struct scp_struct)); - p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */ - - iscp_addrs[0] = isa_bus_to_virt((unsigned long)where); - iscp_addrs[1] = (char *) p->scp - sizeof(struct iscp_struct); - - for (i = 0; i < 2; i++) { - p->iscp = (struct iscp_struct *) iscp_addrs[i]; - memset((char *) p->iscp, 0, sizeof(struct iscp_struct)); - - p->scp->iscp = make24(p->iscp); - p->iscp->busy = 1; - - elmc_id_reset586(); - - /* reset586 does an implicit CA */ - - /* apparently, you sometimes have to kick the 82586 twice... */ - elmc_id_attn586(); - DELAY(1); - - if (p->iscp->busy) { /* i82586 clears 'busy' after successful init */ - return 0; - } - } - return 1; -} - -/****************************************************************** - * set iscp at the right place, called by elmc_probe and open586. 
- */ - -static void alloc586(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - - elmc_id_reset586(); - DELAY(2); - - p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS); - p->scb = (struct scb_struct *) isa_bus_to_virt(dev->mem_start); - p->iscp = (struct iscp_struct *) ((char *) p->scp - sizeof(struct iscp_struct)); - - memset((char *) p->iscp, 0, sizeof(struct iscp_struct)); - memset((char *) p->scp, 0, sizeof(struct scp_struct)); - - p->scp->iscp = make24(p->iscp); - p->scp->sysbus = SYSBUSVAL; - p->iscp->scb_offset = make16(p->scb); - - p->iscp->busy = 1; - elmc_id_reset586(); - elmc_id_attn586(); - - DELAY(2); - - if (p->iscp->busy) - pr_err("%s: Init-Problems (alloc).\n", dev->name); - - memset((char *) p->scb, 0, sizeof(struct scb_struct)); -} - -/*****************************************************************/ - -static int elmc_getinfo(char *buf, int slot, void *d) -{ - int len = 0; - struct net_device *dev = d; - - if (dev == NULL) - return len; - - len += sprintf(buf + len, "Revision: 0x%x\n", - inb(dev->base_addr + ELMC_REVISION) & 0xf); - len += sprintf(buf + len, "IRQ: %d\n", dev->irq); - len += sprintf(buf + len, "IO Address: %#lx-%#lx\n", dev->base_addr, - dev->base_addr + ELMC_IO_EXTENT); - len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start, - dev->mem_end - 1); - len += sprintf(buf + len, "Transceiver: %s\n", dev->if_port ? - "External" : "Internal"); - len += sprintf(buf + len, "Device: %s\n", dev->name); - len += sprintf(buf + len, "Hardware Address: %pM\n", - dev->dev_addr); - - return len; -} /* elmc_getinfo() */ - -static const struct net_device_ops netdev_ops = { - .ndo_open = elmc_open, - .ndo_stop = elmc_close, - .ndo_get_stats = elmc_get_stats, - .ndo_start_xmit = elmc_send_packet, - .ndo_tx_timeout = elmc_timeout, -#ifdef ELMC_MULTICAST - .ndo_set_rx_mode = set_multicast_list, -#endif - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/*****************************************************************/ - -static int __init do_elmc_probe(struct net_device *dev) -{ - static int slot; - int base_addr = dev->base_addr; - int irq = dev->irq; - u_char status = 0; - u_char revision = 0; - int i = 0; - unsigned int size = 0; - int retval; - struct priv *pr = netdev_priv(dev); - - if (MCA_bus == 0) { - return -ENODEV; - } - /* search through the slots for the 3c523. */ - slot = mca_find_adapter(ELMC_MCA_ID, 0); - while (slot != -1) { - status = mca_read_stored_pos(slot, 2); - - dev->irq=irq_table[(status & ELMC_STATUS_IRQ_SELECT) >> 6]; - dev->base_addr=csr_table[(status & ELMC_STATUS_CSR_SELECT) >> 1]; - - /* - If we're trying to match a specified irq or IO address, - we'll reject a match unless it's what we're looking for. - Also reject it if the card is already in use. - */ - - if ((irq && irq != dev->irq) || - (base_addr && base_addr != dev->base_addr)) { - slot = mca_find_adapter(ELMC_MCA_ID, slot + 1); - continue; - } - if (!request_region(dev->base_addr, ELMC_IO_EXTENT, DRV_NAME)) { - slot = mca_find_adapter(ELMC_MCA_ID, slot + 1); - continue; - } - - /* found what we're looking for... */ - break; - } - - /* we didn't find any 3c523 in the slots we checked for */ - if (slot == MCA_NOTFOUND) - return (base_addr || irq) ? 
-ENXIO : -ENODEV; - - mca_set_adapter_name(slot, "3Com 3c523 Etherlink/MC"); - mca_set_adapter_procfn(slot, (MCA_ProcFn) elmc_getinfo, dev); - - /* if we get this far, adapter has been found - carry on */ - pr_info("%s: 3c523 adapter found in slot %d\n", dev->name, slot + 1); - - /* Now we extract configuration info from the card. - The 3c523 provides information in two of the POS registers, but - the second one is only needed if we want to tell the card what IRQ - to use. I suspect that whoever sets the thing up initially would - prefer we don't screw with those things. - - Note that we read the status info when we found the card... - - See 3c523.h for more details. - */ - - /* revision is stored in the first 4 bits of the revision register */ - revision = inb(dev->base_addr + ELMC_REVISION) & 0xf; - - /* according to docs, we read the interrupt and write it back to - the IRQ select register, since the POST might not configure the IRQ - properly. */ - switch (dev->irq) { - case 3: - mca_write_pos(slot, 3, 0x04); - break; - case 7: - mca_write_pos(slot, 3, 0x02); - break; - case 9: - mca_write_pos(slot, 3, 0x08); - break; - case 12: - mca_write_pos(slot, 3, 0x01); - break; - } - - pr->slot = slot; - - pr_info("%s: 3Com 3c523 Rev 0x%x at %#lx\n", dev->name, (int) revision, - dev->base_addr); - - /* Determine if we're using the on-board transceiver (i.e. coax) or - an external one. The information is pretty much useless, but I - guess it's worth brownie points. */ - dev->if_port = (status & ELMC_STATUS_DISABLE_THIN); - - /* The 3c523 has a 24K chunk of memory. The first 16K is the - shared memory, while the last 8K is for the EtherStart BIOS ROM. - Which we don't care much about here. We'll just tell Linux that - we're using 16K. MCA won't permit address space conflicts caused - by not mapping the other 8K. */ - dev->mem_start = shm_table[(status & ELMC_STATUS_MEMORY_SELECT) >> 3]; - - /* We're using MCA, so it's a given that the information about memory - size is correct. The Crynwr drivers do something like this. */ - - elmc_id_reset586(); /* seems like a good idea before checking it... */ - - size = 0x4000; /* check for 16K mem */ - if (!check586(dev, dev->mem_start, size)) { - pr_err("%s: memprobe, Can't find memory at 0x%lx!\n", dev->name, - dev->mem_start); - retval = -ENODEV; - goto err_out; - } - dev->mem_end = dev->mem_start + size; /* set mem_end showed by 'ifconfig' */ - - pr->memtop = isa_bus_to_virt(dev->mem_start) + size; - pr->base = (unsigned long) isa_bus_to_virt(dev->mem_start) + size - 0x01000000; - alloc586(dev); - - elmc_id_reset586(); /* make sure it doesn't generate spurious ints */ - - /* set number of receive-buffs according to memsize */ - pr->num_recv_buffs = NUM_RECV_BUFFS_16; - - /* dump all the assorted information */ - pr_info("%s: IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->name, - dev->irq, dev->if_port ? "ex" : "in", - dev->mem_start, dev->mem_end - 1); - - /* The hardware address for the 3c523 is stored in the first six - bytes of the IO address. */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(dev->base_addr + i); - - pr_info("%s: hardware address %pM\n", - dev->name, dev->dev_addr); - - dev->netdev_ops = &netdev_ops; - dev->watchdog_timeo = HZ; - dev->ethtool_ops = &netdev_ethtool_ops; - - /* note that we haven't actually requested the IRQ from the kernel. - That gets done in elmc_open(). I'm not sure that's such a good idea, - but it works, so I'll go with it. 
*/ - -#ifndef ELMC_MULTICAST - dev->flags&=~IFF_MULTICAST; /* Multicast doesn't work */ -#endif - - retval = register_netdev(dev); - if (retval) - goto err_out; - - return 0; -err_out: - mca_set_adapter_procfn(slot, NULL, NULL); - release_region(dev->base_addr, ELMC_IO_EXTENT); - return retval; -} - -#ifdef MODULE -static void cleanup_card(struct net_device *dev) -{ - mca_set_adapter_procfn(((struct priv *)netdev_priv(dev))->slot, - NULL, NULL); - release_region(dev->base_addr, ELMC_IO_EXTENT); -} -#else -struct net_device * __init elmc_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct priv)); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_elmc_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -/********************************************** - * init the chip (elmc-interrupt should be disabled?!) - * needs a correct 'allocated' memory - */ - -static int init586(struct net_device *dev) -{ - void *ptr; - unsigned long s; - int i, result = 0; - struct priv *p = netdev_priv(dev); - volatile struct configure_cmd_struct *cfg_cmd; - volatile struct iasetup_cmd_struct *ias_cmd; - volatile struct tdr_cmd_struct *tdr_cmd; - volatile struct mcsetup_cmd_struct *mc_cmd; - struct netdev_hw_addr *ha; - int num_addrs = netdev_mc_count(dev); - - ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct)); - - cfg_cmd = (struct configure_cmd_struct *) ptr; /* configure-command */ - cfg_cmd->cmd_status = 0; - cfg_cmd->cmd_cmd = CMD_CONFIGURE | CMD_LAST; - cfg_cmd->cmd_link = 0xffff; - - cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */ - cfg_cmd->fifo = 0x08; /* fifo-limit (8=tx:32/rx:64) */ - cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */ - cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */ - cfg_cmd->priority = 0x00; - cfg_cmd->ifs = 0x60; - cfg_cmd->time_low = 0x00; - cfg_cmd->time_high = 0xf2; - cfg_cmd->promisc = 0; - if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC)) - cfg_cmd->promisc = 1; - cfg_cmd->carr_coll = 0x00; - - p->scb->cbl_offset = make16(cfg_cmd); - - p->scb->cmd = CUC_START; /* cmd.-unit start */ - elmc_id_attn586(); - - s = jiffies; /* warning: only active with interrupts on !! */ - while (!(cfg_cmd->cmd_status & STAT_COMPL)) { - if (time_after(jiffies, s + 30*HZ/100)) - break; - } - - if ((cfg_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_COMPL | STAT_OK)) { - pr_warning("%s (elmc): configure command failed: %x\n", dev->name, cfg_cmd->cmd_status); - return 1; - } - /* - * individual address setup - */ - ias_cmd = (struct iasetup_cmd_struct *) ptr; - - ias_cmd->cmd_status = 0; - ias_cmd->cmd_cmd = CMD_IASETUP | CMD_LAST; - ias_cmd->cmd_link = 0xffff; - - memcpy((char *) &ias_cmd->iaddr, (char *) dev->dev_addr, ETH_ALEN); - - p->scb->cbl_offset = make16(ias_cmd); - - p->scb->cmd = CUC_START; /* cmd.-unit start */ - elmc_id_attn586(); - - s = jiffies; - while (!(ias_cmd->cmd_status & STAT_COMPL)) { - if (time_after(jiffies, s + 30*HZ/100)) - break; - } - - if ((ias_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_OK | STAT_COMPL)) { - pr_warning("%s (elmc): individual address setup command failed: %04x\n", - dev->name, ias_cmd->cmd_status); - return 1; - } - /* - * TDR, wire check .. e.g. 
no resistor e.t.c - */ - tdr_cmd = (struct tdr_cmd_struct *) ptr; - - tdr_cmd->cmd_status = 0; - tdr_cmd->cmd_cmd = CMD_TDR | CMD_LAST; - tdr_cmd->cmd_link = 0xffff; - tdr_cmd->status = 0; - - p->scb->cbl_offset = make16(tdr_cmd); - - p->scb->cmd = CUC_START; /* cmd.-unit start */ - elmc_attn586(); - - s = jiffies; - while (!(tdr_cmd->cmd_status & STAT_COMPL)) { - if (time_after(jiffies, s + 30*HZ/100)) { - pr_warning("%s: %d Problems while running the TDR.\n", dev->name, __LINE__); - result = 1; - break; - } - } - - if (!result) { - DELAY(2); /* wait for result */ - result = tdr_cmd->status; - - p->scb->cmd = p->scb->status & STAT_MASK; - elmc_id_attn586(); /* ack the interrupts */ - - if (result & TDR_LNK_OK) { - /* empty */ - } else if (result & TDR_XCVR_PRB) { - pr_warning("%s: TDR: Transceiver problem!\n", dev->name); - } else if (result & TDR_ET_OPN) { - pr_warning("%s: TDR: No correct termination %d clocks away.\n", dev->name, result & TDR_TIMEMASK); - } else if (result & TDR_ET_SRT) { - if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */ - pr_warning("%s: TDR: Detected a short circuit %d clocks away.\n", dev->name, result & TDR_TIMEMASK); - } else { - pr_warning("%s: TDR: Unknown status %04x\n", dev->name, result); - } - } - /* - * ack interrupts - */ - p->scb->cmd = p->scb->status & STAT_MASK; - elmc_id_attn586(); - - /* - * alloc nop/xmit-cmds - */ -#if (NUM_XMIT_BUFFS == 1) - for (i = 0; i < 2; i++) { - p->nop_cmds[i] = (struct nop_cmd_struct *) ptr; - p->nop_cmds[i]->cmd_cmd = CMD_NOP; - p->nop_cmds[i]->cmd_status = 0; - p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i])); - ptr = (char *) ptr + sizeof(struct nop_cmd_struct); - } - p->xmit_cmds[0] = (struct transmit_cmd_struct *) ptr; /* transmit cmd/buff 0 */ - ptr = (char *) ptr + sizeof(struct transmit_cmd_struct); -#else - for (i = 0; i < NUM_XMIT_BUFFS; i++) { - p->nop_cmds[i] = (struct nop_cmd_struct *) ptr; - p->nop_cmds[i]->cmd_cmd = CMD_NOP; - p->nop_cmds[i]->cmd_status = 0; - p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i])); - ptr = (char *) ptr + sizeof(struct nop_cmd_struct); - p->xmit_cmds[i] = (struct transmit_cmd_struct *) ptr; /*transmit cmd/buff 0 */ - ptr = (char *) ptr + sizeof(struct transmit_cmd_struct); - } -#endif - - ptr = alloc_rfa(dev, (void *) ptr); /* init receive-frame-area */ - - /* - * Multicast setup - */ - - if (num_addrs) { - /* I don't understand this: do we really need memory after the init? 
*/ - int len = ((char *) p->iscp - (char *) ptr - 8) / 6; - if (len <= 0) { - pr_err("%s: Ooooops, no memory for MC-Setup!\n", dev->name); - } else { - if (len < num_addrs) { - num_addrs = len; - pr_warning("%s: Sorry, can only apply %d MC-Address(es).\n", - dev->name, num_addrs); - } - mc_cmd = (struct mcsetup_cmd_struct *) ptr; - mc_cmd->cmd_status = 0; - mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST; - mc_cmd->cmd_link = 0xffff; - mc_cmd->mc_cnt = num_addrs * 6; - i = 0; - netdev_for_each_mc_addr(ha, dev) - memcpy((char *) mc_cmd->mc_list[i++], - ha->addr, 6); - p->scb->cbl_offset = make16(mc_cmd); - p->scb->cmd = CUC_START; - elmc_id_attn586(); - s = jiffies; - while (!(mc_cmd->cmd_status & STAT_COMPL)) { - if (time_after(jiffies, s + 30*HZ/100)) - break; - } - if (!(mc_cmd->cmd_status & STAT_COMPL)) { - pr_warning("%s: Can't apply multicast-address-list.\n", dev->name); - } - } - } - /* - * alloc xmit-buffs / init xmit_cmds - */ - for (i = 0; i < NUM_XMIT_BUFFS; i++) { - p->xmit_cbuffs[i] = (char *) ptr; /* char-buffs */ - ptr = (char *) ptr + XMIT_BUFF_SIZE; - p->xmit_buffs[i] = (struct tbd_struct *) ptr; /* TBD */ - ptr = (char *) ptr + sizeof(struct tbd_struct); - if ((void *) ptr > (void *) p->iscp) { - pr_err("%s: not enough shared-mem for your configuration!\n", dev->name); - return 1; - } - memset((char *) (p->xmit_cmds[i]), 0, sizeof(struct transmit_cmd_struct)); - memset((char *) (p->xmit_buffs[i]), 0, sizeof(struct tbd_struct)); - p->xmit_cmds[i]->cmd_status = STAT_COMPL; - p->xmit_cmds[i]->cmd_cmd = CMD_XMIT | CMD_INT; - p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i])); - p->xmit_buffs[i]->next = 0xffff; - p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i])); - } - - p->xmit_count = 0; - p->xmit_last = 0; -#ifndef NO_NOPCOMMANDS - p->nop_point = 0; -#endif - - /* - * 'start transmitter' (nop-loop) - */ -#ifndef NO_NOPCOMMANDS - p->scb->cbl_offset = make16(p->nop_cmds[0]); - p->scb->cmd = CUC_START; - elmc_id_attn586(); - WAIT_4_SCB_CMD(); -#else - p->xmit_cmds[0]->cmd_link = 0xffff; - p->xmit_cmds[0]->cmd_cmd = CMD_XMIT | CMD_LAST | CMD_INT; -#endif - - return 0; -} - -/****************************************************** - * This is a helper routine for elmc_rnr_int() and init586(). - * It sets up the Receive Frame Area (RFA). - */ - -static void *alloc_rfa(struct net_device *dev, void *ptr) -{ - volatile struct rfd_struct *rfd = (struct rfd_struct *) ptr; - volatile struct rbd_struct *rbd; - int i; - struct priv *p = netdev_priv(dev); - - memset((char *) rfd, 0, sizeof(struct rfd_struct) * p->num_recv_buffs); - p->rfd_first = rfd; - - for (i = 0; i < p->num_recv_buffs; i++) { - rfd[i].next = make16(rfd + (i + 1) % p->num_recv_buffs); - } - rfd[p->num_recv_buffs - 1].last = RFD_SUSP; /* RU suspend */ - - ptr = (void *) (rfd + p->num_recv_buffs); - - rbd = (struct rbd_struct *) ptr; - ptr = (void *) (rbd + p->num_recv_buffs); - - /* clr descriptors */ - memset((char *) rbd, 0, sizeof(struct rbd_struct) * p->num_recv_buffs); - - for (i = 0; i < p->num_recv_buffs; i++) { - rbd[i].next = make16((rbd + (i + 1) % p->num_recv_buffs)); - rbd[i].size = RECV_BUFF_SIZE; - rbd[i].buffer = make24(ptr); - ptr = (char *) ptr + RECV_BUFF_SIZE; - } - - p->rfd_top = p->rfd_first; - p->rfd_last = p->rfd_first + p->num_recv_buffs - 1; - - p->scb->rfa_offset = make16(p->rfd_first); - p->rfd_first->rbd_offset = make16(rbd); - - return ptr; -} - - -/************************************************** - * Interrupt Handler ... 
- */ - -static irqreturn_t -elmc_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - unsigned short stat; - struct priv *p; - - if (!netif_running(dev)) { - /* The 3c523 has this habit of generating interrupts during the - reset. I'm not sure if the ni52 has this same problem, but it's - really annoying if we haven't finished initializing it. I was - hoping all the elmc_id_* commands would disable this, but I - might have missed a few. */ - - elmc_id_attn586(); /* ack inter. and disable any more */ - return IRQ_HANDLED; - } else if (!(ELMC_CTRL_INT & inb(dev->base_addr + ELMC_CTRL))) { - /* wasn't this device */ - return IRQ_NONE; - } - /* reading ELMC_CTRL also clears the INT bit. */ - - p = netdev_priv(dev); - - while ((stat = p->scb->status & STAT_MASK)) - { - p->scb->cmd = stat; - elmc_attn586(); /* ack inter. */ - - if (stat & STAT_CX) { - /* command with I-bit set complete */ - elmc_xmt_int(dev); - } - if (stat & STAT_FR) { - /* received a frame */ - elmc_rcv_int(dev); - } -#ifndef NO_NOPCOMMANDS - if (stat & STAT_CNA) { - /* CU went 'not ready' */ - if (netif_running(dev)) { - pr_warning("%s: oops! CU has left active state. stat: %04x/%04x.\n", - dev->name, (int) stat, (int) p->scb->status); - } - } -#endif - - if (stat & STAT_RNR) { - /* RU went 'not ready' */ - - if (p->scb->status & RU_SUSPEND) { - /* special case: RU_SUSPEND */ - - WAIT_4_SCB_CMD(); - p->scb->cmd = RUC_RESUME; - elmc_attn586(); - } else { - pr_warning("%s: Receiver-Unit went 'NOT READY': %04x/%04x.\n", - dev->name, (int) stat, (int) p->scb->status); - elmc_rnr_int(dev); - } - } - WAIT_4_SCB_CMD(); /* wait for ack. (elmc_xmt_int can be faster than ack!!) */ - if (p->scb->cmd) { /* timed out? */ - break; - } - } - return IRQ_HANDLED; -} - -/******************************************************* - * receive-interrupt - */ - -static void elmc_rcv_int(struct net_device *dev) -{ - int status; - unsigned short totlen; - struct sk_buff *skb; - struct rbd_struct *rbd; - struct priv *p = netdev_priv(dev); - - for (; (status = p->rfd_top->status) & STAT_COMPL;) { - rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset); - - if (status & STAT_OK) { /* frame received without error? */ - if ((totlen = rbd->status) & RBD_LAST) { /* the first and the last buffer? */ - totlen &= RBD_MASK; /* length of this frame */ - rbd->status = 0; - skb = netdev_alloc_skb(dev, totlen + 2); - if (skb != NULL) { - skb_reserve(skb, 2); /* 16 byte alignment */ - skb_put(skb,totlen); - skb_copy_to_linear_data(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen); - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += totlen; - } else { - dev->stats.rx_dropped++; - } - } else { - pr_warning("%s: received oversized frame.\n", dev->name); - dev->stats.rx_dropped++; - } - } else { /* frame !(ok), only with 'save-bad-frames' */ - pr_warning("%s: oops! rfd-error-status: %04x\n", dev->name, status); - dev->stats.rx_errors++; - } - p->rfd_top->status = 0; - p->rfd_top->last = RFD_SUSP; - p->rfd_last->last = 0; /* delete RU_SUSP */ - p->rfd_last = p->rfd_top; - p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */ - } -} - -/********************************************************** - * handle 'Receiver went not ready'. 
- */ - -static void elmc_rnr_int(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - - dev->stats.rx_errors++; - - WAIT_4_SCB_CMD(); /* wait for the last cmd */ - p->scb->cmd = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */ - elmc_attn586(); - WAIT_4_SCB_CMD(); /* wait for accept cmd. */ - - alloc_rfa(dev, (char *) p->rfd_first); - startrecv586(dev); /* restart RU */ - - pr_warning("%s: Receive-Unit restarted. Status: %04x\n", dev->name, p->scb->status); - -} - -/********************************************************** - * handle xmit - interrupt - */ - -static void elmc_xmt_int(struct net_device *dev) -{ - int status; - struct priv *p = netdev_priv(dev); - - status = p->xmit_cmds[p->xmit_last]->cmd_status; - if (!(status & STAT_COMPL)) { - pr_warning("%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name); - } - if (status & STAT_OK) { - dev->stats.tx_packets++; - dev->stats.collisions += (status & TCMD_MAXCOLLMASK); - } else { - dev->stats.tx_errors++; - if (status & TCMD_LATECOLL) { - pr_warning("%s: late collision detected.\n", dev->name); - dev->stats.collisions++; - } else if (status & TCMD_NOCARRIER) { - dev->stats.tx_carrier_errors++; - pr_warning("%s: no carrier detected.\n", dev->name); - } else if (status & TCMD_LOSTCTS) { - pr_warning("%s: loss of CTS detected.\n", dev->name); - } else if (status & TCMD_UNDERRUN) { - dev->stats.tx_fifo_errors++; - pr_warning("%s: DMA underrun detected.\n", dev->name); - } else if (status & TCMD_MAXCOLL) { - pr_warning("%s: Max. collisions exceeded.\n", dev->name); - dev->stats.collisions += 16; - } - } - -#if (NUM_XMIT_BUFFS != 1) - if ((++p->xmit_last) == NUM_XMIT_BUFFS) { - p->xmit_last = 0; - } -#endif - - netif_wake_queue(dev); -} - -/*********************************************************** - * (re)start the receiver - */ - -static void startrecv586(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - - p->scb->rfa_offset = make16(p->rfd_first); - p->scb->cmd = RUC_START; - elmc_attn586(); /* start cmd. */ - WAIT_4_SCB_CMD(); /* wait for accept cmd. (no timeout!!) */ -} - -/****************************************************** - * timeout - */ - -static void elmc_timeout(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - /* COMMAND-UNIT active? */ - if (p->scb->status & CU_ACTIVE) { - pr_debug("%s: strange ... timeout with CU active?!?\n", dev->name); - pr_debug("%s: X0: %04x N0: %04x N1: %04x %d\n", dev->name, - (int)p->xmit_cmds[0]->cmd_status, - (int)p->nop_cmds[0]->cmd_status, - (int)p->nop_cmds[1]->cmd_status, (int)p->nop_point); - p->scb->cmd = CUC_ABORT; - elmc_attn586(); - WAIT_4_SCB_CMD(); - p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]); - p->scb->cmd = CUC_START; - elmc_attn586(); - WAIT_4_SCB_CMD(); - netif_wake_queue(dev); - } else { - pr_debug("%s: xmitter timed out, try to restart! stat: %04x\n", - dev->name, p->scb->status); - pr_debug("%s: command-stats: %04x %04x\n", dev->name, - p->xmit_cmds[0]->cmd_status, p->xmit_cmds[1]->cmd_status); - elmc_close(dev); - elmc_open(dev); - } -} - -/****************************************************** - * send frame - */ - -static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev) -{ - int len; - int i; -#ifndef NO_NOPCOMMANDS - int next_nop; -#endif - struct priv *p = netdev_priv(dev); - - netif_stop_queue(dev); - - len = (ETH_ZLEN < skb->len) ? 
skb->len : ETH_ZLEN; - - if (len != skb->len) - memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN); - skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len); - -#if (NUM_XMIT_BUFFS == 1) -#ifdef NO_NOPCOMMANDS - p->xmit_buffs[0]->size = TBD_LAST | len; - for (i = 0; i < 16; i++) { - p->scb->cbl_offset = make16(p->xmit_cmds[0]); - p->scb->cmd = CUC_START; - p->xmit_cmds[0]->cmd_status = 0; - elmc_attn586(); - if (!i) { - dev_kfree_skb(skb); - } - WAIT_4_SCB_CMD(); - if ((p->scb->status & CU_ACTIVE)) { /* test it, because CU sometimes doesn't start immediately */ - break; - } - if (p->xmit_cmds[0]->cmd_status) { - break; - } - if (i == 15) { - pr_warning("%s: Can't start transmit-command.\n", dev->name); - } - } -#else - next_nop = (p->nop_point + 1) & 0x1; - p->xmit_buffs[0]->size = TBD_LAST | len; - - p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link - = make16((p->nop_cmds[next_nop])); - p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0; - - p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0])); - p->nop_point = next_nop; - dev_kfree_skb(skb); -#endif -#else - p->xmit_buffs[p->xmit_count]->size = TBD_LAST | len; - if ((next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS) { - next_nop = 0; - } - p->xmit_cmds[p->xmit_count]->cmd_status = 0; - p->xmit_cmds[p->xmit_count]->cmd_link = p->nop_cmds[next_nop]->cmd_link - = make16((p->nop_cmds[next_nop])); - p->nop_cmds[next_nop]->cmd_status = 0; - p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count])); - p->xmit_count = next_nop; - if (p->xmit_count != p->xmit_last) - netif_wake_queue(dev); - dev_kfree_skb(skb); -#endif - return NETDEV_TX_OK; -} - -/******************************************* - * Someone wanna have the statistics - */ - -static struct net_device_stats *elmc_get_stats(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - unsigned short crc, aln, rsc, ovrn; - - crc = p->scb->crc_errs; /* get error-statistic from the ni82586 */ - p->scb->crc_errs -= crc; - aln = p->scb->aln_errs; - p->scb->aln_errs -= aln; - rsc = p->scb->rsc_errs; - p->scb->rsc_errs -= rsc; - ovrn = p->scb->ovrn_errs; - p->scb->ovrn_errs -= ovrn; - - dev->stats.rx_crc_errors += crc; - dev->stats.rx_fifo_errors += ovrn; - dev->stats.rx_frame_errors += aln; - dev->stats.rx_dropped += rsc; - - return &dev->stats; -} - -/******************************************************** - * Set MC list .. 
- */ - -#ifdef ELMC_MULTICAST -static void set_multicast_list(struct net_device *dev) -{ - if (!dev->start) { - /* without a running interface, promiscuous doesn't work */ - return; - } - dev->start = 0; - alloc586(dev); - init586(dev); - startrecv586(dev); - dev->start = 1; -} -#endif - -static void netdev_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr); -} - -static const struct ethtool_ops netdev_ethtool_ops = { - .get_drvinfo = netdev_get_drvinfo, -}; - -#ifdef MODULE - -/* Increase if needed ;) */ -#define MAX_3C523_CARDS 4 - -static struct net_device *dev_elmc[MAX_3C523_CARDS]; -static int irq[MAX_3C523_CARDS]; -static int io[MAX_3C523_CARDS]; -module_param_array(irq, int, NULL, 0); -module_param_array(io, int, NULL, 0); -MODULE_PARM_DESC(io, "EtherLink/MC I/O base address(es)"); -MODULE_PARM_DESC(irq, "EtherLink/MC IRQ number(s)"); -MODULE_LICENSE("GPL"); - -int __init init_module(void) -{ - int this_dev,found = 0; - - /* Loop until we either can't find any more cards, or we have MAX_3C523_CARDS */ - for(this_dev=0; this_dev<MAX_3C523_CARDS; this_dev++) { - struct net_device *dev = alloc_etherdev(sizeof(struct priv)); - if (!dev) - break; - dev->irq=irq[this_dev]; - dev->base_addr=io[this_dev]; - if (do_elmc_probe(dev) == 0) { - dev_elmc[this_dev] = dev; - found++; - continue; - } - free_netdev(dev); - if (io[this_dev]==0) - break; - pr_warning("3c523.c: No 3c523 card found at io=%#x\n",io[this_dev]); - } - - if(found==0) { - if (io[0]==0) - pr_notice("3c523.c: No 3c523 cards found\n"); - return -ENXIO; - } else return 0; -} - -void __exit cleanup_module(void) -{ - int this_dev; - for (this_dev=0; this_dev<MAX_3C523_CARDS; this_dev++) { - struct net_device *dev = dev_elmc[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} - -#endif /* MODULE */ diff --git a/drivers/net/ethernet/i825xx/3c523.h b/drivers/net/ethernet/i825xx/3c523.h deleted file mode 100644 index 6956441687b..00000000000 --- a/drivers/net/ethernet/i825xx/3c523.h +++ /dev/null @@ -1,355 +0,0 @@ -#ifndef _3c523_INCLUDE_ -#define _3c523_INCLUDE_ -/* - This is basically a hacked version of ni52.h, for the 3c523 - Etherlink/MC. -*/ - -/* - * Intel i82586 Ethernet definitions - * - * This is an extension to the Linux operating system, and is covered by the - * same GNU General Public License that covers that work. - * - * Copyright 1995 by Chris Beauregard (cpbeaure@undergrad.math.uwaterloo.ca) - * - * See 3c523.c for details. 
- * - * $Header: /home/chrisb/linux-1.2.13-3c523/drivers/net/RCS/3c523.h,v 1.6 1996/01/20 05:09:00 chrisb Exp chrisb $ - */ - -/* - * where to find the System Configuration Pointer (SCP) - */ -#define SCP_DEFAULT_ADDRESS 0xfffff4 - - -/* - * System Configuration Pointer Struct - */ - -struct scp_struct -{ - unsigned short zero_dum0; /* has to be zero */ - unsigned char sysbus; /* 0=16Bit,1=8Bit */ - unsigned char zero_dum1; /* has to be zero for 586 */ - unsigned short zero_dum2; - unsigned short zero_dum3; - char *iscp; /* pointer to the iscp-block */ -}; - - -/* - * Intermediate System Configuration Pointer (ISCP) - */ -struct iscp_struct -{ - unsigned char busy; /* 586 clears after successful init */ - unsigned char zero_dummy; /* hast to be zero */ - unsigned short scb_offset; /* pointeroffset to the scb_base */ - char *scb_base; /* base-address of all 16-bit offsets */ -}; - -/* - * System Control Block (SCB) - */ -struct scb_struct -{ - unsigned short status; /* status word */ - unsigned short cmd; /* command word */ - unsigned short cbl_offset; /* pointeroffset, command block list */ - unsigned short rfa_offset; /* pointeroffset, receive frame area */ - unsigned short crc_errs; /* CRC-Error counter */ - unsigned short aln_errs; /* alignmenterror counter */ - unsigned short rsc_errs; /* Resourceerror counter */ - unsigned short ovrn_errs; /* OVerrunerror counter */ -}; - -/* - * possible command values for the command word - */ -#define RUC_MASK 0x0070 /* mask for RU commands */ -#define RUC_NOP 0x0000 /* NOP-command */ -#define RUC_START 0x0010 /* start RU */ -#define RUC_RESUME 0x0020 /* resume RU after suspend */ -#define RUC_SUSPEND 0x0030 /* suspend RU */ -#define RUC_ABORT 0x0040 /* abort receiver operation immediately */ - -#define CUC_MASK 0x0700 /* mask for CU command */ -#define CUC_NOP 0x0000 /* NOP-command */ -#define CUC_START 0x0100 /* start execution of 1. cmd on the CBL */ -#define CUC_RESUME 0x0200 /* resume after suspend */ -#define CUC_SUSPEND 0x0300 /* Suspend CU */ -#define CUC_ABORT 0x0400 /* abort command operation immediately */ - -#define ACK_MASK 0xf000 /* mask for ACK command */ -#define ACK_CX 0x8000 /* acknowledges STAT_CX */ -#define ACK_FR 0x4000 /* ack. STAT_FR */ -#define ACK_CNA 0x2000 /* ack. STAT_CNA */ -#define ACK_RNR 0x1000 /* ack. 
STAT_RNR */ - -/* - * possible status values for the status word - */ -#define STAT_MASK 0xf000 /* mask for cause of interrupt */ -#define STAT_CX 0x8000 /* CU finished cmd with its I bit set */ -#define STAT_FR 0x4000 /* RU finished receiving a frame */ -#define STAT_CNA 0x2000 /* CU left active state */ -#define STAT_RNR 0x1000 /* RU left ready state */ - -#define CU_STATUS 0x700 /* CU status, 0=idle */ -#define CU_SUSPEND 0x100 /* CU is suspended */ -#define CU_ACTIVE 0x200 /* CU is active */ - -#define RU_STATUS 0x70 /* RU status, 0=idle */ -#define RU_SUSPEND 0x10 /* RU suspended */ -#define RU_NOSPACE 0x20 /* RU no resources */ -#define RU_READY 0x40 /* RU is ready */ - -/* - * Receive Frame Descriptor (RFD) - */ -struct rfd_struct -{ - unsigned short status; /* status word */ - unsigned short last; /* Bit15,Last Frame on List / Bit14,suspend */ - unsigned short next; /* linkoffset to next RFD */ - unsigned short rbd_offset; /* pointeroffset to RBD-buffer */ - unsigned char dest[6]; /* ethernet-address, destination */ - unsigned char source[6]; /* ethernet-address, source */ - unsigned short length; /* 802.3 frame-length */ - unsigned short zero_dummy; /* dummy */ -}; - -#define RFD_LAST 0x8000 /* last: last rfd in the list */ -#define RFD_SUSP 0x4000 /* last: suspend RU after */ -#define RFD_ERRMASK 0x0fe1 /* status: errormask */ -#define RFD_MATCHADD 0x0002 /* status: Destinationaddress !matches IA */ -#define RFD_RNR 0x0200 /* status: receiver out of resources */ - -/* - * Receive Buffer Descriptor (RBD) - */ -struct rbd_struct -{ - unsigned short status; /* status word,number of used bytes in buff */ - unsigned short next; /* pointeroffset to next RBD */ - char *buffer; /* receive buffer address pointer */ - unsigned short size; /* size of this buffer */ - unsigned short zero_dummy; /* dummy */ -}; - -#define RBD_LAST 0x8000 /* last buffer */ -#define RBD_USED 0x4000 /* this buffer has data */ -#define RBD_MASK 0x3fff /* size-mask for length */ - -/* - * Statusvalues for Commands/RFD - */ -#define STAT_COMPL 0x8000 /* status: frame/command is complete */ -#define STAT_BUSY 0x4000 /* status: frame/command is busy */ -#define STAT_OK 0x2000 /* status: frame/command is ok */ - -/* - * Action-Commands - */ -#define CMD_NOP 0x0000 /* NOP */ -#define CMD_IASETUP 0x0001 /* initial address setup command */ -#define CMD_CONFIGURE 0x0002 /* configure command */ -#define CMD_MCSETUP 0x0003 /* MC setup command */ -#define CMD_XMIT 0x0004 /* transmit command */ -#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */ -#define CMD_DUMP 0x0006 /* dump command */ -#define CMD_DIAGNOSE 0x0007 /* diagnose command */ - -/* - * Action command bits - */ -#define CMD_LAST 0x8000 /* indicates last command in the CBL */ -#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */ -#define CMD_INT 0x2000 /* generate interrupt after execution */ - -/* - * NOP - command - */ -struct nop_cmd_struct -{ - unsigned short cmd_status; /* status of this command */ - unsigned short cmd_cmd; /* the command itself (+bits) */ - unsigned short cmd_link; /* offsetpointer to next command */ -}; - -/* - * IA Setup command - */ -struct iasetup_cmd_struct -{ - unsigned short cmd_status; - unsigned short cmd_cmd; - unsigned short cmd_link; - unsigned char iaddr[6]; -}; - -/* - * Configure command - */ -struct configure_cmd_struct -{ - unsigned short cmd_status; - unsigned short cmd_cmd; - unsigned short cmd_link; - unsigned char byte_cnt; /* size of the config-cmd */ - unsigned char fifo; /* fifo/recv monitor */ 
- unsigned char sav_bf; /* save bad frames (bit7=1)*/ - unsigned char adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/ - unsigned char priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */ - unsigned char ifs; /* inter frame spacing */ - unsigned char time_low; /* slot time low */ - unsigned char time_high; /* slot time high(0-2) and max. retries(4-7) */ - unsigned char promisc; /* promisc-mode(0) , et al (1-7) */ - unsigned char carr_coll; /* carrier(0-3)/collision(4-7) stuff */ - unsigned char fram_len; /* minimal frame len */ - unsigned char dummy; /* dummy */ -}; - -/* - * Multicast Setup command - */ -struct mcsetup_cmd_struct -{ - unsigned short cmd_status; - unsigned short cmd_cmd; - unsigned short cmd_link; - unsigned short mc_cnt; /* number of bytes in the MC-List */ - unsigned char mc_list[0][6]; /* pointer to 6 bytes entries */ -}; - -/* - * transmit command - */ -struct transmit_cmd_struct -{ - unsigned short cmd_status; - unsigned short cmd_cmd; - unsigned short cmd_link; - unsigned short tbd_offset; /* pointeroffset to TBD */ - unsigned char dest[6]; /* destination address of the frame */ - unsigned short length; /* user defined: 802.3 length / Ether type */ -}; - -#define TCMD_ERRMASK 0x0fa0 -#define TCMD_MAXCOLLMASK 0x000f -#define TCMD_MAXCOLL 0x0020 -#define TCMD_HEARTBEAT 0x0040 -#define TCMD_DEFERRED 0x0080 -#define TCMD_UNDERRUN 0x0100 -#define TCMD_LOSTCTS 0x0200 -#define TCMD_NOCARRIER 0x0400 -#define TCMD_LATECOLL 0x0800 - -struct tdr_cmd_struct -{ - unsigned short cmd_status; - unsigned short cmd_cmd; - unsigned short cmd_link; - unsigned short status; -}; - -#define TDR_LNK_OK 0x8000 /* No link problem identified */ -#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */ -#define TDR_ET_OPN 0x2000 /* open, no correct termination */ -#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */ -#define TDR_TIMEMASK 0x07ff /* mask for the time field */ - -/* - * Transmit Buffer Descriptor (TBD) - */ -struct tbd_struct -{ - unsigned short size; /* size + EOF-Flag(15) */ - unsigned short next; /* pointeroffset to next TBD */ - char *buffer; /* pointer to buffer */ -}; - -#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */ - -/*************************************************************************/ -/* -Verbatim from the Crynwyr stuff: - - The 3c523 responds with adapter code 0x6042 at slot -registers xxx0 and xxx1. The setup register is at xxx2 and -contains the following bits: - -0: card enable -2,1: csr address select - 00 = 0300 - 01 = 1300 - 10 = 2300 - 11 = 3300 -4,3: shared memory address select - 00 = 0c0000 - 01 = 0c8000 - 10 = 0d0000 - 11 = 0d8000 -5: set to disable on-board thinnet -7,6: (read-only) shows selected irq - 00 = 12 - 01 = 7 - 10 = 3 - 11 = 9 - -The interrupt-select register is at xxx3 and uses one bit per irq. - -0: int 12 -1: int 7 -2: int 3 -3: int 9 - - Again, the documentation stresses that the setup register -should never be written. The interrupt-select register may be -written with the value corresponding to bits 7.6 in -the setup register to insure corret setup. -*/ - -/* Offsets from the base I/O address. 
*/ -#define ELMC_SA 0 /* first 6 bytes are IEEE network address */ -#define ELMC_CTRL 6 /* control & status register */ -#define ELMC_REVISION 7 /* revision register, first 4 bits only */ -#define ELMC_IO_EXTENT 8 - -/* these are the bit selects for the port register 2 */ -#define ELMC_STATUS_ENABLED 0x01 -#define ELMC_STATUS_CSR_SELECT 0x06 -#define ELMC_STATUS_MEMORY_SELECT 0x18 -#define ELMC_STATUS_DISABLE_THIN 0x20 -#define ELMC_STATUS_IRQ_SELECT 0xc0 - -/* this is the card id used in the detection code. You might recognize -it from @6042.adf */ -#define ELMC_MCA_ID 0x6042 - -/* - The following define the bits for the control & status register - - The bank select registers can be used if more than 16K of memory is - on the card. For some stupid reason, bank 3 is the one for the - bottom 16K, and the card defaults to bank 0. So we have to set the - bank to 3 before the card will even think of operating. To get bank - 3, set BS0 and BS1 to high (of course...) -*/ -#define ELMC_CTRL_BS0 0x01 /* RW bank select */ -#define ELMC_CTRL_BS1 0x02 /* RW bank select */ -#define ELMC_CTRL_INTE 0x04 /* RW interrupt enable, assert high */ -#define ELMC_CTRL_INT 0x08 /* R interrupt active, assert high */ -/*#define ELMC_CTRL_* 0x10*/ /* reserved */ -#define ELMC_CTRL_LBK 0x20 /* RW loopback enable, assert high */ -#define ELMC_CTRL_CA 0x40 /* RW channel attention, assert high */ -#define ELMC_CTRL_RST 0x80 /* RW 82586 reset, assert low */ - -/* some handy compound bits */ - -/* normal operation should have bank 3 and RST high, ints enabled */ -#define ELMC_NORMAL (ELMC_CTRL_INTE|ELMC_CTRL_RST|0x3) - -#endif /* _3c523_INCLUDE_ */ diff --git a/drivers/net/ethernet/i825xx/3c527.c b/drivers/net/ethernet/i825xx/3c527.c deleted file mode 100644 index 278e791afe0..00000000000 --- a/drivers/net/ethernet/i825xx/3c527.c +++ /dev/null @@ -1,1660 +0,0 @@ -/* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4 and 2.6. - * - * (c) Copyright 1998 Red Hat Software Inc - * Written by Alan Cox. - * Further debugging by Carl Drougge. - * Initial SMP support by Felipe W Damasio <felipewd@terra.com.br> - * Heavily modified by Richard Procter <rnp@paradise.net.nz> - * - * Based on skeleton.c written 1993-94 by Donald Becker and ne2.c - * (for the MCA stuff) written by Wim Dumon. - * - * Thanks to 3Com for making this possible by providing me with the - * documentation. - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - */ - -#define DRV_NAME "3c527" -#define DRV_VERSION "0.7-SMP" -#define DRV_RELDATE "2003/09/21" - -static const char *version = -DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.nz>\n"; - -/** - * DOC: Traps for the unwary - * - * The diagram (Figure 1-1) and the POS summary disagree with the - * "Interrupt Level" section in the manual. - * - * The manual contradicts itself when describing the minimum number - * buffers in the 'configure lists' command. - * My card accepts a buffer config of 4/4. - * - * Setting the SAV BP bit does not save bad packets, but - * only enables RX on-card stats collection. - * - * The documentation in places seems to miss things. In actual fact - * I've always eventually found everything is documented, it just - * requires careful study. - * - * DOC: Theory Of Operation - * - * The 3com 3c527 is a 32bit MCA bus mastering adapter with a large - * amount of on board intelligence that housekeeps a somewhat dumber - * Intel NIC. 
For performance we want to keep the transmit queue deep - * as the card can transmit packets while fetching others from main - * memory by bus master DMA. Transmission and reception are driven by - * circular buffer queues. - * - * The mailboxes can be used for controlling how the card traverses - * its buffer rings, but are used only for initial setup in this - * implementation. The exec mailbox allows a variety of commands to - * be executed. Each command must complete before the next is - * executed. Primarily we use the exec mailbox for controlling the - * multicast lists. We have to do a certain amount of interesting - * hoop jumping as the multicast list changes can occur in interrupt - * state when the card has an exec command pending. We defer such - * events until the command completion interrupt. - * - * A copy break scheme (taken from 3c59x.c) is employed whereby - * received frames exceeding a configurable length are passed - * directly to the higher networking layers without incuring a copy, - * in what amounts to a time/space trade-off. - * - * The card also keeps a large amount of statistical information - * on-board. In a perfect world, these could be used safely at no - * cost. However, lacking information to the contrary, processing - * them without races would involve so much extra complexity as to - * make it unworthwhile to do so. In the end, a hybrid SW/HW - * implementation was made necessary --- see mc32_update_stats(). - * - * DOC: Notes - * - * It should be possible to use two or more cards, but at this stage - * only by loading two copies of the same module. - * - * The on-board 82586 NIC has trouble receiving multiple - * back-to-back frames and so is likely to drop packets from fast - * senders. -**/ - -#include <linux/module.h> - -#include <linux/errno.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/if_ether.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/interrupt.h> -#include <linux/mca-legacy.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/skbuff.h> -#include <linux/slab.h> -#include <linux/string.h> -#include <linux/wait.h> -#include <linux/ethtool.h> -#include <linux/completion.h> -#include <linux/bitops.h> -#include <linux/semaphore.h> - -#include <asm/uaccess.h> -#include <asm/io.h> -#include <asm/dma.h> - -#include "3c527.h" - -MODULE_LICENSE("GPL"); - -/* - * The name of the card. Is used for messages and in the requests for - * io regions, irqs and dma channels - */ -static const char* cardname = DRV_NAME; - -/* use 0 for production, 1 for verification, >2 for debug */ -#ifndef NET_DEBUG -#define NET_DEBUG 2 -#endif - -static unsigned int mc32_debug = NET_DEBUG; - -/* The number of low I/O ports used by the ethercard. */ -#define MC32_IO_EXTENT 8 - -/* As implemented, values must be a power-of-2 -- 4/8/16/32 */ -#define TX_RING_LEN 32 /* Typically the card supports 37 */ -#define RX_RING_LEN 8 /* " " " */ - -/* Copy break point, see above for details. - * Setting to > 1512 effectively disables this feature. */ -#define RX_COPYBREAK 200 /* Value from 3c59x.c */ - -/* Issue the 82586 workaround command - this is for "busy lans", but - * basically means for all lans now days - has a performance (latency) - * cost, but best set. 
*/ -static const int WORKAROUND_82586=1; - -/* Pointers to buffers and their on-card records */ -struct mc32_ring_desc -{ - volatile struct skb_header *p; - struct sk_buff *skb; -}; - -/* Information that needs to be kept for each board. */ -struct mc32_local -{ - int slot; - - u32 base; - volatile struct mc32_mailbox *rx_box; - volatile struct mc32_mailbox *tx_box; - volatile struct mc32_mailbox *exec_box; - volatile struct mc32_stats *stats; /* Start of on-card statistics */ - u16 tx_chain; /* Transmit list start offset */ - u16 rx_chain; /* Receive list start offset */ - u16 tx_len; /* Transmit list count */ - u16 rx_len; /* Receive list count */ - - u16 xceiver_desired_state; /* HALTED or RUNNING */ - u16 cmd_nonblocking; /* Thread is uninterested in command result */ - u16 mc_reload_wait; /* A multicast load request is pending */ - u32 mc_list_valid; /* True when the mclist is set */ - - struct mc32_ring_desc tx_ring[TX_RING_LEN]; /* Host Transmit ring */ - struct mc32_ring_desc rx_ring[RX_RING_LEN]; /* Host Receive ring */ - - atomic_t tx_count; /* buffers left */ - atomic_t tx_ring_head; /* index to tx en-queue end */ - u16 tx_ring_tail; /* index to tx de-queue end */ - - u16 rx_ring_tail; /* index to rx de-queue end */ - - struct semaphore cmd_mutex; /* Serialises issuing of execute commands */ - struct completion execution_cmd; /* Card has completed an execute command */ - struct completion xceiver_cmd; /* Card has completed a tx or rx command */ -}; - -/* The station (ethernet) address prefix, used for a sanity check. */ -#define SA_ADDR0 0x02 -#define SA_ADDR1 0x60 -#define SA_ADDR2 0xAC - -struct mca_adapters_t { - unsigned int id; - char *name; -}; - -static const struct mca_adapters_t mc32_adapters[] = { - { 0x0041, "3COM EtherLink MC/32" }, - { 0x8EF5, "IBM High Performance Lan Adapter" }, - { 0x0000, NULL } -}; - - -/* Macros for ring index manipulations */ -static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); }; -static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); }; - -static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); }; - - -/* Index to functions, as function prototypes. */ -static int mc32_probe1(struct net_device *dev, int ioaddr); -static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len); -static int mc32_open(struct net_device *dev); -static void mc32_timeout(struct net_device *dev); -static netdev_tx_t mc32_send_packet(struct sk_buff *skb, - struct net_device *dev); -static irqreturn_t mc32_interrupt(int irq, void *dev_id); -static int mc32_close(struct net_device *dev); -static struct net_device_stats *mc32_get_stats(struct net_device *dev); -static void mc32_set_multicast_list(struct net_device *dev); -static void mc32_reset_multicast_list(struct net_device *dev); -static const struct ethtool_ops netdev_ethtool_ops; - -static void cleanup_card(struct net_device *dev) -{ - struct mc32_local *lp = netdev_priv(dev); - unsigned slot = lp->slot; - mca_mark_as_unused(slot); - mca_set_adapter_name(slot, NULL); - free_irq(dev->irq, dev); - release_region(dev->base_addr, MC32_IO_EXTENT); -} - -/** - * mc32_probe - Search for supported boards - * @unit: interface number to use - * - * Because MCA bus is a real bus and we can scan for cards we could do a - * single scan for all boards here. Right now we use the passed in device - * structure and scan for only one board. This needs fixing for modules - * in particular. 
- */ - -struct net_device *__init mc32_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local)); - static int current_mca_slot = -1; - int i; - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - if (unit >= 0) - sprintf(dev->name, "eth%d", unit); - - /* Do not check any supplied i/o locations. - POS registers usually don't fail :) */ - - /* MCA cards have POS registers. - Autodetecting MCA cards is extremely simple. - Just search for the card. */ - - for(i = 0; (mc32_adapters[i].name != NULL); i++) { - current_mca_slot = - mca_find_unused_adapter(mc32_adapters[i].id, 0); - - if(current_mca_slot != MCA_NOTFOUND) { - if(!mc32_probe1(dev, current_mca_slot)) - { - mca_set_adapter_name(current_mca_slot, - mc32_adapters[i].name); - mca_mark_as_used(current_mca_slot); - err = register_netdev(dev); - if (err) { - cleanup_card(dev); - free_netdev(dev); - dev = ERR_PTR(err); - } - return dev; - } - - } - } - free_netdev(dev); - return ERR_PTR(-ENODEV); -} - -static const struct net_device_ops netdev_ops = { - .ndo_open = mc32_open, - .ndo_stop = mc32_close, - .ndo_start_xmit = mc32_send_packet, - .ndo_get_stats = mc32_get_stats, - .ndo_set_rx_mode = mc32_set_multicast_list, - .ndo_tx_timeout = mc32_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/** - * mc32_probe1 - Check a given slot for a board and test the card - * @dev: Device structure to fill in - * @slot: The MCA bus slot being used by this card - * - * Decode the slot data and configure the card structures. Having done this we - * can reset the card and configure it. The card does a full self test cycle - * in firmware so we have to wait for it to return and post us either a - * failure case or some addresses we use to find the board internals. - */ - -static int __init mc32_probe1(struct net_device *dev, int slot) -{ - static unsigned version_printed; - int i, err; - u8 POS; - u32 base; - struct mc32_local *lp = netdev_priv(dev); - static const u16 mca_io_bases[] = { - 0x7280,0x7290, - 0x7680,0x7690, - 0x7A80,0x7A90, - 0x7E80,0x7E90 - }; - static const u32 mca_mem_bases[] = { - 0x00C0000, - 0x00C4000, - 0x00C8000, - 0x00CC000, - 0x00D0000, - 0x00D4000, - 0x00D8000, - 0x00DC000 - }; - static const char * const failures[] = { - "Processor instruction", - "Processor data bus", - "Processor data bus", - "Processor data bus", - "Adapter bus", - "ROM checksum", - "Base RAM", - "Extended RAM", - "82586 internal loopback", - "82586 initialisation failure", - "Adapter list configuration error" - }; - - /* Time to play MCA games */ - - if (mc32_debug && version_printed++ == 0) - pr_debug("%s", version); - - pr_info("%s: %s found in slot %d: ", dev->name, cardname, slot); - - POS = mca_read_stored_pos(slot, 2); - - if(!(POS&1)) - { - pr_cont("disabled.\n"); - return -ENODEV; - } - - /* Fill in the 'dev' fields. 
*/ - dev->base_addr = mca_io_bases[(POS>>1)&7]; - dev->mem_start = mca_mem_bases[(POS>>4)&7]; - - POS = mca_read_stored_pos(slot, 4); - if(!(POS&1)) - { - pr_cont("memory window disabled.\n"); - return -ENODEV; - } - - POS = mca_read_stored_pos(slot, 5); - - i=(POS>>4)&3; - if(i==3) - { - pr_cont("invalid memory window.\n"); - return -ENODEV; - } - - i*=16384; - i+=16384; - - dev->mem_end=dev->mem_start + i; - - dev->irq = ((POS>>2)&3)+9; - - if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname)) - { - pr_cont("io 0x%3lX, which is busy.\n", dev->base_addr); - return -EBUSY; - } - - pr_cont("io 0x%3lX irq %d mem 0x%lX (%dK)\n", - dev->base_addr, dev->irq, dev->mem_start, i/1024); - - - /* We ought to set the cache line size here.. */ - - - /* - * Go PROM browsing - */ - - /* Retrieve and print the ethernet address. */ - for (i = 0; i < 6; i++) - { - mca_write_pos(slot, 6, i+12); - mca_write_pos(slot, 7, 0); - - dev->dev_addr[i] = mca_read_pos(slot,3); - } - - pr_info("%s: Address %pM ", dev->name, dev->dev_addr); - - mca_write_pos(slot, 6, 0); - mca_write_pos(slot, 7, 0); - - POS = mca_read_stored_pos(slot, 4); - - if(POS&2) - pr_cont(": BNC port selected.\n"); - else - pr_cont(": AUI port selected.\n"); - - POS=inb(dev->base_addr+HOST_CTRL); - POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET; - POS&=~HOST_CTRL_INTE; - outb(POS, dev->base_addr+HOST_CTRL); - /* Reset adapter */ - udelay(100); - /* Reset off */ - POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET); - outb(POS, dev->base_addr+HOST_CTRL); - - udelay(300); - - /* - * Grab the IRQ - */ - - err = request_irq(dev->irq, mc32_interrupt, IRQF_SHARED, DRV_NAME, dev); - if (err) { - release_region(dev->base_addr, MC32_IO_EXTENT); - pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq); - goto err_exit_ports; - } - - memset(lp, 0, sizeof(struct mc32_local)); - lp->slot = slot; - - i=0; - - base = inb(dev->base_addr); - - while(base == 0xFF) - { - i++; - if(i == 1000) - { - pr_err("%s: failed to boot adapter.\n", dev->name); - err = -ENODEV; - goto err_exit_irq; - } - udelay(1000); - if(inb(dev->base_addr+2)&(1<<5)) - base = inb(dev->base_addr); - } - - if(base>0) - { - if(base < 0x0C) - pr_err("%s: %s%s.\n", dev->name, failures[base-1], - base<0x0A?" test failure":""); - else - pr_err("%s: unknown failure %d.\n", dev->name, base); - err = -ENODEV; - goto err_exit_irq; - } - - base=0; - for(i=0;i<4;i++) - { - int n=0; - - while(!(inb(dev->base_addr+2)&(1<<5))) - { - n++; - udelay(50); - if(n>100) - { - pr_err("%s: mailbox read fail (%d).\n", dev->name, i); - err = -ENODEV; - goto err_exit_irq; - } - } - - base|=(inb(dev->base_addr)<<(8*i)); - } - - lp->exec_box=isa_bus_to_virt(dev->mem_start+base); - - base=lp->exec_box->data[1]<<16|lp->exec_box->data[0]; - - lp->base = dev->mem_start+base; - - lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]); - lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]); - - lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]); - - /* - * Descriptor chains (card relative) - */ - - lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */ - lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */ - lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */ - lp->rx_len = lp->exec_box->data[11]; /* Receive list count */ - - sema_init(&lp->cmd_mutex, 0); - init_completion(&lp->execution_cmd); - init_completion(&lp->xceiver_cmd); - - pr_info("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. 
Base of 0x%08X.\n", - dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base); - - dev->netdev_ops = &netdev_ops; - dev->watchdog_timeo = HZ*5; /* Board does all the work */ - dev->ethtool_ops = &netdev_ethtool_ops; - - return 0; - -err_exit_irq: - free_irq(dev->irq, dev); -err_exit_ports: - release_region(dev->base_addr, MC32_IO_EXTENT); - return err; -} - - -/** - * mc32_ready_poll - wait until we can feed it a command - * @dev: The device to wait for - * - * Wait until the card becomes ready to accept a command via the - * command register. This tells us nothing about the completion - * status of any pending commands and takes very little time at all. - */ - -static inline void mc32_ready_poll(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR)); -} - - -/** - * mc32_command_nowait - send a command non blocking - * @dev: The 3c527 to issue the command to - * @cmd: The command word to write to the mailbox - * @data: A data block if the command expects one - * @len: Length of the data block - * - * Send a command from interrupt state. If there is a command - * currently being executed then we return an error of -1. It - * simply isn't viable to wait around as commands may be - * slow. This can theoretically be starved on SMP, but it's hard - * to see a realistic situation. We do not wait for the command - * to complete --- we rely on the interrupt handler to tidy up - * after us. - */ - -static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len) -{ - struct mc32_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - int ret = -1; - - if (down_trylock(&lp->cmd_mutex) == 0) - { - lp->cmd_nonblocking=1; - lp->exec_box->mbox=0; - lp->exec_box->mbox=cmd; - memcpy((void *)lp->exec_box->data, data, len); - barrier(); /* the memcpy forgot the volatile so be sure */ - - /* Send the command */ - mc32_ready_poll(dev); - outb(1<<6, ioaddr+HOST_CMD); - - ret = 0; - - /* Interrupt handler will signal mutex on completion */ - } - - return ret; -} - - -/** - * mc32_command - send a command and sleep until completion - * @dev: The 3c527 card to issue the command to - * @cmd: The command word to write to the mailbox - * @data: A data block if the command expects one - * @len: Length of the data block - * - * Sends exec commands in a user context. This permits us to wait around - * for the replies and also to wait for the command buffer to complete - * from a previous command before we execute our command. After our - * command completes we will attempt any pending multicast reload - * we blocked off by hogging the exec buffer. - * - * You feed the card a command, you wait, it interrupts you get a - * reply. All well and good. The complication arises because you use - * commands for filter list changes which come in at bh level from things - * like IPV6 group stuff. 
- */ - -static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len) -{ - struct mc32_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - int ret = 0; - - down(&lp->cmd_mutex); - - /* - * My Turn - */ - - lp->cmd_nonblocking=0; - lp->exec_box->mbox=0; - lp->exec_box->mbox=cmd; - memcpy((void *)lp->exec_box->data, data, len); - barrier(); /* the memcpy forgot the volatile so be sure */ - - mc32_ready_poll(dev); - outb(1<<6, ioaddr+HOST_CMD); - - wait_for_completion(&lp->execution_cmd); - - if(lp->exec_box->mbox&(1<<13)) - ret = -1; - - up(&lp->cmd_mutex); - - /* - * A multicast set got blocked - try it now - */ - - if(lp->mc_reload_wait) - { - mc32_reset_multicast_list(dev); - } - - return ret; -} - - -/** - * mc32_start_transceiver - tell board to restart tx/rx - * @dev: The 3c527 card to issue the command to - * - * This may be called from the interrupt state, where it is used - * to restart the rx ring if the card runs out of rx buffers. - * - * We must first check if it's ok to (re)start the transceiver. See - * mc32_close for details. - */ - -static void mc32_start_transceiver(struct net_device *dev) { - - struct mc32_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - /* Ignore RX overflow on device closure */ - if (lp->xceiver_desired_state==HALTED) - return; - - /* Give the card the offset to the post-EOL-bit RX descriptor */ - mc32_ready_poll(dev); - lp->rx_box->mbox=0; - lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next; - outb(HOST_CMD_START_RX, ioaddr+HOST_CMD); - - mc32_ready_poll(dev); - lp->tx_box->mbox=0; - outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD); /* card ignores this on RX restart */ - - /* We are not interrupted on start completion */ -} - - -/** - * mc32_halt_transceiver - tell board to stop tx/rx - * @dev: The 3c527 card to issue the command to - * - * We issue the commands to halt the card's transceiver. In fact, - * after some experimenting we now simply tell the card to - * suspend. When issuing aborts occasionally odd things happened. - * - * We then sleep until the card has notified us that both rx and - * tx have been suspended. - */ - -static void mc32_halt_transceiver(struct net_device *dev) -{ - struct mc32_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - mc32_ready_poll(dev); - lp->rx_box->mbox=0; - outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD); - wait_for_completion(&lp->xceiver_cmd); - - mc32_ready_poll(dev); - lp->tx_box->mbox=0; - outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD); - wait_for_completion(&lp->xceiver_cmd); -} - - -/** - * mc32_load_rx_ring - load the ring of receive buffers - * @dev: 3c527 to build the ring for - * - * This initialises the on-card and driver datastructures to - * the point where mc32_start_transceiver() can be called. - * - * The card sets up the receive ring for us. We are required to use the - * ring it provides, although the size of the ring is configurable. - * - * We allocate an sk_buff for each ring entry in turn and - * initialise its house-keeping info. At the same time, we read - * each 'next' pointer in our rx_ring array. This reduces slow - * shared-memory reads and makes it easy to access predecessor - * descriptors. - * - * We then set the end-of-list bit for the last entry so that the - * card will know when it has run out of buffers. 
- */ - -static int mc32_load_rx_ring(struct net_device *dev) -{ - struct mc32_local *lp = netdev_priv(dev); - int i; - u16 rx_base; - volatile struct skb_header *p; - - rx_base=lp->rx_chain; - - for(i=0; i<RX_RING_LEN; i++) { - lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL); - if (lp->rx_ring[i].skb==NULL) { - for (;i>=0;i--) - kfree_skb(lp->rx_ring[i].skb); - return -ENOBUFS; - } - skb_reserve(lp->rx_ring[i].skb, 18); - - p=isa_bus_to_virt(lp->base+rx_base); - - p->control=0; - p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data); - p->status=0; - p->length=1532; - - lp->rx_ring[i].p=p; - rx_base=p->next; - } - - lp->rx_ring[i-1].p->control |= CONTROL_EOL; - - lp->rx_ring_tail=0; - - return 0; -} - - -/** - * mc32_flush_rx_ring - free the ring of receive buffers - * @lp: Local data of 3c527 to flush the rx ring of - * - * Free the buffer for each ring slot. This may be called - * before mc32_load_rx_ring(), eg. on error in mc32_open(). - * Requires rx skb pointers to point to a valid skb, or NULL. - */ - -static void mc32_flush_rx_ring(struct net_device *dev) -{ - struct mc32_local *lp = netdev_priv(dev); - int i; - - for(i=0; i < RX_RING_LEN; i++) - { - if (lp->rx_ring[i].skb) { - dev_kfree_skb(lp->rx_ring[i].skb); - lp->rx_ring[i].skb = NULL; - } - lp->rx_ring[i].p=NULL; - } -} - - -/** - * mc32_load_tx_ring - load transmit ring - * @dev: The 3c527 card to issue the command to - * - * This sets up the host transmit data-structures. - * - * First, we obtain from the card it's current position in the tx - * ring, so that we will know where to begin transmitting - * packets. - * - * Then, we read the 'next' pointers from the on-card tx ring into - * our tx_ring array to reduce slow shared-mem reads. Finally, we - * intitalise the tx house keeping variables. - * - */ - -static void mc32_load_tx_ring(struct net_device *dev) -{ - struct mc32_local *lp = netdev_priv(dev); - volatile struct skb_header *p; - int i; - u16 tx_base; - - tx_base=lp->tx_box->data[0]; - - for(i=0 ; i<TX_RING_LEN ; i++) - { - p=isa_bus_to_virt(lp->base+tx_base); - lp->tx_ring[i].p=p; - lp->tx_ring[i].skb=NULL; - - tx_base=p->next; - } - - /* -1 so that tx_ring_head cannot "lap" tx_ring_tail */ - /* see mc32_tx_ring */ - - atomic_set(&lp->tx_count, TX_RING_LEN-1); - atomic_set(&lp->tx_ring_head, 0); - lp->tx_ring_tail=0; -} - - -/** - * mc32_flush_tx_ring - free transmit ring - * @lp: Local data of 3c527 to flush the tx ring of - * - * If the ring is non-empty, zip over the it, freeing any - * allocated skb_buffs. The tx ring house-keeping variables are - * then reset. Requires rx skb pointers to point to a valid skb, - * or NULL. - */ - -static void mc32_flush_tx_ring(struct net_device *dev) -{ - struct mc32_local *lp = netdev_priv(dev); - int i; - - for (i=0; i < TX_RING_LEN; i++) - { - if (lp->tx_ring[i].skb) - { - dev_kfree_skb(lp->tx_ring[i].skb); - lp->tx_ring[i].skb = NULL; - } - } - - atomic_set(&lp->tx_count, 0); - atomic_set(&lp->tx_ring_head, 0); - lp->tx_ring_tail=0; -} - - -/** - * mc32_open - handle 'up' of card - * @dev: device to open - * - * The user is trying to bring the card into ready state. This requires - * a brief dialogue with the card. Firstly we enable interrupts and then - * 'indications'. Without these enabled the card doesn't bother telling - * us what it has done. This had me puzzled for a week. - * - * We configure the number of card descriptors, then load the network - * address and multicast filters. Turn on the workaround mode. 
This - * works around a bug in the 82586 - it asks the firmware to do - * so. It has a performance (latency) hit but is needed on busy - * [read most] lans. We load the ring with buffers then we kick it - * all off. - */ - -static int mc32_open(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - struct mc32_local *lp = netdev_priv(dev); - u8 one=1; - u8 regs; - u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN}; - - /* - * Interrupts enabled - */ - - regs=inb(ioaddr+HOST_CTRL); - regs|=HOST_CTRL_INTE; - outb(regs, ioaddr+HOST_CTRL); - - /* - * Allow ourselves to issue commands - */ - - up(&lp->cmd_mutex); - - - /* - * Send the indications on command - */ - - mc32_command(dev, 4, &one, 2); - - /* - * Poke it to make sure it's really dead. - */ - - mc32_halt_transceiver(dev); - mc32_flush_tx_ring(dev); - - /* - * Ask card to set up on-card descriptors to our spec - */ - - if(mc32_command(dev, 8, descnumbuffs, 4)) { - pr_info("%s: %s rejected our buffer configuration!\n", - dev->name, cardname); - mc32_close(dev); - return -ENOBUFS; - } - - /* Report new configuration */ - mc32_command(dev, 6, NULL, 0); - - lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */ - lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */ - lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */ - lp->rx_len = lp->exec_box->data[11]; /* Receive list count */ - - /* Set Network Address */ - mc32_command(dev, 1, dev->dev_addr, 6); - - /* Set the filters */ - mc32_set_multicast_list(dev); - - if (WORKAROUND_82586) { - u16 zero_word=0; - mc32_command(dev, 0x0D, &zero_word, 2); /* 82586 bug workaround on */ - } - - mc32_load_tx_ring(dev); - - if(mc32_load_rx_ring(dev)) - { - mc32_close(dev); - return -ENOBUFS; - } - - lp->xceiver_desired_state = RUNNING; - - /* And finally, set the ball rolling... */ - mc32_start_transceiver(dev); - - netif_start_queue(dev); - - return 0; -} - - -/** - * mc32_timeout - handle a timeout from the network layer - * @dev: 3c527 that timed out - * - * Handle a timeout on transmit from the 3c527. This normally means - * bad things as the hardware handles cable timeouts and mess for - * us. - * - */ - -static void mc32_timeout(struct net_device *dev) -{ - pr_warning("%s: transmit timed out?\n", dev->name); - /* Try to restart the adaptor. */ - netif_wake_queue(dev); -} - - -/** - * mc32_send_packet - queue a frame for transmit - * @skb: buffer to transmit - * @dev: 3c527 to send it out of - * - * Transmit a buffer. This normally means throwing the buffer onto - * the transmit queue as the queue is quite large. If the queue is - * full then we set tx_busy and return. Once the interrupt handler - * gets messages telling it to reclaim transmit queue entries, we will - * clear tx_busy and the kernel will start calling this again. - * - * We do not disable interrupts or acquire any locks; this can - * run concurrently with mc32_tx_ring(), and the function itself - * is serialised at a higher layer. However, similarly for the - * card itself, we must ensure that we update tx_ring_head only - * after we've established a valid packet on the tx ring (and - * before we let the card "see" it, to prevent it racing with the - * irq handler). 
- * - */ - -static netdev_tx_t mc32_send_packet(struct sk_buff *skb, - struct net_device *dev) -{ - struct mc32_local *lp = netdev_priv(dev); - u32 head = atomic_read(&lp->tx_ring_head); - - volatile struct skb_header *p, *np; - - netif_stop_queue(dev); - - if(atomic_read(&lp->tx_count)==0) { - return NETDEV_TX_BUSY; - } - - if (skb_padto(skb, ETH_ZLEN)) { - netif_wake_queue(dev); - return NETDEV_TX_OK; - } - - atomic_dec(&lp->tx_count); - - /* P is the last sending/sent buffer as a pointer */ - p=lp->tx_ring[head].p; - - head = next_tx(head); - - /* NP is the buffer we will be loading */ - np=lp->tx_ring[head].p; - - /* We will need this to flush the buffer out */ - lp->tx_ring[head].skb=skb; - - np->length = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len; - np->data = isa_virt_to_bus(skb->data); - np->status = 0; - np->control = CONTROL_EOP | CONTROL_EOL; - wmb(); - - /* - * The new frame has been setup; we can now - * let the interrupt handler and card "see" it - */ - - atomic_set(&lp->tx_ring_head, head); - p->control &= ~CONTROL_EOL; - - netif_wake_queue(dev); - return NETDEV_TX_OK; -} - - -/** - * mc32_update_stats - pull off the on board statistics - * @dev: 3c527 to service - * - * - * Query and reset the on-card stats. There's the small possibility - * of a race here, which would result in an underestimation of - * actual errors. As such, we'd prefer to keep all our stats - * collection in software. As a rule, we do. However it can't be - * used for rx errors and collisions as, by default, the card discards - * bad rx packets. - * - * Setting the SAV BP in the rx filter command supposedly - * stops this behaviour. However, testing shows that it only seems to - * enable the collation of on-card rx statistics --- the driver - * never sees an RX descriptor with an error status set. - * - */ - -static void mc32_update_stats(struct net_device *dev) -{ - struct mc32_local *lp = netdev_priv(dev); - volatile struct mc32_stats *st = lp->stats; - - u32 rx_errors=0; - - rx_errors+=dev->stats.rx_crc_errors +=st->rx_crc_errors; - st->rx_crc_errors=0; - rx_errors+=dev->stats.rx_fifo_errors +=st->rx_overrun_errors; - st->rx_overrun_errors=0; - rx_errors+=dev->stats.rx_frame_errors +=st->rx_alignment_errors; - st->rx_alignment_errors=0; - rx_errors+=dev->stats.rx_length_errors+=st->rx_tooshort_errors; - st->rx_tooshort_errors=0; - rx_errors+=dev->stats.rx_missed_errors+=st->rx_outofresource_errors; - st->rx_outofresource_errors=0; - dev->stats.rx_errors=rx_errors; - - /* Number of packets which saw one collision */ - dev->stats.collisions+=st->dataC[10]; - st->dataC[10]=0; - - /* Number of packets which saw 2--15 collisions */ - dev->stats.collisions+=st->dataC[11]; - st->dataC[11]=0; -} - - -/** - * mc32_rx_ring - process the receive ring - * @dev: 3c527 that needs its receive ring processing - * - * - * We have received one or more indications from the card that a - * receive has completed. The buffer ring thus contains dirty - * entries. We walk the ring by iterating over the circular rx_ring - * array, starting at the next dirty buffer (which happens to be the - * one we finished up at last time around). - * - * For each completed packet, we will either copy it and pass it up - * the stack or, if the packet is near MTU sized, we allocate - * another buffer and flip the old one up the stack. - * - * We must succeed in keeping a buffer on the ring. If necessary we - * will toss a received packet rather than lose a ring entry. 
Once - * the first uncompleted descriptor is found, we move the - * End-Of-List bit to include the buffers just processed. - * - */ - -static void mc32_rx_ring(struct net_device *dev) -{ - struct mc32_local *lp = netdev_priv(dev); - volatile struct skb_header *p; - u16 rx_ring_tail; - u16 rx_old_tail; - int x=0; - - rx_old_tail = rx_ring_tail = lp->rx_ring_tail; - - do - { - p=lp->rx_ring[rx_ring_tail].p; - - if(!(p->status & (1<<7))) { /* Not COMPLETED */ - break; - } - if(p->status & (1<<6)) /* COMPLETED_OK */ - { - - u16 length=p->length; - struct sk_buff *skb; - struct sk_buff *newskb; - - /* Try to save time by avoiding a copy on big frames */ - - if ((length > RX_COPYBREAK) && - ((newskb = netdev_alloc_skb(dev, 1532)) != NULL)) - { - skb=lp->rx_ring[rx_ring_tail].skb; - skb_put(skb, length); - - skb_reserve(newskb,18); - lp->rx_ring[rx_ring_tail].skb=newskb; - p->data=isa_virt_to_bus(newskb->data); - } - else - { - skb = netdev_alloc_skb(dev, length + 2); - - if(skb==NULL) { - dev->stats.rx_dropped++; - goto dropped; - } - - skb_reserve(skb,2); - memcpy(skb_put(skb, length), - lp->rx_ring[rx_ring_tail].skb->data, length); - } - - skb->protocol=eth_type_trans(skb,dev); - dev->stats.rx_packets++; - dev->stats.rx_bytes += length; - netif_rx(skb); - } - - dropped: - p->length = 1532; - p->status = 0; - - rx_ring_tail=next_rx(rx_ring_tail); - } - while(x++<48); - - /* If there was actually a frame to be processed, place the EOL bit */ - /* at the descriptor prior to the one to be filled next */ - - if (rx_ring_tail != rx_old_tail) - { - lp->rx_ring[prev_rx(rx_ring_tail)].p->control |= CONTROL_EOL; - lp->rx_ring[prev_rx(rx_old_tail)].p->control &= ~CONTROL_EOL; - - lp->rx_ring_tail=rx_ring_tail; - } -} - - -/** - * mc32_tx_ring - process completed transmits - * @dev: 3c527 that needs its transmit ring processing - * - * - * This operates in a similar fashion to mc32_rx_ring. We iterate - * over the transmit ring. For each descriptor which has been - * processed by the card, we free its associated buffer and note - * any errors. This continues until the transmit ring is emptied - * or we reach a descriptor that hasn't yet been processed by the - * card. - * - */ - -static void mc32_tx_ring(struct net_device *dev) -{ - struct mc32_local *lp = netdev_priv(dev); - volatile struct skb_header *np; - - /* - * We rely on head==tail to mean 'queue empty'. 
- * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent - * tx_ring_head wrapping to tail and confusing a 'queue empty' - * condition with 'queue full' - */ - - while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head)) - { - u16 t; - - t=next_tx(lp->tx_ring_tail); - np=lp->tx_ring[t].p; - - if(!(np->status & (1<<7))) - { - /* Not COMPLETED */ - break; - } - dev->stats.tx_packets++; - if(!(np->status & (1<<6))) /* Not COMPLETED_OK */ - { - dev->stats.tx_errors++; - - switch(np->status&0x0F) - { - case 1: - dev->stats.tx_aborted_errors++; - break; /* Max collisions */ - case 2: - dev->stats.tx_fifo_errors++; - break; - case 3: - dev->stats.tx_carrier_errors++; - break; - case 4: - dev->stats.tx_window_errors++; - break; /* CTS Lost */ - case 5: - dev->stats.tx_aborted_errors++; - break; /* Transmit timeout */ - } - } - /* Packets are sent in order - this is - basically a FIFO queue of buffers matching - the card ring */ - dev->stats.tx_bytes+=lp->tx_ring[t].skb->len; - dev_kfree_skb_irq(lp->tx_ring[t].skb); - lp->tx_ring[t].skb=NULL; - atomic_inc(&lp->tx_count); - netif_wake_queue(dev); - - lp->tx_ring_tail=t; - } - -} - - -/** - * mc32_interrupt - handle an interrupt from a 3c527 - * @irq: Interrupt number - * @dev_id: 3c527 that requires servicing - * @regs: Registers (unused) - * - * - * An interrupt is raised whenever the 3c527 writes to the command - * register. This register contains the message it wishes to send us - * packed into a single byte field. We keep reading status entries - * until we have processed all the control items, but simply count - * transmit and receive reports. When all reports are in we empty the - * transceiver rings as appropriate. This saves the overhead of - * multiple command requests. - * - * Because MCA is level-triggered, we shouldn't miss indications. - * Therefore, we needn't ask the card to suspend interrupts within - * this handler. The card receives an implicit acknowledgment of the - * current interrupt when we read the command register. - * - */ - -static irqreturn_t mc32_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct mc32_local *lp; - int ioaddr, status, boguscount = 0; - int rx_event = 0; - int tx_event = 0; - - ioaddr = dev->base_addr; - lp = netdev_priv(dev); - - /* See whats cooking */ - - while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000) - { - status=inb(ioaddr+HOST_CMD); - - pr_debug("Status TX%d RX%d EX%d OV%d BC%d\n", - (status&7), (status>>3)&7, (status>>6)&1, - (status>>7)&1, boguscount); - - switch(status&7) - { - case 0: - break; - case 6: /* TX fail */ - case 2: /* TX ok */ - tx_event = 1; - break; - case 3: /* Halt */ - case 4: /* Abort */ - complete(&lp->xceiver_cmd); - break; - default: - pr_notice("%s: strange tx ack %d\n", dev->name, status&7); - } - status>>=3; - switch(status&7) - { - case 0: - break; - case 2: /* RX */ - rx_event=1; - break; - case 3: /* Halt */ - case 4: /* Abort */ - complete(&lp->xceiver_cmd); - break; - case 6: - /* Out of RX buffers stat */ - /* Must restart rx */ - dev->stats.rx_dropped++; - mc32_rx_ring(dev); - mc32_start_transceiver(dev); - break; - default: - pr_notice("%s: strange rx ack %d\n", - dev->name, status&7); - } - status>>=3; - if(status&1) - { - /* - * No thread is waiting: we need to tidy - * up ourself. 
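To make the head==tail convention described above concrete, here is a minimal standalone sketch of why only TX_RING_LEN-1 descriptors can ever be outstanding: if every slot were used, the enqueue index would wrap onto the dequeue index and "full" would be indistinguishable from "empty". RING_LEN and ring_space() below are invented for the illustration and are not driver code:

	#include <stdint.h>
	#include <stdio.h>

	#define RING_LEN 16	/* illustrative stand-in for TX_RING_LEN */

	/* Free slots when head==tail means empty: always at most RING_LEN-1. */
	static unsigned int ring_space(uint16_t head, uint16_t tail)
	{
		return (tail - head - 1) & (RING_LEN - 1);
	}

	int main(void)
	{
		uint16_t head = 0, tail = 0;

		printf("empty ring, usable slots: %u\n", ring_space(head, tail));

		/* enqueue until no space is left */
		while (ring_space(head, tail) > 0)
			head = (head + 1) & (RING_LEN - 1);

		printf("full ring: head=%u tail=%u (one slot stays unused)\n",
		       head, tail);
		return 0;
	}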
- */ - - if (lp->cmd_nonblocking) { - up(&lp->cmd_mutex); - if (lp->mc_reload_wait) - mc32_reset_multicast_list(dev); - } - else complete(&lp->execution_cmd); - } - if(status&2) - { - /* - * We get interrupted once per - * counter that is about to overflow. - */ - - mc32_update_stats(dev); - } - } - - - /* - * Process the transmit and receive rings - */ - - if(tx_event) - mc32_tx_ring(dev); - - if(rx_event) - mc32_rx_ring(dev); - - return IRQ_HANDLED; -} - - -/** - * mc32_close - user configuring the 3c527 down - * @dev: 3c527 card to shut down - * - * The 3c527 is a bus mastering device. We must be careful how we - * shut it down. It may also be running shared interrupt so we have - * to be sure to silence it properly - * - * We indicate that the card is closing to the rest of the - * driver. Otherwise, it is possible that the card may run out - * of receive buffers and restart the transceiver while we're - * trying to close it. - * - * We abort any receive and transmits going on and then wait until - * any pending exec commands have completed in other code threads. - * In theory we can't get here while that is true, in practice I am - * paranoid - * - * We turn off the interrupt enable for the board to be sure it can't - * intefere with other devices. - */ - -static int mc32_close(struct net_device *dev) -{ - struct mc32_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - u8 regs; - u16 one=1; - - lp->xceiver_desired_state = HALTED; - netif_stop_queue(dev); - - /* - * Send the indications on command (handy debug check) - */ - - mc32_command(dev, 4, &one, 2); - - /* Shut down the transceiver */ - - mc32_halt_transceiver(dev); - - /* Ensure we issue no more commands beyond this point */ - - down(&lp->cmd_mutex); - - /* Ok the card is now stopping */ - - regs=inb(ioaddr+HOST_CTRL); - regs&=~HOST_CTRL_INTE; - outb(regs, ioaddr+HOST_CTRL); - - mc32_flush_rx_ring(dev); - mc32_flush_tx_ring(dev); - - mc32_update_stats(dev); - - return 0; -} - - -/** - * mc32_get_stats - hand back stats to network layer - * @dev: The 3c527 card to handle - * - * We've collected all the stats we can in software already. Now - * it's time to update those kept on-card and return the lot. - * - */ - -static struct net_device_stats *mc32_get_stats(struct net_device *dev) -{ - mc32_update_stats(dev); - return &dev->stats; -} - - -/** - * do_mc32_set_multicast_list - attempt to update multicasts - * @dev: 3c527 device to load the list on - * @retry: indicates this is not the first call. - * - * - * Actually set or clear the multicast filter for this adaptor. The - * locking issues are handled by this routine. We have to track - * state as it may take multiple calls to get the command sequence - * completed. We just keep trying to schedule the loads until we - * manage to process them all. - * - * num_addrs == -1 Promiscuous mode, receive all packets - * - * num_addrs == 0 Normal mode, clear multicast list - * - * num_addrs > 0 Multicast mode, receive normal and MC packets, - * and do best-effort filtering. - * - * See mc32_update_stats() regards setting the SAV BP bit. 
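For reference, the byte read from HOST_CMD in the interrupt handler above packs several fields, as its debug print shows: a 3-bit transmit code, a 3-bit receive code, an "execute command completed" bit and a "statistics counter about to wrap" bit. A small standalone decoder assuming only that bit layout (struct mc32_status and decode_status() are invented for the illustration):

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * bits 0-2  transmit completion code
	 * bits 3-5  receive completion code
	 * bit  6    an execute command has completed
	 * bit  7    an on-card statistics counter is about to overflow
	 */
	struct mc32_status {
		unsigned int tx_code;
		unsigned int rx_code;
		unsigned int exec_done;
		unsigned int stats_wrap;
	};

	static struct mc32_status decode_status(uint8_t status)
	{
		struct mc32_status s = {
			.tx_code    = status & 7,
			.rx_code    = (status >> 3) & 7,
			.exec_done  = (status >> 6) & 1,
			.stats_wrap = (status >> 7) & 1,
		};
		return s;
	}

	int main(void)
	{
		struct mc32_status s = decode_status(0x93);	/* arbitrary test value */

		printf("TX=%u RX=%u EX=%u OV=%u\n",
		       s.tx_code, s.rx_code, s.exec_done, s.stats_wrap);
		return 0;
	}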
- * - */ - -static void do_mc32_set_multicast_list(struct net_device *dev, int retry) -{ - struct mc32_local *lp = netdev_priv(dev); - u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */ - - if ((dev->flags&IFF_PROMISC) || - (dev->flags&IFF_ALLMULTI) || - netdev_mc_count(dev) > 10) - /* Enable promiscuous mode */ - filt |= 1; - else if (!netdev_mc_empty(dev)) - { - unsigned char block[62]; - unsigned char *bp; - struct netdev_hw_addr *ha; - - if(retry==0) - lp->mc_list_valid = 0; - if(!lp->mc_list_valid) - { - block[1]=0; - block[0]=netdev_mc_count(dev); - bp=block+2; - - netdev_for_each_mc_addr(ha, dev) { - memcpy(bp, ha->addr, 6); - bp+=6; - } - if(mc32_command_nowait(dev, 2, block, - 2+6*netdev_mc_count(dev))==-1) - { - lp->mc_reload_wait = 1; - return; - } - lp->mc_list_valid=1; - } - } - - if(mc32_command_nowait(dev, 0, &filt, 2)==-1) - { - lp->mc_reload_wait = 1; - } - else { - lp->mc_reload_wait = 0; - } -} - - -/** - * mc32_set_multicast_list - queue multicast list update - * @dev: The 3c527 to use - * - * Commence loading the multicast list. This is called when the kernel - * changes the lists. It will override any pending list we are trying to - * load. - */ - -static void mc32_set_multicast_list(struct net_device *dev) -{ - do_mc32_set_multicast_list(dev,0); -} - - -/** - * mc32_reset_multicast_list - reset multicast list - * @dev: The 3c527 to use - * - * Attempt the next step in loading the multicast lists. If this attempt - * fails to complete then it will be scheduled and this function called - * again later from elsewhere. - */ - -static void mc32_reset_multicast_list(struct net_device *dev) -{ - do_mc32_set_multicast_list(dev,1); -} - -static void netdev_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr); -} - -static u32 netdev_get_msglevel(struct net_device *dev) -{ - return mc32_debug; -} - -static void netdev_set_msglevel(struct net_device *dev, u32 level) -{ - mc32_debug = level; -} - -static const struct ethtool_ops netdev_ethtool_ops = { - .get_drvinfo = netdev_get_drvinfo, - .get_msglevel = netdev_get_msglevel, - .set_msglevel = netdev_set_msglevel, -}; - -#ifdef MODULE - -static struct net_device *this_device; - -/** - * init_module - entry point - * - * Probe and locate a 3c527 card. This really should probe and locate - * all the 3c527 cards in the machine not just one of them. Yes you can - * insmod multiple modules for now but it's a hack. - */ - -int __init init_module(void) -{ - this_device = mc32_probe(-1); - if (IS_ERR(this_device)) - return PTR_ERR(this_device); - return 0; -} - -/** - * cleanup_module - free resources for an unload - * - * Unloading time. We release the MCA bus resources and the interrupt - * at which point everything is ready to unload. The card must be stopped - * at this point or we would not have been called. When we unload we - * leave the card stopped but not totally shut down. When the card is - * initialized it must be rebooted or the rings reloaded before any - * transmit operations are allowed to start scribbling into memory. 
- */ - -void __exit cleanup_module(void) -{ - unregister_netdev(this_device); - cleanup_card(this_device); - free_netdev(this_device); -} - -#endif /* MODULE */ diff --git a/drivers/net/ethernet/i825xx/3c527.h b/drivers/net/ethernet/i825xx/3c527.h deleted file mode 100644 index d693b8d15cd..00000000000 --- a/drivers/net/ethernet/i825xx/3c527.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * 3COM "EtherLink MC/32" Descriptions - */ - -/* - * Registers - */ - -#define HOST_CMD 0 -#define HOST_CMD_START_RX (1<<3) -#define HOST_CMD_SUSPND_RX (3<<3) -#define HOST_CMD_RESTRT_RX (5<<3) - -#define HOST_CMD_SUSPND_TX 3 -#define HOST_CMD_RESTRT_TX 5 - - -#define HOST_STATUS 2 -#define HOST_STATUS_CRR (1<<6) -#define HOST_STATUS_CWR (1<<5) - - -#define HOST_CTRL 6 -#define HOST_CTRL_ATTN (1<<7) -#define HOST_CTRL_RESET (1<<6) -#define HOST_CTRL_INTE (1<<2) - -#define HOST_RAMPAGE 8 - -#define HALTED 0 -#define RUNNING 1 - -struct mc32_mailbox -{ - u16 mbox; - u16 data[1]; -} __packed; - -struct skb_header -{ - u8 status; - u8 control; - u16 next; /* Do not change! */ - u16 length; - u32 data; -} __packed; - -struct mc32_stats -{ - /* RX Errors */ - u32 rx_crc_errors; - u32 rx_alignment_errors; - u32 rx_overrun_errors; - u32 rx_tooshort_errors; - u32 rx_toolong_errors; - u32 rx_outofresource_errors; - - u32 rx_discarded; /* via card pattern match filter */ - - /* TX Errors */ - u32 tx_max_collisions; - u32 tx_carrier_errors; - u32 tx_underrun_errors; - u32 tx_cts_errors; - u32 tx_timeout_errors; - - /* various cruft */ - u32 dataA[6]; - u16 dataB[5]; - u32 dataC[14]; -} __packed; - -#define STATUS_MASK 0x0F -#define COMPLETED (1<<7) -#define COMPLETED_OK (1<<6) -#define BUFFER_BUSY (1<<5) - -#define CONTROL_EOP (1<<7) /* End Of Packet */ -#define CONTROL_EOL (1<<6) /* End of List */ - -#define MCA_MC32_ID 0x0041 /* Our MCA ident */ diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig index ca1ae985c6d..fed5080a6b6 100644 --- a/drivers/net/ethernet/i825xx/Kconfig +++ b/drivers/net/ethernet/i825xx/Kconfig @@ -43,28 +43,6 @@ config EL16 To compile this driver as a module, choose M here. The module will be called 3c507. -config ELMC - tristate "3c523 \"EtherLink/MC\" support" - depends on MCA_LEGACY - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called 3c523. - -config ELMC_II - tristate "3c527 \"EtherLink/MC 32\" support (EXPERIMENTAL)" - depends on MCA && MCA_LEGACY - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here. The module - will be called 3c527. 
- config ARM_ETHER1 tristate "Acorn Ether1 support" depends on ARM && ARCH_ACORN diff --git a/drivers/net/ethernet/i825xx/Makefile b/drivers/net/ethernet/i825xx/Makefile index f68a3694968..6adff85e8ec 100644 --- a/drivers/net/ethernet/i825xx/Makefile +++ b/drivers/net/ethernet/i825xx/Makefile @@ -7,8 +7,6 @@ obj-$(CONFIG_EEXPRESS) += eexpress.o obj-$(CONFIG_EEXPRESS_PRO) += eepro.o obj-$(CONFIG_ELPLUS) += 3c505.o obj-$(CONFIG_EL16) += 3c507.o -obj-$(CONFIG_ELMC) += 3c523.o -obj-$(CONFIG_ELMC_II) += 3c527.o obj-$(CONFIG_LP486E) += lp486e.o obj-$(CONFIG_NI52) += ni52.o obj-$(CONFIG_SUN3_82586) += sun3_82586.o diff --git a/drivers/net/ethernet/i825xx/eexpress.c b/drivers/net/ethernet/i825xx/eexpress.c index cc2e66ad443..7a6a2f04c5b 100644 --- a/drivers/net/ethernet/i825xx/eexpress.c +++ b/drivers/net/ethernet/i825xx/eexpress.c @@ -9,7 +9,7 @@ * Many modifications, and currently maintained, by * Philip Blundell <philb@gnu.org> * Added the Compaq LTE Alan Cox <alan@lxorguk.ukuu.org.uk> - * Added MCA support Adam Fritzler + * Added MCA support Adam Fritzler (now deleted) * * Note - this driver is experimental still - it has problems on faster * machines. Someone needs to sit down and go through it line by line with @@ -111,7 +111,6 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> -#include <linux/mca-legacy.h> #include <linux/spinlock.h> #include <linux/bitops.h> #include <linux/jiffies.h> @@ -227,16 +226,6 @@ static unsigned short start_code[] = { /* maps irq number to EtherExpress magic value */ static char irqrmap[] = { 0,0,1,2,3,4,0,0,0,1,5,6,0,0,0,0 }; -#ifdef CONFIG_MCA_LEGACY -/* mapping of the first four bits of the second POS register */ -static unsigned short mca_iomap[] = { - 0x270, 0x260, 0x250, 0x240, 0x230, 0x220, 0x210, 0x200, - 0x370, 0x360, 0x350, 0x340, 0x330, 0x320, 0x310, 0x300 -}; -/* bits 5-7 of the second POS register */ -static char mca_irqmap[] = { 12, 9, 3, 4, 5, 10, 11, 15 }; -#endif - /* * Prototypes for Linux interface */ @@ -340,53 +329,6 @@ static int __init do_express_probe(struct net_device *dev) dev->if_port = 0xff; /* not set */ -#ifdef CONFIG_MCA_LEGACY - if (MCA_bus) { - int slot = 0; - - /* - * Only find one card at a time. Subsequent calls - * will find others, however, proper multicard MCA - * probing and setup can't be done with the - * old-style Space.c init routines. -- ASF - */ - while (slot != MCA_NOTFOUND) { - int pos0, pos1; - - slot = mca_find_unused_adapter(0x628B, slot); - if (slot == MCA_NOTFOUND) - break; - - pos0 = mca_read_stored_pos(slot, 2); - pos1 = mca_read_stored_pos(slot, 3); - ioaddr = mca_iomap[pos1&0xf]; - - dev->irq = mca_irqmap[(pos1>>4)&0x7]; - - /* - * XXX: Transceiver selection is done - * differently on the MCA version. - * How to get it to select something - * other than external/AUI is currently - * unknown. This code is just for looks. 
-- ASF - */ - if ((pos0 & 0x7) == 0x1) - dev->if_port = AUI; - else if ((pos0 & 0x7) == 0x5) { - if (pos1 & 0x80) - dev->if_port = BNC; - else - dev->if_port = TPE; - } - - mca_set_adapter_name(slot, "Intel EtherExpress 16 MCA"); - mca_set_adapter_procfn(slot, NULL, dev); - mca_mark_as_used(slot); - - break; - } - } -#endif if (ioaddr&0xfe00) { if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress")) return -EBUSY; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 76213162fbe..79b07ec6726 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -7,7 +7,7 @@ config NET_VENDOR_INTEL default y depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \ ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \ - GSC || BVME6000 || MVME16x || ARCH_ENP2611 || \ + GSC || BVME6000 || MVME16x || \ (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \ EXPERIMENTAL ---help--- @@ -120,6 +120,17 @@ config IGB_DCA driver. DCA is a method for warming the CPU cache before data is used, with the intent of lessening the impact of cache misses. +config IGB_PTP + bool "PTP Hardware Clock (PHC)" + default y + depends on IGB && PTP_1588_CLOCK + ---help--- + Say Y here if you want to use PTP Hardware Clock (PHC) in the + driver. Only the basic clock operations have been implemented. + + Every timestamp and clock read operations must consult the + overflow counter to form a correct time value. + config IGBVF tristate "Intel(R) 82576 Virtual Function Ethernet support" depends on PCI @@ -182,6 +193,14 @@ config IXGBE To compile this driver as a module, choose M here. The module will be called ixgbe. +config IXGBE_HWMON + bool "Intel(R) 10GbE PCI Express adapters HWMON support" + default y + depends on IXGBE && HWMON && !(IXGBE=y && HWMON=m) + ---help--- + Say Y if you want to expose the thermal sensor data on some of + our cards, via a hwmon sysfs interface. + config IXGBE_DCA bool "Direct Cache Access (DCA) Support" default y @@ -201,6 +220,17 @@ config IXGBE_DCB If unsure, say N. +config IXGBE_PTP + bool "PTP Clock Support" + default n + depends on IXGBE && PTP_1588_CLOCK + ---help--- + Say Y here if you want support for 1588 Timestamping with a + PHC device, using the PTP 1588 Clock support. This is + required to enable timestamping support for the device. + + If unsure, say N. + config IXGBEVF tristate "Intel(R) 82599 Virtual Function Ethernet support" depends on PCI_MSI diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index e498effb85d..ada720b42ff 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1759,6 +1759,7 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb, skb->data, skb->len, PCI_DMA_TODEVICE)); /* check for mapping failure? 
*/ cb->u.tcb.tbd.size = cpu_to_le16(skb->len); + skb_tx_timestamp(skb); } static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, @@ -2733,6 +2734,7 @@ static const struct ethtool_ops e100_ethtool_ops = { .set_phys_id = e100_set_phys_id, .get_ethtool_stats = e100_get_ethtool_stats, .get_sset_count = e100_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, }; static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 8d8908d2a9b..95731c84104 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -831,9 +831,10 @@ static int e1000_set_features(struct net_device *netdev, if (changed & NETIF_F_HW_VLAN_RX) e1000_vlan_mode(netdev, features); - if (!(changed & NETIF_F_RXCSUM)) + if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL))) return 0; + netdev->features = features; adapter->rx_csum = !!(features & NETIF_F_RXCSUM); if (netif_running(netdev)) @@ -1078,6 +1079,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, netdev->features |= netdev->hw_features; netdev->hw_features |= NETIF_F_RXCSUM; + netdev->hw_features |= NETIF_F_RXALL; netdev->hw_features |= NETIF_F_RXFCS; if (pci_using_dac) { @@ -1845,6 +1847,22 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) break; } + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + ew32(RCTL, rctl); } @@ -3247,6 +3265,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, nr_frags, mss); if (count) { + skb_tx_timestamp(skb); + e1000_tx_queue(adapter, tx_ring, tx_flags, count); /* Make sure there is space in the ring for the next send. 
*/ e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); @@ -4050,7 +4070,11 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, /* errors is only valid for DD + EOP descriptors */ if (unlikely((status & E1000_RXD_STAT_EOP) && (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { - u8 last_byte = *(skb->data + length - 1); + u8 *mapped; + u8 last_byte; + + mapped = page_address(buffer_info->page); + last_byte = *(mapped + length - 1); if (TBI_ACCEPT(hw, status, rx_desc->errors, length, last_byte)) { spin_lock_irqsave(&adapter->stats_lock, @@ -4061,6 +4085,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, irq_flags); length--; } else { + if (netdev->features & NETIF_F_RXALL) + goto process_skb; /* recycle both page and skb */ buffer_info->skb = skb; /* an error means any chain goes out the window @@ -4073,6 +4099,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, } #define rxtop rx_ring->rx_skb_top +process_skb: if (!(status & E1000_RXD_STAT_EOP)) { /* this descriptor is only the beginning (or middle) */ if (!rxtop) { @@ -4280,12 +4307,15 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, flags); length--; } else { + if (netdev->features & NETIF_F_RXALL) + goto process_skb; /* recycle */ buffer_info->skb = skb; goto next_desc; } } +process_skb: total_rx_bytes += (length - 4); /* don't count FCS */ total_rx_packets++; @@ -4369,30 +4399,6 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, break; } - /* Fix for errata 23, can't cross 64kB boundary */ - if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { - struct sk_buff *oldskb = skb; - e_err(rx_err, "skb align check failed: %u bytes at " - "%p\n", bufsz, skb->data); - /* Try again, without freeing the previous */ - skb = netdev_alloc_skb_ip_align(netdev, bufsz); - /* Failed allocation, critical failure */ - if (!skb) { - dev_kfree_skb(oldskb); - adapter->alloc_rx_buff_failed++; - break; - } - - if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { - /* give up */ - dev_kfree_skb(skb); - dev_kfree_skb(oldskb); - break; /* while (cleaned_count--) */ - } - - /* Use new allocation */ - dev_kfree_skb(oldskb); - } buffer_info->skb = skb; buffer_info->length = adapter->rx_buffer_len; check_page: diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index bac9dda31b6..4dd18a1f45d 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -228,9 +228,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw) /* FWSM register */ mac->has_fwsm = true; /* ARC supported; valid only if manageability features are enabled. */ - mac->arc_subsystem_valid = - (er32(FWSM) & E1000_FWSM_MODE_MASK) - ? 
true : false; + mac->arc_subsystem_valid = !!(er32(FWSM) & E1000_FWSM_MODE_MASK); /* Adaptive IFS not supported */ mac->adaptive_ifs = false; @@ -766,6 +764,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; + u16 kum_reg_data; /* * Prevent the PCI-E bus from sticking if there is no TLP connection @@ -791,6 +790,13 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) ew32(CTRL, ctrl | E1000_CTRL_RST); e1000_release_phy_80003es2lan(hw); + /* Disable IBIST slave mode (far-end loopback) */ + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + &kum_reg_data); + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + ret_val = e1000e_get_auto_rd_done(hw); if (ret_val) /* We don't want to continue accessing MAC registers. */ @@ -938,6 +944,14 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) else reg |= (1 << 28); ew32(TARC(1), reg); + + /* + * Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. + */ + reg = er32(RFCTL); + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + ew32(RFCTL, reg); } /** @@ -1433,6 +1447,7 @@ static const struct e1000_mac_operations es2_mac_ops = { /* setup_physical_interface dependent on media type */ .setup_led = e1000e_setup_led_generic, .config_collision_dist = e1000e_config_collision_dist_generic, + .rar_set = e1000e_rar_set_generic, }; static const struct e1000_phy_operations es2_phy_ops = { diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index b3fdc6977f2..36db4df09ae 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -295,9 +295,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw) * ARC supported; valid only if manageability features are * enabled. */ - mac->arc_subsystem_valid = - (er32(FWSM) & E1000_FWSM_MODE_MASK) - ? true : false; + mac->arc_subsystem_valid = !!(er32(FWSM) & + E1000_FWSM_MODE_MASK); break; case e1000_82574: case e1000_82583: @@ -798,7 +797,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) /* Check for pending operations. */ for (i = 0; i < E1000_FLASH_UPDATES; i++) { usleep_range(1000, 2000); - if ((er32(EECD) & E1000_EECD_FLUPD) == 0) + if (!(er32(EECD) & E1000_EECD_FLUPD)) break; } @@ -822,7 +821,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) for (i = 0; i < E1000_FLASH_UPDATES; i++) { usleep_range(1000, 2000); - if ((er32(EECD) & E1000_EECD_FLUPD) == 0) + if (!(er32(EECD) & E1000_EECD_FLUPD)) break; } @@ -1000,7 +999,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) **/ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) { - u32 ctrl, ctrl_ext; + u32 ctrl, ctrl_ext, eecd; s32 ret_val; /* @@ -1073,6 +1072,16 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) */ switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* + * REQ and GNT bits need to be cleared when using AUTO_RD + * to access the EEPROM. + */ + eecd = er32(EECD); + eecd &= ~(E1000_EECD_REQ | E1000_EECD_GNT); + ew32(EECD, eecd); + break; case e1000_82573: case e1000_82574: case e1000_82583: @@ -1280,6 +1289,16 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) ew32(CTRL_EXT, reg); } + /* + * Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. 
+ */ + if (hw->mac.type <= e1000_82573) { + reg = er32(RFCTL); + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + ew32(RFCTL, reg); + } + /* PCI-Ex Control Registers */ switch (hw->mac.type) { case e1000_82574: @@ -1763,7 +1782,8 @@ void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state) * incoming packets directed to this port are dropped. * Eventually the LAA will be in RAR[0] and RAR[14]. */ - e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1); + hw->mac.ops.rar_set(hw, hw->mac.addr, + hw->mac.rar_entry_count - 1); } /** @@ -1927,6 +1947,7 @@ static const struct e1000_mac_operations e82571_mac_ops = { .setup_led = e1000e_setup_led_generic, .config_collision_dist = e1000e_config_collision_dist_generic, .read_mac_addr = e1000_read_mac_addr_82571, + .rar_set = e1000e_rar_set_generic, }; static const struct e1000_phy_operations e82_phy_ops_igp = { @@ -2061,9 +2082,11 @@ const struct e1000_info e1000_82574_info = { | FLAG_HAS_SMART_POWER_DOWN | FLAG_HAS_AMT | FLAG_HAS_CTRLEXT_ON_LOAD, - .flags2 = FLAG2_CHECK_PHY_HANG + .flags2 = FLAG2_CHECK_PHY_HANG | FLAG2_DISABLE_ASPM_L0S - | FLAG2_NO_DISABLE_RX, + | FLAG2_DISABLE_ASPM_L1 + | FLAG2_NO_DISABLE_RX + | FLAG2_DMA_BURST, .pba = 32, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_82571, diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 3a502591716..351a4097b2b 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -74,7 +74,9 @@ #define E1000_WUS_BC E1000_WUFC_BC /* Extended Device Control */ +#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */ #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */ +#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */ #define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ #define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ #define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ @@ -573,6 +575,7 @@ #define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ /* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */ #define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ #define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ @@ -739,6 +742,7 @@ #define I82577_E_PHY_ID 0x01540050 #define I82578_E_PHY_ID 0x004DD040 #define I82579_E_PHY_ID 0x01540090 +#define I217_E_PHY_ID 0x015400A0 /* M88E1000 Specific Registers */ #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ @@ -850,4 +854,8 @@ /* SerDes Control */ #define E1000_GEN_POLL_TIMEOUT 640 +/* FW Semaphore */ +#define E1000_FWSM_WLOCK_MAC_MASK 0x0380 +#define E1000_FWSM_WLOCK_MAC_SHIFT 7 + #endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index b83897f76ee..6e6fffb3458 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -206,6 +206,7 @@ enum e1000_boards { board_ich10lan, board_pchlan, board_pch2lan, + board_pch_lpt, }; struct e1000_ps_page { @@ -528,6 +529,7 @@ extern const struct e1000_info e1000_ich9_info; extern const struct e1000_info e1000_ich10_info; extern const struct e1000_info e1000_pch_info; extern const struct e1000_info e1000_pch2_info; +extern const struct e1000_info e1000_pch_lpt_info; extern const struct e1000_info 
e1000_es2_info; extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, @@ -576,7 +578,7 @@ extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, u8 *mc_addr_list, u32 mc_addr_count); -extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +extern void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); @@ -673,11 +675,21 @@ static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data) return hw->phy.ops.read_reg(hw, offset, data); } +static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return hw->phy.ops.read_reg_locked(hw, offset, data); +} + static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) { return hw->phy.ops.write_reg(hw, offset, data); } +static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return hw->phy.ops.write_reg_locked(hw, offset, data); +} + static inline s32 e1000_get_cable_length(struct e1000_hw *hw) { return hw->phy.ops.get_cable_length(hw); @@ -735,9 +747,46 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) return readl(hw->hw_addr + reg); } +#define er32(reg) __er32(hw, E1000_##reg) + +/** + * __ew32_prepare - prepare to write to MAC CSR register on certain parts + * @hw: pointer to the HW structure + * + * When updating the MAC CSR registers, the Manageability Engine (ME) could + * be accessing the registers at the same time. Normally, this is handled in + * h/w by an arbiter but on some parts there is a bug that acknowledges Host + * accesses later than it should which could result in the register to have + * an incorrect value. Workaround this by checking the FWSM register which + * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set + * and try again a number of times. 
+ **/ +static inline s32 __ew32_prepare(struct e1000_hw *hw) +{ + s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; + + while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) + udelay(50); + + return i; +} + static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) { + if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + __ew32_prepare(hw); + writel(val, hw->hw_addr + reg); } +#define ew32(reg, val) __ew32(hw, E1000_##reg, (val)) + +#define e1e_flush() er32(STATUS) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \ + (__ew32((a), (reg + ((offset) << 2)), (value))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) \ + (readl((a)->hw_addr + reg + ((offset) << 2))) + #endif /* _E1000_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index db35dd5d96d..d863075df7a 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -259,8 +259,7 @@ static int e1000_set_settings(struct net_device *netdev, * cannot be changed */ if (hw->phy.ops.check_reset_block(hw)) { - e_err("Cannot change link characteristics when SoL/IDER is " - "active.\n"); + e_err("Cannot change link characteristics when SoL/IDER is active.\n"); return -EINVAL; } @@ -403,15 +402,15 @@ static void e1000_get_regs(struct net_device *netdev, regs_buff[1] = er32(STATUS); regs_buff[2] = er32(RCTL); - regs_buff[3] = er32(RDLEN); - regs_buff[4] = er32(RDH); - regs_buff[5] = er32(RDT); + regs_buff[3] = er32(RDLEN(0)); + regs_buff[4] = er32(RDH(0)); + regs_buff[5] = er32(RDT(0)); regs_buff[6] = er32(RDTR); regs_buff[7] = er32(TCTL); - regs_buff[8] = er32(TDLEN); - regs_buff[9] = er32(TDH); - regs_buff[10] = er32(TDT); + regs_buff[8] = er32(TDLEN(0)); + regs_buff[9] = er32(TDH(0)); + regs_buff[10] = er32(TDT(0)); regs_buff[11] = er32(TIDV); regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ @@ -727,9 +726,8 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, (test[pat] & write)); val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); if (val != (test[pat] & write & mask)) { - e_err("pattern test reg %04X failed: got 0x%08X " - "expected 0x%08X\n", reg + offset, val, - (test[pat] & write & mask)); + e_err("pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg + offset, val, (test[pat] & write & mask)); *data = reg; return 1; } @@ -744,8 +742,8 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, __ew32(&adapter->hw, reg, write & mask); val = __er32(&adapter->hw, reg); if ((write & mask) != (val & mask)) { - e_err("set/check reg %04X test failed: got 0x%08X " - "expected 0x%08X\n", reg, (val & mask), (write & mask)); + e_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); *data = reg; return 1; } @@ -775,6 +773,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) u32 i; u32 toggle; u32 mask; + u32 wlock_mac = 0; /* * The status register is Read Only, so a write should fail. 
@@ -797,8 +796,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) ew32(STATUS, toggle); after = er32(STATUS) & toggle; if (value != after) { - e_err("failed STATUS register test got: 0x%08X expected: " - "0x%08X\n", after, value); + e_err("failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); *data = 1; return 1; } @@ -813,15 +812,15 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) } REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF); - REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); - REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF); - REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF); - REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF); REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8); REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF); REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF); - REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); - REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF); REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); @@ -830,29 +829,41 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); - REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF); if (!(adapter->flags & FLAG_IS_ICH)) REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); - REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); mask = 0x8003FFFF; switch (mac->type) { case e1000_ich10lan: case e1000_pchlan: case e1000_pch2lan: + case e1000_pch_lpt: mask |= (1 << 18); break; default: break; } - for (i = 0; i < mac->rar_entry_count; i++) + + if (mac->type == e1000_pch_lpt) + wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >> + E1000_FWSM_WLOCK_MAC_SHIFT; + + for (i = 0; i < mac->rar_entry_count; i++) { + /* Cannot test write-protected SHRAL[n] registers */ + if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac))) + continue; + REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), - mask, 0xFFFFFFFF); + mask, 0xFFFFFFFF); + } for (i = 0; i < mac->mta_reg_count; i++) REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); *data = 0; + return 0; } @@ -1104,11 +1115,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); - ew32(TDBAH, ((u64) tx_ring->dma >> 32)); - ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc)); - ew32(TDH, 0); - ew32(TDT, 0); + ew32(TDBAL(0), ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH(0), ((u64) tx_ring->dma >> 32)); + ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc)); + ew32(TDH(0), 0); + ew32(TDT(0), 0); ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR | E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); @@ -1168,11 +1179,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) rctl = er32(RCTL); if 
(!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) ew32(RCTL, rctl & ~E1000_RCTL_EN); - ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); - ew32(RDBAH, ((u64) rx_ring->dma >> 32)); - ew32(RDLEN, rx_ring->size); - ew32(RDH, 0); - ew32(RDT, 0); + ew32(RDBAL(0), ((u64) rx_ring->dma & 0xFFFFFFFF)); + ew32(RDBAH(0), ((u64) rx_ring->dma >> 32)); + ew32(RDLEN(0), rx_ring->size); + ew32(RDH(0), 0); + ew32(RDT(0), 0); rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | E1000_RCTL_SBP | E1000_RCTL_SECRC | @@ -1534,7 +1545,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) int ret_val = 0; unsigned long time; - ew32(RDT, rx_ring->count - 1); + ew32(RDT(0), rx_ring->count - 1); /* * Calculate the loop count based on the largest descriptor ring @@ -1561,7 +1572,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) if (k == tx_ring->count) k = 0; } - ew32(TDT, k); + ew32(TDT(0), k); e1e_flush(); msleep(200); time = jiffies; /* set the start time for the receive */ @@ -1791,8 +1802,7 @@ static void e1000_get_wol(struct net_device *netdev, wol->supported &= ~WAKE_UCAST; if (adapter->wol & E1000_WUFC_EX) - e_err("Interface does not support directed (unicast) " - "frame wake-up packets\n"); + e_err("Interface does not support directed (unicast) frame wake-up packets\n"); } if (adapter->wol & E1000_WUFC_EX) diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index f82ecf536c8..ed5b40985ed 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -36,16 +36,6 @@ struct e1000_adapter; #include "defines.h" -#define er32(reg) __er32(hw, E1000_##reg) -#define ew32(reg,val) __ew32(hw, E1000_##reg, (val)) -#define e1e_flush() er32(STATUS) - -#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \ - (writel((value), ((a)->hw_addr + reg + ((offset) << 2)))) - -#define E1000_READ_REG_ARRAY(a, reg, offset) \ - (readl((a)->hw_addr + reg + ((offset) << 2))) - enum e1e_registers { E1000_CTRL = 0x00000, /* Device Control - RW */ E1000_STATUS = 0x00008, /* Device Status - RO */ @@ -61,6 +51,7 @@ enum e1e_registers { E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ E1000_FCT = 0x00030, /* Flow Control Type - RW */ E1000_VET = 0x00038, /* VLAN Ether Type - RW */ + E1000_FEXTNVM3 = 0x0003C, /* Future Extended NVM 3 - RW */ E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */ E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */ E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */ @@ -94,31 +85,40 @@ enum e1e_registers { E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ - E1000_RDBAL = 0x02800, /* Rx Descriptor Base Address Low - RW */ - E1000_RDBAH = 0x02804, /* Rx Descriptor Base Address High - RW */ - E1000_RDLEN = 0x02808, /* Rx Descriptor Length - RW */ - E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */ - E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */ - E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ - E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */ -#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8)) - E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */ - -/* Convenience macros +/* + * Convenience macros * * Note: "_n" is the queue number of the register to be written to. 
* * Example usage: - * E1000_RDBAL_REG(current_rx_queue) - * + * E1000_RDBAL(current_rx_queue) */ -#define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8)) + E1000_RDBAL_BASE = 0x02800, /* Rx Descriptor Base Address Low - RW */ +#define E1000_RDBAL(_n) (E1000_RDBAL_BASE + (_n << 8)) + E1000_RDBAH_BASE = 0x02804, /* Rx Descriptor Base Address High - RW */ +#define E1000_RDBAH(_n) (E1000_RDBAH_BASE + (_n << 8)) + E1000_RDLEN_BASE = 0x02808, /* Rx Descriptor Length - RW */ +#define E1000_RDLEN(_n) (E1000_RDLEN_BASE + (_n << 8)) + E1000_RDH_BASE = 0x02810, /* Rx Descriptor Head - RW */ +#define E1000_RDH(_n) (E1000_RDH_BASE + (_n << 8)) + E1000_RDT_BASE = 0x02818, /* Rx Descriptor Tail - RW */ +#define E1000_RDT(_n) (E1000_RDT_BASE + (_n << 8)) + E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ + E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */ +#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8)) + E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */ + E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */ - E1000_TDBAL = 0x03800, /* Tx Descriptor Base Address Low - RW */ - E1000_TDBAH = 0x03804, /* Tx Descriptor Base Address High - RW */ - E1000_TDLEN = 0x03808, /* Tx Descriptor Length - RW */ - E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */ - E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */ + E1000_TDBAL_BASE = 0x03800, /* Tx Descriptor Base Address Low - RW */ +#define E1000_TDBAL(_n) (E1000_TDBAL_BASE + (_n << 8)) + E1000_TDBAH_BASE = 0x03804, /* Tx Descriptor Base Address High - RW */ +#define E1000_TDBAH(_n) (E1000_TDBAH_BASE + (_n << 8)) + E1000_TDLEN_BASE = 0x03808, /* Tx Descriptor Length - RW */ +#define E1000_TDLEN(_n) (E1000_TDLEN_BASE + (_n << 8)) + E1000_TDH_BASE = 0x03810, /* Tx Descriptor Head - RW */ +#define E1000_TDH(_n) (E1000_TDH_BASE + (_n << 8)) + E1000_TDT_BASE = 0x03818, /* Tx Descriptor Tail - RW */ +#define E1000_TDT(_n) (E1000_TDT_BASE + (_n << 8)) E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */ E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */ #define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8)) @@ -200,6 +200,14 @@ enum e1e_registers { #define E1000_RA (E1000_RAL(0)) E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */ #define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8)) + E1000_SHRAL_PCH_LPT_BASE = 0x05408, +#define E1000_SHRAL_PCH_LPT(_n) (E1000_SHRAL_PCH_LPT_BASE + ((_n) * 8)) + E1000_SHRAH_PCH_LTP_BASE = 0x0540C, +#define E1000_SHRAH_PCH_LPT(_n) (E1000_SHRAH_PCH_LTP_BASE + ((_n) * 8)) + E1000_SHRAL_BASE = 0x05438, /* Shared Receive Address Low - RW */ +#define E1000_SHRAL(_n) (E1000_SHRAL_BASE + ((_n) * 8)) + E1000_SHRAH_BASE = 0x0543C, /* Shared Receive Address High - RW */ +#define E1000_SHRAH(_n) (E1000_SHRAH_BASE + ((_n) * 8)) E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */ E1000_WUC = 0x05800, /* Wakeup Control - RW */ E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */ @@ -402,6 +410,8 @@ enum e1e_registers { #define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 #define E1000_DEV_ID_PCH2_LV_LM 0x1502 #define E1000_DEV_ID_PCH2_LV_V 0x1503 +#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A +#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B #define E1000_REVISION_4 4 @@ -422,6 +432,7 @@ enum e1000_mac_type { e1000_ich10lan, e1000_pchlan, e1000_pch2lan, + e1000_pch_lpt, }; enum e1000_media_type { @@ -459,6 +470,7 @@ enum e1000_phy_type { e1000_phy_82578, e1000_phy_82577, e1000_phy_82579, + e1000_phy_i217, }; enum e1000_bus_width { @@ -782,6 +794,7 @@ struct e1000_mac_operations { s32 
(*setup_led)(struct e1000_hw *); void (*write_vfta)(struct e1000_hw *, u32, u32); void (*config_collision_dist)(struct e1000_hw *); + void (*rar_set)(struct e1000_hw *, u8 *, u32); s32 (*read_mac_addr)(struct e1000_hw *); }; @@ -966,6 +979,7 @@ struct e1000_dev_spec_ich8lan { struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; bool nvm_k1_enabled; bool eee_disable; + u16 eee_lp_ability; }; struct e1000_hw { diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index b461c24945e..bbf70ba367d 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -105,6 +105,9 @@ #define E1000_FEXTNVM_SW_CONFIG 1 #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ +#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000 +#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000 + #define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 #define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 @@ -112,6 +115,8 @@ #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define E1000_ICH_RAR_ENTRIES 7 +#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */ +#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */ #define PHY_PAGE_SHIFT 5 #define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ @@ -127,14 +132,22 @@ #define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */ +/* SMBus Control Phy Register */ +#define CV_SMB_CTRL PHY_REG(769, 23) +#define CV_SMB_CTRL_FORCE_SMBUS 0x0001 + /* SMBus Address Phy Register */ #define HV_SMB_ADDR PHY_REG(768, 26) #define HV_SMB_ADDR_MASK 0x007F #define HV_SMB_ADDR_PEC_EN 0x0200 #define HV_SMB_ADDR_VALID 0x0080 +#define HV_SMB_ADDR_FREQ_MASK 0x1100 +#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8 +#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12 /* PHY Power Management Control */ #define HV_PM_CTRL PHY_REG(770, 17) +#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 /* PHY Low Power Idle Control */ #define I82579_LPI_CTRL PHY_REG(772, 20) @@ -147,11 +160,26 @@ #define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ #define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */ #define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ +#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */ +#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */ +#define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */ + +/* Intel Rapid Start Technology Support */ +#define I217_PROXY_CTRL PHY_REG(BM_WUC_PAGE, 70) +#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080 +#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28) +#define I217_SxCTRL_MASK 0x1000 +#define I217_CGFREG PHY_REG(772, 29) +#define I217_CGFREG_MASK 0x0002 +#define I217_MEMPWR PHY_REG(772, 26) +#define I217_MEMPWR_MASK 0x0010 /* Strapping Option Register - RO */ #define E1000_STRAP 0x0000C #define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 #define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 +#define E1000_STRAP_SMT_FREQ_MASK 0x00003000 +#define E1000_STRAP_SMT_FREQ_SHIFT 12 /* OEM Bits Phy Register */ #define HV_OEM_BITS PHY_REG(768, 25) @@ -255,6 +283,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); +static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); +static void 
e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); @@ -283,18 +313,161 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val) #define ew16flash(reg, val) __ew16flash(hw, (reg), (val)) #define ew32flash(reg, val) __ew32flash(hw, (reg), (val)) -static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw) +/** + * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers + * @hw: pointer to the HW structure + * + * Test access to the PHY registers by reading the PHY ID registers. If + * the PHY ID is already known (e.g. resume path) compare it with known ID, + * otherwise assume the read PHY ID is correct if it is valid. + * + * Assumes the sw/fw/hw semaphore is already acquired. + **/ +static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) { - u32 ctrl; + u16 phy_reg; + u32 phy_id; - ctrl = er32(CTRL); - ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; - ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; - ew32(CTRL, ctrl); - e1e_flush(); - udelay(10); - ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; - ew32(CTRL, ctrl); + e1e_rphy_locked(hw, PHY_ID1, &phy_reg); + phy_id = (u32)(phy_reg << 16); + e1e_rphy_locked(hw, PHY_ID2, &phy_reg); + phy_id |= (u32)(phy_reg & PHY_REVISION_MASK); + + if (hw->phy.id) { + if (hw->phy.id == phy_id) + return true; + } else { + if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK)) + hw->phy.id = phy_id; + return true; + } + + return false; +} + +/** + * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds + * @hw: pointer to the HW structure + * + * Workarounds/flow necessary for PHY initialization during driver load + * and resume paths. + **/ +static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) +{ + u32 mac_reg, fwsm = er32(FWSM); + s32 ret_val; + u16 phy_reg; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_dbg("Failed to initialize PHY flow\n"); + return ret_val; + } + + /* + * The MAC-PHY interconnect may be in SMBus mode. If the PHY is + * inaccessible and resetting the PHY is not blocked, toggle the + * LANPHYPC Value bit to force the interconnect to PCIe mode. + */ + switch (hw->mac.type) { + case e1000_pch_lpt: + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + /* + * Before toggling LANPHYPC, see if PHY is accessible by + * forcing MAC to SMBus mode first. 
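For reference, the accessibility check in e1000_phy_is_accessible_pchlan() above boils down to composing one 32-bit id from the two ID registers and applying a sanity filter. A self-contained restatement of that logic, with the ids passed in directly instead of being read over the locked MDIO path and the revision mask written out as a literal:

        #include <stdbool.h>
        #include <stdint.h>

        /* Mask that strips the 4-bit revision nibble from the composed id
         * (the driver's PHY_REVISION_MASK; literal used here for illustration). */
        #define REV_MASK  0xFFFFFFF0u

        /* If an id is already known (resume path) it must match exactly;
         * otherwise any id that is neither all-zeros nor the all-ones
         * failed-read pattern is accepted and remembered. */
        static bool phy_accessible(uint32_t *known_id, uint16_t id1, uint16_t id2)
        {
                uint32_t phy_id = ((uint32_t)id1 << 16) | ((uint32_t)id2 & REV_MASK);

                if (*known_id)
                        return *known_id == phy_id;

                if (phy_id != 0 && phy_id != REV_MASK) {
                        *known_id = phy_id;     /* remember for later compares */
                        return true;
                }
                return false;
        }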
+ */ + mac_reg = er32(CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + + /* fall-through */ + case e1000_pch2lan: + /* + * Gate automatic PHY configuration by hardware on + * non-managed 82579 + */ + if ((hw->mac.type == e1000_pch2lan) && + !(fwsm & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + + if (e1000_phy_is_accessible_pchlan(hw)) { + if (hw->mac.type == e1000_pch_lpt) { + /* Unforce SMBus mode in PHY */ + e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Unforce SMBus mode in MAC */ + mac_reg = er32(CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + } + break; + } + + /* fall-through */ + case e1000_pchlan: + if ((hw->mac.type == e1000_pchlan) && + (fwsm & E1000_ICH_FWSM_FW_VALID)) + break; + + if (hw->phy.ops.check_reset_block(hw)) { + e_dbg("Required LANPHYPC toggle blocked by ME\n"); + break; + } + + e_dbg("Toggling LANPHYPC\n"); + + /* Set Phy Config Counter to 50msec */ + mac_reg = er32(FEXTNVM3); + mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; + mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; + ew32(FEXTNVM3, mac_reg); + + /* Toggle LANPHYPC Value bit */ + mac_reg = er32(CTRL); + mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; + mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; + ew32(CTRL, mac_reg); + e1e_flush(); + udelay(10); + mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; + ew32(CTRL, mac_reg); + e1e_flush(); + if (hw->mac.type < e1000_pch_lpt) { + msleep(50); + } else { + u16 count = 20; + do { + usleep_range(5000, 10000); + } while (!(er32(CTRL_EXT) & + E1000_CTRL_EXT_LPCD) && count--); + } + break; + default: + break; + } + + hw->phy.ops.release(hw); + + /* + * Reset the PHY before any access to it. Doing so, ensures + * that the PHY is in a known good state before we read/write + * PHY registers. The generic reset is sufficient here, + * because we haven't determined the PHY type yet. + */ + ret_val = e1000e_phy_hw_reset_generic(hw); + + /* Ungate automatic PHY configuration on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(fwsm & E1000_ICH_FWSM_FW_VALID)) { + usleep_range(10000, 20000); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + + return ret_val; } /** @@ -324,70 +497,41 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; - if (!hw->phy.ops.check_reset_block(hw)) { - u32 fwsm = er32(FWSM); - - /* - * The MAC-PHY interconnect may still be in SMBus mode after - * Sx->S0. If resetting the PHY is not blocked, toggle the - * LANPHYPC Value bit to force the interconnect to PCIe mode. - */ - e1000_toggle_lanphypc_value_ich8lan(hw); - msleep(50); - - /* - * Gate automatic PHY configuration by hardware on - * non-managed 82579 - */ - if ((hw->mac.type == e1000_pch2lan) && - !(fwsm & E1000_ICH_FWSM_FW_VALID)) - e1000_gate_hw_phy_config_ich8lan(hw, true); - - /* - * Reset the PHY before any access to it. Doing so, ensures - * that the PHY is in a known good state before we read/write - * PHY registers. The generic reset is sufficient here, - * because we haven't determined the PHY type yet. 
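The LANPHYPC toggle above follows a fixed shape: force the value low through the override bit, release the override, then wait for the interconnect to return, either a flat 50 ms sleep on pre-LPT MACs or a bounded poll of the LPCD indication on LPT. A compilable sketch of just that shape, with placeholder bit positions and no-op delay stubs standing in for the MMIO writes, flushes and sleep primitives:

        #include <stdbool.h>
        #include <stdint.h>

        static uint32_t ctrl, ctrl_ext;            /* models CTRL and CTRL_EXT */
        #define LANPHYPC_OVERRIDE  (1u << 11)      /* placeholder bit positions */
        #define LANPHYPC_VALUE     (1u << 10)
        #define LPCD_DONE          (1u << 2)
        static void short_delay_us(unsigned us) { (void)us; }
        static void sleep_range_ms(unsigned ms) { (void)ms; }

        static void toggle_lanphypc(bool is_lpt)
        {
                /* Drive LANPHYPC low via the override, then release it. */
                ctrl |= LANPHYPC_OVERRIDE;
                ctrl &= ~LANPHYPC_VALUE;
                short_delay_us(10);
                ctrl &= ~LANPHYPC_OVERRIDE;

                if (!is_lpt) {
                        sleep_range_ms(50);        /* fixed wait on older MACs */
                } else {
                        int count = 20;            /* bounded poll on LPT */

                        do {
                                sleep_range_ms(5);
                        } while (!(ctrl_ext & LPCD_DONE) && count--);
                }
        }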
- */ - ret_val = e1000e_phy_hw_reset_generic(hw); - if (ret_val) - return ret_val; + phy->id = e1000_phy_unknown; - /* Ungate automatic PHY configuration on non-managed 82579 */ - if ((hw->mac.type == e1000_pch2lan) && - !(fwsm & E1000_ICH_FWSM_FW_VALID)) { - usleep_range(10000, 20000); - e1000_gate_hw_phy_config_ich8lan(hw, false); - } - } + ret_val = e1000_init_phy_workarounds_pchlan(hw); + if (ret_val) + return ret_val; - phy->id = e1000_phy_unknown; - switch (hw->mac.type) { - default: - ret_val = e1000e_get_phy_id(hw); - if (ret_val) - return ret_val; - if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) + if (phy->id == e1000_phy_unknown) + switch (hw->mac.type) { + default: + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + return ret_val; + if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) + break; + /* fall-through */ + case e1000_pch2lan: + case e1000_pch_lpt: + /* + * In case the PHY needs to be in mdio slow mode, + * set slow mode and try to get the PHY id again. + */ + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + return ret_val; + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + return ret_val; break; - /* fall-through */ - case e1000_pch2lan: - /* - * In case the PHY needs to be in mdio slow mode, - * set slow mode and try to get the PHY id again. - */ - ret_val = e1000_set_mdio_slow_mode_hv(hw); - if (ret_val) - return ret_val; - ret_val = e1000e_get_phy_id(hw); - if (ret_val) - return ret_val; - break; - } + } phy->type = e1000e_get_phy_type_from_id(phy->id); switch (phy->type) { case e1000_phy_82577: case e1000_phy_82579: + case e1000_phy_i217: phy->ops.check_polarity = e1000_check_polarity_82577; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82577; @@ -572,7 +716,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) /* Adaptive IFS supported */ mac->adaptive_ifs = true; - /* LED operations */ + /* LED and other operations */ switch (mac->type) { case e1000_ich8lan: case e1000_ich9lan: @@ -591,8 +735,12 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) mac->ops.led_on = e1000_led_on_ich8lan; mac->ops.led_off = e1000_led_off_ich8lan; break; - case e1000_pchlan: case e1000_pch2lan: + mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES; + mac->ops.rar_set = e1000_rar_set_pch2lan; + /* fall-through */ + case e1000_pch_lpt: + case e1000_pchlan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; /* ID LED init */ @@ -609,12 +757,20 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) break; } + if (mac->type == e1000_pch_lpt) { + mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; + mac->ops.rar_set = e1000_rar_set_pch_lpt; + } + /* Enable PCS Lock-loss workaround for ICH8 */ if (mac->type == e1000_ich8lan) e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); - /* Gate automatic PHY configuration by hardware on managed 82579 */ - if ((mac->type == e1000_pch2lan) && + /* + * Gate automatic PHY configuration by hardware on managed + * 82579 and i217 + */ + if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) && (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) e1000_gate_hw_phy_config_ich8lan(hw, true); @@ -630,22 +786,50 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) **/ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) { + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; s32 ret_val = 0; u16 phy_reg; - if (hw->phy.type != e1000_phy_82579) + if ((hw->phy.type != e1000_phy_82579) && + (hw->phy.type != e1000_phy_i217)) return 0; ret_val = 
e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); if (ret_val) return ret_val; - if (hw->dev_spec.ich8lan.eee_disable) + if (dev_spec->eee_disable) phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK; else phy_reg |= I82579_LPI_CTRL_ENABLE_MASK; - return e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); + ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); + if (ret_val) + return ret_val; + + if ((hw->phy.type == e1000_phy_i217) && !dev_spec->eee_disable) { + /* Save off link partner's EEE ability */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, + I217_EEE_LP_ABILITY); + if (ret_val) + goto release; + e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability); + + /* + * EEE is not supported in 100Half, so ignore partner's EEE + * in 100 ability if full-duplex is not advertised. + */ + e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg); + if (!(phy_reg & NWAY_LPAR_100TX_FD_CAPS)) + dev_spec->eee_lp_ability &= ~I217_EEE_100_SUPPORTED; +release: + hw->phy.ops.release(hw); + } + + return 0; } /** @@ -687,6 +871,9 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) return ret_val; } + /* Clear link partner's EEE ability */ + hw->dev_spec.ich8lan.eee_lp_ability = 0; + if (!link) return 0; /* No link detected */ @@ -782,6 +969,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) break; case e1000_pchlan: case e1000_pch2lan: + case e1000_pch_lpt: rc = e1000_init_phy_params_pchlan(hw); break; default: @@ -967,6 +1155,145 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) } /** + * e1000_rar_set_pch2lan - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. For 82579, RAR[0] is the base address register that is to + * contain the MAC address but RAR[1-6] are reserved for manageability (ME). + * Use SHRA[0-3] in place of those reserved for ME. + **/ +static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* + * HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + if (index == 0) { + ew32(RAL(index), rar_low); + e1e_flush(); + ew32(RAH(index), rar_high); + e1e_flush(); + return; + } + + if (index < hw->mac.rar_entry_count) { + s32 ret_val; + + ret_val = e1000_acquire_swflag_ich8lan(hw); + if (ret_val) + goto out; + + ew32(SHRAL(index - 1), rar_low); + e1e_flush(); + ew32(SHRAH(index - 1), rar_high); + e1e_flush(); + + e1000_release_swflag_ich8lan(hw); + + /* verify the register updates */ + if ((er32(SHRAL(index - 1)) == rar_low) && + (er32(SHRAH(index - 1)) == rar_high)) + return; + + e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n", + (index - 1), er32(FWSM)); + } + +out: + e_dbg("Failed to write receive address at index %d\n", index); +} + +/** + * e1000_rar_set_pch_lpt - Set receive address registers + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address register array at index to the address passed + * in by addr. 
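Both rar_set variants share the same packing step before touching RAR or SHRA: fold the network-order MAC bytes into two little-endian register words and set the valid bit only for a non-zero address. The same step, extracted as a standalone helper (the valid bit is bit 31, as in E1000_RAH_AV):

        #include <stdint.h>

        #define RAR_AV  (1u << 31)      /* "address valid" bit */

        static void pack_rar(const uint8_t addr[6],
                             uint32_t *rar_low, uint32_t *rar_high)
        {
                /* Network byte order in, little-endian register values out. */
                *rar_low  = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
                            ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
                *rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);

                /* An all-zero MAC address is left marked invalid. */
                if (*rar_low || *rar_high)
                        *rar_high |= RAR_AV;
        }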
For LPT, RAR[0] is the base address register that is to + * contain the MAC address. SHRA[0-10] are the shared receive address + * registers that are shared between the Host and manageability engine (ME). + **/ +static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + u32 wlock_mac; + + /* + * HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + if (index == 0) { + ew32(RAL(index), rar_low); + e1e_flush(); + ew32(RAH(index), rar_high); + e1e_flush(); + return; + } + + /* + * The manageability engine (ME) can lock certain SHRAR registers that + * it is using - those registers are unavailable for use. + */ + if (index < hw->mac.rar_entry_count) { + wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK; + wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; + + /* Check if all SHRAR registers are locked */ + if (wlock_mac == 1) + goto out; + + if ((wlock_mac == 0) || (index <= wlock_mac)) { + s32 ret_val; + + ret_val = e1000_acquire_swflag_ich8lan(hw); + + if (ret_val) + goto out; + + ew32(SHRAL_PCH_LPT(index - 1), rar_low); + e1e_flush(); + ew32(SHRAH_PCH_LPT(index - 1), rar_high); + e1e_flush(); + + e1000_release_swflag_ich8lan(hw); + + /* verify the register updates */ + if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) && + (er32(SHRAH_PCH_LPT(index - 1)) == rar_high)) + return; + } + } + +out: + e_dbg("Failed to write receive address at index %d\n", index); +} + +/** * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked * @hw: pointer to the HW structure * @@ -994,6 +1321,8 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw) { u16 phy_data; u32 strap = er32(STRAP); + u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >> + E1000_STRAP_SMT_FREQ_SHIFT; s32 ret_val = 0; strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; @@ -1006,6 +1335,19 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw) phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; + if (hw->phy.type == e1000_phy_i217) { + /* Restore SMBus frequency */ + if (freq--) { + phy_data &= ~HV_SMB_ADDR_FREQ_MASK; + phy_data |= (freq & (1 << 0)) << + HV_SMB_ADDR_FREQ_LOW_SHIFT; + phy_data |= (freq & (1 << 1)) << + (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1); + } else { + e_dbg("Unsupported SMB frequency in PHY\n"); + } + } + return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); } @@ -1043,6 +1385,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) /* Fall-thru */ case e1000_pchlan: case e1000_pch2lan: + case e1000_pch_lpt: sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; break; default: @@ -1062,10 +1405,9 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) * extended configuration before SW configuration */ data = er32(EXTCNF_CTRL); - if (!(hw->mac.type == e1000_pch2lan)) { - if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) - goto release; - } + if ((hw->mac.type < e1000_pch2lan) && + (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)) + goto release; cnf_size = er32(EXTCNF_SIZE); cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; @@ -1076,9 +1418,9 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; cnf_base_addr >>= 
E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; - if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && - (hw->mac.type == e1000_pchlan)) || - (hw->mac.type == e1000_pch2lan)) { + if (((hw->mac.type == e1000_pchlan) && + !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) || + (hw->mac.type > e1000_pchlan)) { /* * HW configures the SMBus address and LEDs when the * OEM and LCD Write Enable bits are set in the NVM. @@ -1121,8 +1463,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) reg_addr &= PHY_REG_MASK; reg_addr |= phy_page; - ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, - reg_data); + ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data); if (ret_val) goto release; } @@ -1159,8 +1500,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ if (link) { if (hw->phy.type == e1000_phy_82578) { - ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, - &status_reg); + ret_val = e1e_rphy_locked(hw, BM_CS_STATUS, + &status_reg); if (ret_val) goto release; @@ -1175,8 +1516,7 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) } if (hw->phy.type == e1000_phy_82577) { - ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, - &status_reg); + ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg); if (ret_val) goto release; @@ -1191,15 +1531,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) } /* Link stall fix for link up */ - ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), - 0x0100); + ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100); if (ret_val) goto release; } else { /* Link stall fix for link down */ - ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), - 0x4100); + ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100); if (ret_val) goto release; } @@ -1279,14 +1617,14 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) u32 mac_reg; u16 oem_reg; - if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan)) + if (hw->mac.type < e1000_pchlan) return ret_val; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; - if (!(hw->mac.type == e1000_pch2lan)) { + if (hw->mac.type == e1000_pchlan) { mac_reg = er32(EXTCNF_CTRL); if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) goto release; @@ -1298,7 +1636,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) mac_reg = er32(PHY_CTRL); - ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); + ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg); if (ret_val) goto release; @@ -1325,7 +1663,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) !hw->phy.ops.check_reset_block(hw)) oem_reg |= HV_OEM_BITS_RESTART_AN; - ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); + ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg); release: hw->phy.ops.release(hw); @@ -1421,11 +1759,10 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; - ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data); + ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data); if (ret_val) goto release; - ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG, - phy_data & 0x00FF); + ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF); release: hw->phy.ops.release(hw); @@ -1484,7 +1821,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) u32 mac_reg; u16 
i; - if (hw->mac.type != e1000_pch2lan) + if (hw->mac.type < e1000_pch2lan) return 0; /* disable Rx path while enabling/disabling workaround */ @@ -1657,20 +1994,18 @@ static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; - ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, - I82579_MSE_THRESHOLD); + ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_THRESHOLD); if (ret_val) goto release; /* set MSE higher to enable link to stay up when noise is high */ - ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0034); + ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0034); if (ret_val) goto release; - ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, - I82579_MSE_LINK_DOWN); + ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_LINK_DOWN); if (ret_val) goto release; /* drop link after 5 times MSE threshold was reached */ - ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0005); + ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0005); release: hw->phy.ops.release(hw); @@ -1708,8 +2043,18 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) return ret_val; if (status_reg & HV_M_STATUS_SPEED_1000) { + u16 pm_phy_reg; + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; + /* LV 1G Packet drop issue wa */ + ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg); + if (ret_val) + return ret_val; + pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA; + ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg); + if (ret_val) + return ret_val; } else { mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; @@ -1733,7 +2078,7 @@ static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) { u32 extcnf_ctrl; - if (hw->mac.type != e1000_pch2lan) + if (hw->mac.type < e1000_pch2lan) return; extcnf_ctrl = er32(EXTCNF_CTRL); @@ -1835,12 +2180,10 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; - ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, - I82579_LPI_UPDATE_TIMER); + ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, + I82579_LPI_UPDATE_TIMER); if (!ret_val) - ret_val = hw->phy.ops.write_reg_locked(hw, - I82579_EMI_DATA, - 0x1387); + ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x1387); hw->phy.ops.release(hw); } @@ -2213,7 +2556,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); /* Check if the flash descriptor is valid */ - if (hsfsts.hsf_status.fldesvalid == 0) { + if (!hsfsts.hsf_status.fldesvalid) { e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n"); return -E1000_ERR_NVM; } @@ -2233,7 +2576,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) * completed. */ - if (hsfsts.hsf_status.flcinprog == 0) { + if (!hsfsts.hsf_status.flcinprog) { /* * There is no cycle running at present, * so we can start a cycle. 
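The flash-cycle initialization above is essentially a guarded start: begin immediately if no cycle is in flight, otherwise poll a bounded number of times for the previous software-owned cycle to drain. A minimal model of that guard, using a plain status mask and a stub register read in place of the driver's HSFSTS bitfield union:

        #include <stdbool.h>
        #include <stdint.h>

        #define FLCINPROG     (1u << 5)  /* placeholder for the in-progress flag */
        #define READ_TIMEOUT  500        /* placeholder poll budget */

        /* Stub for er16flash(ICH_FLASH_HSFSTS); always idle in this model. */
        static uint16_t read_hsfsts(void) { return 0; }

        static bool flash_cycle_may_start(void)
        {
                int i;

                /* No cycle running at present, so we can start one. */
                if (!(read_hsfsts() & FLCINPROG))
                        return true;

                /* Otherwise wait, bounded, for the previous cycle to finish. */
                for (i = 0; i < READ_TIMEOUT; i++) {
                        if (!(read_hsfsts() & FLCINPROG))
                                return true;
                        /* the driver delays 1us between polls */
                }
                return false;
        }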
@@ -2251,7 +2594,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) */ for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); - if (hsfsts.hsf_status.flcinprog == 0) { + if (!hsfsts.hsf_status.flcinprog) { ret_val = 0; break; } @@ -2293,12 +2636,12 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) /* wait till FDONE bit is set to 1 */ do { hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); - if (hsfsts.hsf_status.flcdone == 1) + if (hsfsts.hsf_status.flcdone) break; udelay(1); } while (i++ < timeout); - if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) + if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr) return 0; return -E1000_ERR_NVM; @@ -2409,10 +2752,10 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, * ICH_FLASH_CYCLE_REPEAT_COUNT times. */ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); - if (hsfsts.hsf_status.flcerr == 1) { + if (hsfsts.hsf_status.flcerr) { /* Repeat for some time before giving up. */ continue; - } else if (hsfsts.hsf_status.flcdone == 0) { + } else if (!hsfsts.hsf_status.flcdone) { e_dbg("Timeout error - flash cycle did not complete.\n"); break; } @@ -2642,7 +2985,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) if (ret_val) return ret_val; - if ((data & 0x40) == 0) { + if (!(data & 0x40)) { data |= 0x40; ret_val = e1000_write_nvm(hw, 0x19, 1, &data); if (ret_val) @@ -2760,10 +3103,10 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. */ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); - if (hsfsts.hsf_status.flcerr == 1) + if (hsfsts.hsf_status.flcerr) /* Repeat for some time before giving up. */ continue; - if (hsfsts.hsf_status.flcdone == 0) { + if (!hsfsts.hsf_status.flcdone) { e_dbg("Timeout error - flash cycle did not complete.\n"); break; } @@ -2915,10 +3258,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) * a few more times else Done */ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); - if (hsfsts.hsf_status.flcerr == 1) + if (hsfsts.hsf_status.flcerr) /* repeat for some time before giving up */ continue; - else if (hsfsts.hsf_status.flcdone == 0) + else if (!hsfsts.hsf_status.flcdone) return ret_val; } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); } @@ -3060,8 +3403,8 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw) static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; - u16 reg; - u32 ctrl, kab; + u16 kum_cfg; + u32 ctrl, reg; s32 ret_val; /* @@ -3095,12 +3438,12 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) } if (hw->mac.type == e1000_pchlan) { - /* Save the NVM K1 bit setting*/ - ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, ®); + /* Save the NVM K1 bit setting */ + ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg); if (ret_val) return ret_val; - if (reg & E1000_NVM_K1_ENABLE) + if (kum_cfg & E1000_NVM_K1_ENABLE) dev_spec->nvm_k1_enabled = true; else dev_spec->nvm_k1_enabled = false; @@ -3130,6 +3473,14 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) /* cannot issue a flush here because it hangs the hardware */ msleep(20); + /* Set Phy Config Counter to 50msec */ + if (hw->mac.type == e1000_pch2lan) { + reg = er32(FEXTNVM3); + reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; + reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; + ew32(FEXTNVM3, reg); + } + if (!ret_val) 
clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); @@ -3154,9 +3505,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) ew32(IMC, 0xffffffff); er32(ICR); - kab = er32(KABGTXD); - kab |= E1000_KABGTXD_BGSQLBIAS; - ew32(KABGTXD, kab); + reg = er32(KABGTXD); + reg |= E1000_KABGTXD_BGSQLBIAS; + ew32(KABGTXD, reg); return 0; } @@ -3309,6 +3660,13 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) */ reg = er32(RFCTL); reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); + + /* + * Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. + */ + if (hw->mac.type == e1000_ich8lan) + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); ew32(RFCTL, reg); } @@ -3359,6 +3717,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) ew32(FCTTV, hw->fc.pause_time); if ((hw->phy.type == e1000_phy_82578) || (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_i217) || (hw->phy.type == e1000_phy_82577)) { ew32(FCRTV_PCH, hw->fc.refresh_time); @@ -3422,6 +3781,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) break; case e1000_phy_82577: case e1000_phy_82579: + case e1000_phy_i217: ret_val = e1000_copper_link_setup_82577(hw); if (ret_val) return ret_val; @@ -3668,14 +4028,88 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) * the LPLU setting in the NVM or custom setting. For PCH and newer parts, * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also * needs to be written. + * Parts that support (and are linked to a partner which support) EEE in + * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power + * than 10Mbps w/o EEE. **/ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) { + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 phy_ctrl; s32 ret_val; phy_ctrl = er32(PHY_CTRL); phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; + if (hw->phy.type == e1000_phy_i217) { + u16 phy_reg; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (!dev_spec->eee_disable) { + u16 eee_advert; + + ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, + I217_EEE_ADVERTISEMENT); + if (ret_val) + goto release; + e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert); + + /* + * Disable LPLU if both link partners support 100BaseT + * EEE and 100Full is advertised on both ends of the + * link. + */ + if ((eee_advert & I217_EEE_100_SUPPORTED) && + (dev_spec->eee_lp_ability & + I217_EEE_100_SUPPORTED) && + (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) + phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU | + E1000_PHY_CTRL_NOND0A_LPLU); + } + + /* + * For i217 Intel Rapid Start Technology support, + * when the system is going into Sx and no manageability engine + * is present, the driver must configure proxy to reset only on + * power good. LPI (Low Power Idle) state must also reset only + * on power good, as well as the MTA (Multicast table array). + * The SMBus release must also be disabled on LCD reset. + */ + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + + /* Enable proxy to reset only on power good. */ + e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg); + phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE; + e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg); + + /* + * Set bit enable LPI (EEE) to reset only on + * power good. + */ + e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg); + phy_reg |= I217_SxCTRL_MASK; + e1e_wphy_locked(hw, I217_SxCTRL, phy_reg); + + /* Disable the SMB release on LCD reset. 
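The suspend-time LPLU decision for i217 above reduces to a three-way AND over the EEE abilities and the advertised link modes. A small standalone predicate capturing it; the 100-full advertisement bit is a placeholder value, while the EEE bit matches the I217_EEE_100_SUPPORTED definition added earlier in this patch:

        #include <stdbool.h>
        #include <stdint.h>

        #define EEE_100_SUPPORTED  (1u << 1)   /* as I217_EEE_100_SUPPORTED */
        #define ADV_100_FULL       (1u << 3)   /* placeholder for ADVERTISE_100_FULL */

        /* Keep LPLU disabled for the suspend state only when both link partners
         * advertise 100BaseT EEE and 100Mbps full duplex is advertised, since
         * 100Mbps with EEE is expected to draw less power than 10Mbps without. */
        static bool keep_lplu_disabled(uint16_t own_eee_advert,
                                       uint16_t lp_eee_ability,
                                       uint32_t autoneg_advertised)
        {
                return (own_eee_advert & EEE_100_SUPPORTED) &&
                       (lp_eee_ability & EEE_100_SUPPORTED) &&
                       (autoneg_advertised & ADV_100_FULL);
        }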
*/ + e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); + phy_reg &= ~I217_MEMPWR; + e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); + } + + /* + * Enable MTA to reset for Intel Rapid Start Technology + * Support + */ + e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); + phy_reg |= I217_CGFREG_MASK; + e1e_wphy_locked(hw, I217_CGFREG, phy_reg); + +release: + hw->phy.ops.release(hw); + } +out: ew32(PHY_CTRL, phy_ctrl); if (hw->mac.type == e1000_ich8lan) @@ -3704,44 +4138,61 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) * on which PHY resets are not blocked, if the PHY registers cannot be * accessed properly by the s/w toggle the LANPHYPC value to power cycle * the PHY. + * On i217, setup Intel Rapid Start Technology. **/ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) { - u16 phy_id1, phy_id2; s32 ret_val; - if ((hw->mac.type != e1000_pch2lan) || - hw->phy.ops.check_reset_block(hw)) + if (hw->mac.type < e1000_pch2lan) return; - ret_val = hw->phy.ops.acquire(hw); + ret_val = e1000_init_phy_workarounds_pchlan(hw); if (ret_val) { - e_dbg("Failed to acquire PHY semaphore in resume\n"); + e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val); return; } - /* Test access to the PHY registers by reading the ID regs */ - ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1); - if (ret_val) - goto release; - ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2); - if (ret_val) - goto release; - - if (hw->phy.id == ((u32)(phy_id1 << 16) | - (u32)(phy_id2 & PHY_REVISION_MASK))) - goto release; + /* + * For i217 Intel Rapid Start Technology support when the system + * is transitioning from Sx and no manageability engine is present + * configure SMBus to restore on reset, disable proxy, and enable + * the reset on MTA (Multicast table array). 
+ */ + if (hw->phy.type == e1000_phy_i217) { + u16 phy_reg; - e1000_toggle_lanphypc_value_ich8lan(hw); + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_dbg("Failed to setup iRST\n"); + return; + } - hw->phy.ops.release(hw); - msleep(50); - e1000_phy_hw_reset(hw); - msleep(50); - return; + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + /* + * Restore clear on SMB if no manageability engine + * is present + */ + ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); + if (ret_val) + goto release; + phy_reg |= I217_MEMPWR_MASK; + e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); + /* Disable Proxy */ + e1e_wphy_locked(hw, I217_PROXY_CTRL, 0); + } + /* Enable reset on MTA */ + ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); + if (ret_val) + goto release; + phy_reg &= ~I217_CGFREG_MASK; + e1e_wphy_locked(hw, I217_CGFREG, phy_reg); release: - hw->phy.ops.release(hw); + if (ret_val) + e_dbg("Error %d in resume workarounds\n", ret_val); + hw->phy.ops.release(hw); + } } /** @@ -3921,7 +4372,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) /* If EEPROM is not marked present, init the IGP 3 PHY manually */ if (hw->mac.type <= e1000_ich9lan) { - if (((er32(EECD) & E1000_EECD_PRES) == 0) && + if (!(er32(EECD) & E1000_EECD_PRES) && (hw->phy.type == e1000_phy_igp_3)) { e1000e_phy_init_script_igp3(hw); } @@ -3982,6 +4433,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) /* Clear PHY statistics registers */ if ((hw->phy.type == e1000_phy_82578) || (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_i217) || (hw->phy.type == e1000_phy_82577)) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) @@ -4026,6 +4478,7 @@ static const struct e1000_mac_operations ich8_mac_ops = { .setup_physical_interface= e1000_setup_copper_link_ich8lan, /* id_led_init dependent on mac type */ .config_collision_dist = e1000e_config_collision_dist_generic, + .rar_set = e1000e_rar_set_generic, }; static const struct e1000_phy_operations ich8_phy_ops = { @@ -4140,3 +4593,22 @@ const struct e1000_info e1000_pch2_info = { .phy_ops = &ich8_phy_ops, .nvm_ops = &ich8_nvm_ops, }; + +const struct e1000_info e1000_pch_lpt_info = { + .mac = e1000_pch_lpt, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index decad98c105..026e8b3ab52 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -143,12 +143,12 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) /* Setup the receive address */ e_dbg("Programming MAC Address into RAR[0]\n"); - e1000e_rar_set(hw, hw->mac.addr, 0); + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); /* Zero out the other (rar_entry_count - 1) receive addresses */ e_dbg("Clearing RAR[1-%u]\n", rar_count - 1); for (i = 1; i < rar_count; i++) - e1000e_rar_set(hw, mac_addr, i); + hw->mac.ops.rar_set(hw, mac_addr, i); } /** @@ -215,13 +215,13 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) * same as the normal permanent MAC address stored by the HW into the * RAR. Do this by mapping this address into RAR0. 
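From here on the patch routes every receive-address write through hw->mac.ops.rar_set instead of calling e1000e_rar_set() directly, so the PCH parts can install their SHRA-aware variants while other MACs keep the generic one. A reduced, compilable model of that dispatch pattern, with illustrative names only:

        #include <stdint.h>
        #include <stdio.h>

        struct fake_hw;

        struct fake_mac_ops {
                void (*rar_set)(struct fake_hw *hw, const uint8_t *addr, uint32_t index);
        };

        struct fake_hw {
                struct fake_mac_ops ops;
        };

        /* Default implementation, analogous to e1000e_rar_set_generic(). */
        static void rar_set_generic(struct fake_hw *hw, const uint8_t *addr, uint32_t index)
        {
                (void)hw;
                printf("generic RAR[%u] <- %02x:%02x:...\n", index, addr[0], addr[1]);
        }

        /* Device-specific override, analogous to the pch2lan/pch_lpt variants. */
        static void rar_set_pch_variant(struct fake_hw *hw, const uint8_t *addr, uint32_t index)
        {
                (void)hw;
                printf("SHRA-aware RAR[%u] <- %02x:%02x:...\n", index, addr[0], addr[1]);
        }

        int main(void)
        {
                struct fake_hw hw = { .ops = { .rar_set = rar_set_generic } };
                const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };

                hw.ops.rar_set(&hw, mac, 0);           /* default path */

                hw.ops.rar_set = rar_set_pch_variant;  /* per-device init override */
                hw.ops.rar_set(&hw, mac, 0);
                return 0;
        }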
*/ - e1000e_rar_set(hw, alt_mac_addr, 0); + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); return 0; } /** - * e1000e_rar_set - Set receive address register + * e1000e_rar_set_generic - Set receive address register * @hw: pointer to the HW structure * @addr: pointer to the receive address * @index: receive address array register @@ -229,7 +229,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) * Sets the receive address array register at index to the address passed * in by addr. **/ -void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; @@ -681,7 +681,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) return ret_val; } - if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) + if (!(nvm_data & NVM_WORD0F_PAUSE_MASK)) hw->fc.requested_mode = e1000_fc_none; else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) hw->fc.requested_mode = e1000_fc_tx_pause; diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c index 473f8e71151..bacc950fc68 100644 --- a/drivers/net/ethernet/intel/e1000e/manage.c +++ b/drivers/net/ethernet/intel/e1000e/manage.c @@ -85,7 +85,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) /* Check that the host interface is enabled. */ hicr = er32(HICR); - if ((hicr & E1000_HICR_EN) == 0) { + if (!(hicr & E1000_HICR_EN)) { e_dbg("E1000_HOST_EN bit disabled.\n"); return -E1000_ERR_HOST_INTERFACE_COMMAND; } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 9520a6ac1f3..a4b0435b00d 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -56,7 +56,7 @@ #define DRV_EXTRAVERSION "-k" -#define DRV_VERSION "1.9.5" DRV_EXTRAVERSION +#define DRV_VERSION "2.0.0" DRV_EXTRAVERSION char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -79,6 +79,7 @@ static const struct e1000_info *e1000_info_tbl[] = { [board_ich10lan] = &e1000_ich10_info, [board_pchlan] = &e1000_pch_info, [board_pch2lan] = &e1000_pch2_info, + [board_pch_lpt] = &e1000_pch_lpt_info, }; struct e1000_reg_info { @@ -110,14 +111,14 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { /* Rx Registers */ {E1000_RCTL, "RCTL"}, - {E1000_RDLEN, "RDLEN"}, - {E1000_RDH, "RDH"}, - {E1000_RDT, "RDT"}, + {E1000_RDLEN(0), "RDLEN"}, + {E1000_RDH(0), "RDH"}, + {E1000_RDT(0), "RDT"}, {E1000_RDTR, "RDTR"}, {E1000_RXDCTL(0), "RXDCTL"}, {E1000_ERT, "ERT"}, - {E1000_RDBAL, "RDBAL"}, - {E1000_RDBAH, "RDBAH"}, + {E1000_RDBAL(0), "RDBAL"}, + {E1000_RDBAH(0), "RDBAH"}, {E1000_RDFH, "RDFH"}, {E1000_RDFT, "RDFT"}, {E1000_RDFHS, "RDFHS"}, @@ -126,11 +127,11 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { /* Tx Registers */ {E1000_TCTL, "TCTL"}, - {E1000_TDBAL, "TDBAL"}, - {E1000_TDBAH, "TDBAH"}, - {E1000_TDLEN, "TDLEN"}, - {E1000_TDH, "TDH"}, - {E1000_TDT, "TDT"}, + {E1000_TDBAL(0), "TDBAL"}, + {E1000_TDBAH(0), "TDBAH"}, + {E1000_TDLEN(0), "TDLEN"}, + {E1000_TDH(0), "TDH"}, + {E1000_TDT(0), "TDT"}, {E1000_TIDV, "TIDV"}, {E1000_TXDCTL(0), "TXDCTL"}, {E1000_TADV, "TADV"}, @@ -538,43 +539,15 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, adapter->hw_csum_good++; } -/** - * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa() - * @hw: pointer to the HW structure - * @tail: address of tail descriptor register - * @i: value to write to tail descriptor register - 
* - * When updating the tail register, the ME could be accessing Host CSR - * registers at the same time. Normally, this is handled in h/w by an - * arbiter but on some parts there is a bug that acknowledges Host accesses - * later than it should which could result in the descriptor register to - * have an incorrect value. Workaround this by checking the FWSM register - * which has bit 24 set while ME is accessing Host CSR registers, wait - * if it is set and try again a number of times. - **/ -static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, void __iomem *tail, - unsigned int i) -{ - unsigned int j = 0; - - while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) && - (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI)) - udelay(50); - - writel(i, tail); - - if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail))) - return E1000_ERR_SWFW_SYNC; - - return 0; -} - static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) { struct e1000_adapter *adapter = rx_ring->adapter; struct e1000_hw *hw = &adapter->hw; + s32 ret_val = __ew32_prepare(hw); - if (e1000e_update_tail_wa(hw, rx_ring->tail, i)) { + writel(i, rx_ring->tail); + + if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { u32 rctl = er32(RCTL); ew32(RCTL, rctl & ~E1000_RCTL_EN); e_err("ME firmware caused invalid RDT - resetting\n"); @@ -586,8 +559,11 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) { struct e1000_adapter *adapter = tx_ring->adapter; struct e1000_hw *hw = &adapter->hw; + s32 ret_val = __ew32_prepare(hw); + + writel(i, tx_ring->tail); - if (e1000e_update_tail_wa(hw, tx_ring->tail, i)) { + if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { u32 tctl = er32(TCTL); ew32(TCTL, tctl & ~E1000_TCTL_EN); e_err("ME firmware caused invalid TDT - resetting\n"); @@ -1053,7 +1029,8 @@ static void e1000_print_hw_hang(struct work_struct *work) if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) { - /* May be block on write-back, flush and detect again + /* + * May be block on write-back, flush and detect again * flush pending descriptor writebacks to memory */ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); @@ -1108,6 +1085,10 @@ static void e1000_print_hw_hang(struct work_struct *work) phy_1000t_status, phy_ext_status, pci_status); + + /* Suggest workaround for known h/w issue */ + if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) + e_err("Try turning off Tx pause (flow control) via ethtool\n"); } /** @@ -1645,7 +1626,10 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) adapter->flags2 &= ~FLAG2_IS_DISCARDING; writel(0, rx_ring->head); - writel(0, rx_ring->tail); + if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(rx_ring, 0); + else + writel(0, rx_ring->tail); } static void e1000e_downshift_workaround(struct work_struct *work) @@ -2318,7 +2302,10 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) tx_ring->next_to_clean = 0; writel(0, tx_ring->head); - writel(0, tx_ring->tail); + if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_tdt_wa(tx_ring, 0); + else + writel(0, tx_ring->tail); } /** @@ -2530,33 +2517,31 @@ err: } /** - * e1000_clean - NAPI Rx polling callback + * e1000e_poll - NAPI Rx polling callback * @napi: struct associated with this polling callback - * @budget: amount of packets driver is allowed to process this poll + * @weight: number of packets driver is allowed to process this poll **/ -static int e1000_clean(struct napi_struct *napi, int budget) 
+static int e1000e_poll(struct napi_struct *napi, int weight) { - struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, + napi); struct e1000_hw *hw = &adapter->hw; struct net_device *poll_dev = adapter->netdev; int tx_cleaned = 1, work_done = 0; adapter = netdev_priv(poll_dev); - if (adapter->msix_entries && - !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) - goto clean_rx; - - tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); + if (!adapter->msix_entries || + (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) + tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); -clean_rx: - adapter->clean_rx(adapter->rx_ring, &work_done, budget); + adapter->clean_rx(adapter->rx_ring, &work_done, weight); if (!tx_cleaned) - work_done = budget; + work_done = weight; - /* If budget not fully consumed, exit the polling mode */ - if (work_done < budget) { + /* If weight not fully consumed, exit the polling mode */ + if (work_done < weight) { if (adapter->itr_setting & 3) e1000_set_itr(adapter); napi_complete(napi); @@ -2800,13 +2785,13 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) /* Setup the HW Tx Head and Tail descriptor pointers */ tdba = tx_ring->dma; tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); - ew32(TDBAL, (tdba & DMA_BIT_MASK(32))); - ew32(TDBAH, (tdba >> 32)); - ew32(TDLEN, tdlen); - ew32(TDH, 0); - ew32(TDT, 0); - tx_ring->head = adapter->hw.hw_addr + E1000_TDH; - tx_ring->tail = adapter->hw.hw_addr + E1000_TDT; + ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32))); + ew32(TDBAH(0), (tdba >> 32)); + ew32(TDLEN(0), tdlen); + ew32(TDH(0), 0); + ew32(TDT(0), 0); + tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); + tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); /* Set the Tx Interrupt Delay register */ ew32(TIDV, adapter->tx_int_delay); @@ -2879,8 +2864,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) u32 rctl, rfctl; u32 pages = 0; - /* Workaround Si errata on 82579 - configure jumbo frame flow */ - if (hw->mac.type == e1000_pch2lan) { + /* Workaround Si errata on PCHx - configure jumbo frame flow */ + if (hw->mac.type >= e1000_pch2lan) { s32 ret_val; if (adapter->netdev->mtu > ETH_DATA_LEN) @@ -2955,6 +2940,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) /* Enable Extended Status in all Receive Descriptors */ rfctl = er32(RFCTL); rfctl |= E1000_RFCTL_EXTEN; + ew32(RFCTL, rfctl); /* * 82571 and greater support packet-split where the protocol @@ -2980,13 +2966,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) if (adapter->rx_ps_pages) { u32 psrctl = 0; - /* - * disable packet split support for IPv6 extension headers, - * because some malformed IPv6 headers can hang the Rx - */ - rfctl |= (E1000_RFCTL_IPV6_EX_DIS | - E1000_RFCTL_NEW_IPV6_EXT_DIS); - /* Enable Packet split descriptors */ rctl |= E1000_RCTL_DTYP_PS; @@ -3025,7 +3004,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) */ } - ew32(RFCTL, rfctl); ew32(RCTL, rctl); /* just started the receive unit, no need to restart */ adapter->flags &= ~FLAG_RX_RESTART_NOW; @@ -3110,13 +3088,13 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) * the Base and Length of the Rx Descriptor Ring */ rdba = rx_ring->dma; - ew32(RDBAL, (rdba & DMA_BIT_MASK(32))); - ew32(RDBAH, (rdba >> 32)); - ew32(RDLEN, rdlen); - ew32(RDH, 0); - ew32(RDT, 0); - rx_ring->head = adapter->hw.hw_addr + E1000_RDH; - rx_ring->tail = adapter->hw.hw_addr + E1000_RDT; + 
ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32))); + ew32(RDBAH(0), (rdba >> 32)); + ew32(RDLEN(0), rdlen); + ew32(RDH(0), 0); + ew32(RDT(0), 0); + rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); + rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); /* Enable Receive Checksum Offload for TCP and UDP */ rxcsum = er32(RXCSUM); @@ -3229,7 +3207,7 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev) netdev_for_each_uc_addr(ha, netdev) { if (!rar_entries) break; - e1000e_rar_set(hw, ha->addr, rar_entries--); + hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); count++; } } @@ -3510,6 +3488,7 @@ void e1000e_reset(struct e1000_adapter *adapter) fc->refresh_time = 0x1000; break; case e1000_pch2lan: + case e1000_pch_lpt: fc->high_water = 0x05C20; fc->low_water = 0x05048; fc->pause_time = 0x0650; @@ -4038,6 +4017,7 @@ static int e1000_close(struct net_device *netdev) static int e1000_set_mac(struct net_device *netdev, void *p) { struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) @@ -4046,7 +4026,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p) memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); - e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); + hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { /* activate the work around */ @@ -4060,9 +4040,8 @@ static int e1000_set_mac(struct net_device *netdev, void *p) * are dropped. Eventually the LAA will be in RAR[0] and * RAR[14] */ - e1000e_rar_set(&adapter->hw, - adapter->hw.mac.addr, - adapter->hw.mac.rar_entry_count - 1); + hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, + adapter->hw.mac.rar_entry_count - 1); } return 0; @@ -4641,7 +4620,7 @@ link_up: * reset from the other port. Set the appropriate LAA in RAR[0] */ if (e1000e_get_laa_state_82571(hw)) - e1000e_rar_set(hw, adapter->hw.mac.addr, 0); + hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0); if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) e1000e_check_82574_phy_workaround(adapter); @@ -5151,6 +5130,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, /* if count is 0 then mapping error has occurred */ count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss); if (count) { + skb_tx_timestamp(skb); + netdev_sent_queue(netdev, skb->len); e1000_tx_queue(tx_ring, tx_flags, count); /* Make sure there is space in the ring for the next send. 
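One note on the transmit hunk above: skb_tx_timestamp() requests a software transmit timestamp just before the descriptors are queued, and netdev_sent_queue() is the enqueue half of byte queue limits (BQL) accounting, which only works if the Tx completion path reports the same byte count back. A minimal sketch of that pairing; the helper names are hypothetical, only the three kernel APIs are real:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helpers for illustration; skb_tx_timestamp(),
 * netdev_sent_queue() and netdev_completed_queue() are the real APIs. */
static void example_xmit_account(struct net_device *netdev,
                                 struct sk_buff *skb)
{
        skb_tx_timestamp(skb);                  /* software Tx timestamp hook */
        netdev_sent_queue(netdev, skb->len);    /* BQL: bytes handed to HW */
}

static void example_clean_account(struct net_device *netdev,
                                  unsigned int pkts, unsigned int bytes)
{
        /* BQL: packets/bytes the HW completed since the last call */
        netdev_completed_queue(netdev, pkts, bytes);
}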
*/ @@ -5285,22 +5266,14 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) return -EINVAL; } - /* Jumbo frame workaround on 82579 requires CRC be stripped */ - if ((adapter->hw.mac.type == e1000_pch2lan) && + /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */ + if ((adapter->hw.mac.type >= e1000_pch2lan) && !(adapter->flags2 & FLAG2_CRC_STRIPPING) && (new_mtu > ETH_DATA_LEN)) { - e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n"); + e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n"); return -EINVAL; } - /* 82573 Errata 17 */ - if (((adapter->hw.mac.type == e1000_82573) || - (adapter->hw.mac.type == e1000_82574)) && - (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) { - adapter->flags2 |= FLAG2_DISABLE_ASPM_L1; - e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1); - } - while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) usleep_range(1000, 2000); /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ @@ -5694,7 +5667,7 @@ static int __e1000_resume(struct pci_dev *pdev) return err; } - if (hw->mac.type == e1000_pch2lan) + if (hw->mac.type >= e1000_pch2lan) e1000_resume_workarounds_pchlan(&adapter->hw); e1000e_power_up_phy(adapter); @@ -6226,7 +6199,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, netdev->netdev_ops = &e1000e_netdev_ops; e1000e_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; - netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); + netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64); strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); netdev->mem_start = mmio_start; @@ -6593,6 +6566,9 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt }, + { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c index 16adeb9418a..55cc1565bc2 100644 --- a/drivers/net/ethernet/intel/e1000e/param.c +++ b/drivers/net/ethernet/intel/e1000e/param.c @@ -166,8 +166,8 @@ E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lea * * Default Value: 1 (enabled) */ -E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \ - "the CRC"); +E1000_PARAM(CrcStripping, + "Enable CRC Stripping, disable if your BMC needs the CRC"); struct e1000_option { enum { enable_option, range_option, list_option } type; @@ -347,8 +347,8 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) /* * Make sure a message is printed for non-special - * values. And in case of an invalid option, display - * warning, use default and got through itr/itr_setting + * values. 
And in case of an invalid option, display + * warning, use default and go through itr/itr_setting * adjustment logic below */ if ((adapter->itr > 4) && @@ -365,7 +365,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) * Make sure a message is printed for non-special * default values */ - if (adapter->itr > 40) + if (adapter->itr > 4) e_info("%s set to default %d\n", opt.name, adapter->itr); } diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 35b45578c60..0334d013bc3 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -639,6 +639,45 @@ s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) } /** + * e1000_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +static s32 e1000_set_master_slave_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Resolve Master/Slave mode */ + ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : e1000_ms_force_slave) : e1000_ms_auto; + + switch (hw->phy.ms_type) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + /* fall-through */ + default: + break; + } + + return e1e_wphy(hw, PHY_1000T_CTRL, phy_data); +} + +/** * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link * @hw: pointer to the HW structure * @@ -659,7 +698,11 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) /* Enable downshift */ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; - return e1e_wphy(hw, I82577_CFG_REG, phy_data); + ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data); + if (ret_val) + return ret_val; + + return e1000_set_master_slave_mode(hw); } /** @@ -718,12 +761,28 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) * 1 - Enabled */ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; - if (phy->disable_polarity_correction == 1) + if (phy->disable_polarity_correction) phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; /* Enable downshift on BM (disabled by default) */ - if (phy->type == e1000_phy_bm) + if (phy->type == e1000_phy_bm) { + /* For 82574/82583, first disable then enable downshift */ + if (phy->id == BME1000_E_PHY_ID_R2) { + phy_data &= ~BME1000_PSCR_ENABLE_DOWNSHIFT; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + /* Commit the changes. */ + ret_val = e1000e_commit_phy(hw); + if (ret_val) { + e_dbg("Error committing the PHY changes\n"); + return ret_val; + } + } + phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; + } ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) @@ -879,31 +938,7 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) return ret_val; } - ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); - if (ret_val) - return ret_val; - - /* load defaults for future use */ - phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? - ((data & CR_1000T_MS_VALUE) ? 
- e1000_ms_force_master : - e1000_ms_force_slave) : - e1000_ms_auto; - - switch (phy->ms_type) { - case e1000_ms_force_master: - data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); - break; - case e1000_ms_force_slave: - data |= CR_1000T_MS_ENABLE; - data &= ~(CR_1000T_MS_VALUE); - break; - case e1000_ms_auto: - data &= ~CR_1000T_MS_ENABLE; - default: - break; - } - ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); + ret_val = e1000_set_master_slave_mode(hw); } return ret_val; @@ -1090,7 +1125,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) * If autoneg_advertised is zero, we assume it was not defaulted * by the calling code so we set to advertise full capability. */ - if (phy->autoneg_advertised == 0) + if (!phy->autoneg_advertised) phy->autoneg_advertised = phy->autoneg_mask; e_dbg("Reconfiguring auto-neg advertisement params\n"); @@ -1596,7 +1631,7 @@ s32 e1000e_check_downshift(struct e1000_hw *hw) ret_val = e1e_rphy(hw, offset, &phy_data); if (!ret_val) - phy->speed_downgraded = (phy_data & mask); + phy->speed_downgraded = !!(phy_data & mask); return ret_val; } @@ -1925,8 +1960,8 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) if (ret_val) return ret_val; - phy->polarity_correction = (phy_data & - M88E1000_PSCR_POLARITY_REVERSAL); + phy->polarity_correction = !!(phy_data & + M88E1000_PSCR_POLARITY_REVERSAL); ret_val = e1000_check_polarity_m88(hw); if (ret_val) @@ -1936,7 +1971,7 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) if (ret_val) return ret_val; - phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX); + phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX); if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { ret_val = e1000_get_cable_length(hw); @@ -1999,7 +2034,7 @@ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw) if (ret_val) return ret_val; - phy->is_mdix = (data & IGP01E1000_PSSR_MDIX); + phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX); if ((data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { @@ -2052,8 +2087,7 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw) ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); if (ret_val) return ret_val; - phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE) - ? false : true; + phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE); if (phy->polarity_correction) { ret_val = e1000_check_polarity_ife(hw); @@ -2070,7 +2104,7 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw) if (ret_val) return ret_val; - phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false; + phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS); /* The following parameters are undefined for 10/100 operation. */ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; @@ -2320,6 +2354,9 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) case I82579_E_PHY_ID: phy_type = e1000_phy_82579; break; + case I217_E_PHY_ID: + phy_type = e1000_phy_i217; + break; default: phy_type = e1000_phy_unknown; break; @@ -2979,7 +3016,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, if ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision >= 1) && (hw->phy.addr == 2) && - ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) { + !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) { u16 data2 = 0x7EFF; ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3, @@ -3265,7 +3302,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) if (ret_val) return ret_val; - phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? 
true : false; + phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX); if ((data & I82577_PHY_STATUS2_SPEED_MASK) == I82577_PHY_STATUS2_SPEED_1000MBPS) { diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile index 6565c463185..97c197fd4a8 100644 --- a/drivers/net/ethernet/intel/igb/Makefile +++ b/drivers/net/ethernet/intel/igb/Makefile @@ -33,5 +33,7 @@ obj-$(CONFIG_IGB) += igb.o igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \ - e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o + e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \ + e1000_i210.o +igb-$(CONFIG_IGB_PTP) += igb_ptp.o diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 08bdc33715e..e6508395842 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -36,6 +36,7 @@ #include "e1000_mac.h" #include "e1000_82575.h" +#include "e1000_i210.h" static s32 igb_get_invariants_82575(struct e1000_hw *); static s32 igb_acquire_phy_82575(struct e1000_hw *); @@ -52,6 +53,8 @@ static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16); static s32 igb_reset_hw_82575(struct e1000_hw *); static s32 igb_reset_hw_82580(struct e1000_hw *); static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); +static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool); +static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool); static s32 igb_setup_copper_link_82575(struct e1000_hw *); static s32 igb_setup_serdes_link_82575(struct e1000_hw *); static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16); @@ -96,6 +99,8 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) break; case e1000_82580: case e1000_i350: + case e1000_i210: + case e1000_i211: reg = rd32(E1000_MDICNFG); ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); break; @@ -150,6 +155,17 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) case E1000_DEV_ID_I350_SGMII: mac->type = e1000_i350; break; + case E1000_DEV_ID_I210_COPPER: + case E1000_DEV_ID_I210_COPPER_OEM1: + case E1000_DEV_ID_I210_COPPER_IT: + case E1000_DEV_ID_I210_FIBER: + case E1000_DEV_ID_I210_SERDES: + case E1000_DEV_ID_I210_SGMII: + mac->type = e1000_i210; + break; + case E1000_DEV_ID_I211_COPPER: + mac->type = e1000_i211; + break; default: return -E1000_ERR_MAC_INIT; break; @@ -182,26 +198,44 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) /* Set mta register count */ mac->mta_reg_count = 128; /* Set rar entry count */ - mac->rar_entry_count = E1000_RAR_ENTRIES_82575; - if (mac->type == e1000_82576) + switch (mac->type) { + case e1000_82576: mac->rar_entry_count = E1000_RAR_ENTRIES_82576; - if (mac->type == e1000_82580) + break; + case e1000_82580: mac->rar_entry_count = E1000_RAR_ENTRIES_82580; - if (mac->type == e1000_i350) + break; + case e1000_i350: + case e1000_i210: + case e1000_i211: mac->rar_entry_count = E1000_RAR_ENTRIES_I350; + break; + default: + mac->rar_entry_count = E1000_RAR_ENTRIES_82575; + break; + } /* reset */ if (mac->type >= e1000_82580) mac->ops.reset_hw = igb_reset_hw_82580; else mac->ops.reset_hw = igb_reset_hw_82575; + + if (mac->type >= e1000_i210) { + mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210; + mac->ops.release_swfw_sync = igb_release_swfw_sync_i210; + } else { + mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575; + mac->ops.release_swfw_sync = igb_release_swfw_sync_82575; + } + /* Set if part includes ASF firmware */ mac->asf_firmware_present = true; /* Set if manageability 
features are enabled. */ mac->arc_subsystem_valid = (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) ? true : false; - /* enable EEE on i350 parts */ - if (mac->type == e1000_i350) + /* enable EEE on i350 parts and later parts */ + if (mac->type >= e1000_i350) dev_spec->eee_disable = false; else dev_spec->eee_disable = true; @@ -213,26 +247,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) /* NVM initialization */ eecd = rd32(E1000_EECD); - - nvm->opcode_bits = 8; - nvm->delay_usec = 1; - switch (nvm->override) { - case e1000_nvm_override_spi_large: - nvm->page_size = 32; - nvm->address_bits = 16; - break; - case e1000_nvm_override_spi_small: - nvm->page_size = 8; - nvm->address_bits = 8; - break; - default: - nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; - nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; - break; - } - - nvm->type = e1000_nvm_eeprom_spi; - size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> E1000_EECD_SIZE_EX_SHIFT); @@ -242,6 +256,33 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) */ size += NVM_WORD_SIZE_BASE_SHIFT; + nvm->word_size = 1 << size; + if (hw->mac.type < e1000_i210) { + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd + & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd + & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + if (nvm->word_size == (1 << 15)) + nvm->page_size = 128; + + nvm->type = e1000_nvm_eeprom_spi; + } else + nvm->type = e1000_nvm_flash_hw; + /* * Check for invalid size */ @@ -249,32 +290,60 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) pr_notice("The NVM size is not valid, defaulting to 32K\n"); size = 15; } - nvm->word_size = 1 << size; - if (nvm->word_size == (1 << 15)) - nvm->page_size = 128; /* NVM Function Pointers */ - nvm->ops.acquire = igb_acquire_nvm_82575; - if (nvm->word_size < (1 << 15)) - nvm->ops.read = igb_read_nvm_eerd; - else - nvm->ops.read = igb_read_nvm_spi; - - nvm->ops.release = igb_release_nvm_82575; switch (hw->mac.type) { case e1000_82580: nvm->ops.validate = igb_validate_nvm_checksum_82580; nvm->ops.update = igb_update_nvm_checksum_82580; + nvm->ops.acquire = igb_acquire_nvm_82575; + nvm->ops.release = igb_release_nvm_82575; + if (nvm->word_size < (1 << 15)) + nvm->ops.read = igb_read_nvm_eerd; + else + nvm->ops.read = igb_read_nvm_spi; + nvm->ops.write = igb_write_nvm_spi; break; case e1000_i350: nvm->ops.validate = igb_validate_nvm_checksum_i350; nvm->ops.update = igb_update_nvm_checksum_i350; + nvm->ops.acquire = igb_acquire_nvm_82575; + nvm->ops.release = igb_release_nvm_82575; + if (nvm->word_size < (1 << 15)) + nvm->ops.read = igb_read_nvm_eerd; + else + nvm->ops.read = igb_read_nvm_spi; + nvm->ops.write = igb_write_nvm_spi; + break; + case e1000_i210: + nvm->ops.validate = igb_validate_nvm_checksum_i210; + nvm->ops.update = igb_update_nvm_checksum_i210; + nvm->ops.acquire = igb_acquire_nvm_i210; + nvm->ops.release = igb_release_nvm_i210; + nvm->ops.read = igb_read_nvm_srrd_i210; + nvm->ops.valid_led_default = igb_valid_led_default_i210; + break; + case e1000_i211: + nvm->ops.acquire = igb_acquire_nvm_i210; + nvm->ops.release = igb_release_nvm_i210; + nvm->ops.read = igb_read_nvm_i211; + nvm->ops.valid_led_default = igb_valid_led_default_i210; + nvm->ops.validate = NULL; + nvm->ops.update = NULL; + nvm->ops.write = 
NULL; break; default: nvm->ops.validate = igb_validate_nvm_checksum; nvm->ops.update = igb_update_nvm_checksum; + nvm->ops.acquire = igb_acquire_nvm_82575; + nvm->ops.release = igb_release_nvm_82575; + if (nvm->word_size < (1 << 15)) + nvm->ops.read = igb_read_nvm_eerd; + else + nvm->ops.read = igb_read_nvm_spi; + nvm->ops.write = igb_write_nvm_spi; + break; } - nvm->ops.write = igb_write_nvm_spi; /* if part supports SR-IOV then initialize mailbox parameters */ switch (mac->type) { @@ -312,9 +381,13 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; - } else if (hw->mac.type >= e1000_82580) { + } else if ((hw->mac.type == e1000_82580) + || (hw->mac.type == e1000_i350)) { phy->ops.read_reg = igb_read_phy_reg_82580; phy->ops.write_reg = igb_write_phy_reg_82580; + } else if (hw->phy.type >= e1000_phy_i210) { + phy->ops.read_reg = igb_read_phy_reg_gs40g; + phy->ops.write_reg = igb_write_phy_reg_gs40g; } else { phy->ops.read_reg = igb_read_phy_reg_igp; phy->ops.write_reg = igb_write_phy_reg_igp; @@ -343,6 +416,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) else phy->ops.get_cable_length = igb_get_cable_length_m88; + if (phy->id == I210_I_PHY_ID) { + phy->ops.get_cable_length = + igb_get_cable_length_m88_gen2; + phy->ops.set_d0_lplu_state = + igb_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = + igb_set_d3_lplu_state_82580; + } phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; break; case IGP03E1000_E_PHY_ID: @@ -359,6 +440,17 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580; phy->ops.get_cable_length = igb_get_cable_length_82580; phy->ops.get_phy_info = igb_get_phy_info_82580; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; + break; + case I210_I_PHY_ID: + phy->type = e1000_phy_i210; + phy->ops.get_phy_info = igb_get_phy_info_m88; + phy->ops.check_polarity = igb_check_polarity_m88; + phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; break; default: return -E1000_ERR_PHY; @@ -385,7 +477,7 @@ static s32 igb_acquire_phy_82575(struct e1000_hw *hw) else if (hw->bus.func == E1000_FUNC_3) mask = E1000_SWFW_PHY3_SM; - return igb_acquire_swfw_sync_82575(hw, mask); + return hw->mac.ops.acquire_swfw_sync(hw, mask); } /** @@ -406,7 +498,7 @@ static void igb_release_phy_82575(struct e1000_hw *hw) else if (hw->bus.func == E1000_FUNC_3) mask = E1000_SWFW_PHY3_SM; - igb_release_swfw_sync_82575(hw, mask); + hw->mac.ops.release_swfw_sync(hw, mask); } /** @@ -510,6 +602,8 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw) break; case e1000_82580: case e1000_i350: + case e1000_i210: + case e1000_i211: mdic = rd32(E1000_MDICNFG); mdic &= E1000_MDICNFG_PHY_MASK; phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; @@ -674,6 +768,96 @@ out: } /** + * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. 
LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 data; + + data = rd32(E1000_82580_PHY_POWER_MGMT); + + if (active) { + data |= E1000_82580_PM_D0_LPLU; + + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } else { + data &= ~E1000_82580_PM_D0_LPLU; + + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; } + + wr32(E1000_82580_PHY_POWER_MGMT, data); + return ret_val; +} + +/** + * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 data; + + data = rd32(E1000_82580_PHY_POWER_MGMT); + + if (!active) { + data &= ~E1000_82580_PM_D3_LPLU; + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_82580_PM_D3_LPLU; + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } + + wr32(E1000_82580_PHY_POWER_MGMT, data); + return ret_val; +} + +/** * igb_acquire_nvm_82575 - Request for access to EEPROM * @hw: pointer to the HW structure * @@ -686,14 +870,14 @@ static s32 igb_acquire_nvm_82575(struct e1000_hw *hw) { s32 ret_val; - ret_val = igb_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM); if (ret_val) goto out; ret_val = igb_acquire_nvm(hw); if (ret_val) - igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); out: return ret_val; @@ -709,7 +893,7 @@ out: static void igb_release_nvm_82575(struct e1000_hw *hw) { igb_release_nvm(hw); - igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); } /** @@ -1080,7 +1264,6 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw) * is no link. 
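The two 82580/i210 LPLU handlers above are reached only through the PHY ops table (the set_d0_lplu_state/set_d3_lplu_state assignments appear in igb_get_invariants_82575 earlier in this diff). A hedged usage sketch; igb_example_set_lplu is a hypothetical wrapper, not a function from this patch:

static s32 igb_example_set_lplu(struct e1000_hw *hw, bool entering_d3)
{
        /* Not every PHY type installs these handlers. */
        if (!hw->phy.ops.set_d0_lplu_state || !hw->phy.ops.set_d3_lplu_state)
                return 0;

        if (entering_d3)
                /* D3: the handler enables LPLU (and drops SmartSpeed) only for
                 * the standard 10, 10/100 or 10/100/1000 all-duplex
                 * advertisement sets, per the kerneldoc above. */
                return hw->phy.ops.set_d3_lplu_state(hw, true);

        /* D0: keep LPLU off so SmartSpeed and performance are preserved. */
        return hw->phy.ops.set_d0_lplu_state(hw, false);
}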
*/ igb_clear_hw_cntrs_82575(hw); - return ret_val; } @@ -1117,6 +1300,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) } } switch (hw->phy.type) { + case e1000_phy_i210: case e1000_phy_m88: if (hw->phy.id == I347AT4_E_PHY_ID || hw->phy.id == M88E1112_E_PHY_ID) @@ -1757,7 +1941,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw) /* Determine whether or not a global dev reset is requested */ if (global_device_reset && - igb_acquire_swfw_sync_82575(hw, swmbsw_mask)) + hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask)) global_device_reset = false; if (global_device_reset && @@ -1803,7 +1987,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw) /* Release semaphore */ if (global_device_reset) - igb_release_swfw_sync_82575(hw, swmbsw_mask); + hw->mac.ops.release_swfw_sync(hw, swmbsw_mask); return ret_val; } diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index b927d79ab53..e85c453f542 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -55,10 +55,11 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); #define E1000_SRRCTL_DROP_EN 0x80000000 #define E1000_SRRCTL_TIMESTAMP 0x40000000 + #define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 #define E1000_MRQC_ENABLE_VMDQ 0x00000003 -#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 #define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 #define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 #define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 89eb1f85b9f..ec7e4fe3e3e 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -301,6 +301,8 @@ * transactions */ #define E1000_DMACR_DMAC_LX_SHIFT 28 #define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +/* DMA Coalescing BMC-to-OS Watchdog Enable */ +#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 #define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit * Threshold */ @@ -458,6 +460,7 @@ #define E1000_ERR_INVALID_ARGUMENT 16 #define E1000_ERR_NO_SPACE 17 #define E1000_ERR_NVM_PBA_SECTION 18 +#define E1000_ERR_INVM_VALUE_NOT_FOUND 19 /* Loop limit on how long we wait for auto-negotiation to complete */ #define COPPER_LINK_UP_LIMIT 10 @@ -595,6 +598,25 @@ #define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ #define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ #define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ +#define E1000_FLUDONE_ATTEMPTS 20000 +#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define E1000_I210_FIFO_SEL_RX 0x00 +#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 +#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ +#define E1000_FLUDONE_ATTEMPTS 20000 +#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define E1000_I210_FIFO_SEL_RX 0x00 +#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define 
E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 + /* Offset to data in NVM read/write registers */ #define E1000_NVM_RW_REG_DATA 16 @@ -613,6 +635,16 @@ #define NVM_CHECKSUM_REG 0x003F #define NVM_COMPATIBILITY_REG_3 0x0003 #define NVM_COMPATIBILITY_BIT_MASK 0x8000 +#define NVM_MAC_ADDR 0x0000 +#define NVM_SUB_DEV_ID 0x000B +#define NVM_SUB_VEN_ID 0x000C +#define NVM_DEV_ID 0x000D +#define NVM_VEN_ID 0x000E +#define NVM_INIT_CTRL_2 0x000F +#define NVM_INIT_CTRL_4 0x0013 +#define NVM_LED_1_CFG 0x001C +#define NVM_LED_0_2_CFG 0x001F + #define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ #define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ @@ -639,6 +671,7 @@ #define NVM_PBA_OFFSET_0 8 #define NVM_PBA_OFFSET_1 9 +#define NVM_RESERVED_WORD 0xFFFF #define NVM_PBA_PTR_GUARD 0xFAFA #define NVM_WORD_SIZE_BASE_SHIFT 6 @@ -696,6 +729,7 @@ #define I82580_I_PHY_ID 0x015403A0 #define I350_I_PHY_ID 0x015403B0 #define M88_VENDOR 0x0141 +#define I210_I_PHY_ID 0x01410C00 /* M88E1000 Specific Registers */ #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ @@ -815,6 +849,7 @@ #define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ #define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ #define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ +#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */ #define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ /* SerDes Control */ diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index f67cbd3fa30..c2a51dcda55 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -63,6 +63,13 @@ struct e1000_hw; #define E1000_DEV_ID_I350_FIBER 0x1522 #define E1000_DEV_ID_I350_SERDES 0x1523 #define E1000_DEV_ID_I350_SGMII 0x1524 +#define E1000_DEV_ID_I210_COPPER 0x1533 +#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534 +#define E1000_DEV_ID_I210_COPPER_IT 0x1535 +#define E1000_DEV_ID_I210_FIBER 0x1536 +#define E1000_DEV_ID_I210_SERDES 0x1537 +#define E1000_DEV_ID_I210_SGMII 0x1538 +#define E1000_DEV_ID_I211_COPPER 0x1539 #define E1000_REVISION_2 2 #define E1000_REVISION_4 4 @@ -83,6 +90,8 @@ enum e1000_mac_type { e1000_82576, e1000_82580, e1000_i350, + e1000_i210, + e1000_i211, e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ }; @@ -117,6 +126,7 @@ enum e1000_phy_type { e1000_phy_igp_3, e1000_phy_ife, e1000_phy_82580, + e1000_phy_i210, }; enum e1000_bus_type { @@ -313,6 +323,9 @@ struct e1000_mac_operations { void (*rar_set)(struct e1000_hw *, u8 *, u32); s32 (*read_mac_addr)(struct e1000_hw *); s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); + s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); + void (*release_swfw_sync)(struct e1000_hw *, u16); + }; struct e1000_phy_operations { @@ -338,6 +351,7 @@ struct e1000_nvm_operations { s32 (*write)(struct e1000_hw *, u16, u16, u16 *); s32 (*update)(struct e1000_hw *); s32 (*validate)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); }; struct e1000_info { diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c new file mode 100644 index 00000000000..77a5f939bc7 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -0,0 +1,603 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2012 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +******************************************************************************/ + +/* e1000_i210 + * e1000_i211 + */ + +#include <linux/types.h> +#include <linux/if_ether.h> + +#include "e1000_hw.h" +#include "e1000_i210.h" + +static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw); +static void igb_put_hw_semaphore_i210(struct e1000_hw *hw); +static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw); + +/** + * igb_acquire_nvm_i210 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 igb_acquire_nvm_i210(struct e1000_hw *hw) +{ + return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_release_nvm_i210 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +void igb_release_nvm_i210(struct e1000_hw *hw) +{ + igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. 
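For context on the SW/FW synchronization helpers that follow: every NVM (and PHY) access on these parts is bracketed by an acquire/release pair, which is why the 82575 code earlier in this diff was switched to the hw->mac.ops.acquire_swfw_sync/release_swfw_sync indirection. A minimal sketch of reading one NVM word under the semaphore, mirroring the pattern the burst helpers below use; igb_example_read_word is a hypothetical name:

static s32 igb_example_read_word(struct e1000_hw *hw, u16 offset, u16 *word)
{
        s32 ret_val;

        /* On i210 this resolves to igb_acquire_nvm_i210(), which takes the
         * EEPROM SW/FW semaphore (E1000_SWFW_EEP_SM) via the helper below. */
        ret_val = hw->nvm.ops.acquire(hw);
        if (ret_val)
                return ret_val;

        /* Raw EERD read, safe only while the semaphore is held; on i211 the
         * iNVM wrapper further down would be used instead. */
        ret_val = igb_read_nvm_eerd(hw, offset, 1, word);

        hw->nvm.ops.release(hw);
        return ret_val;
}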
+ **/ +s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = E1000_SUCCESS; + s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + + while (i < timeout) { + if (igb_get_hw_semaphore_i210(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(E1000_SW_FW_SYNC); + if (!(swfw_sync & fwmask)) + break; + + /* + * Firmware currently using resource (fwmask) + */ + igb_put_hw_semaphore_i210(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore_i210(hw); +out: + return ret_val; +} + +/** + * igb_release_swfw_sync_i210 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (igb_get_hw_semaphore_i210(hw) != E1000_SUCCESS) + ; /* Empty */ + + swfw_sync = rd32(E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore_i210(hw); +} + +/** + * igb_get_hw_semaphore_i210 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw) +{ + u32 swsm; + s32 ret_val = E1000_SUCCESS; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + igb_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_put_hw_semaphore_i210 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +static void igb_put_hw_semaphore_i210(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = rd32(E1000_SWSM); + + swsm &= ~E1000_SWSM_SWESMBI; + + wr32(E1000_SWSM, swsm); +} + +/** + * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + **/ +s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = E1000_SUCCESS; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? 
+ E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + status = igb_read_nvm_eerd(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status != E1000_SUCCESS) + break; + } + + return status; +} + +/** + * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If e1000_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + **/ +s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = E1000_SUCCESS; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + status = igb_write_nvm_srwr(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status != E1000_SUCCESS) + break; + } + + return status; +} + +/** + * igb_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If igb_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. + **/ +static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, k, eewr = 0; + u32 attempts = 100000; + s32 ret_val = E1000_SUCCESS; + + /* + * A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | + (data[i] << E1000_NVM_RW_REG_DATA) | + E1000_NVM_RW_REG_START; + + wr32(E1000_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (E1000_NVM_RW_REG_DONE & + rd32(E1000_SRWR)) { + ret_val = E1000_SUCCESS; + break; + } + udelay(5); + } + + if (ret_val != E1000_SUCCESS) { + hw_dbg("Shadow RAM write EEWR timed out\n"); + break; + } + } + +out: + return ret_val; +} + +/** + * igb_read_nvm_i211 - Read NVM wrapper function for I211 + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Wrapper function to return data formerly found in the NVM. 
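The i211 has no external EEPROM; the wrapper documented above serves the handful of words that live in on-die OTP (iNVM), where each 32-bit word is a self-describing record. A worked example of how one word-autoload record decodes with the INVM_DWORD_TO_* macros defined later in e1000_i210.h; the value 0x12340a01 is fabricated for illustration:

static void igb_example_decode_invm(void)
{
        u32 invm_dword = 0x12340a01;    /* fabricated example value */

        u8 rec   = INVM_DWORD_TO_RECORD_TYPE(invm_dword);  /* bits 2:0   -> 0x01, word autoload */
        u8 addr  = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); /* bits 15:9  -> word address 0x05 */
        u16 data = INVM_DWORD_TO_WORD_DATA(invm_dword);    /* bits 31:16 -> word data 0x1234 */

        (void)rec; (void)addr; (void)data;
}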
+ **/ +s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 ret_val = E1000_SUCCESS; + + /* Only the MAC addr is required to be present in the iNVM */ + switch (offset) { + case NVM_MAC_ADDR: + ret_val = igb_read_invm_i211(hw, offset, &data[0]); + ret_val |= igb_read_invm_i211(hw, offset+1, &data[1]); + ret_val |= igb_read_invm_i211(hw, offset+2, &data[2]); + if (ret_val != E1000_SUCCESS) + hw_dbg("MAC Addr not found in iNVM\n"); + break; + case NVM_ID_LED_SETTINGS: + case NVM_INIT_CTRL_2: + case NVM_INIT_CTRL_4: + case NVM_LED_1_CFG: + case NVM_LED_0_2_CFG: + igb_read_invm_i211(hw, offset, data); + break; + case NVM_COMPAT: + *data = ID_LED_DEFAULT_I210; + break; + case NVM_SUB_DEV_ID: + *data = hw->subsystem_device_id; + break; + case NVM_SUB_VEN_ID: + *data = hw->subsystem_vendor_id; + break; + case NVM_DEV_ID: + *data = hw->device_id; + break; + case NVM_VEN_ID: + *data = hw->vendor_id; + break; + default: + hw_dbg("NVM word 0x%02x is not mapped.\n", offset); + *data = NVM_RESERVED_WORD; + break; + } + return ret_val; +} + +/** + * igb_read_invm_i211 - Reads OTP + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Reads 16-bit words from the OTP. Return error when the word is not + * stored in OTP. + **/ +s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data) +{ + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u32 invm_dword; + u16 i; + u8 record_type, word_address; + + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = rd32(E1000_INVM_DATA_REG(i)); + /* Get record type */ + record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); + if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) + break; + if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) + i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) + i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { + word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); + if (word_address == (u8)address) { + *data = INVM_DWORD_TO_WORD_DATA(invm_dword); + hw_dbg("Read INVM Word 0x%02x = %x", + address, *data); + status = E1000_SUCCESS; + break; + } + } + } + if (status != E1000_SUCCESS) + hw_dbg("Requested word 0x%02x not found in OTP\n", address); + return status; +} + +/** + * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 status = E1000_SUCCESS; + s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); + + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + + /* + * Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = igb_read_nvm_eerd; + + status = igb_validate_nvm_checksum(hw); + + /* Revert original read operation. */ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + return status; +} + + +/** + * igb_update_nvm_checksum_i210 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. 
Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + **/ +s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 checksum = 0; + u16 i, nvm_data; + + /* + * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val != E1000_SUCCESS) { + hw_dbg("EEPROM read failed\n"); + goto out; + } + + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + /* + * Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. + */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val != E1000_SUCCESS) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = igb_update_flash_i210(hw); + } else { + ret_val = -E1000_ERR_SWFW_SYNC; + } +out: + return ret_val; +} + +/** + * igb_update_flash_i210 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + * + **/ +s32 igb_update_flash_i210(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u32 flup; + + ret_val = igb_pool_flash_update_done_i210(hw); + if (ret_val == -E1000_ERR_NVM) { + hw_dbg("Flash update time out\n"); + goto out; + } + + flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210; + wr32(E1000_EECD, flup); + + ret_val = igb_pool_flash_update_done_i210(hw); + if (ret_val == E1000_SUCCESS) + hw_dbg("Flash update complete\n"); + else + hw_dbg("Flash update time out\n"); + +out: + return ret_val; +} + +/** + * igb_pool_flash_update_done_i210 - Pool FLUDONE status. + * @hw: pointer to the HW structure + * + **/ +s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_NVM; + u32 i, reg; + + for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { + reg = rd32(E1000_EECD); + if (reg & E1000_EECD_FLUDONE_I210) { + ret_val = E1000_SUCCESS; + break; + } + udelay(5); + } + + return ret_val; +} + +/** + * igb_valid_led_default_i210 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
+ **/ +s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_I210_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT_I210; + break; + } + } +out: + return ret_val; +} diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h new file mode 100644 index 00000000000..5dc2bd3f50b --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -0,0 +1,76 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_I210_H_ +#define _E1000_I210_H_ + +extern s32 igb_update_flash_i210(struct e1000_hw *hw); +extern s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw); +extern s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw); +extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +extern s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data); +extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw); +extern void igb_release_nvm_i210(struct e1000_hw *hw); +extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); +extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); + +#define E1000_STM_OPCODE 0xDB00 +#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 + +#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \ + (u8)((invm_dword) & 0x7) +#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \ + (u8)(((invm_dword) & 0x0000FE00) >> 9) +#define INVM_DWORD_TO_WORD_DATA(invm_dword) \ + (u16)(((invm_dword) & 0xFFFF0000) >> 16) + +enum E1000_INVM_STRUCTURE_TYPE { + E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00, + E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01, + E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02, + E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03, + E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04, + E1000_INVM_INVALIDATED_STRUCTURE = 0x0F, +}; + +#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 +#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 + +#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_OFF1_OFF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#endif diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index f57338afd71..819c145ac76 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -658,6 +658,7 @@ s32 igb_setup_link(struct e1000_hw *hw) ret_val = igb_set_fc_watermarks(hw); out: + return ret_val; } diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c index fa2c6ba6213..aa5fcdf3f35 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.c +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c @@ -710,4 +710,3 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw) out: return ret_val; } - diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 789de5b83aa..7be98b6f105 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -35,6 +35,7 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw); static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); static s32 igb_wait_autoneg(struct e1000_hw *hw); +static s32 igb_set_master_slave_mode(struct e1000_hw *hw); /* Cable length tables */ static const u16 e1000_m88_cable_length_table[] = @@ -570,6 +571,11 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw) hw_dbg("Error committing the PHY changes\n"); goto out; } + if (phy->type == e1000_phy_i210) { + ret_val = igb_set_master_slave_mode(hw); + if (ret_val) + return 
ret_val; + } out: return ret_val; @@ -1213,12 +1219,22 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) goto out; if (!link) { - if (hw->phy.type != e1000_phy_m88 || - hw->phy.id == I347AT4_E_PHY_ID || - hw->phy.id == M88E1112_E_PHY_ID) { + bool reset_dsp = true; + + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case I210_I_PHY_ID: + reset_dsp = false; + break; + default: + if (hw->phy.type != e1000_phy_m88) + reset_dsp = false; + break; + } + if (!reset_dsp) hw_dbg("Link taking longer than expected.\n"); - } else { - + else { /* * We didn't get link. * Reset the DSP and cross our fingers. @@ -1243,7 +1259,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) if (hw->phy.type != e1000_phy_m88 || hw->phy.id == I347AT4_E_PHY_ID || - hw->phy.id == M88E1112_E_PHY_ID) + hw->phy.id == M88E1112_E_PHY_ID || + hw->phy.id == I210_I_PHY_ID) goto out; ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); @@ -1441,6 +1458,7 @@ s32 igb_check_downshift(struct e1000_hw *hw) u16 phy_data, offset, mask; switch (phy->type) { + case e1000_phy_i210: case e1000_phy_m88: case e1000_phy_gg82563: offset = M88E1000_PHY_SPEC_STATUS; @@ -1476,7 +1494,7 @@ out: * * Polarity is determined based on the PHY specific status register. **/ -static s32 igb_check_polarity_m88(struct e1000_hw *hw) +s32 igb_check_polarity_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; @@ -1665,6 +1683,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) u16 phy_data, phy_data2, index, default_page, is_cm; switch (hw->phy.id) { + case I210_I_PHY_ID: case I347AT4_E_PHY_ID: /* Remember the original page select and set it to 7 */ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, @@ -2129,10 +2148,16 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw) void igb_power_up_phy_copper(struct e1000_hw *hw) { u16 mii_reg = 0; + u16 power_reg = 0; /* The PHY will retain its settings across a power down/up cycle */ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); mii_reg &= ~MII_CR_POWER_DOWN; + if (hw->phy.type == e1000_phy_i210) { + hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg); + power_reg &= ~GS40G_CS_POWER_DOWN; + hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg); + } hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); } @@ -2146,10 +2171,18 @@ void igb_power_up_phy_copper(struct e1000_hw *hw) void igb_power_down_phy_copper(struct e1000_hw *hw) { u16 mii_reg = 0; + u16 power_reg = 0; /* The PHY will retain its settings across a power down/up cycle */ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); mii_reg |= MII_CR_POWER_DOWN; + + /* i210 Phy requires an additional bit for power up/down */ + if (hw->phy.type == e1000_phy_i210) { + hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg); + power_reg |= GS40G_CS_POWER_DOWN; + hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg); + } hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); msleep(1); } @@ -2345,3 +2378,103 @@ s32 igb_get_cable_length_82580(struct e1000_hw *hw) out: return ret_val; } + +/** + * igb_write_phy_reg_gs40g - Write GS40G PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to write to + * upper half is page to use. + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
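In the GS40G accessors that follow, the 32-bit offset argument packs the page number in the upper half and the register address in the lower half (GS40G_PAGE_SHIFT and GS40G_OFFSET_MASK in the e1000_phy.h hunk below). A usage sketch built from the constants this patch adds; the particular combination of reads is illustrative, not taken from the driver:

static s32 igb_example_gs40g_reads(struct e1000_hw *hw)
{
        u16 val;
        s32 ret_val;

        /* Page 2, register 0x15: the page is encoded in the upper 16 bits. */
        ret_val = igb_read_phy_reg_gs40g(hw, GS40G_PAGE_2 | GS40G_MAC_REG2, &val);
        if (ret_val)
                return ret_val;

        /* Page 0, register 0x10: the copper-specific register also used by
         * the PHY power up/down helpers earlier in this diff. */
        return igb_read_phy_reg_gs40g(hw, GS40G_COPPER_SPEC, &val);
}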
+ **/ +s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; + ret_val = igb_write_phy_reg_mdic(hw, offset, data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * igb_read_phy_reg_gs40g - Read GS40G PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is page to use. + * @data: data to read at register offset + * + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; + ret_val = igb_read_phy_reg_mdic(hw, offset, data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * igb_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +static s32 igb_set_master_slave_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Resolve Master/Slave mode */ + ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? 
+ e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (hw->phy.ms_type) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + /* fall-through */ + default: + break; + } + + return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); +} diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index 4c32ac66ff3..34e40619f16 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -73,6 +73,9 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw); s32 igb_get_phy_info_82580(struct e1000_hw *hw); s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); s32 igb_get_cable_length_82580(struct e1000_hw *hw); +s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_check_polarity_m88(struct e1000_hw *hw); /* IGP01E1000 Specific Registers */ #define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ @@ -114,6 +117,13 @@ s32 igb_get_cable_length_82580(struct e1000_hw *hw); /* I82580 PHY Diagnostics Status */ #define I82580_DSTATUS_CABLE_LENGTH 0x03FC #define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* 82580 PHY Power Management */ +#define E1000_82580_PHY_POWER_MGMT 0xE14 +#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ +#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ +#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ + /* Enable flexible speed on link-up */ #define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ #define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ @@ -133,4 +143,16 @@ s32 igb_get_cable_length_82580(struct e1000_hw *hw); #define E1000_CABLE_LENGTH_UNDEFINED 0xFF +/* GS40G - I210 PHY defines */ +#define GS40G_PAGE_SELECT 0x16 +#define GS40G_PAGE_SHIFT 16 +#define GS40G_OFFSET_MASK 0xFFFF +#define GS40G_PAGE_2 0x20000 +#define GS40G_MAC_REG2 0x15 +#define GS40G_MAC_LB 0x4140 +#define GS40G_MAC_SPEED_1G 0X0006 +#define GS40G_COPPER_SPEC 0x0010 +#define GS40G_CS_POWER_DOWN 0x0002 +#define GS40G_LINE_LB 0x4000 + #endif diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index ccdf36d503f..35d1e4f2c92 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -352,4 +352,18 @@ #define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ #define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ +#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ +#define E1000_I210_FLMNGCTL 0x12038 +#define E1000_I210_FLMNGDATA 0x1203C +#define E1000_I210_FLMNGCNT 0x12040 + +#define E1000_I210_FLSWCTL 0x12048 +#define E1000_I210_FLSWDATA 0x1204C +#define E1000_I210_FLSWCNT 0x12050 + +#define E1000_I210_FLA 0x1201C + +#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) +#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ + #endif diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 8e33bdd33ee..ae6d3f393a5 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -35,8 +35,8 @@ #include "e1000_82575.h" #include <linux/clocksource.h> -#include <linux/timecompare.h> #include <linux/net_tstamp.h> +#include 
<linux/ptp_clock_kernel.h> #include <linux/bitops.h> #include <linux/if_vlan.h> @@ -65,10 +65,13 @@ struct igb_adapter; #define MAX_Q_VECTORS 8 /* Transmit and receive queues */ -#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \ - (hw->mac.type > e1000_82575 ? 8 : 4)) +#define IGB_MAX_RX_QUEUES ((adapter->vfs_allocated_count ? 2 : \ + (hw->mac.type > e1000_82575 ? 8 : 4))) +#define IGB_MAX_RX_QUEUES_I210 4 +#define IGB_MAX_RX_QUEUES_I211 2 #define IGB_MAX_TX_QUEUES 16 - +#define IGB_MAX_TX_QUEUES_I210 4 +#define IGB_MAX_TX_QUEUES_I211 2 #define IGB_MAX_VF_MC_ENTRIES 30 #define IGB_MAX_VF_FUNCTIONS 8 #define IGB_MAX_VFTA_ENTRIES 128 @@ -328,9 +331,6 @@ struct igb_adapter { /* OS defined structs */ struct pci_dev *pdev; - struct cyclecounter cycles; - struct timecounter clock; - struct timecompare compare; struct hwtstamp_config hwtstamp_config; spinlock_t stats64_lock; @@ -364,6 +364,13 @@ struct igb_adapter { u32 wvbr; int node; u32 *shadow_vfta; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info caps; + struct delayed_work overflow_work; + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; }; #define IGB_FLAG_HAS_MSI (1 << 0) @@ -378,7 +385,6 @@ struct igb_adapter { #define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ #define IGB_82576_TSYNC_SHIFT 19 -#define IGB_82580_TSYNC_SHIFT 24 #define IGB_TS_HDR_LEN 16 enum e1000_state_t { __IGB_TESTING, @@ -414,7 +420,15 @@ extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *); extern bool igb_has_link(struct igb_adapter *adapter); extern void igb_set_ethtool_ops(struct net_device *); extern void igb_power_up_link(struct igb_adapter *); +#ifdef CONFIG_IGB_PTP +extern void igb_ptp_init(struct igb_adapter *adapter); +extern void igb_ptp_remove(struct igb_adapter *adapter); +extern void igb_systim_to_hwtstamp(struct igb_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim); + +#endif static inline s32 igb_reset_phy(struct e1000_hw *hw) { if (hw->phy.ops.reset) diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index e10821a0f24..812d4f963bd 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -335,7 +335,7 @@ static void igb_set_msglevel(struct net_device *netdev, u32 data) static int igb_get_regs_len(struct net_device *netdev) { -#define IGB_REGS_LEN 551 +#define IGB_REGS_LEN 739 return IGB_REGS_LEN * sizeof(u32); } @@ -552,10 +552,49 @@ static void igb_get_regs(struct net_device *netdev, regs_buff[548] = rd32(E1000_TDFT); regs_buff[549] = rd32(E1000_TDFHS); regs_buff[550] = rd32(E1000_TDFPC); - regs_buff[551] = adapter->stats.o2bgptc; - regs_buff[552] = adapter->stats.b2ospc; - regs_buff[553] = adapter->stats.o2bspc; - regs_buff[554] = adapter->stats.b2ogprc; + + if (hw->mac.type > e1000_82580) { + regs_buff[551] = adapter->stats.o2bgptc; + regs_buff[552] = adapter->stats.b2ospc; + regs_buff[553] = adapter->stats.o2bspc; + regs_buff[554] = adapter->stats.b2ogprc; + } + + if (hw->mac.type != e1000_82576) + return; + for (i = 0; i < 12; i++) + regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4)); + for (i = 0; i < 4; i++) + regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[607 + 
i] = rd32(E1000_RDH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[619 + i] = rd32(E1000_RDT(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4)); + + for (i = 0; i < 12; i++) + regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[679 + i] = rd32(E1000_TDH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[691 + i] = rd32(E1000_TDT(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4)); } static int igb_get_eeprom_len(struct net_device *netdev) @@ -624,6 +663,9 @@ static int igb_set_eeprom(struct net_device *netdev, if (eeprom->len == 0) return -EOPNOTSUPP; + if (hw->mac.type == e1000_i211) + return -EOPNOTSUPP; + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) return -EFAULT; @@ -851,6 +893,36 @@ struct igb_reg_test { #define TABLE64_TEST_LO 5 #define TABLE64_TEST_HI 6 +/* i210 reg test */ +static struct igb_reg_test reg_test_i210[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* RDH is read-only for i210, only test RDT. */ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0x900FFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0, 0 } +}; + /* i350 reg test */ static struct igb_reg_test reg_test_i350[] = { { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, @@ -1073,6 +1145,11 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data) test = reg_test_i350; toggle = 0x7FEFF3FF; break; + case e1000_i210: + case e1000_i211: + test = reg_test_i210; + toggle = 0x7FEFF3FF; + break; case e1000_82580: test = reg_test_82580; toggle = 0x7FEFF3FF; @@ -1154,23 +1231,13 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data) static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) { - u16 temp; - u16 checksum = 0; - u16 i; - *data = 0; - /* Read and add up the contents of the EEPROM */ - for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { - if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 
0) { - *data = 1; - break; - } - checksum += temp; - } - /* If Checksum is not Correct return error else test passed */ - if ((checksum != (u16) NVM_SUM) && !(*data)) - *data = 2; + /* Validate eeprom on all parts but i211 */ + if (adapter->hw.mac.type != e1000_i211) { + if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) + *data = 2; + } return *data; } @@ -1236,6 +1303,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) ics_mask = 0x77DCFED5; break; case e1000_i350: + case e1000_i210: + case e1000_i211: ics_mask = 0x77DCFED5; break; default: @@ -1402,23 +1471,35 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrl_reg = 0; + u16 phy_reg = 0; hw->mac.autoneg = false; - if (hw->phy.type == e1000_phy_m88) { + switch (hw->phy.type) { + case e1000_phy_m88: /* Auto-MDI/MDIX Off */ igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); /* reset to update Auto-MDI/MDIX */ igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); /* autoneg off */ igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); - } else if (hw->phy.type == e1000_phy_82580) { + break; + case e1000_phy_82580: /* enable MII loopback */ igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); + break; + case e1000_phy_i210: + /* set loopback speed in PHY */ + igb_read_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2), + &phy_reg); + phy_reg |= GS40G_MAC_SPEED_1G; + igb_write_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2), + phy_reg); + ctrl_reg = rd32(E1000_CTRL_EXT); + default: + break; } - ctrl_reg = rd32(E1000_CTRL); - /* force 1000, set loopback */ igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); @@ -1431,7 +1512,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) E1000_CTRL_FD | /* Force Duplex to FULL */ E1000_CTRL_SLU); /* Set link up enable bit */ - if (hw->phy.type == e1000_phy_m88) + if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210)) ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ wr32(E1000_CTRL, ctrl_reg); @@ -1439,7 +1520,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) /* Disable the receiver on the PHY so when a cable is plugged in, the * PHY does not begin to autoneg when a cable is reconnected to the NIC. */ - if (hw->phy.type == e1000_phy_m88) + if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210)) igb_phy_disable_receiver(adapter); udelay(500); @@ -1704,6 +1785,14 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) *data = 0; goto out; } + if ((adapter->hw.mac.type == e1000_i210) + || (adapter->hw.mac.type == e1000_i211)) { + dev_err(&adapter->pdev->dev, + "Loopback test not supported " + "on this part at this time.\n"); + *data = 0; + goto out; + } *data = igb_setup_desc_rings(adapter); if (*data) goto out; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 8683ca4748c..dd3bfe8cd36 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -60,8 +60,8 @@ #include "igb.h" #define MAJ 3 -#define MIN 2 -#define BUILD 10 +#define MIN 4 +#define BUILD 7 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ __stringify(BUILD) "-k" char igb_driver_name[] = "igb"; @@ -75,6 +75,11 @@ static const struct e1000_info *igb_info_tbl[] = { }; static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, @@ -114,7 +119,6 @@ static void igb_free_all_rx_resources(struct igb_adapter *); static void igb_setup_mrqc(struct igb_adapter *); static int igb_probe(struct pci_dev *, const struct pci_device_id *); static void __devexit igb_remove(struct pci_dev *pdev); -static void igb_init_hw_timer(struct igb_adapter *adapter); static int igb_sw_init(struct igb_adapter *); static int igb_open(struct net_device *); static int igb_close(struct net_device *); @@ -565,33 +569,6 @@ exit: return; } - -/** - * igb_read_clock - read raw cycle counter (to be used by time counter) - */ -static cycle_t igb_read_clock(const struct cyclecounter *tc) -{ - struct igb_adapter *adapter = - container_of(tc, struct igb_adapter, cycles); - struct e1000_hw *hw = &adapter->hw; - u64 stamp = 0; - int shift = 0; - - /* - * The timestamp latches on lowest register read. For the 82580 - * the lowest register is SYSTIMR instead of SYSTIML. However we never - * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it. - */ - if (hw->mac.type >= e1000_82580) { - stamp = rd32(E1000_SYSTIMR) >> 8; - shift = IGB_82580_TSYNC_SHIFT; - } - - stamp |= (u64)rd32(E1000_SYSTIML) << shift; - stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32); - return stamp; -} - /** * igb_get_hw_dev - return device * used by hardware layer to print debugging information @@ -669,6 +646,8 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) case e1000_82575: case e1000_82580: case e1000_i350: + case e1000_i210: + case e1000_i211: default: for (; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->reg_idx = rbase_offset + i; @@ -755,8 +734,11 @@ static int igb_alloc_queues(struct igb_adapter *adapter) if (adapter->hw.mac.type >= e1000_82576) set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); - /* On i350, loopback VLAN packets have the tag byte-swapped. */ - if (adapter->hw.mac.type == e1000_i350) + /* + * On i350, i210, and i211, loopback VLAN packets + * have the tag byte-swapped. + * */ + if (adapter->hw.mac.type >= e1000_i350) set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); adapter->rx_ring[i] = ring; @@ -850,6 +832,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) break; case e1000_82580: case e1000_i350: + case e1000_i210: + case e1000_i211: /* * On 82580 and newer adapters the scheme is similar to 82576 * however instead of ordering column-major we have things @@ -916,6 +900,8 @@ static void igb_configure_msix(struct igb_adapter *adapter) case e1000_82576: case e1000_82580: case e1000_i350: + case e1000_i210: + case e1000_i211: /* Turn on MSI-X capability first, or our settings * won't stick. And it will take days to debug. 
*/ wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | @@ -1062,6 +1048,11 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter) if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) numvecs += adapter->num_tx_queues; + /* i210 and i211 can only have 4 MSIX vectors for rx/tx queues. */ + if ((adapter->hw.mac.type == e1000_i210) + || (adapter->hw.mac.type == e1000_i211)) + numvecs = 4; + /* store the number of vectors reserved for queues */ adapter->num_q_vectors = numvecs; @@ -1069,6 +1060,7 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter) numvecs++; adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), GFP_KERNEL); + if (!adapter->msix_entries) goto msi_only; @@ -1662,6 +1654,8 @@ void igb_reset(struct igb_adapter *adapter) pba &= E1000_RXPBS_SIZE_MASK_82576; break; case e1000_82575: + case e1000_i210: + case e1000_i211: default: pba = E1000_PBA_34K; break; @@ -1746,6 +1740,13 @@ void igb_reset(struct igb_adapter *adapter) if (hw->mac.ops.init_hw(hw)) dev_err(&pdev->dev, "Hardware Error\n"); + /* + * Flow control settings reset on hardware reset, so guarantee flow + * control is off when forcing speed. + */ + if (!hw->mac.autoneg) + igb_force_mac_fc(hw); + igb_init_dmac(adapter, pba); if (!netif_running(adapter->netdev)) igb_power_down_link(adapter); @@ -1850,7 +1851,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, */ if (pdev->is_virtfn) { WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", - pci_name(pdev), pdev->vendor, pdev->device); + pci_name(pdev), pdev->vendor, pdev->device); return -EINVAL; } @@ -2004,11 +2005,16 @@ static int __devinit igb_probe(struct pci_dev *pdev, * known good starting state */ hw->mac.ops.reset_hw(hw); - /* make sure the NVM is good */ - if (hw->nvm.ops.validate(hw) < 0) { - dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); - err = -EIO; - goto err_eeprom; + /* + * make sure the NVM is good , i211 parts have special NVM that + * doesn't contain a checksum + */ + if (hw->mac.type != e1000_i211) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } } /* copy the MAC address out of the NVM */ @@ -2113,9 +2119,11 @@ static int __devinit igb_probe(struct pci_dev *pdev, } #endif +#ifdef CONFIG_IGB_PTP /* do hw tstamp init after resetting */ - igb_init_hw_timer(adapter); + igb_ptp_init(adapter); +#endif dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); /* print bus type/speed/width info */ dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", @@ -2140,6 +2148,8 @@ static int __devinit igb_probe(struct pci_dev *pdev, adapter->num_rx_queues, adapter->num_tx_queues); switch (hw->mac.type) { case e1000_i350: + case e1000_i210: + case e1000_i211: igb_set_eee_i350(hw); break; default: @@ -2187,7 +2197,10 @@ static void __devexit igb_remove(struct pci_dev *pdev) struct e1000_hw *hw = &adapter->hw; pm_runtime_get_noresume(&pdev->dev); +#ifdef CONFIG_IGB_PTP + igb_ptp_remove(adapter); +#endif /* * The watchdog timer may be rescheduled, so explicitly * disable watchdog from being rescheduled. @@ -2263,9 +2276,14 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter) { #ifdef CONFIG_PCI_IOV struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; int old_vfs = igb_find_enabled_vfs(adapter); int i; + /* Virtualization features not supported on i210 family. 
*/ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) + return; + if (old_vfs) { dev_info(&pdev->dev, "%d pre-allocated VFs found - override " "max_vfs setting of %d\n", old_vfs, max_vfs); @@ -2277,6 +2295,7 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter) adapter->vf_data = kcalloc(adapter->vfs_allocated_count, sizeof(struct vf_data_storage), GFP_KERNEL); + /* if allocation failed then we do not support SR-IOV */ if (!adapter->vf_data) { adapter->vfs_allocated_count = 0; @@ -2307,112 +2326,6 @@ out: } /** - * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp - * @adapter: board private structure to initialize - * - * igb_init_hw_timer initializes the function pointer and values for the hw - * timer found in hardware. - **/ -static void igb_init_hw_timer(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - switch (hw->mac.type) { - case e1000_i350: - case e1000_82580: - memset(&adapter->cycles, 0, sizeof(adapter->cycles)); - adapter->cycles.read = igb_read_clock; - adapter->cycles.mask = CLOCKSOURCE_MASK(64); - adapter->cycles.mult = 1; - /* - * The 82580 timesync updates the system timer every 8ns by 8ns - * and the value cannot be shifted. Instead we need to shift - * the registers to generate a 64bit timer value. As a result - * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by - * 24 in order to generate a larger value for synchronization. - */ - adapter->cycles.shift = IGB_82580_TSYNC_SHIFT; - /* disable system timer temporarily by setting bit 31 */ - wr32(E1000_TSAUXC, 0x80000000); - wrfl(); - - /* Set registers so that rollover occurs soon to test this. */ - wr32(E1000_SYSTIMR, 0x00000000); - wr32(E1000_SYSTIML, 0x80000000); - wr32(E1000_SYSTIMH, 0x000000FF); - wrfl(); - - /* enable system timer by clearing bit 31 */ - wr32(E1000_TSAUXC, 0x0); - wrfl(); - - timecounter_init(&adapter->clock, - &adapter->cycles, - ktime_to_ns(ktime_get_real())); - /* - * Synchronize our NIC clock against system wall clock. NIC - * time stamp reading requires ~3us per sample, each sample - * was pretty stable even under load => only require 10 - * samples for each offset comparison. - */ - memset(&adapter->compare, 0, sizeof(adapter->compare)); - adapter->compare.source = &adapter->clock; - adapter->compare.target = ktime_get_real; - adapter->compare.num_samples = 10; - timecompare_update(&adapter->compare, 0); - break; - case e1000_82576: - /* - * Initialize hardware timer: we keep it running just in case - * that some program needs it later on. - */ - memset(&adapter->cycles, 0, sizeof(adapter->cycles)); - adapter->cycles.read = igb_read_clock; - adapter->cycles.mask = CLOCKSOURCE_MASK(64); - adapter->cycles.mult = 1; - /** - * Scale the NIC clock cycle by a large factor so that - * relatively small clock corrections can be added or - * subtracted at each clock tick. The drawbacks of a large - * factor are a) that the clock register overflows more quickly - * (not such a big deal) and b) that the increment per tick has - * to fit into 24 bits. As a result we need to use a shift of - * 19 so we can fit a value of 16 into the TIMINCA register. - */ - adapter->cycles.shift = IGB_82576_TSYNC_SHIFT; - wr32(E1000_TIMINCA, - (1 << E1000_TIMINCA_16NS_SHIFT) | - (16 << IGB_82576_TSYNC_SHIFT)); - - /* Set registers so that rollover occurs soon to test this. 
*/ - wr32(E1000_SYSTIML, 0x00000000); - wr32(E1000_SYSTIMH, 0xFF800000); - wrfl(); - - timecounter_init(&adapter->clock, - &adapter->cycles, - ktime_to_ns(ktime_get_real())); - /* - * Synchronize our NIC clock against system wall clock. NIC - * time stamp reading requires ~3us per sample, each sample - * was pretty stable even under load => only require 10 - * samples for each offset comparison. - */ - memset(&adapter->compare, 0, sizeof(adapter->compare)); - adapter->compare.source = &adapter->clock; - adapter->compare.target = ktime_get_real; - adapter->compare.num_samples = 10; - timecompare_update(&adapter->compare, 0); - break; - case e1000_82575: - /* 82575 does not support timesync */ - default: - break; - } - -} - -/** * igb_sw_init - Initialize general software structures (struct igb_adapter) * @adapter: board private structure to initialize * @@ -2457,11 +2370,28 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) } else adapter->vfs_allocated_count = max_vfs; break; + case e1000_i210: + case e1000_i211: + adapter->vfs_allocated_count = 0; + break; default: break; } #endif /* CONFIG_PCI_IOV */ - adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); + switch (hw->mac.type) { + case e1000_i210: + adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I210, + num_online_cpus()); + break; + case e1000_i211: + adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I211, + num_online_cpus()); + break; + default: + adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, + num_online_cpus()); + break; + } /* i350 cannot do RSS and SR-IOV at the same time */ if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count) adapter->rss_queues = 1; @@ -2491,7 +2421,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) /* Explicitly disable IRQ since the NIC can be in any state. 
*/ igb_irq_disable(adapter); - if (hw->mac.type == e1000_i350) + if (hw->mac.type >= e1000_i350) adapter->flags &= ~IGB_FLAG_DMAC; set_bit(__IGB_DOWN, &adapter->state); @@ -2944,6 +2874,17 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) /* Don't need to set TUOFL or IPOFL, they default to 1 */ wr32(E1000_RXCSUM, rxcsum); + /* + * Generate RSS hash based on TCP port numbers and/or + * IPv4/v6 src and dst addresses since UDP cannot be + * hashed reliably due to IP fragmentation + */ + + mrqc = E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP | + E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; /* If VMDq is enabled then we set the appropriate mode for that, else * we default to RSS so that an RSS hash is calculated per packet even @@ -2959,25 +2900,15 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) wr32(E1000_VT_CTL, vtctl); } if (adapter->rss_queues > 1) - mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q; + mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q; else - mrqc = E1000_MRQC_ENABLE_VMDQ; + mrqc |= E1000_MRQC_ENABLE_VMDQ; } else { - mrqc = E1000_MRQC_ENABLE_RSS_4Q; + if (hw->mac.type != e1000_i211) + mrqc |= E1000_MRQC_ENABLE_RSS_4Q; } igb_vmm_control(adapter); - /* - * Generate RSS hash based on TCP port numbers and/or - * IPv4/v6 src and dst addresses since UDP cannot be - * hashed reliably due to IP fragmentation - */ - mrqc |= E1000_MRQC_RSS_FIELD_IPV4 | - E1000_MRQC_RSS_FIELD_IPV4_TCP | - E1000_MRQC_RSS_FIELD_IPV6 | - E1000_MRQC_RSS_FIELD_IPV6_TCP | - E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; - wr32(E1000_MRQC, mrqc); } @@ -3579,7 +3510,7 @@ static void igb_set_rx_mode(struct net_device *netdev) * we will have issues with VLAN tag stripping not being done for frames * that are only arriving because we are the default pool */ - if (hw->mac.type < e1000_82576) + if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350)) return; vmolr |= rd32(E1000_VMOLR(vfn)) & @@ -3676,7 +3607,7 @@ static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event) bool ret = false; u32 ctrl_ext, thstat; - /* check for thermal sensor event on i350, copper only */ + /* check for thermal sensor event on i350 copper only */ if (hw->mac.type == e1000_i350) { thstat = rd32(E1000_THSTAT); ctrl_ext = rd32(E1000_CTRL_EXT); @@ -5721,35 +5652,7 @@ static int igb_poll(struct napi_struct *napi, int budget) return 0; } -/** - * igb_systim_to_hwtstamp - convert system time value to hw timestamp - * @adapter: board private structure - * @shhwtstamps: timestamp structure to update - * @regval: unsigned 64bit system time value. - * - * We need to convert the system time value stored in the RX/TXSTMP registers - * into a hwtstamp which can be used by the upper level timestamping functions - */ -static void igb_systim_to_hwtstamp(struct igb_adapter *adapter, - struct skb_shared_hwtstamps *shhwtstamps, - u64 regval) -{ - u64 ns; - - /* - * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to - * 24 to match clock shift we setup earlier. 
- */ - if (adapter->hw.mac.type >= e1000_82580) - regval <<= IGB_82580_TSYNC_SHIFT; - - ns = timecounter_cyc2time(&adapter->clock, regval); - timecompare_update(&adapter->compare, ns); - memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); - shhwtstamps->hwtstamp = ns_to_ktime(ns); - shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns); -} - +#ifdef CONFIG_IGB_PTP /** * igb_tx_hwtstamp - utility function which checks for TX time stamp * @q_vector: pointer to q_vector containing needed info @@ -5779,6 +5682,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, skb_tstamp_tx(buffer_info->skb, &shhwtstamps); } +#endif /** * igb_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: pointer to q_vector containing needed info @@ -5822,9 +5726,11 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; +#ifdef CONFIG_IGB_PTP /* retrieve hardware timestamp */ igb_tx_hwtstamp(q_vector, tx_buffer); +#endif /* free the skb */ dev_kfree_skb_any(tx_buffer->skb); tx_buffer->skb = NULL; @@ -5996,6 +5902,7 @@ static inline void igb_rx_hash(struct igb_ring *ring, skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); } +#ifdef CONFIG_IGB_PTP static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -6035,6 +5942,7 @@ static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); } +#endif static void igb_rx_vlan(struct igb_ring *ring, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -6145,7 +6053,9 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) goto next_desc; } +#ifdef CONFIG_IGB_PTP igb_rx_hwtstamp(q_vector, rx_desc, skb); +#endif igb_rx_hash(rx_ring, rx_desc, skb); igb_rx_checksum(rx_ring, rx_desc, skb); igb_rx_vlan(rx_ring, rx_desc, skb); @@ -7162,6 +7072,8 @@ static void igb_vmm_control(struct igb_adapter *adapter) switch (hw->mac.type) { case e1000_82575: + case e1000_i210: + case e1000_i211: default: /* replication is not supported for 82575 */ return; @@ -7235,6 +7147,9 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) /* watchdog timer= +-1000 usec in 32usec intervals */ reg |= (1000 >> 5); + + /* Disable BMC-to-OS Watchdog Enable */ + reg &= ~E1000_DMACR_DC_BMC2OSW_EN; wr32(E1000_DMACR, reg); /* diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c new file mode 100644 index 00000000000..d5ee7fa5072 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -0,0 +1,385 @@ +/* + * PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580 + * + * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ +#include <linux/module.h> +#include <linux/device.h> +#include <linux/pci.h> + +#include "igb.h" + +#define INCVALUE_MASK 0x7fffffff +#define ISGN 0x80000000 + +/* + * The 82580 timesync updates the system timer every 8ns by 8ns, + * and this update value cannot be reprogrammed. + * + * Neither the 82576 nor the 82580 offer registers wide enough to hold + * nanoseconds time values for very long. For the 82580, SYSTIM always + * counts nanoseconds, but the upper 24 bits are not availible. The + * frequency is adjusted by changing the 32 bit fractional nanoseconds + * register, TIMINCA. + * + * For the 82576, the SYSTIM register time unit is affect by the + * choice of the 24 bit TININCA:IV (incvalue) field. Five bits of this + * field are needed to provide the nominal 16 nanosecond period, + * leaving 19 bits for fractional nanoseconds. + * + * We scale the NIC clock cycle by a large factor so that relatively + * small clock corrections can be added or subtracted at each clock + * tick. The drawbacks of a large factor are a) that the clock + * register overflows more quickly (not such a big deal) and b) that + * the increment per tick has to fit into 24 bits. As a result we + * need to use a shift of 19 so we can fit a value of 16 into the + * TIMINCA register. + * + * + * SYSTIMH SYSTIML + * +--------------+ +---+---+------+ + * 82576 | 32 | | 8 | 5 | 19 | + * +--------------+ +---+---+------+ + * \________ 45 bits _______/ fract + * + * +----------+---+ +--------------+ + * 82580 | 24 | 8 | | 32 | + * +----------+---+ +--------------+ + * reserved \______ 40 bits _____/ + * + * + * The 45 bit 82576 SYSTIM overflows every + * 2^45 * 10^-9 / 3600 = 9.77 hours. + * + * The 40 bit 82580 SYSTIM overflows every + * 2^40 * 10^-9 / 60 = 18.3 minutes. + */ + +#define IGB_OVERFLOW_PERIOD (HZ * 60 * 9) +#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT) +#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1) +#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) +#define IGB_NBITS_82580 40 + +/* + * SYSTIM read access for the 82576 + */ + +static cycle_t igb_82576_systim_read(const struct cyclecounter *cc) +{ + u64 val; + u32 lo, hi; + struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); + struct e1000_hw *hw = &igb->hw; + + lo = rd32(E1000_SYSTIML); + hi = rd32(E1000_SYSTIMH); + + val = ((u64) hi) << 32; + val |= lo; + + return val; +} + +/* + * SYSTIM read access for the 82580 + */ + +static cycle_t igb_82580_systim_read(const struct cyclecounter *cc) +{ + u64 val; + u32 lo, hi, jk; + struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); + struct e1000_hw *hw = &igb->hw; + + /* + * The timestamp latches on lowest register read. For the 82580 + * the lowest register is SYSTIMR instead of SYSTIML. However we only + * need to provide nanosecond resolution, so we just ignore it. 
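+ * The SYSTIMR read below is still required, though, since reading the lowest register is what latches SYSTIML and SYSTIMH.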
+ */ + jk = rd32(E1000_SYSTIMR); + lo = rd32(E1000_SYSTIML); + hi = rd32(E1000_SYSTIMH); + + val = ((u64) hi) << 32; + val |= lo; + + return val; +} + +/* + * PTP clock operations + */ + +static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + u64 rate; + u32 incvalue; + int neg_adj = 0; + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps); + struct e1000_hw *hw = &igb->hw; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + rate = ppb; + rate <<= 14; + rate = div_u64(rate, 1953125); + + incvalue = 16 << IGB_82576_TSYNC_SHIFT; + + if (neg_adj) + incvalue -= rate; + else + incvalue += rate; + + wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK)); + + return 0; +} + +static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + u64 rate; + u32 inca; + int neg_adj = 0; + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps); + struct e1000_hw *hw = &igb->hw; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + rate = ppb; + rate <<= 26; + rate = div_u64(rate, 1953125); + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + wr32(E1000_TIMINCA, inca); + + return 0; +} + +static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + s64 now; + unsigned long flags; + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps); + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + now = timecounter_read(&igb->tc); + now += delta; + timecounter_init(&igb->tc, &igb->cc, now); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + u64 ns; + u32 remainder; + unsigned long flags; + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps); + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + ns = timecounter_read(&igb->tc); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); + ts->tv_nsec = remainder; + + return 0; +} + +static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts) +{ + u64 ns; + unsigned long flags; + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps); + + ns = ts->tv_sec * 1000000000ULL; + ns += ts->tv_nsec; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + timecounter_init(&igb->tc, &igb->cc, ns); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int ptp_82576_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +static int ptp_82580_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +static void igb_overflow_check(struct work_struct *work) +{ + struct timespec ts; + struct igb_adapter *igb = + container_of(work, struct igb_adapter, overflow_work.work); + + igb_gettime(&igb->caps, &ts); + + pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec); + + schedule_delayed_work(&igb->overflow_work, IGB_OVERFLOW_PERIOD); +} + +void igb_ptp_init(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + case e1000_i350: + case e1000_82580: + adapter->caps.owner = THIS_MODULE; + strcpy(adapter->caps.name, "igb-82580"); + adapter->caps.max_adj = 62499999; + adapter->caps.n_ext_ts = 0; + adapter->caps.pps = 0; + adapter->caps.adjfreq = ptp_82580_adjfreq; + adapter->caps.adjtime = igb_adjtime; + adapter->caps.gettime = igb_gettime; + adapter->caps.settime = 
igb_settime; + adapter->caps.enable = ptp_82580_enable; + adapter->cc.read = igb_82580_systim_read; + adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580); + adapter->cc.mult = 1; + adapter->cc.shift = 0; + /* Enable the timer functions by clearing bit 31. */ + wr32(E1000_TSAUXC, 0x0); + break; + + case e1000_82576: + adapter->caps.owner = THIS_MODULE; + strcpy(adapter->caps.name, "igb-82576"); + adapter->caps.max_adj = 1000000000; + adapter->caps.n_ext_ts = 0; + adapter->caps.pps = 0; + adapter->caps.adjfreq = ptp_82576_adjfreq; + adapter->caps.adjtime = igb_adjtime; + adapter->caps.gettime = igb_gettime; + adapter->caps.settime = igb_settime; + adapter->caps.enable = ptp_82576_enable; + adapter->cc.read = igb_82576_systim_read; + adapter->cc.mask = CLOCKSOURCE_MASK(64); + adapter->cc.mult = 1; + adapter->cc.shift = IGB_82576_TSYNC_SHIFT; + /* Dial the nominal frequency. */ + wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); + break; + + default: + adapter->ptp_clock = NULL; + return; + } + + wrfl(); + + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + + INIT_DELAYED_WORK(&adapter->overflow_work, igb_overflow_check); + + spin_lock_init(&adapter->tmreg_lock); + + schedule_delayed_work(&adapter->overflow_work, IGB_OVERFLOW_PERIOD); + + adapter->ptp_clock = ptp_clock_register(&adapter->caps); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n"); + } else + dev_info(&adapter->pdev->dev, "added PHC on %s\n", + adapter->netdev->name); +} + +void igb_ptp_remove(struct igb_adapter *adapter) +{ + cancel_delayed_work_sync(&adapter->overflow_work); + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + dev_info(&adapter->pdev->dev, "removed PHC on %s\n", + adapter->netdev->name); + } +} + +/** + * igb_systim_to_hwtstamp - convert system time value to hw timestamp + * @adapter: board private structure + * @hwtstamps: timestamp structure to update + * @systim: unsigned 64bit system time value. + * + * We need to convert the system time value stored in the RX/TXSTMP registers + * into a hwtstamp which can be used by the upper level timestamping functions. + * + * The 'tmreg_lock' spinlock is used to protect the consistency of the + * system time value. This is needed because reading the 64 bit time + * value involves reading two (or three) 32 bit registers. The first + * read latches the value. Ditto for writing. + * + * In addition, here have extended the system time with an overflow + * counter in software. 
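+ * The delayed overflow work item calls igb_gettime() every IGB_OVERFLOW_PERIOD, which keeps the timecounter updated well within each SYSTIM wrap interval.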
+ **/ +void igb_systim_to_hwtstamp(struct igb_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + u64 ns; + unsigned long flags; + + switch (adapter->hw.mac.type) { + case e1000_i210: + case e1000_i211: + case e1000_i350: + case e1000_82580: + case e1000_82576: + break; + default: + return; + } + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + ns = timecounter_cyc2time(&adapter->tc, systim); + + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + memset(hwtstamps, 0, sizeof(*hwtstamps)); + hwtstamps->hwtstamp = ns_to_ktime(ns); +} diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 8be1d1b2132..0bdf06bc5c4 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -34,9 +34,11 @@ obj-$(CONFIG_IXGBE) += ixgbe.o ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ - ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o + ixgbe_mbx.o ixgbe_x540.o ixgbe_sysfs.o ixgbe_lib.o ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe_dcb_82599.o ixgbe_dcb_nl.o +ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o + ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 81b15558953..3ef3c5284e5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -36,6 +36,12 @@ #include <linux/aer.h> #include <linux/if_vlan.h> +#ifdef CONFIG_IXGBE_PTP +#include <linux/clocksource.h> +#include <linux/net_tstamp.h> +#include <linux/ptp_clock_kernel.h> +#endif /* CONFIG_IXGBE_PTP */ + #include "ixgbe_type.h" #include "ixgbe_common.h" #include "ixgbe_dcb.h" @@ -96,6 +102,7 @@ #define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5) #define IXGBE_TX_FLAGS_FSO (u32)(1 << 6) #define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7) +#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8) #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -331,6 +338,26 @@ struct ixgbe_q_vector { /* for dynamic allocation of rings associated with this q_vector */ struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; }; +#ifdef CONFIG_IXGBE_HWMON + +#define IXGBE_HWMON_TYPE_LOC 0 +#define IXGBE_HWMON_TYPE_TEMP 1 +#define IXGBE_HWMON_TYPE_CAUTION 2 +#define IXGBE_HWMON_TYPE_MAX 3 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct ixgbe_hw *hw; + struct ixgbe_thermal_diode_data *sensor; + char name[12]; +}; + +struct hwmon_buff { + struct device *device; + struct hwmon_attr *hwmon_list; + unsigned int n_hwmon; +}; +#endif /* CONFIG_IXGBE_HWMON */ /* * microsecond values for various ITR rates shifted by 2 to fit itr register @@ -438,6 +465,8 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7) #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) +#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED (u32)(1 << 10) +#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11) /* Tx fast path data */ int num_tx_queues; @@ -525,6 +554,17 @@ struct ixgbe_adapter { u32 interrupt_event; u32 led_reg; +#ifdef CONFIG_IXGBE_PTP + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + unsigned long last_overflow_check; + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + u32 base_incval; + u32 cycle_speed; +#endif /* CONFIG_IXGBE_PTP */ + /* SR-IOV */ 
DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); unsigned int num_vfs; @@ -535,6 +575,10 @@ struct ixgbe_adapter { u32 timer_event_accumulator; u32 vferr_refcount; + struct kobject *info_kobj; +#ifdef CONFIG_IXGBE_HWMON + struct hwmon_buff ixgbe_hwmon_buff; +#endif /* CONFIG_IXGBE_HWMON */ }; struct ixgbe_fdir_filter { @@ -597,6 +641,8 @@ extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *); extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); +extern int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, + u16 subdevice_id); extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *, @@ -626,10 +672,15 @@ extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, union ixgbe_atr_input *mask); extern void ixgbe_set_rx_mode(struct net_device *netdev); #ifdef CONFIG_IXGBE_DCB +extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); extern int ixgbe_setup_tc(struct net_device *dev, u8 tc); #endif extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); extern void ixgbe_do_reset(struct net_device *netdev); +#ifdef CONFIG_IXGBE_HWMON +extern void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter); +extern int ixgbe_sysfs_init(struct ixgbe_adapter *adapter); +#endif /* CONFIG_IXGBE_HWMON */ #ifdef IXGBE_FCOE extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); extern int ixgbe_fso(struct ixgbe_ring *tx_ring, @@ -660,4 +711,18 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) return netdev_get_tx_queue(ring->netdev, ring->queue_index); } +#ifdef CONFIG_IXGBE_PTP +extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter); +extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); +extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); +extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb); +extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb); +extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, + struct ifreq *ifr, int cmd); +extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); +extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr); +#endif /* CONFIG_IXGBE_PTP */ + #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 85d2e2c4ce4..42537336110 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -91,29 +91,6 @@ out: IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); } -/** - * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count - * @hw: pointer to hardware structure - * - * Read PCIe configuration space, and get the MSI-X vector count from - * the capabilities table. 
- **/ -static u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw) -{ - struct ixgbe_adapter *adapter = hw->back; - u16 msix_count; - pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82598_CAPS, - &msix_count); - msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; - - /* MSI-X count is zero-based in HW, so increment to give proper value */ - msix_count++; - - return msix_count; -} - -/** - */ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; @@ -126,7 +103,7 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; - mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw); + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); return 0; } @@ -347,24 +324,33 @@ out: /** * ixgbe_fc_enable_82598 - Enable flow control * @hw: pointer to hardware structure - * @packetbuf_num: packet buffer number (0-7) * * Enable flow control according to the current settings. **/ -static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) +static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) { s32 ret_val = 0; u32 fctrl_reg; u32 rmcs_reg; u32 reg; + u32 fcrtl, fcrth; u32 link_speed = 0; + int i; bool link_up; -#ifdef CONFIG_DCB - if (hw->fc.requested_mode == ixgbe_fc_pfc) + /* + * Validate the water mark configuration for packet buffer 0. Zero + * water marks indicate that the packet buffer was not configured + * and the watermarks for packet buffer 0 should always be configured. + */ + if (!hw->fc.low_water || + !hw->fc.high_water[0] || + !hw->fc.pause_time) { + hw_dbg(hw, "Invalid water mark configuration\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; goto out; + } -#endif /* CONFIG_DCB */ /* * On 82598 having Rx FC on causes resets while doing 1G * so if it's on turn it off once we know link_speed. For @@ -386,9 +372,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) } /* Negotiate the fc mode to use */ - ret_val = ixgbe_fc_autoneg(hw); - if (ret_val == IXGBE_ERR_FLOW_CONTROL) - goto out; + ixgbe_fc_autoneg(hw); /* Disable any previous flow control settings */ fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); @@ -405,9 +389,6 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) * 2: Tx flow control is enabled (we can send pause frames but * we do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. -#ifdef CONFIG_DCB - * 4: Priority Flow Control is enabled. -#endif * other: Invalid. */ switch (hw->fc.current_mode) { @@ -440,11 +421,6 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) fctrl_reg |= IXGBE_FCTRL_RFCE; rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; break; -#ifdef CONFIG_DCB - case ixgbe_fc_pfc: - goto out; - break; -#endif /* CONFIG_DCB */ default: hw_dbg(hw, "Flow control param set incorrectly\n"); ret_val = IXGBE_ERR_CONFIG; @@ -457,29 +433,29 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); - /* Set up and enable Rx high/low water mark thresholds, enable XON. 
*/ - if (hw->fc.current_mode & ixgbe_fc_tx_pause) { - reg = hw->fc.low_water << 6; - if (hw->fc.send_xon) - reg |= IXGBE_FCRTL_XONE; - - IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg); + fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE; - reg = hw->fc.high_water[packetbuf_num] << 6; - reg |= IXGBE_FCRTH_FCEN; + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth); + } else { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); + } - IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg); } /* Configure pause time (2 TCs per register) */ - reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); - if ((packetbuf_num & 1) == 0) - reg = (reg & 0xFFFF0000) | hw->fc.pause_time; - else - reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16); - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg); + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); out: return ret_val; @@ -1300,6 +1276,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = { .set_fw_drv_ver = NULL, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, .release_swfw_sync = &ixgbe_release_swfw_sync, + .get_thermal_sensor_data = NULL, + .init_thermal_sensor_thresh = NULL, }; static struct ixgbe_eeprom_operations eeprom_ops_82598 = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 9c14685358e..dee64d2703f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -2119,6 +2119,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, .release_swfw_sync = &ixgbe_release_swfw_sync, + .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic, + .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 49aa41fe7b8..77ac41feb0f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -47,13 +47,6 @@ static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); static void ixgbe_release_eeprom(struct ixgbe_hw *hw); static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); -static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw); -static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw); -static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw); -static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); -static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, - u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); -static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); @@ -64,6 +57,172 @@ static s32 
ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); /** + * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow + * control + * @hw: pointer to hardware structure + * + * There are several phys that do not support autoneg flow control. This + * function check the device id to see if the associated phy supports + * autoneg flow control. + **/ +static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) +{ + + switch (hw->device_id) { + case IXGBE_DEV_ID_X540T: + return 0; + case IXGBE_DEV_ID_82599_T3_LOM: + return 0; + default: + return IXGBE_ERR_FC_NOT_SUPPORTED; + } +} + +/** + * ixgbe_setup_fc - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) +{ + s32 ret_val = 0; + u32 reg = 0, reg_bp = 0; + u16 reg_cu = 0; + + /* + * Validate the requested mode. Strict IEEE mode does not allow + * ixgbe_fc_rx_pause because it will cause us to fail at UNH. + */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* + * 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* + * Set up the 1G and 10G flow control advertisement registers so the + * HW will be able to do fc autoneg once the cable is plugged in. If + * we link at 10G, the 1G advertisement is harmless and vice versa. + */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + case ixgbe_media_type_backplane: + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC); + break; + case ixgbe_media_type_copper: + hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, ®_cu); + break; + default: + break; + } + + /* + * The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + /* Flow control completely disabled by software override. */ + reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE); + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + reg |= IXGBE_PCS1GANA_ASM_PAUSE; + reg &= ~IXGBE_PCS1GANA_SYM_PAUSE; + if (hw->phy.media_type == ixgbe_media_type_backplane) { + reg_bp |= IXGBE_AUTOC_ASM_PAUSE; + reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE; + } else if (hw->phy.media_type == ixgbe_media_type_copper) { + reg_cu |= IXGBE_TAF_ASM_PAUSE; + reg_cu &= ~IXGBE_TAF_SYM_PAUSE; + } + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. 
Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE; + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp |= IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE; + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE; + break; + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; + } + + if (hw->mac.type != ixgbe_mac_X540) { + /* + * Enable auto-negotiation between the MAC & PHY; + * the MAC will advertise clause 37 flow control. + */ + IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); + + /* Disable AN timeout */ + if (hw->fc.strict_ieee) + reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; + + IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); + hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); + } + + /* + * AUTOC restart handles negotiation of 1G and 10G on backplane + * and copper. There is no need to set the PCS1GCTL register. + * + */ + if (hw->phy.media_type == ixgbe_media_type_backplane) { + reg_bp |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); + } else if ((hw->phy.media_type == ixgbe_media_type_copper) && + (ixgbe_device_supports_autoneg_fc(hw) == 0)) { + hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, reg_cu); + } + + hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); +out: + return ret_val; +} + +/** * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure * @@ -95,7 +254,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) IXGBE_WRITE_FLUSH(hw); /* Setup flow control */ - ixgbe_setup_fc(hw, 0); + ixgbe_setup_fc(hw); /* Clear adapter stopped flag */ hw->adapter_stopped = false; @@ -1923,30 +2082,36 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) /** * ixgbe_fc_enable_generic - Enable flow control * @hw: pointer to hardware structure - * @packetbuf_num: packet buffer number (0-7) * * Enable flow control according to the current settings. **/ -s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) { s32 ret_val = 0; u32 mflcn_reg, fccfg_reg; u32 reg; u32 fcrtl, fcrth; + int i; -#ifdef CONFIG_DCB - if (hw->fc.requested_mode == ixgbe_fc_pfc) + /* + * Validate the water mark configuration for packet buffer 0. Zero + * water marks indicate that the packet buffer was not configured + * and the watermarks for packet buffer 0 should always be configured. 
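For context on the threshold writes that follow: the low/high water marks are kept in kilobytes, so shifting left by 10 converts them to the byte counts the FCRTL/FCRTH registers expect, with the XON/enable bit OR'd in. A small sketch with stand-in bit values (not the real register definitions):

    #include <stdio.h>

    #define XONE_BIT 0x80000000u            /* stand-in for the FCRTL XON enable bit */
    #define FCEN_BIT 0x80000000u            /* stand-in for the FCRTH enable bit */

    /* water marks are stored in KB; the registers want a byte count */
    static unsigned int fcrtl_val(unsigned int low_water_kb)
    {
            return (low_water_kb << 10) | XONE_BIT;
    }

    static unsigned int fcrth_val(unsigned int high_water_kb)
    {
            return (high_water_kb << 10) | FCEN_BIT;
    }

    int main(void)
    {
            /* a 64 KB low water mark becomes 0x10000 bytes plus the XON bit */
            printf("FCRTL=0x%08x FCRTH=0x%08x\n", fcrtl_val(64), fcrth_val(96));
            return 0;
    }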
+ */ + if (!hw->fc.low_water || + !hw->fc.high_water[0] || + !hw->fc.pause_time) { + hw_dbg(hw, "Invalid water mark configuration\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; goto out; + } -#endif /* CONFIG_DCB */ /* Negotiate the fc mode to use */ - ret_val = ixgbe_fc_autoneg(hw); - if (ret_val == IXGBE_ERR_FLOW_CONTROL) - goto out; + ixgbe_fc_autoneg(hw); /* Disable any previous flow control settings */ mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); - mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE); + mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); @@ -1959,9 +2124,6 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) * 2: Tx flow control is enabled (we can send pause frames but * we do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. -#ifdef CONFIG_DCB - * 4: Priority Flow Control is enabled. -#endif * other: Invalid. */ switch (hw->fc.current_mode) { @@ -1994,11 +2156,6 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) mflcn_reg |= IXGBE_MFLCN_RFCE; fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; break; -#ifdef CONFIG_DCB - case ixgbe_fc_pfc: - goto out; - break; -#endif /* CONFIG_DCB */ default: hw_dbg(hw, "Flow control param set incorrectly\n"); ret_val = IXGBE_ERR_CONFIG; @@ -2011,100 +2168,86 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); - fcrtl = hw->fc.low_water << 10; + fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE; - if (hw->fc.current_mode & ixgbe_fc_tx_pause) { - fcrth = hw->fc.high_water[packetbuf_num] << 10; - fcrth |= IXGBE_FCRTH_FCEN; - if (hw->fc.send_xon) - fcrtl |= IXGBE_FCRTL_XONE; - } else { - /* - * If Tx flow control is disabled, set our high water mark - * to Rx FIFO size minus 32 in order prevent Tx switch - * loopback from stalling on DMA. - */ - fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)) - 32; - } + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); + fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + } else { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); + /* + * In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the maximum FCRTH value. This allows the Tx + * switch to function even under heavy Rx workloads. 
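The branch above can be summarized as: a traffic class only gets real thresholds when Tx flow control is active and a high water mark was configured for it; otherwise the XON threshold is cleared and the high water mark is parked just below the Rx packet buffer size. A hedged sketch of just that decision, with plain parameters instead of the adapter state and rxpb_bytes standing in for the RXPBSIZE read:

    struct tc_thresholds {
            unsigned int fcrtl;
            unsigned int fcrth;
    };

    static struct tc_thresholds pick_thresholds(int tx_pause_active,
                                                unsigned int fcrtl,
                                                unsigned int fcrth,
                                                unsigned int rxpb_bytes)
    {
            struct tc_thresholds t;

            if (tx_pause_active && fcrth) {
                    /* this TC really participates in flow control */
                    t.fcrtl = fcrtl;
                    t.fcrth = fcrth;
            } else {
                    /*
                     * No flow control on this TC: never signal XON, and keep
                     * the high water mark near the buffer size so the internal
                     * Tx switch is never left waiting for an XON.
                     */
                    t.fcrtl = 0;
                    t.fcrth = rxpb_bytes - 32;
            }
            return t;
    }

    int main(void)
    {
            /* disabled TC in an assumed 512 KB packet buffer */
            struct tc_thresholds t = pick_thresholds(0, 0, 0, 512 * 1024);
            return t.fcrth == 512 * 1024 - 32 ? 0 : 1;
    }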
+ */ + fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32; + } - IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth); - IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth); + } /* Configure pause time (2 TCs per register) */ - reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); - if ((packetbuf_num & 1) == 0) - reg = (reg & 0xFFFF0000) | hw->fc.pause_time; - else - reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16); - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg); + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); out: return ret_val; } /** - * ixgbe_fc_autoneg - Configure flow control + * ixgbe_negotiate_fc - Negotiate flow control * @hw: pointer to hardware structure + * @adv_reg: flow control advertised settings + * @lp_reg: link partner's flow control settings + * @adv_sym: symmetric pause bit in advertisement + * @adv_asm: asymmetric pause bit in advertisement + * @lp_sym: symmetric pause bit in link partner advertisement + * @lp_asm: asymmetric pause bit in link partner advertisement * - * Compares our advertised flow control capabilities to those advertised by - * our link partner, and determines the proper flow control mode to use. + * Find the intersection between advertised settings and link partner's + * advertised settings **/ -s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) +static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) { - s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; - ixgbe_link_speed speed; - bool link_up; - - if (hw->fc.disable_fc_autoneg) - goto out; - - /* - * AN should have completed when the cable was plugged in. - * Look for reasons to bail out. Bail out if: - * - FC autoneg is disabled, or if - * - link is not up. - * - * Since we're being called from an LSC, link is already known to be up. - * So use link_up_wait_to_complete=false. - */ - hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (!link_up) { - ret_val = IXGBE_ERR_FLOW_CONTROL; - goto out; - } - - switch (hw->phy.media_type) { - /* Autoneg flow control on fiber adapters */ - case ixgbe_media_type_fiber: - if (speed == IXGBE_LINK_SPEED_1GB_FULL) - ret_val = ixgbe_fc_autoneg_fiber(hw); - break; - - /* Autoneg flow control on backplane adapters */ - case ixgbe_media_type_backplane: - ret_val = ixgbe_fc_autoneg_backplane(hw); - break; - - /* Autoneg flow control on copper adapters */ - case ixgbe_media_type_copper: - if (ixgbe_device_supports_autoneg_fc(hw) == 0) - ret_val = ixgbe_fc_autoneg_copper(hw); - break; - - default: - break; - } + if ((!(adv_reg)) || (!(lp_reg))) + return IXGBE_ERR_FC_NOT_NEGOTIATED; -out: - if (ret_val == 0) { - hw->fc.fc_was_autonegged = true; + if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { + /* + * Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. 
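One detail from the pause-time setup just above, before the negotiation helper: multiplying the 16-bit pause time by 0x00010001 simply replicates it into both halves of each FCTTV register (two traffic classes per 32-bit register), and the refresh threshold is half the pause time. A tiny self-contained check, with 0xFFFF used only as an example value:

    #include <assert.h>

    int main(void)
    {
            unsigned int pause_time = 0xFFFF;       /* example 16-bit pause time */
            unsigned int fcttv = pause_time * 0x00010001u;

            /* low half covers the even TC, high half the odd TC */
            assert((fcttv & 0xFFFF) == pause_time);
            assert((fcttv >> 16) == pause_time);

            /* refresh threshold is half the pause quanta */
            assert(pause_time / 2 == 0x7FFF);
            return 0;
    }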
+ */ + if (hw->fc.requested_mode == ixgbe_fc_full) { + hw->fc.current_mode = ixgbe_fc_full; + hw_dbg(hw, "Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = ixgbe_fc_rx_pause; + hw_dbg(hw, "Flow Control=RX PAUSE frames only\n"); + } + } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && + (lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ixgbe_fc_tx_pause; + hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); + } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && + !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ixgbe_fc_rx_pause; + hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); } else { - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; + hw->fc.current_mode = ixgbe_fc_none; + hw_dbg(hw, "Flow Control = NONE.\n"); } - return ret_val; + return 0; } /** @@ -2116,7 +2259,7 @@ out: static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) { u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; - s32 ret_val; + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; /* * On multispeed fiber at 1g, bail out if @@ -2126,10 +2269,8 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || - (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { - ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) goto out; - } pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); @@ -2153,7 +2294,7 @@ out: static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) { u32 links2, anlp1_reg, autoc_reg, links; - s32 ret_val; + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; /* * On backplane, bail out if @@ -2161,21 +2302,13 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) * - we are 82599 and link partner is not AN enabled */ links = IXGBE_READ_REG(hw, IXGBE_LINKS); - if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; - ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) goto out; - } if (hw->mac.type == ixgbe_mac_82599EB) { links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); - if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; - ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) goto out; - } } /* * Read the 10g AN autoc and LP ability registers and resolve @@ -2217,241 +2350,63 @@ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) } /** - * ixgbe_negotiate_fc - Negotiate flow control - * @hw: pointer to hardware structure - * @adv_reg: flow control advertised settings - * @lp_reg: link partner's flow control settings - * @adv_sym: symmetric pause bit in advertisement - * @adv_asm: asymmetric pause bit in advertisement - * @lp_sym: symmetric pause bit in link partner advertisement - * @lp_asm: asymmetric pause bit in link partner advertisement - * - * Find the intersection between advertised settings and link partner's - * advertised settings - **/ -static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, - u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) -{ - if ((!(adv_reg)) || (!(lp_reg))) - return IXGBE_ERR_FC_NOT_NEGOTIATED; - - if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { - /* - * Now we need to check if the user selected Rx ONLY - * of pause frames. 
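The relocated ixgbe_negotiate_fc() above is the standard pause resolution: intersect the local SYM/ASM advertisement with the link partner's. A compact userspace restatement of the same truth table, using booleans instead of register bits and stand-in enum names:

    #include <stdbool.h>
    #include <stdio.h>

    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    static enum fc_mode resolve_fc(bool adv_sym, bool adv_asm,
                                   bool lp_sym, bool lp_asm,
                                   bool wanted_full)
    {
            if (adv_sym && lp_sym)
                    /* both symmetric; drop to Rx-only if that is all we wanted */
                    return wanted_full ? FC_FULL : FC_RX_PAUSE;
            if (!adv_sym && adv_asm && lp_sym && lp_asm)
                    return FC_TX_PAUSE;
            if (adv_sym && adv_asm && !lp_sym && lp_asm)
                    return FC_RX_PAUSE;
            return FC_NONE;
    }

    int main(void)
    {
            /* we advertised SYM+ASM (rx_pause request), partner is ASM only */
            printf("%d\n", resolve_fc(true, true, false, true, false));
            return 0;   /* prints 1, i.e. FC_RX_PAUSE */
    }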
In this case, we had to advertise - * FULL flow control because we could not advertise RX - * ONLY. Hence, we must now check to see if we need to - * turn OFF the TRANSMISSION of PAUSE frames. - */ - if (hw->fc.requested_mode == ixgbe_fc_full) { - hw->fc.current_mode = ixgbe_fc_full; - hw_dbg(hw, "Flow Control = FULL.\n"); - } else { - hw->fc.current_mode = ixgbe_fc_rx_pause; - hw_dbg(hw, "Flow Control=RX PAUSE frames only\n"); - } - } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && - (lp_reg & lp_sym) && (lp_reg & lp_asm)) { - hw->fc.current_mode = ixgbe_fc_tx_pause; - hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); - } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && - !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { - hw->fc.current_mode = ixgbe_fc_rx_pause; - hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); - } else { - hw->fc.current_mode = ixgbe_fc_none; - hw_dbg(hw, "Flow Control = NONE.\n"); - } - return 0; -} - -/** - * ixgbe_setup_fc - Set up flow control + * ixgbe_fc_autoneg - Configure flow control * @hw: pointer to hardware structure * - * Called at init time to set up flow control. + * Compares our advertised flow control capabilities to those advertised by + * our link partner, and determines the proper flow control mode to use. **/ -static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) +void ixgbe_fc_autoneg(struct ixgbe_hw *hw) { - s32 ret_val = 0; - u32 reg = 0, reg_bp = 0; - u16 reg_cu = 0; - -#ifdef CONFIG_DCB - if (hw->fc.requested_mode == ixgbe_fc_pfc) { - hw->fc.current_mode = hw->fc.requested_mode; - goto out; - } - -#endif /* CONFIG_DCB */ - /* Validate the packetbuf configuration */ - if (packetbuf_num < 0 || packetbuf_num > 7) { - hw_dbg(hw, "Invalid packet buffer number [%d], expected range " - "is 0-7\n", packetbuf_num); - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; - goto out; - } + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + ixgbe_link_speed speed; + bool link_up; /* - * Validate the water mark configuration. Zero water marks are invalid - * because it causes the controller to just blast out fc packets. + * AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. + * + * Since we're being called from an LSC, link is already known to be up. + * So use link_up_wait_to_complete=false. */ - if (!hw->fc.low_water || - !hw->fc.high_water[packetbuf_num] || - !hw->fc.pause_time) { - hw_dbg(hw, "Invalid water mark configuration\n"); - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + if (hw->fc.disable_fc_autoneg) goto out; - } - /* - * Validate the requested mode. Strict IEEE mode does not allow - * ixgbe_fc_rx_pause because it will cause us to fail at UNH. - */ - if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { - hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict " - "IEEE mode\n"); - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) goto out; - } - - /* - * 10gig parts do not have a word in the EEPROM to determine the - * default flow control setting, so we explicitly set it to full. - */ - if (hw->fc.requested_mode == ixgbe_fc_default) - hw->fc.requested_mode = ixgbe_fc_full; - - /* - * Set up the 1G and 10G flow control advertisement registers so the - * HW will be able to do fc autoneg once the cable is plugged in. If - * we link at 10G, the 1G advertisement is harmless and vice versa. 
- */ switch (hw->phy.media_type) { + /* Autoneg flow control on fiber adapters */ case ixgbe_media_type_fiber: + if (speed == IXGBE_LINK_SPEED_1GB_FULL) + ret_val = ixgbe_fc_autoneg_fiber(hw); + break; + + /* Autoneg flow control on backplane adapters */ case ixgbe_media_type_backplane: - reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); - reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC); + ret_val = ixgbe_fc_autoneg_backplane(hw); break; + /* Autoneg flow control on copper adapters */ case ixgbe_media_type_copper: - hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, ®_cu); + if (ixgbe_device_supports_autoneg_fc(hw) == 0) + ret_val = ixgbe_fc_autoneg_copper(hw); break; default: - ; - } - - /* - * The possible values of fc.requested_mode are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames, - * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames but - * we do not support receiving pause frames). - * 3: Both Rx and Tx flow control (symmetric) are enabled. -#ifdef CONFIG_DCB - * 4: Priority Flow Control is enabled. -#endif - * other: Invalid. - */ - switch (hw->fc.requested_mode) { - case ixgbe_fc_none: - /* Flow control completely disabled by software override. */ - reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); - if (hw->phy.media_type == ixgbe_media_type_backplane) - reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | - IXGBE_AUTOC_ASM_PAUSE); - else if (hw->phy.media_type == ixgbe_media_type_copper) - reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); - break; - case ixgbe_fc_rx_pause: - /* - * Rx Flow control is enabled and Tx Flow control is - * disabled by software override. Since there really - * isn't a way to advertise that we are capable of RX - * Pause ONLY, we will advertise that we support both - * symmetric and asymmetric Rx PAUSE. Later, we will - * disable the adapter's ability to send PAUSE frames. - */ - reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); - if (hw->phy.media_type == ixgbe_media_type_backplane) - reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | - IXGBE_AUTOC_ASM_PAUSE); - else if (hw->phy.media_type == ixgbe_media_type_copper) - reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); - break; - case ixgbe_fc_tx_pause: - /* - * Tx Flow control is enabled, and Rx Flow control is - * disabled by software override. - */ - reg |= (IXGBE_PCS1GANA_ASM_PAUSE); - reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE); - if (hw->phy.media_type == ixgbe_media_type_backplane) { - reg_bp |= (IXGBE_AUTOC_ASM_PAUSE); - reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE); - } else if (hw->phy.media_type == ixgbe_media_type_copper) { - reg_cu |= (IXGBE_TAF_ASM_PAUSE); - reg_cu &= ~(IXGBE_TAF_SYM_PAUSE); - } break; - case ixgbe_fc_full: - /* Flow control (both Rx and Tx) is enabled by SW override. */ - reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); - if (hw->phy.media_type == ixgbe_media_type_backplane) - reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | - IXGBE_AUTOC_ASM_PAUSE); - else if (hw->phy.media_type == ixgbe_media_type_copper) - reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); - break; -#ifdef CONFIG_DCB - case ixgbe_fc_pfc: - goto out; - break; -#endif /* CONFIG_DCB */ - default: - hw_dbg(hw, "Flow control param set incorrectly\n"); - ret_val = IXGBE_ERR_CONFIG; - goto out; - break; - } - - if (hw->mac.type != ixgbe_mac_X540) { - /* - * Enable auto-negotiation between the MAC & PHY; - * the MAC will advertise clause 37 flow control. 
- */ - IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); - reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); - - /* Disable AN timeout */ - if (hw->fc.strict_ieee) - reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; - - IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); - hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); - } - - /* - * AUTOC restart handles negotiation of 1G and 10G on backplane - * and copper. There is no need to set the PCS1GCTL register. - * - */ - if (hw->phy.media_type == ixgbe_media_type_backplane) { - reg_bp |= IXGBE_AUTOC_AN_RESTART; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); - } else if ((hw->phy.media_type == ixgbe_media_type_copper) && - (ixgbe_device_supports_autoneg_fc(hw) == 0)) { - hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, reg_cu); } - hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); out: - return ret_val; + if (ret_val == 0) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } } /** @@ -2606,7 +2561,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) break; else /* Use interrupt-safe sleep just in case */ - udelay(10); + udelay(1000); } /* For informational purposes only */ @@ -2783,17 +2738,36 @@ san_mac_addr_out: * Read PCIe configuration space, and get the MSI-X vector count from * the capabilities table. **/ -u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) +u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) { struct ixgbe_adapter *adapter = hw->back; - u16 msix_count; - pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS, - &msix_count); + u16 msix_count = 1; + u16 max_msix_count; + u16 pcie_offset; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS; + max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; + max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; + break; + default: + return msix_count; + } + + pci_read_config_word(adapter->pdev, pcie_offset, &msix_count); msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; - /* MSI-X count is zero-based in HW, so increment to give proper value */ + /* MSI-X count is zero-based in HW */ msix_count++; + if (msix_count > max_msix_count) + msix_count = max_msix_count; + return msix_count; } @@ -3203,28 +3177,6 @@ wwn_prefix_out: } /** - * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow - * control - * @hw: pointer to hardware structure - * - * There are several phys that do not support autoneg flow control. This - * function check the device id to see if the associated phy supports - * autoneg flow control. 
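Regarding the reworked ixgbe_get_pcie_msix_count_generic() a bit further up: the MSI-X table size field in PCI config space is zero-based, so the driver masks it, adds one, and clamps to the per-MAC maximum. A sketch of that arithmetic with stand-in mask and limit values:

    #include <stdio.h>

    #define MSIX_TBL_SZ_MASK 0x7FFu         /* stand-in for the table-size field mask */

    static unsigned int msix_vectors(unsigned int cap_word,
                                     unsigned int max_for_mac)
    {
            unsigned int count = (cap_word & MSIX_TBL_SZ_MASK) + 1; /* zero-based in HW */

            if (count > max_for_mac)
                    count = max_for_mac;
            return count;
    }

    int main(void)
    {
            /* a raw capability word of 0x003F encodes 0x3F + 1 = 64 vectors */
            printf("%u\n", msix_vectors(0x003F, 64));
            return 0;
    }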
- **/ -static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) -{ - - switch (hw->device_id) { - case IXGBE_DEV_ID_X540T: - return 0; - case IXGBE_DEV_ID_82599_T3_LOM: - return 0; - default: - return IXGBE_ERR_FC_NOT_SUPPORTED; - } -} - -/** * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing * @hw: pointer to hardware structure * @enable: enable or disable switch for anti-spoofing @@ -3585,3 +3537,172 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); } + +static const u8 ixgbe_emc_temp_data[4] = { + IXGBE_EMC_INTERNAL_DATA, + IXGBE_EMC_DIODE1_DATA, + IXGBE_EMC_DIODE2_DATA, + IXGBE_EMC_DIODE3_DATA +}; +static const u8 ixgbe_emc_therm_limit[4] = { + IXGBE_EMC_INTERNAL_THERM_LIMIT, + IXGBE_EMC_DIODE1_THERM_LIMIT, + IXGBE_EMC_DIODE2_THERM_LIMIT, + IXGBE_EMC_DIODE3_THERM_LIMIT +}; + +/** + * ixgbe_get_ets_data - Extracts the ETS bit data + * @hw: pointer to hardware structure + * @ets_cfg: extected ETS data + * @ets_offset: offset of ETS data + * + * Returns error code. + **/ +static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg, + u16 *ets_offset) +{ + s32 status = 0; + + status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset); + if (status) + goto out; + + if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } + + status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg); + if (status) + goto out; + + if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } + +out: + return status; +} + +/** + * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Returns the thermal sensor data structure + **/ +s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) +{ + s32 status = 0; + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 num_sensors; + u8 i; + struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + /* Only support thermal sensors attached to physical port 0 */ + if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } + + status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); + if (status) + goto out; + + num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); + if (num_sensors > IXGBE_MAX_SENSORS) + num_sensors = IXGBE_MAX_SENSORS; + + for (i = 0; i < num_sensors; i++) { + u8 sensor_index; + u8 sensor_location; + + status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i), + &ets_sensor); + if (status) + goto out; + + sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> + IXGBE_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> + IXGBE_ETS_DATA_LOC_SHIFT); + + if (sensor_location != 0) { + status = hw->phy.ops.read_i2c_byte(hw, + ixgbe_emc_temp_data[sensor_index], + IXGBE_I2C_THERMAL_SENSOR_ADDR, + &data->sensor[i].temp); + if (status) + goto out; + } + } +out: + return status; +} + +/** + * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Inits the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) +{ + s32 status = 0; + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 low_thresh_delta; + u8 num_sensors; + u8 therm_limit; + u8 i; + struct 
ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data)); + + /* Only support thermal sensors attached to physical port 0 */ + if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } + + status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); + if (status) + goto out; + + low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >> + IXGBE_ETS_LTHRES_DELTA_SHIFT); + num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); + if (num_sensors > IXGBE_MAX_SENSORS) + num_sensors = IXGBE_MAX_SENSORS; + + for (i = 0; i < num_sensors; i++) { + u8 sensor_index; + u8 sensor_location; + + hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor); + sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> + IXGBE_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> + IXGBE_ETS_DATA_LOC_SHIFT); + therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK; + + hw->phy.ops.write_i2c_byte(hw, + ixgbe_emc_therm_limit[sensor_index], + IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit); + + if (sensor_location == 0) + continue; + + data->sensor[i].location = sensor_location; + data->sensor[i].caution_thresh = therm_limit; + data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta; + } +out: + return status; +} + diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 204f06235b4..6222fdb3d3f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -31,7 +31,7 @@ #include "ixgbe_type.h" #include "ixgbe.h" -u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); +u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); @@ -77,8 +77,8 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); -s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num); -s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw); +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); +void ixgbe_fc_autoneg(struct ixgbe_hw *hw); s32 ixgbe_validate_mac_addr(u8 *mac_addr); s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); @@ -107,6 +107,19 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, int strategy); +#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define IXGBE_EMC_INTERNAL_DATA 0x00 +#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20 +#define IXGBE_EMC_DIODE1_DATA 0x01 +#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19 +#define IXGBE_EMC_DIODE2_DATA 0x23 +#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A +#define IXGBE_EMC_DIODE3_DATA 0x2A +#define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30 + +s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); + #define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) #ifndef writeq diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c index d3695edfcb8..87592b458c9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c @@ -191,53 
+191,46 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, */ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) { - u32 reg; + u32 fcrtl, reg; u8 i; - if (pfc_en) { - /* Enable Transmit Priority Flow Control */ - reg = IXGBE_READ_REG(hw, IXGBE_RMCS); - reg &= ~IXGBE_RMCS_TFCE_802_3X; - /* correct the reporting of our flow control status */ - reg |= IXGBE_RMCS_TFCE_PRIORITY; - IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); - - /* Enable Receive Priority Flow Control */ - reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); - reg &= ~IXGBE_FCTRL_RFCE; - reg |= IXGBE_FCTRL_RPFCE; - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); - - /* Configure pause time */ - for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++) - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800); + /* Enable Transmit Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + reg &= ~IXGBE_RMCS_TFCE_802_3X; + reg |= IXGBE_RMCS_TFCE_PRIORITY; + IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); - /* Configure flow control refresh threshold value */ - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400); - } + /* Enable Receive Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); + reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE); - /* - * Configure flow control thresholds and enable priority flow control - * for each traffic class. - */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - int enabled = pfc_en & (1 << i); + if (pfc_en) + reg |= IXGBE_FCTRL_RPFCE; - reg = hw->fc.low_water << 10; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); - if (enabled == pfc_enabled_tx || - enabled == pfc_enabled_full) - reg |= IXGBE_FCRTL_XONE; + fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE; + /* Configure PFC Tx thresholds per TC */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + if (!(pfc_en & (1 << i))) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); + continue; + } + + reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); + } - IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg); + /* Configure pause time */ + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); - reg = hw->fc.high_water[i] << 10; - if (enabled == pfc_enabled_tx || - enabled == pfc_enabled_full) - reg |= IXGBE_FCRTH_FCEN; + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); - IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); - } return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index 888a419dc3d..4eac80d0185 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -211,24 +211,42 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, */ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) { - u32 i, j, reg; + u32 i, j, fcrtl, reg; u8 max_tc = 0; - for (i = 0; i < MAX_USER_PRIORITY; i++) + /* Enable Transmit Priority Flow Control */ + IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY); + + /* Enable Receive Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); + reg |= IXGBE_MFLCN_DPF; + + /* + * X540 supports per TC Rx priority flow control. So + * clear all TCs and only enable those that should be + * enabled. 
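The per-TC Rx PFC enable written above is just the priority bitmap shifted into the register on MACs that support it, plus a coarse "PFC in use" bit otherwise. A minimal sketch; the bit positions here are illustrative, not the MFLCN layout:

    #include <stdio.h>

    #define RPFCE_BIT   (1u << 2)           /* stand-in: some TC uses PFC */
    #define RPFCE_SHIFT 4                   /* stand-in: per-TC enable field */

    static unsigned int rx_pfc_bits(unsigned char pfc_en, int per_tc_capable)
    {
            unsigned int reg = 0;

            if (per_tc_capable)
                    reg |= (unsigned int)pfc_en << RPFCE_SHIFT;  /* one bit per TC */
            if (pfc_en)
                    reg |= RPFCE_BIT;
            return reg;
    }

    int main(void)
    {
            /* PFC requested on TCs 3 and 4 only (bitmap 0x18) */
            printf("0x%08x\n", rx_pfc_bits(0x18, 1));
            return 0;   /* prints 0x00000184 with these stand-in positions */
    }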
+ */ + reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); + + if (hw->mac.type == ixgbe_mac_X540) + reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; + + if (pfc_en) + reg |= IXGBE_MFLCN_RPFCE; + + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); + + for (i = 0; i < MAX_USER_PRIORITY; i++) { if (prio_tc[i] > max_tc) max_tc = prio_tc[i]; + } + + fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE; /* Configure PFC Tx thresholds per TC */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i <= max_tc; i++) { int enabled = 0; - if (i > max_tc) { - reg = 0; - IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); - IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); - continue; - } - for (j = 0; j < MAX_USER_PRIORITY; j++) { if ((prio_tc[j] == i) && (pfc_en & (1 << j))) { enabled = 1; @@ -236,61 +254,29 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) } } - reg = hw->fc.low_water << 10; - - if (enabled) - reg |= IXGBE_FCRTL_XONE; - IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); + if (enabled) { + reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); + } else { + reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); + } - reg = hw->fc.high_water[i] << 10; - if (enabled) - reg |= IXGBE_FCRTH_FCEN; IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); } - if (pfc_en) { - /* Configure pause time (2 TCs per register) */ - reg = hw->fc.pause_time | (hw->fc.pause_time << 16); - for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); - - /* Configure flow control refresh threshold value */ - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); - - - reg = IXGBE_FCCFG_TFCE_PRIORITY; - IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg); - /* - * Enable Receive PFC - * 82599 will always honor XOFF frames we receive when - * we are in PFC mode however X540 only honors enabled - * traffic classes. - */ - reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); - reg &= ~IXGBE_MFLCN_RFCE; - reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF; - - if (hw->mac.type == ixgbe_mac_X540) { - reg &= ~IXGBE_MFLCN_RPFCE_MASK; - reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; - } + for (; i < MAX_TRAFFIC_CLASS; i++) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0); + } - IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); - - } else { - /* X540 devices have a RX bit that should be cleared - * if PFC is disabled on all TCs but PFC features is - * enabled. 
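The "enabled" test in the loop above asks a simple question: is there any user priority that is both PFC-enabled and mapped to this traffic class? Restated as a small standalone helper, with MAX_USER_PRIORITY assumed to be 8:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_USER_PRIORITY 8

    static bool tc_has_pfc(unsigned int tc, const unsigned char prio_tc[],
                           unsigned char pfc_en)
    {
            unsigned int up;

            for (up = 0; up < MAX_USER_PRIORITY; up++) {
                    if (prio_tc[up] == tc && (pfc_en & (1u << up)))
                            return true;
            }
            return false;
    }

    int main(void)
    {
            /* priorities 0-3 on TC0, 4-7 on TC1; PFC asked for priority 5 only */
            const unsigned char prio_tc[MAX_USER_PRIORITY] = { 0, 0, 0, 0, 1, 1, 1, 1 };

            printf("TC0=%d TC1=%d\n",
                   tc_has_pfc(0, prio_tc, 0x20), tc_has_pfc(1, prio_tc, 0x20));
            return 0;   /* prints TC0=0 TC1=1 */
    }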
- */ - if (hw->mac.type == ixgbe_mac_X540) { - reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); - reg &= ~IXGBE_MFLCN_RPFCE_MASK; - IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); - } + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) - hw->mac.ops.fc_enable(hw, i); - } + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index 32e5c02ff6d..5164a21b13c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -338,6 +338,8 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev) static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; + struct ixgbe_hw *hw = &adapter->hw; int ret = DCB_NO_HW_CHG; int i; @@ -350,32 +352,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) if (!adapter->dcb_set_bitmap) return ret; - if (adapter->dcb_cfg.pfc_mode_enable) { - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - if (adapter->hw.fc.current_mode != ixgbe_fc_pfc) - adapter->last_lfc_mode = - adapter->hw.fc.current_mode; - break; - default: - break; - } - adapter->hw.fc.requested_mode = ixgbe_fc_pfc; - } else { - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - adapter->hw.fc.requested_mode = ixgbe_fc_none; - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - adapter->hw.fc.requested_mode = adapter->last_lfc_mode; - break; - default: - break; - } - } - if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) { u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS]; @@ -388,23 +364,19 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); #endif - ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg, - max_frame, DCB_TX_CONFIG); - ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg, - max_frame, DCB_RX_CONFIG); + ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame, + DCB_TX_CONFIG); + ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame, + DCB_RX_CONFIG); - ixgbe_dcb_unpack_refill(&adapter->dcb_cfg, - DCB_TX_CONFIG, refill); - ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max); - ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg, - DCB_TX_CONFIG, bwg_id); - ixgbe_dcb_unpack_prio(&adapter->dcb_cfg, - DCB_TX_CONFIG, prio_type); - ixgbe_dcb_unpack_map(&adapter->dcb_cfg, - DCB_TX_CONFIG, prio_tc); + ixgbe_dcb_unpack_refill(dcb_cfg, DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max(dcb_cfg, max); + ixgbe_dcb_unpack_bwgid(dcb_cfg, DCB_TX_CONFIG, bwg_id); + ixgbe_dcb_unpack_prio(dcb_cfg, DCB_TX_CONFIG, prio_type); + ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc); - ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max, - bwg_id, prio_type, prio_tc); + ixgbe_dcb_hw_ets_config(hw, refill, max, bwg_id, + prio_type, prio_tc); for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) netdev_set_prio_tc_map(netdev, i, prio_tc[i]); @@ -413,19 +385,21 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) } if (adapter->dcb_set_bitmap & BIT_PFC) { - u8 pfc_en; - u8 prio_tc[MAX_USER_PRIORITY]; + if (dcb_cfg->pfc_mode_enable) { + u8 pfc_en; + 
u8 prio_tc[MAX_USER_PRIORITY]; + + ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc); + ixgbe_dcb_unpack_pfc(dcb_cfg, &pfc_en); + ixgbe_dcb_hw_pfc_config(hw, pfc_en, prio_tc); + } else { + hw->mac.ops.fc_enable(hw); + } - ixgbe_dcb_unpack_map(&adapter->dcb_cfg, - DCB_TX_CONFIG, prio_tc); - ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en); - ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en, prio_tc); - if (ret != DCB_HW_CHG_RST) - ret = DCB_HW_CHG; - } + ixgbe_set_rx_drop_en(adapter); - if (adapter->dcb_cfg.pfc_mode_enable) - adapter->hw.fc.current_mode = ixgbe_fc_pfc; + ret = DCB_HW_CHG; + } #ifdef IXGBE_FCOE /* Reprogam FCoE hardware offloads when the traffic class @@ -647,7 +621,9 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) { struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; u8 *prio_tc; + int err; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; @@ -661,7 +637,16 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, prio_tc = adapter->ixgbe_ieee_ets->prio_tc; memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc)); - return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc); + + /* Enable link flow control parameters if PFC is disabled */ + if (pfc->pfc_en) + err = ixgbe_dcb_hw_pfc_config(hw, pfc->pfc_en, prio_tc); + else + err = hw->mac.ops.fc_enable(hw); + + ixgbe_set_rx_drop_en(adapter); + + return err; } static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index cfe7d269590..3178f1ec371 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -391,11 +391,6 @@ static void ixgbe_get_pauseparam(struct net_device *netdev, } else if (hw->fc.current_mode == ixgbe_fc_full) { pause->rx_pause = 1; pause->tx_pause = 1; -#ifdef CONFIG_DCB - } else if (hw->fc.current_mode == ixgbe_fc_pfc) { - pause->rx_pause = 0; - pause->tx_pause = 0; -#endif } } @@ -404,21 +399,14 @@ static int ixgbe_set_pauseparam(struct net_device *netdev, { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_fc_info fc; + struct ixgbe_fc_info fc = hw->fc; -#ifdef CONFIG_DCB - if (adapter->dcb_cfg.pfc_mode_enable || - ((hw->mac.type == ixgbe_mac_82598EB) && - (adapter->flags & IXGBE_FLAG_DCB_ENABLED))) + /* 82598 does no support link flow control with DCB enabled */ + if ((hw->mac.type == ixgbe_mac_82598EB) && + (adapter->flags & IXGBE_FLAG_DCB_ENABLED)) return -EINVAL; -#endif - fc = hw->fc; - - if (pause->autoneg != AUTONEG_ENABLE) - fc.disable_fc_autoneg = true; - else - fc.disable_fc_autoneg = false; + fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) fc.requested_mode = ixgbe_fc_full; @@ -426,14 +414,8 @@ static int ixgbe_set_pauseparam(struct net_device *netdev, fc.requested_mode = ixgbe_fc_rx_pause; else if (!pause->rx_pause && pause->tx_pause) fc.requested_mode = ixgbe_fc_tx_pause; - else if (!pause->rx_pause && !pause->tx_pause) - fc.requested_mode = ixgbe_fc_none; else - return -EINVAL; - -#ifdef CONFIG_DCB - adapter->last_lfc_mode = fc.requested_mode; -#endif + fc.requested_mode = ixgbe_fc_none; /* if the thing changed then we'll update and use new autoneg */ if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) { @@ -1971,53 +1953,12 @@ static int ixgbe_wol_exclusion(struct 
ixgbe_adapter *adapter, struct ethtool_wolinfo *wol) { struct ixgbe_hw *hw = &adapter->hw; - int retval = 1; - u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; - - /* WOL not supported except for the following */ - switch(hw->device_id) { - case IXGBE_DEV_ID_82599_SFP: - /* Only these subdevices could supports WOL */ - switch (hw->subsystem_device_id) { - case IXGBE_SUBDEV_ID_82599_560FLR: - /* only support first port */ - if (hw->bus.func != 0) { - wol->supported = 0; - break; - } - case IXGBE_SUBDEV_ID_82599_SFP: - retval = 0; - break; - default: - wol->supported = 0; - break; - } - break; - case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: - /* All except this subdevice support WOL */ - if (hw->subsystem_device_id == - IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) { - wol->supported = 0; - break; - } - retval = 0; - break; - case IXGBE_DEV_ID_82599_KX4: - retval = 0; - break; - case IXGBE_DEV_ID_X540T: - /* check eeprom to see if enabled wol */ - if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || - ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && - (hw->bus.func == 0))) { - retval = 0; - break; - } + int retval = 0; - /* All others not supported */ - wol->supported = 0; - break; - default: + /* WOL not supported for all devices */ + if (!ixgbe_wol_supported(adapter, hw->device_id, + hw->subsystem_device_id)) { + retval = 1; wol->supported = 0; } @@ -2755,6 +2696,46 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return ret; } +static int ixgbe_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + switch (adapter->hw.mac.type) { +#ifdef CONFIG_IXGBE_PTP + case ixgbe_mac_X540: + case ixgbe_mac_82599EB: + info->so_timestamping = + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_SOME); + break; +#endif /* CONFIG_IXGBE_PTP */ + default: + return ethtool_op_get_ts_info(dev, info); + break; + } + return 0; +} + static const struct ethtool_ops ixgbe_ethtool_ops = { .get_settings = ixgbe_get_settings, .set_settings = ixgbe_set_settings, @@ -2783,6 +2764,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .set_coalesce = ixgbe_set_coalesce, .get_rxnfc = ixgbe_get_rxnfc, .set_rxnfc = ixgbe_set_rxnfc, + .get_ts_info = ixgbe_get_ts_info, }; void ixgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index ed1b47dc083..af1a5314b49 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -523,11 +523,17 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring, /** * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx 
rings to allocate + * @rxr_idx: index of first Rx ring to allocate * * We allocate one q_vector. If allocation fails we return -ENOMEM. **/ -static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx, +static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, + int v_count, int v_idx, int txr_count, int txr_idx, int rxr_count, int rxr_idx) { @@ -598,7 +604,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx, /* update count and index */ txr_count--; - txr_idx++; + txr_idx += v_count; /* push pointer to next ring */ ring++; @@ -641,7 +647,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx, /* update count and index */ rxr_count--; - rxr_idx++; + rxr_idx += v_count; /* push pointer to next ring */ ring++; @@ -700,24 +706,23 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) q_vectors = 1; if (q_vectors >= (rxr_remaining + txr_remaining)) { - for (; rxr_remaining; v_idx++, q_vectors--) { - int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); - err = ixgbe_alloc_q_vector(adapter, v_idx, - 0, 0, rqpv, rxr_idx); + for (; rxr_remaining; v_idx++) { + err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); if (err) goto err_out; /* update counts and index */ - rxr_remaining -= rqpv; - rxr_idx += rqpv; + rxr_remaining--; + rxr_idx++; } } - for (; q_vectors; v_idx++, q_vectors--) { - int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); - int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors); - err = ixgbe_alloc_q_vector(adapter, v_idx, + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, tqpv, txr_idx, rqpv, rxr_idx); @@ -726,9 +731,9 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) /* update counts and index */ rxr_remaining -= rqpv; - rxr_idx += rqpv; txr_remaining -= tqpv; - txr_idx += tqpv; + rxr_idx++; + txr_idx++; } return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 467948e9ecd..bf20457ea23 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -63,8 +63,8 @@ static char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #endif #define MAJ 3 -#define MIN 8 -#define BUILD 21 +#define MIN 9 +#define BUILD 15 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ __stringify(BUILD) "-k" const char ixgbe_driver_version[] = DRV_VERSION; @@ -133,7 +133,7 @@ static struct notifier_block dca_notifier = { static unsigned int max_vfs; module_param(max_vfs, uint, 0); MODULE_PARM_DESC(max_vfs, - "Maximum number of virtual functions to allocate per physical function"); + "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63"); #endif /* CONFIG_PCI_IOV */ static unsigned int allow_unsupported_sfp; @@ -610,35 +610,50 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring, /* tx_buffer must be completely set up in the transmit path */ } -static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) +static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw_stats *hwstats = &adapter->stats; - u32 data = 0; - u32 xoff[8] = {0}; int i; + u32 data; - if ((hw->fc.current_mode == ixgbe_fc_full) || - (hw->fc.current_mode == ixgbe_fc_rx_pause)) { - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); - break; - default: - data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); - } - hwstats->lxoffrxc += data; + if ((hw->fc.current_mode != ixgbe_fc_full) && + (hw->fc.current_mode != ixgbe_fc_rx_pause)) + return; - /* refill credits (no tx hang) if we received xoff */ - if (!data) - return; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + break; + default: + data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + } + hwstats->lxoffrxc += data; - for (i = 0; i < adapter->num_tx_queues; i++) - clear_bit(__IXGBE_HANG_CHECK_ARMED, - &adapter->tx_ring[i]->state); + /* refill credits (no tx hang) if we received xoff */ + if (!data) return; - } else if (!(adapter->dcb_cfg.pfc_mode_enable)) + + for (i = 0; i < adapter->num_tx_queues; i++) + clear_bit(__IXGBE_HANG_CHECK_ARMED, + &adapter->tx_ring[i]->state); +} + +static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_hw_stats *hwstats = &adapter->stats; + u32 xoff[8] = {0}; + int i; + bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; + + if (adapter->ixgbe_ieee_pfc) + pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); + + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) { + ixgbe_update_xoff_rx_lfc(adapter); return; + } /* update stats for each tc, only valid with PFC enabled */ for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { @@ -774,6 +789,13 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; +#ifdef CONFIG_IXGBE_PTP + if (unlikely(tx_buffer->tx_flags & + IXGBE_TX_FLAGS_TSTAMP)) + ixgbe_ptp_tx_hwtstamp(q_vector, + tx_buffer->skb); + +#endif /* free the skb */ dev_kfree_skb_any(tx_buffer->skb); @@ -1144,7 +1166,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, * there isn't much point in holding memory we can't use */ if (dma_mapping_error(rx_ring->dev, dma)) { - put_page(page); + __free_pages(page, ixgbe_rx_pg_order(rx_ring)); bi->page = NULL; rx_ring->rx_stats.alloc_rx_page_failed++; @@ -1374,6 +1396,11 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, ixgbe_rx_checksum(rx_ring, rx_desc, skb); +#ifdef CONFIG_IXGBE_PTP + if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)) + ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb); +#endif + if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { u16 vid = 
le16_to_cpu(rx_desc->wb.upper.vlan); __vlan_hwaccel_put_tag(skb, vid); @@ -2295,6 +2322,9 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) } ixgbe_check_fan_failure(adapter, eicr); +#ifdef CONFIG_IXGBE_PTP + ixgbe_ptp_check_pps_event(adapter, eicr); +#endif /* re-enable the original interrupt state, no lsc, no queues */ if (!test_bit(__IXGBE_DOWN, &adapter->state)) @@ -2487,6 +2517,9 @@ static irqreturn_t ixgbe_intr(int irq, void *data) } ixgbe_check_fan_failure(adapter, eicr); +#ifdef CONFIG_IXGBE_PTP + ixgbe_ptp_check_pps_event(adapter, eicr); +#endif /* would disable interrupts here but EIAM disabled it */ napi_schedule(&q_vector->napi); @@ -2756,6 +2789,61 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); } +static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u8 reg_idx = ring->reg_idx; + u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx)); + + srrctl |= IXGBE_SRRCTL_DROP_EN; + + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); +} + +static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u8 reg_idx = ring->reg_idx; + u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx)); + + srrctl &= ~IXGBE_SRRCTL_DROP_EN; + + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); +} + +#ifdef CONFIG_IXGBE_DCB +void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) +#else +static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) +#endif +{ + int i; + bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; + + if (adapter->ixgbe_ieee_pfc) + pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); + + /* + * We should set the drop enable bit if: + * SR-IOV is enabled + * or + * Number of Rx queues > 1 and flow control is disabled + * + * This allows us to avoid head of line blocking for security + * and performance reasons. + */ + if (adapter->num_vfs || (adapter->num_rx_queues > 1 && + !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) { + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]); + } +} + #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, @@ -2902,33 +2990,6 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); } -/** - * ixgbe_set_uta - Set unicast filter table address - * @adapter: board private structure - * - * The unicast table address is a register array of 32-bit registers. 
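The new ixgbe_set_rx_drop_en() a little earlier boils down to one predicate: enable SRRCTL drop on every Rx ring when SR-IOV is on, or when there are multiple Rx queues and neither link flow control nor PFC will provide back-pressure. A sketch of just that policy with plain booleans in place of the adapter state:

    #include <stdbool.h>
    #include <stdio.h>

    static bool want_rx_drop(unsigned int num_vfs, unsigned int num_rx_queues,
                             bool tx_flow_control, bool pfc_enabled)
    {
            /*
             * Dropping on ring overflow avoids head-of-line blocking, but it
             * only makes sense when nothing relies on PAUSE/PFC to slow the
             * sender down instead.
             */
            if (num_vfs)
                    return true;
            return num_rx_queues > 1 && !tx_flow_control && !pfc_enabled;
    }

    int main(void)
    {
            printf("%d %d\n", want_rx_drop(0, 8, false, false),
                   want_rx_drop(0, 8, true, false));
            return 0;   /* prints 1 0 */
    }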
- * The table is meant to be used in a way similar to how the MTA is used - * however due to certain limitations in the hardware it is necessary to - * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous - * enable bit to allow vlan tag stripping when promiscuous mode is enabled - **/ -static void ixgbe_set_uta(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int i; - - /* The UTA table only exists on 82599 hardware and newer */ - if (hw->mac.type < ixgbe_mac_82599EB) - return; - - /* we only need to do this if VMDq is enabled */ - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - return; - - for (i = 0; i < 128; i++) - IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); -} - #define IXGBE_MAX_RX_DESC_POLL 10 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) @@ -3214,8 +3275,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) /* Program registers for the distribution of queues */ ixgbe_setup_mrqc(adapter); - ixgbe_set_uta(adapter); - /* set_rx_buffer_len must be called before ring initialization */ ixgbe_set_rx_buffer_len(adapter); @@ -3452,16 +3511,17 @@ void ixgbe_set_rx_mode(struct net_device *netdev) } ixgbe_vlan_filter_enable(adapter); hw->addr_ctrl.user_set_promisc = false; - /* - * Write addresses to available RAR registers, if there is not - * sufficient space to store all the addresses then enable - * unicast promiscuous mode - */ - count = ixgbe_write_uc_addr_list(netdev); - if (count < 0) { - fctrl |= IXGBE_FCTRL_UPE; - vmolr |= IXGBE_VMOLR_ROPE; - } + } + + /* + * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = ixgbe_write_uc_addr_list(netdev); + if (count < 0) { + fctrl |= IXGBE_FCTRL_UPE; + vmolr |= IXGBE_VMOLR_ROPE; } if (adapter->num_vfs) { @@ -4128,7 +4188,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) DMA_FROM_DEVICE); rx_buffer->dma = 0; if (rx_buffer->page) - put_page(rx_buffer->page); + __free_pages(rx_buffer->page, + ixgbe_rx_pg_order(rx_ring)); rx_buffer->page = NULL; } @@ -4426,9 +4487,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) /* default flow control settings */ hw->fc.requested_mode = ixgbe_fc_full; hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ -#ifdef CONFIG_DCB - adapter->last_lfc_mode = hw->fc.current_mode; -#endif ixgbe_pbthresh_setup(adapter); hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; hw->fc.send_xon = true; @@ -4993,9 +5051,6 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { u64 rsc_count = 0; u64 rsc_flush = 0; - for (i = 0; i < 16; i++) - adapter->hw_rx_no_dma_resources += - IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); for (i = 0; i < adapter->num_rx_queues; i++) { rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; @@ -5098,6 +5153,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); case ixgbe_mac_82599EB: + for (i = 0; i < 16; i++) + adapter->hw_rx_no_dma_resources += + IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); @@ -5275,7 +5333,7 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = 
&adapter->hw; u32 link_speed = adapter->link_speed; bool link_up = adapter->link_up; - int i; + bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) return; @@ -5287,13 +5345,13 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) link_speed = IXGBE_LINK_SPEED_10GB_FULL; link_up = true; } - if (link_up) { - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) - hw->mac.ops.fc_enable(hw, i); - } else { - hw->mac.ops.fc_enable(hw, 0); - } + + if (adapter->ixgbe_ieee_pfc) + pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); + + if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) { + hw->mac.ops.fc_enable(hw); + ixgbe_set_rx_drop_en(adapter); } if (link_up || @@ -5347,6 +5405,11 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) flow_rx = false; break; } + +#ifdef CONFIG_IXGBE_PTP + ixgbe_ptp_start_cyclecounter(adapter); +#endif + e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? "10 Gbps" : @@ -5384,6 +5447,10 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; +#ifdef CONFIG_IXGBE_PTP + ixgbe_ptp_start_cyclecounter(adapter); +#endif + e_info(drv, "NIC Link is Down\n"); netif_carrier_off(netdev); } @@ -5683,6 +5750,9 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_watchdog_subtask(adapter); ixgbe_fdir_reinit_subtask(adapter); ixgbe_check_hang_subtask(adapter); +#ifdef CONFIG_IXGBE_PTP + ixgbe_ptp_overflow_check(adapter); +#endif ixgbe_service_event_complete(adapter); } @@ -5833,6 +5903,11 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags) if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN) cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); +#ifdef CONFIG_IXGBE_PTP + if (tx_flags & IXGBE_TX_FLAGS_TSTAMP) + cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP); +#endif + /* set segmentation enable bits for TSO/FSO */ #ifdef IXGBE_FCOE if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO)) @@ -6223,6 +6298,15 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; } + skb_tx_timestamp(skb); + +#ifdef CONFIG_IXGBE_PTP + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IXGBE_TX_FLAGS_TSTAMP; + } +#endif + #ifdef CONFIG_PCI_IOV /* * Use the l2switch_enable flag - would be false if the DMA @@ -6375,7 +6459,14 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); + switch (cmd) { +#ifdef CONFIG_IXGBE_PTP + case SIOCSHWTSTAMP: + return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd); +#endif + default: + return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); + } } /** @@ -6567,15 +6658,17 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) if (tc) { netdev_set_num_tc(dev, tc); - adapter->last_lfc_mode = adapter->hw.fc.current_mode; adapter->flags |= IXGBE_FLAG_DCB_ENABLED; adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - if (adapter->hw.mac.type == ixgbe_mac_82598EB) + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + adapter->last_lfc_mode = adapter->hw.fc.requested_mode; adapter->hw.fc.requested_mode = ixgbe_fc_none; + } } else { netdev_reset_tc(dev); - adapter->hw.fc.requested_mode = adapter->last_lfc_mode; + if 
(adapter->hw.mac.type == ixgbe_mac_82598EB) + adapter->hw.fc.requested_mode = adapter->last_lfc_mode; adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; @@ -6624,7 +6717,7 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev, /* Turn off LRO if not RSC capable */ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) features &= ~NETIF_F_LRO; - + return features; } @@ -6683,6 +6776,74 @@ static int ixgbe_set_features(struct net_device *netdev, return 0; } +static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, + struct net_device *dev, + unsigned char *addr, + u16 flags) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int err = -EOPNOTSUPP; + + if (ndm->ndm_state & NUD_PERMANENT) { + pr_info("%s: FDB only supports static addresses\n", + ixgbe_driver_name); + return -EINVAL; + } + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + if (is_unicast_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + else + err = -EINVAL; + } + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} + +static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, + struct net_device *dev, + unsigned char *addr) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int err = -EOPNOTSUPP; + + if (ndm->ndm_state & NUD_PERMANENT) { + pr_info("%s: FDB only supports static addresses\n", + ixgbe_driver_name); + return -EINVAL; + } + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + if (is_unicast_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + else + err = -EINVAL; + } + + return err; +} + +static int ixgbe_ndo_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + int idx) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + idx = ndo_dflt_fdb_dump(skb, cb, dev, idx); + + return idx; +} + static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, @@ -6719,6 +6880,9 @@ static const struct net_device_ops ixgbe_netdev_ops = { #endif /* IXGBE_FCOE */ .ndo_set_features = ixgbe_set_features, .ndo_fix_features = ixgbe_fix_features, + .ndo_fdb_add = ixgbe_ndo_fdb_add, + .ndo_fdb_del = ixgbe_ndo_fdb_del, + .ndo_fdb_dump = ixgbe_ndo_fdb_dump, }; static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, @@ -6733,14 +6897,66 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, /* The 82599 supports up to 64 VFs per physical function * but this implementation limits allocation to 63 so that * basic networking resources are still available to the - * physical function + * physical function. If the user requests greater thn + * 63 VFs then it is an error - reset to default of zero. */ - adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs; + adapter->num_vfs = (max_vfs > 63) ? 
0 : max_vfs; ixgbe_enable_sriov(adapter, ii); #endif /* CONFIG_PCI_IOV */ } /** + * ixgbe_wol_supported - Check whether device supports WoL + * @hw: hw specific details + * @device_id: the device ID + * @subdev_id: the subsystem device ID + * + * This function is used by probe and ethtool to determine + * which devices have WoL support + * + **/ +int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, + u16 subdevice_id) +{ + struct ixgbe_hw *hw = &adapter->hw; + u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; + int is_wol_supported = 0; + + switch (device_id) { + case IXGBE_DEV_ID_82599_SFP: + /* Only these subdevices could supports WOL */ + switch (subdevice_id) { + case IXGBE_SUBDEV_ID_82599_560FLR: + /* only support first port */ + if (hw->bus.func != 0) + break; + case IXGBE_SUBDEV_ID_82599_SFP: + is_wol_supported = 1; + break; + } + break; + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + /* All except this subdevice support WOL */ + if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) + is_wol_supported = 1; + break; + case IXGBE_DEV_ID_82599_KX4: + is_wol_supported = 1; + break; + case IXGBE_DEV_ID_X540T: + /* check eeprom to see if enabled wol */ + if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || + ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && + (hw->bus.func == 0))) { + is_wol_supported = 1; + } + break; + } + + return is_wol_supported; +} + +/** * ixgbe_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in ixgbe_pci_tbl @@ -6766,7 +6982,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, u16 device_caps; #endif u32 eec; - u16 wol_cap; /* Catch broken hardware that put the wrong VF device ID in * the PCIe SR-IOV capability. @@ -7030,42 +7245,18 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, netdev->features &= ~NETIF_F_RXHASH; } - /* WOL not supported for all but the following */ + /* WOL not supported for all devices */ adapter->wol = 0; - switch (pdev->device) { - case IXGBE_DEV_ID_82599_SFP: - /* Only these subdevice supports WOL */ - switch (pdev->subsystem_device) { - case IXGBE_SUBDEV_ID_82599_560FLR: - /* only support first port */ - if (hw->bus.func != 0) - break; - case IXGBE_SUBDEV_ID_82599_SFP: - adapter->wol = IXGBE_WUFC_MAG; - break; - } - break; - case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: - /* All except this subdevice support WOL */ - if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) - adapter->wol = IXGBE_WUFC_MAG; - break; - case IXGBE_DEV_ID_82599_KX4: + hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); + if (ixgbe_wol_supported(adapter, pdev->device, pdev->subsystem_device)) adapter->wol = IXGBE_WUFC_MAG; - break; - case IXGBE_DEV_ID_X540T: - /* Check eeprom to see if it is enabled */ - hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); - wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; - if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || - ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && - (hw->bus.func == 0))) - adapter->wol = IXGBE_WUFC_MAG; - break; - } device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); +#ifdef CONFIG_IXGBE_PTP + ixgbe_ptp_init(adapter); +#endif /* CONFIG_IXGBE_PTP*/ + /* save off EEPROM version number */ hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh); hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl); @@ -7152,6 +7343,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, e_dev_info("%s\n", ixgbe_default_device_descr); cards_found++; + +#ifdef CONFIG_IXGBE_HWMON + if (ixgbe_sysfs_init(adapter)) + 
e_err(probe, "failed to allocate sysfs resources\n"); +#endif /* CONFIG_IXGBE_HWMON */ + return 0; err_register: @@ -7190,6 +7387,10 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) set_bit(__IXGBE_DOWN, &adapter->state); cancel_work_sync(&adapter->service_task); +#ifdef CONFIG_IXGBE_PTP + ixgbe_ptp_stop(adapter); +#endif + #ifdef CONFIG_IXGBE_DCA if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; @@ -7198,6 +7399,10 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) } #endif +#ifdef CONFIG_IXGBE_HWMON + ixgbe_sysfs_exit(adapter); +#endif /* CONFIG_IXGBE_HWMON */ + #ifdef IXGBE_FCOE if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) ixgbe_cleanup_fcoe(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index bf9f82f4b1a..24117709d6a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -1582,13 +1582,21 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) **/ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { - *i2cctl |= IXGBE_I2C_CLK_OUT; - - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); - IXGBE_WRITE_FLUSH(hw); + u32 i = 0; + u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT; + u32 i2cctl_r = 0; - /* SCL rise time (1000ns) */ - udelay(IXGBE_I2C_T_RISE); + for (i = 0; i < timeout; i++) { + *i2cctl |= IXGBE_I2C_CLK_OUT; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_FLUSH(hw); + /* SCL rise time (1000ns) */ + udelay(IXGBE_I2C_T_RISE); + + i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + if (i2cctl_r & IXGBE_I2C_CLK_IN) + break; + } } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c new file mode 100644 index 00000000000..ddc6a4d1930 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -0,0 +1,900 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ +#include "ixgbe.h" +#include <linux/export.h> + +/* + * The 82599 and the X540 do not have true 64bit nanosecond scale + * counter registers. Instead, SYSTIME is defined by a fixed point + * system which allows the user to define the scale counter increment + * value at every level change of the oscillator driving the SYSTIME + * value. For both devices the TIMINCA:IV field defines this + * increment. On the X540 device, 31 bits are provided. 
However on the + * 82599 only provides 24 bits. The time unit is determined by the + * clock frequency of the oscillator in combination with the TIMINCA + * register. When these devices link at 10Gb the oscillator has a + * period of 6.4ns. In order to convert the scale counter into + * nanoseconds the cyclecounter and timecounter structures are + * used. The SYSTIME registers need to be converted to ns values by use + * of only a right shift (division by power of 2). The following math + * determines the largest incvalue that will fit into the available + * bits in the TIMINCA register. + * + * PeriodWidth: Number of bits to store the clock period + * MaxWidth: The maximum width value of the TIMINCA register + * Period: The clock period for the oscillator + * round(): discard the fractional portion of the calculation + * + * Period * [ 2 ^ ( MaxWidth - PeriodWidth ) ] + * + * For the X540, MaxWidth is 31 bits, and the base period is 6.4 ns + * For the 82599, MaxWidth is 24 bits, and the base period is 6.4 ns + * + * The period also changes based on the link speed: + * At 10Gb link or no link, the period remains the same. + * At 1Gb link, the period is multiplied by 10. (64ns) + * At 100Mb link, the period is multiplied by 100. (640ns) + * + * The calculated value allows us to right shift the SYSTIME register + * value in order to quickly convert it into a nanosecond clock, + * while allowing for the maximum possible adjustment value. + * + * These diagrams are only for the 10Gb link period + * + * SYSTIMEH SYSTIMEL + * +--------------+ +--------------+ + * X540 | 32 | | 1 | 3 | 28 | + * *--------------+ +--------------+ + * \________ 36 bits ______/ fract + * + * +--------------+ +--------------+ + * 82599 | 32 | | 8 | 3 | 21 | + * *--------------+ +--------------+ + * \________ 43 bits ______/ fract + * + * The 36 bit X540 SYSTIME overflows every + * 2^36 * 10^-9 / 60 = 1.14 minutes or 69 seconds + * + * The 43 bit 82599 SYSTIME overflows every + * 2^43 * 10^-9 / 3600 = 2.4 hours + */ +#define IXGBE_INCVAL_10GB 0x66666666 +#define IXGBE_INCVAL_1GB 0x40000000 +#define IXGBE_INCVAL_100 0x50000000 + +#define IXGBE_INCVAL_SHIFT_10GB 28 +#define IXGBE_INCVAL_SHIFT_1GB 24 +#define IXGBE_INCVAL_SHIFT_100 21 + +#define IXGBE_INCVAL_SHIFT_82599 7 +#define IXGBE_INCPER_SHIFT_82599 24 +#define IXGBE_MAX_TIMEADJ_VALUE 0x7FFFFFFFFFFFFFFFULL + +#define IXGBE_OVERFLOW_PERIOD (HZ * 30) + +#ifndef NSECS_PER_SEC +#define NSECS_PER_SEC 1000000000ULL +#endif + +/** + * ixgbe_ptp_read - read raw cycle counter (to be used by time counter) + * @cc - the cyclecounter structure + * + * this function reads the cyclecounter registers and is called by the + * cyclecounter structure used to construct a ns counter from the + * arbitrary fixed point registers + */ +static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc) +{ + struct ixgbe_adapter *adapter = + container_of(cc, struct ixgbe_adapter, cc); + struct ixgbe_hw *hw = &adapter->hw; + u64 stamp = 0; + + stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; + + return stamp; +} + +/** + * ixgbe_ptp_adjfreq + * @ptp - the ptp clock structure + * @ppb - parts per billion adjustment from base + * + * adjust the frequency of the ptp cycle counter by the + * indicated ppb from the base frequency. 
+ */ +static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + struct ixgbe_hw *hw = &adapter->hw; + u64 freq; + u32 diff, incval; + int neg_adj = 0; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + smp_mb(); + incval = ACCESS_ONCE(adapter->base_incval); + + freq = incval; + freq *= ppb; + diff = div_u64(freq, 1000000000ULL); + + incval = neg_adj ? (incval - diff) : (incval + diff); + + switch (hw->mac.type) { + case ixgbe_mac_X540: + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); + break; + case ixgbe_mac_82599EB: + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, + (1 << IXGBE_INCPER_SHIFT_82599) | + incval); + break; + default: + break; + } + + return 0; +} + +/** + * ixgbe_ptp_adjtime + * @ptp - the ptp clock structure + * @delta - offset to adjust the cycle counter by + * + * adjust the timer by resetting the timecounter structure. + */ +static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + unsigned long flags; + u64 now; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + now = timecounter_read(&adapter->tc); + now += delta; + + /* reset the timecounter */ + timecounter_init(&adapter->tc, + &adapter->cc, + now); + + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + return 0; +} + +/** + * ixgbe_ptp_gettime + * @ptp - the ptp clock structure + * @ts - timespec structure to hold the current time value + * + * read the timecounter and return the correct value on ns, + * after converting it into a struct timespec. + */ +static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + u64 ns; + u32 remainder; + unsigned long flags; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->tc); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); + ts->tv_nsec = remainder; + + return 0; +} + +/** + * ixgbe_ptp_settime + * @ptp - the ptp clock structure + * @ts - the timespec containing the new time for the cycle counter + * + * reset the timecounter to use a new base value instead of the kernel + * wall timer value. + */ +static int ixgbe_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + u64 ns; + unsigned long flags; + + ns = ts->tv_sec * 1000000000ULL; + ns += ts->tv_nsec; + + /* reset the timecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_init(&adapter->tc, &adapter->cc, ns); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + return 0; +} + +/** + * ixgbe_ptp_enable + * @ptp - the ptp clock structure + * @rq - the requested feature to change + * @on - whether to enable or disable the feature + * + * enable (or disable) ancillary features of the phc subsystem. + * our driver only supports the PPS feature on the X540 + */ +static int ixgbe_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + + /** + * When PPS is enabled, unmask the interrupt for the ClockOut + * feature, so that the interrupt handler can send the PPS + * event when the clock SDP triggers. 
Clear mask when PPS is + * disabled + */ + if (rq->type == PTP_CLK_REQ_PPS) { + switch (adapter->hw.mac.type) { + case ixgbe_mac_X540: + if (on) + adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; + else + adapter->flags2 &= + ~IXGBE_FLAG2_PTP_PPS_ENABLED; + return 0; + default: + break; + } + } + + return -ENOTSUPP; +} + +/** + * ixgbe_ptp_check_pps_event + * @adapter - the private adapter structure + * @eicr - the interrupt cause register value + * + * This function is called by the interrupt routine when checking for + * interrupts. It will check and handle a pps event. + */ +void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ptp_clock_event event; + + event.type = PTP_CLOCK_PPS; + + /* Make sure ptp clock is valid, and PPS event enabled */ + if (!adapter->ptp_clock || + !(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED)) + return; + + switch (hw->mac.type) { + case ixgbe_mac_X540: + if (eicr & IXGBE_EICR_TIMESYNC) + ptp_clock_event(adapter->ptp_clock, &event); + break; + default: + break; + } +} + +/** + * ixgbe_ptp_enable_sdp + * @hw - the hardware private structure + * @shift - the clock shift for calculating nanoseconds + * + * this function enables the clock out feature on the sdp0 for the + * X540 device. It will create a 1second periodic output that can be + * used as the PPS (via an interrupt). + * + * It calculates when the systime will be on an exact second, and then + * aligns the start of the PPS signal to that value. The shift is + * necessary because it can change based on the link speed. + */ +static void ixgbe_ptp_enable_sdp(struct ixgbe_hw *hw, int shift) +{ + u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh; + u64 clock_edge = 0; + u32 rem; + + switch (hw->mac.type) { + case ixgbe_mac_X540: + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + + /* + * enable the SDP0 pin as output, and connected to the native + * function for Timesync (ClockOut) + */ + esdp |= (IXGBE_ESDP_SDP0_DIR | + IXGBE_ESDP_SDP0_NATIVE); + + /* + * enable the Clock Out feature on SDP0, and allow interrupts + * to occur when the pin changes + */ + tsauxc = (IXGBE_TSAUXC_EN_CLK | + IXGBE_TSAUXC_SYNCLK | + IXGBE_TSAUXC_SDP0_INT); + + /* clock period (or pulse length) */ + clktiml = (u32)(NSECS_PER_SEC << shift); + clktimh = (u32)((NSECS_PER_SEC << shift) >> 32); + + clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; + + /* + * account for the fact that we can't do u64 division + * with remainder, by converting the clock values into + * nanoseconds first + */ + clock_edge >>= shift; + div_u64_rem(clock_edge, NSECS_PER_SEC, &rem); + clock_edge += (NSECS_PER_SEC - rem); + clock_edge <<= shift; + + /* specify the initial clock start time */ + trgttiml = (u32)clock_edge; + trgttimh = (u32)(clock_edge >> 32); + + IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml); + IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh); + IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); + IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); + + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); + + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_TIMESYNC); + break; + default: + break; + } +} + +/** + * ixgbe_ptp_disable_sdp + * @hw - the private hardware structure + * + * this function disables the auxiliary SDP clock out feature + */ +static void ixgbe_ptp_disable_sdp(struct ixgbe_hw *hw) +{ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_TIMESYNC); + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0); +} + 
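
Editor's note (not part of the patch): the IXGBE_INCVAL_* and IXGBE_INCVAL_SHIFT_* constants above follow directly from the 6.4 ns DMA clock period described in the header comment, and ixgbe_ptp_adjfreq() simply scales that base increment by ppb/10^9. A minimal stand-alone sketch of the arithmetic, for illustration only:

	/*
	 * Sketch only: reproduces the TIMINCA increment values defined above
	 * from the per-speed clock period and shift, and shows the adjfreq
	 * scaling.  Builds with any hosted C compiler.
	 */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* period (ns) * 2^shift == TIMINCA increment value */
		printf("10Gb : %#x\n", (unsigned)(6.4   * (1u << 28))); /* 0x66666666 */
		printf("1Gb  : %#x\n", (unsigned)(64.0  * (1u << 24))); /* 0x40000000 */
		printf("100Mb: %#x\n", (unsigned)(640.0 * (1u << 21))); /* 0x50000000 */

		/* ixgbe_ptp_adjfreq(): diff = base_incval * ppb / 10^9 */
		uint64_t incval = 0x66666666;
		uint32_t diff = (uint32_t)((incval * 100) / 1000000000ULL);
		printf("+100 ppb at 10Gb adds %u to the increment\n", diff); /* 171 */
		return 0;
	}

On 82599 the same value is narrowed by IXGBE_INCVAL_SHIFT_82599 (7) so it fits the 24-bit IV field, with the increment period field set to 1, as ixgbe_ptp_start_cyclecounter() does further below.
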
+/** + * ixgbe_ptp_overflow_check - delayed work to detect SYSTIME overflow + * @work: structure containing information about this work task + * + * this work function is scheduled to continue reading the timecounter + * in order to prevent missing when the system time registers wrap + * around. This needs to be run approximately twice a minute when no + * PTP activity is occurring. + */ +void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) +{ + unsigned long elapsed_jiffies = adapter->last_overflow_check - jiffies; + struct timespec ts; + + if ((adapter->flags2 & IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED) && + (elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) { + ixgbe_ptp_gettime(&adapter->ptp_caps, &ts); + adapter->last_overflow_check = jiffies; + } +} + +/** + * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @q_vector: structure containing interrupt and ring information + * @skb: particular skb to send timestamp with + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) +{ + struct ixgbe_adapter *adapter; + struct ixgbe_hw *hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval = 0, ns; + u32 tsynctxctl; + unsigned long flags; + + /* we cannot process timestamps on a ring without a q_vector */ + if (!q_vector || !q_vector->adapter) + return; + + adapter = q_vector->adapter; + hw = &adapter->hw; + + tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); + regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32; + + /* + * if TX timestamp is not valid, exit after clearing the + * timestamp registers + */ + if (!(tsynctxctl & IXGBE_TSYNCTXCTL_VALID)) + return; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->tc, regval); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &shhwtstamps); +} + +/** + * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp + * @q_vector: structure containing interrupt and ring information + * @skb: particular skb to send timestamp with + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) +{ + struct ixgbe_adapter *adapter; + struct ixgbe_hw *hw; + struct skb_shared_hwtstamps *shhwtstamps; + u64 regval = 0, ns; + u32 tsyncrxctl; + unsigned long flags; + + /* we cannot process timestamps on a ring without a q_vector */ + if (!q_vector || !q_vector->adapter) + return; + + adapter = q_vector->adapter; + hw = &adapter->hw; + + tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); + regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32; + + /* + * If this bit is set, then the RX registers contain the time stamp. No + * other packet will be time stamped until we read these registers, so + * read the registers to make them available again. 
Because only one + * packet can be time stamped at a time, we know that the register + * values must belong to this one here and therefore we don't need to + * compare any of the additional attributes stored for it. + * + * If nothing went wrong, then it should have a skb_shared_tx that we + * can turn into a skb_shared_hwtstamps. + */ + if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) + return; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->tc, regval); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = ns_to_ktime(ns); +} + +/** + * ixgbe_ptp_hwtstamp_ioctl - control hardware time stamping + * @adapter: pointer to adapter struct + * @ifreq: ioctl data + * @cmd: particular ioctl requested + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't case any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware + * filters. Not all combinations are supported, in particular event + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". + */ +int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, + struct ifreq *ifr, int cmd) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct hwtstamp_config config; + u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED; + u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED; + u32 tsync_rx_mtrl = 0; + bool is_l4 = false; + bool is_l2 = false; + u32 regval; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2; + tsync_rx_mtrl = IXGBE_RXMTRL_V2_SYNC_MSG; + is_l2 = true; + is_l4 = true; + config.rx_filter = HWTSTAMP_FILTER_SOME; + break; + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2; + tsync_rx_mtrl = IXGBE_RXMTRL_V2_DELAY_REQ_MSG; + is_l2 = true; + is_l4 = true; + config.rx_filter = HWTSTAMP_FILTER_SOME; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_ALL: + default: + /* + * register RXMTRL must be set, therefore it is not + * possible to time stamp both V1 Sync and 
Delay_Req messages + * and hardware does not support timestamping all packets + * => return error + */ + return -ERANGE; + } + + if (hw->mac.type == ixgbe_mac_82598EB) { + if (tsync_rx_ctl | tsync_tx_ctl) + return -ERANGE; + return 0; + } + + /* define ethertype filter for timestamped packets */ + if (is_l2) + IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), + (IXGBE_ETQF_FILTER_EN | /* enable filter */ + IXGBE_ETQF_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), 0); + +#define PTP_PORT 319 + /* L4 Queue Filter[3]: filter by destination port and protocol */ + if (is_l4) { + u32 ftqf = (IXGBE_FTQF_PROTOCOL_UDP /* UDP */ + | IXGBE_FTQF_POOL_MASK_EN /* Pool not compared */ + | IXGBE_FTQF_QUEUE_ENABLE); + + ftqf |= ((IXGBE_FTQF_PROTOCOL_COMP_MASK /* protocol check */ + & IXGBE_FTQF_DEST_PORT_MASK /* dest check */ + & IXGBE_FTQF_SOURCE_PORT_MASK) /* source check */ + << IXGBE_FTQF_5TUPLE_MASK_SHIFT); + + IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(3), + (3 << IXGBE_IMIR_RX_QUEUE_SHIFT_82599 | + IXGBE_IMIR_SIZE_BP_82599)); + + /* enable port check */ + IXGBE_WRITE_REG(hw, IXGBE_SDPQF(3), + (htons(PTP_PORT) | + htons(PTP_PORT) << 16)); + + IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), ftqf); + + tsync_rx_mtrl |= PTP_PORT << 16; + } else { + IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), 0); + } + + /* enable/disable TX */ + regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + regval &= ~IXGBE_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, regval); + + /* enable/disable RX */ + regval = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + regval &= ~(IXGBE_TSYNCRXCTL_ENABLED | IXGBE_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, regval); + + /* define which PTP packets are time stamped */ + IXGBE_WRITE_REG(hw, IXGBE_RXMTRL, tsync_rx_mtrl); + + IXGBE_WRITE_FLUSH(hw); + + /* clear TX/RX time stamp registers, just to be sure */ + regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH); + regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw + * @adapter - pointer to the adapter structure + * + * this function initializes the timecounter and cyclecounter + * structures for use in generated a ns counter from the arbitrary + * fixed point cycles registers in the hardware. + * + * A change in link speed impacts the frequency of the DMA clock on + * the device, which is used to generate the cycle counter + * registers. Therefor this function is called whenever the link speed + * changes. + * + * This function also turns on the SDP pin for clock out feature (X540 + * only), because this is where the shift is first calculated. + */ +void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 incval = 0; + u32 shift = 0; + u32 cycle_speed; + unsigned long flags; + + /** + * Determine what speed we need to set the cyclecounter + * for. It should be different for 100Mb, 1Gb, and 10Gb. Treat + * unknown speeds as 10Gb. (Hence why we can't just copy the + * link_speed. 
+ */ + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_100_FULL: + case IXGBE_LINK_SPEED_1GB_FULL: + case IXGBE_LINK_SPEED_10GB_FULL: + cycle_speed = adapter->link_speed; + break; + default: + /* cycle speed should be 10Gb when there is no link */ + cycle_speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + } + + /* Bail if the cycle speed didn't change */ + if (adapter->cycle_speed == cycle_speed) + return; + + /* disable the SDP clock out */ + ixgbe_ptp_disable_sdp(hw); + + /** + * Scale the NIC cycle counter by a large factor so that + * relatively small corrections to the frequency can be added + * or subtracted. The drawbacks of a large factor include + * (a) the clock register overflows more quickly, (b) the cycle + * counter structure must be able to convert the systime value + * to nanoseconds using only a multiplier and a right-shift, + * and (c) the value must fit within the timinca register space + * => math based on internal DMA clock rate and available bits + */ + switch (cycle_speed) { + case IXGBE_LINK_SPEED_100_FULL: + incval = IXGBE_INCVAL_100; + shift = IXGBE_INCVAL_SHIFT_100; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + incval = IXGBE_INCVAL_1GB; + shift = IXGBE_INCVAL_SHIFT_1GB; + break; + case IXGBE_LINK_SPEED_10GB_FULL: + incval = IXGBE_INCVAL_10GB; + shift = IXGBE_INCVAL_SHIFT_10GB; + break; + } + + /** + * Modify the calculated values to fit within the correct + * number of bits specified by the hardware. The 82599 doesn't + * have the same space as the X540, so bitshift the calculated + * values to fit. + */ + switch (hw->mac.type) { + case ixgbe_mac_X540: + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); + break; + case ixgbe_mac_82599EB: + incval >>= IXGBE_INCVAL_SHIFT_82599; + shift -= IXGBE_INCVAL_SHIFT_82599; + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, + (1 << IXGBE_INCPER_SHIFT_82599) | + incval); + break; + default: + /* other devices aren't supported */ + return; + } + + /* reset the system time registers */ + IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000); + IXGBE_WRITE_FLUSH(hw); + + /* now that the shift has been calculated and the systime + * registers reset, (re-)enable the Clock out feature*/ + ixgbe_ptp_enable_sdp(hw, shift); + + /* store the new cycle speed */ + adapter->cycle_speed = cycle_speed; + + ACCESS_ONCE(adapter->base_incval) = incval; + smp_mb(); + + /* grab the ptp lock */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + memset(&adapter->cc, 0, sizeof(adapter->cc)); + adapter->cc.read = ixgbe_ptp_read; + adapter->cc.mask = CLOCKSOURCE_MASK(64); + adapter->cc.shift = shift; + adapter->cc.mult = 1; + + /* reset the ns time counter */ + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); +} + +/** + * ixgbe_ptp_init + * @adapter - the ixgbe private adapter structure + * + * This function performs the required steps for enabling ptp + * support. If ptp support has already been loaded it simply calls the + * cyclecounter init routine and exits. 
+ */ +void ixgbe_ptp_init(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_X540: + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 250000000; + adapter->ptp_caps.n_alarm = 0; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_per_out = 0; + adapter->ptp_caps.pps = 1; + adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq; + adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; + adapter->ptp_caps.gettime = ixgbe_ptp_gettime; + adapter->ptp_caps.settime = ixgbe_ptp_settime; + adapter->ptp_caps.enable = ixgbe_ptp_enable; + break; + case ixgbe_mac_82599EB: + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 250000000; + adapter->ptp_caps.n_alarm = 0; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_per_out = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq; + adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; + adapter->ptp_caps.gettime = ixgbe_ptp_gettime; + adapter->ptp_caps.settime = ixgbe_ptp_settime; + adapter->ptp_caps.enable = ixgbe_ptp_enable; + break; + default: + adapter->ptp_clock = NULL; + return; + } + + spin_lock_init(&adapter->tmreg_lock); + + ixgbe_ptp_start_cyclecounter(adapter); + + /* (Re)start the overflow check */ + adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED; + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + e_dev_err("ptp_clock_register failed\n"); + } else + e_dev_info("registered PHC device on %s\n", netdev->name); + + return; +} + +/** + * ixgbe_ptp_stop - disable ptp device and stop the overflow check + * @adapter: pointer to adapter struct + * + * this function stops the ptp support, and cancels the delayed work. + */ +void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) +{ + ixgbe_ptp_disable_sdp(&adapter->hw); + + /* stop the overflow check task */ + adapter->flags2 &= ~IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED; + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_dev_info("removed PHC on %s\n", + adapter->netdev->name); + } +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 88a58cb0856..2d971d18696 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -544,13 +544,18 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); - if (retval) + if (retval) { pr_err("Error receiving message from VF\n"); + return retval; + } /* this is a message we already processed, do nothing */ if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) return retval; + /* flush the ack before we write any messages back */ + IXGBE_WRITE_FLUSH(hw); + /* * until the vf completes a virtual function reset it should not be * allowed to start any configuration. 
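
Editor's note on the ixgbe_ptp_init()/ixgbe_ptp_stop() hunks above (not part of the patch): once ptp_clock_register() succeeds, the PHC is exposed as a character device that user space can query through the standard PTP clock ioctls. A minimal sketch, assuming the clock came up as /dev/ptp0 (the index depends on registration order):

	/* Sketch only: query the capabilities of the PHC registered by ixgbe. */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	int main(void)
	{
		struct ptp_clock_caps caps;
		int fd = open("/dev/ptp0", O_RDWR);

		if (fd < 0) {
			perror("open /dev/ptp0");
			return 1;
		}
		if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps)) {
			perror("PTP_CLOCK_GETCAPS");
			close(fd);
			return 1;
		}
		/* pps mirrors what ixgbe_ptp_init() advertised: 1 on X540, 0 on 82599 */
		printf("max_adj=%d ppb, pps=%d\n", caps.max_adj, caps.pps);
		close(fd);
		return 0;
	}

The pps capability tells a caller whether the SDP0 pulse handled by ixgbe_ptp_check_pps_event() can be requested on this device; on 82599 it is reported as 0.
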
@@ -637,6 +642,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) case IXGBE_VF_SET_MACVLAN: index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; + if (adapter->vfinfo[vf].pf_set_mac && index > 0) { + e_warn(drv, "VF %d requested MACVLAN filter but is " + "administratively denied\n", vf); + retval = -1; + break; + } /* * If the VF is allowed to set MAC filters then turn off * anti-spoofing to avoid false positives. An index diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c new file mode 100644 index 00000000000..1d80b1cefa6 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c @@ -0,0 +1,245 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include "ixgbe_common.h" +#include "ixgbe_type.h" + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/sysfs.h> +#include <linux/kobject.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/hwmon.h> + +#ifdef CONFIG_IXGBE_HWMON +/* hwmon callback functions */ +static ssize_t ixgbe_hwmon_show_location(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + return sprintf(buf, "loc%u\n", + ixgbe_attr->sensor->location); +} + +static ssize_t ixgbe_hwmon_show_temp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value; + + /* reset the temp field */ + ixgbe_attr->hw->mac.ops.get_thermal_sensor_data(ixgbe_attr->hw); + + value = ixgbe_attr->sensor->temp; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t ixgbe_hwmon_show_cautionthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = ixgbe_attr->sensor->caution_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = ixgbe_attr->sensor->max_op_thresh; + + /* 
display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +/* + * ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. + * @ adapter: pointer to the adapter structure + * @ offset: offset in the eeprom sensor data table + * @ type: type of sensor data to display + * + * For each file we want in hwmon's sysfs interface we need a device_attribute + * This is included in our hwmon_attr struct that contains the references to + * the data structures we need to get the data to display. + */ +static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter, + unsigned int offset, int type) { + int rc; + unsigned int n_attr; + struct hwmon_attr *ixgbe_attr; + + n_attr = adapter->ixgbe_hwmon_buff.n_hwmon; + ixgbe_attr = &adapter->ixgbe_hwmon_buff.hwmon_list[n_attr]; + + switch (type) { + case IXGBE_HWMON_TYPE_LOC: + ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location; + snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), + "temp%u_label", offset); + break; + case IXGBE_HWMON_TYPE_TEMP: + ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp; + snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), + "temp%u_input", offset); + break; + case IXGBE_HWMON_TYPE_CAUTION: + ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh; + snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), + "temp%u_max", offset); + break; + case IXGBE_HWMON_TYPE_MAX: + ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh; + snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), + "temp%u_crit", offset); + break; + default: + rc = -EPERM; + return rc; + } + + /* These always the same regardless of type */ + ixgbe_attr->sensor = + &adapter->hw.mac.thermal_sensor_data.sensor[offset]; + ixgbe_attr->hw = &adapter->hw; + ixgbe_attr->dev_attr.store = NULL; + ixgbe_attr->dev_attr.attr.mode = S_IRUGO; + ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name; + + rc = device_create_file(&adapter->pdev->dev, + &ixgbe_attr->dev_attr); + + if (rc == 0) + ++adapter->ixgbe_hwmon_buff.n_hwmon; + + return rc; +} + +static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter) +{ + int i; + + if (adapter == NULL) + return; + + for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) { + device_remove_file(&adapter->pdev->dev, + &adapter->ixgbe_hwmon_buff.hwmon_list[i].dev_attr); + } + + kfree(adapter->ixgbe_hwmon_buff.hwmon_list); + + if (adapter->ixgbe_hwmon_buff.device) + hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device); +} + +/* called from ixgbe_main.c */ +void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter) +{ + ixgbe_sysfs_del_adapter(adapter); +} + +/* called from ixgbe_main.c */ +int ixgbe_sysfs_init(struct ixgbe_adapter *adapter) +{ + struct hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff; + unsigned int i; + int n_attrs; + int rc = 0; + + /* If this method isn't defined we don't support thermals */ + if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) { + goto exit; + } + + /* Don't create thermal hwmon interface if no sensors present */ + if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)) + goto exit; + + /* + * Allocation space for max attributs + * max num sensors * values (loc, temp, max, caution) + */ + n_attrs = IXGBE_MAX_SENSORS * 4; + ixgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr), + GFP_KERNEL); + if (!ixgbe_hwmon->hwmon_list) { + rc = -ENOMEM; + goto err; + } + + ixgbe_hwmon->device = hwmon_device_register(&adapter->pdev->dev); + if (IS_ERR(ixgbe_hwmon->device)) { + rc = PTR_ERR(ixgbe_hwmon->device); + goto err; + } + + for 
(i = 0; i < IXGBE_MAX_SENSORS; i++) { + /* + * Only create hwmon sysfs entries for sensors that have + * meaningful data for. + */ + if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0) + continue; + + /* Bail if any hwmon attr struct fails to initialize */ + rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION); + rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC); + rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP); + rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX); + if (rc) + goto err; + } + + goto exit; + +err: + ixgbe_sysfs_del_adapter(adapter); +exit: + return rc; +} +#endif /* CONFIG_IXGBE_HWMON */ + diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 8636e8344fc..204848d2448 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -110,6 +110,28 @@ #define IXGBE_I2C_CLK_OUT 0x00000002 #define IXGBE_I2C_DATA_IN 0x00000004 #define IXGBE_I2C_DATA_OUT 0x00000008 +#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500 + +#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define IXGBE_EMC_INTERNAL_DATA 0x00 +#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20 +#define IXGBE_EMC_DIODE1_DATA 0x01 +#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19 +#define IXGBE_EMC_DIODE2_DATA 0x23 +#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A + +#define IXGBE_MAX_SENSORS 3 + +struct ixgbe_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct ixgbe_thermal_sensor_data { + struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS]; +}; /* Interrupt Registers */ #define IXGBE_EICR 0x00800 @@ -802,6 +824,8 @@ #define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */ #define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */ #define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */ +#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */ +#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */ #define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */ #define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */ #define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ @@ -1287,6 +1311,7 @@ enum { #define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ #define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ #define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ +#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */ #define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ #define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ #define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ @@ -1304,6 +1329,7 @@ enum { #define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ #define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ #define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ #define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ #define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ #define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ @@ -1322,6 +1348,7 @@ enum { #define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ #define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermel Sensor Event */ +#define 
IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ #define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ #define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ #define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ @@ -1339,6 +1366,7 @@ enum { #define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ #define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ #define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ #define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ #define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ #define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ @@ -1479,8 +1507,10 @@ enum { #define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ #define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ #define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ +#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */ #define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */ #define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ +#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 Native Function */ /* LEDCTL Bit Masks */ #define IXGBE_LED_IVRT_BASE 0x00000040 @@ -1677,11 +1707,29 @@ enum { #define IXGBE_PBANUM0_PTR 0x15 #define IXGBE_PBANUM1_PTR 0x16 #define IXGBE_FREE_SPACE_PTR 0X3E + +/* External Thermal Sensor Config */ +#define IXGBE_ETS_CFG 0x26 +#define IXGBE_ETS_LTHRES_DELTA_MASK 0x07C0 +#define IXGBE_ETS_LTHRES_DELTA_SHIFT 6 +#define IXGBE_ETS_TYPE_MASK 0x0038 +#define IXGBE_ETS_TYPE_SHIFT 3 +#define IXGBE_ETS_TYPE_EMC 0x000 +#define IXGBE_ETS_TYPE_EMC_SHIFTED 0x000 +#define IXGBE_ETS_NUM_SENSORS_MASK 0x0007 +#define IXGBE_ETS_DATA_LOC_MASK 0x3C00 +#define IXGBE_ETS_DATA_LOC_SHIFT 10 +#define IXGBE_ETS_DATA_INDEX_MASK 0x0300 +#define IXGBE_ETS_DATA_INDEX_SHIFT 8 +#define IXGBE_ETS_DATA_HTHRESH_MASK 0x00FF + #define IXGBE_SAN_MAC_ADDR_PTR 0x28 #define IXGBE_DEVICE_CAPS 0x2C #define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 #define IXGBE_PCIE_MSIX_82599_CAPS 0x72 +#define IXGBE_MAX_MSIX_VECTORS_82599 0x40 #define IXGBE_PCIE_MSIX_82598_CAPS 0x62 +#define IXGBE_MAX_MSIX_VECTORS_82598 0x13 /* MSI-X capability fields masks */ #define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF @@ -1839,6 +1887,40 @@ enum { #define IXGBE_RXDCTL_RLPML_EN 0x00008000 #define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ +#define IXGBE_TSAUXC_EN_CLK 0x00000004 +#define IXGBE_TSAUXC_SYNCLK 0x00000008 +#define IXGBE_TSAUXC_SDP0_INT 0x00000040 + +#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */ + +#define IXGBE_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define IXGBE_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */ + +#define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF +#define IXGBE_RXMTRL_V1_SYNC_MSG 0x00 +#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG 0x01 +#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG 0x02 +#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03 +#define IXGBE_RXMTRL_V1_MGMT_MSG 0x04 + +#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00 +#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000 +#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100 +#define 
IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200 +#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300 +#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800 +#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900 +#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00 +#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00 +#define IXGBE_RXMTRL_V2_SIGNALING_MSG 0x0C00 +#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00 + #define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ #define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ #define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ @@ -1852,7 +1934,7 @@ enum { #define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ #define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ #define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ -#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF0 /* Receive FC Mask */ +#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Receive FC Mask */ #define IXGBE_MFLCN_RPFCE_SHIFT 4 @@ -1968,6 +2050,7 @@ enum { #define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ #define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ #define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ +#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE 1588 Time Stamp */ /* PSRTYPE bit definitions */ #define IXGBE_PSRTYPE_TCPHDR 0x00000010 @@ -2245,6 +2328,7 @@ struct ixgbe_adv_tx_context_desc { /* Adv Transmit Descriptor Config Masks */ #define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ #define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ +#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE 1588 Time Stamp */ #define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */ #define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ #define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ @@ -2533,9 +2617,6 @@ enum ixgbe_fc_mode { ixgbe_fc_rx_pause, ixgbe_fc_tx_pause, ixgbe_fc_full, -#ifdef CONFIG_DCB - ixgbe_fc_pfc, -#endif ixgbe_fc_default }; @@ -2768,10 +2849,12 @@ struct ixgbe_mac_operations { void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); /* Flow Control */ - s32 (*fc_enable)(struct ixgbe_hw *, s32); + s32 (*fc_enable)(struct ixgbe_hw *); /* Manageability interface */ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); + s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); + s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); }; struct ixgbe_phy_operations { @@ -2813,6 +2896,7 @@ struct ixgbe_mac_info { u16 wwnn_prefix; /* prefix for World Wide Port Name (WWPN) */ u16 wwpn_prefix; + u16 max_msix_vectors; #define IXGBE_MAX_MTA 128 u32 mta_shadow[IXGBE_MAX_MTA]; s32 mc_filter_type; @@ -2823,12 +2907,12 @@ struct ixgbe_mac_info { u32 rx_pb_size; u32 max_tx_queues; u32 max_rx_queues; - u32 max_msix_vectors; u32 orig_autoc; u32 orig_autoc2; bool orig_link_settings_stored; bool autotry_restart; u8 flags; + struct ixgbe_thermal_sensor_data thermal_sensor_data; }; struct ixgbe_phy_info { @@ -2938,7 +3022,6 @@ struct ixgbe_info { #define IXGBE_ERR_OVERTEMP -26 #define IXGBE_ERR_FC_NOT_NEGOTIATED -27 #define IXGBE_ERR_FC_NOT_SUPPORTED -28 -#define IXGBE_ERR_FLOW_CONTROL -29 #define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 #define IXGBE_ERR_PBA_SECTION -31 #define IXGBE_ERR_INVALID_ARGUMENT -32 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 97a991403bb..f90ec078ece 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -849,6 +849,8 @@ 
static struct ixgbe_mac_operations mac_ops_X540 = { .release_swfw_sync = &ixgbe_release_swfw_sync_X540, .disable_rx_buff = &ixgbe_disable_rx_buff_generic, .enable_rx_buff = &ixgbe_enable_rx_buff_generic, + .get_thermal_sensor_data = NULL, + .init_thermal_sensor_thresh = NULL, }; static struct ixgbe_eeprom_operations eeprom_ops_X540 = { diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 947b5c83073..e09a6cc633b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -40,6 +40,7 @@ typedef u32 ixgbe_link_speed; #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 #define IXGBE_LINK_SPEED_10GB_FULL 0x0080 +#define IXGBE_LINK_SPEED_100_FULL 0x0008 #define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ #define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ @@ -48,6 +49,7 @@ typedef u32 ixgbe_link_speed; #define IXGBE_LINKS_SPEED_82599 0x30000000 #define IXGBE_LINKS_SPEED_10G_82599 0x30000000 #define IXGBE_LINKS_SPEED_1G_82599 0x20000000 +#define IXGBE_LINKS_SPEED_100_82599 0x10000000 /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ #define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 2bfe0d1d795..e8dddf572d3 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -107,10 +107,20 @@ static int ixgbevf_get_settings(struct net_device *netdev, hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (link_up) { - ethtool_cmd_speed_set( - ecmd, - (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? - SPEED_10000 : SPEED_1000); + __u32 speed = SPEED_10000; + switch (link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + speed = SPEED_10000; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + speed = SPEED_1000; + break; + case IXGBE_LINK_SPEED_100_FULL: + speed = SPEED_100; + break; + } + + ethtool_cmd_speed_set(ecmd, speed); ecmd->duplex = DUPLEX_FULL; } else { ethtool_cmd_speed_set(ecmd, -1); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index dfed420a1bf..0a1b99240d4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -287,7 +287,7 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; extern const char ixgbevf_driver_name[]; extern const char ixgbevf_driver_version[]; -extern int ixgbevf_up(struct ixgbevf_adapter *adapter); +extern void ixgbevf_up(struct ixgbevf_adapter *adapter); extern void ixgbevf_down(struct ixgbevf_adapter *adapter); extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter); extern void ixgbevf_reset(struct ixgbevf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 307611ae831..f69ec4288b1 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -57,7 +57,7 @@ const char ixgbevf_driver_name[] = "ixgbevf"; static const char ixgbevf_driver_string[] = "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; -#define DRV_VERSION "2.2.0-k" +#define DRV_VERSION "2.6.0-k" const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = "Copyright (c) 2009 - 2012 Intel Corporation."; @@ -1608,13 +1608,14 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) 
adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; } -static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter) +static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; int i, j = 0; int num_rx_rings = adapter->num_rx_queues; u32 txdctl, rxdctl; + u32 msg[2]; for (i = 0; i < adapter->num_tx_queues; i++) { j = adapter->tx_ring[i].reg_idx; @@ -1653,6 +1654,10 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter) hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); } + msg[0] = IXGBE_VF_SET_LPE; + msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + hw->mbx.ops.write_posted(hw, msg, 2); + clear_bit(__IXGBEVF_DOWN, &adapter->state); ixgbevf_napi_enable_all(adapter); @@ -1667,24 +1672,20 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter) adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; adapter->link_check_timeout = jiffies; mod_timer(&adapter->watchdog_timer, jiffies); - return 0; } -int ixgbevf_up(struct ixgbevf_adapter *adapter) +void ixgbevf_up(struct ixgbevf_adapter *adapter) { - int err; struct ixgbe_hw *hw = &adapter->hw; ixgbevf_configure(adapter); - err = ixgbevf_up_complete(adapter); + ixgbevf_up_complete(adapter); /* clear any pending interrupts, may auto mask */ IXGBE_READ_REG(hw, IXGBE_VTEICR); ixgbevf_irq_enable(adapter, true, true); - - return err; } /** @@ -2673,9 +2674,7 @@ static int ixgbevf_open(struct net_device *netdev) */ ixgbevf_map_rings_to_vectors(adapter); - err = ixgbevf_up_complete(adapter); - if (err) - goto err_up; + ixgbevf_up_complete(adapter); /* clear any pending interrupts, may auto mask */ IXGBE_READ_REG(hw, IXGBE_VTEICR); @@ -2689,7 +2688,6 @@ static int ixgbevf_open(struct net_device *netdev) err_req_irq: ixgbevf_down(adapter); -err_up: ixgbevf_free_irq(adapter); err_setup_rx: ixgbevf_free_all_rx_resources(adapter); @@ -3196,9 +3194,11 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; - msg[0] = IXGBE_VF_SET_LPE; - msg[1] = max_frame; - hw->mbx.ops.write_posted(hw, msg, 2); + if (!netif_running(netdev)) { + msg[0] = IXGBE_VF_SET_LPE; + msg[1] = max_frame; + hw->mbx.ops.write_posted(hw, msg, 2); + } if (netif_running(netdev)) ixgbevf_reinit_locked(adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 74be7411242..ec89b86f7ca 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -404,11 +404,17 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, else *link_up = false; - if ((links_reg & IXGBE_LINKS_SPEED_82599) == - IXGBE_LINKS_SPEED_10G_82599) + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: *speed = IXGBE_LINK_SPEED_10GB_FULL; - else + break; + case IXGBE_LINKS_SPEED_1G_82599: *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; + break; + } return 0; } diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 5e1ca0f0509..c8950da60e6 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1665,6 +1665,7 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = { .get_strings = mv643xx_eth_get_strings, .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, .get_sset_count = mv643xx_eth_get_sset_count, + .get_ts_info = 
ethtool_op_get_ts_info, }; diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index efec6b60b32..1db023b075a 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1456,6 +1456,7 @@ static const struct ethtool_ops pxa168_ethtool_ops = { .set_settings = pxa168_set_settings, .get_drvinfo = pxa168_get_drvinfo, .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, }; static const struct net_device_ops pxa168_eth_netdev_ops = { diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 487a6c8bd4e..cace36f2ab9 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4825,14 +4825,14 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw) init_waitqueue_head(&hw->msi_wait); - sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW); - err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw); if (err) { dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); return err; } + sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW); + sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ); sky2_read8(hw, B0_CTST); diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig index 1bb93531f1b..5f027f95cc8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -11,6 +11,18 @@ config MLX4_EN This driver supports Mellanox Technologies ConnectX Ethernet devices. +config MLX4_EN_DCB + bool "Data Center Bridging (DCB) Support" + default y + depends on MLX4_EN && DCB + ---help--- + Say Y here if you want to use Data Center Bridging (DCB) in the + driver. + If set to N, will not be able to configure QoS and ratelimit attributes. + This flag is depended on the kernel's DCB support. 
+ + If unsure, set to Y + config MLX4_CORE tristate depends on PCI diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile index 4a40ab967ee..293127d28b3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Makefile +++ b/drivers/net/ethernet/mellanox/mlx4/Makefile @@ -7,3 +7,4 @@ obj-$(CONFIG_MLX4_EN) += mlx4_en.o mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \ en_resources.o en_netdev.o en_selftest.o +mlx4_en-$(CONFIG_MLX4_EN_DCB) += en_dcb_nl.o diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 773c70ea3f6..1bcead1fa2f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1254,7 +1254,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; u32 reply; - u32 slave_status = 0; u8 is_going_down = 0; int i; @@ -1274,10 +1273,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, } /*check if we are in the middle of FLR process, if so return "retry" status to the slave*/ - if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) { - slave_status = MLX4_DELAY_RESET_SLAVE; + if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) goto inform_slave_state; - } /* write the version in the event field */ reply |= mlx4_comm_get_version(); @@ -1557,7 +1554,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) return 0; err_resource: - mlx4_free_resource_tracker(dev); + mlx4_free_resource_tracker(dev, RES_TR_FREE_ALL); err_thread: flush_workqueue(priv->mfunc.master.comm_wq); destroy_workqueue(priv->mfunc.master.comm_wq); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 00b81272e31..908a460d8db 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -124,11 +124,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq; cq->mcq.event = mlx4_en_cq_event; - if (cq->is_tx) { - init_timer(&cq->timer); - cq->timer.function = mlx4_en_poll_tx_cq; - cq->timer.data = (unsigned long) cq; - } else { + if (!cq->is_tx) { netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); napi_enable(&cq->napi); } @@ -151,16 +147,12 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) { - struct mlx4_en_dev *mdev = priv->mdev; - - if (cq->is_tx) - del_timer(&cq->timer); - else { + if (!cq->is_tx) { napi_disable(&cq->napi); netif_napi_del(&cq->napi); } - mlx4_cq_free(mdev->dev, &cq->mcq); + mlx4_cq_free(priv->mdev->dev, &cq->mcq); } /* Set rx cq moderation parameters */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c new file mode 100644 index 00000000000..5d36795877c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2011 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include <linux/dcbnl.h> +#include <linux/math64.h> + +#include "mlx4_en.h" + +static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct ieee_ets *my_ets = &priv->ets; + + /* No IEEE PFC settings available */ + if (!my_ets) + return -EINVAL; + + ets->ets_cap = IEEE_8021QAZ_MAX_TCS; + ets->cbs = my_ets->cbs; + memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); + + return 0; +} + +static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets) +{ + int i; + int total_ets_bw = 0; + int has_ets_tc = 0; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->prio_tc[i] > MLX4_EN_NUM_UP) { + en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n", + i, ets->prio_tc[i]); + return -EINVAL; + } + + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + break; + case IEEE_8021QAZ_TSA_ETS: + has_ets_tc = 1; + total_ets_bw += ets->tc_tx_bw[i]; + break; + default: + en_err(priv, "TC[%d]: Not supported TSA: %d\n", + i, ets->tc_tsa[i]); + return -ENOTSUPP; + } + } + + if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) { + en_err(priv, "Bad ETS BW sum: %d. 
Should be exactly 100%%\n", + total_ets_bw); + return -EINVAL; + } + + return 0; +} + +static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv, + struct ieee_ets *ets, u16 *ratelimit) +{ + struct mlx4_en_dev *mdev = priv->mdev; + int num_strict = 0; + int i; + __u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 }; + __u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 }; + + ets = ets ?: &priv->ets; + ratelimit = ratelimit ?: priv->maxrate; + + /* higher TC means higher priority => lower pg */ + for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + pg[i] = num_strict++; + tc_tx_bw[i] = MLX4_EN_BW_MAX; + break; + case IEEE_8021QAZ_TSA_ETS: + pg[i] = MLX4_EN_TC_ETS; + tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN; + break; + } + } + + return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg, + ratelimit); +} + +static int +mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int err; + + err = mlx4_en_ets_validate(priv, ets); + if (err) + return err; + + err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc); + if (err) + return err; + + err = mlx4_en_config_port_scheduler(priv, ets, NULL); + if (err) + return err; + + memcpy(&priv->ets, ets, sizeof(priv->ets)); + + return 0; +} + +static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS; + pfc->pfc_en = priv->prof->tx_ppp; + + return 0; +} + +static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int err; + + en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n", + pfc->pfc_cap, + pfc->pfc_en, + pfc->mbc, + pfc->delay); + + priv->prof->rx_pause = priv->prof->tx_pause = !!pfc->pfc_en; + priv->prof->rx_ppp = priv->prof->tx_ppp = pfc->pfc_en; + + err = mlx4_SET_PORT_general(mdev->dev, priv->port, + priv->rx_skb_size + ETH_FCS_LEN, + priv->prof->tx_pause, + priv->prof->tx_ppp, + priv->prof->rx_pause, + priv->prof->rx_ppp); + if (err) + en_err(priv, "Failed setting pause params\n"); + + return err; +} + +static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev) +{ + return DCB_CAP_DCBX_VER_IEEE; +} + +static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode) +{ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + (mode & DCB_CAP_DCBX_VER_CEE) || + !(mode & DCB_CAP_DCBX_VER_IEEE) || + !(mode & DCB_CAP_DCBX_HOST)) + return 1; + + return 0; +} + +#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */ +static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev, + struct ieee_maxrate *maxrate) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int i; + + if (!priv->maxrate) + return -EINVAL; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + maxrate->tc_maxrate[i] = + priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB; + + return 0; +} + +static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev, + struct ieee_maxrate *maxrate) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + u16 tmp[IEEE_8021QAZ_MAX_TCS]; + int i, err; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + /* Convert from Kbps into HW units, rounding result up. + * Setting to 0, means unlimited BW. 
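 * (Worked example, for illustration only: with
 * MLX4_RATELIMIT_UNITS_IN_KB = 100000 as defined above, a requested
 * tc_maxrate of 2500000 Kbps becomes
 * div_u64(2500000 + 100000 - 1, 100000) = 25 hardware units,
 * i.e. 2.5 Gbps, while any non-zero request below 100000 Kbps still
 * rounds up to a single unit = 100 Mbps.)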
+ */ + tmp[i] = div_u64(maxrate->tc_maxrate[i] + + MLX4_RATELIMIT_UNITS_IN_KB - 1, + MLX4_RATELIMIT_UNITS_IN_KB); + } + + err = mlx4_en_config_port_scheduler(priv, NULL, tmp); + if (err) + return err; + + memcpy(priv->maxrate, tmp, sizeof(*priv->maxrate)); + + return 0; +} + +const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = { + .ieee_getets = mlx4_en_dcbnl_ieee_getets, + .ieee_setets = mlx4_en_dcbnl_ieee_setets, + .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate, + .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate, + .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, + .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, + + .getdcbx = mlx4_en_dcbnl_getdcbx, + .setdcbx = mlx4_en_dcbnl_setdcbx, +}; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 70346fd7f9c..72901ce2b08 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -83,7 +83,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = { #define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS) static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= { - "Interupt Test", + "Interrupt Test", "Link Test", "Speed Test", "Register Test", @@ -359,8 +359,8 @@ static int mlx4_en_get_coalesce(struct net_device *dev, { struct mlx4_en_priv *priv = netdev_priv(dev); - coal->tx_coalesce_usecs = 0; - coal->tx_max_coalesced_frames = 0; + coal->tx_coalesce_usecs = priv->tx_usecs; + coal->tx_max_coalesced_frames = priv->tx_frames; coal->rx_coalesce_usecs = priv->rx_usecs; coal->rx_max_coalesced_frames = priv->rx_frames; @@ -388,6 +388,21 @@ static int mlx4_en_set_coalesce(struct net_device *dev, MLX4_EN_RX_COAL_TIME : coal->rx_coalesce_usecs; + /* Setting TX coalescing parameters */ + if (coal->tx_coalesce_usecs != priv->tx_usecs || + coal->tx_max_coalesced_frames != priv->tx_frames) { + priv->tx_usecs = coal->tx_coalesce_usecs; + priv->tx_frames = coal->tx_max_coalesced_frames; + for (i = 0; i < priv->tx_ring_num; i++) { + priv->tx_cq[i].moder_cnt = priv->tx_frames; + priv->tx_cq[i].moder_time = priv->tx_usecs; + if (mlx4_en_set_cq_moder(priv, &priv->tx_cq[i])) { + en_warn(priv, "Failed changing moderation " + "for TX cq %d\n", i); + } + } + } + /* Set adaptive coalescing params */ priv->pkt_rate_low = coal->pkt_rate_low; priv->rx_usecs_low = coal->rx_coalesce_usecs_low; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 2097a7d3c5b..988b2424e1c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -101,6 +101,8 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) int i; params->udp_rss = udp_rss; + params->num_tx_rings_p_up = min_t(int, num_online_cpus(), + MLX4_EN_MAX_TX_RING_P_UP); if (params->udp_rss && !(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UDP_RSS)) { mlx4_warn(mdev, "UDP RSS is not supported on this device.\n"); @@ -113,8 +115,8 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) params->prof[i].tx_ppp = pfctx; params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; - params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS + - (!!pfcrx) * MLX4_EN_NUM_PPP_RINGS; + params->prof[i].tx_ring_num = params->num_tx_rings_p_up * + MLX4_EN_NUM_UP; params->prof[i].rss_rings = 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 31b455a4927..926d8aac941 100644 --- 
a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -45,6 +45,27 @@ #include "mlx4_en.h" #include "en_port.h" +static int mlx4_en_setup_tc(struct net_device *dev, u8 up) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int i; + unsigned int q, offset = 0; + + if (up && up != MLX4_EN_NUM_UP) + return -EINVAL; + + netdev_set_num_tc(dev, up); + + /* Partition Tx queues evenly amongst UP's */ + q = priv->tx_ring_num / up; + for (i = 0; i < up; i++) { + netdev_set_tc_queue(dev, i, q, offset); + offset += q; + } + + return 0; +} + static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct mlx4_en_priv *priv = netdev_priv(dev); @@ -421,6 +442,8 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) */ priv->rx_frames = MLX4_EN_RX_COAL_TARGET; priv->rx_usecs = MLX4_EN_RX_COAL_TIME; + priv->tx_frames = MLX4_EN_TX_COAL_PKTS; + priv->tx_usecs = MLX4_EN_TX_COAL_TIME; en_dbg(INTR, priv, "Default coalesing params for mtu:%d - " "rx_frames:%d rx_usecs:%d\n", priv->dev->mtu, priv->rx_frames, priv->rx_usecs); @@ -437,8 +460,8 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) for (i = 0; i < priv->tx_ring_num; i++) { cq = &priv->tx_cq[i]; - cq->moder_cnt = MLX4_EN_TX_COAL_PKTS; - cq->moder_time = MLX4_EN_TX_COAL_TIME; + cq->moder_cnt = priv->tx_frames; + cq->moder_time = priv->tx_usecs; } /* Reset auto-moderation params */ @@ -650,12 +673,18 @@ int mlx4_en_start_port(struct net_device *dev) /* Configure ring */ tx_ring = &priv->tx_ring[i]; - err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn); + err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, + i / priv->mdev->profile.num_tx_rings_p_up); if (err) { en_err(priv, "Failed allocating Tx ring\n"); mlx4_en_deactivate_cq(priv, cq); goto tx_err; } + tx_ring->tx_queue = netdev_get_tx_queue(dev, i); + + /* Arm CQ for TX completions */ + mlx4_en_arm_cq(priv, cq); + /* Set initial ownership of all Tx TXBBs to SW (1) */ for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) *((u32 *) (tx_ring->buf + j)) = 0xffffffff; @@ -797,12 +826,15 @@ static void mlx4_en_restart(struct work_struct *work) watchdog_task); struct mlx4_en_dev *mdev = priv->mdev; struct net_device *dev = priv->dev; + int i; en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); mutex_lock(&mdev->state_lock); if (priv->port_up) { mlx4_en_stop_port(dev); + for (i = 0; i < priv->tx_ring_num; i++) + netdev_tx_reset_queue(priv->tx_ring[i].tx_queue); if (mlx4_en_start_port(dev)) en_err(priv, "Failed restarting port %d\n", priv->port); } @@ -966,6 +998,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev) mutex_unlock(&mdev->state_lock); mlx4_en_free_resources(priv); + + kfree(priv->tx_ring); + kfree(priv->tx_cq); + free_netdev(dev); } @@ -1036,6 +1072,7 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_poll_controller = mlx4_en_netpoll, #endif .ndo_set_features = mlx4_en_set_features, + .ndo_setup_tc = mlx4_en_setup_tc, }; int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, @@ -1070,6 +1107,18 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | MLX4_WQE_CTRL_SOLICITED); priv->tx_ring_num = prof->tx_ring_num; + priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * + priv->tx_ring_num, GFP_KERNEL); + if (!priv->tx_ring) { + err = -ENOMEM; + goto out; + } + priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * priv->tx_ring_num, + GFP_KERNEL); + if (!priv->tx_cq) { + 
err = -ENOMEM; + goto out; + } priv->rx_ring_num = prof->rx_ring_num; priv->mac_index = -1; priv->msg_enable = MLX4_EN_MSG_LEVEL; @@ -1079,6 +1128,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, INIT_WORK(&priv->watchdog_task, mlx4_en_restart); INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); +#ifdef CONFIG_MLX4_EN_DCB + if (!mlx4_is_slave(priv->mdev->dev)) + dev->dcbnl_ops = &mlx4_en_dcbnl_ops; +#endif /* Query for default mac and max mtu */ priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h index 6934fd7e66e..745090b49d9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.h +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h @@ -39,6 +39,8 @@ #define SET_PORT_PROMISC_SHIFT 31 #define SET_PORT_MC_PROMISC_SHIFT 30 +#define MLX4_EN_NUM_TC 8 + #define VLAN_FLTR_SIZE 128 struct mlx4_set_vlan_fltr_mbox { __be32 entry[VLAN_FLTR_SIZE]; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c index bcbc54c1694..10c24c784b7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -39,7 +39,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, int is_tx, int rss, int qpn, int cqn, - struct mlx4_qp_context *context) + int user_prio, struct mlx4_qp_context *context) { struct mlx4_en_dev *mdev = priv->mdev; @@ -57,6 +57,10 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, context->local_qpn = cpu_to_be32(qpn); context->pri_path.ackto = 1 & 0x07; context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; + if (user_prio >= 0) { + context->pri_path.sched_queue |= user_prio << 3; + context->pri_path.feup = 1 << 6; + } context->pri_path.counter_index = 0xff; context->cqn_send = cpu_to_be32(cqn); context->cqn_recv = cpu_to_be32(cqn); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 9adbd53da52..d49a7ac3187 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -823,7 +823,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, memset(context, 0, sizeof *context); mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0, - qpn, ring->cqn, context); + qpn, ring->cqn, -1, context); context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); /* Cancel FCS removal if FW allows */ @@ -890,7 +890,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) } rss_map->indir_qp.event = mlx4_en_sqp_event; mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, - priv->rx_ring[0].cqn, &context); + priv->rx_ring[0].cqn, -1, &context); if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num) rss_rings = priv->rx_ring_num; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 17968244c39..019d856b133 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -67,8 +67,6 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, inline_thold = min(inline_thold, MAX_INLINE); - spin_lock_init(&ring->comp_lock); - tmp = size * sizeof(struct mlx4_en_tx_info); ring->tx_info = vmalloc(tmp); if (!ring->tx_info) @@ -156,7 +154,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, int 
mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, - int cq) + int cq, int user_prio) { struct mlx4_en_dev *mdev = priv->mdev; int err; @@ -174,7 +172,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, ring->doorbell_qpn = ring->qp.qpn << 8; mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, - ring->cqn, &ring->context); + ring->cqn, user_prio, &ring->context); if (ring->bf_enabled) ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); @@ -317,6 +315,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) int size = cq->size; u32 size_mask = ring->size_mask; struct mlx4_cqe *buf = cq->buf; + u32 packets = 0; + u32 bytes = 0; if (!priv->port_up) return; @@ -345,6 +345,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) priv, ring, ring_index, !!((ring->cons + txbbs_skipped) & ring->size)); + packets++; + bytes += ring->tx_info[ring_index].nr_bytes; } while (ring_index != new_index); ++cons_index; @@ -361,13 +363,14 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) mlx4_cq_set_ci(mcq); wmb(); ring->cons += txbbs_skipped; + netdev_tx_completed_queue(ring->tx_queue, packets, bytes); /* Wakeup Tx queue if this ring stopped it */ if (unlikely(ring->blocked)) { if ((u32) (ring->prod - ring->cons) <= ring->size - HEADROOM - MAX_DESC_TXBBS) { ring->blocked = 0; - netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring)); + netif_tx_wake_queue(ring->tx_queue); priv->port_stats.wake_queue++; } } @@ -377,41 +380,12 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq) { struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); struct mlx4_en_priv *priv = netdev_priv(cq->dev); - struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; - if (!spin_trylock(&ring->comp_lock)) - return; mlx4_en_process_tx_cq(cq->dev, cq); - mod_timer(&cq->timer, jiffies + 1); - spin_unlock(&ring->comp_lock); + mlx4_en_arm_cq(priv, cq); } -void mlx4_en_poll_tx_cq(unsigned long data) -{ - struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data; - struct mlx4_en_priv *priv = netdev_priv(cq->dev); - struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; - u32 inflight; - - INC_PERF_COUNTER(priv->pstats.tx_poll); - - if (!spin_trylock_irq(&ring->comp_lock)) { - mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); - return; - } - mlx4_en_process_tx_cq(cq->dev, cq); - inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb); - - /* If there are still packets in flight and the timer has not already - * been scheduled by the Tx routine then schedule it here to guarantee - * completion processing of these packets */ - if (inflight && priv->port_up) - mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); - - spin_unlock_irq(&ring->comp_lock); -} - static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, u32 index, @@ -440,25 +414,6 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, return ring->buf + index * TXBB_SIZE; } -static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind) -{ - struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind]; - struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind]; - unsigned long flags; - - /* If we don't have a pending timer, set one up to catch our recent - post in case the interface becomes idle */ - if (!timer_pending(&cq->timer)) - mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); - - /* Poll the CQ every 
mlx4_en_TX_MODER_POLL packets */ - if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) - if (spin_trylock_irqsave(&ring->comp_lock, flags)) { - mlx4_en_process_tx_cq(priv->dev, cq); - spin_unlock_irqrestore(&ring->comp_lock, flags); - } -} - static int is_inline(struct sk_buff *skb, void **pfrag) { void *ptr; @@ -571,17 +526,16 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) { struct mlx4_en_priv *priv = netdev_priv(dev); - u16 vlan_tag = 0; + u16 rings_p_up = priv->mdev->profile.num_tx_rings_p_up; + u8 up = 0; - /* If we support per priority flow control and the packet contains - * a vlan tag, send the packet to the TX ring assigned to that priority - */ - if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) { - vlan_tag = vlan_tx_tag_get(skb); - return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13); - } + if (dev->num_tc) + return skb_tx_hash(dev, skb); - return skb_tx_hash(dev, skb); + if (vlan_tx_tag_present(skb)) + up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT; + + return __skb_tx_hash(dev, skb, rings_p_up) + up * rings_p_up; } static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt) @@ -594,7 +548,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_tx_ring *ring; - struct mlx4_en_cq *cq; struct mlx4_en_tx_desc *tx_desc; struct mlx4_wqe_data_seg *data; struct skb_frag_struct *frag; @@ -638,13 +591,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(((int)(ring->prod - ring->cons)) > ring->size - HEADROOM - MAX_DESC_TXBBS)) { /* every full Tx ring stops queue */ - netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind)); + netif_tx_stop_queue(ring->tx_queue); ring->blocked = 1; priv->port_stats.queue_stopped++; - /* Use interrupts to find out when queue opened */ - cq = &priv->tx_cq[tx_ind]; - mlx4_en_arm_cq(priv, cq); return NETDEV_TX_BUSY; } @@ -707,7 +657,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) priv->port_stats.tso_packets++; i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) + !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size); - ring->bytes += skb->len + (i - 1) * lso_header_size; + tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size; ring->packets += i; } else { /* Normal (Non LSO) packet */ @@ -715,10 +665,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ((ring->prod & ring->size) ? 
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); data = &tx_desc->data; - ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN); + tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); ring->packets++; } + ring->bytes += tx_info->nr_bytes; + netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes); AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); @@ -792,9 +744,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); } - /* Poll CQ here */ - mlx4_en_xmit_poll(priv, tx_ind); - return NETDEV_TX_OK; tx_drop: diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 2a02ba522e6..24429a99190 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -1164,9 +1164,8 @@ int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) return err; - priv->mfunc.master.slave_state[slave].init_port_mask |= - (1 << port); } + priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); ++priv->mfunc.master.init_port_ref[port]; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 8bb05b46db8..984ace44104 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1306,7 +1306,7 @@ static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); } -int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) +int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -1319,13 +1319,44 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) return 0; } + +int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) +{ + u64 out_param; + int err; + + if (mlx4_is_mfunc(dev)) { + err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER, + RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (!err) + *idx = get_param_l(&out_param); + + return err; + } + return __mlx4_counter_alloc(dev, idx); +} EXPORT_SYMBOL_GPL(mlx4_counter_alloc); -void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) +void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) { mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx); return; } + +void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) +{ + u64 in_param; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, idx); + mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE, + MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + return; + } + __mlx4_counter_free(dev, idx); +} EXPORT_SYMBOL_GPL(mlx4_counter_free); static int mlx4_setup_hca(struct mlx4_dev *dev) @@ -1865,7 +1896,6 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) mlx4_err(dev, "Failed to enable sriov," "continuing without sriov enabled" " (err = %d).\n", err); - num_vfs = 0; err = 0; } else { mlx4_warn(dev, "Running in master mode\n"); @@ -2022,7 +2052,7 @@ err_cmd: mlx4_cmd_cleanup(dev); err_sriov: - if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) + if (dev->flags & MLX4_FLAG_SRIOV) pci_disable_sriov(pdev); err_rel_own: @@ -2070,6 +2100,10 @@ static void mlx4_remove_one(struct pci_dev *pdev) mlx4_CLOSE_PORT(dev, p); } + if (mlx4_is_master(dev)) + mlx4_free_resource_tracker(dev, + RES_TR_FREE_SLAVES_ONLY); + mlx4_cleanup_counters_table(dev); mlx4_cleanup_mcg_table(dev); mlx4_cleanup_qp_table(dev); @@ -2082,7 +2116,8 @@ static 
void mlx4_remove_one(struct pci_dev *pdev) mlx4_cleanup_pd_table(dev); if (mlx4_is_master(dev)) - mlx4_free_resource_tracker(dev); + mlx4_free_resource_tracker(dev, + RES_TR_FREE_STRUCTS_ONLY); iounmap(priv->kar); mlx4_uar_free(dev, &priv->driver_uar); @@ -2099,7 +2134,7 @@ static void mlx4_remove_one(struct pci_dev *pdev) if (dev->flags & MLX4_FLAG_MSI_X) pci_disable_msix(pdev); - if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) { + if (dev->flags & MLX4_FLAG_SRIOV) { mlx4_warn(dev, "Disabling sriov\n"); pci_disable_sriov(pdev); } diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 4799e824052..f4a8f98e402 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -357,7 +357,6 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port, u32 prot; int i; bool found; - int last_index; int err; struct mlx4_priv *priv = mlx4_priv(dev); @@ -419,7 +418,6 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port, if (err) goto out_mailbox; } - last_index = entry->index; } /* add the new qpn to list of promisc qps */ diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 2a0ff2cc718..86b6e5a2fab 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -53,6 +53,26 @@ #define DRV_VERSION "1.1" #define DRV_RELDATE "Dec, 2011" +#define MLX4_NUM_UP 8 +#define MLX4_NUM_TC 8 +#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */ +#define MLX4_RATELIMIT_DEFAULT 0xffff + +struct mlx4_set_port_prio2tc_context { + u8 prio2tc[4]; +}; + +struct mlx4_port_scheduler_tc_cfg_be { + __be16 pg; + __be16 bw_precentage; + __be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */ + __be16 max_bw_value; +}; + +struct mlx4_set_port_scheduler_context { + struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC]; +}; + enum { MLX4_HCR_BASE = 0x80680, MLX4_HCR_SIZE = 0x0001c, @@ -126,6 +146,11 @@ enum mlx4_alloc_mode { RES_OP_MAP_ICM, }; +enum mlx4_res_tracker_free_type { + RES_TR_FREE_ALL, + RES_TR_FREE_SLAVES_ONLY, + RES_TR_FREE_STRUCTS_ONLY, +}; /* *Virtual HCR structures. 
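For reference, the new struct mlx4_set_port_prio2tc_context above carries the eight user-priority-to-TC assignments packed two per byte, high nibble first; the packing loop itself appears in mlx4_SET_PORT_PRIO2TC() in port.c further down in this diff. A minimal standalone sketch of that packing (uses uint8_t in place of the kernel's u8):

    #include <stdint.h>

    /* Pack eight UP -> TC assignments into the 4-byte prio2tc[] wire format. */
    static void pack_prio2tc(const uint8_t up2tc[8], uint8_t out[4])
    {
            int i;

            for (i = 0; i < 8; i += 2)
                    out[i >> 1] = up2tc[i] << 4 | up2tc[i + 1];
    }

    /* Example: up2tc = {0, 0, 1, 1, 2, 2, 3, 3} packs to {0x00, 0x11, 0x22, 0x33}. */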
@@ -851,6 +876,10 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list); +int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); +void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx); +int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn); +void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn); void mlx4_start_catas_poll(struct mlx4_dev *dev); void mlx4_stop_catas_poll(struct mlx4_dev *dev); @@ -1007,7 +1036,8 @@ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id); int mlx4_init_resource_tracker(struct mlx4_dev *dev); -void mlx4_free_resource_tracker(struct mlx4_dev *dev); +void mlx4_free_resource_tracker(struct mlx4_dev *dev, + enum mlx4_res_tracker_free_type type); int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index d69fee41f24..6ae350921b1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -40,6 +40,9 @@ #include <linux/mutex.h> #include <linux/netdevice.h> #include <linux/if_vlan.h> +#ifdef CONFIG_MLX4_EN_DCB +#include <linux/dcbnl.h> +#endif #include <linux/mlx4/device.h> #include <linux/mlx4/qp.h> @@ -108,9 +111,8 @@ enum { #define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE) #define MLX4_EN_SMALL_PKT_SIZE 64 -#define MLX4_EN_NUM_TX_RINGS 8 -#define MLX4_EN_NUM_PPP_RINGS 8 -#define MAX_TX_RINGS (MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS) +#define MLX4_EN_MAX_TX_RING_P_UP 32 +#define MLX4_EN_NUM_UP 8 #define MLX4_EN_DEF_TX_RING_SIZE 512 #define MLX4_EN_DEF_RX_RING_SIZE 1024 @@ -118,7 +120,7 @@ enum { #define MLX4_EN_RX_COAL_TARGET 44 #define MLX4_EN_RX_COAL_TIME 0x10 -#define MLX4_EN_TX_COAL_PKTS 5 +#define MLX4_EN_TX_COAL_PKTS 16 #define MLX4_EN_TX_COAL_TIME 0x80 #define MLX4_EN_RX_RATE_LOW 400000 @@ -196,6 +198,7 @@ enum cq_type { struct mlx4_en_tx_info { struct sk_buff *skb; u32 nr_txbb; + u32 nr_bytes; u8 linear; u8 data_offset; u8 inl; @@ -251,9 +254,9 @@ struct mlx4_en_tx_ring { unsigned long bytes; unsigned long packets; unsigned long tx_csum; - spinlock_t comp_lock; struct mlx4_bf bf; bool bf_enabled; + struct netdev_queue *tx_queue; }; struct mlx4_en_rx_desc { @@ -304,8 +307,6 @@ struct mlx4_en_cq { spinlock_t lock; struct net_device *dev; struct napi_struct napi; - /* Per-core Tx cq processing support */ - struct timer_list timer; int size; int buf_size; unsigned vector; @@ -336,6 +337,7 @@ struct mlx4_en_profile { u32 active_ports; u32 small_pkt_int; u8 no_reset; + u8 num_tx_rings_p_up; struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1]; }; @@ -411,6 +413,15 @@ struct mlx4_en_frag_info { }; +#ifdef CONFIG_MLX4_EN_DCB +/* Minimal TC BW - setting to 0 will block traffic */ +#define MLX4_EN_BW_MIN 1 +#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */ + +#define MLX4_EN_TC_ETS 7 + +#endif + struct mlx4_en_priv { struct mlx4_en_dev *mdev; struct mlx4_en_port_profile *prof; @@ -465,9 +476,9 @@ struct mlx4_en_priv { u16 num_frags; u16 log_rx_info; - struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS]; + struct mlx4_en_tx_ring *tx_ring; struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; - struct mlx4_en_cq tx_cq[MAX_TX_RINGS]; + struct mlx4_en_cq *tx_cq; struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; 
struct work_struct mcast_task; struct work_struct mac_task; @@ -484,6 +495,11 @@ struct mlx4_en_priv { int vids[128]; bool wol; struct device *ddev; + +#ifdef CONFIG_MLX4_EN_DCB + struct ieee_ets ets; + u16 maxrate[IEEE_8021QAZ_MAX_TCS]; +#endif }; enum mlx4_en_wol { @@ -512,7 +528,6 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); -void mlx4_en_poll_tx_cq(unsigned long data); void mlx4_en_tx_irq(struct mlx4_cq *mcq); u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); @@ -522,7 +537,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ri void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, - int cq); + int cq, int user_prio); void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); @@ -540,8 +555,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, int budget); int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget); void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, - int is_tx, int rss, int qpn, int cqn, - struct mlx4_qp_context *context); + int is_tx, int rss, int qpn, int cqn, int user_prio, + struct mlx4_qp_context *context); void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event); int mlx4_en_map_buffer(struct mlx4_buf *buf); void mlx4_en_unmap_buffer(struct mlx4_buf *buf); @@ -558,6 +573,10 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv); int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset); int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port); +#ifdef CONFIG_MLX4_EN_DCB +extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops; +#endif + #define MLX4_EN_NUM_SELF_TEST 5 void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf); u64 mlx4_en_mac_to_u64(u8 *addr); diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index fe2ac8449c1..af55b7ce534 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -788,7 +788,6 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, int max_maps, u8 page_shift, struct mlx4_fmr *fmr) { struct mlx4_priv *priv = mlx4_priv(dev); - u64 mtt_offset; int err = -ENOMEM; if (max_maps > dev->caps.max_fmr_maps) @@ -811,8 +810,6 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, if (err) return err; - mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz; - fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, fmr->mr.mtt.offset, &fmr->dma_handle); @@ -895,6 +892,6 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_free); int mlx4_SYNC_TPT(struct mlx4_dev *dev) { return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000, - MLX4_CMD_WRAPPED); + MLX4_CMD_NATIVE); } EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT); diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c index db4746d0dca..1ac88637ad9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -63,7 +63,7 @@ void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn) } EXPORT_SYMBOL_GPL(mlx4_pd_free); -int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn) +int __mlx4_xrcd_alloc(struct 
mlx4_dev *dev, u32 *xrcdn) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -73,12 +73,47 @@ int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn) return 0; } + +int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn) +{ + u64 out_param; + int err; + + if (mlx4_is_mfunc(dev)) { + err = mlx4_cmd_imm(dev, 0, &out_param, + RES_XRCD, RES_OP_RESERVE, + MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + return err; + + *xrcdn = get_param_l(&out_param); + return 0; + } + return __mlx4_xrcd_alloc(dev, xrcdn); +} EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc); -void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) +void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) { mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn); } + +void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) +{ + u64 in_param; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, xrcdn); + err = mlx4_cmd(dev, in_param, RES_XRCD, + RES_OP_RESERVE, MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + mlx4_warn(dev, "Failed to release xrcdn %d\n", xrcdn); + } else + __mlx4_xrcd_free(dev, xrcdn); +} EXPORT_SYMBOL_GPL(mlx4_xrcd_free); int mlx4_init_pd_table(struct mlx4_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 77535ff18f1..1fe2c7a8b40 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -338,13 +338,12 @@ EXPORT_SYMBOL_GPL(__mlx4_unregister_mac); void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) { u64 out_param; - int err; if (mlx4_is_mfunc(dev)) { set_param_l(&out_param, port); - err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, - RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); return; } __mlx4_unregister_mac(dev, port, mac); @@ -834,6 +833,68 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, } EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc); +int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_prio2tc_context *context; + int err; + u32 in_mod; + int i; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + memset(context, 0, sizeof *context); + + for (i = 0; i < MLX4_NUM_UP; i += 2) + context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1]; + + in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC); + +int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, + u8 *pg, u16 *ratelimit) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_scheduler_context *context; + int err; + u32 in_mod; + int i; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + memset(context, 0, sizeof *context); + + for (i = 0; i < MLX4_NUM_TC; i++) { + struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i]; + u16 r = ratelimit && ratelimit[i] ? 
ratelimit[i] : + MLX4_RATELIMIT_DEFAULT; + + tc->pg = htons(pg[i]); + tc->bw_precentage = htons(tc_tx_bw[i]); + + tc->max_bw_units = htons(MLX4_RATELIMIT_UNITS); + tc->max_bw_value = htons(r); + } + + in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER); + int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 8752e6e0816..b45d0e7f6ab 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -89,17 +89,6 @@ enum res_qp_states { RES_QP_HW }; -static inline const char *qp_states_str(enum res_qp_states state) -{ - switch (state) { - case RES_QP_BUSY: return "RES_QP_BUSY"; - case RES_QP_RESERVED: return "RES_QP_RESERVED"; - case RES_QP_MAPPED: return "RES_QP_MAPPED"; - case RES_QP_HW: return "RES_QP_HW"; - default: return "Unknown"; - } -} - struct res_qp { struct res_common com; struct res_mtt *mtt; @@ -173,16 +162,6 @@ enum res_srq_states { RES_SRQ_HW, }; -static inline const char *srq_states_str(enum res_srq_states state) -{ - switch (state) { - case RES_SRQ_BUSY: return "RES_SRQ_BUSY"; - case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED"; - case RES_SRQ_HW: return "RES_SRQ_HW"; - default: return "Unknown"; - } -} - struct res_srq { struct res_common com; struct res_mtt *mtt; @@ -195,20 +174,21 @@ enum res_counter_states { RES_COUNTER_ALLOCATED, }; -static inline const char *counter_states_str(enum res_counter_states state) -{ - switch (state) { - case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY"; - case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED"; - default: return "Unknown"; - } -} - struct res_counter { struct res_common com; int port; }; +enum res_xrcdn_states { + RES_XRCD_BUSY = RES_ANY_BUSY, + RES_XRCD_ALLOCATED, +}; + +struct res_xrcdn { + struct res_common com; + int port; +}; + /* For Debug uses */ static const char *ResourceType(enum mlx4_resource rt) { @@ -221,6 +201,7 @@ static const char *ResourceType(enum mlx4_resource rt) case RES_MAC: return "RES_MAC"; case RES_EQ: return "RES_EQ"; case RES_COUNTER: return "RES_COUNTER"; + case RES_XRCD: return "RES_XRCD"; default: return "Unknown resource type !!!"; }; } @@ -254,16 +235,23 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) return 0 ; } -void mlx4_free_resource_tracker(struct mlx4_dev *dev) +void mlx4_free_resource_tracker(struct mlx4_dev *dev, + enum mlx4_res_tracker_free_type type) { struct mlx4_priv *priv = mlx4_priv(dev); int i; if (priv->mfunc.master.res_tracker.slave_list) { - for (i = 0 ; i < dev->num_slaves; i++) - mlx4_delete_all_resources_for_slave(dev, i); - - kfree(priv->mfunc.master.res_tracker.slave_list); + if (type != RES_TR_FREE_STRUCTS_ONLY) + for (i = 0 ; i < dev->num_slaves; i++) + if (type == RES_TR_FREE_ALL || + dev->caps.function != i) + mlx4_delete_all_resources_for_slave(dev, i); + + if (type != RES_TR_FREE_SLAVES_ONLY) { + kfree(priv->mfunc.master.res_tracker.slave_list); + priv->mfunc.master.res_tracker.slave_list = NULL; + } } } @@ -471,6 +459,20 @@ static struct res_common *alloc_counter_tr(int id) return &ret->com; } +static struct res_common *alloc_xrcdn_tr(int id) +{ + struct res_xrcdn *ret; + + ret = kzalloc(sizeof *ret, 
GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_XRCD_ALLOCATED; + + return &ret->com; +} + static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave, int extra) { @@ -501,7 +503,9 @@ static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave, case RES_COUNTER: ret = alloc_counter_tr(id); break; - + case RES_XRCD: + ret = alloc_xrcdn_tr(id); + break; default: return NULL; } @@ -624,6 +628,16 @@ static int remove_counter_ok(struct res_counter *res) return 0; } +static int remove_xrcdn_ok(struct res_xrcdn *res) +{ + if (res->com.state == RES_XRCD_BUSY) + return -EBUSY; + else if (res->com.state != RES_XRCD_ALLOCATED) + return -EPERM; + + return 0; +} + static int remove_cq_ok(struct res_cq *res) { if (res->com.state == RES_CQ_BUSY) @@ -663,6 +677,8 @@ static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra) return remove_eq_ok((struct res_eq *)res); case RES_COUNTER: return remove_counter_ok((struct res_counter *)res); + case RES_XRCD: + return remove_xrcdn_ok((struct res_xrcdn *)res); default: return -EINVAL; } @@ -1269,6 +1285,50 @@ static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, return 0; } +static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + u32 index; + int err; + + if (op != RES_OP_RESERVE) + return -EINVAL; + + err = __mlx4_counter_alloc(dev, &index); + if (err) + return err; + + err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0); + if (err) + __mlx4_counter_free(dev, index); + else + set_param_l(out_param, index); + + return err; +} + +static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + u32 xrcdn; + int err; + + if (op != RES_OP_RESERVE) + return -EINVAL; + + err = __mlx4_xrcd_alloc(dev, &xrcdn); + if (err) + return err; + + err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0); + if (err) + __mlx4_xrcd_free(dev, xrcdn); + else + set_param_l(out_param, xrcdn); + + return err; +} + int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -1314,6 +1374,16 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, vhcr->in_param, &vhcr->out_param); break; + case RES_COUNTER: + err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_XRCD: + err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + default: err = -EINVAL; break; @@ -1496,6 +1566,44 @@ static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, return 0; } +static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int index; + int err; + + if (op != RES_OP_RESERVE) + return -EINVAL; + + index = get_param_l(&in_param); + err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0); + if (err) + return err; + + __mlx4_counter_free(dev, index); + + return err; +} + +static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int xrcdn; + int err; + + if (op != RES_OP_RESERVE) + return -EINVAL; + + xrcdn = get_param_l(&in_param); + err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0); + if (err) + return err; + + __mlx4_xrcd_free(dev, xrcdn); + + return err; +} + int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox 
*inbox, @@ -1541,6 +1649,15 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, vhcr->in_param, &vhcr->out_param); break; + case RES_COUNTER: + err = counter_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_XRCD: + err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + default: break; } @@ -2536,7 +2653,7 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_qp qp; /* dummy for calling attach/detach */ u8 *gid = inbox->buf; enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7; - int err, err1; + int err; int qpn; struct res_qp *rqp; int attach = vhcr->op_modifier; @@ -2571,7 +2688,7 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, ex_rem: /* ignore error return below, already in error */ - err1 = rem_mcg_res(dev, slave, rqp, gid, prot, type); + (void) rem_mcg_res(dev, slave, rqp, gid, prot, type); ex_put: put_res(dev, slave, qpn, RES_QP); @@ -2604,13 +2721,12 @@ static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp) { struct res_gid *rgid; struct res_gid *tmp; - int err; struct mlx4_qp qp; /* dummy for calling attach/detach */ list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) { qp.qpn = rqp->local_qpn; - err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot, - rgid->steer); + (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot, + rgid->steer); list_del(&rgid->list); kfree(rgid); } @@ -3036,14 +3152,13 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave) MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); - mlx4_dbg(dev, "rem_slave_eqs: failed" - " to move slave %d eqs %d to" - " SW ownership\n", slave, eqn); + if (err) + mlx4_dbg(dev, "rem_slave_eqs: failed" + " to move slave %d eqs %d to" + " SW ownership\n", slave, eqn); mlx4_free_cmd_mailbox(dev, mailbox); - if (!err) { - atomic_dec(&eq->mtt->ref_count); - state = RES_EQ_RESERVED; - } + atomic_dec(&eq->mtt->ref_count); + state = RES_EQ_RESERVED; break; default: @@ -3056,6 +3171,64 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave) spin_unlock_irq(mlx4_tlock(dev)); } +static void rem_slave_counters(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *counter_list = + &tracker->slave_list[slave].res_list[RES_COUNTER]; + struct res_counter *counter; + struct res_counter *tmp; + int err; + int index; + + err = move_all_busy(dev, slave, RES_COUNTER); + if (err) + mlx4_warn(dev, "rem_slave_counters: Could not move all counters to " + "busy for slave %d\n", slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(counter, tmp, counter_list, com.list) { + if (counter->com.owner == slave) { + index = counter->com.res_id; + radix_tree_delete(&tracker->res_tree[RES_COUNTER], index); + list_del(&counter->com.list); + kfree(counter); + __mlx4_counter_free(dev, index); + } + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *xrcdn_list = + &tracker->slave_list[slave].res_list[RES_XRCD]; + struct res_xrcdn *xrcd; + struct res_xrcdn *tmp; + int err; + int xrcdn; + + err = move_all_busy(dev, slave, RES_XRCD); + if (err) + mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to " + "busy for slave %d\n", 
slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) { + if (xrcd->com.owner == slave) { + xrcdn = xrcd->com.res_id; + radix_tree_delete(&tracker->res_tree[RES_XRCD], xrcdn); + list_del(&xrcd->com.list); + kfree(xrcd); + __mlx4_xrcd_free(dev, xrcdn); + } + } + spin_unlock_irq(mlx4_tlock(dev)); +} + void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -3069,5 +3242,7 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) rem_slave_mrs(dev, slave); rem_slave_eqs(dev, slave); rem_slave_mtts(dev, slave); + rem_slave_counters(dev, slave); + rem_slave_xrcdns(dev, slave); mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); } diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index f84dd2dc82b..24fb049ac2f 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -1262,7 +1262,7 @@ static struct platform_driver ks8842_platform_driver = { .owner = THIS_MODULE, }, .probe = ks8842_probe, - .remove = ks8842_remove, + .remove = __devexit_p(ks8842_remove), }; module_platform_driver(ks8842_platform_driver); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 27273ae1a6e..90153fc983c 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -4033,7 +4033,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->netdev_ops = &myri10ge_netdev_ops; netdev->mtu = myri10ge_initial_mtu; - netdev->base_addr = mgp->iomem_base; netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM; netdev->features = netdev->hw_features; @@ -4047,12 +4046,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->vlan_features &= ~NETIF_F_TSO; /* make sure we can get an irq, and that MSI can be - * setup (if available). Also ensure netdev->irq - * is set to correct value if MSI is enabled */ + * setup (if available). */ status = myri10ge_request_irq(mgp); if (status != 0) goto abort_with_firmware; - netdev->irq = pdev->irq; myri10ge_free_irq(mgp); /* Save configuration space to be restored if the @@ -4077,7 +4074,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) else dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", mgp->msi_enabled ? "MSI" : "xPIC", - netdev->irq, mgp->tx_boundary, mgp->fw_name, + pdev->irq, mgp->tx_boundary, mgp->fw_name, (mgp->wc_enabled ? 
"Enabled" : "Disabled")); board_number++; diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig index eb836f770f5..f157334579f 100644 --- a/drivers/net/ethernet/natsemi/Kconfig +++ b/drivers/net/ethernet/natsemi/Kconfig @@ -6,9 +6,8 @@ config NET_VENDOR_NATSEMI bool "National Semi-conductor devices" default y depends on AMIGA_PCMCIA || ARM || EISA || EXPERIMENTAL || H8300 || \ - ISA || M32R || MAC || MACH_JAZZ || MACH_TX49XX || MCA || \ - MCA_LEGACY || MIPS || PCI || PCMCIA || SUPERH || \ - XTENSA_PLATFORM_XT2000 || ZORRO + ISA || M32R || MAC || MACH_JAZZ || MACH_TX49XX || MIPS || \ + PCI || PCMCIA || SUPERH || XTENSA_PLATFORM_XT2000 || ZORRO ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from @@ -21,21 +20,6 @@ config NET_VENDOR_NATSEMI if NET_VENDOR_NATSEMI -config IBMLANA - tristate "IBM LAN Adapter/A support" - depends on MCA - ---help--- - This is a Micro Channel Ethernet adapter. You need to set - CONFIG_MCA to use this driver. It is both available as an in-kernel - driver and as a module. - - To compile this driver as a module, choose M here. The only - currently supported card is the IBM LAN Adapter/A for Ethernet. It - will both support 16K and 32K memory windows, however a 32K window - gives a better security against packet losses. Usage of multiple - boards with this driver should be possible, but has not been tested - up to now due to lack of hardware. - config MACSONIC tristate "Macintosh SONIC based ethernet (onboard, NuBus, LC, CS)" depends on MAC diff --git a/drivers/net/ethernet/natsemi/Makefile b/drivers/net/ethernet/natsemi/Makefile index 9aa5dea52b3..764c532a96d 100644 --- a/drivers/net/ethernet/natsemi/Makefile +++ b/drivers/net/ethernet/natsemi/Makefile @@ -2,7 +2,6 @@ # Makefile for the National Semi-conductor Sonic devices. # -obj-$(CONFIG_IBMLANA) += ibmlana.o obj-$(CONFIG_MACSONIC) += macsonic.o obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o obj-$(CONFIG_NATSEMI) += natsemi.o diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index d38e48d4f43..5b61d12f8b9 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -547,6 +547,7 @@ struct netdev_private { struct sk_buff *tx_skbuff[TX_RING_SIZE]; dma_addr_t tx_dma[TX_RING_SIZE]; struct net_device *dev; + void __iomem *ioaddr; struct napi_struct napi; /* Media monitoring timer */ struct timer_list timer; @@ -699,7 +700,9 @@ static ssize_t natsemi_set_dspcfg_workaround(struct device *dev, static inline void __iomem *ns_ioaddr(struct net_device *dev) { - return (void __iomem *) dev->base_addr; + struct netdev_private *np = netdev_priv(dev); + + return np->ioaddr; } static inline void natsemi_irq_enable(struct net_device *dev) @@ -863,10 +866,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, /* Store MAC Address in perm_addr */ memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); - dev->base_addr = (unsigned long __force) ioaddr; - dev->irq = irq; - np = netdev_priv(dev); + np->ioaddr = ioaddr; + netif_napi_add(dev, &np->napi, natsemi_poll, 64); np->dev = dev; @@ -914,9 +916,6 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, } option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; - if (dev->mem_start) - option = dev->mem_start; - /* The lower four bits are the media type. 
*/ if (option) { if (option & 0x200) @@ -1532,20 +1531,21 @@ static int netdev_open(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); + const int irq = np->pci_dev->irq; int i; /* Reset the chip, just in case. */ natsemi_reset(dev); - i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); + i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev); if (i) return i; if (netif_msg_ifup(np)) printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", - dev->name, dev->irq); + dev->name, irq); i = alloc_ring(dev); if (i < 0) { - free_irq(dev->irq, dev); + free_irq(irq, dev); return i; } napi_enable(&np->napi); @@ -1794,6 +1794,7 @@ static void netdev_timer(unsigned long data) struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); int next_tick = NATSEMI_TIMER_FREQ; + const int irq = np->pci_dev->irq; if (netif_msg_timer(np)) { /* DO NOT read the IntrStatus register, @@ -1817,14 +1818,14 @@ static void netdev_timer(unsigned long data) if (netif_msg_drv(np)) printk(KERN_NOTICE "%s: possible phy reset: " "re-initializing\n", dev->name); - disable_irq(dev->irq); + disable_irq(irq); spin_lock_irq(&np->lock); natsemi_stop_rxtx(dev); dump_ring(dev); reinit_ring(dev); init_registers(dev); spin_unlock_irq(&np->lock); - enable_irq(dev->irq); + enable_irq(irq); } else { /* hurry back */ next_tick = HZ; @@ -1841,10 +1842,10 @@ static void netdev_timer(unsigned long data) spin_unlock_irq(&np->lock); } if (np->oom) { - disable_irq(dev->irq); + disable_irq(irq); np->oom = 0; refill_rx(dev); - enable_irq(dev->irq); + enable_irq(irq); if (!np->oom) { writel(RxOn, ioaddr + ChipCmd); } else { @@ -1885,8 +1886,9 @@ static void ns_tx_timeout(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); + const int irq = np->pci_dev->irq; - disable_irq(dev->irq); + disable_irq(irq); spin_lock_irq(&np->lock); if (!np->hands_off) { if (netif_msg_tx_err(np)) @@ -1905,7 +1907,7 @@ static void ns_tx_timeout(struct net_device *dev) dev->name); } spin_unlock_irq(&np->lock); - enable_irq(dev->irq); + enable_irq(irq); dev->trans_start = jiffies; /* prevent tx timeout */ dev->stats.tx_errors++; @@ -2470,9 +2472,12 @@ static struct net_device_stats *get_stats(struct net_device *dev) #ifdef CONFIG_NET_POLL_CONTROLLER static void natsemi_poll_controller(struct net_device *dev) { - disable_irq(dev->irq); - intr_handler(dev->irq, dev); - enable_irq(dev->irq); + struct netdev_private *np = netdev_priv(dev); + const int irq = np->pci_dev->irq; + + disable_irq(irq); + intr_handler(irq, dev); + enable_irq(irq); } #endif @@ -2523,8 +2528,9 @@ static int natsemi_change_mtu(struct net_device *dev, int new_mtu) if (netif_running(dev)) { struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); + const int irq = np->pci_dev->irq; - disable_irq(dev->irq); + disable_irq(irq); spin_lock(&np->lock); /* stop engines */ natsemi_stop_rxtx(dev); @@ -2537,7 +2543,7 @@ static int natsemi_change_mtu(struct net_device *dev, int new_mtu) /* restart engines */ writel(RxOn | TxOn, ioaddr + ChipCmd); spin_unlock(&np->lock); - enable_irq(dev->irq); + enable_irq(irq); } return 0; } @@ -3135,6 +3141,7 @@ static int netdev_close(struct net_device *dev) { void __iomem * ioaddr = ns_ioaddr(dev); struct netdev_private *np = netdev_priv(dev); + const int irq = np->pci_dev->irq; if (netif_msg_ifdown(np)) printk(KERN_DEBUG @@ -3156,14 +3163,14 @@ static int netdev_close(struct 
net_device *dev) */ del_timer_sync(&np->timer); - disable_irq(dev->irq); + disable_irq(irq); spin_lock_irq(&np->lock); natsemi_irq_disable(dev); np->hands_off = 1; spin_unlock_irq(&np->lock); - enable_irq(dev->irq); + enable_irq(irq); - free_irq(dev->irq, dev); + free_irq(irq, dev); /* Interrupt disabled, interrupt handler released, * queue stopped, timer deleted, rtnl_lock held @@ -3256,9 +3263,11 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state) rtnl_lock(); if (netif_running (dev)) { + const int irq = np->pci_dev->irq; + del_timer_sync(&np->timer); - disable_irq(dev->irq); + disable_irq(irq); spin_lock_irq(&np->lock); natsemi_irq_disable(dev); @@ -3267,7 +3276,7 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state) netif_stop_queue(dev); spin_unlock_irq(&np->lock); - enable_irq(dev->irq); + enable_irq(irq); napi_disable(&np->napi); @@ -3307,6 +3316,8 @@ static int natsemi_resume (struct pci_dev *pdev) if (netif_device_present(dev)) goto out; if (netif_running(dev)) { + const int irq = np->pci_dev->irq; + BUG_ON(!np->hands_off); ret = pci_enable_device(pdev); if (ret < 0) { @@ -3320,13 +3331,13 @@ static int natsemi_resume (struct pci_dev *pdev) natsemi_reset(dev); init_ring(dev); - disable_irq(dev->irq); + disable_irq(irq); spin_lock_irq(&np->lock); np->hands_off = 0; init_registers(dev); netif_device_attach(dev); spin_unlock_irq(&np->lock); - enable_irq(dev->irq); + enable_irq(irq); mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ)); } diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 6338ef8606a..bb367582c1e 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -2846,6 +2846,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget) static void s2io_netpoll(struct net_device *dev) { struct s2io_nic *nic = netdev_priv(dev); + const int irq = nic->pdev->irq; struct XENA_dev_config __iomem *bar0 = nic->bar0; u64 val64 = 0xFFFFFFFFFFFFFFFFULL; int i; @@ -2855,7 +2856,7 @@ static void s2io_netpoll(struct net_device *dev) if (pci_channel_offline(nic->pdev)) return; - disable_irq(dev->irq); + disable_irq(irq); writeq(val64, &bar0->rx_traffic_int); writeq(val64, &bar0->tx_traffic_int); @@ -2884,7 +2885,7 @@ static void s2io_netpoll(struct net_device *dev) break; } } - enable_irq(dev->irq); + enable_irq(irq); } #endif @@ -3897,9 +3898,7 @@ static void remove_msix_isr(struct s2io_nic *sp) static void remove_inta_isr(struct s2io_nic *sp) { - struct net_device *dev = sp->dev; - - free_irq(sp->pdev->irq, dev); + free_irq(sp->pdev->irq, sp->dev); } /* ********************************************************* * @@ -7046,7 +7045,7 @@ static int s2io_add_isr(struct s2io_nic *sp) } } if (sp->config.intr_type == INTA) { - err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED, + err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED, sp->name, dev); if (err) { DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", @@ -7908,9 +7907,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) goto bar1_remap_failed; } - dev->irq = pdev->irq; - dev->base_addr = (unsigned long)sp->bar0; - /* Initializing the BAR1 address as the start of the FIFO pointer. 
*/ for (j = 0; j < MAX_TX_FIFOS; j++) { mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index ef76725454d..51387c31914 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -1882,25 +1882,24 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget) */ static void vxge_netpoll(struct net_device *dev) { - struct __vxge_hw_device *hldev; - struct vxgedev *vdev; - - vdev = netdev_priv(dev); - hldev = pci_get_drvdata(vdev->pdev); + struct vxgedev *vdev = netdev_priv(dev); + struct pci_dev *pdev = vdev->pdev; + struct __vxge_hw_device *hldev = pci_get_drvdata(pdev); + const int irq = pdev->irq; vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); - if (pci_channel_offline(vdev->pdev)) + if (pci_channel_offline(pdev)) return; - disable_irq(dev->irq); + disable_irq(irq); vxge_hw_device_clear_tx_rx(hldev); vxge_hw_device_clear_tx_rx(hldev); VXGE_COMPLETE_ALL_RX(vdev); VXGE_COMPLETE_ALL_TX(vdev); - enable_irq(dev->irq); + enable_irq(irq); vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__); @@ -2860,12 +2859,12 @@ static int vxge_open(struct net_device *dev) vdev->config.rx_pause_enable); if (vdev->vp_reset_timer.function == NULL) - vxge_os_timer(vdev->vp_reset_timer, - vxge_poll_vp_reset, vdev, (HZ/2)); + vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset, vdev, + HZ / 2); /* There is no need to check for RxD leak and RxD lookup on Titan1A */ if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL) - vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev, + vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev, HZ / 2); set_bit(__VXGE_STATE_CARD_UP, &vdev->state); @@ -3424,9 +3423,6 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, ndev->features |= ndev->hw_features | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; - /* Driver entry points */ - ndev->irq = vdev->pdev->irq; - ndev->base_addr = (unsigned long) hldev->bar0; ndev->netdev_ops = &vxge_netdev_ops; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h index f52a42d1dbb..35f3e7552ec 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h @@ -416,12 +416,15 @@ struct vxge_tx_priv { static int p = val; \ module_param(p, int, 0) -#define vxge_os_timer(timer, handle, arg, exp) do { \ - init_timer(&timer); \ - timer.function = handle; \ - timer.data = (unsigned long) arg; \ - mod_timer(&timer, (jiffies + exp)); \ - } while (0); +static inline +void vxge_os_timer(struct timer_list *timer, void (*func)(unsigned long data), + struct vxgedev *vdev, unsigned long timeout) +{ + init_timer(timer); + timer->function = func; + timer->data = (unsigned long)vdev; + mod_timer(timer, jiffies + timeout); +} void vxge_initialize_ethtool_ops(struct net_device *ndev); enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev); diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index aca13046e43..928913c4f3f 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -2279,6 +2279,8 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) netdev_sent_queue(np->dev, skb->len); + skb_tx_timestamp(skb); + np->put_tx.orig = put_tx; spin_unlock_irqrestore(&np->lock, flags); 
@@ -2426,6 +2428,8 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, netdev_sent_queue(np->dev, skb->len); + skb_tx_timestamp(skb); + np->put_tx.ex = put_tx; spin_unlock_irqrestore(&np->lock, flags); @@ -3942,13 +3946,11 @@ static int nv_request_irq(struct net_device *dev, int intr_test) ret = pci_enable_msi(np->pci_dev); if (ret == 0) { np->msi_flags |= NV_MSI_ENABLED; - dev->irq = np->pci_dev->irq; if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { netdev_info(dev, "request_irq failed %d\n", ret); pci_disable_msi(np->pci_dev); np->msi_flags &= ~NV_MSI_ENABLED; - dev->irq = np->pci_dev->irq; goto out_err; } @@ -5649,9 +5651,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i np->base = ioremap(addr, np->register_size); if (!np->base) goto out_relreg; - dev->base_addr = (unsigned long)np->base; - - dev->irq = pci_dev->irq; np->rx_ring_size = RX_RING_DEFAULT; np->tx_ring_size = TX_RING_DEFAULT; diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 6dfc26d85e4..d3469d8e3f0 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -990,10 +990,10 @@ static int __lpc_handle_recv(struct net_device *ndev, int budget) ndev->stats.rx_errors++; } else { /* Packet is good */ - skb = dev_alloc_skb(len + 8); - if (!skb) + skb = dev_alloc_skb(len); + if (!skb) { ndev->stats.rx_dropped++; - else { + } else { prdbuf = skb_put(skb, len); /* Copy packet from buffer */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index ba781747d17..b07311eaa69 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -658,6 +658,7 @@ extern u32 pch_src_uuid_lo_read(struct pci_dev *pdev); extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev); extern u64 pch_rx_snap_read(struct pci_dev *pdev); extern u64 pch_tx_snap_read(struct pci_dev *pdev); +extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev); #endif /* pch_gbe_param.c */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 1e38d502a06..3787c64ee71 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -79,7 +79,6 @@ const char pch_driver_version[] = DRV_VERSION; #define PCH_GBE_PAUSE_PKT4_VALUE 0x01000888 #define PCH_GBE_PAUSE_PKT5_VALUE 0x0000FFFF -#define PCH_GBE_ETH_ALEN 6 /* This defines the bits that are set in the Interrupt Mask * Set/Read Register. 
Each bit is documented below: @@ -101,18 +100,19 @@ const char pch_driver_version[] = DRV_VERSION; #ifdef CONFIG_PCH_PTP /* Macros for ieee1588 */ -#define TICKS_NS_SHIFT 5 - /* 0x40 Time Synchronization Channel Control Register Bits */ #define MASTER_MODE (1<<0) -#define SLAVE_MODE (0<<0) +#define SLAVE_MODE (0) #define V2_MODE (1<<31) -#define CAP_MODE0 (0<<16) +#define CAP_MODE0 (0) #define CAP_MODE2 (1<<17) /* 0x44 Time Synchronization Channel Event Register Bits */ #define TX_SNAPSHOT_LOCKED (1<<0) #define RX_SNAPSHOT_LOCKED (1<<1) + +#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81" +#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00" #endif static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; @@ -120,6 +120,7 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, int data); +static void pch_gbe_set_multi(struct net_device *netdev); #ifdef CONFIG_PCH_PTP static struct sock_filter ptp_filter[] = { @@ -133,10 +134,8 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid) u16 *hi, *id; u32 lo; - if ((sk_run_filter(skb, ptp_filter) != PTP_CLASS_V2_IPV4) && - (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)) { + if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE) return 0; - } offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; @@ -153,8 +152,8 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid) seqid == *id); } -static void pch_rx_timestamp( - struct pch_gbe_adapter *adapter, struct sk_buff *skb) +static void +pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb) { struct skb_shared_hwtstamps *shhwtstamps; struct pci_dev *pdev; @@ -183,7 +182,6 @@ static void pch_rx_timestamp( goto out; ns = pch_rx_snap_read(pdev); - ns <<= TICKS_NS_SHIFT; shhwtstamps = skb_hwtstamps(skb); memset(shhwtstamps, 0, sizeof(*shhwtstamps)); @@ -192,8 +190,8 @@ out: pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED); } -static void pch_tx_timestamp( - struct pch_gbe_adapter *adapter, struct sk_buff *skb) +static void +pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb) { struct skb_shared_hwtstamps shhwtstamps; struct pci_dev *pdev; @@ -202,17 +200,16 @@ static void pch_tx_timestamp( u32 cnt, val; shtx = skb_shinfo(skb); - if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)) - shtx->tx_flags |= SKBTX_IN_PROGRESS; - else + if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en))) return; + shtx->tx_flags |= SKBTX_IN_PROGRESS; + /* Get ieee1588's dev information */ pdev = adapter->ptp_pdev; /* * This really stinks, but we have to poll for the Tx time stamp. - * Usually, the time stamp is ready after 4 to 6 microseconds. 
*/ for (cnt = 0; cnt < 100; cnt++) { val = pch_ch_event_read(pdev); @@ -226,7 +223,6 @@ static void pch_tx_timestamp( } ns = pch_tx_snap_read(pdev); - ns <<= TICKS_NS_SHIFT; memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ns_to_ktime(ns); @@ -240,6 +236,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) struct hwtstamp_config cfg; struct pch_gbe_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev; + u8 station[20]; if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; @@ -267,15 +264,23 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: adapter->hwts_rx_en = 0; - pch_ch_control_write(pdev, (SLAVE_MODE | CAP_MODE0)); + pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0); break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: adapter->hwts_rx_en = 1; - pch_ch_control_write(pdev, (MASTER_MODE | CAP_MODE0)); + pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0); + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + adapter->hwts_rx_en = 1; + pch_ch_control_write(pdev, V2_MODE | CAP_MODE2); + strcpy(station, PTP_L4_MULTICAST_SA); + pch_set_station_address(station, pdev); break; - case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: adapter->hwts_rx_en = 1; - pch_ch_control_write(pdev, (V2_MODE | CAP_MODE2)); + pch_ch_control_write(pdev, V2_MODE | CAP_MODE2); + strcpy(station, PTP_L2_MULTICAST_SA); + pch_set_station_address(station, pdev); break; default: return -ERANGE; @@ -399,18 +404,18 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw) iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE); #endif pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST); - /* Setup the receive address */ + /* Setup the receive addresses */ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); return; } static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw) { - /* Read the MAC address. and store to the private data */ + /* Read the MAC addresses. and store to the private data */ pch_gbe_mac_read_mac_addr(hw); iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET); pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST); - /* Setup the MAC address */ + /* Setup the MAC addresses */ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); return; } @@ -460,7 +465,7 @@ static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw, if (mc_addr_count) { pch_gbe_mac_mar_set(hw, mc_addr_list, i); mc_addr_count--; - mc_addr_list += PCH_GBE_ETH_ALEN; + mc_addr_list += ETH_ALEN; } else { /* Clear MAC address mask */ adrmask = ioread32(&hw->reg->ADDR_MASK); @@ -775,6 +780,8 @@ void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) void pch_gbe_reset(struct pch_gbe_adapter *adapter) { pch_gbe_mac_reset_hw(&adapter->hw); + /* reprogram multicast address register after reset */ + pch_gbe_set_multi(adapter->netdev); /* Setup the receive address. 
*/ pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES); if (pch_gbe_hal_init_hw(&adapter->hw)) @@ -1178,8 +1185,6 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter, if (skb->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); unsigned int offset; - iph->check = 0; - iph->check = ip_fast_csum((u8 *) iph, iph->ihl); offset = skb_transport_offset(skb); if (iph->protocol == IPPROTO_TCP) { skb->csum = 0; @@ -1338,6 +1343,8 @@ static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter) /* Stop Receive */ pch_gbe_mac_reset_rx(hw); } + /* reprogram multicast address register after reset */ + pch_gbe_set_multi(adapter->netdev); } static void pch_gbe_start_receive(struct pch_gbe_hw *hw) @@ -1922,7 +1929,6 @@ static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter) } -static void pch_gbe_set_multi(struct net_device *netdev); /** * pch_gbe_up - Up GbE network device * @adapter: Board private structure diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index 0d29f5f4b8e..c2367158350 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -683,8 +683,6 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev, } hmp->base = ioaddr; - dev->base_addr = (unsigned long)ioaddr; - dev->irq = irq; pci_set_drvdata(pdev, dev); hmp->chip_id = chip_id; @@ -859,14 +857,11 @@ static int hamachi_open(struct net_device *dev) u32 rx_int_var, tx_int_var; u16 fifo_info; - i = request_irq(dev->irq, hamachi_interrupt, IRQF_SHARED, dev->name, dev); + i = request_irq(hmp->pci_dev->irq, hamachi_interrupt, IRQF_SHARED, + dev->name, dev); if (i) return i; - if (hamachi_debug > 1) - printk(KERN_DEBUG "%s: hamachi_open() irq %d.\n", - dev->name, dev->irq); - hamachi_init_ring(dev); #if ADDRLEN == 64 @@ -1705,7 +1700,7 @@ static int hamachi_close(struct net_device *dev) } #endif /* __i386__ debugging only */ - free_irq(dev->irq, dev); + free_irq(hmp->pci_dev->irq, dev); del_timer_sync(&hmp->timer); diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index 7757b80ef92..04e622fd468 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -427,9 +427,6 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev, /* Reset the chip. */ iowrite32(0x80000000, ioaddr + DMACtrl); - dev->base_addr = (unsigned long)ioaddr; - dev->irq = irq; - pci_set_drvdata(pdev, dev); spin_lock_init(&np->lock); @@ -569,25 +566,20 @@ static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value static int yellowfin_open(struct net_device *dev) { struct yellowfin_private *yp = netdev_priv(dev); + const int irq = yp->pci_dev->irq; void __iomem *ioaddr = yp->base; - int i, ret; + int i, rc; /* Reset the chip. 
*/ iowrite32(0x80000000, ioaddr + DMACtrl); - ret = request_irq(dev->irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev); - if (ret) - return ret; - - if (yellowfin_debug > 1) - netdev_printk(KERN_DEBUG, dev, "%s() irq %d\n", - __func__, dev->irq); + rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev); + if (rc) + return rc; - ret = yellowfin_init_ring(dev); - if (ret) { - free_irq(dev->irq, dev); - return ret; - } + rc = yellowfin_init_ring(dev); + if (rc < 0) + goto err_free_irq; iowrite32(yp->rx_ring_dma, ioaddr + RxPtr); iowrite32(yp->tx_ring_dma, ioaddr + TxPtr); @@ -647,8 +639,12 @@ static int yellowfin_open(struct net_device *dev) yp->timer.data = (unsigned long)dev; yp->timer.function = yellowfin_timer; /* timer handler */ add_timer(&yp->timer); +out: + return rc; - return 0; +err_free_irq: + free_irq(irq, dev); + goto out; } static void yellowfin_timer(unsigned long data) @@ -1251,7 +1247,7 @@ static int yellowfin_close(struct net_device *dev) } #endif /* __i386__ debugging only */ - free_irq(dev->irq, dev); + free_irq(yp->pci_dev->irq, dev); /* Free all the skbuffs in the Rx queue. */ for (i = 0; i < RX_RING_SIZE; i++) { diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index ddc95b0ac78..e559dfa06d6 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -623,7 +623,7 @@ static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac) mac->rx = NULL; } -static void pasemi_mac_replenish_rx_ring(const struct net_device *dev, +static void pasemi_mac_replenish_rx_ring(struct net_device *dev, const int limit) { const struct pasemi_mac *mac = netdev_priv(dev); diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index b5de8a7b90f..37ccbe54e62 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h @@ -53,8 +53,8 @@ #define _NETXEN_NIC_LINUX_MAJOR 4 #define _NETXEN_NIC_LINUX_MINOR 0 -#define _NETXEN_NIC_LINUX_SUBVERSION 78 -#define NETXEN_NIC_LINUX_VERSIONID "4.0.78" +#define _NETXEN_NIC_LINUX_SUBVERSION 79 +#define NETXEN_NIC_LINUX_VERSIONID "4.0.79" #define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) #define _major(v) (((v) >> 24) & 0xff) @@ -419,6 +419,8 @@ struct rcv_desc { (((sts_data) >> 52) & 0x1) #define netxen_get_lro_sts_seq_number(sts_data) \ ((sts_data) & 0x0FFFFFFFF) +#define netxen_get_lro_sts_mss(sts_data1) \ + ((sts_data1 >> 32) & 0x0FFFF) struct status_desc { @@ -794,6 +796,7 @@ struct netxen_cmd_args { #define NX_CAP0_JUMBO_CONTIGUOUS NX_CAP_BIT(0, 7) #define NX_CAP0_LRO_CONTIGUOUS NX_CAP_BIT(0, 8) #define NX_CAP0_HW_LRO NX_CAP_BIT(0, 10) +#define NX_CAP0_HW_LRO_MSS NX_CAP_BIT(0, 21) /* * Context state @@ -1073,6 +1076,8 @@ typedef struct { #define NX_FW_CAPABILITY_FVLANTX (1 << 9) #define NX_FW_CAPABILITY_HW_LRO (1 << 10) #define NX_FW_CAPABILITY_GBE_LINK_CFG (1 << 11) +#define NX_FW_CAPABILITY_MORE_CAPS (1 << 31) +#define NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG (1 << 2) /* module types */ #define LINKEVENT_MODULE_NOT_PRESENT 1 @@ -1155,6 +1160,7 @@ typedef struct { #define NETXEN_NIC_BRIDGE_ENABLED 0X10 #define NETXEN_NIC_DIAG_ENABLED 0x20 #define NETXEN_FW_RESET_OWNER 0x40 +#define NETXEN_FW_MSS_CAP 0x80 #define NETXEN_IS_MSI_FAMILY(adapter) \ ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED)) @@ -1201,6 +1207,9 @@ typedef struct { #define NX_FORCE_FW_RESET 0xdeaddead +/* Fw dump levels */ +static 
const u32 FW_DUMP_LEVELS[] = { 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff }; + /* Flash read/write address */ #define NX_FW_DUMP_REG1 0x00130060 #define NX_FW_DUMP_REG2 0x001e0000 @@ -1814,6 +1823,13 @@ struct netxen_brdinfo { char short_name[NETXEN_MAX_SHORT_NAME]; }; +struct netxen_dimm_cfg { + u8 presence; + u8 mem_type; + u8 dimm_type; + u32 size; +}; + static const struct netxen_brdinfo netxen_boards[] = { {NETXEN_BRDTYPE_P2_SB31_10G_CX4, 1, "XGb CX4"}, {NETXEN_BRDTYPE_P2_SB31_10G_HMEZ, 1, "XGb HMEZ"}, diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c index f3c0057a802..7f556a84925 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c @@ -229,7 +229,7 @@ netxen_setup_minidump(struct netxen_adapter *adapter) adapter->mdump.md_template; adapter->mdump.md_capture_buff = NULL; adapter->mdump.fw_supports_md = 1; - adapter->mdump.md_enabled = 1; + adapter->mdump.md_enabled = 0; return err; @@ -328,6 +328,9 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter) cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN); cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS); + if (adapter->flags & NETXEN_FW_MSS_CAP) + cap |= NX_CAP0_HW_LRO_MSS; + prq->capabilities[0] = cpu_to_le32(cap); prq->host_int_crb_mode = cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED); diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c index 8c39299331a..39730403782 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c @@ -834,7 +834,7 @@ netxen_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) static int netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val) { - int ret = 0; + int i; struct netxen_adapter *adapter = netdev_priv(netdev); struct netxen_minidump *mdump = &adapter->mdump; @@ -844,7 +844,7 @@ netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val) mdump->md_enabled = 1; if (adapter->fw_mdump_rdy) { netdev_info(netdev, "Previous dump not cleared, not forcing dump\n"); - return ret; + return 0; } netdev_info(netdev, "Forcing a fw dump\n"); nx_dev_request_reset(adapter); @@ -867,19 +867,21 @@ netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val) adapter->flags &= ~NETXEN_FW_RESET_OWNER; break; default: - if (val->flag <= NX_DUMP_MASK_MAX && - val->flag >= NX_DUMP_MASK_MIN) { - mdump->md_capture_mask = val->flag & 0xff; - netdev_info(netdev, "Driver mask changed to: 0x%x\n", + for (i = 0; i < ARRAY_SIZE(FW_DUMP_LEVELS); i++) { + if (val->flag == FW_DUMP_LEVELS[i]) { + mdump->md_capture_mask = val->flag; + netdev_info(netdev, + "Driver mask changed to: 0x%x\n", mdump->md_capture_mask); - break; + return 0; + } } netdev_info(netdev, "Invalid dump level: 0x%x\n", val->flag); return -EINVAL; } - return ret; + return 0; } static int diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h index b1a897cd9a8..28e076960bc 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h @@ -776,6 +776,7 @@ enum { #define CRB_SW_INT_MASK_3 (NETXEN_NIC_REG(0x1e8)) #define CRB_FW_CAPABILITIES_1 (NETXEN_CAM_RAM(0x128)) +#define CRB_FW_CAPABILITIES_2 (NETXEN_CAM_RAM(0x12c)) #define CRB_MAC_BLOCK_START (NETXEN_CAM_RAM(0x1c0)) /* @@ -955,6 +956,31 @@ enum { #define 
NX_CRB_DEV_REF_COUNT (NETXEN_CAM_RAM(0x138)) #define NX_CRB_DEV_STATE (NETXEN_CAM_RAM(0x140)) +/* MiniDIMM related macros */ +#define NETXEN_DIMM_CAPABILITY (NETXEN_CAM_RAM(0x258)) +#define NETXEN_DIMM_PRESENT 0x1 +#define NETXEN_DIMM_MEMTYPE_DDR2_SDRAM 0x2 +#define NETXEN_DIMM_SIZE 0x4 +#define NETXEN_DIMM_MEMTYPE(VAL) ((VAL >> 3) & 0xf) +#define NETXEN_DIMM_NUMROWS(VAL) ((VAL >> 7) & 0xf) +#define NETXEN_DIMM_NUMCOLS(VAL) ((VAL >> 11) & 0xf) +#define NETXEN_DIMM_NUMRANKS(VAL) ((VAL >> 15) & 0x3) +#define NETXEN_DIMM_DATAWIDTH(VAL) ((VAL >> 18) & 0x3) +#define NETXEN_DIMM_NUMBANKS(VAL) ((VAL >> 21) & 0xf) +#define NETXEN_DIMM_TYPE(VAL) ((VAL >> 25) & 0x3f) +#define NETXEN_DIMM_VALID_FLAG 0x80000000 + +#define NETXEN_DIMM_MEM_DDR2_SDRAM 0x8 + +#define NETXEN_DIMM_STD_MEM_SIZE 512 + +#define NETXEN_DIMM_TYPE_RDIMM 0x1 +#define NETXEN_DIMM_TYPE_UDIMM 0x2 +#define NETXEN_DIMM_TYPE_SO_DIMM 0x4 +#define NETXEN_DIMM_TYPE_Micro_DIMM 0x8 +#define NETXEN_DIMM_TYPE_Mini_RDIMM 0x10 +#define NETXEN_DIMM_TYPE_Mini_UDIMM 0x20 + /* Device State */ #define NX_DEV_COLD 1 #define NX_DEV_INITALIZING 2 diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 718b2744035..0d725dc91bc 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c @@ -1131,7 +1131,6 @@ netxen_validate_firmware(struct netxen_adapter *adapter) _build(file_fw_ver)); return -EINVAL; } - val = nx_get_bios_version(adapter); netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios); if ((__force u32)val != bios) { @@ -1661,6 +1660,9 @@ netxen_process_lro(struct netxen_adapter *adapter, length = skb->len; + if (adapter->flags & NETXEN_FW_MSS_CAP) + skb_shinfo(skb)->gso_size = netxen_get_lro_sts_mss(sts_data1); + netif_receive_skb(skb); adapter->stats.lro_pkts++; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 65a718f9ccd..342b3a79bd0 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1184,6 +1184,7 @@ netxen_nic_attach(struct netxen_adapter *adapter) int err, ring; struct nx_host_rds_ring *rds_ring; struct nx_host_tx_ring *tx_ring; + u32 capab2; if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) return 0; @@ -1192,6 +1193,13 @@ netxen_nic_attach(struct netxen_adapter *adapter) if (err) return err; + adapter->flags &= ~NETXEN_FW_MSS_CAP; + if (adapter->capabilities & NX_FW_CAPABILITY_MORE_CAPS) { + capab2 = NXRD32(adapter, CRB_FW_CAPABILITIES_2); + if (capab2 & NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG) + adapter->flags |= NETXEN_FW_MSS_CAP; + } + err = netxen_napi_add(adapter, netdev); if (err) return err; @@ -1810,7 +1818,6 @@ netxen_tso_check(struct net_device *netdev, flags = FLAGS_VLAN_TAGGED; } else if (vlan_tx_tag_present(skb)) { - flags = FLAGS_VLAN_OOB; vid = vlan_tx_tag_get(skb); netxen_set_tx_vlan_tci(first_desc, vid); @@ -2926,6 +2933,134 @@ static struct bin_attribute bin_attr_mem = { .write = netxen_sysfs_write_mem, }; +static ssize_t +netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct netxen_adapter *adapter = dev_get_drvdata(dev); + struct net_device *netdev = adapter->netdev; + struct netxen_dimm_cfg dimm; + u8 dw, rows, cols, banks, ranks; + u32 val; + + if (size != sizeof(struct 
netxen_dimm_cfg)) { + netdev_err(netdev, "Invalid size\n"); + return -1; + } + + memset(&dimm, 0, sizeof(struct netxen_dimm_cfg)); + val = NXRD32(adapter, NETXEN_DIMM_CAPABILITY); + + /* Checks if DIMM info is valid. */ + if (val & NETXEN_DIMM_VALID_FLAG) { + netdev_err(netdev, "Invalid DIMM flag\n"); + dimm.presence = 0xff; + goto out; + } + + rows = NETXEN_DIMM_NUMROWS(val); + cols = NETXEN_DIMM_NUMCOLS(val); + ranks = NETXEN_DIMM_NUMRANKS(val); + banks = NETXEN_DIMM_NUMBANKS(val); + dw = NETXEN_DIMM_DATAWIDTH(val); + + dimm.presence = (val & NETXEN_DIMM_PRESENT); + + /* Checks if DIMM info is present. */ + if (!dimm.presence) { + netdev_err(netdev, "DIMM not present\n"); + goto out; + } + + dimm.dimm_type = NETXEN_DIMM_TYPE(val); + + switch (dimm.dimm_type) { + case NETXEN_DIMM_TYPE_RDIMM: + case NETXEN_DIMM_TYPE_UDIMM: + case NETXEN_DIMM_TYPE_SO_DIMM: + case NETXEN_DIMM_TYPE_Micro_DIMM: + case NETXEN_DIMM_TYPE_Mini_RDIMM: + case NETXEN_DIMM_TYPE_Mini_UDIMM: + break; + default: + netdev_err(netdev, "Invalid DIMM type %x\n", dimm.dimm_type); + goto out; + } + + if (val & NETXEN_DIMM_MEMTYPE_DDR2_SDRAM) + dimm.mem_type = NETXEN_DIMM_MEM_DDR2_SDRAM; + else + dimm.mem_type = NETXEN_DIMM_MEMTYPE(val); + + if (val & NETXEN_DIMM_SIZE) { + dimm.size = NETXEN_DIMM_STD_MEM_SIZE; + goto out; + } + + if (!rows) { + netdev_err(netdev, "Invalid no of rows %x\n", rows); + goto out; + } + + if (!cols) { + netdev_err(netdev, "Invalid no of columns %x\n", cols); + goto out; + } + + if (!banks) { + netdev_err(netdev, "Invalid no of banks %x\n", banks); + goto out; + } + + ranks += 1; + + switch (dw) { + case 0x0: + dw = 32; + break; + case 0x1: + dw = 33; + break; + case 0x2: + dw = 36; + break; + case 0x3: + dw = 64; + break; + case 0x4: + dw = 72; + break; + case 0x5: + dw = 80; + break; + case 0x6: + dw = 128; + break; + case 0x7: + dw = 144; + break; + default: + netdev_err(netdev, "Invalid data-width %x\n", dw); + goto out; + } + + dimm.size = ((1 << rows) * (1 << cols) * dw * banks * ranks) / 8; + /* Size returned in MB. 
*/ + dimm.size = (dimm.size) / 0x100000; +out: + memcpy(buf, &dimm, sizeof(struct netxen_dimm_cfg)); + return sizeof(struct netxen_dimm_cfg); + +} + +static struct bin_attribute bin_attr_dimm = { + .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) }, + .size = 0, + .read = netxen_sysfs_read_dimm, +}; + static void netxen_create_sysfs_entries(struct netxen_adapter *adapter) @@ -2963,6 +3098,8 @@ netxen_create_diag_entries(struct netxen_adapter *adapter) dev_info(dev, "failed to create crb sysfs entry\n"); if (device_create_bin_file(dev, &bin_attr_mem)) dev_info(dev, "failed to create mem sysfs entry\n"); + if (device_create_bin_file(dev, &bin_attr_dimm)) + dev_info(dev, "failed to create dimm sysfs entry\n"); } @@ -2975,6 +3112,7 @@ netxen_remove_diag_entries(struct netxen_adapter *adapter) device_remove_file(dev, &dev_attr_diag_mode); device_remove_bin_file(dev, &bin_attr_crb); device_remove_bin_file(dev, &bin_attr_mem); + device_remove_bin_file(dev, &bin_attr_dimm); } #ifdef CONFIG_INET diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 385a4d5c7c2..8680a5dae4a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -36,8 +36,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 0 -#define _QLCNIC_LINUX_SUBVERSION 27 -#define QLCNIC_LINUX_VERSIONID "5.0.27" +#define _QLCNIC_LINUX_SUBVERSION 28 +#define QLCNIC_LINUX_VERSIONID "5.0.28" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) @@ -607,6 +607,7 @@ struct qlcnic_recv_context { #define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E #define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f #define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030 +#define QLCNIC_CDRP_CMD_GET_MAC_STATS 0x00000037 #define QLCNIC_RCODE_SUCCESS 0 #define QLCNIC_RCODE_NOT_SUPPORTED 9 @@ -1180,18 +1181,62 @@ struct qlcnic_esw_func_cfg { #define QLCNIC_STATS_ESWITCH 2 #define QLCNIC_QUERY_RX_COUNTER 0 #define QLCNIC_QUERY_TX_COUNTER 1 -#define QLCNIC_ESW_STATS_NOT_AVAIL 0xffffffffffffffffULL +#define QLCNIC_STATS_NOT_AVAIL 0xffffffffffffffffULL +#define QLCNIC_FILL_STATS(VAL1) \ + (((VAL1) == QLCNIC_STATS_NOT_AVAIL) ? 
0 : VAL1) +#define QLCNIC_MAC_STATS 1 +#define QLCNIC_ESW_STATS 2 #define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\ do { \ - if (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) && \ - ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \ + if (((VAL1) == QLCNIC_STATS_NOT_AVAIL) && \ + ((VAL2) != QLCNIC_STATS_NOT_AVAIL)) \ (VAL1) = (VAL2); \ - else if (((VAL1) != QLCNIC_ESW_STATS_NOT_AVAIL) && \ - ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \ + else if (((VAL1) != QLCNIC_STATS_NOT_AVAIL) && \ + ((VAL2) != QLCNIC_STATS_NOT_AVAIL)) \ (VAL1) += (VAL2); \ } while (0) +struct qlcnic_mac_statistics{ + __le64 mac_tx_frames; + __le64 mac_tx_bytes; + __le64 mac_tx_mcast_pkts; + __le64 mac_tx_bcast_pkts; + __le64 mac_tx_pause_cnt; + __le64 mac_tx_ctrl_pkt; + __le64 mac_tx_lt_64b_pkts; + __le64 mac_tx_lt_127b_pkts; + __le64 mac_tx_lt_255b_pkts; + __le64 mac_tx_lt_511b_pkts; + __le64 mac_tx_lt_1023b_pkts; + __le64 mac_tx_lt_1518b_pkts; + __le64 mac_tx_gt_1518b_pkts; + __le64 rsvd1[3]; + + __le64 mac_rx_frames; + __le64 mac_rx_bytes; + __le64 mac_rx_mcast_pkts; + __le64 mac_rx_bcast_pkts; + __le64 mac_rx_pause_cnt; + __le64 mac_rx_ctrl_pkt; + __le64 mac_rx_lt_64b_pkts; + __le64 mac_rx_lt_127b_pkts; + __le64 mac_rx_lt_255b_pkts; + __le64 mac_rx_lt_511b_pkts; + __le64 mac_rx_lt_1023b_pkts; + __le64 mac_rx_lt_1518b_pkts; + __le64 mac_rx_gt_1518b_pkts; + __le64 rsvd2[3]; + + __le64 mac_rx_length_error; + __le64 mac_rx_length_small; + __le64 mac_rx_length_large; + __le64 mac_rx_jabber; + __le64 mac_rx_dropped; + __le64 mac_rx_crc_error; + __le64 mac_align_error; +} __packed; + struct __qlcnic_esw_statistics { __le16 context_id; __le16 version; @@ -1352,6 +1397,8 @@ enum op_codes { #define QLCNIC_ENABLE_FW_DUMP 0xaddfeed #define QLCNIC_DISABLE_FW_DUMP 0xbadfeed #define QLCNIC_FORCE_FW_RESET 0xdeaddead +#define QLCNIC_SET_QUIESCENT 0xadd00010 +#define QLCNIC_RESET_QUIESCENT 0xadd00020 struct qlcnic_dump_operations { enum op_codes opcode; @@ -1510,6 +1557,7 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *, const u8, const u8, int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8, struct __qlcnic_esw_statistics *); int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8); +int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *); extern int qlcnic_config_tso; /* @@ -1559,6 +1607,7 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring) } extern const struct ethtool_ops qlcnic_ethtool_ops; +extern const struct ethtool_ops qlcnic_ethtool_failed_ops; struct qlcnic_nic_template { int (*config_bridged_mode) (struct qlcnic_adapter *, u32); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index 569a837d2ac..8db85244e8a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -905,6 +905,65 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, return err; } +/* This routine will retrieve the MAC statistics from firmware */ +int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter, + struct qlcnic_mac_statistics *mac_stats) +{ + struct qlcnic_mac_statistics *stats; + struct qlcnic_cmd_args cmd; + size_t stats_size = sizeof(struct qlcnic_mac_statistics); + dma_addr_t stats_dma_t; + void *stats_addr; + int err; + + stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, + &stats_dma_t, GFP_KERNEL); + if (!stats_addr) { + dev_err(&adapter->pdev->dev, + "%s: Unable to allocate memory.\n", __func__); + return -ENOMEM; + } + 
memset(stats_addr, 0, stats_size); + memset(&cmd, 0, sizeof(cmd)); + cmd.req.cmd = QLCNIC_CDRP_CMD_GET_MAC_STATS; + cmd.req.arg1 = stats_size << 16; + cmd.req.arg2 = MSD(stats_dma_t); + cmd.req.arg3 = LSD(stats_dma_t); + + qlcnic_issue_cmd(adapter, &cmd); + err = cmd.rsp.cmd; + + if (!err) { + stats = stats_addr; + mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames); + mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes); + mac_stats->mac_tx_mcast_pkts = + le64_to_cpu(stats->mac_tx_mcast_pkts); + mac_stats->mac_tx_bcast_pkts = + le64_to_cpu(stats->mac_tx_bcast_pkts); + mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames); + mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes); + mac_stats->mac_rx_mcast_pkts = + le64_to_cpu(stats->mac_rx_mcast_pkts); + mac_stats->mac_rx_length_error = + le64_to_cpu(stats->mac_rx_length_error); + mac_stats->mac_rx_length_small = + le64_to_cpu(stats->mac_rx_length_small); + mac_stats->mac_rx_length_large = + le64_to_cpu(stats->mac_rx_length_large); + mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber); + mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped); + mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error); + } else { + dev_info(&adapter->pdev->dev, + "%s: Get mac stats failed =%d.\n", __func__, err); + } + + dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, + stats_dma_t); + return err; +} + int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch, const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) { @@ -920,13 +979,13 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch, return -EIO; memset(esw_stats, 0, sizeof(u64)); - esw_stats->unicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL; - esw_stats->multicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL; - esw_stats->broadcast_frames = QLCNIC_ESW_STATS_NOT_AVAIL; - esw_stats->dropped_frames = QLCNIC_ESW_STATS_NOT_AVAIL; - esw_stats->errors = QLCNIC_ESW_STATS_NOT_AVAIL; - esw_stats->local_frames = QLCNIC_ESW_STATS_NOT_AVAIL; - esw_stats->numbytes = QLCNIC_ESW_STATS_NOT_AVAIL; + esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL; + esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL; + esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL; + esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL; + esw_stats->errors = QLCNIC_STATS_NOT_AVAIL; + esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL; + esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL; esw_stats->context_id = eswitch; for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 89ddf7f7d7d..9e9e78a5c4d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -78,8 +78,46 @@ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = { "tx numbytes", }; -#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats) +static const char qlcnic_mac_stats_strings [][ETH_GSTRING_LEN] = { + "mac_tx_frames", + "mac_tx_bytes", + "mac_tx_mcast_pkts", + "mac_tx_bcast_pkts", + "mac_tx_pause_cnt", + "mac_tx_ctrl_pkt", + "mac_tx_lt_64b_pkts", + "mac_tx_lt_127b_pkts", + "mac_tx_lt_255b_pkts", + "mac_tx_lt_511b_pkts", + "mac_tx_lt_1023b_pkts", + "mac_tx_lt_1518b_pkts", + "mac_tx_gt_1518b_pkts", + "mac_rx_frames", + "mac_rx_bytes", + "mac_rx_mcast_pkts", + "mac_rx_bcast_pkts", + "mac_rx_pause_cnt", + "mac_rx_ctrl_pkt", + "mac_rx_lt_64b_pkts", + "mac_rx_lt_127b_pkts", + 
"mac_rx_lt_255b_pkts", + "mac_rx_lt_511b_pkts", + "mac_rx_lt_1023b_pkts", + "mac_rx_lt_1518b_pkts", + "mac_rx_gt_1518b_pkts", + "mac_rx_length_error", + "mac_rx_length_small", + "mac_rx_length_large", + "mac_rx_jabber", + "mac_rx_dropped", + "mac_rx_crc_error", + "mac_align_error", +}; + +#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats) +#define QLCNIC_MAC_STATS_LEN ARRAY_SIZE(qlcnic_mac_stats_strings) #define QLCNIC_DEVICE_STATS_LEN ARRAY_SIZE(qlcnic_device_gstrings_stats) +#define QLCNIC_TOTAL_STATS_LEN QLCNIC_STATS_LEN + QLCNIC_MAC_STATS_LEN static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { "Register_Test_on_offline", @@ -644,8 +682,8 @@ static int qlcnic_get_sset_count(struct net_device *dev, int sset) return QLCNIC_TEST_LEN; case ETH_SS_STATS: if (adapter->flags & QLCNIC_ESWITCH_ENABLED) - return QLCNIC_STATS_LEN + QLCNIC_DEVICE_STATS_LEN; - return QLCNIC_STATS_LEN; + return QLCNIC_TOTAL_STATS_LEN + QLCNIC_DEVICE_STATS_LEN; + return QLCNIC_TOTAL_STATS_LEN; default: return -EOPNOTSUPP; } @@ -851,7 +889,7 @@ static void qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data) { struct qlcnic_adapter *adapter = netdev_priv(dev); - int index, i; + int index, i, j; switch (stringset) { case ETH_SS_TEST: @@ -864,6 +902,11 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data) qlcnic_gstrings_stats[index].stat_string, ETH_GSTRING_LEN); } + for (j = 0; j < QLCNIC_MAC_STATS_LEN; index++, j++) { + memcpy(data + index * ETH_GSTRING_LEN, + qlcnic_mac_stats_strings[j], + ETH_GSTRING_LEN); + } if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) return; for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) { @@ -874,22 +917,64 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data) } } -#define QLCNIC_FILL_ESWITCH_STATS(VAL1) \ - (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) ? 
0 : VAL1) - static void -qlcnic_fill_device_stats(int *index, u64 *data, - struct __qlcnic_esw_statistics *stats) +qlcnic_fill_stats(int *index, u64 *data, void *stats, int type) { int ind = *index; - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->unicast_frames); - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->multicast_frames); - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->broadcast_frames); - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->dropped_frames); - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->errors); - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->local_frames); - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->numbytes); + if (type == QLCNIC_MAC_STATS) { + struct qlcnic_mac_statistics *mac_stats = + (struct qlcnic_mac_statistics *)stats; + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts); + data[ind++] = + QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts); + data[ind++] = + QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts); + data[ind++] = + QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts); + data[ind++] = + QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts); + data[ind++] = + QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts); + data[ind++] = + QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error); + data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_align_error); + } else if (type == QLCNIC_ESW_STATS) { + struct __qlcnic_esw_statistics *esw_stats = + (struct __qlcnic_esw_statistics *)stats; + data[ind++] = QLCNIC_FILL_STATS(esw_stats->unicast_frames); + data[ind++] = QLCNIC_FILL_STATS(esw_stats->multicast_frames); + data[ind++] = QLCNIC_FILL_STATS(esw_stats->broadcast_frames); + data[ind++] = QLCNIC_FILL_STATS(esw_stats->dropped_frames); + data[ind++] = QLCNIC_FILL_STATS(esw_stats->errors); + data[ind++] = QLCNIC_FILL_STATS(esw_stats->local_frames); + data[ind++] = QLCNIC_FILL_STATS(esw_stats->numbytes); + } *index = ind; } @@ -900,6 +985,7 @@ qlcnic_get_ethtool_stats(struct 
net_device *dev, { struct qlcnic_adapter *adapter = netdev_priv(dev); struct qlcnic_esw_statistics port_stats; + struct qlcnic_mac_statistics mac_stats; int index, ret; for (index = 0; index < QLCNIC_STATS_LEN; index++) { @@ -911,6 +997,11 @@ qlcnic_get_ethtool_stats(struct net_device *dev, sizeof(u64)) ? *(u64 *)p:(*(u32 *)p); } + /* Retrieve MAC statistics from firmware */ + memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics)); + qlcnic_get_mac_stats(adapter, &mac_stats); + qlcnic_fill_stats(&index, data, &mac_stats, QLCNIC_MAC_STATS); + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) return; @@ -920,14 +1011,14 @@ qlcnic_get_ethtool_stats(struct net_device *dev, if (ret) return; - qlcnic_fill_device_stats(&index, data, &port_stats.rx); + qlcnic_fill_stats(&index, data, &port_stats.rx, QLCNIC_ESW_STATS); ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, QLCNIC_QUERY_TX_COUNTER, &port_stats.tx); if (ret) return; - qlcnic_fill_device_stats(&index, data, &port_stats.tx); + qlcnic_fill_stats(&index, data, &port_stats.tx, QLCNIC_ESW_STATS); } static int qlcnic_set_led(struct net_device *dev, @@ -1132,11 +1223,21 @@ qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + if (!fw_dump->tmpl_hdr) { + netdev_err(adapter->netdev, "FW Dump not supported\n"); + return -ENOTSUPP; + } + if (fw_dump->clr) dump->len = fw_dump->tmpl_hdr->size + fw_dump->size; else dump->len = 0; - dump->flag = fw_dump->tmpl_hdr->drv_cap_mask; + + if (!fw_dump->enable) + dump->flag = ETH_FW_DUMP_DISABLE; + else + dump->flag = fw_dump->tmpl_hdr->drv_cap_mask; + dump->version = adapter->fw_version; return 0; } @@ -1150,6 +1251,11 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + if (!fw_dump->tmpl_hdr) { + netdev_err(netdev, "FW Dump not supported\n"); + return -ENOTSUPP; + } + if (!fw_dump->clr) { netdev_info(netdev, "Dump not available\n"); return -EINVAL; @@ -1177,55 +1283,74 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, static int qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) { - int ret = 0; + int i; struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + u32 state; switch (val->flag) { case QLCNIC_FORCE_FW_DUMP_KEY: + if (!fw_dump->tmpl_hdr) { + netdev_err(netdev, "FW dump not supported\n"); + return -ENOTSUPP; + } if (!fw_dump->enable) { netdev_info(netdev, "FW dump not enabled\n"); - return ret; + return 0; } if (fw_dump->clr) { netdev_info(netdev, "Previous dump not cleared, not forcing dump\n"); - return ret; + return 0; } netdev_info(netdev, "Forcing a FW dump\n"); qlcnic_dev_request_reset(adapter); break; case QLCNIC_DISABLE_FW_DUMP: - if (fw_dump->enable) { + if (fw_dump->enable && fw_dump->tmpl_hdr) { netdev_info(netdev, "Disabling FW dump\n"); fw_dump->enable = 0; } - break; + return 0; case QLCNIC_ENABLE_FW_DUMP: - if (!fw_dump->enable && fw_dump->tmpl_hdr) { + if (!fw_dump->tmpl_hdr) { + netdev_err(netdev, "FW dump not supported\n"); + return -ENOTSUPP; + } + if (!fw_dump->enable) { netdev_info(netdev, "Enabling FW dump\n"); fw_dump->enable = 1; } - break; + return 0; case QLCNIC_FORCE_FW_RESET: netdev_info(netdev, "Forcing a FW reset\n"); qlcnic_dev_request_reset(adapter); adapter->flags &= 
~QLCNIC_FW_RESET_OWNER; - break; + return 0; + case QLCNIC_SET_QUIESCENT: + case QLCNIC_RESET_QUIESCENT: + state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) + netdev_info(netdev, "Device in FAILED state\n"); + return 0; default: - if (val->flag > QLCNIC_DUMP_MASK_MAX || - val->flag < QLCNIC_DUMP_MASK_MIN) { - netdev_info(netdev, - "Invalid dump level: 0x%x\n", val->flag); - ret = -EINVAL; - goto out; + if (!fw_dump->tmpl_hdr) { + netdev_err(netdev, "FW dump not supported\n"); + return -ENOTSUPP; + } + for (i = 0; i < ARRAY_SIZE(FW_DUMP_LEVELS); i++) { + if (val->flag == FW_DUMP_LEVELS[i]) { + fw_dump->tmpl_hdr->drv_cap_mask = + val->flag; + netdev_info(netdev, "Driver mask changed to: 0x%x\n", + fw_dump->tmpl_hdr->drv_cap_mask); + return 0; + } } - fw_dump->tmpl_hdr->drv_cap_mask = val->flag & 0xff; - netdev_info(netdev, "Driver mask changed to: 0x%x\n", - fw_dump->tmpl_hdr->drv_cap_mask); + netdev_info(netdev, "Invalid dump level: 0x%x\n", val->flag); + return -EINVAL; } -out: - return ret; + return 0; } const struct ethtool_ops qlcnic_ethtool_ops = { @@ -1258,3 +1383,10 @@ const struct ethtool_ops qlcnic_ethtool_ops = { .get_dump_data = qlcnic_get_dump_data, .set_dump = qlcnic_set_dump, }; + +const struct ethtool_ops qlcnic_ethtool_failed_ops = { + .get_settings = qlcnic_get_settings, + .get_drvinfo = qlcnic_get_drvinfo, + .set_msglevel = qlcnic_set_msglevel, + .get_msglevel = qlcnic_get_msglevel, +}; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h index a52819303d1..6ced3195aad 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h @@ -704,6 +704,8 @@ enum { #define QLCNIC_DEV_FAILED 0x6 #define QLCNIC_DEV_QUISCENT 0x7 +#define QLCNIC_DEV_BADBAD 0xbad0bad0 + #define QLCNIC_DEV_NPAR_NON_OPER 0 /* NON Operational */ #define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */ #define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */ @@ -776,6 +778,10 @@ struct qlcnic_legacy_intr_set { #define FLASH_ROM_WINDOW 0x42110030 #define FLASH_ROM_DATA 0x42150000 + +static const u32 FW_DUMP_LEVELS[] = { + 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff }; + static const u32 MIU_TEST_READ_DATA[] = { 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, }; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 75c32e875fe..46e77a2c512 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -338,6 +338,10 @@ static const struct net_device_ops qlcnic_netdev_ops = { #endif }; +static const struct net_device_ops qlcnic_netdev_failed_ops = { + .ndo_open = qlcnic_open, +}; + static struct qlcnic_nic_template qlcnic_ops = { .config_bridged_mode = qlcnic_config_bridged_mode, .config_led = qlcnic_config_led, @@ -1623,8 +1627,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = adapter->nic_ops->start_firmware(adapter); if (err) { - dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); - goto err_out_decr_ref; + dev_err(&pdev->dev, "Loading fw failed. 
Please Reboot\n" + "\t\tIf reboot doesn't help, try flashing the card\n"); + goto err_out_maintenance_mode; } if (qlcnic_read_mac_addr(adapter)) @@ -1695,6 +1700,18 @@ err_out_disable_pdev: pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); return err; + +err_out_maintenance_mode: + netdev->netdev_ops = &qlcnic_netdev_failed_ops; + SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops); + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "failed to register net device\n"); + goto err_out_decr_ref; + } + pci_set_drvdata(pdev, adapter); + qlcnic_create_diag_entries(adapter); + return 0; } static void __devexit qlcnic_remove(struct pci_dev *pdev) @@ -1831,8 +1848,14 @@ done: static int qlcnic_open(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); + u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); int err; + if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) { + netdev_err(netdev, "Device in FAILED state\n"); + return -EIO; + } + netif_carrier_off(netdev); err = qlcnic_attach(adapter); @@ -1942,7 +1965,7 @@ qlcnic_send_filter(struct qlcnic_adapter *adapter, __le16 vlan_id = 0; u8 hindex; - if (!compare_ether_addr(phdr->h_source, adapter->mac_addr)) + if (ether_addr_equal(phdr->h_source, adapter->mac_addr)) return; if (adapter->fhash.fnum >= adapter->fhash.fmax) @@ -2212,8 +2235,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if (adapter->flags & QLCNIC_MACSPOOF) { phdr = (struct ethhdr *)skb->data; - if (compare_ether_addr(phdr->h_source, - adapter->mac_addr)) + if (!ether_addr_equal(phdr->h_source, adapter->mac_addr)) goto drop_packet; } @@ -3018,6 +3040,12 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter) return; state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) { + netdev_err(adapter->netdev, + "Device is in FAILED state, Please Reboot\n"); + qlcnic_api_unlock(adapter); + return; + } if (state == QLCNIC_DEV_READY) { QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET); @@ -3061,6 +3089,9 @@ qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter) while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) msleep(10); + if (!adapter->fw_work.work.func) + return; + cancel_delayed_work_sync(&adapter->fw_work); } @@ -4280,6 +4311,7 @@ static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) { struct device *dev = &adapter->pdev->dev; + u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); if (device_create_bin_file(dev, &bin_attr_port_stats)) dev_info(dev, "failed to create port stats sysfs entry"); @@ -4288,14 +4320,19 @@ qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) return; if (device_create_file(dev, &dev_attr_diag_mode)) dev_info(dev, "failed to create diag_mode sysfs entry\n"); - if (device_create_file(dev, &dev_attr_beacon)) - dev_info(dev, "failed to create beacon sysfs entry"); if (device_create_bin_file(dev, &bin_attr_crb)) dev_info(dev, "failed to create crb sysfs entry\n"); if (device_create_bin_file(dev, &bin_attr_mem)) dev_info(dev, "failed to create mem sysfs entry\n"); + + if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) + return; + if (device_create_bin_file(dev, &bin_attr_pci_config)) dev_info(dev, "failed to create pci config sysfs entry"); + if (device_create_file(dev, &dev_attr_beacon)) + dev_info(dev, "failed to create beacon sysfs entry"); + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) return; if (device_create_bin_file(dev, &bin_attr_esw_config)) @@ -4314,16 
+4351,19 @@ static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) { struct device *dev = &adapter->pdev->dev; + u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); device_remove_bin_file(dev, &bin_attr_port_stats); if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) return; device_remove_file(dev, &dev_attr_diag_mode); - device_remove_file(dev, &dev_attr_beacon); device_remove_bin_file(dev, &bin_attr_crb); device_remove_bin_file(dev, &bin_attr_mem); + if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) + return; device_remove_bin_file(dev, &bin_attr_pci_config); + device_remove_file(dev, &dev_attr_beacon); if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) return; device_remove_bin_file(dev, &bin_attr_esw_config); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 49343ec21c8..09d8d33171d 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -3845,7 +3845,7 @@ static int ql_wol(struct ql_adapter *qdev) if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)) { netif_err(qdev, ifdown, qdev->ndev, - "Unsupported WOL paramter. qdev->wol = 0x%x.\n", + "Unsupported WOL parameter. qdev->wol = 0x%x.\n", qdev->wol); return -EINVAL; } diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index b96e1920e04..4de73643fec 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -4,7 +4,7 @@ * Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw> * Copyright (C) 2007 * Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us> - * Florian Fainelli <florian@openwrt.org> + * Copyright (C) 2007-2012 Florian Fainelli <florian@openwrt.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -74,9 +74,13 @@ #define MT_ICR 0x0C /* TX interrupt control */ #define MR_ICR 0x10 /* RX interrupt control */ #define MTPR 0x14 /* TX poll command register */ +#define TM2TX 0x0001 /* Trigger MAC to transmit */ #define MR_BSR 0x18 /* RX buffer size */ #define MR_DCR 0x1A /* RX descriptor control */ #define MLSR 0x1C /* Last status */ +#define TX_FIFO_UNDR 0x0200 /* TX FIFO under-run */ +#define TX_EXCEEDC 0x2000 /* Transmit exceed collision */ +#define TX_LATEC 0x4000 /* Transmit late collision */ #define MMDIO 0x20 /* MDIO control register */ #define MDIO_WRITE 0x4000 /* MDIO write */ #define MDIO_READ 0x2000 /* MDIO read */ @@ -124,6 +128,9 @@ #define MID_3M 0x82 /* MID3 Medium */ #define MID_3H 0x84 /* MID3 High */ #define PHY_CC 0x88 /* PHY status change configuration register */ +#define SCEN 0x8000 /* PHY status change enable */ +#define PHYAD_SHIFT 8 /* PHY address shift */ +#define TMRDIV_SHIFT 0 /* Timer divider shift */ #define PHY_ST 0x8A /* PHY status register */ #define MAC_SM 0xAC /* MAC status machine */ #define MAC_SM_RST 0x0002 /* MAC status machine reset */ @@ -137,6 +144,8 @@ #define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ #define MCAST_MAX 3 /* Max number multicast addresses to filter */ +#define MAC_DEF_TIMEOUT 2048 /* Default MAC read/write operation timeout */ + /* Descriptor status */ #define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */ #define DSC_RX_OK 0x4000 /* RX was successful */ @@ -187,7 +196,7 @@ struct r6040_private { dma_addr_t rx_ring_dma; dma_addr_t tx_ring_dma; u16 tx_free_desc; - u16 mcr0, mcr1; + u16 mcr0; struct net_device *dev; struct mii_bus 
*mii_bus; struct napi_struct napi; @@ -204,7 +213,7 @@ static char version[] __devinitdata = DRV_NAME /* Read a word data from PHY Chip */ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg) { - int limit = 2048; + int limit = MAC_DEF_TIMEOUT; u16 cmd; iowrite16(MDIO_READ + reg + (phy_addr << 8), ioaddr + MMDIO); @@ -222,7 +231,7 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg) static void r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val) { - int limit = 2048; + int limit = MAC_DEF_TIMEOUT; u16 cmd; iowrite16(val, ioaddr + MMWD); @@ -358,27 +367,35 @@ err_exit: return rc; } -static void r6040_init_mac_regs(struct net_device *dev) +static void r6040_reset_mac(struct r6040_private *lp) { - struct r6040_private *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; - int limit = 2048; + int limit = MAC_DEF_TIMEOUT; u16 cmd; - /* Mask Off Interrupt */ - iowrite16(MSK_INT, ioaddr + MIER); - - /* Reset RDC MAC */ iowrite16(MAC_RST, ioaddr + MCR1); while (limit--) { cmd = ioread16(ioaddr + MCR1); if (cmd & MAC_RST) break; } + /* Reset internal state machine */ iowrite16(MAC_SM_RST, ioaddr + MAC_SM); iowrite16(0, ioaddr + MAC_SM); mdelay(5); +} + +static void r6040_init_mac_regs(struct net_device *dev) +{ + struct r6040_private *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + + /* Mask Off Interrupt */ + iowrite16(MSK_INT, ioaddr + MIER); + + /* Reset RDC MAC */ + r6040_reset_mac(lp); /* MAC Bus Control Register */ iowrite16(MBCR_DEFAULT, ioaddr + MBCR); @@ -407,7 +424,7 @@ static void r6040_init_mac_regs(struct net_device *dev) /* Let TX poll the descriptors * we may got called by r6040_tx_timeout which has left * some unsent tx buffers */ - iowrite16(0x01, ioaddr + MTPR); + iowrite16(TM2TX, ioaddr + MTPR); } static void r6040_tx_timeout(struct net_device *dev) @@ -445,18 +462,13 @@ static void r6040_down(struct net_device *dev) { struct r6040_private *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; - int limit = 2048; u16 *adrp; - u16 cmd; /* Stop MAC */ iowrite16(MSK_INT, ioaddr + MIER); /* Mask Off Interrupt */ - iowrite16(MAC_RST, ioaddr + MCR1); /* Reset RDC MAC */ - while (limit--) { - cmd = ioread16(ioaddr + MCR1); - if (cmd & MAC_RST) - break; - } + + /* Reset RDC MAC */ + r6040_reset_mac(lp); /* Restore MAC Address to MIDx */ adrp = (u16 *) dev->dev_addr; @@ -599,9 +611,9 @@ static void r6040_tx(struct net_device *dev) /* Check for errors */ err = ioread16(ioaddr + MLSR); - if (err & 0x0200) - dev->stats.rx_fifo_errors++; - if (err & (0x2000 | 0x4000)) + if (err & TX_FIFO_UNDR) + dev->stats.tx_fifo_errors++; + if (err & (TX_EXCEEDC | TX_LATEC)) dev->stats.tx_carrier_errors++; if (descptr->status & DSC_OWNER_MAC) @@ -736,11 +748,7 @@ static void r6040_mac_address(struct net_device *dev) u16 *adrp; /* Reset MAC */ - iowrite16(MAC_RST, ioaddr + MCR1); - /* Reset internal state machine */ - iowrite16(MAC_SM_RST, ioaddr + MAC_SM); - iowrite16(0, ioaddr + MAC_SM); - mdelay(5); + r6040_reset_mac(lp); /* Restore MAC Address */ adrp = (u16 *) dev->dev_addr; @@ -840,7 +848,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb, skb_tx_timestamp(skb); /* Trigger the MAC to check the TX descriptor */ - iowrite16(0x01, ioaddr + MTPR); + iowrite16(TM2TX, ioaddr + MTPR); lp->tx_insert_ptr = descptr->vndescp; /* If no tx resource, stop */ @@ -973,6 +981,7 @@ static const struct ethtool_ops netdev_ethtool_ops = { .get_settings = netdev_get_settings, .set_settings = netdev_set_settings, .get_link = 
ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, }; static const struct net_device_ops r6040_netdev_ops = { @@ -1126,10 +1135,15 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, err = -EIO; goto err_out_free_res; } + /* If PHY status change register is still set to zero it means the - * bootloader didn't initialize it */ + * bootloader didn't initialize it, so we set it to: + * - enable phy status change + * - enable all phy addresses + * - set to lowest timer divider */ if (ioread16(ioaddr + PHY_CC) == 0) - iowrite16(0x9f07, ioaddr + PHY_CC); + iowrite16(SCEN | PHY_MAX_ADDR << PHYAD_SHIFT | + 7 << TMRDIV_SHIFT, ioaddr + PHY_CC); /* Init system & device */ lp->base = ioaddr; diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index b3287c0fe27..5eef290997f 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -635,9 +635,12 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance) */ static void cp_poll_controller(struct net_device *dev) { - disable_irq(dev->irq); - cp_interrupt(dev->irq, dev); - enable_irq(dev->irq); + struct cp_private *cp = netdev_priv(dev); + const int irq = cp->pdev->irq; + + disable_irq(irq); + cp_interrupt(irq, dev); + enable_irq(irq); } #endif @@ -1117,6 +1120,7 @@ static void cp_free_rings (struct cp_private *cp) static int cp_open (struct net_device *dev) { struct cp_private *cp = netdev_priv(dev); + const int irq = cp->pdev->irq; int rc; netif_dbg(cp, ifup, dev, "enabling interface\n"); @@ -1129,7 +1133,7 @@ static int cp_open (struct net_device *dev) cp_init_hw(cp); - rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev); + rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev); if (rc) goto err_out_hw; @@ -1166,7 +1170,7 @@ static int cp_close (struct net_device *dev) spin_unlock_irqrestore(&cp->lock, flags); - free_irq(dev->irq, dev); + free_irq(cp->pdev->irq, dev); cp_free_rings(cp); return 0; @@ -1914,7 +1918,6 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) (unsigned long long)pciaddr); goto err_out_res; } - dev->base_addr = (unsigned long) regs; cp->regs = regs; cp_stop_hw(cp); @@ -1942,14 +1945,12 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_HIGHDMA; - dev->irq = pdev->irq; - rc = register_netdev(dev); if (rc) goto err_out_iomap; - netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n", - dev->base_addr, dev->dev_addr, dev->irq); + netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n", + regs, dev->dev_addr, pdev->irq); pci_set_drvdata(pdev, dev); diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index df7fd8d083d..03df076ed59 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -148,9 +148,9 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; /* Whether to use MMIO or PIO. Default to MMIO. */ #ifdef CONFIG_8139TOO_PIO -static int use_io = 1; +static bool use_io = true; #else -static int use_io = 0; +static bool use_io = false; #endif /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). @@ -620,7 +620,7 @@ MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); -module_param(use_io, int, 0); +module_param(use_io, bool, 0); MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 
0=MMIO 1=PIO"); module_param(multicast_filter_limit, int, 0); module_param_array(media, int, NULL, 0); @@ -750,15 +750,22 @@ static void rtl8139_chip_reset (void __iomem *ioaddr) static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev) { + struct device *d = &pdev->dev; void __iomem *ioaddr; struct net_device *dev; struct rtl8139_private *tp; u8 tmp8; int rc, disable_dev_on_err = 0; - unsigned int i; - unsigned long pio_start, pio_end, pio_flags, pio_len; - unsigned long mmio_start, mmio_end, mmio_flags, mmio_len; + unsigned int i, bar; + unsigned long io_len; u32 version; + static const struct { + unsigned long mask; + char *type; + } res[] = { + { IORESOURCE_IO, "PIO" }, + { IORESOURCE_MEM, "MMIO" } + }; assert (pdev != NULL); @@ -777,78 +784,45 @@ static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev) if (rc) goto err_out; - pio_start = pci_resource_start (pdev, 0); - pio_end = pci_resource_end (pdev, 0); - pio_flags = pci_resource_flags (pdev, 0); - pio_len = pci_resource_len (pdev, 0); - - mmio_start = pci_resource_start (pdev, 1); - mmio_end = pci_resource_end (pdev, 1); - mmio_flags = pci_resource_flags (pdev, 1); - mmio_len = pci_resource_len (pdev, 1); - - /* set this immediately, we need to know before - * we talk to the chip directly */ - pr_debug("PIO region size == 0x%02lX\n", pio_len); - pr_debug("MMIO region size == 0x%02lX\n", mmio_len); - -retry: - if (use_io) { - /* make sure PCI base addr 0 is PIO */ - if (!(pio_flags & IORESOURCE_IO)) { - dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n"); - rc = -ENODEV; - goto err_out; - } - /* check for weird/broken PCI region reporting */ - if (pio_len < RTL_MIN_IO_SIZE) { - dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n"); - rc = -ENODEV; - goto err_out; - } - } else { - /* make sure PCI base addr 1 is MMIO */ - if (!(mmio_flags & IORESOURCE_MEM)) { - dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n"); - rc = -ENODEV; - goto err_out; - } - if (mmio_len < RTL_MIN_IO_SIZE) { - dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n"); - rc = -ENODEV; - goto err_out; - } - } - rc = pci_request_regions (pdev, DRV_NAME); if (rc) goto err_out; disable_dev_on_err = 1; - /* enable PCI bus-mastering */ pci_set_master (pdev); - if (use_io) { - ioaddr = pci_iomap(pdev, 0, 0); - if (!ioaddr) { - dev_err(&pdev->dev, "cannot map PIO, aborting\n"); - rc = -EIO; - goto err_out; - } - dev->base_addr = pio_start; - tp->regs_len = pio_len; - } else { - /* ioremap MMIO region */ - ioaddr = pci_iomap(pdev, 1, 0); - if (ioaddr == NULL) { - dev_err(&pdev->dev, "cannot remap MMIO, trying PIO\n"); - pci_release_regions(pdev); - use_io = 1; +retry: + /* PIO bar register comes first. */ + bar = !use_io; + + io_len = pci_resource_len(pdev, bar); + + dev_dbg(d, "%s region size = 0x%02lX\n", res[bar].type, io_len); + + if (!(pci_resource_flags(pdev, bar) & res[bar].mask)) { + dev_err(d, "region #%d not a %s resource, aborting\n", bar, + res[bar].type); + rc = -ENODEV; + goto err_out; + } + if (io_len < RTL_MIN_IO_SIZE) { + dev_err(d, "Invalid PCI %s region size(s), aborting\n", + res[bar].type); + rc = -ENODEV; + goto err_out; + } + + ioaddr = pci_iomap(pdev, bar, 0); + if (!ioaddr) { + dev_err(d, "cannot map %s\n", res[bar].type); + if (!use_io) { + use_io = true; goto retry; } - dev->base_addr = (long) ioaddr; - tp->regs_len = mmio_len; + rc = -ENODEV; + goto err_out; } + tp->regs_len = io_len; tp->mmio_addr = ioaddr; /* Bring old chips out of low-power mode. 
*/ @@ -1035,8 +1009,6 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, dev->hw_features |= NETIF_F_RXALL; dev->hw_features |= NETIF_F_RXFCS; - dev->irq = pdev->irq; - /* tp zeroed and aligned in alloc_etherdev */ tp = netdev_priv(dev); @@ -1062,9 +1034,9 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, pci_set_drvdata (pdev, dev); - netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n", + netdev_info(dev, "%s at 0x%p, %pM, IRQ %d\n", board_info[ent->driver_data].name, - dev->base_addr, dev->dev_addr, dev->irq); + ioaddr, dev->dev_addr, pdev->irq); netdev_dbg(dev, "Identified 8139 chip type '%s'\n", rtl_chip_info[tp->chipset].name); @@ -1339,10 +1311,11 @@ static void mdio_write (struct net_device *dev, int phy_id, int location, static int rtl8139_open (struct net_device *dev) { struct rtl8139_private *tp = netdev_priv(dev); - int retval; void __iomem *ioaddr = tp->mmio_addr; + const int irq = tp->pci_dev->irq; + int retval; - retval = request_irq (dev->irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev); + retval = request_irq(irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev); if (retval) return retval; @@ -1351,7 +1324,7 @@ static int rtl8139_open (struct net_device *dev) tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, &tp->rx_ring_dma, GFP_KERNEL); if (tp->tx_bufs == NULL || tp->rx_ring == NULL) { - free_irq(dev->irq, dev); + free_irq(irq, dev); if (tp->tx_bufs) dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, @@ -1377,7 +1350,7 @@ static int rtl8139_open (struct net_device *dev) "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n", __func__, (unsigned long long)pci_resource_start (tp->pci_dev, 1), - dev->irq, RTL_R8 (MediaStatus), + irq, RTL_R8 (MediaStatus), tp->mii.full_duplex ? "full" : "half"); rtl8139_start_thread(tp); @@ -2240,9 +2213,12 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance) */ static void rtl8139_poll_controller(struct net_device *dev) { - disable_irq(dev->irq); - rtl8139_interrupt(dev->irq, dev); - enable_irq(dev->irq); + struct rtl8139_private *tp = netdev_priv(dev); + const int irq = tp->pci_dev->irq; + + disable_irq(irq); + rtl8139_interrupt(irq, dev); + enable_irq(irq); } #endif @@ -2295,7 +2271,7 @@ static int rtl8139_close (struct net_device *dev) spin_unlock_irqrestore (&tp->lock, flags); - free_irq (dev->irq, dev); + free_irq(tp->pci_dev->irq, dev); rtl8139_tx_clear (tp); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index ce6b44d1f25..4f74b9762c2 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -44,6 +44,8 @@ #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" +#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" +#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" #ifdef RTL8169_DEBUG #define assert(expr) \ @@ -137,6 +139,8 @@ enum mac_version { RTL_GIGA_MAC_VER_34, RTL_GIGA_MAC_VER_35, RTL_GIGA_MAC_VER_36, + RTL_GIGA_MAC_VER_37, + RTL_GIGA_MAC_VER_38, RTL_GIGA_MAC_NONE = 0xff, }; @@ -249,6 +253,12 @@ static const struct { [RTL_GIGA_MAC_VER_36] = _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_37] = + _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_38] = + _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1, + JUMBO_9K, false), }; #undef _R @@ -319,6 +329,8 @@ enum rtl_registers { Config0 = 0x51, Config1 = 0x52, Config2 = 0x53, +#define PME_SIGNAL (1 << 5) /* 
8168c and later */ + Config3 = 0x54, Config4 = 0x55, Config5 = 0x56, @@ -359,6 +371,9 @@ enum rtl8168_8101_registers { #define CSIAR_BYTE_ENABLE 0x0f #define CSIAR_BYTE_ENABLE_SHIFT 12 #define CSIAR_ADDR_MASK 0x0fff +#define CSIAR_FUNC_CARD 0x00000000 +#define CSIAR_FUNC_SDIO 0x00010000 +#define CSIAR_FUNC_NIC 0x00020000 PMCH = 0x6f, EPHYAR = 0x80, #define EPHYAR_FLAG 0x80000000 @@ -720,6 +735,11 @@ struct rtl8169_private { void (*disable)(struct rtl8169_private *); } jumbo_ops; + struct csi_ops { + void (*write)(void __iomem *, int, int); + u32 (*read)(void __iomem *, int); + } csi_ops; + int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); int (*get_settings)(struct net_device *, struct ethtool_cmd *); void (*phy_reset_enable)(struct rtl8169_private *tp); @@ -772,6 +792,8 @@ MODULE_FIRMWARE(FIRMWARE_8168E_3); MODULE_FIRMWARE(FIRMWARE_8105E_1); MODULE_FIRMWARE(FIRMWARE_8168F_1); MODULE_FIRMWARE(FIRMWARE_8168F_2); +MODULE_FIRMWARE(FIRMWARE_8402_1); +MODULE_FIRMWARE(FIRMWARE_8411_1); static void rtl_lock_work(struct rtl8169_private *tp) { @@ -1082,40 +1104,6 @@ static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr) return value; } -static void rtl_csi_write(void __iomem *ioaddr, int addr, int value) -{ - unsigned int i; - - RTL_W32(CSIDR, value); - RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | - CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); - - for (i = 0; i < 100; i++) { - if (!(RTL_R32(CSIAR) & CSIAR_FLAG)) - break; - udelay(10); - } -} - -static u32 rtl_csi_read(void __iomem *ioaddr, int addr) -{ - u32 value = ~0x00; - unsigned int i; - - RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | - CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); - - for (i = 0; i < 100; i++) { - if (RTL_R32(CSIAR) & CSIAR_FLAG) { - value = RTL_R32(CSIDR); - break; - } - udelay(10); - } - - return value; -} - static void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type) { @@ -1285,7 +1273,8 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) if (!netif_running(dev)) return; - if (tp->mac_version == RTL_GIGA_MAC_VER_34) { + if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_38) { if (RTL_R8(PHYstatus) & _1000bpsF) { rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, 0x00000011, ERIAR_EXGMAC); @@ -1320,6 +1309,16 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, 0x0000003f, ERIAR_EXGMAC); } + } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { + if (RTL_R8(PHYstatus) & _10bps) { + rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011, + 0x4d02, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_0011, + 0x0060, ERIAR_EXGMAC); + } else { + rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011, + 0x0000, ERIAR_EXGMAC); + } } } @@ -1400,7 +1399,6 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) u16 reg; u8 mask; } cfg[] = { - { WAKE_ANY, Config1, PMEnable }, { WAKE_PHY, Config3, LinkUp }, { WAKE_MAGIC, Config3, MagicPacket }, { WAKE_UCAST, Config5, UWF }, @@ -1408,16 +1406,32 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) { WAKE_MCAST, Config5, MWF }, { WAKE_ANY, Config5, LanWake } }; + u8 options; RTL_W8(Cfg9346, Cfg9346_Unlock); for (i = 0; i < ARRAY_SIZE(cfg); i++) { - u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; + options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; if (wolopts & cfg[i].opt) options |= cfg[i].mask; RTL_W8(cfg[i].reg, options); } + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01 ... 
RTL_GIGA_MAC_VER_17: + options = RTL_R8(Config1) & ~PMEnable; + if (wolopts) + options |= PMEnable; + RTL_W8(Config1, options); + break; + default: + options = RTL_R8(Config2) & ~PME_SIGNAL; + if (wolopts) + options |= PME_SIGNAL; + RTL_W8(Config2, options); + break; + } + RTL_W8(Cfg9346, Cfg9346_Lock); } @@ -1857,6 +1871,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_strings = rtl8169_get_strings, .get_sset_count = rtl8169_get_sset_count, .get_ethtool_stats = rtl8169_get_ethtool_stats, + .get_ts_info = ethtool_op_get_ts_info, }; static void rtl8169_get_mac_version(struct rtl8169_private *tp, @@ -1880,6 +1895,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, int mac_version; } mac_info[] = { /* 8168F family. */ + { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 }, { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 }, { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 }, @@ -1917,6 +1933,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, /* 8101 family. */ + { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 }, { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 }, { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 }, { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, @@ -3017,6 +3034,28 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x1f, 0x0000); } +static void rtl8168f_hw_phy_config(struct rtl8169_private *tp) +{ + /* For 4-corner performance improve */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b80); + rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); + + /* Improve 10M EEE waveform */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); +} + static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp) { static const struct phy_reg phy_reg_init[] = { @@ -3058,24 +3097,7 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp) rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); - /* For 4-corner performance improve */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b80); - rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* PHY auto speed down */ - rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x002d); - rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); - - /* Improve 10M EEE waveform */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b86); - rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); + rtl8168f_hw_phy_config(tp); /* Improve 2-pair detection performance */ rtl_writephy(tp, 0x1f, 0x0005); @@ -3088,23 +3110,104 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp) { rtl_apply_firmware(tp); - /* For 4-corner performance improve */ + rtl8168f_hw_phy_config(tp); +} + +static void rtl8411_hw_phy_config(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0003 }, + { 0x09, 0xa20f }, + { 0x1f, 0x0000 }, + + /* Modify green table for giga & fnet */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b55 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b5e }, 
+ { 0x06, 0x0000 }, + { 0x05, 0x8b67 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b70 }, + { 0x06, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0007 }, + { 0x1e, 0x0078 }, + { 0x17, 0x0000 }, + { 0x19, 0x00aa }, + { 0x1f, 0x0000 }, + + /* Modify green table for 10M */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b79 }, + { 0x06, 0xaa00 }, + { 0x1f, 0x0000 }, + + /* Disable hiimpedance detection (RTCT) */ + { 0x1f, 0x0003 }, + { 0x01, 0x328a }, + { 0x1f, 0x0000 } + }; + + + rtl_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp); + + /* Improve 2-pair detection performance */ rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b80); - rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); - /* PHY auto speed down */ - rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x002d); - rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* Modify green table for giga */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b54); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800); + rtl_writephy(tp, 0x05, 0x8b5d); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800); + rtl_writephy(tp, 0x05, 0x8a7c); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x05, 0x8a7f); + rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000); + rtl_writephy(tp, 0x05, 0x8a82); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x05, 0x8a85); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x05, 0x8a88); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); rtl_writephy(tp, 0x1f, 0x0000); - rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); - /* Improve 10M EEE waveform */ + /* uc same-seed solution */ rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b86); - rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* eee setting */ + rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0020); + rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 0x4007); + rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0d, 0x0000); + + /* Green feature */ + rtl_writephy(tp, 0x1f, 0x0003); + rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001); + rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); rtl_writephy(tp, 0x1f, 0x0000); } @@ -3151,6 +3254,25 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp) rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); } +static void rtl8402_hw_phy_config(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + /* Disable ALDPS before setting firmware */ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x18, 0x0310); + msleep(20); + + rtl_apply_firmware(tp); + + /* EEE setting */ + rtl_eri_write(ioaddr, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x10, 0x401f); + rtl_writephy(tp, 0x19, 0x7030); + rtl_writephy(tp, 0x1f, 0x0000); +} + static void rtl_hw_phy_config(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); @@ -3239,6 +3361,14 @@ static void rtl_hw_phy_config(struct net_device *dev) rtl8168f_2_hw_phy_config(tp); break; + case 
RTL_GIGA_MAC_VER_37: + rtl8402_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_38: + rtl8411_hw_phy_config(tp); + break; + default: break; } @@ -3476,6 +3606,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_32: case RTL_GIGA_MAC_VER_33: case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); break; @@ -3511,15 +3643,45 @@ static void r810x_phy_power_up(struct rtl8169_private *tp) static void r810x_pll_power_down(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + if (rtl_wol_pll_power_down(tp)) return; r810x_phy_power_down(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_16: + break; + default: + RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); + break; + } } static void r810x_pll_power_up(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + r810x_phy_power_up(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_16: + break; + default: + RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); + break; + } } static void r8168_phy_power_up(struct rtl8169_private *tp) @@ -3623,13 +3785,6 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; - if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || - tp->mac_version == RTL_GIGA_MAC_VER_28 || - tp->mac_version == RTL_GIGA_MAC_VER_31) && - r8168dp_check_dash(tp)) { - return; - } - switch (tp->mac_version) { case RTL_GIGA_MAC_VER_25: case RTL_GIGA_MAC_VER_26: @@ -3674,6 +3829,7 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_16: case RTL_GIGA_MAC_VER_29: case RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_37: ops->down = r810x_pll_power_down; ops->up = r810x_pll_power_up; break; @@ -3698,6 +3854,7 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_34: case RTL_GIGA_MAC_VER_35: case RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: ops->down = r8168_pll_power_down; ops->up = r8168_pll_power_up; break; @@ -3983,7 +4140,9 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) udelay(20); } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 || tp->mac_version == RTL_GIGA_MAC_VER_35 || - tp->mac_version == RTL_GIGA_MAC_VER_36) { + tp->mac_version == RTL_GIGA_MAC_VER_36 || + tp->mac_version == RTL_GIGA_MAC_VER_37 || + tp->mac_version == RTL_GIGA_MAC_VER_38) { RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); while (!(RTL_R32(TxConfig) & TXCFG_EMPTY)) udelay(100); @@ -4189,22 +4348,141 @@ static void rtl_hw_start_8169(struct net_device *dev) RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); } -static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits) +static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + if (tp->csi_ops.write) + tp->csi_ops.write(tp->mmio_addr, addr, value); +} + +static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) +{ + if (tp->csi_ops.read) + return tp->csi_ops.read(tp->mmio_addr, addr); + else + return ~0; +} + +static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits) { u32 csi; - csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff; - rtl_csi_write(ioaddr, 0x070c, csi | bits); + csi = 
rtl_csi_read(tp, 0x070c) & 0x00ffffff; + rtl_csi_write(tp, 0x070c, csi | bits); +} + +static void rtl_csi_access_enable_1(struct rtl8169_private *tp) +{ + rtl_csi_access_enable(tp, 0x17000000); +} + +static void rtl_csi_access_enable_2(struct rtl8169_private *tp) +{ + rtl_csi_access_enable(tp, 0x27000000); +} + +static void r8169_csi_write(void __iomem *ioaddr, int addr, int value) +{ + unsigned int i; + + RTL_W32(CSIDR, value); + RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + for (i = 0; i < 100; i++) { + if (!(RTL_R32(CSIAR) & CSIAR_FLAG)) + break; + udelay(10); + } +} + +static u32 r8169_csi_read(void __iomem *ioaddr, int addr) +{ + u32 value = ~0x00; + unsigned int i; + + RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + for (i = 0; i < 100; i++) { + if (RTL_R32(CSIAR) & CSIAR_FLAG) { + value = RTL_R32(CSIDR); + break; + } + udelay(10); + } + + return value; +} + +static void r8402_csi_write(void __iomem *ioaddr, int addr, int value) +{ + unsigned int i; + + RTL_W32(CSIDR, value); + RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | + CSIAR_FUNC_NIC); + + for (i = 0; i < 100; i++) { + if (!(RTL_R32(CSIAR) & CSIAR_FLAG)) + break; + udelay(10); + } } -static void rtl_csi_access_enable_1(void __iomem *ioaddr) +static u32 r8402_csi_read(void __iomem *ioaddr, int addr) { - rtl_csi_access_enable(ioaddr, 0x17000000); + u32 value = ~0x00; + unsigned int i; + + RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + for (i = 0; i < 100; i++) { + if (RTL_R32(CSIAR) & CSIAR_FLAG) { + value = RTL_R32(CSIDR); + break; + } + udelay(10); + } + + return value; } -static void rtl_csi_access_enable_2(void __iomem *ioaddr) +static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp) { - rtl_csi_access_enable(ioaddr, 0x27000000); + struct csi_ops *ops = &tp->csi_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01: + case RTL_GIGA_MAC_VER_02: + case RTL_GIGA_MAC_VER_03: + case RTL_GIGA_MAC_VER_04: + case RTL_GIGA_MAC_VER_05: + case RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_14: + case RTL_GIGA_MAC_VER_15: + case RTL_GIGA_MAC_VER_16: + case RTL_GIGA_MAC_VER_17: + ops->write = NULL; + ops->read = NULL; + break; + + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: + ops->write = r8402_csi_write; + ops->read = r8402_csi_read; + break; + + default: + ops->write = r8169_csi_write; + ops->read = r8169_csi_read; + break; + } } struct ephy_info { @@ -4261,8 +4539,11 @@ static void rtl_enable_clock_request(struct pci_dev *pdev) PktCntrDisable | \ Mac_dbgo_sel) -static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168bb(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); @@ -4271,17 +4552,22 @@ static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev) (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); } -static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168bef(struct rtl8169_private *tp) { - rtl_hw_start_8168bb(ioaddr, pdev); + void __iomem *ioaddr = tp->mmio_addr; 
+ + rtl_hw_start_8168bb(tp); RTL_W8(MaxTxPacketSize, TxPacketMax); RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); } -static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev) +static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + RTL_W8(Config1, RTL_R8(Config1) | Speed_down); RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); @@ -4293,8 +4579,9 @@ static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev) RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); } -static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; static const struct ephy_info e_info_8168cp[] = { { 0x01, 0, 0x0001 }, { 0x02, 0x0800, 0x1000 }, @@ -4303,16 +4590,19 @@ static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev) { 0x07, 0, 0x2000 } }; - rtl_csi_access_enable_2(ioaddr); + rtl_csi_access_enable_2(tp); rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); - __rtl_hw_start_8168cp(ioaddr, pdev); + __rtl_hw_start_8168cp(tp); } -static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(ioaddr); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); @@ -4321,9 +4611,12 @@ static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev) RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); } -static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(ioaddr); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); @@ -4337,52 +4630,57 @@ static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev) RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); } -static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; static const struct ephy_info e_info_8168c_1[] = { { 0x02, 0x0800, 0x1000 }, { 0x03, 0, 0x0002 }, { 0x06, 0x0080, 0x0000 } }; - rtl_csi_access_enable_2(ioaddr); + rtl_csi_access_enable_2(tp); RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1)); - __rtl_hw_start_8168cp(ioaddr, pdev); + __rtl_hw_start_8168cp(tp); } -static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; static const struct ephy_info e_info_8168c_2[] = { { 0x01, 0, 0x0001 }, { 0x03, 0x0400, 0x0220 } }; - rtl_csi_access_enable_2(ioaddr); + rtl_csi_access_enable_2(tp); rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); - __rtl_hw_start_8168cp(ioaddr, pdev); + __rtl_hw_start_8168cp(tp); } -static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168c_3(struct rtl8169_private *tp) { - rtl_hw_start_8168c_2(ioaddr, pdev); + rtl_hw_start_8168c_2(tp); } -static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev) +static 
void rtl_hw_start_8168c_4(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(ioaddr); + rtl_csi_access_enable_2(tp); - __rtl_hw_start_8168cp(ioaddr, pdev); + __rtl_hw_start_8168cp(tp); } -static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168d(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(ioaddr); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); rtl_disable_clock_request(pdev); @@ -4393,9 +4691,12 @@ static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev) RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); } -static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168dp(struct rtl8169_private *tp) { - rtl_csi_access_enable_1(ioaddr); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_1(tp); rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); @@ -4404,8 +4705,10 @@ static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev) rtl_disable_clock_request(pdev); } -static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; static const struct ephy_info e_info_8168d_4[] = { { 0x0b, ~0, 0x48 }, { 0x19, 0x20, 0x50 }, @@ -4413,7 +4716,7 @@ static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev) }; int i; - rtl_csi_access_enable_1(ioaddr); + rtl_csi_access_enable_1(tp); rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); @@ -4430,8 +4733,10 @@ static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev) rtl_enable_clock_request(pdev); } -static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; static const struct ephy_info e_info_8168e_1[] = { { 0x00, 0x0200, 0x0100 }, { 0x00, 0x0000, 0x0004 }, @@ -4448,7 +4753,7 @@ static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev) { 0x0a, 0x0000, 0x0040 } }; - rtl_csi_access_enable_2(ioaddr); + rtl_csi_access_enable_2(tp); rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); @@ -4465,14 +4770,16 @@ static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev) RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); } -static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; static const struct ephy_info e_info_8168e_2[] = { { 0x09, 0x0000, 0x0080 }, { 0x19, 0x0000, 0x0224 } }; - rtl_csi_access_enable_1(ioaddr); + rtl_csi_access_enable_1(tp); rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); @@ -4503,18 +4810,12 @@ static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev) RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); } -static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168f(struct rtl8169_private *tp) { - static const struct ephy_info e_info_8168f_1[] = { - { 0x06, 0x00c0, 0x0020 }, - { 0x08, 0x0001, 0x0002 }, - { 0x09, 0x0000, 0x0080 }, - { 0x19, 0x0000, 0x0224 } - }; - - rtl_csi_access_enable_1(ioaddr); + void __iomem *ioaddr = 
tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; - rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); + rtl_csi_access_enable_2(tp); rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); @@ -4528,8 +4829,6 @@ static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev) rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); - rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, - ERIAR_EXGMAC); RTL_W8(MaxTxPacketSize, EarlySize); @@ -4537,20 +4836,54 @@ static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev) RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); + RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); + RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x08, 0x0001, 0x0002 }, + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 } + }; + + rtl_hw_start_8168f(tp); + + rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); + + rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, + ERIAR_EXGMAC); /* Adjust EEE LED frequency */ RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); +} - RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); - RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); - RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); +static void rtl_hw_start_8411(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x1e, 0x0000, 0x4000 }, + { 0x19, 0x0000, 0x0224 } + }; + + rtl_hw_start_8168f(tp); + + rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); + + rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, + ERIAR_EXGMAC); } static void rtl_hw_start_8168(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; RTL_W8(Cfg9346, Cfg9346_Unlock); @@ -4581,67 +4914,71 @@ static void rtl_hw_start_8168(struct net_device *dev) switch (tp->mac_version) { case RTL_GIGA_MAC_VER_11: - rtl_hw_start_8168bb(ioaddr, pdev); + rtl_hw_start_8168bb(tp); break; case RTL_GIGA_MAC_VER_12: case RTL_GIGA_MAC_VER_17: - rtl_hw_start_8168bef(ioaddr, pdev); + rtl_hw_start_8168bef(tp); break; case RTL_GIGA_MAC_VER_18: - rtl_hw_start_8168cp_1(ioaddr, pdev); + rtl_hw_start_8168cp_1(tp); break; case RTL_GIGA_MAC_VER_19: - rtl_hw_start_8168c_1(ioaddr, pdev); + rtl_hw_start_8168c_1(tp); break; case RTL_GIGA_MAC_VER_20: - rtl_hw_start_8168c_2(ioaddr, pdev); + rtl_hw_start_8168c_2(tp); break; case RTL_GIGA_MAC_VER_21: - rtl_hw_start_8168c_3(ioaddr, pdev); + rtl_hw_start_8168c_3(tp); break; case RTL_GIGA_MAC_VER_22: - rtl_hw_start_8168c_4(ioaddr, pdev); + rtl_hw_start_8168c_4(tp); break; case RTL_GIGA_MAC_VER_23: - rtl_hw_start_8168cp_2(ioaddr, pdev); + rtl_hw_start_8168cp_2(tp); break; case RTL_GIGA_MAC_VER_24: - rtl_hw_start_8168cp_3(ioaddr, pdev); + rtl_hw_start_8168cp_3(tp); break; case RTL_GIGA_MAC_VER_25: case RTL_GIGA_MAC_VER_26: case RTL_GIGA_MAC_VER_27: - rtl_hw_start_8168d(ioaddr, pdev); + rtl_hw_start_8168d(tp); break; case RTL_GIGA_MAC_VER_28: - rtl_hw_start_8168d_4(ioaddr, pdev); + 
rtl_hw_start_8168d_4(tp); break; case RTL_GIGA_MAC_VER_31: - rtl_hw_start_8168dp(ioaddr, pdev); + rtl_hw_start_8168dp(tp); break; case RTL_GIGA_MAC_VER_32: case RTL_GIGA_MAC_VER_33: - rtl_hw_start_8168e_1(ioaddr, pdev); + rtl_hw_start_8168e_1(tp); break; case RTL_GIGA_MAC_VER_34: - rtl_hw_start_8168e_2(ioaddr, pdev); + rtl_hw_start_8168e_2(tp); break; case RTL_GIGA_MAC_VER_35: case RTL_GIGA_MAC_VER_36: - rtl_hw_start_8168f_1(ioaddr, pdev); + rtl_hw_start_8168f_1(tp); + break; + + case RTL_GIGA_MAC_VER_38: + rtl_hw_start_8411(tp); break; default: @@ -4668,8 +5005,10 @@ static void rtl_hw_start_8168(struct net_device *dev) PktCntrDisable | \ Mac_dbgo_sel) -static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; static const struct ephy_info e_info_8102e_1[] = { { 0x01, 0, 0x6e65 }, { 0x02, 0, 0x091f }, @@ -4682,7 +5021,7 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) }; u8 cfg1; - rtl_csi_access_enable_2(ioaddr); + rtl_csi_access_enable_2(tp); RTL_W8(DBG_REG, FIX_NAK_1); @@ -4699,9 +5038,12 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); } -static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(ioaddr); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); @@ -4709,15 +5051,16 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev) RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); } -static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8102e_3(struct rtl8169_private *tp) { - rtl_hw_start_8102e_2(ioaddr, pdev); + rtl_hw_start_8102e_2(tp); - rtl_ephy_write(ioaddr, 0x03, 0xc2f9); + rtl_ephy_write(tp->mmio_addr, 0x03, 0xc2f9); } -static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; static const struct ephy_info e_info_8105e_1[] = { { 0x07, 0, 0x4000 }, { 0x19, 0, 0x0200 }, @@ -4741,12 +5084,44 @@ static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev) rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); } -static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) { - rtl_hw_start_8105e_1(ioaddr, pdev); + void __iomem *ioaddr = tp->mmio_addr; + + rtl_hw_start_8105e_1(tp); rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000); } +static void rtl_hw_start_8402(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8402[] = { + { 0x19, 0xffff, 0xff64 }, + { 0x1e, 0, 0x4000 } + }; + + rtl_csi_access_enable_2(tp); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + + rtl_ephy_init(ioaddr, e_info_8402, ARRAY_SIZE(e_info_8402)); + + rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 
0x00000002, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, + ERIAR_EXGMAC); +} + static void rtl_hw_start_8101(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); @@ -4770,22 +5145,26 @@ static void rtl_hw_start_8101(struct net_device *dev) switch (tp->mac_version) { case RTL_GIGA_MAC_VER_07: - rtl_hw_start_8102e_1(ioaddr, pdev); + rtl_hw_start_8102e_1(tp); break; case RTL_GIGA_MAC_VER_08: - rtl_hw_start_8102e_3(ioaddr, pdev); + rtl_hw_start_8102e_3(tp); break; case RTL_GIGA_MAC_VER_09: - rtl_hw_start_8102e_2(ioaddr, pdev); + rtl_hw_start_8102e_2(tp); break; case RTL_GIGA_MAC_VER_29: - rtl_hw_start_8105e_1(ioaddr, pdev); + rtl_hw_start_8105e_1(tp); break; case RTL_GIGA_MAC_VER_30: - rtl_hw_start_8105e_2(ioaddr, pdev); + rtl_hw_start_8105e_2(tp); + break; + + case RTL_GIGA_MAC_VER_37: + rtl_hw_start_8402(tp); break; } @@ -6182,6 +6561,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rtl_init_mdio_ops(tp); rtl_init_pll_power_ops(tp); rtl_init_jumbo_ops(tp); + rtl_init_csi_ops(tp); rtl8169_print_mac_version(tp); diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 3fb2355af37..46df3a04030 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -4,11 +4,11 @@ config SH_ETH tristate "Renesas SuperH Ethernet support" - depends on SUPERH && \ + depends on (SUPERH || ARCH_SHMOBILE) && \ (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \ CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \ CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \ - CPU_SUBTYPE_SH7757) + CPU_SUBTYPE_SH7757 || ARCH_R8A7740) select CRC32 select NET_CORE select MII @@ -17,4 +17,5 @@ config SH_ETH ---help--- Renesas SuperH Ethernet device driver. This driver supporting CPUs are: - - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763 and SH7757. + - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757, + and R8A7740. 
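
The r8169 hunks above amount to one mechanical conversion: each rtl_hw_start_* helper now takes the driver private data and derives the MMIO base and PCI device from it, so the dispatch switches in rtl_hw_start_8168() and rtl_hw_start_8101() pass only tp. A minimal sketch of the pattern, assuming only fields already visible in the diff (tp->mmio_addr, tp->pci_dev); the helper name and body below are illustrative, not part of the patch:

    static void rtl_hw_start_example(struct rtl8169_private *tp)
    {
            /* Derive what the old prototype received as two parameters. */
            void __iomem *ioaddr = tp->mmio_addr;
            struct pci_dev *pdev = tp->pci_dev;

            /* The body is unchanged; only the entry point signature differs. */
            rtl_csi_access_enable_2(tp);
            rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
            RTL_W8(MaxTxPacketSize, EarlySize);
    }

The new rtl_hw_start_8402()/rtl_hw_start_8411() entries and the rtl_init_csi_ops() hook follow the same convention, so no call site needs additional parameters.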
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index d63e09b29a9..be3c2217916 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -386,6 +386,114 @@ static void sh_eth_reset_hw_crc(struct net_device *ndev) sh_eth_write(ndev, 0x0, CSMR); } +#elif defined(CONFIG_ARCH_R8A7740) +#define SH_ETH_HAS_TSU 1 +static void sh_eth_chip_reset(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + unsigned long mii; + + /* reset device */ + sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); + mdelay(1); + + switch (mdp->phy_interface) { + case PHY_INTERFACE_MODE_GMII: + mii = 2; + break; + case PHY_INTERFACE_MODE_MII: + mii = 1; + break; + case PHY_INTERFACE_MODE_RMII: + default: + mii = 0; + break; + } + sh_eth_write(ndev, mii, RMII_MII); +} + +static void sh_eth_reset(struct net_device *ndev) +{ + int cnt = 100; + + sh_eth_write(ndev, EDSR_ENALL, EDSR); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); + while (cnt > 0) { + if (!(sh_eth_read(ndev, EDMR) & 0x3)) + break; + mdelay(1); + cnt--; + } + if (cnt == 0) + printk(KERN_ERR "Device reset fail\n"); + + /* Table Init */ + sh_eth_write(ndev, 0x0, TDLAR); + sh_eth_write(ndev, 0x0, TDFAR); + sh_eth_write(ndev, 0x0, TDFXR); + sh_eth_write(ndev, 0x0, TDFFR); + sh_eth_write(ndev, 0x0, RDLAR); + sh_eth_write(ndev, 0x0, RDFAR); + sh_eth_write(ndev, 0x0, RDFXR); + sh_eth_write(ndev, 0x0, RDFFR); +} + +static void sh_eth_set_duplex(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (mdp->duplex) /* Full */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); + else /* Half */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); +} + +static void sh_eth_set_rate(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, GECMR_10, GECMR); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, GECMR_100, GECMR); + break; + case 1000: /* 1000BASE */ + sh_eth_write(ndev, GECMR_1000, GECMR); + break; + default: + break; + } +} + +/* R8A7740 */ +static struct sh_eth_cpu_data sh_eth_my_cpu_data = { + .chip_reset = sh_eth_chip_reset, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate, + + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ + EESR_ECI, + .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ + EESR_TFE, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .no_trimd = 1, + .no_ade = 1, + .tsu = 1, +}; + #elif defined(CONFIG_CPU_SUBTYPE_SH7619) #define SH_ETH_RESET_DEFAULT 1 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { @@ -443,7 +551,7 @@ static void sh_eth_reset(struct net_device *ndev) } #endif -#if defined(CONFIG_CPU_SH4) +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) static void sh_eth_set_receive_align(struct sk_buff *skb) { int reserve; @@ -919,6 +1027,10 @@ static int sh_eth_rx(struct net_device *ndev) desc_status = edmac_to_cpu(mdp, rxdesc->status); pkt_len = rxdesc->frame_length; +#if defined(CONFIG_ARCH_R8A7740) + desc_status >>= 16; +#endif + if (--boguscnt < 0) break; diff --git a/drivers/net/ethernet/renesas/sh_eth.h 
b/drivers/net/ethernet/renesas/sh_eth.h index 0fa14afce23..57b8e1fc5d1 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -372,7 +372,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { }; /* Driver's parameters */ -#if defined(CONFIG_CPU_SH4) +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) #define SH4_SKB_RX_ALIGN 32 #else #define SH2_SH3_SKB_RX_ALIGN 2 @@ -381,7 +381,8 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { /* * Register's bits */ -#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) +#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) ||\ + defined(CONFIG_ARCH_R8A7740) /* EDSR */ enum EDSR_BIT { EDSR_ENT = 0x01, EDSR_ENR = 0x02, diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c index 1895605abb3..8e9fda0c7ae 100644 --- a/drivers/net/ethernet/s6gmac.c +++ b/drivers/net/ethernet/s6gmac.c @@ -937,7 +937,7 @@ static struct net_device_stats *s6gmac_stats(struct net_device *dev) do { unsigned long flags; spin_lock_irqsave(&pd->lock, flags); - for (i = 0; i < sizeof(pd->stats) / sizeof(unsigned long); i++) + for (i = 0; i < ARRAY_SIZE(pd->stats); i++) pd->stats[i] = pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1); s6gmac_stats_collect(pd, &statinf[0][0]); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 4a0005342e6..b95f2e1b33f 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -656,25 +656,30 @@ static void efx_stop_datapath(struct efx_nic *efx) struct efx_channel *channel; struct efx_tx_queue *tx_queue; struct efx_rx_queue *rx_queue; + struct pci_dev *dev = efx->pci_dev; int rc; EFX_ASSERT_RESET_SERIALISED(efx); BUG_ON(efx->port_enabled); - rc = efx_nic_flush_queues(efx); - if (rc && EFX_WORKAROUND_7803(efx)) { - /* Schedule a reset to recover from the flush failure. The - * descriptor caches reference memory we're about to free, - * but falcon_reconfigure_mac_wrapper() won't reconnect - * the MACs because of the pending reset. */ - netif_err(efx, drv, efx->net_dev, - "Resetting to recover from flush failure\n"); - efx_schedule_reset(efx, RESET_TYPE_ALL); - } else if (rc) { - netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); - } else { - netif_dbg(efx, drv, efx->net_dev, - "successfully flushed all queues\n"); + /* Only perform flush if dma is enabled */ + if (dev->is_busmaster) { + rc = efx_nic_flush_queues(efx); + + if (rc && EFX_WORKAROUND_7803(efx)) { + /* Schedule a reset to recover from the flush failure. The + * descriptor caches reference memory we're about to free, + * but falcon_reconfigure_mac_wrapper() won't reconnect + * the MACs because of the pending reset. 
*/ + netif_err(efx, drv, efx->net_dev, + "Resetting to recover from flush failure\n"); + efx_schedule_reset(efx, RESET_TYPE_ALL); + } else if (rc) { + netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); + } else { + netif_dbg(efx, drv, efx->net_dev, + "successfully flushed all queues\n"); + } } efx_for_each_channel(channel, efx) { @@ -2492,8 +2497,8 @@ static void efx_pci_remove(struct pci_dev *pci_dev) efx_fini_io(efx); netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); - pci_set_drvdata(pci_dev, NULL); efx_fini_struct(efx); + pci_set_drvdata(pci_dev, NULL); free_netdev(efx->net_dev); }; @@ -2695,6 +2700,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, fail2: efx_fini_struct(efx); fail1: + pci_set_drvdata(pci_dev, NULL); WARN_ON(rc > 0); netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc); free_netdev(net_dev); diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index f22f45f515a..03ded364c8d 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -1023,7 +1023,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, return -EINVAL; /* Is it a default UC or MC filter? */ - if (!compare_ether_addr(mac_mask->h_dest, mac_addr_mc_mask) && + if (ether_addr_equal(mac_mask->h_dest, mac_addr_mc_mask) && vlan_tag_mask == 0) { if (is_multicast_ether_addr(mac_entry->h_dest)) rc = efx_filter_set_mc_def(&spec); @@ -1108,6 +1108,39 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, return 0; } +static int efx_ethtool_get_module_eeprom(struct net_device *net_dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int ret; + + if (!efx->phy_op || !efx->phy_op->get_module_eeprom) + return -EOPNOTSUPP; + + mutex_lock(&efx->mac_lock); + ret = efx->phy_op->get_module_eeprom(efx, ee, data); + mutex_unlock(&efx->mac_lock); + + return ret; +} + +static int efx_ethtool_get_module_info(struct net_device *net_dev, + struct ethtool_modinfo *modinfo) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int ret; + + if (!efx->phy_op || !efx->phy_op->get_module_info) + return -EOPNOTSUPP; + + mutex_lock(&efx->mac_lock); + ret = efx->phy_op->get_module_info(efx, modinfo); + mutex_unlock(&efx->mac_lock); + + return ret; +} + const struct ethtool_ops efx_ethtool_ops = { .get_settings = efx_ethtool_get_settings, .set_settings = efx_ethtool_set_settings, @@ -1137,4 +1170,6 @@ const struct ethtool_ops efx_ethtool_ops = { .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, .get_rxfh_indir = efx_ethtool_get_rxfh_indir, .set_rxfh_indir = efx_ethtool_set_rxfh_indir, + .get_module_info = efx_ethtool_get_module_info, + .get_module_eeprom = efx_ethtool_get_module_eeprom, }; diff --git a/drivers/net/ethernet/sfc/mcdi_phy.c b/drivers/net/ethernet/sfc/mcdi_phy.c index 7bcad899a93..13cb40fe90c 100644 --- a/drivers/net/ethernet/sfc/mcdi_phy.c +++ b/drivers/net/ethernet/sfc/mcdi_phy.c @@ -739,6 +739,80 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx, return NULL; } +#define SFP_PAGE_SIZE 128 +#define SFP_NUM_PAGES 2 +static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, + struct ethtool_eeprom *ee, u8 *data) +{ + u8 outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX]; + u8 inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN]; + size_t outlen; + int rc; + unsigned int payload_len; + unsigned int space_remaining = ee->len; + unsigned int page; + unsigned int page_off; + unsigned int to_copy; + u8 *user_data = data; + + 
BUILD_BUG_ON(SFP_PAGE_SIZE * SFP_NUM_PAGES != ETH_MODULE_SFF_8079_LEN); + + page_off = ee->offset % SFP_PAGE_SIZE; + page = ee->offset / SFP_PAGE_SIZE; + + while (space_remaining && (page < SFP_NUM_PAGES)) { + MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO, + inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), + &outlen); + if (rc) + return rc; + + if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST + + SFP_PAGE_SIZE)) + return -EIO; + + payload_len = MCDI_DWORD(outbuf, + GET_PHY_MEDIA_INFO_OUT_DATALEN); + if (payload_len != SFP_PAGE_SIZE) + return -EIO; + + /* Copy as much as we can into data */ + payload_len -= page_off; + to_copy = (space_remaining < payload_len) ? + space_remaining : payload_len; + + memcpy(user_data, + outbuf + page_off + + MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST, + to_copy); + + space_remaining -= to_copy; + user_data += to_copy; + page_off = 0; + page++; + } + + return 0; +} + +static int efx_mcdi_phy_get_module_info(struct efx_nic *efx, + struct ethtool_modinfo *modinfo) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + + switch (phy_cfg->media) { + case MC_CMD_MEDIA_SFP_PLUS: + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + return 0; + default: + return -EOPNOTSUPP; + } +} + const struct efx_phy_operations efx_mcdi_phy_ops = { .probe = efx_mcdi_phy_probe, .init = efx_port_dummy_op_int, @@ -751,4 +825,6 @@ const struct efx_phy_operations efx_mcdi_phy_ops = { .test_alive = efx_mcdi_phy_test_alive, .run_tests = efx_mcdi_phy_run_tests, .test_name = efx_mcdi_phy_test_name, + .get_module_eeprom = efx_mcdi_phy_get_module_eeprom, + .get_module_info = efx_mcdi_phy_get_module_info, }; diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index f0385e1fb2d..0e575359af1 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -252,8 +252,6 @@ struct efx_rx_page_state { * @max_fill: RX descriptor maximum fill level (<= ring size) * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill * (<= @max_fill) - * @fast_fill_limit: The level to which a fast fill will fill - * (@fast_fill_trigger <= @fast_fill_limit <= @max_fill) * @min_fill: RX descriptor minimum non-zero fill level. * This records the minimum fill level observed when a ring * refill was triggered. 
@@ -274,7 +272,6 @@ struct efx_rx_queue { int removed_count; unsigned int max_fill; unsigned int fast_fill_trigger; - unsigned int fast_fill_limit; unsigned int min_fill; unsigned int min_overfill; unsigned int alloc_page_count; @@ -522,6 +519,11 @@ struct efx_phy_operations { int (*test_alive) (struct efx_nic *efx); const char *(*test_name) (struct efx_nic *efx, unsigned int index); int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags); + int (*get_module_eeprom) (struct efx_nic *efx, + struct ethtool_eeprom *ee, + u8 *data); + int (*get_module_info) (struct efx_nic *efx, + struct ethtool_modinfo *modinfo); }; /** diff --git a/drivers/net/ethernet/sfc/qt202x_phy.c b/drivers/net/ethernet/sfc/qt202x_phy.c index 8a7caf88ffb..326a28637f3 100644 --- a/drivers/net/ethernet/sfc/qt202x_phy.c +++ b/drivers/net/ethernet/sfc/qt202x_phy.c @@ -449,6 +449,37 @@ static void qt202x_phy_remove(struct efx_nic *efx) efx->phy_data = NULL; } +static int qt202x_phy_get_module_info(struct efx_nic *efx, + struct ethtool_modinfo *modinfo) +{ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + return 0; +} + +static int qt202x_phy_get_module_eeprom(struct efx_nic *efx, + struct ethtool_eeprom *ee, u8 *data) +{ + int mmd, reg_base, rc, i; + + if (efx->phy_type == PHY_TYPE_QT2025C) { + mmd = MDIO_MMD_PCS; + reg_base = 0xd000; + } else { + mmd = MDIO_MMD_PMAPMD; + reg_base = 0x8007; + } + + for (i = 0; i < ee->len; i++) { + rc = efx_mdio_read(efx, mmd, reg_base + ee->offset + i); + if (rc < 0) + return rc; + data[i] = rc; + } + + return 0; +} + const struct efx_phy_operations falcon_qt202x_phy_ops = { .probe = qt202x_phy_probe, .init = qt202x_phy_init, @@ -459,4 +490,6 @@ const struct efx_phy_operations falcon_qt202x_phy_ops = { .get_settings = qt202x_phy_get_settings, .set_settings = efx_mdio_set_settings, .test_alive = efx_mdio_test_alive, + .get_module_eeprom = qt202x_phy_get_module_eeprom, + .get_module_info = qt202x_phy_get_module_info, }; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 763fa2fe1a3..243e91f3dff 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -76,12 +76,7 @@ static int rx_alloc_method = RX_ALLOC_METHOD_AUTO; /* This is the percentage fill level below which new RX descriptors * will be added to the RX descriptor ring. */ -static unsigned int rx_refill_threshold = 90; - -/* This is the percentage fill level to which an RX queue will be refilled - * when the "RX refill threshold" is reached. - */ -static unsigned int rx_refill_limit = 95; +static unsigned int rx_refill_threshold; /* * RX maximum head room required. @@ -342,7 +337,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel, * efx_fast_push_rx_descriptors - push new RX descriptors quickly * @rx_queue: RX descriptor queue * This will aim to fill the RX descriptor queue up to - * @rx_queue->@fast_fill_limit. If there is insufficient atomic + * @rx_queue->@max_fill. If there is insufficient atomic * memory to do so, a slow fill will be scheduled. * * The caller must provide serialisation (none is used here). 
In practise, @@ -367,15 +362,14 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) rx_queue->min_fill = fill_level; } - space = rx_queue->fast_fill_limit - fill_level; - if (space < EFX_RX_BATCH) - goto out; + space = rx_queue->max_fill - fill_level; + EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH); netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, "RX queue %d fast-filling descriptor ring from" " level %d to level %d using %s allocation\n", efx_rx_queue_index(rx_queue), fill_level, - rx_queue->fast_fill_limit, + rx_queue->max_fill, channel->rx_alloc_push_pages ? "page" : "skb"); do { @@ -681,7 +675,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) void efx_init_rx_queue(struct efx_rx_queue *rx_queue) { struct efx_nic *efx = rx_queue->efx; - unsigned int max_fill, trigger, limit; + unsigned int max_fill, trigger, max_trigger; netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, "initialising RX queue %d\n", efx_rx_queue_index(rx_queue)); @@ -694,12 +688,17 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue) /* Initialise limit fields */ max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM; - trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; - limit = max_fill * min(rx_refill_limit, 100U) / 100U; + max_trigger = max_fill - EFX_RX_BATCH; + if (rx_refill_threshold != 0) { + trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; + if (trigger > max_trigger) + trigger = max_trigger; + } else { + trigger = max_trigger; + } rx_queue->max_fill = max_fill; rx_queue->fast_fill_trigger = trigger; - rx_queue->fast_fill_limit = limit; /* Set up RX descriptor ring */ rx_queue->enabled = true; @@ -746,5 +745,5 @@ MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers"); module_param(rx_refill_threshold, uint, 0444); MODULE_PARM_DESC(rx_refill_threshold, - "RX descriptor ring fast/slow fill threshold (%)"); + "RX descriptor ring refill threshold (%)"); diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c index a284d644053..32e55664df6 100644 --- a/drivers/net/ethernet/silan/sc92031.c +++ b/drivers/net/ethernet/silan/sc92031.c @@ -39,9 +39,7 @@ #define SC92031_NAME "sc92031" /* BAR 0 is MMIO, BAR 1 is PIO */ -#ifndef SC92031_USE_BAR -#define SC92031_USE_BAR 0 -#endif +#define SC92031_USE_PIO 0 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). 
*/ static int multicast_filter_limit = 64; @@ -366,7 +364,7 @@ static void sc92031_disable_interrupts(struct net_device *dev) mmiowb(); /* wait for any concurrent interrupt/tasklet to finish */ - synchronize_irq(dev->irq); + synchronize_irq(priv->pdev->irq); tasklet_disable(&priv->tasklet); } @@ -1114,10 +1112,13 @@ static void sc92031_tx_timeout(struct net_device *dev) #ifdef CONFIG_NET_POLL_CONTROLLER static void sc92031_poll_controller(struct net_device *dev) { - disable_irq(dev->irq); - if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE) + struct sc92031_priv *priv = netdev_priv(dev); + const int irq = priv->pdev->irq; + + disable_irq(irq); + if (sc92031_interrupt(irq, dev) != IRQ_NONE) sc92031_tasklet((unsigned long)dev); - enable_irq(dev->irq); + enable_irq(irq); } #endif @@ -1402,7 +1403,6 @@ static int __devinit sc92031_probe(struct pci_dev *pdev, struct net_device *dev; struct sc92031_priv *priv; u32 mac0, mac1; - unsigned long base_addr; err = pci_enable_device(pdev); if (unlikely(err < 0)) @@ -1422,7 +1422,7 @@ static int __devinit sc92031_probe(struct pci_dev *pdev, if (unlikely(err < 0)) goto out_request_regions; - port_base = pci_iomap(pdev, SC92031_USE_BAR, 0); + port_base = pci_iomap(pdev, SC92031_USE_PIO, 0); if (unlikely(!port_base)) { err = -EIO; goto out_iomap; @@ -1437,14 +1437,6 @@ static int __devinit sc92031_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); -#if SC92031_USE_BAR == 0 - dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR); - dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR); -#elif SC92031_USE_BAR == 1 - dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR); -#endif - dev->irq = pdev->irq; - /* faked with skb_copy_and_csum_dev */ dev->features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; @@ -1478,13 +1470,9 @@ static int __devinit sc92031_probe(struct pci_dev *pdev, if (err < 0) goto out_register_netdev; -#if SC92031_USE_BAR == 0 - base_addr = dev->mem_start; -#elif SC92031_USE_BAR == 1 - base_addr = dev->base_addr; -#endif printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name, - base_addr, dev->dev_addr, dev->irq); + (long)pci_resource_start(pdev, SC92031_USE_PIO), dev->dev_addr, + pdev->irq); return 0; diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index a9deda8eaf6..4613591b43e 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -729,7 +729,7 @@ static void sis190_tx_interrupt(struct net_device *dev, * The interrupt handler does all of the Rx thread work and cleans up after * the Tx thread. 
*/ -static irqreturn_t sis190_interrupt(int irq, void *__dev) +static irqreturn_t sis190_irq(int irq, void *__dev) { struct net_device *dev = __dev; struct sis190_private *tp = netdev_priv(dev); @@ -772,11 +772,11 @@ out: static void sis190_netpoll(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); - struct pci_dev *pdev = tp->pci_dev; + const int irq = tp->pci_dev->irq; - disable_irq(pdev->irq); - sis190_interrupt(pdev->irq, dev); - enable_irq(pdev->irq); + disable_irq(irq); + sis190_irq(irq, dev); + enable_irq(irq); } #endif @@ -1085,7 +1085,7 @@ static int sis190_open(struct net_device *dev) sis190_request_timer(dev); - rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev); + rc = request_irq(pdev->irq, sis190_irq, IRQF_SHARED, dev->name, dev); if (rc < 0) goto err_release_timer_2; @@ -1097,11 +1097,9 @@ err_release_timer_2: sis190_delete_timer(dev); sis190_rx_clear(tp); err_free_rx_1: - pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing, - tp->rx_dma); + pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma); err_free_tx_0: - pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing, - tp->tx_dma); + pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma); goto out; } @@ -1141,7 +1139,7 @@ static void sis190_down(struct net_device *dev) spin_unlock_irq(&tp->lock); - synchronize_irq(dev->irq); + synchronize_irq(tp->pci_dev->irq); if (!poll_locked) poll_locked++; @@ -1161,7 +1159,7 @@ static int sis190_close(struct net_device *dev) sis190_down(dev); - free_irq(dev->irq, dev); + free_irq(pdev->irq, dev); pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma); pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma); @@ -1884,8 +1882,6 @@ static int __devinit sis190_init_one(struct pci_dev *pdev, dev->netdev_ops = &sis190_netdev_ops; SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops); - dev->irq = pdev->irq; - dev->base_addr = (unsigned long) 0xdead; dev->watchdog_timeo = SIS190_TX_TIMEOUT; spin_lock_init(&tp->lock); @@ -1902,7 +1898,7 @@ static int __devinit sis190_init_one(struct pci_dev *pdev, netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n", pci_name(pdev), sis_chip_info[ent->driver_data].name, - ioaddr, dev->irq, dev->dev_addr); + ioaddr, pdev->irq, dev->dev_addr); netdev_info(dev, "%s mode.\n", (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII"); } diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 5ccf02e7e3a..203d9c6ec23 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -168,6 +168,8 @@ struct sis900_private { unsigned int cur_phy; struct mii_if_info mii_info; + void __iomem *ioaddr; + struct timer_list timer; /* Link status detection timer. 
*/ u8 autong_complete; /* 1: auto-negotiate complete */ @@ -201,13 +203,18 @@ MODULE_PARM_DESC(multicast_filter_limit, "SiS 900/7016 maximum number of filtere MODULE_PARM_DESC(max_interrupt_work, "SiS 900/7016 maximum events handled per interrupt"); MODULE_PARM_DESC(sis900_debug, "SiS 900/7016 bitmapped debugging message level"); +#define sw32(reg, val) iowrite32(val, ioaddr + (reg)) +#define sw8(reg, val) iowrite8(val, ioaddr + (reg)) +#define sr32(reg) ioread32(ioaddr + (reg)) +#define sr16(reg) ioread16(ioaddr + (reg)) + #ifdef CONFIG_NET_POLL_CONTROLLER static void sis900_poll(struct net_device *dev); #endif static int sis900_open(struct net_device *net_dev); static int sis900_mii_probe (struct net_device * net_dev); static void sis900_init_rxfilter (struct net_device * net_dev); -static u16 read_eeprom(long ioaddr, int location); +static u16 read_eeprom(void __iomem *ioaddr, int location); static int mdio_read(struct net_device *net_dev, int phy_id, int location); static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val); static void sis900_timer(unsigned long data); @@ -231,7 +238,7 @@ static u16 sis900_default_phy(struct net_device * net_dev); static void sis900_set_capability( struct net_device *net_dev ,struct mii_phy *phy); static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr); static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr); -static void sis900_set_mode (long ioaddr, int speed, int duplex); +static void sis900_set_mode(struct sis900_private *, int speed, int duplex); static const struct ethtool_ops sis900_ethtool_ops; /** @@ -246,7 +253,8 @@ static const struct ethtool_ops sis900_ethtool_ops; static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev) { - long ioaddr = pci_resource_start(pci_dev, 0); + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; u16 signature; int i; @@ -325,29 +333,30 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev, static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev) { - long ioaddr = net_dev->base_addr; + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; u32 rfcrSave; u32 i; - rfcrSave = inl(rfcr + ioaddr); + rfcrSave = sr32(rfcr); - outl(rfcrSave | RELOAD, ioaddr + cr); - outl(0, ioaddr + cr); + sw32(cr, rfcrSave | RELOAD); + sw32(cr, 0); /* disable packet filtering before setting filter */ - outl(rfcrSave & ~RFEN, rfcr + ioaddr); + sw32(rfcr, rfcrSave & ~RFEN); /* load MAC addr to filter data register */ for (i = 0 ; i < 3 ; i++) { - outl((i << RFADDR_shift), ioaddr + rfcr); - *( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr); + sw32(rfcr, (i << RFADDR_shift)); + *( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr); } /* Store MAC Address in perm_addr */ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); /* enable packet filtering */ - outl(rfcrSave | RFEN, rfcr + ioaddr); + sw32(rfcr, rfcrSave | RFEN); return 1; } @@ -371,31 +380,30 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev, static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev) { - long ioaddr = net_dev->base_addr; - long ee_addr = ioaddr + mear; - u32 waittime = 0; - int i; + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; + int wait, rc = 0; - outl(EEREQ, ee_addr); - while(waittime < 2000) { - if(inl(ee_addr) & 
EEGNT) { + sw32(mear, EEREQ); + for (wait = 0; wait < 2000; wait++) { + if (sr32(mear) & EEGNT) { + u16 *mac = (u16 *)net_dev->dev_addr; + int i; /* get MAC address from EEPROM */ for (i = 0; i < 3; i++) - ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); + mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr); /* Store MAC Address in perm_addr */ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); - outl(EEDONE, ee_addr); - return 1; - } else { - udelay(1); - waittime ++; + rc = 1; + break; } + udelay(1); } - outl(EEDONE, ee_addr); - return 0; + sw32(mear, EEDONE); + return rc; } static const struct net_device_ops sis900_netdev_ops = { @@ -433,7 +441,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, struct pci_dev *dev; dma_addr_t ring_dma; void *ring_space; - long ioaddr; + void __iomem *ioaddr; int i, ret; const char *card_name = card_names[pci_id->driver_data]; const char *dev_name = pci_name(pci_dev); @@ -464,14 +472,17 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, SET_NETDEV_DEV(net_dev, &pci_dev->dev); /* We do a request_region() to register /proc/ioports info. */ - ioaddr = pci_resource_start(pci_dev, 0); ret = pci_request_regions(pci_dev, "sis900"); if (ret) goto err_out; + /* IO region. */ + ioaddr = pci_iomap(pci_dev, 0, 0); + if (!ioaddr) + goto err_out_cleardev; + sis_priv = netdev_priv(net_dev); - net_dev->base_addr = ioaddr; - net_dev->irq = pci_dev->irq; + sis_priv->ioaddr = ioaddr; sis_priv->pci_dev = pci_dev; spin_lock_init(&sis_priv->lock); @@ -480,7 +491,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma); if (!ring_space) { ret = -ENOMEM; - goto err_out_cleardev; + goto err_out_unmap; } sis_priv->tx_ring = ring_space; sis_priv->tx_ring_dma = ring_dma; @@ -534,7 +545,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, /* 630ET : set the mii access mode as software-mode */ if (sis_priv->chipset_rev == SIS630ET_900_REV) - outl(ACCESSMODE | inl(ioaddr + cr), ioaddr + cr); + sw32(cr, ACCESSMODE | sr32(cr)); /* probe for mii transceiver */ if (sis900_mii_probe(net_dev) == 0) { @@ -556,25 +567,27 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, goto err_unmap_rx; /* print some information about our NIC */ - printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n", - net_dev->name, card_name, ioaddr, net_dev->irq, + printk(KERN_INFO "%s: %s at 0x%p, IRQ %d, %pM\n", + net_dev->name, card_name, ioaddr, pci_dev->irq, net_dev->dev_addr); /* Detect Wake on Lan support */ - ret = (inl(net_dev->base_addr + CFGPMC) & PMESP) >> 27; + ret = (sr32(CFGPMC) & PMESP) >> 27; if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0) printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name); return 0; - err_unmap_rx: +err_unmap_rx: pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring, sis_priv->rx_ring_dma); - err_unmap_tx: +err_unmap_tx: pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring, sis_priv->tx_ring_dma); - err_out_cleardev: - pci_set_drvdata(pci_dev, NULL); +err_out_unmap: + pci_iounmap(pci_dev, ioaddr); +err_out_cleardev: + pci_set_drvdata(pci_dev, NULL); pci_release_regions(pci_dev); err_out: free_netdev(net_dev); @@ -798,7 +811,7 @@ static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *ph /* Delay between EEPROM clock transitions. 
*/ -#define eeprom_delay() inl(ee_addr) +#define eeprom_delay() sr32(mear) /** * read_eeprom - Read Serial EEPROM @@ -809,41 +822,41 @@ static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *ph * Note that location is in word (16 bits) unit */ -static u16 __devinit read_eeprom(long ioaddr, int location) +static u16 __devinit read_eeprom(void __iomem *ioaddr, int location) { + u32 read_cmd = location | EEread; int i; u16 retval = 0; - long ee_addr = ioaddr + mear; - u32 read_cmd = location | EEread; - outl(0, ee_addr); + sw32(mear, 0); eeprom_delay(); - outl(EECS, ee_addr); + sw32(mear, EECS); eeprom_delay(); /* Shift the read command (9) bits out. */ for (i = 8; i >= 0; i--) { u32 dataval = (read_cmd & (1 << i)) ? EEDI | EECS : EECS; - outl(dataval, ee_addr); + + sw32(mear, dataval); eeprom_delay(); - outl(dataval | EECLK, ee_addr); + sw32(mear, dataval | EECLK); eeprom_delay(); } - outl(EECS, ee_addr); + sw32(mear, EECS); eeprom_delay(); /* read the 16-bits data in */ for (i = 16; i > 0; i--) { - outl(EECS, ee_addr); + sw32(mear, EECS); eeprom_delay(); - outl(EECS | EECLK, ee_addr); + sw32(mear, EECS | EECLK); eeprom_delay(); - retval = (retval << 1) | ((inl(ee_addr) & EEDO) ? 1 : 0); + retval = (retval << 1) | ((sr32(mear) & EEDO) ? 1 : 0); eeprom_delay(); } /* Terminate the EEPROM access. */ - outl(0, ee_addr); + sw32(mear, 0); eeprom_delay(); return retval; @@ -852,24 +865,27 @@ static u16 __devinit read_eeprom(long ioaddr, int location) /* Read and write the MII management registers using software-generated serial MDIO protocol. Note that the command bits and data bits are send out separately */ -#define mdio_delay() inl(mdio_addr) +#define mdio_delay() sr32(mear) -static void mdio_idle(long mdio_addr) +static void mdio_idle(struct sis900_private *sp) { - outl(MDIO | MDDIR, mdio_addr); + void __iomem *ioaddr = sp->ioaddr; + + sw32(mear, MDIO | MDDIR); mdio_delay(); - outl(MDIO | MDDIR | MDC, mdio_addr); + sw32(mear, MDIO | MDDIR | MDC); } -/* Syncronize the MII management interface by shifting 32 one bits out. */ -static void mdio_reset(long mdio_addr) +/* Synchronize the MII management interface by shifting 32 one bits out. */ +static void mdio_reset(struct sis900_private *sp) { + void __iomem *ioaddr = sp->ioaddr; int i; for (i = 31; i >= 0; i--) { - outl(MDDIR | MDIO, mdio_addr); + sw32(mear, MDDIR | MDIO); mdio_delay(); - outl(MDDIR | MDIO | MDC, mdio_addr); + sw32(mear, MDDIR | MDIO | MDC); mdio_delay(); } } @@ -887,31 +903,33 @@ static void mdio_reset(long mdio_addr) static int mdio_read(struct net_device *net_dev, int phy_id, int location) { - long mdio_addr = net_dev->base_addr + mear; int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift); + struct sis900_private *sp = netdev_priv(net_dev); + void __iomem *ioaddr = sp->ioaddr; u16 retval = 0; int i; - mdio_reset(mdio_addr); - mdio_idle(mdio_addr); + mdio_reset(sp); + mdio_idle(sp); for (i = 15; i >= 0; i--) { int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR; - outl(dataval, mdio_addr); + + sw32(mear, dataval); mdio_delay(); - outl(dataval | MDC, mdio_addr); + sw32(mear, dataval | MDC); mdio_delay(); } /* Read the 16 data bits. */ for (i = 16; i > 0; i--) { - outl(0, mdio_addr); + sw32(mear, 0); mdio_delay(); - retval = (retval << 1) | ((inl(mdio_addr) & MDIO) ? 1 : 0); - outl(MDC, mdio_addr); + retval = (retval << 1) | ((sr32(mear) & MDIO) ? 
1 : 0); + sw32(mear, MDC); mdio_delay(); } - outl(0x00, mdio_addr); + sw32(mear, 0x00); return retval; } @@ -931,19 +949,21 @@ static int mdio_read(struct net_device *net_dev, int phy_id, int location) static void mdio_write(struct net_device *net_dev, int phy_id, int location, int value) { - long mdio_addr = net_dev->base_addr + mear; int mii_cmd = MIIwrite|(phy_id<<MIIpmdShift)|(location<<MIIregShift); + struct sis900_private *sp = netdev_priv(net_dev); + void __iomem *ioaddr = sp->ioaddr; int i; - mdio_reset(mdio_addr); - mdio_idle(mdio_addr); + mdio_reset(sp); + mdio_idle(sp); /* Shift the command bits out. */ for (i = 15; i >= 0; i--) { int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR; - outb(dataval, mdio_addr); + + sw8(mear, dataval); mdio_delay(); - outb(dataval | MDC, mdio_addr); + sw8(mear, dataval | MDC); mdio_delay(); } mdio_delay(); @@ -951,21 +971,22 @@ static void mdio_write(struct net_device *net_dev, int phy_id, int location, /* Shift the value bits out. */ for (i = 15; i >= 0; i--) { int dataval = (value & (1 << i)) ? MDDIR | MDIO : MDDIR; - outl(dataval, mdio_addr); + + sw32(mear, dataval); mdio_delay(); - outl(dataval | MDC, mdio_addr); + sw32(mear, dataval | MDC); mdio_delay(); } mdio_delay(); /* Clear out extra bits. */ for (i = 2; i > 0; i--) { - outb(0, mdio_addr); + sw8(mear, 0); mdio_delay(); - outb(MDC, mdio_addr); + sw8(mear, MDC); mdio_delay(); } - outl(0x00, mdio_addr); + sw32(mear, 0x00); } @@ -1000,9 +1021,12 @@ static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr) */ static void sis900_poll(struct net_device *dev) { - disable_irq(dev->irq); - sis900_interrupt(dev->irq, dev); - enable_irq(dev->irq); + struct sis900_private *sp = netdev_priv(dev); + const int irq = sp->pci_dev->irq; + + disable_irq(irq); + sis900_interrupt(irq, dev); + enable_irq(irq); } #endif @@ -1018,7 +1042,7 @@ static int sis900_open(struct net_device *net_dev) { struct sis900_private *sis_priv = netdev_priv(net_dev); - long ioaddr = net_dev->base_addr; + void __iomem *ioaddr = sis_priv->ioaddr; int ret; /* Soft reset the chip. */ @@ -1027,8 +1051,8 @@ sis900_open(struct net_device *net_dev) /* Equalizer workaround Rule */ sis630_set_eq(net_dev, sis_priv->chipset_rev); - ret = request_irq(net_dev->irq, sis900_interrupt, IRQF_SHARED, - net_dev->name, net_dev); + ret = request_irq(sis_priv->pci_dev->irq, sis900_interrupt, IRQF_SHARED, + net_dev->name, net_dev); if (ret) return ret; @@ -1042,12 +1066,12 @@ sis900_open(struct net_device *net_dev) netif_start_queue(net_dev); /* Workaround for EDB */ - sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); + sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); /* Enable all known interrupts by setting the interrupt mask. 
*/ - outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr); - outl(RxENA | inl(ioaddr + cr), ioaddr + cr); - outl(IE, ioaddr + ier); + sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); + sw32(cr, RxENA | sr32(cr)); + sw32(ier, IE); sis900_check_mode(net_dev, sis_priv->mii); @@ -1074,31 +1098,30 @@ static void sis900_init_rxfilter (struct net_device * net_dev) { struct sis900_private *sis_priv = netdev_priv(net_dev); - long ioaddr = net_dev->base_addr; + void __iomem *ioaddr = sis_priv->ioaddr; u32 rfcrSave; u32 i; - rfcrSave = inl(rfcr + ioaddr); + rfcrSave = sr32(rfcr); /* disable packet filtering before setting filter */ - outl(rfcrSave & ~RFEN, rfcr + ioaddr); + sw32(rfcr, rfcrSave & ~RFEN); /* load MAC addr to filter data register */ for (i = 0 ; i < 3 ; i++) { - u32 w; + u32 w = (u32) *((u16 *)(net_dev->dev_addr)+i); - w = (u32) *((u16 *)(net_dev->dev_addr)+i); - outl((i << RFADDR_shift), ioaddr + rfcr); - outl(w, ioaddr + rfdr); + sw32(rfcr, i << RFADDR_shift); + sw32(rfdr, w); if (netif_msg_hw(sis_priv)) { printk(KERN_DEBUG "%s: Receive Filter Addrss[%d]=%x\n", - net_dev->name, i, inl(ioaddr + rfdr)); + net_dev->name, i, sr32(rfdr)); } } /* enable packet filtering */ - outl(rfcrSave | RFEN, rfcr + ioaddr); + sw32(rfcr, rfcrSave | RFEN); } /** @@ -1112,7 +1135,7 @@ static void sis900_init_tx_ring(struct net_device *net_dev) { struct sis900_private *sis_priv = netdev_priv(net_dev); - long ioaddr = net_dev->base_addr; + void __iomem *ioaddr = sis_priv->ioaddr; int i; sis_priv->tx_full = 0; @@ -1128,10 +1151,10 @@ sis900_init_tx_ring(struct net_device *net_dev) } /* load Transmit Descriptor Register */ - outl(sis_priv->tx_ring_dma, ioaddr + txdp); + sw32(txdp, sis_priv->tx_ring_dma); if (netif_msg_hw(sis_priv)) printk(KERN_DEBUG "%s: TX descriptor register loaded with: %8.8x\n", - net_dev->name, inl(ioaddr + txdp)); + net_dev->name, sr32(txdp)); } /** @@ -1146,7 +1169,7 @@ static void sis900_init_rx_ring(struct net_device *net_dev) { struct sis900_private *sis_priv = netdev_priv(net_dev); - long ioaddr = net_dev->base_addr; + void __iomem *ioaddr = sis_priv->ioaddr; int i; sis_priv->cur_rx = 0; @@ -1181,10 +1204,10 @@ sis900_init_rx_ring(struct net_device *net_dev) sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC); /* load Receive Descriptor Register */ - outl(sis_priv->rx_ring_dma, ioaddr + rxdp); + sw32(rxdp, sis_priv->rx_ring_dma); if (netif_msg_hw(sis_priv)) printk(KERN_DEBUG "%s: RX descriptor register loaded with: %8.8x\n", - net_dev->name, inl(ioaddr + rxdp)); + net_dev->name, sr32(rxdp)); } /** @@ -1298,7 +1321,7 @@ static void sis900_timer(unsigned long data) sis900_read_mode(net_dev, &speed, &duplex); if (duplex){ - sis900_set_mode(net_dev->base_addr, speed, duplex); + sis900_set_mode(sis_priv, speed, duplex); sis630_set_eq(net_dev, sis_priv->chipset_rev); netif_start_queue(net_dev); } @@ -1359,25 +1382,25 @@ static void sis900_timer(unsigned long data) static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_phy) { struct sis900_private *sis_priv = netdev_priv(net_dev); - long ioaddr = net_dev->base_addr; + void __iomem *ioaddr = sis_priv->ioaddr; int speed, duplex; if (mii_phy->phy_types == LAN) { - outl(~EXD & inl(ioaddr + cfg), ioaddr + cfg); + sw32(cfg, ~EXD & sr32(cfg)); sis900_set_capability(net_dev , mii_phy); sis900_auto_negotiate(net_dev, sis_priv->cur_phy); } else { - outl(EXD | inl(ioaddr + cfg), ioaddr + cfg); + sw32(cfg, EXD | sr32(cfg)); speed = HW_SPEED_HOME; duplex = FDX_CAPABLE_HALF_SELECTED; - 
sis900_set_mode(ioaddr, speed, duplex); + sis900_set_mode(sis_priv, speed, duplex); sis_priv->autong_complete = 1; } } /** * sis900_set_mode - Set the media mode of mac register. - * @ioaddr: the address of the device + * @sp: the device private data * @speed : the transmit speed to be determined * @duplex: the duplex mode to be determined * @@ -1388,11 +1411,12 @@ static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_ph * double words. */ -static void sis900_set_mode (long ioaddr, int speed, int duplex) +static void sis900_set_mode(struct sis900_private *sp, int speed, int duplex) { + void __iomem *ioaddr = sp->ioaddr; u32 tx_flags = 0, rx_flags = 0; - if (inl(ioaddr + cfg) & EDB_MASTER_EN) { + if (sr32( cfg) & EDB_MASTER_EN) { tx_flags = TxATP | (DMA_BURST_64 << TxMXDMA_shift) | (TX_FILL_THRESH << TxFILLT_shift); rx_flags = DMA_BURST_64 << RxMXDMA_shift; @@ -1420,8 +1444,8 @@ static void sis900_set_mode (long ioaddr, int speed, int duplex) rx_flags |= RxAJAB; #endif - outl (tx_flags, ioaddr + txcfg); - outl (rx_flags, ioaddr + rxcfg); + sw32(txcfg, tx_flags); + sw32(rxcfg, rx_flags); } /** @@ -1528,16 +1552,17 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex static void sis900_tx_timeout(struct net_device *net_dev) { struct sis900_private *sis_priv = netdev_priv(net_dev); - long ioaddr = net_dev->base_addr; + void __iomem *ioaddr = sis_priv->ioaddr; unsigned long flags; int i; - if(netif_msg_tx_err(sis_priv)) + if (netif_msg_tx_err(sis_priv)) { printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n", - net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr)); + net_dev->name, sr32(cr), sr32(isr)); + } /* Disable interrupts by clearing the interrupt mask. */ - outl(0x0000, ioaddr + imr); + sw32(imr, 0x0000); /* use spinlock to prevent interrupt handler accessing buffer ring */ spin_lock_irqsave(&sis_priv->lock, flags); @@ -1566,10 +1591,10 @@ static void sis900_tx_timeout(struct net_device *net_dev) net_dev->trans_start = jiffies; /* prevent tx timeout */ /* load Transmit Descriptor Register */ - outl(sis_priv->tx_ring_dma, ioaddr + txdp); + sw32(txdp, sis_priv->tx_ring_dma); /* Enable all known interrupts by setting the interrupt mask. 
*/ - outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr); + sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); } /** @@ -1586,7 +1611,7 @@ static netdev_tx_t sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev) { struct sis900_private *sis_priv = netdev_priv(net_dev); - long ioaddr = net_dev->base_addr; + void __iomem *ioaddr = sis_priv->ioaddr; unsigned int entry; unsigned long flags; unsigned int index_cur_tx, index_dirty_tx; @@ -1608,7 +1633,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev) sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len); - outl(TxENA | inl(ioaddr + cr), ioaddr + cr); + sw32(cr, TxENA | sr32(cr)); sis_priv->cur_tx ++; index_cur_tx = sis_priv->cur_tx; @@ -1654,14 +1679,14 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance) struct net_device *net_dev = dev_instance; struct sis900_private *sis_priv = netdev_priv(net_dev); int boguscnt = max_interrupt_work; - long ioaddr = net_dev->base_addr; + void __iomem *ioaddr = sis_priv->ioaddr; u32 status; unsigned int handled = 0; spin_lock (&sis_priv->lock); do { - status = inl(ioaddr + isr); + status = sr32(isr); if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0) /* nothing intresting happened */ @@ -1696,7 +1721,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance) if(netif_msg_intr(sis_priv)) printk(KERN_DEBUG "%s: exiting interrupt, " "interrupt status = 0x%#8.8x.\n", - net_dev->name, inl(ioaddr + isr)); + net_dev->name, sr32(isr)); spin_unlock (&sis_priv->lock); return IRQ_RETVAL(handled); @@ -1715,7 +1740,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance) static int sis900_rx(struct net_device *net_dev) { struct sis900_private *sis_priv = netdev_priv(net_dev); - long ioaddr = net_dev->base_addr; + void __iomem *ioaddr = sis_priv->ioaddr; unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC; u32 rx_status = sis_priv->rx_ring[entry].cmdsts; int rx_work_limit; @@ -1847,7 +1872,7 @@ refill_rx_ring: } } /* re-enable the potentially idle receive state matchine */ - outl(RxENA | inl(ioaddr + cr), ioaddr + cr ); + sw32(cr , RxENA | sr32(cr)); return 0; } @@ -1932,31 +1957,31 @@ static void sis900_finish_xmit (struct net_device *net_dev) static int sis900_close(struct net_device *net_dev) { - long ioaddr = net_dev->base_addr; struct sis900_private *sis_priv = netdev_priv(net_dev); + struct pci_dev *pdev = sis_priv->pci_dev; + void __iomem *ioaddr = sis_priv->ioaddr; struct sk_buff *skb; int i; netif_stop_queue(net_dev); /* Disable interrupts by clearing the interrupt mask. 
*/ - outl(0x0000, ioaddr + imr); - outl(0x0000, ioaddr + ier); + sw32(imr, 0x0000); + sw32(ier, 0x0000); /* Stop the chip's Tx and Rx Status Machine */ - outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr); + sw32(cr, RxDIS | TxDIS | sr32(cr)); del_timer(&sis_priv->timer); - free_irq(net_dev->irq, net_dev); + free_irq(pdev->irq, net_dev); /* Free Tx and RX skbuff */ for (i = 0; i < NUM_RX_DESC; i++) { skb = sis_priv->rx_skbuff[i]; if (skb) { - pci_unmap_single(sis_priv->pci_dev, - sis_priv->rx_ring[i].bufptr, - RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + pci_unmap_single(pdev, sis_priv->rx_ring[i].bufptr, + RX_BUF_SIZE, PCI_DMA_FROMDEVICE); dev_kfree_skb(skb); sis_priv->rx_skbuff[i] = NULL; } @@ -1964,9 +1989,8 @@ static int sis900_close(struct net_device *net_dev) for (i = 0; i < NUM_TX_DESC; i++) { skb = sis_priv->tx_skbuff[i]; if (skb) { - pci_unmap_single(sis_priv->pci_dev, - sis_priv->tx_ring[i].bufptr, skb->len, - PCI_DMA_TODEVICE); + pci_unmap_single(pdev, sis_priv->tx_ring[i].bufptr, + skb->len, PCI_DMA_TODEVICE); dev_kfree_skb(skb); sis_priv->tx_skbuff[i] = NULL; } @@ -2055,14 +2079,14 @@ static int sis900_nway_reset(struct net_device *net_dev) static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) { struct sis900_private *sis_priv = netdev_priv(net_dev); - long pmctrl_addr = net_dev->base_addr + pmctrl; + void __iomem *ioaddr = sis_priv->ioaddr; u32 cfgpmcsr = 0, pmctrl_bits = 0; if (wol->wolopts == 0) { pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr); cfgpmcsr &= ~PME_EN; pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr); - outl(pmctrl_bits, pmctrl_addr); + sw32(pmctrl, pmctrl_bits); if (netif_msg_wol(sis_priv)) printk(KERN_DEBUG "%s: Wake on LAN disabled\n", net_dev->name); return 0; @@ -2077,7 +2101,7 @@ static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wo if (wol->wolopts & WAKE_PHY) pmctrl_bits |= LINKON; - outl(pmctrl_bits, pmctrl_addr); + sw32(pmctrl, pmctrl_bits); pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr); cfgpmcsr |= PME_EN; @@ -2090,10 +2114,11 @@ static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wo static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) { - long pmctrl_addr = net_dev->base_addr + pmctrl; + struct sis900_private *sp = netdev_priv(net_dev); + void __iomem *ioaddr = sp->ioaddr; u32 pmctrl_bits; - pmctrl_bits = inl(pmctrl_addr); + pmctrl_bits = sr32(pmctrl); if (pmctrl_bits & MAGICPKT) wol->wolopts |= WAKE_MAGIC; if (pmctrl_bits & LINKON) @@ -2279,8 +2304,8 @@ static inline u16 sis900_mcast_bitnr(u8 *addr, u8 revision) static void set_rx_mode(struct net_device *net_dev) { - long ioaddr = net_dev->base_addr; struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; u16 mc_filter[16] = {0}; /* 256/128 bits multicast hash table */ int i, table_entries; u32 rx_mode; @@ -2322,24 +2347,24 @@ static void set_rx_mode(struct net_device *net_dev) /* update Multicast Hash Table in Receive Filter */ for (i = 0; i < table_entries; i++) { /* why plus 0x04 ??, That makes the correct value for hash table. 
*/ - outl((u32)(0x00000004+i) << RFADDR_shift, ioaddr + rfcr); - outl(mc_filter[i], ioaddr + rfdr); + sw32(rfcr, (u32)(0x00000004 + i) << RFADDR_shift); + sw32(rfdr, mc_filter[i]); } - outl(RFEN | rx_mode, ioaddr + rfcr); + sw32(rfcr, RFEN | rx_mode); /* sis900 is capable of looping back packets at MAC level for * debugging purpose */ if (net_dev->flags & IFF_LOOPBACK) { u32 cr_saved; /* We must disable Tx/Rx before setting loopback mode */ - cr_saved = inl(ioaddr + cr); - outl(cr_saved | TxDIS | RxDIS, ioaddr + cr); + cr_saved = sr32(cr); + sw32(cr, cr_saved | TxDIS | RxDIS); /* enable loopback */ - outl(inl(ioaddr + txcfg) | TxMLB, ioaddr + txcfg); - outl(inl(ioaddr + rxcfg) | RxATX, ioaddr + rxcfg); + sw32(txcfg, sr32(txcfg) | TxMLB); + sw32(rxcfg, sr32(rxcfg) | RxATX); /* restore cr */ - outl(cr_saved, ioaddr + cr); + sw32(cr, cr_saved); } } @@ -2355,26 +2380,25 @@ static void set_rx_mode(struct net_device *net_dev) static void sis900_reset(struct net_device *net_dev) { struct sis900_private *sis_priv = netdev_priv(net_dev); - long ioaddr = net_dev->base_addr; - int i = 0; + void __iomem *ioaddr = sis_priv->ioaddr; u32 status = TxRCMP | RxRCMP; + int i; - outl(0, ioaddr + ier); - outl(0, ioaddr + imr); - outl(0, ioaddr + rfcr); + sw32(ier, 0); + sw32(imr, 0); + sw32(rfcr, 0); - outl(RxRESET | TxRESET | RESET | inl(ioaddr + cr), ioaddr + cr); + sw32(cr, RxRESET | TxRESET | RESET | sr32(cr)); /* Check that the chip has finished the reset. */ - while (status && (i++ < 1000)) { - status ^= (inl(isr + ioaddr) & status); - } + for (i = 0; status && (i < 1000); i++) + status ^= sr32(isr) & status; - if( (sis_priv->chipset_rev >= SIS635A_900_REV) || - (sis_priv->chipset_rev == SIS900B_900_REV) ) - outl(PESEL | RND_CNT, ioaddr + cfg); + if (sis_priv->chipset_rev >= SIS635A_900_REV || + sis_priv->chipset_rev == SIS900B_900_REV) + sw32(cfg, PESEL | RND_CNT); else - outl(PESEL, ioaddr + cfg); + sw32(cfg, PESEL); } /** @@ -2388,10 +2412,12 @@ static void __devexit sis900_remove(struct pci_dev *pci_dev) { struct net_device *net_dev = pci_get_drvdata(pci_dev); struct sis900_private *sis_priv = netdev_priv(net_dev); - struct mii_phy *phy = NULL; + + unregister_netdev(net_dev); while (sis_priv->first_mii) { - phy = sis_priv->first_mii; + struct mii_phy *phy = sis_priv->first_mii; + sis_priv->first_mii = phy->next; kfree(phy); } @@ -2400,7 +2426,7 @@ static void __devexit sis900_remove(struct pci_dev *pci_dev) sis_priv->rx_ring_dma); pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring, sis_priv->tx_ring_dma); - unregister_netdev(net_dev); + pci_iounmap(pci_dev, sis_priv->ioaddr); free_netdev(net_dev); pci_release_regions(pci_dev); pci_set_drvdata(pci_dev, NULL); @@ -2411,7 +2437,8 @@ static void __devexit sis900_remove(struct pci_dev *pci_dev) static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state) { struct net_device *net_dev = pci_get_drvdata(pci_dev); - long ioaddr = net_dev->base_addr; + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; if(!netif_running(net_dev)) return 0; @@ -2420,7 +2447,7 @@ static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state) netif_device_detach(net_dev); /* Stop the chip's Tx and Rx Status Machine */ - outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr); + sw32(cr, RxDIS | TxDIS | sr32(cr)); pci_set_power_state(pci_dev, PCI_D3hot); pci_save_state(pci_dev); @@ -2432,7 +2459,7 @@ static int sis900_resume(struct pci_dev *pci_dev) { struct net_device *net_dev = pci_get_drvdata(pci_dev); 
struct sis900_private *sis_priv = netdev_priv(net_dev); - long ioaddr = net_dev->base_addr; + void __iomem *ioaddr = sis_priv->ioaddr; if(!netif_running(net_dev)) return 0; @@ -2453,9 +2480,9 @@ static int sis900_resume(struct pci_dev *pci_dev) sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); /* Enable all known interrupts by setting the interrupt mask. */ - outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr); - outl(RxENA | inl(ioaddr + cr), ioaddr + cr); - outl(IE, ioaddr + ier); + sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); + sw32(cr, RxENA | sr32(cr)); + sw32(ier, IE); sis900_check_mode(net_dev, sis_priv->mii); diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 2a662e6112e..d01e59c348a 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -146,6 +146,12 @@ enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 }; #define EPIC_TOTAL_SIZE 0x100 #define USE_IO_OPS 1 +#ifdef USE_IO_OPS +#define EPIC_BAR 0 +#else +#define EPIC_BAR 1 +#endif + typedef enum { SMSC_83C170_0, SMSC_83C170, @@ -176,21 +182,11 @@ static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = { }; MODULE_DEVICE_TABLE (pci, epic_pci_tbl); - -#ifndef USE_IO_OPS -#undef inb -#undef inw -#undef inl -#undef outb -#undef outw -#undef outl -#define inb readb -#define inw readw -#define inl readl -#define outb writeb -#define outw writew -#define outl writel -#endif +#define ew16(reg, val) iowrite16(val, ioaddr + (reg)) +#define ew32(reg, val) iowrite32(val, ioaddr + (reg)) +#define er8(reg) ioread8(ioaddr + (reg)) +#define er16(reg) ioread16(ioaddr + (reg)) +#define er32(reg) ioread32(ioaddr + (reg)) /* Offsets to registers, using the (ugh) SMC names. */ enum epic_registers { @@ -275,6 +271,7 @@ struct epic_private { u32 irq_mask; unsigned int rx_buf_sz; /* Based on MTU+slack. */ + void __iomem *ioaddr; struct pci_dev *pci_dev; /* PCI bus location. 
*/ int chip_id, chip_flags; @@ -290,7 +287,7 @@ struct epic_private { }; static int epic_open(struct net_device *dev); -static int read_eeprom(long ioaddr, int location); +static int read_eeprom(struct epic_private *, int); static int mdio_read(struct net_device *dev, int phy_id, int location); static void mdio_write(struct net_device *dev, int phy_id, int loc, int val); static void epic_restart(struct net_device *dev); @@ -321,11 +318,11 @@ static const struct net_device_ops epic_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -static int __devinit epic_init_one (struct pci_dev *pdev, - const struct pci_device_id *ent) +static int __devinit epic_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) { static int card_idx = -1; - long ioaddr; + void __iomem *ioaddr; int chip_idx = (int) ent->driver_data; int irq; struct net_device *dev; @@ -368,19 +365,15 @@ static int __devinit epic_init_one (struct pci_dev *pdev, SET_NETDEV_DEV(dev, &pdev->dev); -#ifdef USE_IO_OPS - ioaddr = pci_resource_start (pdev, 0); -#else - ioaddr = pci_resource_start (pdev, 1); - ioaddr = (long) pci_ioremap_bar(pdev, 1); + ioaddr = pci_iomap(pdev, EPIC_BAR, 0); if (!ioaddr) { dev_err(&pdev->dev, "ioremap failed\n"); goto err_out_free_netdev; } -#endif pci_set_drvdata(pdev, dev); ep = netdev_priv(dev); + ep->ioaddr = ioaddr; ep->mii.dev = dev; ep->mii.mdio_read = mdio_read; ep->mii.mdio_write = mdio_write; @@ -409,34 +402,31 @@ static int __devinit epic_init_one (struct pci_dev *pdev, duplex = full_duplex[card_idx]; } - dev->base_addr = ioaddr; - dev->irq = irq; - spin_lock_init(&ep->lock); spin_lock_init(&ep->napi_lock); ep->reschedule_in_poll = 0; /* Bring the chip out of low-power mode. */ - outl(0x4200, ioaddr + GENCTL); + ew32(GENCTL, 0x4200); /* Magic?! If we don't set this bit the MII interface won't work. */ /* This magic is documented in SMSC app note 7.15 */ for (i = 16; i > 0; i--) - outl(0x0008, ioaddr + TEST1); + ew32(TEST1, 0x0008); /* Turn on the MII transceiver. */ - outl(0x12, ioaddr + MIICfg); + ew32(MIICfg, 0x12); if (chip_idx == 1) - outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); - outl(0x0200, ioaddr + GENCTL); + ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800); + ew32(GENCTL, 0x0200); /* Note: the '175 does not have a serial EEPROM. */ for (i = 0; i < 3; i++) - ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(inw(ioaddr + LAN0 + i*4)); + ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4)); if (debug > 2) { dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n"); for (i = 0; i < 64; i++) - printk(" %4.4x%s", read_eeprom(ioaddr, i), + printk(" %4.4x%s", read_eeprom(ep, i), i % 16 == 15 ? "\n" : ""); } @@ -481,8 +471,8 @@ static int __devinit epic_init_one (struct pci_dev *pdev, /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */ if (ep->chip_flags & MII_PWRDWN) - outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL); - outl(0x0008, ioaddr + GENCTL); + ew32(NVCTL, er32(NVCTL) & ~0x483c); + ew32(GENCTL, 0x0008); /* The lower four bits are the media type. 
*/ if (duplex) { @@ -501,8 +491,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev, if (ret < 0) goto err_out_unmap_rx; - printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n", - dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq, + printk(KERN_INFO "%s: %s at %lx, IRQ %d, %pM\n", + dev->name, pci_id_tbl[chip_idx].name, + (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq, dev->dev_addr); out: @@ -513,10 +504,8 @@ err_out_unmap_rx: err_out_unmap_tx: pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); err_out_iounmap: -#ifndef USE_IO_OPS - iounmap(ioaddr); + pci_iounmap(pdev, ioaddr); err_out_free_netdev: -#endif free_netdev(dev); err_out_free_res: pci_release_regions(pdev); @@ -540,7 +529,7 @@ err_out_disable: This serves to flush the operation to the PCI bus. */ -#define eeprom_delay() inl(ee_addr) +#define eeprom_delay() er32(EECTL) /* The EEPROM commands include the alway-set leading bit. */ #define EE_WRITE_CMD (5 << 6) @@ -550,67 +539,67 @@ err_out_disable: static void epic_disable_int(struct net_device *dev, struct epic_private *ep) { - long ioaddr = dev->base_addr; + void __iomem *ioaddr = ep->ioaddr; - outl(0x00000000, ioaddr + INTMASK); + ew32(INTMASK, 0x00000000); } -static inline void __epic_pci_commit(long ioaddr) +static inline void __epic_pci_commit(void __iomem *ioaddr) { #ifndef USE_IO_OPS - inl(ioaddr + INTMASK); + er32(INTMASK); #endif } static inline void epic_napi_irq_off(struct net_device *dev, struct epic_private *ep) { - long ioaddr = dev->base_addr; + void __iomem *ioaddr = ep->ioaddr; - outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK); + ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent); __epic_pci_commit(ioaddr); } static inline void epic_napi_irq_on(struct net_device *dev, struct epic_private *ep) { - long ioaddr = dev->base_addr; + void __iomem *ioaddr = ep->ioaddr; /* No need to commit possible posted write */ - outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK); + ew32(INTMASK, ep->irq_mask | EpicNapiEvent); } -static int __devinit read_eeprom(long ioaddr, int location) +static int __devinit read_eeprom(struct epic_private *ep, int location) { + void __iomem *ioaddr = ep->ioaddr; int i; int retval = 0; - long ee_addr = ioaddr + EECTL; int read_cmd = location | - (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD); + (er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD); - outl(EE_ENB & ~EE_CS, ee_addr); - outl(EE_ENB, ee_addr); + ew32(EECTL, EE_ENB & ~EE_CS); + ew32(EECTL, EE_ENB); /* Shift the read command bits out. */ for (i = 12; i >= 0; i--) { short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0; - outl(EE_ENB | dataval, ee_addr); + ew32(EECTL, EE_ENB | dataval); eeprom_delay(); - outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr); + ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK); eeprom_delay(); } - outl(EE_ENB, ee_addr); + ew32(EECTL, EE_ENB); for (i = 16; i > 0; i--) { - outl(EE_ENB | EE_SHIFT_CLK, ee_addr); + ew32(EECTL, EE_ENB | EE_SHIFT_CLK); eeprom_delay(); - retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0); - outl(EE_ENB, ee_addr); + retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0); + ew32(EECTL, EE_ENB); eeprom_delay(); } /* Terminate the EEPROM access. 
*/ - outl(EE_ENB & ~EE_CS, ee_addr); + ew32(EECTL, EE_ENB & ~EE_CS); return retval; } @@ -618,22 +607,23 @@ static int __devinit read_eeprom(long ioaddr, int location) #define MII_WRITEOP 2 static int mdio_read(struct net_device *dev, int phy_id, int location) { - long ioaddr = dev->base_addr; + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP; int i; - outl(read_cmd, ioaddr + MIICtrl); + ew32(MIICtrl, read_cmd); /* Typical operation takes 25 loops. */ for (i = 400; i > 0; i--) { barrier(); - if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) { + if ((er32(MIICtrl) & MII_READOP) == 0) { /* Work around read failure bug. */ if (phy_id == 1 && location < 6 && - inw(ioaddr + MIIData) == 0xffff) { - outl(read_cmd, ioaddr + MIICtrl); + er16(MIIData) == 0xffff) { + ew32(MIICtrl, read_cmd); continue; } - return inw(ioaddr + MIIData); + return er16(MIIData); } } return 0xffff; @@ -641,14 +631,15 @@ static int mdio_read(struct net_device *dev, int phy_id, int location) static void mdio_write(struct net_device *dev, int phy_id, int loc, int value) { - long ioaddr = dev->base_addr; + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; int i; - outw(value, ioaddr + MIIData); - outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl); + ew16(MIIData, value); + ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP); for (i = 10000; i > 0; i--) { barrier(); - if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0) + if ((er32(MIICtrl) & MII_WRITEOP) == 0) break; } } @@ -657,25 +648,26 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value) static int epic_open(struct net_device *dev) { struct epic_private *ep = netdev_priv(dev); - long ioaddr = dev->base_addr; - int i; - int retval; + void __iomem *ioaddr = ep->ioaddr; + const int irq = ep->pci_dev->irq; + int rc, i; /* Soft reset the chip. */ - outl(0x4001, ioaddr + GENCTL); + ew32(GENCTL, 0x4001); napi_enable(&ep->napi); - if ((retval = request_irq(dev->irq, epic_interrupt, IRQF_SHARED, dev->name, dev))) { + rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev); + if (rc) { napi_disable(&ep->napi); - return retval; + return rc; } epic_init_ring(dev); - outl(0x4000, ioaddr + GENCTL); + ew32(GENCTL, 0x4000); /* This magic is documented in SMSC app note 7.15 */ for (i = 16; i > 0; i--) - outl(0x0008, ioaddr + TEST1); + ew32(TEST1, 0x0008); /* Pull the chip out of low-power mode, enable interrupts, and set for PCI read multiple. The MIIcfg setting and strange write order are @@ -683,29 +675,29 @@ static int epic_open(struct net_device *dev) wiring on the Ositech CardBus card. */ #if 0 - outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg); + ew32(MIICfg, dev->if_port == 1 ? 
0x13 : 0x12); #endif if (ep->chip_flags & MII_PWRDWN) - outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); + ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800); /* Tell the chip to byteswap descriptors on big-endian hosts */ #ifdef __BIG_ENDIAN - outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); - inl(ioaddr + GENCTL); - outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); + ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8)); + er32(GENCTL); + ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8)); #else - outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); - inl(ioaddr + GENCTL); - outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); + ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8)); + er32(GENCTL); + ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8)); #endif udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */ for (i = 0; i < 3; i++) - outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4); + ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i])); ep->tx_threshold = TX_FIFO_THRESH; - outl(ep->tx_threshold, ioaddr + TxThresh); + ew32(TxThresh, ep->tx_threshold); if (media2miictl[dev->if_port & 15]) { if (ep->mii_phy_cnt) @@ -731,26 +723,27 @@ static int epic_open(struct net_device *dev) } } - outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl); - outl(ep->rx_ring_dma, ioaddr + PRxCDAR); - outl(ep->tx_ring_dma, ioaddr + PTxCDAR); + ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79); + ew32(PRxCDAR, ep->rx_ring_dma); + ew32(PTxCDAR, ep->tx_ring_dma); /* Start the chip's Rx process. */ set_rx_mode(dev); - outl(StartRx | RxQueued, ioaddr + COMMAND); + ew32(COMMAND, StartRx | RxQueued); netif_start_queue(dev); /* Enable interrupts by setting the interrupt mask. */ - outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) - | CntFull | TxUnderrun - | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK); - - if (debug > 1) - printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x " - "%s-duplex.\n", - dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL), - ep->mii.full_duplex ? "full" : "half"); + ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull | + ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) | + TxUnderrun); + + if (debug > 1) { + printk(KERN_DEBUG "%s: epic_open() ioaddr %p IRQ %d " + "status %4.4x %s-duplex.\n", + dev->name, ioaddr, irq, er32(GENCTL), + ep->mii.full_duplex ? "full" : "half"); + } /* Set the timer to switch to check for link beat and perhaps switch to an alternate media type. */ @@ -760,27 +753,29 @@ static int epic_open(struct net_device *dev) ep->timer.function = epic_timer; /* timer handler */ add_timer(&ep->timer); - return 0; + return rc; } /* Reset the chip to recover from a PCI transaction error. This may occur at interrupt time. */ static void epic_pause(struct net_device *dev) { - long ioaddr = dev->base_addr; + struct net_device_stats *stats = &dev->stats; + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; netif_stop_queue (dev); /* Disable interrupts by clearing the interrupt mask. */ - outl(0x00000000, ioaddr + INTMASK); + ew32(INTMASK, 0x00000000); /* Stop the chip's Tx and Rx DMA processes. */ - outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND); + ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA); /* Update the error counts. 
*/ - if (inw(ioaddr + COMMAND) != 0xffff) { - dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); - dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); - dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); + if (er16(COMMAND) != 0xffff) { + stats->rx_missed_errors += er8(MPCNT); + stats->rx_frame_errors += er8(ALICNT); + stats->rx_crc_errors += er8(CRCCNT); } /* Remove the packets on the Rx queue. */ @@ -789,12 +784,12 @@ static void epic_pause(struct net_device *dev) static void epic_restart(struct net_device *dev) { - long ioaddr = dev->base_addr; struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; int i; /* Soft reset the chip. */ - outl(0x4001, ioaddr + GENCTL); + ew32(GENCTL, 0x4001); printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n", dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx); @@ -802,47 +797,46 @@ static void epic_restart(struct net_device *dev) /* This magic is documented in SMSC app note 7.15 */ for (i = 16; i > 0; i--) - outl(0x0008, ioaddr + TEST1); + ew32(TEST1, 0x0008); #ifdef __BIG_ENDIAN - outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); + ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8)); #else - outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); + ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8)); #endif - outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg); + ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12); if (ep->chip_flags & MII_PWRDWN) - outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); + ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800); for (i = 0; i < 3; i++) - outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4); + ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i])); ep->tx_threshold = TX_FIFO_THRESH; - outl(ep->tx_threshold, ioaddr + TxThresh); - outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl); - outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)* - sizeof(struct epic_rx_desc), ioaddr + PRxCDAR); - outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)* - sizeof(struct epic_tx_desc), ioaddr + PTxCDAR); + ew32(TxThresh, ep->tx_threshold); + ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79); + ew32(PRxCDAR, ep->rx_ring_dma + + (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc)); + ew32(PTxCDAR, ep->tx_ring_dma + + (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc)); /* Start the chip's Rx process. */ set_rx_mode(dev); - outl(StartRx | RxQueued, ioaddr + COMMAND); + ew32(COMMAND, StartRx | RxQueued); /* Enable interrupts by setting the interrupt mask. */ - outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) - | CntFull | TxUnderrun - | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK); + ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull | + ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) | + TxUnderrun); printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x" " interrupt %4.4x.\n", - dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL), - (int)inl(ioaddr + INTSTAT)); + dev->name, er32(COMMAND), er32(GENCTL), er32(INTSTAT)); } static void check_media(struct net_device *dev) { struct epic_private *ep = netdev_priv(dev); - long ioaddr = dev->base_addr; + void __iomem *ioaddr = ep->ioaddr; int mii_lpa = ep->mii_phy_cnt ? 
mdio_read(dev, ep->phys[0], MII_LPA) : 0; int negotiated = mii_lpa & ep->mii.advertising; int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040; @@ -856,7 +850,7 @@ static void check_media(struct net_device *dev) printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link" " partner capability of %4.4x.\n", dev->name, ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa); - outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl); + ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79); } } @@ -864,16 +858,15 @@ static void epic_timer(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct epic_private *ep = netdev_priv(dev); - long ioaddr = dev->base_addr; + void __iomem *ioaddr = ep->ioaddr; int next_tick = 5*HZ; if (debug > 3) { printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n", - dev->name, (int)inl(ioaddr + TxSTAT)); + dev->name, er32(TxSTAT)); printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x " - "IntStatus %4.4x RxStatus %4.4x.\n", - dev->name, (int)inl(ioaddr + INTMASK), - (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT)); + "IntStatus %4.4x RxStatus %4.4x.\n", dev->name, + er32(INTMASK), er32(INTSTAT), er32(RxSTAT)); } check_media(dev); @@ -885,23 +878,22 @@ static void epic_timer(unsigned long data) static void epic_tx_timeout(struct net_device *dev) { struct epic_private *ep = netdev_priv(dev); - long ioaddr = dev->base_addr; + void __iomem *ioaddr = ep->ioaddr; if (debug > 0) { printk(KERN_WARNING "%s: Transmit timeout using MII device, " - "Tx status %4.4x.\n", - dev->name, (int)inw(ioaddr + TxSTAT)); + "Tx status %4.4x.\n", dev->name, er16(TxSTAT)); if (debug > 1) { printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n", dev->name, ep->dirty_tx, ep->cur_tx); } } - if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */ + if (er16(TxSTAT) & 0x10) { /* Tx FIFO underflow. */ dev->stats.tx_fifo_errors++; - outl(RestartTx, ioaddr + COMMAND); + ew32(COMMAND, RestartTx); } else { epic_restart(dev); - outl(TxQueued, dev->base_addr + COMMAND); + ew32(COMMAND, TxQueued); } dev->trans_start = jiffies; /* prevent tx timeout */ @@ -959,6 +951,7 @@ static void epic_init_ring(struct net_device *dev) static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; int entry, free_count; u32 ctrl_word; unsigned long flags; @@ -999,13 +992,12 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_unlock_irqrestore(&ep->lock, flags); /* Trigger an immediate transmit demand. */ - outl(TxQueued, dev->base_addr + COMMAND); + ew32(COMMAND, TxQueued); if (debug > 4) printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, " - "flag %2.2x Tx status %8.8x.\n", - dev->name, (int)skb->len, entry, ctrl_word, - (int)inl(dev->base_addr + TxSTAT)); + "flag %2.2x Tx status %8.8x.\n", dev->name, skb->len, + entry, ctrl_word, er32(TxSTAT)); return NETDEV_TX_OK; } @@ -1086,18 +1078,17 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance) { struct net_device *dev = dev_instance; struct epic_private *ep = netdev_priv(dev); - long ioaddr = dev->base_addr; + void __iomem *ioaddr = ep->ioaddr; unsigned int handled = 0; int status; - status = inl(ioaddr + INTSTAT); + status = er32(INTSTAT); /* Acknowledge all of the current interrupt sources ASAP. 
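A note on the interrupt path being converted here: epic_interrupt() acknowledges only the EpicNormalEvent bits in hard-irq context, while the Rx/Tx sources stay asserted, get masked via epic_napi_irq_off(), and are finally acknowledged when epic_poll() (further down) writes EpicNapiEvent back to INTSTAT. A hedged sketch of that split, reusing this driver's register and bit names but omitting the napi_lock/reschedule_in_poll bookkeeping:

static irqreturn_t sketch_isr(struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;
	u32 status = er32(INTSTAT);

	/* Ack the events that are fully handled in hard-irq context. */
	ew32(INTSTAT, status & EpicNormalEvent);

	if (status & EpicNapiEvent) {
		/* Leave the NAPI events pending, mask them, and let
		 * epic_poll() ack EpicNapiEvent after the ring work. */
		epic_napi_irq_off(ep->mii.dev, ep);
		napi_schedule(&ep->napi);
	}
	return IRQ_HANDLED;
}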
*/ - outl(status & EpicNormalEvent, ioaddr + INTSTAT); + ew32(INTSTAT, status & EpicNormalEvent); if (debug > 4) { printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new " - "intstat=%#8.8x.\n", dev->name, status, - (int)inl(ioaddr + INTSTAT)); + "intstat=%#8.8x.\n", dev->name, status, er32(INTSTAT)); } if ((status & IntrSummary) == 0) @@ -1118,19 +1109,21 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance) /* Check uncommon events all at once. */ if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) { + struct net_device_stats *stats = &dev->stats; + if (status == EpicRemoved) goto out; /* Always update the error counts to avoid overhead later. */ - dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); - dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); - dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); + stats->rx_missed_errors += er8(MPCNT); + stats->rx_frame_errors += er8(ALICNT); + stats->rx_crc_errors += er8(CRCCNT); if (status & TxUnderrun) { /* Tx FIFO underflow. */ - dev->stats.tx_fifo_errors++; - outl(ep->tx_threshold += 128, ioaddr + TxThresh); + stats->tx_fifo_errors++; + ew32(TxThresh, ep->tx_threshold += 128); /* Restart the transmit process. */ - outl(RestartTx, ioaddr + COMMAND); + ew32(COMMAND, RestartTx); } if (status & PCIBusErr170) { printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n", @@ -1139,7 +1132,7 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance) epic_restart(dev); } /* Clear all error sources. */ - outl(status & 0x7f18, ioaddr + INTSTAT); + ew32(INTSTAT, status & 0x7f18); } out: @@ -1248,17 +1241,17 @@ static int epic_rx(struct net_device *dev, int budget) static void epic_rx_err(struct net_device *dev, struct epic_private *ep) { - long ioaddr = dev->base_addr; + void __iomem *ioaddr = ep->ioaddr; int status; - status = inl(ioaddr + INTSTAT); + status = er32(INTSTAT); if (status == EpicRemoved) return; if (status & RxOverflow) /* Missed a Rx frame. */ dev->stats.rx_errors++; if (status & (RxOverflow | RxFull)) - outw(RxQueued, ioaddr + COMMAND); + ew16(COMMAND, RxQueued); } static int epic_poll(struct napi_struct *napi, int budget) @@ -1266,7 +1259,7 @@ static int epic_poll(struct napi_struct *napi, int budget) struct epic_private *ep = container_of(napi, struct epic_private, napi); struct net_device *dev = ep->mii.dev; int work_done = 0; - long ioaddr = dev->base_addr; + void __iomem *ioaddr = ep->ioaddr; rx_action: @@ -1287,7 +1280,7 @@ rx_action: more = ep->reschedule_in_poll; if (!more) { __napi_complete(napi); - outl(EpicNapiEvent, ioaddr + INTSTAT); + ew32(INTSTAT, EpicNapiEvent); epic_napi_irq_on(dev, ep); } else ep->reschedule_in_poll--; @@ -1303,8 +1296,9 @@ rx_action: static int epic_close(struct net_device *dev) { - long ioaddr = dev->base_addr; struct epic_private *ep = netdev_priv(dev); + struct pci_dev *pdev = ep->pci_dev; + void __iomem *ioaddr = ep->ioaddr; struct sk_buff *skb; int i; @@ -1313,13 +1307,13 @@ static int epic_close(struct net_device *dev) if (debug > 1) printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", - dev->name, (int)inl(ioaddr + INTSTAT)); + dev->name, er32(INTSTAT)); del_timer_sync(&ep->timer); epic_disable_int(dev, ep); - free_irq(dev->irq, dev); + free_irq(pdev->irq, dev); epic_pause(dev); @@ -1330,7 +1324,7 @@ static int epic_close(struct net_device *dev) ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. 
*/ ep->rx_ring[i].buflength = 0; if (skb) { - pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr, + pci_unmap_single(pdev, ep->rx_ring[i].bufaddr, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); dev_kfree_skb(skb); } @@ -1341,26 +1335,28 @@ static int epic_close(struct net_device *dev) ep->tx_skbuff[i] = NULL; if (!skb) continue; - pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr, - skb->len, PCI_DMA_TODEVICE); + pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len, + PCI_DMA_TODEVICE); dev_kfree_skb(skb); } /* Green! Leave the chip in low-power mode. */ - outl(0x0008, ioaddr + GENCTL); + ew32(GENCTL, 0x0008); return 0; } static struct net_device_stats *epic_get_stats(struct net_device *dev) { - long ioaddr = dev->base_addr; + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; if (netif_running(dev)) { - /* Update the error counts. */ - dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); - dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); - dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); + struct net_device_stats *stats = &dev->stats; + + stats->rx_missed_errors += er8(MPCNT); + stats->rx_frame_errors += er8(ALICNT); + stats->rx_crc_errors += er8(CRCCNT); } return &dev->stats; @@ -1373,13 +1369,13 @@ static struct net_device_stats *epic_get_stats(struct net_device *dev) static void set_rx_mode(struct net_device *dev) { - long ioaddr = dev->base_addr; struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; unsigned char mc_filter[8]; /* Multicast hash filter */ int i; if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ - outl(0x002C, ioaddr + RxCtrl); + ew32(RxCtrl, 0x002c); /* Unconditionally log net taps. */ memset(mc_filter, 0xff, sizeof(mc_filter)); } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) { @@ -1387,9 +1383,9 @@ static void set_rx_mode(struct net_device *dev) is never enabled. */ /* Too many to filter perfectly -- accept all multicasts. */ memset(mc_filter, 0xff, sizeof(mc_filter)); - outl(0x000C, ioaddr + RxCtrl); + ew32(RxCtrl, 0x000c); } else if (netdev_mc_empty(dev)) { - outl(0x0004, ioaddr + RxCtrl); + ew32(RxCtrl, 0x0004); return; } else { /* Never executed, for now. */ struct netdev_hw_addr *ha; @@ -1404,7 +1400,7 @@ static void set_rx_mode(struct net_device *dev) /* ToDo: perhaps we need to stop the Tx and Rx process here? */ if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) { for (i = 0; i < 4; i++) - outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4); + ew16(MC0 + i*4, ((u16 *)mc_filter)[i]); memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter)); } } @@ -1466,22 +1462,26 @@ static void netdev_set_msglevel(struct net_device *dev, u32 value) static int ethtool_begin(struct net_device *dev) { - unsigned long ioaddr = dev->base_addr; + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; + /* power-up, if interface is down */ - if (! netif_running(dev)) { - outl(0x0200, ioaddr + GENCTL); - outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); + if (!netif_running(dev)) { + ew32(GENCTL, 0x0200); + ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800); } return 0; } static void ethtool_complete(struct net_device *dev) { - unsigned long ioaddr = dev->base_addr; + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; + /* power-down, if interface is down */ - if (! 
netif_running(dev)) { - outl(0x0008, ioaddr + GENCTL); - outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL); + if (!netif_running(dev)) { + ew32(GENCTL, 0x0008); + ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000); } } @@ -1500,14 +1500,14 @@ static const struct ethtool_ops netdev_ethtool_ops = { static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct epic_private *np = netdev_priv(dev); - long ioaddr = dev->base_addr; + void __iomem *ioaddr = np->ioaddr; struct mii_ioctl_data *data = if_mii(rq); int rc; /* power-up, if interface is down */ if (! netif_running(dev)) { - outl(0x0200, ioaddr + GENCTL); - outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); + ew32(GENCTL, 0x0200); + ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800); } /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */ @@ -1517,14 +1517,14 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) /* power-down, if interface is down */ if (! netif_running(dev)) { - outl(0x0008, ioaddr + GENCTL); - outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL); + ew32(GENCTL, 0x0008); + ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000); } return rc; } -static void __devexit epic_remove_one (struct pci_dev *pdev) +static void __devexit epic_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct epic_private *ep = netdev_priv(dev); @@ -1532,9 +1532,7 @@ static void __devexit epic_remove_one (struct pci_dev *pdev) pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); unregister_netdev(dev); -#ifndef USE_IO_OPS - iounmap((void*) dev->base_addr); -#endif + pci_iounmap(pdev, ep->ioaddr); pci_release_regions(pdev); free_netdev(dev); pci_disable_device(pdev); @@ -1548,13 +1546,14 @@ static void __devexit epic_remove_one (struct pci_dev *pdev) static int epic_suspend (struct pci_dev *pdev, pm_message_t state) { struct net_device *dev = pci_get_drvdata(pdev); - long ioaddr = dev->base_addr; + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; if (!netif_running(dev)) return 0; epic_pause(dev); /* Put the chip into low-power mode. 
*/ - outl(0x0008, ioaddr + GENCTL); + ew32(GENCTL, 0x0008); /* pci_power_off(pdev, -1); */ return 0; } diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index cd3defb11ff..dab9c6f671e 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2066,6 +2066,7 @@ static const struct ethtool_ops smsc911x_ethtool_ops = { .get_eeprom_len = smsc911x_ethtool_get_eeprom_len, .get_eeprom = smsc911x_ethtool_get_eeprom, .set_eeprom = smsc911x_ethtool_set_eeprom, + .get_ts_info = ethtool_op_get_ts_info, }; static const struct net_device_ops smsc911x_netdev_ops = { diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index 38386478532..fd33b21f6c9 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -54,7 +54,7 @@ struct smsc9420_ring_info { }; struct smsc9420_pdata { - void __iomem *base_addr; + void __iomem *ioaddr; struct pci_dev *pdev; struct net_device *dev; @@ -114,13 +114,13 @@ do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \ static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset) { - return ioread32(pd->base_addr + offset); + return ioread32(pd->ioaddr + offset); } static inline void smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset, u32 value) { - iowrite32(value, pd->base_addr + offset); + iowrite32(value, pd->ioaddr + offset); } static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd) @@ -469,6 +469,7 @@ static const struct ethtool_ops smsc9420_ethtool_ops = { .set_eeprom = smsc9420_ethtool_set_eeprom, .get_regs_len = smsc9420_ethtool_getregslen, .get_regs = smsc9420_ethtool_getregs, + .get_ts_info = ethtool_op_get_ts_info, }; /* Sets the device MAC address to dev_addr */ @@ -659,7 +660,7 @@ static irqreturn_t smsc9420_isr(int irq, void *dev_id) ulong flags; BUG_ON(!pd); - BUG_ON(!pd->base_addr); + BUG_ON(!pd->ioaddr); int_cfg = smsc9420_reg_read(pd, INT_CFG); @@ -720,9 +721,12 @@ static irqreturn_t smsc9420_isr(int irq, void *dev_id) #ifdef CONFIG_NET_POLL_CONTROLLER static void smsc9420_poll_controller(struct net_device *dev) { - disable_irq(dev->irq); + struct smsc9420_pdata *pd = netdev_priv(dev); + const int irq = pd->pdev->irq; + + disable_irq(irq); smsc9420_isr(0, dev); - enable_irq(dev->irq); + enable_irq(irq); } #endif /* CONFIG_NET_POLL_CONTROLLER */ @@ -759,7 +763,7 @@ static int smsc9420_stop(struct net_device *dev) smsc9420_stop_rx(pd); smsc9420_free_rx_ring(pd); - free_irq(dev->irq, pd); + free_irq(pd->pdev->irq, pd); smsc9420_dmac_soft_reset(pd); @@ -1331,15 +1335,12 @@ out: static int smsc9420_open(struct net_device *dev) { - struct smsc9420_pdata *pd; + struct smsc9420_pdata *pd = netdev_priv(dev); u32 bus_mode, mac_cr, dmac_control, int_cfg, dma_intr_ena, int_ctl; + const int irq = pd->pdev->irq; unsigned long flags; int result = 0, timeout; - BUG_ON(!dev); - pd = netdev_priv(dev); - BUG_ON(!pd); - if (!is_valid_ether_addr(dev->dev_addr)) { smsc_warn(IFUP, "dev_addr is not a valid MAC address"); result = -EADDRNOTAVAIL; @@ -1358,9 +1359,10 @@ static int smsc9420_open(struct net_device *dev) smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF); smsc9420_pci_flush_write(pd); - if (request_irq(dev->irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED, - DRV_NAME, pd)) { - smsc_warn(IFUP, "Unable to use IRQ = %d", dev->irq); + result = request_irq(irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED, + DRV_NAME, pd); + if (result) { + smsc_warn(IFUP, "Unable to use IRQ = %d", irq); result = -ENODEV; goto 
out_0; } @@ -1395,7 +1397,7 @@ static int smsc9420_open(struct net_device *dev) smsc9420_pci_flush_write(pd); /* test the IRQ connection to the ISR */ - smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq); + smsc_dbg(IFUP, "Testing ISR using IRQ %d", irq); pd->software_irq_signal = false; spin_lock_irqsave(&pd->int_lock, flags); @@ -1430,7 +1432,7 @@ static int smsc9420_open(struct net_device *dev) goto out_free_irq_1; } - smsc_dbg(IFUP, "ISR passed test using IRQ %d", dev->irq); + smsc_dbg(IFUP, "ISR passed test using IRQ %d", irq); result = smsc9420_alloc_tx_ring(pd); if (result) { @@ -1490,7 +1492,7 @@ out_free_rx_ring_3: out_free_tx_ring_2: smsc9420_free_tx_ring(pd); out_free_irq_1: - free_irq(dev->irq, pd); + free_irq(irq, pd); out_0: return result; } @@ -1519,7 +1521,7 @@ static int smsc9420_suspend(struct pci_dev *pdev, pm_message_t state) smsc9420_stop_rx(pd); smsc9420_free_rx_ring(pd); - free_irq(dev->irq, pd); + free_irq(pd->pdev->irq, pd); netif_device_detach(dev); } @@ -1552,6 +1554,7 @@ static int smsc9420_resume(struct pci_dev *pdev) smsc_warn(IFUP, "pci_enable_wake failed: %d", err); if (netif_running(dev)) { + /* FIXME: gross. It looks like ancient PM relic.*/ err = smsc9420_open(dev); netif_device_attach(dev); } @@ -1625,8 +1628,6 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* registers are double mapped with 0 offset for LE and 0x200 for BE */ virt_addr += LAN9420_CPSR_ENDIAN_OFFSET; - dev->base_addr = (ulong)virt_addr; - pd = netdev_priv(dev); /* pci descriptors are created in the PCI consistent area */ @@ -1646,7 +1647,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id) pd->pdev = pdev; pd->dev = dev; - pd->base_addr = virt_addr; + pd->ioaddr = virt_addr; pd->msg_enable = smsc_debug; pd->rx_csum = true; @@ -1669,7 +1670,6 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev->netdev_ops = &smsc9420_netdev_ops; dev->ethtool_ops = &smsc9420_ethtool_ops; - dev->irq = pdev->irq; netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT); @@ -1727,7 +1727,7 @@ static void __devexit smsc9420_remove(struct pci_dev *pdev) pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) * (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr); - iounmap(pd->base_addr - LAN9420_CPSR_ENDIAN_OFFSET); + iounmap(pd->ioaddr - LAN9420_CPSR_ENDIAN_OFFSET); pci_release_regions(pdev); free_netdev(dev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 0319d640f72..bcd54d6e94f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -97,6 +97,16 @@ struct stmmac_extra_stats { unsigned long normal_irq_n; }; +/* CSR Frequency Access Defines*/ +#define CSR_F_35M 35000000 +#define CSR_F_60M 60000000 +#define CSR_F_100M 100000000 +#define CSR_F_150M 150000000 +#define CSR_F_250M 250000000 +#define CSR_F_300M 300000000 + +#define MAC_CSR_H_FRQ_MASK 0x20 + #define HASH_TABLE_SIZE 64 #define PAUSE_TIME 0x200 @@ -137,6 +147,7 @@ struct stmmac_extra_stats { #define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */ #define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN Insertion */ #define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY interface */ +#define DEFAULT_DMA_PBL 8 enum rx_frame_status { /* IPC status */ good_frame = 0, @@ -228,7 +239,7 @@ struct stmmac_desc_ops { int (*get_rx_owner) (struct dma_desc *p); void (*set_rx_owner) (struct dma_desc *p); /* 
Get the receive frame size */ - int (*get_rx_frame_len) (struct dma_desc *p); + int (*get_rx_frame_len) (struct dma_desc *p, int rx_coe_type); /* Return the reception status looking at the RDES1 */ int (*rx_status) (void *data, struct stmmac_extra_stats *x, struct dma_desc *p); @@ -236,7 +247,8 @@ struct stmmac_desc_ops { struct stmmac_dma_ops { /* DMA core initialization */ - int (*init) (void __iomem *ioaddr, int pbl, u32 dma_tx, u32 dma_rx); + int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb, + int burst_len, u32 dma_tx, u32 dma_rx); /* Dump DMA registers */ void (*dump_regs) (void __iomem *ioaddr); /* Set tx/rx threshold in the csr6 register @@ -261,14 +273,14 @@ struct stmmac_dma_ops { struct stmmac_ops { /* MAC core initialization */ void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned; - /* Support checksum offload engine */ - int (*rx_coe) (void __iomem *ioaddr); + /* Enable and verify that the IPC module is supported */ + int (*rx_ipc) (void __iomem *ioaddr); /* Dump MAC registers */ void (*dump_regs) (void __iomem *ioaddr); /* Handle extra events on specific interrupts hw dependent */ void (*host_irq_status) (void __iomem *ioaddr); /* Multicast filter setting */ - void (*set_filter) (struct net_device *dev); + void (*set_filter) (struct net_device *dev, int id); /* Flow control setting */ void (*flow_ctrl) (void __iomem *ioaddr, unsigned int duplex, unsigned int fc, unsigned int pause_time); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index cfcef0ea0fa..23478bf4ed7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -61,9 +61,11 @@ enum power_event { }; /* GMAC HW ADDR regs */ -#define GMAC_ADDR_HIGH(reg) (0x00000040+(reg * 8)) -#define GMAC_ADDR_LOW(reg) (0x00000044+(reg * 8)) -#define GMAC_MAX_UNICAST_ADDRESSES 16 +#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \ + (reg * 8)) +#define GMAC_ADDR_LOW(reg) (((reg > 15) ? 
0x00000804 : 0x00000044) + \ + (reg * 8)) +#define GMAC_MAX_PERFECT_ADDRESSES 32 #define GMAC_AN_CTRL 0x000000c0 /* AN control */ #define GMAC_AN_STATUS 0x000000c4 /* AN status */ @@ -139,10 +141,11 @@ enum rx_tx_priority_ratio { }; #define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */ +#define DMA_BUS_MODE_MB 0x04000000 /* Mixed burst */ #define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */ #define DMA_BUS_MODE_RPBL_SHIFT 17 #define DMA_BUS_MODE_USP 0x00800000 -#define DMA_BUS_MODE_4PBL 0x01000000 +#define DMA_BUS_MODE_PBL 0x01000000 #define DMA_BUS_MODE_AAL 0x02000000 /* DMA CRS Control and Status Register Mapping */ @@ -205,4 +208,7 @@ enum rtc_control { #define GMAC_MMC_TX_INTR 0x108 #define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 +/* Synopsys Core versions */ +#define DWMAC_CORE_3_40 34 + extern const struct stmmac_dma_ops dwmac1000_dma_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index b1c48b97594..b5e4d02f15c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -46,7 +46,7 @@ static void dwmac1000_core_init(void __iomem *ioaddr) #endif } -static int dwmac1000_rx_coe_supported(void __iomem *ioaddr) +static int dwmac1000_rx_ipc_enable(void __iomem *ioaddr) { u32 value = readl(ioaddr + GMAC_CONTROL); @@ -84,10 +84,11 @@ static void dwmac1000_get_umac_addr(void __iomem *ioaddr, unsigned char *addr, GMAC_ADDR_LOW(reg_n)); } -static void dwmac1000_set_filter(struct net_device *dev) +static void dwmac1000_set_filter(struct net_device *dev, int id) { void __iomem *ioaddr = (void __iomem *) dev->base_addr; unsigned int value = 0; + unsigned int perfect_addr_number; CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n", __func__, netdev_mc_count(dev), netdev_uc_count(dev)); @@ -121,8 +122,14 @@ static void dwmac1000_set_filter(struct net_device *dev) writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH); } + /* Extra 16 regs are available in cores newer than the 3.40. 
*/ + if (id > DWMAC_CORE_3_40) + perfect_addr_number = GMAC_MAX_PERFECT_ADDRESSES; + else + perfect_addr_number = GMAC_MAX_PERFECT_ADDRESSES / 2; + /* Handle multiple unicast addresses (perfect filtering)*/ - if (netdev_uc_count(dev) > GMAC_MAX_UNICAST_ADDRESSES) + if (netdev_uc_count(dev) > perfect_addr_number) /* Switch to promiscuous mode is more than 16 addrs are required */ value |= GMAC_FRAME_FILTER_PR; @@ -211,7 +218,7 @@ static void dwmac1000_irq_status(void __iomem *ioaddr) static const struct stmmac_ops dwmac1000_ops = { .core_init = dwmac1000_core_init, - .rx_coe = dwmac1000_rx_coe_supported, + .rx_ipc = dwmac1000_rx_ipc_enable, .dump_regs = dwmac1000_dump_regs, .host_irq_status = dwmac1000_irq_status, .set_filter = dwmac1000_set_filter, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 4d5402a1d26..033500090f5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -30,8 +30,8 @@ #include "dwmac1000.h" #include "dwmac_dma.h" -static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx, - u32 dma_rx) +static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, + int mb, int burst_len, u32 dma_tx, u32 dma_rx) { u32 value = readl(ioaddr + DMA_BUS_MODE); int limit; @@ -48,15 +48,51 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx, if (limit < 0) return -EBUSY; - value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL | - ((pbl << DMA_BUS_MODE_PBL_SHIFT) | - (pbl << DMA_BUS_MODE_RPBL_SHIFT)); + /* + * Set the DMA PBL (Programmable Burst Length) mode + * Before stmmac core 3.50 this mode bit was 4xPBL, and + * post 3.5 mode bit acts as 8*PBL. + * For core rev < 3.5, when the core is set for 4xPBL mode, the + * DMA transfers the data in 4, 8, 16, 32, 64 & 128 beats + * depending on pbl value. + * For core rev > 3.5, when the core is set for 8xPBL mode, the + * DMA transfers the data in 8, 16, 32, 64, 128 & 256 beats + * depending on pbl value. + */ + value = DMA_BUS_MODE_PBL | ((pbl << DMA_BUS_MODE_PBL_SHIFT) | + (pbl << DMA_BUS_MODE_RPBL_SHIFT)); + + /* Set the Fixed burst mode */ + if (fb) + value |= DMA_BUS_MODE_FB; + + /* Mixed Burst has no effect when fb is set */ + if (mb) + value |= DMA_BUS_MODE_MB; #ifdef CONFIG_STMMAC_DA value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */ #endif writel(value, ioaddr + DMA_BUS_MODE); + /* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE + * for supported bursts. + * + * Note: This is applicable only for revision GMACv3.61a. For + * older version this register is reserved and shall have no + * effect. + * + * Note: + * For Fixed Burst Mode: if we directly write 0xFF to this + * register using the configurations pass from platform code, + * this would ensure that all bursts supported by core are set + * and those which are not supported would remain ineffective. + * + * For Non Fixed Burst Mode: provide the maximum value of the + * burst length. Any burst equal or below the provided burst + * length would be allowed to perform. 
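As a worked example of the PBL and AXI notes above (beat counts taken from the comment, assumed rather than re-verified against the databook): with pbl = 8, a pre-3.50 core treats DMA_BUS_MODE_PBL as 4xPBL and may burst up to 32 beats, a 3.50 or newer core treats the same bit as 8xPBL and may burst up to 64, and the AXI register is then either opened up completely (0xFF under fixed burst) or capped at the platform-provided burst_len. A compact sketch of how the two register values are composed, using the macro names from dwmac1000.h and dwmac_dma.h:

#include <linux/io.h>
#include "dwmac1000.h"
#include "dwmac_dma.h"

static void sketch_dma_bus_mode(void __iomem *ioaddr, int pbl, int fb, int mb,
				int burst_len)
{
	u32 mode = DMA_BUS_MODE_PBL |			/* 8xPBL on >= 3.50 cores, 4xPBL before */
		   (pbl << DMA_BUS_MODE_PBL_SHIFT) |	/* Tx programmable burst length */
		   (pbl << DMA_BUS_MODE_RPBL_SHIFT);	/* Rx programmable burst length */

	if (fb)
		mode |= DMA_BUS_MODE_FB;	/* only the programmed burst lengths are used */
	if (mb)
		mode |= DMA_BUS_MODE_MB;	/* mixed burst; no effect once FB is set */

	writel(mode, ioaddr + DMA_BUS_MODE);

	/* GMACv3.61a AXI bus mode: platform code passes 0xFF for fixed burst
	 * (all supported lengths) or a cap such as DMA_AXI_BLEN_256 otherwise;
	 * the register is reserved, and the write harmless, on older cores. */
	writel(burst_len, ioaddr + DMA_AXI_BUS_MODE);
}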
*/ + writel(burst_len, ioaddr + DMA_AXI_BUS_MODE); + /* Mask interrupts by writing to CSR7 */ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c index 138fb8dd1e8..19e0f4eed2b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -43,11 +43,6 @@ static void dwmac100_core_init(void __iomem *ioaddr) #endif } -static int dwmac100_rx_coe_supported(void __iomem *ioaddr) -{ - return 0; -} - static void dwmac100_dump_mac_regs(void __iomem *ioaddr) { pr_info("\t----------------------------------------------\n" @@ -72,6 +67,11 @@ static void dwmac100_dump_mac_regs(void __iomem *ioaddr) readl(ioaddr + MAC_VLAN2)); } +static int dwmac100_rx_ipc_enable(void __iomem *ioaddr) +{ + return 0; +} + static void dwmac100_irq_status(void __iomem *ioaddr) { return; @@ -89,7 +89,7 @@ static void dwmac100_get_umac_addr(void __iomem *ioaddr, unsigned char *addr, stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); } -static void dwmac100_set_filter(struct net_device *dev) +static void dwmac100_set_filter(struct net_device *dev, int id) { void __iomem *ioaddr = (void __iomem *) dev->base_addr; u32 value = readl(ioaddr + MAC_CONTROL); @@ -160,7 +160,7 @@ static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode) static const struct stmmac_ops dwmac100_ops = { .core_init = dwmac100_core_init, - .rx_coe = dwmac100_rx_coe_supported, + .rx_ipc = dwmac100_rx_ipc_enable, .dump_regs = dwmac100_dump_mac_regs, .host_irq_status = dwmac100_irq_status, .set_filter = dwmac100_set_filter, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index bc17fd08b55..c2b4d55a79b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -32,8 +32,8 @@ #include "dwmac100.h" #include "dwmac_dma.h" -static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx, - u32 dma_rx) +static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, + int mb, int burst_len, u32 dma_tx, u32 dma_rx) { u32 value = readl(ioaddr + DMA_BUS_MODE); int limit; @@ -52,7 +52,7 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx, /* Enable Application Access by writing to DMA CSR0 */ writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT), - ioaddr + DMA_BUS_MODE); + ioaddr + DMA_BUS_MODE); /* Mask interrupts by writing to CSR7 */ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index 437edacd602..6e0360f9cfd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -32,6 +32,7 @@ #define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */ #define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */ #define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */ +#define DMA_AXI_BUS_MODE 0x00001028 /* AXI Bus Mode */ #define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */ #define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */ #define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index f20aa12931d..4e0e18a44fc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -31,6 +31,8 @@ #define DWMAC_LIB_DBG(fmt, args...) do { } while (0) #endif +#define GMAC_HI_REG_AE 0x80000000 + /* CSR1 enables the transmit DMA to check for new descriptor */ void dwmac_enable_dma_transmission(void __iomem *ioaddr) { @@ -233,7 +235,11 @@ void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], unsigned long data; data = (addr[5] << 8) | addr[4]; - writel(data, ioaddr + high); + /* For MAC Addr registers se have to set the Address Enable (AE) + * bit that has no effect on the High Reg 0 where the bit 31 (MO) + * is RO. + */ + writel(data | GMAC_HI_REG_AE, ioaddr + high); data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; writel(data, ioaddr + low); } diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index ad1b627f8ec..2fc8ef95f97 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -22,6 +22,7 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#include <linux/stmmac.h> #include "common.h" #include "descs_com.h" @@ -309,9 +310,17 @@ static void enh_desc_close_tx_desc(struct dma_desc *p) p->des01.etx.interrupt = 1; } -static int enh_desc_get_rx_frame_len(struct dma_desc *p) +static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) { - return p->des01.erx.frame_length; + /* The type-1 checksum offload engines append the checksum at + * the end of frame and the two bytes of checksum are added in + * the length. + * Adjust for that in the framelen for type-1 checksum offload + * engines. */ + if (rx_coe_type == STMMAC_RX_COE_TYPE1) + return p->des01.erx.frame_length - 2; + else + return p->des01.erx.frame_length; } const struct stmmac_desc_ops enh_desc_ops = { diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 25953bb45a7..68962c549a2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -22,6 +22,7 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#include <linux/stmmac.h> #include "common.h" #include "descs_com.h" @@ -201,9 +202,17 @@ static void ndesc_close_tx_desc(struct dma_desc *p) p->des01.tx.interrupt = 1; } -static int ndesc_get_rx_frame_len(struct dma_desc *p) +static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) { - return p->des01.rx.frame_length; + /* The type-1 checksum offload engines append the checksum at + * the end of frame and the two bytes of checksum are added in + * the length. + * Adjust for that in the framelen for type-1 checksum offload + * engines. 
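A short worked example of the adjustment described above (byte counts assumed for illustration): with a type-1 engine, a frame that is 60 bytes on the wire is reported by the descriptor as 62, because the engine appends its two checksum bytes and counts them in the length, so the driver returns 60; type-2 engines and the no-COE case report the true length and pass through unchanged. The per-descriptor logic could equally be written as a tiny helper:

#include <linux/stmmac.h>

/* Illustrative helper, equivalent to the rx_coe_type checks above. */
static inline int rx_frame_len_adjust(int desc_len, int rx_coe_type)
{
	/* Type-1 COE appends 2 checksum bytes that are included in the
	 * descriptor length; strip them before handing the frame up. */
	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
		return desc_len - 2;
	return desc_len;
}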
*/ + if (rx_coe_type == STMMAC_RX_COE_TYPE1) + return p->des01.rx.frame_length - 2; + else + return p->des01.rx.frame_length; } const struct stmmac_desc_ops ndesc_ops = { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index b4b095fdcf2..6b5d060ee9d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -21,7 +21,9 @@ *******************************************************************************/ #define STMMAC_RESOURCE_NAME "stmmaceth" -#define DRV_MODULE_VERSION "Feb_2012" +#define DRV_MODULE_VERSION "March_2012" + +#include <linux/clk.h> #include <linux/stmmac.h> #include <linux/phy.h> #include "common.h" @@ -56,8 +58,6 @@ struct stmmac_priv { struct stmmac_extra_stats xstats; struct napi_struct napi; - - int rx_coe; int no_csum_insertion; struct phy_device *phydev; @@ -81,6 +81,11 @@ struct stmmac_priv { struct stmmac_counters mmc; struct dma_features dma_cap; int hw_cap_support; +#ifdef CONFIG_HAVE_CLK + struct clk *stmmac_clk; +#endif + int clk_csr; + int synopsys_id; }; extern int phyaddr; @@ -99,3 +104,42 @@ int stmmac_dvr_remove(struct net_device *ndev); struct stmmac_priv *stmmac_dvr_probe(struct device *device, struct plat_stmmacenet_data *plat_dat, void __iomem *addr); + +#ifdef CONFIG_HAVE_CLK +static inline int stmmac_clk_enable(struct stmmac_priv *priv) +{ + if (!IS_ERR(priv->stmmac_clk)) + return clk_enable(priv->stmmac_clk); + + return 0; +} + +static inline void stmmac_clk_disable(struct stmmac_priv *priv) +{ + if (IS_ERR(priv->stmmac_clk)) + return; + + clk_disable(priv->stmmac_clk); +} +static inline int stmmac_clk_get(struct stmmac_priv *priv) +{ + priv->stmmac_clk = clk_get(priv->device, NULL); + + if (IS_ERR(priv->stmmac_clk)) + return PTR_ERR(priv->stmmac_clk); + + return 0; +} +#else +static inline int stmmac_clk_enable(struct stmmac_priv *priv) +{ + return 0; +} +static inline void stmmac_clk_disable(struct stmmac_priv *priv) +{ +} +static inline int stmmac_clk_get(struct stmmac_priv *priv) +{ + return 0; +} +#endif /* CONFIG_HAVE_CLK */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index f98e1511660..ce431846fc6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -481,6 +481,7 @@ static const struct ethtool_ops stmmac_ethtool_ops = { .get_wol = stmmac_get_wol, .set_wol = stmmac_set_wol, .get_sset_count = stmmac_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, }; void stmmac_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 48d56da62f0..70966330f44 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -163,6 +163,38 @@ static void stmmac_verify_args(void) pause = PAUSE_TIME; } +static void stmmac_clk_csr_set(struct stmmac_priv *priv) +{ +#ifdef CONFIG_HAVE_CLK + u32 clk_rate; + + if (IS_ERR(priv->stmmac_clk)) + return; + + clk_rate = clk_get_rate(priv->stmmac_clk); + + /* Platform provided default clk_csr would be assumed valid + * for all other cases except for the below mentioned ones. 
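For the CSR clock selection being introduced here, a worked example may help (the 75 MHz rate is assumed for illustration): 75 MHz satisfies CSR_F_60M <= rate < CSR_F_100M, so clk_csr becomes STMMAC_CSR_60_100M, and the MDIO code further down folds that value into the MII address register as ((priv->clk_csr & 0xF) << 2) to pick the MDC divider. A minimal sketch of just that range check:

#include <linux/errno.h>
#include <linux/stmmac.h>
#include "common.h"		/* CSR_F_* frequency bounds added above */

static int csr_for_rate(unsigned long rate)
{
	if (rate >= CSR_F_60M && rate < CSR_F_100M)	/* e.g. 75000000 */
		return STMMAC_CSR_60_100M;
	return -EINVAL;		/* other ranges handled as in stmmac_clk_csr_set() */
}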
*/ + if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { + if (clk_rate < CSR_F_35M) + priv->clk_csr = STMMAC_CSR_20_35M; + else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M)) + priv->clk_csr = STMMAC_CSR_35_60M; + else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M)) + priv->clk_csr = STMMAC_CSR_60_100M; + else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M)) + priv->clk_csr = STMMAC_CSR_100_150M; + else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) + priv->clk_csr = STMMAC_CSR_150_250M; + else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) + priv->clk_csr = STMMAC_CSR_250_300M; + } /* For values higher than the IEEE 802.3 specified frequency + * we can not estimate the proper divider as it is not known + * the frequency of clk_csr_i. So we do not change the default + * divider. */ +#endif +} + #if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG) static void print_pkt(unsigned char *buf, int len) { @@ -307,7 +339,13 @@ static int stmmac_init_phy(struct net_device *dev) priv->speed = 0; priv->oldduplex = -1; - snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", priv->plat->bus_id); + if (priv->plat->phy_bus_name) + snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x", + priv->plat->phy_bus_name, priv->plat->bus_id); + else + snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", + priv->plat->bus_id); + snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, priv->plat->phy_addr); pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id); @@ -884,6 +922,26 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv) priv->dev->dev_addr); } +static int stmmac_init_dma_engine(struct stmmac_priv *priv) +{ + int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0; + int mixed_burst = 0; + + /* Some DMA parameters can be passed from the platform; + * in case of these are not passed we keep a default + * (good for all the chips) and init the DMA! */ + if (priv->plat->dma_cfg) { + pbl = priv->plat->dma_cfg->pbl; + fixed_burst = priv->plat->dma_cfg->fixed_burst; + mixed_burst = priv->plat->dma_cfg->mixed_burst; + burst_len = priv->plat->dma_cfg->burst_len; + } + + return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst, + burst_len, priv->dma_tx_phy, + priv->dma_rx_phy); +} + /** * stmmac_open - open entry point of the driver * @dev : pointer to the device structure. 
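The new stmmac_init_dma_engine() above takes its knobs from an optional platform-supplied struct stmmac_dma_cfg and falls back to DEFAULT_DMA_PBL with zeroed burst settings when none is given. A hedged sketch of how board support code might fill it in; the field values below are examples that mirror the stmmac_pci.c defaults later in this patch rather than anything the driver mandates:

#include <linux/stmmac.h>

static struct stmmac_dma_cfg board_dma_cfg = {
	.pbl		= 32,			/* programmable burst length */
	.fixed_burst	= 0,			/* set to 1 to restrict to programmed lengths */
	.mixed_burst	= 0,			/* only meaningful while fixed_burst is 0 */
	.burst_len	= DMA_AXI_BLEN_256,	/* AXI cap, honoured on GMAC 3.61a */
};

static struct plat_stmmacenet_data board_plat_dat = {
	.dma_cfg	= &board_dma_cfg,	/* consumed by stmmac_init_dma_engine() */
	/* remaining fields (bus_id, phy_addr, interface, ...) as before */
};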
@@ -898,16 +956,6 @@ static int stmmac_open(struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); int ret; - stmmac_check_ether_addr(priv); - - /* MDIO bus Registration */ - ret = stmmac_mdio_register(dev); - if (ret < 0) { - pr_debug("%s: MDIO bus (id: %d) registration failed", - __func__, priv->plat->bus_id); - return ret; - } - #ifdef CONFIG_STMMAC_TIMER priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL); if (unlikely(priv->tm == NULL)) @@ -925,6 +973,10 @@ static int stmmac_open(struct net_device *dev) } else priv->tm->enable = 1; #endif + stmmac_clk_enable(priv); + + stmmac_check_ether_addr(priv); + ret = stmmac_init_phy(dev); if (unlikely(ret)) { pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret); @@ -938,8 +990,7 @@ static int stmmac_open(struct net_device *dev) init_dma_desc_rings(dev); /* DMA initialization and SW reset */ - ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl, - priv->dma_tx_phy, priv->dma_rx_phy); + ret = stmmac_init_dma_engine(priv); if (ret < 0) { pr_err("%s: DMA initialization failed\n", __func__); goto open_error; @@ -1026,6 +1077,8 @@ open_error: if (priv->phydev) phy_disconnect(priv->phydev); + stmmac_clk_disable(priv); + return ret; } @@ -1077,7 +1130,7 @@ static int stmmac_release(struct net_device *dev) #ifdef CONFIG_STMMAC_DEBUG_FS stmmac_exit_fs(); #endif - stmmac_mdio_unregister(dev); + stmmac_clk_disable(priv); return 0; } @@ -1276,7 +1329,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) struct sk_buff *skb; int frame_len; - frame_len = priv->hw->desc->get_rx_frame_len(p); + frame_len = priv->hw->desc->get_rx_frame_len(p, + priv->plat->rx_coe); /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 * Type frames (LLC/LLC-SNAP) */ if (unlikely(status != llc_snap)) @@ -1312,7 +1366,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) #endif skb->protocol = eth_type_trans(skb, priv->dev); - if (unlikely(!priv->rx_coe)) { + if (unlikely(!priv->plat->rx_coe)) { /* No RX COE for old mac10/100 devices */ skb_checksum_none_assert(skb); netif_receive_skb(skb); @@ -1413,7 +1467,7 @@ static void stmmac_set_rx_mode(struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); spin_lock(&priv->lock); - priv->hw->mac->set_filter(dev); + priv->hw->mac->set_filter(dev, priv->synopsys_id); spin_unlock(&priv->lock); } @@ -1459,8 +1513,10 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev, { struct stmmac_priv *priv = netdev_priv(dev); - if (!priv->rx_coe) + if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) features &= ~NETIF_F_RXCSUM; + else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1) + features &= ~NETIF_F_IPV6_CSUM; if (!priv->plat->tx_coe) features &= ~NETIF_F_ALL_CSUM; @@ -1584,7 +1640,7 @@ static const struct file_operations stmmac_rings_status_fops = { .open = stmmac_sysfs_ring_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = single_release, }; static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v) @@ -1656,7 +1712,7 @@ static const struct file_operations stmmac_dma_cap_fops = { .open = stmmac_sysfs_dma_cap_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = single_release, }; static int stmmac_init_fs(struct net_device *dev) @@ -1752,7 +1808,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv) priv->hw->ring = &ring_mode_ops; /* Get and dump the chip ID */ - stmmac_get_synopsys_id(priv); + priv->synopsys_id = stmmac_get_synopsys_id(priv); /* Get the HW capability (new GMAC newer than 
3.50a) */ priv->hw_cap_support = stmmac_get_hw_features(priv); @@ -1765,17 +1821,32 @@ static int stmmac_hw_init(struct stmmac_priv *priv) * register (if supported). */ priv->plat->enh_desc = priv->dma_cap.enh_desc; - priv->plat->tx_coe = priv->dma_cap.tx_coe; priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; + + priv->plat->tx_coe = priv->dma_cap.tx_coe; + + if (priv->dma_cap.rx_coe_type2) + priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; + else if (priv->dma_cap.rx_coe_type1) + priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; + } else pr_info(" No HW DMA feature register supported"); /* Select the enhnaced/normal descriptor structures */ stmmac_selec_desc_mode(priv); - priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr); - if (priv->rx_coe) - pr_info(" RX Checksum Offload Engine supported\n"); + /* Enable the IPC (Checksum Offload) and check if the feature has been + * enabled during the core configuration. */ + ret = priv->hw->mac->rx_ipc(priv->ioaddr); + if (!ret) { + pr_warning(" RX IPC Checksum Offload not configured.\n"); + priv->plat->rx_coe = STMMAC_RX_COE_NONE; + } + + if (priv->plat->rx_coe) + pr_info(" RX Checksum Offload Engine supported (type %d)\n", + priv->plat->rx_coe); if (priv->plat->tx_coe) pr_info(" TX Checksum insertion supported\n"); @@ -1856,6 +1927,28 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, goto error; } + if (stmmac_clk_get(priv)) + pr_warning("%s: warning: cannot get CSR clock\n", __func__); + + /* If a specific clk_csr value is passed from the platform + * this means that the CSR Clock Range selection cannot be + * changed at run-time and it is fixed. Viceversa the driver'll try to + * set the MDC clock dynamically according to the csr actual + * clock input. + */ + if (!priv->plat->clk_csr) + stmmac_clk_csr_set(priv); + else + priv->clk_csr = priv->plat->clk_csr; + + /* MDIO bus Registration */ + ret = stmmac_mdio_register(ndev); + if (ret < 0) { + pr_debug("%s: MDIO bus (id: %d) registration failed", + __func__, priv->plat->bus_id); + goto error; + } + return priv; error: @@ -1883,6 +1976,7 @@ int stmmac_dvr_remove(struct net_device *ndev) priv->hw->dma->stop_tx(priv->ioaddr); stmmac_set_mac(priv->ioaddr, false); + stmmac_mdio_unregister(ndev); netif_carrier_off(ndev); unregister_netdev(ndev); free_netdev(ndev); @@ -1895,6 +1989,7 @@ int stmmac_suspend(struct net_device *ndev) { struct stmmac_priv *priv = netdev_priv(ndev); int dis_ic = 0; + unsigned long flags; if (!ndev || !netif_running(ndev)) return 0; @@ -1902,7 +1997,7 @@ int stmmac_suspend(struct net_device *ndev) if (priv->phydev) phy_stop(priv->phydev); - spin_lock(&priv->lock); + spin_lock_irqsave(&priv->lock, flags); netif_device_detach(ndev); netif_stop_queue(ndev); @@ -1925,21 +2020,24 @@ int stmmac_suspend(struct net_device *ndev) /* Enable Power down mode by programming the PMT regs */ if (device_may_wakeup(priv->device)) priv->hw->mac->pmt(priv->ioaddr, priv->wolopts); - else + else { stmmac_set_mac(priv->ioaddr, false); - - spin_unlock(&priv->lock); + /* Disable clock in case of PWM is off */ + stmmac_clk_disable(priv); + } + spin_unlock_irqrestore(&priv->lock, flags); return 0; } int stmmac_resume(struct net_device *ndev) { struct stmmac_priv *priv = netdev_priv(ndev); + unsigned long flags; if (!netif_running(ndev)) return 0; - spin_lock(&priv->lock); + spin_lock_irqsave(&priv->lock, flags); /* Power Down bit, into the PM register, is cleared * automatically as soon as a magic packet or a Wake-up frame @@ -1948,6 +2046,9 @@ int stmmac_resume(struct net_device *ndev) * from another 
devices (e.g. serial console). */ if (device_may_wakeup(priv->device)) priv->hw->mac->pmt(priv->ioaddr, 0); + else + /* enable the clk prevously disabled */ + stmmac_clk_enable(priv); netif_device_attach(ndev); @@ -1964,7 +2065,7 @@ int stmmac_resume(struct net_device *ndev) netif_start_queue(ndev); - spin_unlock(&priv->lock); + spin_unlock_irqrestore(&priv->lock, flags); if (priv->phydev) phy_start(priv->phydev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 73195329aa4..ade10823204 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -34,6 +34,22 @@ #define MII_BUSY 0x00000001 #define MII_WRITE 0x00000002 +static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr) +{ + unsigned long curr; + unsigned long finish = jiffies + 3 * HZ; + + do { + curr = jiffies; + if (readl(ioaddr + mii_addr) & MII_BUSY) + cpu_relax(); + else + return 0; + } while (!time_after_eq(curr, finish)); + + return -EBUSY; +} + /** * stmmac_mdio_read * @bus: points to the mii_bus structure @@ -54,11 +70,15 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) int data; u16 regValue = (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))); - regValue |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2); + regValue |= MII_BUSY | ((priv->clk_csr & 0xF) << 2); + + if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + return -EBUSY; - do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); writel(regValue, priv->ioaddr + mii_address); - do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); + + if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + return -EBUSY; /* Read the data from the MII data register */ data = (int)readl(priv->ioaddr + mii_data); @@ -86,20 +106,18 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) | MII_WRITE; - value |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2); - + value |= MII_BUSY | ((priv->clk_csr & 0xF) << 2); /* Wait until any existing MII operation is complete */ - do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); + if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + return -EBUSY; /* Set the MII address register to write */ writel(phydata, priv->ioaddr + mii_data); writel(value, priv->ioaddr + mii_address); /* Wait until any existing MII operation is complete */ - do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); - - return 0; + return stmmac_mdio_busy_wait(priv->ioaddr, mii_address); } /** diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index da66ed7c3c5..58fab5303e9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -28,6 +28,7 @@ struct plat_stmmacenet_data plat_dat; struct stmmac_mdio_bus_data mdio_data; +struct stmmac_dma_cfg dma_cfg; static void stmmac_default_data(void) { @@ -35,7 +36,6 @@ static void stmmac_default_data(void) plat_dat.bus_id = 1; plat_dat.phy_addr = 0; plat_dat.interface = PHY_INTERFACE_MODE_GMII; - plat_dat.pbl = 32; plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ plat_dat.has_gmac = 1; plat_dat.force_sf_dma_mode = 1; @@ -44,6 +44,10 @@ static void stmmac_default_data(void) mdio_data.phy_reset = NULL; mdio_data.phy_mask = 0; plat_dat.mdio_bus_data = 
&mdio_data; + + dma_cfg.pbl = 32; + dma_cfg.burst_len = DMA_AXI_BLEN_256; + plat_dat.dma_cfg = &dma_cfg; } /** diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 116529a366b..3dd8f080380 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -50,7 +50,6 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev, * once needed on other platforms. */ if (of_device_is_compatible(np, "st,spear600-gmac")) { - plat->pbl = 8; plat->has_gmac = 1; plat->pmt = 1; } @@ -189,9 +188,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev) if (priv->plat->exit) priv->plat->exit(pdev); - if (priv->plat->exit) - priv->plat->exit(pdev); - platform_set_drvdata(pdev, NULL); iounmap((void *)priv->ioaddr); @@ -218,14 +214,26 @@ static int stmmac_pltfr_resume(struct device *dev) int stmmac_pltfr_freeze(struct device *dev) { + int ret; + struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev); struct net_device *ndev = dev_get_drvdata(dev); + struct platform_device *pdev = to_platform_device(dev); - return stmmac_freeze(ndev); + ret = stmmac_freeze(ndev); + if (plat_dat->exit) + plat_dat->exit(pdev); + + return ret; } int stmmac_pltfr_restore(struct device *dev) { + struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev); struct net_device *ndev = dev_get_drvdata(dev); + struct platform_device *pdev = to_platform_device(dev); + + if (plat_dat->init) + plat_dat->init(pdev); return stmmac_restore(ndev); } diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index c99b3b0e2ea..703c8cce2a2 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -9838,7 +9838,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev, goto err_out_release_parent; } } - if (err || dma_mask == DMA_BIT_MASK(32)) { + if (err) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 4ba96909671..3cf4ab75583 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -401,7 +401,7 @@ static int gem_rxmac_reset(struct gem *gp) return 1; } - udelay(5000); + mdelay(5); /* Execute RX reset command. */ writel(gp->swrst_base | GREG_SWRST_RXRST, @@ -2898,7 +2898,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev, } gp->pdev = pdev; - dev->base_addr = (long) pdev; gp->dev = dev; gp->msg_enable = DEFAULT_MSG; @@ -2972,7 +2971,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev, netif_napi_add(dev, &gp->napi, gem_poll, 64); dev->ethtool_ops = &gem_ethtool_ops; dev->watchdog_timeo = 5 * HZ; - dev->irq = pdev->irq; dev->dma = 0; /* Set that now, in case PM kicks in now */ diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index b95e7e681b3..dfc00c4683e 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -2182,11 +2182,12 @@ static int happy_meal_open(struct net_device *dev) * into a single source which we register handling at probe time. 
*/ if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) { - if (request_irq(dev->irq, happy_meal_interrupt, - IRQF_SHARED, dev->name, (void *)dev)) { + res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED, + dev->name, dev); + if (res) { HMD(("EAGAIN\n")); printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n", - dev->irq); + hp->irq); return -EAGAIN; } @@ -2199,7 +2200,7 @@ static int happy_meal_open(struct net_device *dev) spin_unlock_irq(&hp->happy_lock); if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)) - free_irq(dev->irq, dev); + free_irq(hp->irq, dev); return res; } @@ -2221,7 +2222,7 @@ static int happy_meal_close(struct net_device *dev) * time and never unregister. */ if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) - free_irq(dev->irq, dev); + free_irq(hp->irq, dev); return 0; } @@ -2777,7 +2778,7 @@ static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int i dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; dev->features |= dev->hw_features | NETIF_F_RXCSUM; - dev->irq = op->archdata.irqs[0]; + hp->irq = op->archdata.irqs[0]; #if defined(CONFIG_SBUS) && defined(CONFIG_PCI) /* Hook up SBUS register/descriptor accessors. */ @@ -2981,8 +2982,6 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, if (hme_version_printed++ == 0) printk(KERN_INFO "%s", version); - dev->base_addr = (long) pdev; - hp = netdev_priv(dev); hp->happy_dev = pdev; @@ -3087,12 +3086,11 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, init_timer(&hp->happy_timer); + hp->irq = pdev->irq; hp->dev = dev; dev->netdev_ops = &hme_netdev_ops; dev->watchdog_timeo = 5*HZ; dev->ethtool_ops = &hme_ethtool_ops; - dev->irq = pdev->irq; - dev->dma = 0; /* Happy Meal can do it all... 
*/ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h index 64f278360d8..f4307654e4a 100644 --- a/drivers/net/ethernet/sun/sunhme.h +++ b/drivers/net/ethernet/sun/sunhme.h @@ -432,6 +432,7 @@ struct happy_meal { dma_addr_t hblock_dvma; /* DVMA visible address happy block */ unsigned int happy_flags; /* Driver state flags */ + int irq; enum happy_transceiver tcvr_type; /* Kind of transceiver in use */ unsigned int happy_bursts; /* Get your mind out of the gutter */ unsigned int paddr; /* PHY address for transceiver */ diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 38e3ae9155b..a108db35924 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -618,7 +618,7 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) struct vnet_port *port; hlist_for_each_entry(port, n, hp, hash) { - if (!compare_ether_addr(port->raddr, skb->data)) + if (ether_addr_equal(port->raddr, skb->data)) return port; } port = NULL; diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index ad973ffc9ff..8846516678c 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1317,7 +1317,7 @@ static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len, static void print_rxfd(struct rxf_desc *rxfd) { - DBG("=== RxF desc CHIP ORDER/ENDIANESS =============\n" + DBG("=== RxF desc CHIP ORDER/ENDIANNESS =============\n" "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n", rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len); } @@ -1988,10 +1988,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* these fields are used for info purposes only * so we can have them same for all ports of the board */ ndev->if_port = port; - ndev->base_addr = pciaddr; - ndev->mem_start = pciaddr; - ndev->mem_end = pciaddr + regionSize; - ndev->irq = pdev->irq; ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 34558766cbf..d614c374ed9 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -92,7 +92,7 @@ enum cpdma_state { CPDMA_STATE_TEARDOWN, }; -const char *cpdma_state_str[] = { "idle", "active", "teardown" }; +static const char *cpdma_state_str[] = { "idle", "active", "teardown" }; struct cpdma_ctlr { enum cpdma_state state; @@ -276,6 +276,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) ctlr->num_chan = CPDMA_MAX_CHANNELS; return ctlr; } +EXPORT_SYMBOL_GPL(cpdma_ctlr_create); int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) { @@ -321,6 +322,7 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } +EXPORT_SYMBOL_GPL(cpdma_ctlr_start); int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) { @@ -351,6 +353,7 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } +EXPORT_SYMBOL_GPL(cpdma_ctlr_stop); int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr) { @@ -421,6 +424,7 @@ int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr) spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } +EXPORT_SYMBOL_GPL(cpdma_ctlr_dump); int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) { @@ -444,6 +448,7 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) 
kfree(ctlr); return ret; } +EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy); int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable) { @@ -528,6 +533,7 @@ err_chan_busy: err_chan_alloc: return ERR_PTR(ret); } +EXPORT_SYMBOL_GPL(cpdma_chan_create); int cpdma_chan_destroy(struct cpdma_chan *chan) { @@ -545,6 +551,7 @@ int cpdma_chan_destroy(struct cpdma_chan *chan) kfree(chan); return 0; } +EXPORT_SYMBOL_GPL(cpdma_chan_destroy); int cpdma_chan_get_stats(struct cpdma_chan *chan, struct cpdma_chan_stats *stats) @@ -693,6 +700,7 @@ unlock_ret: spin_unlock_irqrestore(&chan->lock, flags); return ret; } +EXPORT_SYMBOL_GPL(cpdma_chan_submit); static void __cpdma_chan_free(struct cpdma_chan *chan, struct cpdma_desc __iomem *desc, @@ -776,6 +784,7 @@ int cpdma_chan_process(struct cpdma_chan *chan, int quota) } return used; } +EXPORT_SYMBOL_GPL(cpdma_chan_process); int cpdma_chan_start(struct cpdma_chan *chan) { @@ -803,6 +812,7 @@ int cpdma_chan_start(struct cpdma_chan *chan) spin_unlock_irqrestore(&chan->lock, flags); return 0; } +EXPORT_SYMBOL_GPL(cpdma_chan_start); int cpdma_chan_stop(struct cpdma_chan *chan) { @@ -863,6 +873,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan) spin_unlock_irqrestore(&chan->lock, flags); return 0; } +EXPORT_SYMBOL_GPL(cpdma_chan_stop); int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable) { diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 08aff1a2087..4da93a5d7ec 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -627,6 +627,7 @@ static const struct ethtool_ops ethtool_ops = { .get_link = ethtool_op_get_link, .get_coalesce = emac_get_coalesce, .set_coalesce = emac_set_coalesce, + .get_ts_info = ethtool_op_get_ts_info, }; /** diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index efd36691ce5..3e6abf0f277 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -2545,7 +2545,7 @@ static void tlan_phy_reset(struct net_device *dev) phy = priv->phy[priv->phy_num]; - TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name); + TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name); tlan_mii_sync(dev->base_addr); value = MII_GC_LOOPBK | MII_GC_RESET; tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value); diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 3d501ec7fad..96070e9b50d 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -843,7 +843,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) if (!is_multicast_ether_addr(buf)) { /* Filter packets not for our address. 
*/ const u8 *mine = dev->dev_addr; - filter = compare_ether_addr(mine, buf); + filter = !ether_addr_equal(mine, buf); } } diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c index 5c14f82c495..961c8321451 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c @@ -1590,8 +1590,8 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl) found = 0; oldest = NULL; list_for_each_entry(target, &wl->network_list, list) { - if (!compare_ether_addr(&target->hwinfo->bssid[2], - &scan_info->bssid[2])) { + if (ether_addr_equal(&target->hwinfo->bssid[2], + &scan_info->bssid[2])) { found = 1; pr_debug("%s: same BBS found scanned list\n", __func__); @@ -1691,8 +1691,8 @@ struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl) /* If bss specified, check it only */ if (test_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat)) { - if (!compare_ether_addr(&scan_info->hwinfo->bssid[2], - wl->bssid)) { + if (ether_addr_equal(&scan_info->hwinfo->bssid[2], + wl->bssid)) { best_bss = scan_info; pr_debug("%s: bssid matched\n", __func__); break; diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index fcfa01f7ceb..0459c096629 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -689,9 +689,12 @@ static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev) #ifdef CONFIG_NET_POLL_CONTROLLER static void rhine_poll(struct net_device *dev) { - disable_irq(dev->irq); - rhine_interrupt(dev->irq, (void *)dev); - enable_irq(dev->irq); + struct rhine_private *rp = netdev_priv(dev); + const int irq = rp->pdev->irq; + + disable_irq(irq); + rhine_interrupt(irq, dev); + enable_irq(irq); } #endif @@ -972,7 +975,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, } #endif /* USE_MMIO */ - dev->base_addr = (unsigned long)ioaddr; rp->base = ioaddr; /* Get chip registers into a sane state */ @@ -995,8 +997,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, if (!phy_id) phy_id = ioread8(ioaddr + 0x6C); - dev->irq = pdev->irq; - spin_lock_init(&rp->lock); mutex_init(&rp->task_lock); INIT_WORK(&rp->reset_task, rhine_reset_task); diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 8a5d7c100a5..ea3e0a21ba7 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2488,8 +2488,8 @@ static int velocity_close(struct net_device *dev) if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) velocity_get_ip(vptr); - if (dev->irq != 0) - free_irq(dev->irq, dev); + + free_irq(vptr->pdev->irq, dev); velocity_free_rings(vptr); @@ -2755,8 +2755,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi if (ret < 0) goto err_free_dev; - dev->irq = pdev->irq; - ret = velocity_get_pci_info(vptr, pdev); if (ret < 0) { /* error message already printed */ @@ -2779,8 +2777,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi mac_wol_reset(regs); - dev->base_addr = vptr->ioaddr; - for (i = 0; i < 6; i++) dev->dev_addr[i] = readb(®s->PAR[i]); @@ -2806,7 +2802,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); - dev->irq = pdev->irq; dev->netdev_ops = &velocity_netdev_ops; dev->ethtool_ops = &velocity_ethtool_ops; netif_napi_add(dev, &vptr->napi, velocity_poll, 
VELOCITY_NAPI_WEIGHT); diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig new file mode 100644 index 00000000000..cb18043f583 --- /dev/null +++ b/drivers/net/ethernet/wiznet/Kconfig @@ -0,0 +1,73 @@ +# +# WIZnet devices configuration +# + +config NET_VENDOR_WIZNET + bool "WIZnet devices" + default y + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + <http://www.tldp.org/docs.html#howto>. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about WIZnet devices. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_WIZNET + +config WIZNET_W5100 + tristate "WIZnet W5100 Ethernet support" + depends on HAS_IOMEM + ---help--- + Support for WIZnet W5100 chips. + + W5100 is a single chip with integrated 10/100 Ethernet MAC, + PHY and hardware TCP/IP stack, but this driver is limited to + the MAC and PHY functions only, onchip TCP/IP is unused. + + To compile this driver as a module, choose M here: the module + will be called w5100. + +config WIZNET_W5300 + tristate "WIZnet W5300 Ethernet support" + depends on HAS_IOMEM + ---help--- + Support for WIZnet W5300 chips. + + W5300 is a single chip with integrated 10/100 Ethernet MAC, + PHY and hardware TCP/IP stack, but this driver is limited to + the MAC and PHY functions only, onchip TCP/IP is unused. + + To compile this driver as a module, choose M here: the module + will be called w5300. + +choice + prompt "WIZnet interface mode" + depends on WIZNET_W5100 || WIZNET_W5300 + default WIZNET_BUS_ANY + +config WIZNET_BUS_DIRECT + bool "Direct address bus mode" + ---help--- + In direct address mode host system can directly access all registers + after mapping to Memory-Mapped I/O space. + +config WIZNET_BUS_INDIRECT + bool "Indirect address bus mode" + ---help--- + In indirect address mode host system indirectly accesses registers + using Indirect Mode Address Register and Indirect Mode Data Register, + which are directly mapped to Memory-Mapped I/O space. + +config WIZNET_BUS_ANY + bool "Select interface mode in runtime" + ---help--- + If interface mode is unknown in compile time, it can be selected + in runtime from board/platform resources configuration. + + Performance may decrease compared to explicitly selected bus mode. +endchoice + +endif # NET_VENDOR_WIZNET diff --git a/drivers/net/ethernet/wiznet/Makefile b/drivers/net/ethernet/wiznet/Makefile new file mode 100644 index 00000000000..c614535227e --- /dev/null +++ b/drivers/net/ethernet/wiznet/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_WIZNET_W5100) += w5100.o +obj-$(CONFIG_WIZNET_W5300) += w5300.o diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c new file mode 100644 index 00000000000..a75e9ef5a4c --- /dev/null +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -0,0 +1,808 @@ +/* + * Ethernet driver for the WIZnet W5100 chip. + * + * Copyright (C) 2006-2008 WIZnet Co.,Ltd. + * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru> + * + * Licensed under the GPL-2 or later. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/kconfig.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include <linux/platform_data/wiznet.h> +#include <linux/ethtool.h> +#include <linux/skbuff.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/gpio.h> + +#define DRV_NAME "w5100" +#define DRV_VERSION "2012-04-04" + +MODULE_DESCRIPTION("WIZnet W5100 Ethernet driver v"DRV_VERSION); +MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>"); +MODULE_ALIAS("platform:"DRV_NAME); +MODULE_LICENSE("GPL"); + +/* + * Registers + */ +#define W5100_COMMON_REGS 0x0000 +#define W5100_MR 0x0000 /* Mode Register */ +#define MR_RST 0x80 /* S/W reset */ +#define MR_PB 0x10 /* Ping block */ +#define MR_AI 0x02 /* Address Auto-Increment */ +#define MR_IND 0x01 /* Indirect mode */ +#define W5100_SHAR 0x0009 /* Source MAC address */ +#define W5100_IR 0x0015 /* Interrupt Register */ +#define W5100_IMR 0x0016 /* Interrupt Mask Register */ +#define IR_S0 0x01 /* S0 interrupt */ +#define W5100_RTR 0x0017 /* Retry Time-value Register */ +#define RTR_DEFAULT 2000 /* =0x07d0 (2000) */ +#define W5100_RMSR 0x001a /* Receive Memory Size */ +#define W5100_TMSR 0x001b /* Transmit Memory Size */ +#define W5100_COMMON_REGS_LEN 0x0040 + +#define W5100_S0_REGS 0x0400 +#define W5100_S0_MR 0x0400 /* S0 Mode Register */ +#define S0_MR_MACRAW 0x04 /* MAC RAW mode (promiscous) */ +#define S0_MR_MACRAW_MF 0x44 /* MAC RAW mode (filtered) */ +#define W5100_S0_CR 0x0401 /* S0 Command Register */ +#define S0_CR_OPEN 0x01 /* OPEN command */ +#define S0_CR_CLOSE 0x10 /* CLOSE command */ +#define S0_CR_SEND 0x20 /* SEND command */ +#define S0_CR_RECV 0x40 /* RECV command */ +#define W5100_S0_IR 0x0402 /* S0 Interrupt Register */ +#define S0_IR_SENDOK 0x10 /* complete sending */ +#define S0_IR_RECV 0x04 /* receiving data */ +#define W5100_S0_SR 0x0403 /* S0 Status Register */ +#define S0_SR_MACRAW 0x42 /* mac raw mode */ +#define W5100_S0_TX_FSR 0x0420 /* S0 Transmit free memory size */ +#define W5100_S0_TX_RD 0x0422 /* S0 Transmit memory read pointer */ +#define W5100_S0_TX_WR 0x0424 /* S0 Transmit memory write pointer */ +#define W5100_S0_RX_RSR 0x0426 /* S0 Receive free memory size */ +#define W5100_S0_RX_RD 0x0428 /* S0 Receive memory read pointer */ +#define W5100_S0_REGS_LEN 0x0040 + +#define W5100_TX_MEM_START 0x4000 +#define W5100_TX_MEM_END 0x5fff +#define W5100_TX_MEM_MASK 0x1fff +#define W5100_RX_MEM_START 0x6000 +#define W5100_RX_MEM_END 0x7fff +#define W5100_RX_MEM_MASK 0x1fff + +/* + * Device driver private data structure + */ +struct w5100_priv { + void __iomem *base; + spinlock_t reg_lock; + bool indirect; + u8 (*read)(struct w5100_priv *priv, u16 addr); + void (*write)(struct w5100_priv *priv, u16 addr, u8 data); + u16 (*read16)(struct w5100_priv *priv, u16 addr); + void (*write16)(struct w5100_priv *priv, u16 addr, u16 data); + void (*readbuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len); + void (*writebuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len); + int irq; + int link_irq; + int link_gpio; + + struct napi_struct napi; + struct net_device *ndev; + bool promisc; + u32 msg_enable; +}; + +/************************************************************************ + * + * Lowlevel I/O functions + * + 
***********************************************************************/ + +/* + * In direct address mode host system can directly access W5100 registers + * after mapping to Memory-Mapped I/O space. + * + * 0x8000 bytes are required for memory space. + */ +static inline u8 w5100_read_direct(struct w5100_priv *priv, u16 addr) +{ + return ioread8(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT)); +} + +static inline void w5100_write_direct(struct w5100_priv *priv, + u16 addr, u8 data) +{ + iowrite8(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT)); +} + +static u16 w5100_read16_direct(struct w5100_priv *priv, u16 addr) +{ + u16 data; + data = w5100_read_direct(priv, addr) << 8; + data |= w5100_read_direct(priv, addr + 1); + return data; +} + +static void w5100_write16_direct(struct w5100_priv *priv, u16 addr, u16 data) +{ + w5100_write_direct(priv, addr, data >> 8); + w5100_write_direct(priv, addr + 1, data); +} + +static void w5100_readbuf_direct(struct w5100_priv *priv, + u16 offset, u8 *buf, int len) +{ + u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK); + int i; + + for (i = 0; i < len; i++, addr++) { + if (unlikely(addr > W5100_RX_MEM_END)) + addr = W5100_RX_MEM_START; + *buf++ = w5100_read_direct(priv, addr); + } +} + +static void w5100_writebuf_direct(struct w5100_priv *priv, + u16 offset, u8 *buf, int len) +{ + u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK); + int i; + + for (i = 0; i < len; i++, addr++) { + if (unlikely(addr > W5100_TX_MEM_END)) + addr = W5100_TX_MEM_START; + w5100_write_direct(priv, addr, *buf++); + } +} + +/* + * In indirect address mode host system indirectly accesses registers by + * using Indirect Mode Address Register (IDM_AR) and Indirect Mode Data + * Register (IDM_DR), which are directly mapped to Memory-Mapped I/O space. + * Mode Register (MR) is directly accessible. + * + * Only 0x04 bytes are required for memory space. 
+ */ +#define W5100_IDM_AR 0x01 /* Indirect Mode Address Register */ +#define W5100_IDM_DR 0x03 /* Indirect Mode Data Register */ + +static u8 w5100_read_indirect(struct w5100_priv *priv, u16 addr) +{ + unsigned long flags; + u8 data; + + spin_lock_irqsave(&priv->reg_lock, flags); + w5100_write16_direct(priv, W5100_IDM_AR, addr); + mmiowb(); + data = w5100_read_direct(priv, W5100_IDM_DR); + spin_unlock_irqrestore(&priv->reg_lock, flags); + + return data; +} + +static void w5100_write_indirect(struct w5100_priv *priv, u16 addr, u8 data) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->reg_lock, flags); + w5100_write16_direct(priv, W5100_IDM_AR, addr); + mmiowb(); + w5100_write_direct(priv, W5100_IDM_DR, data); + mmiowb(); + spin_unlock_irqrestore(&priv->reg_lock, flags); +} + +static u16 w5100_read16_indirect(struct w5100_priv *priv, u16 addr) +{ + unsigned long flags; + u16 data; + + spin_lock_irqsave(&priv->reg_lock, flags); + w5100_write16_direct(priv, W5100_IDM_AR, addr); + mmiowb(); + data = w5100_read_direct(priv, W5100_IDM_DR) << 8; + data |= w5100_read_direct(priv, W5100_IDM_DR); + spin_unlock_irqrestore(&priv->reg_lock, flags); + + return data; +} + +static void w5100_write16_indirect(struct w5100_priv *priv, u16 addr, u16 data) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->reg_lock, flags); + w5100_write16_direct(priv, W5100_IDM_AR, addr); + mmiowb(); + w5100_write_direct(priv, W5100_IDM_DR, data >> 8); + w5100_write_direct(priv, W5100_IDM_DR, data); + mmiowb(); + spin_unlock_irqrestore(&priv->reg_lock, flags); +} + +static void w5100_readbuf_indirect(struct w5100_priv *priv, + u16 offset, u8 *buf, int len) +{ + u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK); + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->reg_lock, flags); + w5100_write16_direct(priv, W5100_IDM_AR, addr); + mmiowb(); + + for (i = 0; i < len; i++, addr++) { + if (unlikely(addr > W5100_RX_MEM_END)) { + addr = W5100_RX_MEM_START; + w5100_write16_direct(priv, W5100_IDM_AR, addr); + mmiowb(); + } + *buf++ = w5100_read_direct(priv, W5100_IDM_DR); + } + mmiowb(); + spin_unlock_irqrestore(&priv->reg_lock, flags); +} + +static void w5100_writebuf_indirect(struct w5100_priv *priv, + u16 offset, u8 *buf, int len) +{ + u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK); + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->reg_lock, flags); + w5100_write16_direct(priv, W5100_IDM_AR, addr); + mmiowb(); + + for (i = 0; i < len; i++, addr++) { + if (unlikely(addr > W5100_TX_MEM_END)) { + addr = W5100_TX_MEM_START; + w5100_write16_direct(priv, W5100_IDM_AR, addr); + mmiowb(); + } + w5100_write_direct(priv, W5100_IDM_DR, *buf++); + } + mmiowb(); + spin_unlock_irqrestore(&priv->reg_lock, flags); +} + +#if defined(CONFIG_WIZNET_BUS_DIRECT) +#define w5100_read w5100_read_direct +#define w5100_write w5100_write_direct +#define w5100_read16 w5100_read16_direct +#define w5100_write16 w5100_write16_direct +#define w5100_readbuf w5100_readbuf_direct +#define w5100_writebuf w5100_writebuf_direct + +#elif defined(CONFIG_WIZNET_BUS_INDIRECT) +#define w5100_read w5100_read_indirect +#define w5100_write w5100_write_indirect +#define w5100_read16 w5100_read16_indirect +#define w5100_write16 w5100_write16_indirect +#define w5100_readbuf w5100_readbuf_indirect +#define w5100_writebuf w5100_writebuf_indirect + +#else /* CONFIG_WIZNET_BUS_ANY */ +#define w5100_read priv->read +#define w5100_write priv->write +#define w5100_read16 priv->read16 +#define w5100_write16 priv->write16 +#define 
w5100_readbuf priv->readbuf +#define w5100_writebuf priv->writebuf +#endif + +static int w5100_command(struct w5100_priv *priv, u16 cmd) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(100); + + w5100_write(priv, W5100_S0_CR, cmd); + mmiowb(); + + while (w5100_read(priv, W5100_S0_CR) != 0) { + if (time_after(jiffies, timeout)) + return -EIO; + cpu_relax(); + } + + return 0; +} + +static void w5100_write_macaddr(struct w5100_priv *priv) +{ + struct net_device *ndev = priv->ndev; + int i; + + for (i = 0; i < ETH_ALEN; i++) + w5100_write(priv, W5100_SHAR + i, ndev->dev_addr[i]); + mmiowb(); +} + +static void w5100_hw_reset(struct w5100_priv *priv) +{ + w5100_write_direct(priv, W5100_MR, MR_RST); + mmiowb(); + mdelay(5); + w5100_write_direct(priv, W5100_MR, priv->indirect ? + MR_PB | MR_AI | MR_IND : + MR_PB); + mmiowb(); + w5100_write(priv, W5100_IMR, 0); + w5100_write_macaddr(priv); + + /* Configure 16K of internal memory + * as 8K RX buffer and 8K TX buffer + */ + w5100_write(priv, W5100_RMSR, 0x03); + w5100_write(priv, W5100_TMSR, 0x03); + mmiowb(); +} + +static void w5100_hw_start(struct w5100_priv *priv) +{ + w5100_write(priv, W5100_S0_MR, priv->promisc ? + S0_MR_MACRAW : S0_MR_MACRAW_MF); + mmiowb(); + w5100_command(priv, S0_CR_OPEN); + w5100_write(priv, W5100_IMR, IR_S0); + mmiowb(); +} + +static void w5100_hw_close(struct w5100_priv *priv) +{ + w5100_write(priv, W5100_IMR, 0); + mmiowb(); + w5100_command(priv, S0_CR_CLOSE); +} + +/*********************************************************************** + * + * Device driver functions / callbacks + * + ***********************************************************************/ + +static void w5100_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(ndev->dev.parent), + sizeof(info->bus_info)); +} + +static u32 w5100_get_link(struct net_device *ndev) +{ + struct w5100_priv *priv = netdev_priv(ndev); + + if (gpio_is_valid(priv->link_gpio)) + return !!gpio_get_value(priv->link_gpio); + + return 1; +} + +static u32 w5100_get_msglevel(struct net_device *ndev) +{ + struct w5100_priv *priv = netdev_priv(ndev); + + return priv->msg_enable; +} + +static void w5100_set_msglevel(struct net_device *ndev, u32 value) +{ + struct w5100_priv *priv = netdev_priv(ndev); + + priv->msg_enable = value; +} + +static int w5100_get_regs_len(struct net_device *ndev) +{ + return W5100_COMMON_REGS_LEN + W5100_S0_REGS_LEN; +} + +static void w5100_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, void *_buf) +{ + struct w5100_priv *priv = netdev_priv(ndev); + u8 *buf = _buf; + u16 i; + + regs->version = 1; + for (i = 0; i < W5100_COMMON_REGS_LEN; i++) + *buf++ = w5100_read(priv, W5100_COMMON_REGS + i); + for (i = 0; i < W5100_S0_REGS_LEN; i++) + *buf++ = w5100_read(priv, W5100_S0_REGS + i); +} + +static void w5100_tx_timeout(struct net_device *ndev) +{ + struct w5100_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + w5100_hw_reset(priv); + w5100_hw_start(priv); + ndev->stats.tx_errors++; + ndev->trans_start = jiffies; + netif_wake_queue(ndev); +} + +static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) +{ + struct w5100_priv *priv = netdev_priv(ndev); + u16 offset; + + netif_stop_queue(ndev); + + offset = w5100_read16(priv, W5100_S0_TX_WR); + w5100_writebuf(priv, offset, skb->data, skb->len); + w5100_write16(priv, W5100_S0_TX_WR, offset + 
skb->len); + mmiowb(); + ndev->stats.tx_bytes += skb->len; + ndev->stats.tx_packets++; + dev_kfree_skb(skb); + + w5100_command(priv, S0_CR_SEND); + + return NETDEV_TX_OK; +} + +static int w5100_napi_poll(struct napi_struct *napi, int budget) +{ + struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi); + struct net_device *ndev = priv->ndev; + struct sk_buff *skb; + int rx_count; + u16 rx_len; + u16 offset; + u8 header[2]; + + for (rx_count = 0; rx_count < budget; rx_count++) { + u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR); + if (rx_buf_len == 0) + break; + + offset = w5100_read16(priv, W5100_S0_RX_RD); + w5100_readbuf(priv, offset, header, 2); + rx_len = get_unaligned_be16(header) - 2; + + skb = netdev_alloc_skb_ip_align(ndev, rx_len); + if (unlikely(!skb)) { + w5100_write16(priv, W5100_S0_RX_RD, + offset + rx_buf_len); + w5100_command(priv, S0_CR_RECV); + ndev->stats.rx_dropped++; + return -ENOMEM; + } + + skb_put(skb, rx_len); + w5100_readbuf(priv, offset + 2, skb->data, rx_len); + w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len); + mmiowb(); + w5100_command(priv, S0_CR_RECV); + skb->protocol = eth_type_trans(skb, ndev); + + netif_receive_skb(skb); + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += rx_len; + } + + if (rx_count < budget) { + w5100_write(priv, W5100_IMR, IR_S0); + mmiowb(); + napi_complete(napi); + } + + return rx_count; +} + +static irqreturn_t w5100_interrupt(int irq, void *ndev_instance) +{ + struct net_device *ndev = ndev_instance; + struct w5100_priv *priv = netdev_priv(ndev); + + int ir = w5100_read(priv, W5100_S0_IR); + if (!ir) + return IRQ_NONE; + w5100_write(priv, W5100_S0_IR, ir); + mmiowb(); + + if (ir & S0_IR_SENDOK) { + netif_dbg(priv, tx_done, ndev, "tx done\n"); + netif_wake_queue(ndev); + } + + if (ir & S0_IR_RECV) { + if (napi_schedule_prep(&priv->napi)) { + w5100_write(priv, W5100_IMR, 0); + mmiowb(); + __napi_schedule(&priv->napi); + } + } + + return IRQ_HANDLED; +} + +static irqreturn_t w5100_detect_link(int irq, void *ndev_instance) +{ + struct net_device *ndev = ndev_instance; + struct w5100_priv *priv = netdev_priv(ndev); + + if (netif_running(ndev)) { + if (gpio_get_value(priv->link_gpio) != 0) { + netif_info(priv, link, ndev, "link is up\n"); + netif_carrier_on(ndev); + } else { + netif_info(priv, link, ndev, "link is down\n"); + netif_carrier_off(ndev); + } + } + + return IRQ_HANDLED; +} + +static void w5100_set_rx_mode(struct net_device *ndev) +{ + struct w5100_priv *priv = netdev_priv(ndev); + bool set_promisc = (ndev->flags & IFF_PROMISC) != 0; + + if (priv->promisc != set_promisc) { + priv->promisc = set_promisc; + w5100_hw_start(priv); + } +} + +static int w5100_set_macaddr(struct net_device *ndev, void *addr) +{ + struct w5100_priv *priv = netdev_priv(ndev); + struct sockaddr *sock_addr = addr; + + if (!is_valid_ether_addr(sock_addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN); + ndev->addr_assign_type &= ~NET_ADDR_RANDOM; + w5100_write_macaddr(priv); + return 0; +} + +static int w5100_open(struct net_device *ndev) +{ + struct w5100_priv *priv = netdev_priv(ndev); + + netif_info(priv, ifup, ndev, "enabling\n"); + if (!is_valid_ether_addr(ndev->dev_addr)) + return -EINVAL; + w5100_hw_start(priv); + napi_enable(&priv->napi); + netif_start_queue(ndev); + if (!gpio_is_valid(priv->link_gpio) || + gpio_get_value(priv->link_gpio) != 0) + netif_carrier_on(ndev); + return 0; +} + +static int w5100_stop(struct net_device *ndev) +{ + struct w5100_priv *priv = 
netdev_priv(ndev); + + netif_info(priv, ifdown, ndev, "shutting down\n"); + w5100_hw_close(priv); + netif_carrier_off(ndev); + netif_stop_queue(ndev); + napi_disable(&priv->napi); + return 0; +} + +static const struct ethtool_ops w5100_ethtool_ops = { + .get_drvinfo = w5100_get_drvinfo, + .get_msglevel = w5100_get_msglevel, + .set_msglevel = w5100_set_msglevel, + .get_link = w5100_get_link, + .get_regs_len = w5100_get_regs_len, + .get_regs = w5100_get_regs, +}; + +static const struct net_device_ops w5100_netdev_ops = { + .ndo_open = w5100_open, + .ndo_stop = w5100_stop, + .ndo_start_xmit = w5100_start_tx, + .ndo_tx_timeout = w5100_tx_timeout, + .ndo_set_rx_mode = w5100_set_rx_mode, + .ndo_set_mac_address = w5100_set_macaddr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static int __devinit w5100_hw_probe(struct platform_device *pdev) +{ + struct wiznet_platform_data *data = pdev->dev.platform_data; + struct net_device *ndev = platform_get_drvdata(pdev); + struct w5100_priv *priv = netdev_priv(ndev); + const char *name = netdev_name(ndev); + struct resource *mem; + int mem_size; + int irq; + int ret; + + if (data && is_valid_ether_addr(data->mac_addr)) { + memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); + } else { + random_ether_addr(ndev->dev_addr); + ndev->addr_assign_type |= NET_ADDR_RANDOM; + } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) + return -ENXIO; + mem_size = resource_size(mem); + if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name)) + return -EBUSY; + priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size); + if (!priv->base) + return -EBUSY; + + spin_lock_init(&priv->reg_lock); + priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE; + if (priv->indirect) { + priv->read = w5100_read_indirect; + priv->write = w5100_write_indirect; + priv->read16 = w5100_read16_indirect; + priv->write16 = w5100_write16_indirect; + priv->readbuf = w5100_readbuf_indirect; + priv->writebuf = w5100_writebuf_indirect; + } else { + priv->read = w5100_read_direct; + priv->write = w5100_write_direct; + priv->read16 = w5100_read16_direct; + priv->write16 = w5100_write16_direct; + priv->readbuf = w5100_readbuf_direct; + priv->writebuf = w5100_writebuf_direct; + } + + w5100_hw_reset(priv); + if (w5100_read16(priv, W5100_RTR) != RTR_DEFAULT) + return -ENODEV; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + ret = request_irq(irq, w5100_interrupt, + IRQ_TYPE_LEVEL_LOW, name, ndev); + if (ret < 0) + return ret; + priv->irq = irq; + + priv->link_gpio = data ? 
data->link_gpio : -EINVAL; + if (gpio_is_valid(priv->link_gpio)) { + char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL); + if (!link_name) + return -ENOMEM; + snprintf(link_name, 16, "%s-link", name); + priv->link_irq = gpio_to_irq(priv->link_gpio); + if (request_any_context_irq(priv->link_irq, w5100_detect_link, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + link_name, priv->ndev) < 0) + priv->link_gpio = -EINVAL; + } + + netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq); + return 0; +} + +static int __devinit w5100_probe(struct platform_device *pdev) +{ + struct w5100_priv *priv; + struct net_device *ndev; + int err; + + ndev = alloc_etherdev(sizeof(*priv)); + if (!ndev) + return -ENOMEM; + SET_NETDEV_DEV(ndev, &pdev->dev); + platform_set_drvdata(pdev, ndev); + priv = netdev_priv(ndev); + priv->ndev = ndev; + + ether_setup(ndev); + ndev->netdev_ops = &w5100_netdev_ops; + ndev->ethtool_ops = &w5100_ethtool_ops; + ndev->watchdog_timeo = HZ; + netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16); + + /* This chip doesn't support VLAN packets with normal MTU, + * so disable VLAN for this device. + */ + ndev->features |= NETIF_F_VLAN_CHALLENGED; + + err = register_netdev(ndev); + if (err < 0) + goto err_register; + + err = w5100_hw_probe(pdev); + if (err < 0) + goto err_hw_probe; + + return 0; + +err_hw_probe: + unregister_netdev(ndev); +err_register: + free_netdev(ndev); + platform_set_drvdata(pdev, NULL); + return err; +} + +static int __devexit w5100_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct w5100_priv *priv = netdev_priv(ndev); + + w5100_hw_reset(priv); + free_irq(priv->irq, ndev); + if (gpio_is_valid(priv->link_gpio)) + free_irq(priv->link_irq, ndev); + + unregister_netdev(ndev); + free_netdev(ndev); + platform_set_drvdata(pdev, NULL); + return 0; +} + +#ifdef CONFIG_PM +static int w5100_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + struct w5100_priv *priv = netdev_priv(ndev); + + if (netif_running(ndev)) { + netif_carrier_off(ndev); + netif_device_detach(ndev); + + w5100_hw_close(priv); + } + return 0; +} + +static int w5100_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + struct w5100_priv *priv = netdev_priv(ndev); + + if (netif_running(ndev)) { + w5100_hw_reset(priv); + w5100_hw_start(priv); + + netif_device_attach(ndev); + if (!gpio_is_valid(priv->link_gpio) || + gpio_get_value(priv->link_gpio) != 0) + netif_carrier_on(ndev); + } + return 0; +} +#endif /* CONFIG_PM */ + +static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume); + +static struct platform_driver w5100_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .pm = &w5100_pm_ops, + }, + .probe = w5100_probe, + .remove = __devexit_p(w5100_remove), +}; + +module_platform_driver(w5100_driver); diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c new file mode 100644 index 00000000000..3306a20ec21 --- /dev/null +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -0,0 +1,720 @@ +/* + * Ethernet driver for the WIZnet W5300 chip. + * + * Copyright (C) 2008-2009 WIZnet Co.,Ltd. + * Copyright (C) 2011 Taehun Kim <kth3321 <at> gmail.com> + * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru> + * + * Licensed under the GPL-2 or later. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/kconfig.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include <linux/platform_data/wiznet.h> +#include <linux/ethtool.h> +#include <linux/skbuff.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/gpio.h> + +#define DRV_NAME "w5300" +#define DRV_VERSION "2012-04-04" + +MODULE_DESCRIPTION("WIZnet W5300 Ethernet driver v"DRV_VERSION); +MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>"); +MODULE_ALIAS("platform:"DRV_NAME); +MODULE_LICENSE("GPL"); + +/* + * Registers + */ +#define W5300_MR 0x0000 /* Mode Register */ +#define MR_DBW (1 << 15) /* Data bus width */ +#define MR_MPF (1 << 14) /* Mac layer pause frame */ +#define MR_WDF(n) (((n)&7)<<11) /* Write data fetch time */ +#define MR_RDH (1 << 10) /* Read data hold time */ +#define MR_FS (1 << 8) /* FIFO swap */ +#define MR_RST (1 << 7) /* S/W reset */ +#define MR_PB (1 << 4) /* Ping block */ +#define MR_DBS (1 << 2) /* Data bus swap */ +#define MR_IND (1 << 0) /* Indirect mode */ +#define W5300_IR 0x0002 /* Interrupt Register */ +#define W5300_IMR 0x0004 /* Interrupt Mask Register */ +#define IR_S0 0x0001 /* S0 interrupt */ +#define W5300_SHARL 0x0008 /* Source MAC address (0123) */ +#define W5300_SHARH 0x000c /* Source MAC address (45) */ +#define W5300_TMSRL 0x0020 /* Transmit Memory Size (0123) */ +#define W5300_TMSRH 0x0024 /* Transmit Memory Size (4567) */ +#define W5300_RMSRL 0x0028 /* Receive Memory Size (0123) */ +#define W5300_RMSRH 0x002c /* Receive Memory Size (4567) */ +#define W5300_MTYPE 0x0030 /* Memory Type */ +#define W5300_IDR 0x00fe /* Chip ID register */ +#define IDR_W5300 0x5300 /* =0x5300 for WIZnet W5300 */ +#define W5300_S0_MR 0x0200 /* S0 Mode Register */ +#define S0_MR_CLOSED 0x0000 /* Close mode */ +#define S0_MR_MACRAW 0x0004 /* MAC RAW mode (promiscous) */ +#define S0_MR_MACRAW_MF 0x0044 /* MAC RAW mode (filtered) */ +#define W5300_S0_CR 0x0202 /* S0 Command Register */ +#define S0_CR_OPEN 0x0001 /* OPEN command */ +#define S0_CR_CLOSE 0x0010 /* CLOSE command */ +#define S0_CR_SEND 0x0020 /* SEND command */ +#define S0_CR_RECV 0x0040 /* RECV command */ +#define W5300_S0_IMR 0x0204 /* S0 Interrupt Mask Register */ +#define W5300_S0_IR 0x0206 /* S0 Interrupt Register */ +#define S0_IR_RECV 0x0004 /* Receive interrupt */ +#define S0_IR_SENDOK 0x0010 /* Send OK interrupt */ +#define W5300_S0_SSR 0x0208 /* S0 Socket Status Register */ +#define W5300_S0_TX_WRSR 0x0220 /* S0 TX Write Size Register */ +#define W5300_S0_TX_FSR 0x0224 /* S0 TX Free Size Register */ +#define W5300_S0_RX_RSR 0x0228 /* S0 Received data Size */ +#define W5300_S0_TX_FIFO 0x022e /* S0 Transmit FIFO */ +#define W5300_S0_RX_FIFO 0x0230 /* S0 Receive FIFO */ +#define W5300_REGS_LEN 0x0400 + +/* + * Device driver private data structure + */ +struct w5300_priv { + void __iomem *base; + spinlock_t reg_lock; + bool indirect; + u16 (*read) (struct w5300_priv *priv, u16 addr); + void (*write)(struct w5300_priv *priv, u16 addr, u16 data); + int irq; + int link_irq; + int link_gpio; + + struct napi_struct napi; + struct net_device *ndev; + bool promisc; + u32 msg_enable; +}; + +/************************************************************************ + * + * Lowlevel I/O functions + * + 
***********************************************************************/ + +/* + * In direct address mode host system can directly access W5300 registers + * after mapping to Memory-Mapped I/O space. + * + * 0x400 bytes are required for memory space. + */ +static inline u16 w5300_read_direct(struct w5300_priv *priv, u16 addr) +{ + return ioread16(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT)); +} + +static inline void w5300_write_direct(struct w5300_priv *priv, + u16 addr, u16 data) +{ + iowrite16(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT)); +} + +/* + * In indirect address mode host system indirectly accesses registers by + * using Indirect Mode Address Register (IDM_AR) and Indirect Mode Data + * Register (IDM_DR), which are directly mapped to Memory-Mapped I/O space. + * Mode Register (MR) is directly accessible. + * + * Only 0x06 bytes are required for memory space. + */ +#define W5300_IDM_AR 0x0002 /* Indirect Mode Address */ +#define W5300_IDM_DR 0x0004 /* Indirect Mode Data */ + +static u16 w5300_read_indirect(struct w5300_priv *priv, u16 addr) +{ + unsigned long flags; + u16 data; + + spin_lock_irqsave(&priv->reg_lock, flags); + w5300_write_direct(priv, W5300_IDM_AR, addr); + mmiowb(); + data = w5300_read_direct(priv, W5300_IDM_DR); + spin_unlock_irqrestore(&priv->reg_lock, flags); + + return data; +} + +static void w5300_write_indirect(struct w5300_priv *priv, u16 addr, u16 data) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->reg_lock, flags); + w5300_write_direct(priv, W5300_IDM_AR, addr); + mmiowb(); + w5300_write_direct(priv, W5300_IDM_DR, data); + mmiowb(); + spin_unlock_irqrestore(&priv->reg_lock, flags); +} + +#if defined(CONFIG_WIZNET_BUS_DIRECT) +#define w5300_read w5300_read_direct +#define w5300_write w5300_write_direct + +#elif defined(CONFIG_WIZNET_BUS_INDIRECT) +#define w5300_read w5300_read_indirect +#define w5300_write w5300_write_indirect + +#else /* CONFIG_WIZNET_BUS_ANY */ +#define w5300_read priv->read +#define w5300_write priv->write +#endif + +static u32 w5300_read32(struct w5300_priv *priv, u16 addr) +{ + u32 data; + data = w5300_read(priv, addr) << 16; + data |= w5300_read(priv, addr + 2); + return data; +} + +static void w5300_write32(struct w5300_priv *priv, u16 addr, u32 data) +{ + w5300_write(priv, addr, data >> 16); + w5300_write(priv, addr + 2, data); +} + +static int w5300_command(struct w5300_priv *priv, u16 cmd) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(100); + + w5300_write(priv, W5300_S0_CR, cmd); + mmiowb(); + + while (w5300_read(priv, W5300_S0_CR) != 0) { + if (time_after(jiffies, timeout)) + return -EIO; + cpu_relax(); + } + + return 0; +} + +static void w5300_read_frame(struct w5300_priv *priv, u8 *buf, int len) +{ + u16 fifo; + int i; + + for (i = 0; i < len; i += 2) { + fifo = w5300_read(priv, W5300_S0_RX_FIFO); + *buf++ = fifo >> 8; + *buf++ = fifo; + } + fifo = w5300_read(priv, W5300_S0_RX_FIFO); + fifo = w5300_read(priv, W5300_S0_RX_FIFO); +} + +static void w5300_write_frame(struct w5300_priv *priv, u8 *buf, int len) +{ + u16 fifo; + int i; + + for (i = 0; i < len; i += 2) { + fifo = *buf++ << 8; + fifo |= *buf++; + w5300_write(priv, W5300_S0_TX_FIFO, fifo); + } + w5300_write32(priv, W5300_S0_TX_WRSR, len); +} + +static void w5300_write_macaddr(struct w5300_priv *priv) +{ + struct net_device *ndev = priv->ndev; + w5300_write32(priv, W5300_SHARL, + ndev->dev_addr[0] << 24 | + ndev->dev_addr[1] << 16 | + ndev->dev_addr[2] << 8 | + ndev->dev_addr[3]); + w5300_write(priv, W5300_SHARH, + ndev->dev_addr[4] << 8 
| + ndev->dev_addr[5]); + mmiowb(); +} + +static void w5300_hw_reset(struct w5300_priv *priv) +{ + w5300_write_direct(priv, W5300_MR, MR_RST); + mmiowb(); + mdelay(5); + w5300_write_direct(priv, W5300_MR, priv->indirect ? + MR_WDF(7) | MR_PB | MR_IND : + MR_WDF(7) | MR_PB); + mmiowb(); + w5300_write(priv, W5300_IMR, 0); + w5300_write_macaddr(priv); + + /* Configure 128K of internal memory + * as 64K RX fifo and 64K TX fifo + */ + w5300_write32(priv, W5300_RMSRL, 64 << 24); + w5300_write32(priv, W5300_RMSRH, 0); + w5300_write32(priv, W5300_TMSRL, 64 << 24); + w5300_write32(priv, W5300_TMSRH, 0); + w5300_write(priv, W5300_MTYPE, 0x00ff); + mmiowb(); +} + +static void w5300_hw_start(struct w5300_priv *priv) +{ + w5300_write(priv, W5300_S0_MR, priv->promisc ? + S0_MR_MACRAW : S0_MR_MACRAW_MF); + mmiowb(); + w5300_command(priv, S0_CR_OPEN); + w5300_write(priv, W5300_S0_IMR, S0_IR_RECV | S0_IR_SENDOK); + w5300_write(priv, W5300_IMR, IR_S0); + mmiowb(); +} + +static void w5300_hw_close(struct w5300_priv *priv) +{ + w5300_write(priv, W5300_IMR, 0); + mmiowb(); + w5300_command(priv, S0_CR_CLOSE); +} + +/*********************************************************************** + * + * Device driver functions / callbacks + * + ***********************************************************************/ + +static void w5300_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(ndev->dev.parent), + sizeof(info->bus_info)); +} + +static u32 w5300_get_link(struct net_device *ndev) +{ + struct w5300_priv *priv = netdev_priv(ndev); + + if (gpio_is_valid(priv->link_gpio)) + return !!gpio_get_value(priv->link_gpio); + + return 1; +} + +static u32 w5300_get_msglevel(struct net_device *ndev) +{ + struct w5300_priv *priv = netdev_priv(ndev); + + return priv->msg_enable; +} + +static void w5300_set_msglevel(struct net_device *ndev, u32 value) +{ + struct w5300_priv *priv = netdev_priv(ndev); + + priv->msg_enable = value; +} + +static int w5300_get_regs_len(struct net_device *ndev) +{ + return W5300_REGS_LEN; +} + +static void w5300_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, void *_buf) +{ + struct w5300_priv *priv = netdev_priv(ndev); + u8 *buf = _buf; + u16 addr; + u16 data; + + regs->version = 1; + for (addr = 0; addr < W5300_REGS_LEN; addr += 2) { + switch (addr & 0x23f) { + case W5300_S0_TX_FIFO: /* cannot read TX_FIFO */ + case W5300_S0_RX_FIFO: /* cannot read RX_FIFO */ + data = 0xffff; + break; + default: + data = w5300_read(priv, addr); + break; + } + *buf++ = data >> 8; + *buf++ = data; + } +} + +static void w5300_tx_timeout(struct net_device *ndev) +{ + struct w5300_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + w5300_hw_reset(priv); + w5300_hw_start(priv); + ndev->stats.tx_errors++; + ndev->trans_start = jiffies; + netif_wake_queue(ndev); +} + +static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev) +{ + struct w5300_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + + w5300_write_frame(priv, skb->data, skb->len); + mmiowb(); + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + dev_kfree_skb(skb); + netif_dbg(priv, tx_queued, ndev, "tx queued\n"); + + w5300_command(priv, S0_CR_SEND); + + return NETDEV_TX_OK; +} + +static int w5300_napi_poll(struct napi_struct *napi, int budget) +{ + struct w5300_priv *priv = container_of(napi, struct w5300_priv, napi); + struct 
net_device *ndev = priv->ndev; + struct sk_buff *skb; + int rx_count; + u16 rx_len; + + for (rx_count = 0; rx_count < budget; rx_count++) { + u32 rx_fifo_len = w5300_read32(priv, W5300_S0_RX_RSR); + if (rx_fifo_len == 0) + break; + + rx_len = w5300_read(priv, W5300_S0_RX_FIFO); + + skb = netdev_alloc_skb_ip_align(ndev, roundup(rx_len, 2)); + if (unlikely(!skb)) { + u32 i; + for (i = 0; i < rx_fifo_len; i += 2) + w5300_read(priv, W5300_S0_RX_FIFO); + ndev->stats.rx_dropped++; + return -ENOMEM; + } + + skb_put(skb, rx_len); + w5300_read_frame(priv, skb->data, rx_len); + skb->protocol = eth_type_trans(skb, ndev); + + netif_receive_skb(skb); + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += rx_len; + } + + if (rx_count < budget) { + w5300_write(priv, W5300_IMR, IR_S0); + mmiowb(); + napi_complete(napi); + } + + return rx_count; +} + +static irqreturn_t w5300_interrupt(int irq, void *ndev_instance) +{ + struct net_device *ndev = ndev_instance; + struct w5300_priv *priv = netdev_priv(ndev); + + int ir = w5300_read(priv, W5300_S0_IR); + if (!ir) + return IRQ_NONE; + w5300_write(priv, W5300_S0_IR, ir); + mmiowb(); + + if (ir & S0_IR_SENDOK) { + netif_dbg(priv, tx_done, ndev, "tx done\n"); + netif_wake_queue(ndev); + } + + if (ir & S0_IR_RECV) { + if (napi_schedule_prep(&priv->napi)) { + w5300_write(priv, W5300_IMR, 0); + mmiowb(); + __napi_schedule(&priv->napi); + } + } + + return IRQ_HANDLED; +} + +static irqreturn_t w5300_detect_link(int irq, void *ndev_instance) +{ + struct net_device *ndev = ndev_instance; + struct w5300_priv *priv = netdev_priv(ndev); + + if (netif_running(ndev)) { + if (gpio_get_value(priv->link_gpio) != 0) { + netif_info(priv, link, ndev, "link is up\n"); + netif_carrier_on(ndev); + } else { + netif_info(priv, link, ndev, "link is down\n"); + netif_carrier_off(ndev); + } + } + + return IRQ_HANDLED; +} + +static void w5300_set_rx_mode(struct net_device *ndev) +{ + struct w5300_priv *priv = netdev_priv(ndev); + bool set_promisc = (ndev->flags & IFF_PROMISC) != 0; + + if (priv->promisc != set_promisc) { + priv->promisc = set_promisc; + w5300_hw_start(priv); + } +} + +static int w5300_set_macaddr(struct net_device *ndev, void *addr) +{ + struct w5300_priv *priv = netdev_priv(ndev); + struct sockaddr *sock_addr = addr; + + if (!is_valid_ether_addr(sock_addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN); + ndev->addr_assign_type &= ~NET_ADDR_RANDOM; + w5300_write_macaddr(priv); + return 0; +} + +static int w5300_open(struct net_device *ndev) +{ + struct w5300_priv *priv = netdev_priv(ndev); + + netif_info(priv, ifup, ndev, "enabling\n"); + if (!is_valid_ether_addr(ndev->dev_addr)) + return -EINVAL; + w5300_hw_start(priv); + napi_enable(&priv->napi); + netif_start_queue(ndev); + if (!gpio_is_valid(priv->link_gpio) || + gpio_get_value(priv->link_gpio) != 0) + netif_carrier_on(ndev); + return 0; +} + +static int w5300_stop(struct net_device *ndev) +{ + struct w5300_priv *priv = netdev_priv(ndev); + + netif_info(priv, ifdown, ndev, "shutting down\n"); + w5300_hw_close(priv); + netif_carrier_off(ndev); + netif_stop_queue(ndev); + napi_disable(&priv->napi); + return 0; +} + +static const struct ethtool_ops w5300_ethtool_ops = { + .get_drvinfo = w5300_get_drvinfo, + .get_msglevel = w5300_get_msglevel, + .set_msglevel = w5300_set_msglevel, + .get_link = w5300_get_link, + .get_regs_len = w5300_get_regs_len, + .get_regs = w5300_get_regs, +}; + +static const struct net_device_ops w5300_netdev_ops = { + .ndo_open = w5300_open, + .ndo_stop = 
w5300_stop, + .ndo_start_xmit = w5300_start_tx, + .ndo_tx_timeout = w5300_tx_timeout, + .ndo_set_rx_mode = w5300_set_rx_mode, + .ndo_set_mac_address = w5300_set_macaddr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static int __devinit w5300_hw_probe(struct platform_device *pdev) +{ + struct wiznet_platform_data *data = pdev->dev.platform_data; + struct net_device *ndev = platform_get_drvdata(pdev); + struct w5300_priv *priv = netdev_priv(ndev); + const char *name = netdev_name(ndev); + struct resource *mem; + int mem_size; + int irq; + int ret; + + if (data && is_valid_ether_addr(data->mac_addr)) { + memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); + } else { + random_ether_addr(ndev->dev_addr); + ndev->addr_assign_type |= NET_ADDR_RANDOM; + } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) + return -ENXIO; + mem_size = resource_size(mem); + if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name)) + return -EBUSY; + priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size); + if (!priv->base) + return -EBUSY; + + spin_lock_init(&priv->reg_lock); + priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE; + if (priv->indirect) { + priv->read = w5300_read_indirect; + priv->write = w5300_write_indirect; + } else { + priv->read = w5300_read_direct; + priv->write = w5300_write_direct; + } + + w5300_hw_reset(priv); + if (w5300_read(priv, W5300_IDR) != IDR_W5300) + return -ENODEV; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + ret = request_irq(irq, w5300_interrupt, + IRQ_TYPE_LEVEL_LOW, name, ndev); + if (ret < 0) + return ret; + priv->irq = irq; + + priv->link_gpio = data ? data->link_gpio : -EINVAL; + if (gpio_is_valid(priv->link_gpio)) { + char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL); + if (!link_name) + return -ENOMEM; + snprintf(link_name, 16, "%s-link", name); + priv->link_irq = gpio_to_irq(priv->link_gpio); + if (request_any_context_irq(priv->link_irq, w5300_detect_link, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + link_name, priv->ndev) < 0) + priv->link_gpio = -EINVAL; + } + + netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq); + return 0; +} + +static int __devinit w5300_probe(struct platform_device *pdev) +{ + struct w5300_priv *priv; + struct net_device *ndev; + int err; + + ndev = alloc_etherdev(sizeof(*priv)); + if (!ndev) + return -ENOMEM; + SET_NETDEV_DEV(ndev, &pdev->dev); + platform_set_drvdata(pdev, ndev); + priv = netdev_priv(ndev); + priv->ndev = ndev; + + ether_setup(ndev); + ndev->netdev_ops = &w5300_netdev_ops; + ndev->ethtool_ops = &w5300_ethtool_ops; + ndev->watchdog_timeo = HZ; + netif_napi_add(ndev, &priv->napi, w5300_napi_poll, 16); + + /* This chip doesn't support VLAN packets with normal MTU, + * so disable VLAN for this device. 
+ */ + ndev->features |= NETIF_F_VLAN_CHALLENGED; + + err = register_netdev(ndev); + if (err < 0) + goto err_register; + + err = w5300_hw_probe(pdev); + if (err < 0) + goto err_hw_probe; + + return 0; + +err_hw_probe: + unregister_netdev(ndev); +err_register: + free_netdev(ndev); + platform_set_drvdata(pdev, NULL); + return err; +} + +static int __devexit w5300_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct w5300_priv *priv = netdev_priv(ndev); + + w5300_hw_reset(priv); + free_irq(priv->irq, ndev); + if (gpio_is_valid(priv->link_gpio)) + free_irq(priv->link_irq, ndev); + + unregister_netdev(ndev); + free_netdev(ndev); + platform_set_drvdata(pdev, NULL); + return 0; +} + +#ifdef CONFIG_PM +static int w5300_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + struct w5300_priv *priv = netdev_priv(ndev); + + if (netif_running(ndev)) { + netif_carrier_off(ndev); + netif_device_detach(ndev); + + w5300_hw_close(priv); + } + return 0; +} + +static int w5300_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + struct w5300_priv *priv = netdev_priv(ndev); + + if (!netif_running(ndev)) { + w5300_hw_reset(priv); + w5300_hw_start(priv); + + netif_device_attach(ndev); + if (!gpio_is_valid(priv->link_gpio) || + gpio_get_value(priv->link_gpio) != 0) + netif_carrier_on(ndev); + } + return 0; +} +#endif /* CONFIG_PM */ + +static SIMPLE_DEV_PM_OPS(w5300_pm_ops, w5300_suspend, w5300_resume); + +static struct platform_driver w5300_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .pm = &w5300_pm_ops, + }, + .probe = w5300_probe, + .remove = __devexit_p(w5300_remove), +}; + +module_platform_driver(w5300_driver); diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index d21591a2c59..1eaf7128afe 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1000,6 +1000,7 @@ static const struct ethtool_ops temac_ethtool_ops = { .set_settings = temac_set_settings, .nway_reset = temac_nway_reset, .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, }; static int __devinit temac_of_probe(struct platform_device *op) diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig index cf67352cea1..3f431019e61 100644 --- a/drivers/net/ethernet/xscale/Kconfig +++ b/drivers/net/ethernet/xscale/Kconfig @@ -5,8 +5,8 @@ config NET_VENDOR_XSCALE bool "Intel XScale IXP devices" default y - depends on NET_VENDOR_INTEL && ((ARM && ARCH_IXP4XX && \ - IXP4XX_NPE && IXP4XX_QMGR) || ARCH_ENP2611) + depends on NET_VENDOR_INTEL && (ARM && ARCH_IXP4XX && \ + IXP4XX_NPE && IXP4XX_QMGR) ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from @@ -27,6 +27,4 @@ config IXP4XX_ETH Say Y here if you want to use built-in Ethernet ports on IXP4xx processor. -source "drivers/net/ethernet/xscale/ixp2000/Kconfig" - endif # NET_VENDOR_XSCALE diff --git a/drivers/net/ethernet/xscale/Makefile b/drivers/net/ethernet/xscale/Makefile index b195b9d7fe8..abc3b031fba 100644 --- a/drivers/net/ethernet/xscale/Makefile +++ b/drivers/net/ethernet/xscale/Makefile @@ -2,5 +2,4 @@ # Makefile for the Intel XScale IXP device drivers. 
# -obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/ obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o diff --git a/drivers/net/ethernet/xscale/ixp2000/Kconfig b/drivers/net/ethernet/xscale/ixp2000/Kconfig deleted file mode 100644 index 58dbc5b876b..00000000000 --- a/drivers/net/ethernet/xscale/ixp2000/Kconfig +++ /dev/null @@ -1,6 +0,0 @@ -config ENP2611_MSF_NET - tristate "Radisys ENP2611 MSF network interface support" - depends on ARCH_ENP2611 - ---help--- - This is a driver for the MSF network interface unit in - the IXP2400 on the Radisys ENP2611 platform. diff --git a/drivers/net/ethernet/xscale/ixp2000/Makefile b/drivers/net/ethernet/xscale/ixp2000/Makefile deleted file mode 100644 index fd38351ceaa..00000000000 --- a/drivers/net/ethernet/xscale/ixp2000/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -obj-$(CONFIG_ENP2611_MSF_NET) += enp2611_mod.o - -enp2611_mod-objs := caleb.o enp2611.o ixp2400-msf.o ixpdev.o pm3386.o diff --git a/drivers/net/ethernet/xscale/ixp2000/caleb.c b/drivers/net/ethernet/xscale/ixp2000/caleb.c deleted file mode 100644 index 7dea5b95012..00000000000 --- a/drivers/net/ethernet/xscale/ixp2000/caleb.c +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611 - * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org> - * Dedicated to Marija Kulikova. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <linux/module.h> -#include <linux/delay.h> -#include <asm/io.h> -#include "caleb.h" - -#define CALEB_IDLO 0x00 -#define CALEB_IDHI 0x01 -#define CALEB_RID 0x02 -#define CALEB_RESET 0x03 -#define CALEB_INTREN0 0x04 -#define CALEB_INTREN1 0x05 -#define CALEB_INTRSTAT0 0x06 -#define CALEB_INTRSTAT1 0x07 -#define CALEB_PORTEN 0x08 -#define CALEB_BURST 0x09 -#define CALEB_PORTPAUS 0x0A -#define CALEB_PORTPAUSD 0x0B -#define CALEB_PHY0RX 0x10 -#define CALEB_PHY1RX 0x11 -#define CALEB_PHY0TX 0x12 -#define CALEB_PHY1TX 0x13 -#define CALEB_IXPRX_HI_CNTR 0x15 -#define CALEB_PHY0RX_HI_CNTR 0x16 -#define CALEB_PHY1RX_HI_CNTR 0x17 -#define CALEB_IXPRX_CNTR 0x18 -#define CALEB_PHY0RX_CNTR 0x19 -#define CALEB_PHY1RX_CNTR 0x1A -#define CALEB_IXPTX_CNTR 0x1B -#define CALEB_PHY0TX_CNTR 0x1C -#define CALEB_PHY1TX_CNTR 0x1D -#define CALEB_DEBUG0 0x1E -#define CALEB_DEBUG1 0x1F - - -static u8 caleb_reg_read(int reg) -{ - u8 value; - - value = *((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg)); - -// printk(KERN_INFO "caleb_reg_read(%d) = %.2x\n", reg, value); - - return value; -} - -static void caleb_reg_write(int reg, u8 value) -{ - u8 dummy; - -// printk(KERN_INFO "caleb_reg_write(%d, %.2x)\n", reg, value); - - *((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg)) = value; - - dummy = *((volatile u8 *)ENP2611_CALEB_VIRT_BASE); - __asm__ __volatile__("mov %0, %0" : "+r" (dummy)); -} - - -void caleb_reset(void) -{ - /* - * Perform a chip reset. - */ - caleb_reg_write(CALEB_RESET, 0x02); - udelay(1); - - /* - * Enable all interrupt sources. This is needed to get - * meaningful results out of the status bits (register 6 - * and 7.) - */ - caleb_reg_write(CALEB_INTREN0, 0xff); - caleb_reg_write(CALEB_INTREN1, 0x07); - - /* - * Set RX and TX FIFO thresholds to 1.5kb. 
- */ - caleb_reg_write(CALEB_PHY0RX, 0x11); - caleb_reg_write(CALEB_PHY1RX, 0x11); - caleb_reg_write(CALEB_PHY0TX, 0x11); - caleb_reg_write(CALEB_PHY1TX, 0x11); - - /* - * Program SPI-3 burst size. - */ - caleb_reg_write(CALEB_BURST, 0); // 64-byte RBUF mpackets -// caleb_reg_write(CALEB_BURST, 1); // 128-byte RBUF mpackets -// caleb_reg_write(CALEB_BURST, 2); // 256-byte RBUF mpackets -} - -void caleb_enable_rx(int port) -{ - u8 temp; - - temp = caleb_reg_read(CALEB_PORTEN); - temp |= 1 << port; - caleb_reg_write(CALEB_PORTEN, temp); -} - -void caleb_disable_rx(int port) -{ - u8 temp; - - temp = caleb_reg_read(CALEB_PORTEN); - temp &= ~(1 << port); - caleb_reg_write(CALEB_PORTEN, temp); -} - -void caleb_enable_tx(int port) -{ - u8 temp; - - temp = caleb_reg_read(CALEB_PORTEN); - temp |= 1 << (port + 4); - caleb_reg_write(CALEB_PORTEN, temp); -} - -void caleb_disable_tx(int port) -{ - u8 temp; - - temp = caleb_reg_read(CALEB_PORTEN); - temp &= ~(1 << (port + 4)); - caleb_reg_write(CALEB_PORTEN, temp); -} diff --git a/drivers/net/ethernet/xscale/ixp2000/caleb.h b/drivers/net/ethernet/xscale/ixp2000/caleb.h deleted file mode 100644 index e93a1ef5b8a..00000000000 --- a/drivers/net/ethernet/xscale/ixp2000/caleb.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611 - * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org> - * Dedicated to Marija Kulikova. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#ifndef __CALEB_H -#define __CALEB_H - -void caleb_reset(void); -void caleb_enable_rx(int port); -void caleb_disable_rx(int port); -void caleb_enable_tx(int port); -void caleb_disable_tx(int port); - - -#endif diff --git a/drivers/net/ethernet/xscale/ixp2000/enp2611.c b/drivers/net/ethernet/xscale/ixp2000/enp2611.c deleted file mode 100644 index 34a6cfd1793..00000000000 --- a/drivers/net/ethernet/xscale/ixp2000/enp2611.c +++ /dev/null @@ -1,232 +0,0 @@ -/* - * IXP2400 MSF network device driver for the Radisys ENP2611 - * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org> - * Dedicated to Marija Kulikova. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/init.h> -#include <linux/moduleparam.h> -#include <asm/hardware/uengine.h> -#include <asm/mach-types.h> -#include <asm/io.h> -#include "ixpdev.h" -#include "caleb.h" -#include "ixp2400-msf.h" -#include "pm3386.h" - -/*********************************************************************** - * The Radisys ENP2611 is a PCI form factor board with three SFP GBIC - * slots, connected via two PMC/Sierra 3386s and an SPI-3 bridge FPGA - * to the IXP2400. 
- * - * +-------------+ - * SFP GBIC #0 ---+ | +---------+ - * | PM3386 #0 +-------+ | - * SFP GBIC #1 ---+ | | "Caleb" | +---------+ - * +-------------+ | | | | - * | SPI-3 +---------+ IXP2400 | - * +-------------+ | bridge | | | - * SFP GBIC #2 ---+ | | FPGA | +---------+ - * | PM3386 #1 +-------+ | - * | | +---------+ - * +-------------+ - * ^ ^ ^ - * | 1.25Gbaud | 104MHz | 104MHz - * | SERDES ea. | SPI-3 ea. | SPI-3 - * - ***********************************************************************/ -static struct ixp2400_msf_parameters enp2611_msf_parameters = -{ - .rx_mode = IXP2400_RX_MODE_UTOPIA_POS | - IXP2400_RX_MODE_1x32 | - IXP2400_RX_MODE_MPHY | - IXP2400_RX_MODE_MPHY_32 | - IXP2400_RX_MODE_MPHY_POLLED_STATUS | - IXP2400_RX_MODE_MPHY_LEVEL3 | - IXP2400_RX_MODE_RBUF_SIZE_64, - - .rxclk01_multiplier = IXP2400_PLL_MULTIPLIER_16, - - .rx_poll_ports = 3, - - .rx_channel_mode = { - IXP2400_PORT_RX_MODE_MASTER | - IXP2400_PORT_RX_MODE_POS_PHY | - IXP2400_PORT_RX_MODE_POS_PHY_L3 | - IXP2400_PORT_RX_MODE_ODD_PARITY | - IXP2400_PORT_RX_MODE_2_CYCLE_DECODE, - - IXP2400_PORT_RX_MODE_MASTER | - IXP2400_PORT_RX_MODE_POS_PHY | - IXP2400_PORT_RX_MODE_POS_PHY_L3 | - IXP2400_PORT_RX_MODE_ODD_PARITY | - IXP2400_PORT_RX_MODE_2_CYCLE_DECODE, - - IXP2400_PORT_RX_MODE_MASTER | - IXP2400_PORT_RX_MODE_POS_PHY | - IXP2400_PORT_RX_MODE_POS_PHY_L3 | - IXP2400_PORT_RX_MODE_ODD_PARITY | - IXP2400_PORT_RX_MODE_2_CYCLE_DECODE, - - IXP2400_PORT_RX_MODE_MASTER | - IXP2400_PORT_RX_MODE_POS_PHY | - IXP2400_PORT_RX_MODE_POS_PHY_L3 | - IXP2400_PORT_RX_MODE_ODD_PARITY | - IXP2400_PORT_RX_MODE_2_CYCLE_DECODE - }, - - .tx_mode = IXP2400_TX_MODE_UTOPIA_POS | - IXP2400_TX_MODE_1x32 | - IXP2400_TX_MODE_MPHY | - IXP2400_TX_MODE_MPHY_32 | - IXP2400_TX_MODE_MPHY_POLLED_STATUS | - IXP2400_TX_MODE_MPHY_LEVEL3 | - IXP2400_TX_MODE_TBUF_SIZE_64, - - .txclk01_multiplier = IXP2400_PLL_MULTIPLIER_16, - - .tx_poll_ports = 3, - - .tx_channel_mode = { - IXP2400_PORT_TX_MODE_MASTER | - IXP2400_PORT_TX_MODE_POS_PHY | - IXP2400_PORT_TX_MODE_ODD_PARITY | - IXP2400_PORT_TX_MODE_2_CYCLE_DECODE, - - IXP2400_PORT_TX_MODE_MASTER | - IXP2400_PORT_TX_MODE_POS_PHY | - IXP2400_PORT_TX_MODE_ODD_PARITY | - IXP2400_PORT_TX_MODE_2_CYCLE_DECODE, - - IXP2400_PORT_TX_MODE_MASTER | - IXP2400_PORT_TX_MODE_POS_PHY | - IXP2400_PORT_TX_MODE_ODD_PARITY | - IXP2400_PORT_TX_MODE_2_CYCLE_DECODE, - - IXP2400_PORT_TX_MODE_MASTER | - IXP2400_PORT_TX_MODE_POS_PHY | - IXP2400_PORT_TX_MODE_ODD_PARITY | - IXP2400_PORT_TX_MODE_2_CYCLE_DECODE - } -}; - -static struct net_device *nds[3]; -static struct timer_list link_check_timer; - -/* @@@ Poll the SFP moddef0 line too. */ -/* @@@ Try to use the pm3386 DOOL interrupt as well. */ -static void enp2611_check_link_status(unsigned long __dummy) -{ - int i; - - for (i = 0; i < 3; i++) { - struct net_device *dev; - int status; - - dev = nds[i]; - if (dev == NULL) - continue; - - status = pm3386_is_link_up(i); - if (status && !netif_carrier_ok(dev)) { - /* @@@ Should report autonegotiation status. 
*/ - printk(KERN_INFO "%s: NIC Link is Up\n", dev->name); - - pm3386_enable_tx(i); - caleb_enable_tx(i); - netif_carrier_on(dev); - } else if (!status && netif_carrier_ok(dev)) { - printk(KERN_INFO "%s: NIC Link is Down\n", dev->name); - - netif_carrier_off(dev); - caleb_disable_tx(i); - pm3386_disable_tx(i); - } - } - - link_check_timer.expires = jiffies + HZ / 10; - add_timer(&link_check_timer); -} - -static void enp2611_set_port_admin_status(int port, int up) -{ - if (up) { - caleb_enable_rx(port); - - pm3386_set_carrier(port, 1); - pm3386_enable_rx(port); - } else { - caleb_disable_tx(port); - pm3386_disable_tx(port); - /* @@@ Flush out pending packets. */ - pm3386_set_carrier(port, 0); - - pm3386_disable_rx(port); - caleb_disable_rx(port); - } -} - -static int __init enp2611_init_module(void) -{ - int ports; - int i; - - if (!machine_is_enp2611()) - return -ENODEV; - - caleb_reset(); - pm3386_reset(); - - ports = pm3386_port_count(); - for (i = 0; i < ports; i++) { - nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv)); - if (nds[i] == NULL) { - while (--i >= 0) - free_netdev(nds[i]); - return -ENOMEM; - } - - pm3386_init_port(i); - pm3386_get_mac(i, nds[i]->dev_addr); - } - - ixp2400_msf_init(&enp2611_msf_parameters); - - if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) { - for (i = 0; i < ports; i++) - if (nds[i]) - free_netdev(nds[i]); - return -EINVAL; - } - - init_timer(&link_check_timer); - link_check_timer.function = enp2611_check_link_status; - link_check_timer.expires = jiffies; - add_timer(&link_check_timer); - - return 0; -} - -static void __exit enp2611_cleanup_module(void) -{ - int i; - - del_timer_sync(&link_check_timer); - - ixpdev_deinit(); - for (i = 0; i < 3; i++) - free_netdev(nds[i]); -} - -module_init(enp2611_init_module); -module_exit(enp2611_cleanup_module); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c deleted file mode 100644 index f5ffd7e05d2..00000000000 --- a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Generic library functions for the MSF (Media and Switch Fabric) unit - * found on the Intel IXP2400 network processor. - * - * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org> - * Dedicated to Marija Kulikova. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1 of the - * License, or (at your option) any later version. - */ - -#include <linux/kernel.h> -#include <linux/init.h> -#include <mach/hardware.h> -#include <mach/ixp2000-regs.h> -#include <asm/delay.h> -#include <asm/io.h> -#include "ixp2400-msf.h" - -/* - * This is the Intel recommended PLL init procedure as described on - * page 340 of the IXP2400/IXP2800 Programmer's Reference Manual. - */ -static void ixp2400_pll_init(struct ixp2400_msf_parameters *mp) -{ - int rx_dual_clock; - int tx_dual_clock; - u32 value; - - /* - * If the RX mode is not 1x32, we have to enable both RX PLLs - * (#0 and #1.) The same thing for the TX direction. - */ - rx_dual_clock = !!(mp->rx_mode & IXP2400_RX_MODE_WIDTH_MASK); - tx_dual_clock = !!(mp->tx_mode & IXP2400_TX_MODE_WIDTH_MASK); - - /* - * Read initial value. - */ - value = ixp2000_reg_read(IXP2000_MSF_CLK_CNTRL); - - /* - * Put PLLs in powerdown and bypass mode. 
- */ - value |= 0x0000f0f0; - ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value); - - /* - * Set single or dual clock mode bits. - */ - value &= ~0x03000000; - value |= (rx_dual_clock << 24) | (tx_dual_clock << 25); - - /* - * Set multipliers. - */ - value &= ~0x00ff0000; - value |= mp->rxclk01_multiplier << 16; - value |= mp->rxclk23_multiplier << 18; - value |= mp->txclk01_multiplier << 20; - value |= mp->txclk23_multiplier << 22; - - /* - * And write value. - */ - ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value); - - /* - * Disable PLL bypass mode. - */ - value &= ~(0x00005000 | rx_dual_clock << 13 | tx_dual_clock << 15); - ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value); - - /* - * Turn on PLLs. - */ - value &= ~(0x00000050 | rx_dual_clock << 5 | tx_dual_clock << 7); - ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value); - - /* - * Wait for PLLs to lock. There are lock status bits, but IXP2400 - * erratum #65 says that these lock bits should not be relied upon - * as they might not accurately reflect the true state of the PLLs. - */ - udelay(100); -} - -/* - * Needed according to p480 of Programmer's Reference Manual. - */ -static void ixp2400_msf_free_rbuf_entries(struct ixp2400_msf_parameters *mp) -{ - int size_bits; - int i; - - /* - * Work around IXP2400 erratum #69 (silent RBUF-to-DRAM transfer - * corruption) in the Intel-recommended way: do not add the RBUF - * elements susceptible to corruption to the freelist. - */ - size_bits = mp->rx_mode & IXP2400_RX_MODE_RBUF_SIZE_MASK; - if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_64) { - for (i = 1; i < 128; i++) { - if (i == 9 || i == 18 || i == 27) - continue; - ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i); - } - } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_128) { - for (i = 1; i < 64; i++) { - if (i == 4 || i == 9 || i == 13) - continue; - ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i); - } - } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_256) { - for (i = 1; i < 32; i++) { - if (i == 2 || i == 4 || i == 6) - continue; - ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i); - } - } -} - -static u32 ixp2400_msf_valid_channels(u32 reg) -{ - u32 channels; - - channels = 0; - switch (reg & IXP2400_RX_MODE_WIDTH_MASK) { - case IXP2400_RX_MODE_1x32: - channels = 0x1; - if (reg & IXP2400_RX_MODE_MPHY && - !(reg & IXP2400_RX_MODE_MPHY_32)) - channels = 0xf; - break; - - case IXP2400_RX_MODE_2x16: - channels = 0x5; - break; - - case IXP2400_RX_MODE_4x8: - channels = 0xf; - break; - - case IXP2400_RX_MODE_1x16_2x8: - channels = 0xd; - break; - } - - return channels; -} - -static void ixp2400_msf_enable_rx(struct ixp2400_msf_parameters *mp) -{ - u32 value; - - value = ixp2000_reg_read(IXP2000_MSF_RX_CONTROL) & 0x0fffffff; - value |= ixp2400_msf_valid_channels(mp->rx_mode) << 28; - ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, value); -} - -static void ixp2400_msf_enable_tx(struct ixp2400_msf_parameters *mp) -{ - u32 value; - - value = ixp2000_reg_read(IXP2000_MSF_TX_CONTROL) & 0x0fffffff; - value |= ixp2400_msf_valid_channels(mp->tx_mode) << 28; - ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, value); -} - - -void ixp2400_msf_init(struct ixp2400_msf_parameters *mp) -{ - u32 value; - int i; - - /* - * Init the RX/TX PLLs based on the passed parameter block. - */ - ixp2400_pll_init(mp); - - /* - * Reset MSF. Bit 7 in IXP_RESET_0 resets the MSF. - */ - value = ixp2000_reg_read(IXP2000_RESET0); - ixp2000_reg_write(IXP2000_RESET0, value | 0x80); - ixp2000_reg_write(IXP2000_RESET0, value & ~0x80); - - /* - * Initialise the RX section. 
- */ - ixp2000_reg_write(IXP2000_MSF_RX_MPHY_POLL_LIMIT, mp->rx_poll_ports - 1); - ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, mp->rx_mode); - for (i = 0; i < 4; i++) { - ixp2000_reg_write(IXP2000_MSF_RX_UP_CONTROL_0 + i, - mp->rx_channel_mode[i]); - } - ixp2400_msf_free_rbuf_entries(mp); - ixp2400_msf_enable_rx(mp); - - /* - * Initialise the TX section. - */ - ixp2000_reg_write(IXP2000_MSF_TX_MPHY_POLL_LIMIT, mp->tx_poll_ports - 1); - ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, mp->tx_mode); - for (i = 0; i < 4; i++) { - ixp2000_reg_write(IXP2000_MSF_TX_UP_CONTROL_0 + i, - mp->tx_channel_mode[i]); - } - ixp2400_msf_enable_tx(mp); -} diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h deleted file mode 100644 index 3ac1af2771d..00000000000 --- a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Generic library functions for the MSF (Media and Switch Fabric) unit - * found on the Intel IXP2400 network processor. - * - * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org> - * Dedicated to Marija Kulikova. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1 of the - * License, or (at your option) any later version. - */ - -#ifndef __IXP2400_MSF_H -#define __IXP2400_MSF_H - -struct ixp2400_msf_parameters -{ - u32 rx_mode; - unsigned rxclk01_multiplier:2; - unsigned rxclk23_multiplier:2; - unsigned rx_poll_ports:6; - u32 rx_channel_mode[4]; - - u32 tx_mode; - unsigned txclk01_multiplier:2; - unsigned txclk23_multiplier:2; - unsigned tx_poll_ports:6; - u32 tx_channel_mode[4]; -}; - -void ixp2400_msf_init(struct ixp2400_msf_parameters *mp); - -#define IXP2400_PLL_MULTIPLIER_48 0x00 -#define IXP2400_PLL_MULTIPLIER_24 0x01 -#define IXP2400_PLL_MULTIPLIER_16 0x02 -#define IXP2400_PLL_MULTIPLIER_12 0x03 - -#define IXP2400_RX_MODE_CSIX 0x00400000 -#define IXP2400_RX_MODE_UTOPIA_POS 0x00000000 -#define IXP2400_RX_MODE_WIDTH_MASK 0x00300000 -#define IXP2400_RX_MODE_1x16_2x8 0x00300000 -#define IXP2400_RX_MODE_4x8 0x00200000 -#define IXP2400_RX_MODE_2x16 0x00100000 -#define IXP2400_RX_MODE_1x32 0x00000000 -#define IXP2400_RX_MODE_MPHY 0x00080000 -#define IXP2400_RX_MODE_SPHY 0x00000000 -#define IXP2400_RX_MODE_MPHY_32 0x00040000 -#define IXP2400_RX_MODE_MPHY_4 0x00000000 -#define IXP2400_RX_MODE_MPHY_POLLED_STATUS 0x00020000 -#define IXP2400_RX_MODE_MPHY_DIRECT_STATUS 0x00000000 -#define IXP2400_RX_MODE_CBUS_FULL_DUPLEX 0x00010000 -#define IXP2400_RX_MODE_CBUS_SIMPLEX 0x00000000 -#define IXP2400_RX_MODE_MPHY_LEVEL2 0x00004000 -#define IXP2400_RX_MODE_MPHY_LEVEL3 0x00000000 -#define IXP2400_RX_MODE_CBUS_8BIT 0x00002000 -#define IXP2400_RX_MODE_CBUS_4BIT 0x00000000 -#define IXP2400_RX_MODE_CSIX_SINGLE_FREELIST 0x00000200 -#define IXP2400_RX_MODE_CSIX_SPLIT_FREELISTS 0x00000000 -#define IXP2400_RX_MODE_RBUF_SIZE_MASK 0x0000000c -#define IXP2400_RX_MODE_RBUF_SIZE_256 0x00000008 -#define IXP2400_RX_MODE_RBUF_SIZE_128 0x00000004 -#define IXP2400_RX_MODE_RBUF_SIZE_64 0x00000000 - -#define IXP2400_PORT_RX_MODE_SLAVE 0x00000040 -#define IXP2400_PORT_RX_MODE_MASTER 0x00000000 -#define IXP2400_PORT_RX_MODE_POS_PHY_L3 0x00000020 -#define IXP2400_PORT_RX_MODE_POS_PHY_L2 0x00000000 -#define IXP2400_PORT_RX_MODE_POS_PHY 0x00000010 -#define IXP2400_PORT_RX_MODE_UTOPIA 0x00000000 -#define IXP2400_PORT_RX_MODE_EVEN_PARITY 0x0000000c -#define 
IXP2400_PORT_RX_MODE_ODD_PARITY 0x00000008 -#define IXP2400_PORT_RX_MODE_NO_PARITY 0x00000000 -#define IXP2400_PORT_RX_MODE_UTOPIA_BIG_CELLS 0x00000002 -#define IXP2400_PORT_RX_MODE_UTOPIA_NORMAL_CELLS 0x00000000 -#define IXP2400_PORT_RX_MODE_2_CYCLE_DECODE 0x00000001 -#define IXP2400_PORT_RX_MODE_1_CYCLE_DECODE 0x00000000 - -#define IXP2400_TX_MODE_CSIX 0x00400000 -#define IXP2400_TX_MODE_UTOPIA_POS 0x00000000 -#define IXP2400_TX_MODE_WIDTH_MASK 0x00300000 -#define IXP2400_TX_MODE_1x16_2x8 0x00300000 -#define IXP2400_TX_MODE_4x8 0x00200000 -#define IXP2400_TX_MODE_2x16 0x00100000 -#define IXP2400_TX_MODE_1x32 0x00000000 -#define IXP2400_TX_MODE_MPHY 0x00080000 -#define IXP2400_TX_MODE_SPHY 0x00000000 -#define IXP2400_TX_MODE_MPHY_32 0x00040000 -#define IXP2400_TX_MODE_MPHY_4 0x00000000 -#define IXP2400_TX_MODE_MPHY_POLLED_STATUS 0x00020000 -#define IXP2400_TX_MODE_MPHY_DIRECT_STATUS 0x00000000 -#define IXP2400_TX_MODE_CBUS_FULL_DUPLEX 0x00010000 -#define IXP2400_TX_MODE_CBUS_SIMPLEX 0x00000000 -#define IXP2400_TX_MODE_MPHY_LEVEL2 0x00004000 -#define IXP2400_TX_MODE_MPHY_LEVEL3 0x00000000 -#define IXP2400_TX_MODE_CBUS_8BIT 0x00002000 -#define IXP2400_TX_MODE_CBUS_4BIT 0x00000000 -#define IXP2400_TX_MODE_TBUF_SIZE_MASK 0x0000000c -#define IXP2400_TX_MODE_TBUF_SIZE_256 0x00000008 -#define IXP2400_TX_MODE_TBUF_SIZE_128 0x00000004 -#define IXP2400_TX_MODE_TBUF_SIZE_64 0x00000000 - -#define IXP2400_PORT_TX_MODE_SLAVE 0x00000040 -#define IXP2400_PORT_TX_MODE_MASTER 0x00000000 -#define IXP2400_PORT_TX_MODE_POS_PHY 0x00000010 -#define IXP2400_PORT_TX_MODE_UTOPIA 0x00000000 -#define IXP2400_PORT_TX_MODE_EVEN_PARITY 0x0000000c -#define IXP2400_PORT_TX_MODE_ODD_PARITY 0x00000008 -#define IXP2400_PORT_TX_MODE_NO_PARITY 0x00000000 -#define IXP2400_PORT_TX_MODE_UTOPIA_BIG_CELLS 0x00000002 -#define IXP2400_PORT_TX_MODE_2_CYCLE_DECODE 0x00000001 -#define IXP2400_PORT_TX_MODE_1_CYCLE_DECODE 0x00000000 - - -#endif diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc deleted file mode 100644 index 42a73e357af..00000000000 --- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc +++ /dev/null @@ -1,408 +0,0 @@ -/* - * RX ucode for the Intel IXP2400 in POS-PHY mode. - * Copyright (C) 2004, 2005 Lennert Buytenhek - * Dedicated to Marija Kulikova. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * Assumptions made in this code: - * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where - * only one full element list is used. This includes, for example, - * 1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4. (This - * is not an exhaustive list.) - * - The RBUF uses 64-byte mpackets. - * - RX descriptors reside in SRAM, and have the following format: - * struct rx_desc - * { - * // to uengine - * u32 buf_phys_addr; - * u32 buf_length; - * - * // from uengine - * u32 channel; - * u32 pkt_length; - * }; - * - Packet data resides in DRAM. - * - Packet buffer addresses are 8-byte aligned. - * - Scratch ring 0 is rx_pending. - * - Scratch ring 1 is rx_done, and has status condition 'full'. - * - The host triggers rx_done flush and rx_pending refill on seeing INTA. - * - This code is run on all eight threads of the microengine it runs on. - * - * Local memory is used for per-channel RX state. 
- */ - -#define RX_THREAD_FREELIST_0 0x0030 -#define RBUF_ELEMENT_DONE 0x0044 - -#define CHANNEL_FLAGS *l$index0[0] -#define CHANNEL_FLAG_RECEIVING 1 -#define PACKET_LENGTH *l$index0[1] -#define PACKET_CHECKSUM *l$index0[2] -#define BUFFER_HANDLE *l$index0[3] -#define BUFFER_START *l$index0[4] -#define BUFFER_LENGTH *l$index0[5] - -#define CHANNEL_STATE_SIZE 24 // in bytes -#define CHANNEL_STATE_SHIFT 5 // ceil(log2(state size)) - - - .sig volatile sig1 - .sig volatile sig2 - .sig volatile sig3 - - .sig mpacket_arrived - .reg add_to_rx_freelist - .reg read $rsw0, $rsw1 - .xfer_order $rsw0 $rsw1 - - .reg zero - - /* - * Initialise add_to_rx_freelist. - */ - .begin - .reg temp - .reg temp2 - - immed[add_to_rx_freelist, RX_THREAD_FREELIST_0] - immed_w1[add_to_rx_freelist, (&$rsw0 | (&mpacket_arrived << 12))] - - local_csr_rd[ACTIVE_CTX_STS] - immed[temp, 0] - alu[temp2, temp, and, 0x1f] - alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<20] - alu[temp2, temp, and, 0x80] - alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<18] - .end - - immed[zero, 0] - - /* - * Skip context 0 initialisation? - */ - .begin - br!=ctx[0, mpacket_receive_loop#] - .end - - /* - * Initialise local memory. - */ - .begin - .reg addr - .reg temp - - immed[temp, 0] - init_local_mem_loop#: - alu_shf[addr, --, b, temp, <<CHANNEL_STATE_SHIFT] - local_csr_wr[ACTIVE_LM_ADDR_0, addr] - nop - nop - nop - - immed[CHANNEL_FLAGS, 0] - - alu[temp, temp, +, 1] - alu[--, temp, and, 0x20] - beq[init_local_mem_loop#] - .end - - /* - * Initialise signal pipeline. - */ - .begin - local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)] - .set_sig sig1 - - local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)] - .set_sig sig2 - - local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)] - .set_sig sig3 - .end - -mpacket_receive_loop#: - /* - * Synchronise and wait for mpacket. - */ - .begin - ctx_arb[sig1] - local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))] - - msf[fast_wr, --, add_to_rx_freelist, 0] - .set_sig mpacket_arrived - ctx_arb[mpacket_arrived] - .set $rsw0 $rsw1 - .end - - /* - * We halt if we see {inbparerr,parerr,null,soperror}. - */ - .begin - alu_shf[--, 0x1b, and, $rsw0, >>8] - bne[abort_rswerr#] - .end - - /* - * Point local memory pointer to this channel's state area. - */ - .begin - .reg chanaddr - - alu[chanaddr, $rsw0, and, 0x1f] - alu_shf[chanaddr, --, b, chanaddr, <<CHANNEL_STATE_SHIFT] - local_csr_wr[ACTIVE_LM_ADDR_0, chanaddr] - nop - nop - nop - .end - - /* - * Check whether we received a SOP mpacket while we were already - * working on a packet, or a non-SOP mpacket while there was no - * packet pending. (SOP == RECEIVING -> abort) If everything's - * okay, update the RECEIVING flag to reflect our new state. - */ - .begin - .reg temp - .reg eop - - #if CHANNEL_FLAG_RECEIVING != 1 - #error CHANNEL_FLAG_RECEIVING is not 1 - #endif - - alu_shf[temp, 1, and, $rsw0, >>15] - alu[temp, temp, xor, CHANNEL_FLAGS] - alu[--, temp, and, CHANNEL_FLAG_RECEIVING] - beq[abort_proterr#] - - alu_shf[eop, 1, and, $rsw0, >>14] - alu[CHANNEL_FLAGS, temp, xor, eop] - .end - - /* - * Copy the mpacket into the right spot, and in case of EOP, - * write back the descriptor and pass the packet on. - */ - .begin - .reg buffer_offset - .reg _packet_length - .reg _packet_checksum - .reg _buffer_handle - .reg _buffer_start - .reg _buffer_length - - /* - * Determine buffer_offset, _packet_length and - * _packet_checksum. 
- */ - .begin - .reg temp - - alu[--, 1, and, $rsw0, >>15] - beq[not_sop#] - - immed[PACKET_LENGTH, 0] - immed[PACKET_CHECKSUM, 0] - - not_sop#: - alu[buffer_offset, --, b, PACKET_LENGTH] - alu_shf[temp, 0xff, and, $rsw0, >>16] - alu[_packet_length, buffer_offset, +, temp] - alu[PACKET_LENGTH, --, b, _packet_length] - - immed[temp, 0xffff] - alu[temp, $rsw1, and, temp] - alu[_packet_checksum, PACKET_CHECKSUM, +, temp] - alu[PACKET_CHECKSUM, --, b, _packet_checksum] - .end - - /* - * Allocate buffer in case of SOP. - */ - .begin - .reg temp - - alu[temp, 1, and, $rsw0, >>15] - beq[skip_buffer_alloc#] - - .begin - .sig zzz - .reg read $stemp $stemp2 - .xfer_order $stemp $stemp2 - - rx_nobufs#: - scratch[get, $stemp, zero, 0, 1], ctx_swap[zzz] - alu[_buffer_handle, --, b, $stemp] - beq[rx_nobufs#] - - sram[read, $stemp, _buffer_handle, 0, 2], - ctx_swap[zzz] - alu[_buffer_start, --, b, $stemp] - alu[_buffer_length, --, b, $stemp2] - .end - - skip_buffer_alloc#: - .end - - /* - * Resynchronise. - */ - .begin - ctx_arb[sig2] - local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))] - .end - - /* - * Synchronise buffer state. - */ - .begin - .reg temp - - alu[temp, 1, and, $rsw0, >>15] - beq[copy_from_local_mem#] - - alu[BUFFER_HANDLE, --, b, _buffer_handle] - alu[BUFFER_START, --, b, _buffer_start] - alu[BUFFER_LENGTH, --, b, _buffer_length] - br[sync_state_done#] - - copy_from_local_mem#: - alu[_buffer_handle, --, b, BUFFER_HANDLE] - alu[_buffer_start, --, b, BUFFER_START] - alu[_buffer_length, --, b, BUFFER_LENGTH] - - sync_state_done#: - .end - -#if 0 - /* - * Debug buffer state management. - */ - .begin - .reg temp - - alu[temp, 1, and, $rsw0, >>14] - beq[no_poison#] - immed[BUFFER_HANDLE, 0xdead] - immed[BUFFER_START, 0xdead] - immed[BUFFER_LENGTH, 0xdead] - no_poison#: - - immed[temp, 0xdead] - alu[--, _buffer_handle, -, temp] - beq[state_corrupted#] - alu[--, _buffer_start, -, temp] - beq[state_corrupted#] - alu[--, _buffer_length, -, temp] - beq[state_corrupted#] - .end -#endif - - /* - * Check buffer length. - */ - .begin - alu[--, _buffer_length, -, _packet_length] - blo[buffer_overflow#] - .end - - /* - * Copy the mpacket and give back the RBUF element. - */ - .begin - .reg element - .reg xfer_size - .reg temp - .sig copy_sig - - alu_shf[element, 0x7f, and, $rsw0, >>24] - alu_shf[xfer_size, 0xff, and, $rsw0, >>16] - - alu[xfer_size, xfer_size, -, 1] - alu_shf[xfer_size, 0x10, or, xfer_size, >>3] - alu_shf[temp, 0x10, or, xfer_size, <<21] - alu_shf[temp, temp, or, element, <<11] - alu_shf[--, temp, or, 1, <<18] - - dram[rbuf_rd, --, _buffer_start, buffer_offset, max_8], - indirect_ref, sig_done[copy_sig] - ctx_arb[copy_sig] - - alu[temp, RBUF_ELEMENT_DONE, or, element, <<16] - msf[fast_wr, --, temp, 0] - .end - - /* - * If EOP, write back the packet descriptor. - */ - .begin - .reg write $stemp $stemp2 - .xfer_order $stemp $stemp2 - .sig zzz - - alu_shf[--, 1, and, $rsw0, >>14] - beq[no_writeback#] - - alu[$stemp, $rsw0, and, 0x1f] - alu[$stemp2, --, b, _packet_length] - sram[write, $stemp, _buffer_handle, 8, 2], ctx_swap[zzz] - - no_writeback#: - .end - - /* - * Resynchronise. - */ - .begin - ctx_arb[sig3] - local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))] - .end - - /* - * If EOP, put the buffer back onto the scratch ring. 
- */ - .begin - .reg write $stemp - .sig zzz - - br_inp_state[SCR_Ring1_Status, rx_done_ring_overflow#] - - alu_shf[--, 1, and, $rsw0, >>14] - beq[mpacket_receive_loop#] - - alu[--, 1, and, $rsw0, >>10] - bne[rxerr#] - - alu[$stemp, --, b, _buffer_handle] - scratch[put, $stemp, zero, 4, 1], ctx_swap[zzz] - cap[fast_wr, 0, XSCALE_INT_A] - br[mpacket_receive_loop#] - - rxerr#: - alu[$stemp, --, b, _buffer_handle] - scratch[put, $stemp, zero, 0, 1], ctx_swap[zzz] - br[mpacket_receive_loop#] - .end - .end - - -abort_rswerr#: - halt - -abort_proterr#: - halt - -state_corrupted#: - halt - -buffer_overflow#: - halt - -rx_done_ring_overflow#: - halt - - diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode deleted file mode 100644 index e8aee2f81aa..00000000000 --- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode +++ /dev/null @@ -1,130 +0,0 @@ -static struct ixp2000_uengine_code ixp2400_rx = -{ - .cpu_model_bitmask = 0x000003fe, - .cpu_min_revision = 0, - .cpu_max_revision = 255, - - .uengine_parameters = IXP2000_UENGINE_8_CONTEXTS | - IXP2000_UENGINE_PRN_UPDATE_EVERY | - IXP2000_UENGINE_NN_FROM_PREVIOUS | - IXP2000_UENGINE_ASSERT_EMPTY_AT_0 | - IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT | - IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT, - - .initial_reg_values = (struct ixp2000_reg_value []) { - { -1, -1 } - }, - - .num_insns = 109, - .insns = (u8 []) { - 0xf0, 0x00, 0x0c, 0xc0, 0x05, - 0xf4, 0x44, 0x0c, 0x00, 0x05, - 0xfc, 0x04, 0x4c, 0x00, 0x00, - 0xf0, 0x00, 0x00, 0x3b, 0x00, - 0xb4, 0x40, 0xf0, 0x3b, 0x1f, - 0x8a, 0xc0, 0x50, 0x3e, 0x05, - 0xb4, 0x40, 0xf0, 0x3b, 0x80, - 0x9a, 0xe0, 0x00, 0x3e, 0x05, - 0xf0, 0x00, 0x00, 0x07, 0x00, - 0xd8, 0x05, 0xc0, 0x00, 0x11, - 0xf0, 0x00, 0x00, 0x0f, 0x00, - 0x91, 0xb0, 0x20, 0x0e, 0x00, - 0xfc, 0x06, 0x60, 0x0b, 0x00, - 0xf0, 0x00, 0x0c, 0x03, 0x00, - 0xf0, 0x00, 0x0c, 0x03, 0x00, - 0xf0, 0x00, 0x0c, 0x03, 0x00, - 0xf0, 0x00, 0x0c, 0x02, 0x00, - 0xb0, 0xc0, 0x30, 0x0f, 0x01, - 0xa4, 0x70, 0x00, 0x0f, 0x20, - 0xd8, 0x02, 0xc0, 0x01, 0x00, - 0xfc, 0x10, 0xac, 0x23, 0x08, - 0xfc, 0x10, 0xac, 0x43, 0x10, - 0xfc, 0x10, 0xac, 0x63, 0x18, - 0xe0, 0x00, 0x00, 0x00, 0x02, - 0xfc, 0x10, 0xae, 0x23, 0x88, - 0x3d, 0x00, 0x04, 0x03, 0x20, - 0xe0, 0x00, 0x00, 0x00, 0x10, - 0x84, 0x82, 0x02, 0x01, 0x3b, - 0xd8, 0x1a, 0x00, 0x01, 0x01, - 0xb4, 0x00, 0x8c, 0x7d, 0x80, - 0x91, 0xb0, 0x80, 0x22, 0x00, - 0xfc, 0x06, 0x60, 0x23, 0x00, - 0xf0, 0x00, 0x0c, 0x03, 0x00, - 0xf0, 0x00, 0x0c, 0x03, 0x00, - 0xf0, 0x00, 0x0c, 0x03, 0x00, - 0x94, 0xf0, 0x92, 0x01, 0x21, - 0xac, 0x40, 0x60, 0x26, 0x00, - 0xa4, 0x30, 0x0c, 0x04, 0x06, - 0xd8, 0x1a, 0x40, 0x01, 0x00, - 0x94, 0xe0, 0xa2, 0x01, 0x21, - 0xac, 0x20, 0x00, 0x28, 0x06, - 0x84, 0xf2, 0x02, 0x01, 0x21, - 0xd8, 0x0b, 0x40, 0x01, 0x00, - 0xf0, 0x00, 0x0c, 0x02, 0x01, - 0xf0, 0x00, 0x0c, 0x02, 0x02, - 0xa0, 0x00, 0x08, 0x04, 0x00, - 0x95, 0x00, 0xc6, 0x01, 0xff, - 0xa0, 0x80, 0x10, 0x30, 0x00, - 0xa0, 0x60, 0x1c, 0x00, 0x01, - 0xf0, 0x0f, 0xf0, 0x33, 0xff, - 0xb4, 0x00, 0xc0, 0x31, 0x81, - 0xb0, 0x80, 0xb0, 0x32, 0x02, - 0xa0, 0x20, 0x20, 0x2c, 0x00, - 0x94, 0xf0, 0xd2, 0x01, 0x21, - 0xd8, 0x0f, 0x40, 0x01, 0x00, - 0x19, 0x40, 0x10, 0x04, 0x20, - 0xa0, 0x00, 0x26, 0x04, 0x00, - 0xd8, 0x0d, 0xc0, 0x01, 0x00, - 0x00, 0x42, 0x10, 0x80, 0x02, - 0xb0, 0x00, 0x46, 0x04, 0x00, - 0xb0, 0x00, 0x56, 0x08, 0x00, - 0xe0, 0x00, 0x00, 0x00, 0x04, - 0xfc, 0x10, 0xae, 0x43, 0x90, - 0x84, 0xf0, 0x32, 0x01, 0x21, - 0xd8, 0x11, 0x40, 0x01, 0x00, - 0xa0, 0x60, 0x3c, 0x00, 0x02, - 0xa0, 0x20, 
0x40, 0x10, 0x00, - 0xa0, 0x20, 0x50, 0x14, 0x00, - 0xd8, 0x12, 0x00, 0x00, 0x18, - 0xa0, 0x00, 0x28, 0x0c, 0x00, - 0xb0, 0x00, 0x48, 0x10, 0x00, - 0xb0, 0x00, 0x58, 0x14, 0x00, - 0xaa, 0xf0, 0x00, 0x14, 0x01, - 0xd8, 0x1a, 0xc0, 0x01, 0x05, - 0x85, 0x80, 0x42, 0x01, 0xff, - 0x95, 0x00, 0x66, 0x01, 0xff, - 0xba, 0xc0, 0x60, 0x1b, 0x01, - 0x9a, 0x30, 0x60, 0x19, 0x30, - 0x9a, 0xb0, 0x70, 0x1a, 0x30, - 0x9b, 0x50, 0x78, 0x1e, 0x04, - 0x8a, 0xe2, 0x08, 0x1e, 0x21, - 0x6a, 0x4e, 0x00, 0x13, 0x00, - 0xe0, 0x00, 0x00, 0x00, 0x30, - 0x9b, 0x00, 0x7a, 0x92, 0x04, - 0x3d, 0x00, 0x04, 0x1f, 0x20, - 0x84, 0xe2, 0x02, 0x01, 0x21, - 0xd8, 0x16, 0x80, 0x01, 0x00, - 0xa4, 0x18, 0x0c, 0x7d, 0x80, - 0xa0, 0x58, 0x1c, 0x00, 0x01, - 0x01, 0x42, 0x00, 0xa0, 0x02, - 0xe0, 0x00, 0x00, 0x00, 0x08, - 0xfc, 0x10, 0xae, 0x63, 0x98, - 0xd8, 0x1b, 0x00, 0xc2, 0x14, - 0x84, 0xe2, 0x02, 0x01, 0x21, - 0xd8, 0x05, 0xc0, 0x01, 0x00, - 0x84, 0xa2, 0x02, 0x01, 0x21, - 0xd8, 0x19, 0x40, 0x01, 0x01, - 0xa0, 0x58, 0x0c, 0x00, 0x02, - 0x1a, 0x40, 0x00, 0x04, 0x24, - 0x33, 0x00, 0x01, 0x2f, 0x20, - 0xd8, 0x05, 0xc0, 0x00, 0x18, - 0xa0, 0x58, 0x0c, 0x00, 0x02, - 0x1a, 0x40, 0x00, 0x04, 0x20, - 0xd8, 0x05, 0xc0, 0x00, 0x18, - 0xe0, 0x00, 0x02, 0x00, 0x00, - 0xe0, 0x00, 0x02, 0x00, 0x00, - 0xe0, 0x00, 0x02, 0x00, 0x00, - 0xe0, 0x00, 0x02, 0x00, 0x00, - 0xe0, 0x00, 0x02, 0x00, 0x00, - } -}; diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc deleted file mode 100644 index d090d1884fb..00000000000 --- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc +++ /dev/null @@ -1,272 +0,0 @@ -/* - * TX ucode for the Intel IXP2400 in POS-PHY mode. - * Copyright (C) 2004, 2005 Lennert Buytenhek - * Dedicated to Marija Kulikova. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * Assumptions made in this code: - * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where - * only one TBUF partition is used. This includes, for example, - * 1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4. (This - * is not an exhaustive list.) - * - The TBUF uses 64-byte mpackets. - * - TX descriptors reside in SRAM, and have the following format: - * struct tx_desc - * { - * // to uengine - * u32 buf_phys_addr; - * u32 pkt_length; - * u32 channel; - * }; - * - Packet data resides in DRAM. - * - Packet buffer addresses are 8-byte aligned. - * - Scratch ring 2 is tx_pending. - * - Scratch ring 3 is tx_done, and has status condition 'full'. - * - This code is run on all eight threads of the microengine it runs on. - */ - -#define TX_SEQUENCE_0 0x0060 -#define TBUF_CTRL 0x1800 - -#define PARTITION_SIZE 128 -#define PARTITION_THRESH 96 - - - .sig volatile sig1 - .sig volatile sig2 - .sig volatile sig3 - - .reg @old_tx_seq_0 - .reg @mpkts_in_flight - .reg @next_tbuf_mpacket - - .reg @buffer_handle - .reg @buffer_start - .reg @packet_length - .reg @channel - .reg @packet_offset - - .reg zero - - immed[zero, 0] - - /* - * Skip context 0 initialisation? - */ - .begin - br!=ctx[0, mpacket_tx_loop#] - .end - - /* - * Wait until all pending TBUF elements have been transmitted. 
- */ - .begin - .reg read $tx - .sig zzz - - loop_empty#: - msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz] - alu_shf[--, --, b, $tx, >>31] - beq[loop_empty#] - - alu[@old_tx_seq_0, --, b, $tx] - .end - - immed[@mpkts_in_flight, 0] - alu[@next_tbuf_mpacket, @old_tx_seq_0, and, (PARTITION_SIZE - 1)] - - immed[@buffer_handle, 0] - - /* - * Initialise signal pipeline. - */ - .begin - local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)] - .set_sig sig1 - - local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)] - .set_sig sig2 - - local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)] - .set_sig sig3 - .end - -mpacket_tx_loop#: - .begin - .reg tbuf_element_index - .reg buffer_handle - .reg sop_eop - .reg packet_data - .reg channel - .reg mpacket_size - - /* - * If there is no packet currently being transmitted, - * dequeue the next TX descriptor, and fetch the buffer - * address, packet length and destination channel number. - */ - .begin - .reg read $stemp $stemp2 $stemp3 - .xfer_order $stemp $stemp2 $stemp3 - .sig zzz - - ctx_arb[sig1] - - alu[--, --, b, @buffer_handle] - bne[already_got_packet#] - - tx_nobufs#: - scratch[get, $stemp, zero, 8, 1], ctx_swap[zzz] - alu[@buffer_handle, --, b, $stemp] - beq[tx_nobufs#] - - sram[read, $stemp, $stemp, 0, 3], ctx_swap[zzz] - alu[@buffer_start, --, b, $stemp] - alu[@packet_length, --, b, $stemp2] - beq[zero_byte_packet#] - alu[@channel, --, b, $stemp3] - immed[@packet_offset, 0] - - already_got_packet#: - local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))] - .end - - /* - * Determine tbuf element index, SOP/EOP flags, mpacket - * offset and mpacket size and cache buffer_handle and - * channel number. - */ - .begin - alu[tbuf_element_index, --, b, @next_tbuf_mpacket] - alu[@next_tbuf_mpacket, @next_tbuf_mpacket, +, 1] - alu[@next_tbuf_mpacket, @next_tbuf_mpacket, and, - (PARTITION_SIZE - 1)] - - alu[buffer_handle, --, b, @buffer_handle] - immed[@buffer_handle, 0] - - immed[sop_eop, 1] - - alu[packet_data, --, b, @packet_offset] - bne[no_sop#] - alu[sop_eop, sop_eop, or, 2] - no_sop#: - alu[packet_data, packet_data, +, @buffer_start] - - alu[channel, --, b, @channel] - - alu[mpacket_size, @packet_length, -, @packet_offset] - alu[--, 64, -, mpacket_size] - bhs[eop#] - alu[@buffer_handle, --, b, buffer_handle] - immed[mpacket_size, 64] - alu[sop_eop, sop_eop, and, 2] - eop#: - - alu[@packet_offset, @packet_offset, +, mpacket_size] - .end - - /* - * Wait until there's enough space in the TBUF. - */ - .begin - .reg read $tx - .reg temp - .sig zzz - - ctx_arb[sig2] - - br[test_space#] - - loop_space#: - msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz] - - alu[temp, $tx, -, @old_tx_seq_0] - alu[temp, temp, and, 0xff] - alu[@mpkts_in_flight, @mpkts_in_flight, -, temp] - - alu[@old_tx_seq_0, --, b, $tx] - - test_space#: - alu[--, PARTITION_THRESH, -, @mpkts_in_flight] - blo[loop_space#] - - alu[@mpkts_in_flight, @mpkts_in_flight, +, 1] - - local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))] - .end - - /* - * Copy the packet data to the TBUF. - */ - .begin - .reg temp - .sig copy_sig - - alu[temp, mpacket_size, -, 1] - alu_shf[temp, 0x10, or, temp, >>3] - alu_shf[temp, 0x10, or, temp, <<21] - alu_shf[temp, temp, or, tbuf_element_index, <<11] - alu_shf[--, temp, or, 1, <<18] - - dram[tbuf_wr, --, packet_data, 0, max_8], - indirect_ref, sig_done[copy_sig] - ctx_arb[copy_sig] - .end - - /* - * Mark TBUF element as ready-to-be-transmitted. 
- */ - .begin - .reg write $tsw $tsw2 - .xfer_order $tsw $tsw2 - .reg temp - .sig zzz - - alu_shf[temp, channel, or, mpacket_size, <<24] - alu_shf[$tsw, temp, or, sop_eop, <<8] - immed[$tsw2, 0] - - immed[temp, TBUF_CTRL] - alu_shf[temp, temp, or, tbuf_element_index, <<3] - msf[write, $tsw, temp, 0, 2], ctx_swap[zzz] - .end - - /* - * Resynchronise. - */ - .begin - ctx_arb[sig3] - local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))] - .end - - /* - * If this was an EOP mpacket, recycle the TX buffer - * and signal the host. - */ - .begin - .reg write $stemp - .sig zzz - - alu[--, sop_eop, and, 1] - beq[mpacket_tx_loop#] - - tx_done_ring_full#: - br_inp_state[SCR_Ring3_Status, tx_done_ring_full#] - - alu[$stemp, --, b, buffer_handle] - scratch[put, $stemp, zero, 12, 1], ctx_swap[zzz] - cap[fast_wr, 0, XSCALE_INT_A] - br[mpacket_tx_loop#] - .end - .end - - -zero_byte_packet#: - halt - - diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode deleted file mode 100644 index a433e24b0a5..00000000000 --- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode +++ /dev/null @@ -1,98 +0,0 @@ -static struct ixp2000_uengine_code ixp2400_tx = -{ - .cpu_model_bitmask = 0x000003fe, - .cpu_min_revision = 0, - .cpu_max_revision = 255, - - .uengine_parameters = IXP2000_UENGINE_8_CONTEXTS | - IXP2000_UENGINE_PRN_UPDATE_EVERY | - IXP2000_UENGINE_NN_FROM_PREVIOUS | - IXP2000_UENGINE_ASSERT_EMPTY_AT_0 | - IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT | - IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT, - - .initial_reg_values = (struct ixp2000_reg_value []) { - { -1, -1 } - }, - - .num_insns = 77, - .insns = (u8 []) { - 0xf0, 0x00, 0x00, 0x07, 0x00, - 0xd8, 0x03, 0x00, 0x00, 0x11, - 0x3c, 0x40, 0x00, 0x04, 0xe0, - 0x81, 0xf2, 0x02, 0x01, 0x00, - 0xd8, 0x00, 0x80, 0x01, 0x00, - 0xb0, 0x08, 0x06, 0x00, 0x00, - 0xf0, 0x00, 0x0c, 0x00, 0x80, - 0xb4, 0x49, 0x02, 0x03, 0x7f, - 0xf0, 0x00, 0x02, 0x83, 0x00, - 0xfc, 0x10, 0xac, 0x23, 0x08, - 0xfc, 0x10, 0xac, 0x43, 0x10, - 0xfc, 0x10, 0xac, 0x63, 0x18, - 0xe0, 0x00, 0x00, 0x00, 0x02, - 0xa0, 0x30, 0x02, 0x80, 0x00, - 0xd8, 0x06, 0x00, 0x01, 0x01, - 0x19, 0x40, 0x00, 0x04, 0x28, - 0xb0, 0x0a, 0x06, 0x00, 0x00, - 0xd8, 0x03, 0xc0, 0x01, 0x00, - 0x00, 0x44, 0x00, 0x80, 0x80, - 0xa0, 0x09, 0x06, 0x00, 0x00, - 0xb0, 0x0b, 0x06, 0x04, 0x00, - 0xd8, 0x13, 0x00, 0x01, 0x00, - 0xb0, 0x0c, 0x06, 0x08, 0x00, - 0xf0, 0x00, 0x0c, 0x00, 0xa0, - 0xfc, 0x10, 0xae, 0x23, 0x88, - 0xa0, 0x00, 0x12, 0x40, 0x00, - 0xb0, 0xc9, 0x02, 0x43, 0x01, - 0xb4, 0x49, 0x02, 0x43, 0x7f, - 0xb0, 0x00, 0x22, 0x80, 0x00, - 0xf0, 0x00, 0x02, 0x83, 0x00, - 0xf0, 0x00, 0x0c, 0x04, 0x02, - 0xb0, 0x40, 0x6c, 0x00, 0xa0, - 0xd8, 0x08, 0x80, 0x01, 0x01, - 0xaa, 0x00, 0x2c, 0x08, 0x02, - 0xa0, 0xc0, 0x30, 0x18, 0x90, - 0xa0, 0x00, 0x43, 0x00, 0x00, - 0xba, 0xc0, 0x32, 0xc0, 0xa0, - 0xaa, 0xb0, 0x00, 0x0f, 0x40, - 0xd8, 0x0a, 0x80, 0x01, 0x04, - 0xb0, 0x0a, 0x00, 0x08, 0x00, - 0xf0, 0x00, 0x00, 0x0f, 0x40, - 0xa4, 0x00, 0x2c, 0x08, 0x02, - 0xa0, 0x8a, 0x00, 0x0c, 0xa0, - 0xe0, 0x00, 0x00, 0x00, 0x04, - 0xd8, 0x0c, 0x80, 0x00, 0x18, - 0x3c, 0x40, 0x00, 0x04, 0xe0, - 0xba, 0x80, 0x42, 0x01, 0x80, - 0xb4, 0x40, 0x40, 0x13, 0xff, - 0xaa, 0x88, 0x00, 0x10, 0x80, - 0xb0, 0x08, 0x06, 0x00, 0x00, - 0xaa, 0xf0, 0x0d, 0x80, 0x80, - 0xd8, 0x0b, 0x40, 0x01, 0x05, - 0xa0, 0x88, 0x0c, 0x04, 0x80, - 0xfc, 0x10, 0xae, 0x43, 0x90, - 0xba, 0xc0, 0x50, 0x0f, 0x01, - 0x9a, 0x30, 0x50, 0x15, 0x30, - 0x9a, 0xb0, 0x50, 0x16, 0x30, - 0x9b, 0x50, 0x58, 0x16, 0x01, - 0x8a, 0xe2, 0x08, 0x16, 
0x21, - 0x6b, 0x4e, 0x00, 0x83, 0x03, - 0xe0, 0x00, 0x00, 0x00, 0x30, - 0x9a, 0x80, 0x70, 0x0e, 0x04, - 0x8b, 0x88, 0x08, 0x1e, 0x02, - 0xf0, 0x00, 0x0c, 0x01, 0x81, - 0xf0, 0x01, 0x80, 0x1f, 0x00, - 0x9b, 0xd0, 0x78, 0x1e, 0x01, - 0x3d, 0x42, 0x00, 0x1c, 0x20, - 0xe0, 0x00, 0x00, 0x00, 0x08, - 0xfc, 0x10, 0xae, 0x63, 0x98, - 0xa4, 0x30, 0x0c, 0x04, 0x02, - 0xd8, 0x03, 0x00, 0x01, 0x00, - 0xd8, 0x11, 0xc1, 0x42, 0x14, - 0xa0, 0x18, 0x00, 0x08, 0x00, - 0x1a, 0x40, 0x00, 0x04, 0x2c, - 0x33, 0x00, 0x01, 0x2f, 0x20, - 0xd8, 0x03, 0x00, 0x00, 0x18, - 0xe0, 0x00, 0x02, 0x00, 0x00, - } -}; diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev.c b/drivers/net/ethernet/xscale/ixp2000/ixpdev.c deleted file mode 100644 index 45008377c8b..00000000000 --- a/drivers/net/ethernet/xscale/ixp2000/ixpdev.c +++ /dev/null @@ -1,437 +0,0 @@ -/* - * IXP2000 MSF network device driver - * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org> - * Dedicated to Marija Kulikova. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/moduleparam.h> -#include <linux/gfp.h> -#include <asm/hardware/uengine.h> -#include <asm/io.h> -#include "ixp2400_rx.ucode" -#include "ixp2400_tx.ucode" -#include "ixpdev_priv.h" -#include "ixpdev.h" -#include "pm3386.h" - -#define DRV_MODULE_VERSION "0.2" - -static int nds_count; -static struct net_device **nds; -static int nds_open; -static void (*set_port_admin_status)(int port, int up); - -static struct ixpdev_rx_desc * const rx_desc = - (struct ixpdev_rx_desc *)(IXP2000_SRAM0_VIRT_BASE + RX_BUF_DESC_BASE); -static struct ixpdev_tx_desc * const tx_desc = - (struct ixpdev_tx_desc *)(IXP2000_SRAM0_VIRT_BASE + TX_BUF_DESC_BASE); -static int tx_pointer; - - -static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct ixpdev_priv *ip = netdev_priv(dev); - struct ixpdev_tx_desc *desc; - int entry; - unsigned long flags; - - if (unlikely(skb->len > PAGE_SIZE)) { - /* @@@ Count drops. 
*/ - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - - entry = tx_pointer; - tx_pointer = (tx_pointer + 1) % TX_BUF_COUNT; - - desc = tx_desc + entry; - desc->pkt_length = skb->len; - desc->channel = ip->channel; - - skb_copy_and_csum_dev(skb, phys_to_virt(desc->buf_addr)); - dev_kfree_skb(skb); - - ixp2000_reg_write(RING_TX_PENDING, - TX_BUF_DESC_BASE + (entry * sizeof(struct ixpdev_tx_desc))); - - local_irq_save(flags); - ip->tx_queue_entries++; - if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN) - netif_stop_queue(dev); - local_irq_restore(flags); - - return NETDEV_TX_OK; -} - - -static int ixpdev_rx(struct net_device *dev, int processed, int budget) -{ - while (processed < budget) { - struct ixpdev_rx_desc *desc; - struct sk_buff *skb; - void *buf; - u32 _desc; - - _desc = ixp2000_reg_read(RING_RX_DONE); - if (_desc == 0) - return 0; - - desc = rx_desc + - ((_desc - RX_BUF_DESC_BASE) / sizeof(struct ixpdev_rx_desc)); - buf = phys_to_virt(desc->buf_addr); - - if (desc->pkt_length < 4 || desc->pkt_length > PAGE_SIZE) { - printk(KERN_ERR "ixp2000: rx err, length %d\n", - desc->pkt_length); - goto err; - } - - if (desc->channel < 0 || desc->channel >= nds_count) { - printk(KERN_ERR "ixp2000: rx err, channel %d\n", - desc->channel); - goto err; - } - - /* @@@ Make FCS stripping configurable. */ - desc->pkt_length -= 4; - - if (unlikely(!netif_running(nds[desc->channel]))) - goto err; - - skb = netdev_alloc_skb_ip_align(dev, desc->pkt_length); - if (likely(skb != NULL)) { - skb_copy_to_linear_data(skb, buf, desc->pkt_length); - skb_put(skb, desc->pkt_length); - skb->protocol = eth_type_trans(skb, nds[desc->channel]); - - netif_receive_skb(skb); - } - -err: - ixp2000_reg_write(RING_RX_PENDING, _desc); - processed++; - } - - return processed; -} - -/* dev always points to nds[0]. */ -static int ixpdev_poll(struct napi_struct *napi, int budget) -{ - struct ixpdev_priv *ip = container_of(napi, struct ixpdev_priv, napi); - struct net_device *dev = ip->dev; - int rx; - - rx = 0; - do { - ixp2000_reg_write(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0x00ff); - - rx = ixpdev_rx(dev, rx, budget); - if (rx >= budget) - break; - } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff); - - napi_complete(napi); - ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff); - - return rx; -} - -static void ixpdev_tx_complete(void) -{ - int channel; - u32 wake; - - wake = 0; - while (1) { - struct ixpdev_priv *ip; - u32 desc; - int entry; - - desc = ixp2000_reg_read(RING_TX_DONE); - if (desc == 0) - break; - - /* @@@ Check whether entries come back in order. */ - entry = (desc - TX_BUF_DESC_BASE) / sizeof(struct ixpdev_tx_desc); - channel = tx_desc[entry].channel; - - if (channel < 0 || channel >= nds_count) { - printk(KERN_ERR "ixp2000: txcomp channel index " - "out of bounds (%d, %.8i, %d)\n", - channel, (unsigned int)desc, entry); - continue; - } - - ip = netdev_priv(nds[channel]); - if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN) - wake |= 1 << channel; - ip->tx_queue_entries--; - } - - for (channel = 0; wake != 0; channel++) { - if (wake & (1 << channel)) { - netif_wake_queue(nds[channel]); - wake &= ~(1 << channel); - } - } -} - -static irqreturn_t ixpdev_interrupt(int irq, void *dev_id) -{ - u32 status; - - status = ixp2000_reg_read(IXP2000_IRQ_THD_STATUS_A_0); - if (status == 0) - return IRQ_NONE; - - /* - * Any of the eight receive units signaled RX? 
- */ - if (status & 0x00ff) { - struct net_device *dev = nds[0]; - struct ixpdev_priv *ip = netdev_priv(dev); - - ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff); - if (likely(napi_schedule_prep(&ip->napi))) { - __napi_schedule(&ip->napi); - } else { - printk(KERN_CRIT "ixp2000: irq while polling!!\n"); - } - } - - /* - * Any of the eight transmit units signaled TXdone? - */ - if (status & 0xff00) { - ixp2000_reg_wrb(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0xff00); - ixpdev_tx_complete(); - } - - return IRQ_HANDLED; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void ixpdev_poll_controller(struct net_device *dev) -{ - disable_irq(IRQ_IXP2000_THDA0); - ixpdev_interrupt(IRQ_IXP2000_THDA0, dev); - enable_irq(IRQ_IXP2000_THDA0); -} -#endif - -static int ixpdev_open(struct net_device *dev) -{ - struct ixpdev_priv *ip = netdev_priv(dev); - int err; - - napi_enable(&ip->napi); - if (!nds_open++) { - err = request_irq(IRQ_IXP2000_THDA0, ixpdev_interrupt, - IRQF_SHARED, "ixp2000_eth", nds); - if (err) { - nds_open--; - napi_disable(&ip->napi); - return err; - } - - ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0xffff); - } - - set_port_admin_status(ip->channel, 1); - netif_start_queue(dev); - - return 0; -} - -static int ixpdev_close(struct net_device *dev) -{ - struct ixpdev_priv *ip = netdev_priv(dev); - - netif_stop_queue(dev); - napi_disable(&ip->napi); - set_port_admin_status(ip->channel, 0); - - if (!--nds_open) { - ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0xffff); - free_irq(IRQ_IXP2000_THDA0, nds); - } - - return 0; -} - -static struct net_device_stats *ixpdev_get_stats(struct net_device *dev) -{ - struct ixpdev_priv *ip = netdev_priv(dev); - - pm3386_get_stats(ip->channel, &(dev->stats)); - - return &(dev->stats); -} - -static const struct net_device_ops ixpdev_netdev_ops = { - .ndo_open = ixpdev_open, - .ndo_stop = ixpdev_close, - .ndo_start_xmit = ixpdev_xmit, - .ndo_change_mtu = eth_change_mtu, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_get_stats = ixpdev_get_stats, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ixpdev_poll_controller, -#endif -}; - -struct net_device *ixpdev_alloc(int channel, int sizeof_priv) -{ - struct net_device *dev; - struct ixpdev_priv *ip; - - dev = alloc_etherdev(sizeof_priv); - if (dev == NULL) - return NULL; - - dev->netdev_ops = &ixpdev_netdev_ops; - - dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; - - ip = netdev_priv(dev); - ip->dev = dev; - netif_napi_add(dev, &ip->napi, ixpdev_poll, 64); - ip->channel = channel; - ip->tx_queue_entries = 0; - - return dev; -} - -int ixpdev_init(int __nds_count, struct net_device **__nds, - void (*__set_port_admin_status)(int port, int up)) -{ - int i; - int err; - - BUILD_BUG_ON(RX_BUF_COUNT > 192 || TX_BUF_COUNT > 192); - - printk(KERN_INFO "IXP2000 MSF ethernet driver %s\n", DRV_MODULE_VERSION); - - nds_count = __nds_count; - nds = __nds; - set_port_admin_status = __set_port_admin_status; - - for (i = 0; i < RX_BUF_COUNT; i++) { - void *buf; - - buf = (void *)get_zeroed_page(GFP_KERNEL); - if (buf == NULL) { - err = -ENOMEM; - while (--i >= 0) - free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr)); - goto err_out; - } - rx_desc[i].buf_addr = virt_to_phys(buf); - rx_desc[i].buf_length = PAGE_SIZE; - } - - /* @@@ Maybe we shouldn't be preallocating TX buffers. 
*/ - for (i = 0; i < TX_BUF_COUNT; i++) { - void *buf; - - buf = (void *)get_zeroed_page(GFP_KERNEL); - if (buf == NULL) { - err = -ENOMEM; - while (--i >= 0) - free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr)); - goto err_free_rx; - } - tx_desc[i].buf_addr = virt_to_phys(buf); - } - - /* 256 entries, ring status set means 'empty', base address 0x0000. */ - ixp2000_reg_write(RING_RX_PENDING_BASE, 0x44000000); - ixp2000_reg_write(RING_RX_PENDING_HEAD, 0x00000000); - ixp2000_reg_write(RING_RX_PENDING_TAIL, 0x00000000); - - /* 256 entries, ring status set means 'full', base address 0x0400. */ - ixp2000_reg_write(RING_RX_DONE_BASE, 0x40000400); - ixp2000_reg_write(RING_RX_DONE_HEAD, 0x00000000); - ixp2000_reg_write(RING_RX_DONE_TAIL, 0x00000000); - - for (i = 0; i < RX_BUF_COUNT; i++) { - ixp2000_reg_write(RING_RX_PENDING, - RX_BUF_DESC_BASE + (i * sizeof(struct ixpdev_rx_desc))); - } - - ixp2000_uengine_load(0, &ixp2400_rx); - ixp2000_uengine_start_contexts(0, 0xff); - - /* 256 entries, ring status set means 'empty', base address 0x0800. */ - ixp2000_reg_write(RING_TX_PENDING_BASE, 0x44000800); - ixp2000_reg_write(RING_TX_PENDING_HEAD, 0x00000000); - ixp2000_reg_write(RING_TX_PENDING_TAIL, 0x00000000); - - /* 256 entries, ring status set means 'full', base address 0x0c00. */ - ixp2000_reg_write(RING_TX_DONE_BASE, 0x40000c00); - ixp2000_reg_write(RING_TX_DONE_HEAD, 0x00000000); - ixp2000_reg_write(RING_TX_DONE_TAIL, 0x00000000); - - ixp2000_uengine_load(1, &ixp2400_tx); - ixp2000_uengine_start_contexts(1, 0xff); - - for (i = 0; i < nds_count; i++) { - err = register_netdev(nds[i]); - if (err) { - while (--i >= 0) - unregister_netdev(nds[i]); - goto err_free_tx; - } - } - - for (i = 0; i < nds_count; i++) { - printk(KERN_INFO "%s: IXP2000 MSF ethernet (port %d), %pM.\n", - nds[i]->name, i, nds[i]->dev_addr); - } - - return 0; - -err_free_tx: - for (i = 0; i < TX_BUF_COUNT; i++) - free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr)); - -err_free_rx: - for (i = 0; i < RX_BUF_COUNT; i++) - free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr)); - -err_out: - return err; -} - -void ixpdev_deinit(void) -{ - int i; - - /* @@@ Flush out pending packets. */ - - for (i = 0; i < nds_count; i++) - unregister_netdev(nds[i]); - - ixp2000_uengine_stop_contexts(1, 0xff); - ixp2000_uengine_stop_contexts(0, 0xff); - ixp2000_uengine_reset(0x3); - - for (i = 0; i < TX_BUF_COUNT; i++) - free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr)); - - for (i = 0; i < RX_BUF_COUNT; i++) - free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr)); -} diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev.h b/drivers/net/ethernet/xscale/ixp2000/ixpdev.h deleted file mode 100644 index 391ece62324..00000000000 --- a/drivers/net/ethernet/xscale/ixp2000/ixpdev.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * IXP2000 MSF network device driver - * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org> - * Dedicated to Marija Kulikova. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#ifndef __IXPDEV_H -#define __IXPDEV_H - -struct ixpdev_priv -{ - struct net_device *dev; - struct napi_struct napi; - int channel; - int tx_queue_entries; -}; - -struct net_device *ixpdev_alloc(int channel, int sizeof_priv); -int ixpdev_init(int num_ports, struct net_device **nds, - void (*set_port_admin_status)(int port, int up)); -void ixpdev_deinit(void); - - -#endif diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h b/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h deleted file mode 100644 index 86aa08ea0c3..00000000000 --- a/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * IXP2000 MSF network device driver - * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org> - * Dedicated to Marija Kulikova. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#ifndef __IXPDEV_PRIV_H -#define __IXPDEV_PRIV_H - -#define RX_BUF_DESC_BASE 0x00001000 -#define RX_BUF_COUNT ((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_rx_desc))) -#define TX_BUF_DESC_BASE 0x00002000 -#define TX_BUF_COUNT ((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_tx_desc))) -#define TX_BUF_COUNT_PER_CHAN (TX_BUF_COUNT / 4) - -#define RING_RX_PENDING ((u32 *)IXP2000_SCRATCH_RING_VIRT_BASE) -#define RING_RX_DONE ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 4)) -#define RING_TX_PENDING ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 8)) -#define RING_TX_DONE ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 12)) - -#define SCRATCH_REG(x) ((u32 *)(IXP2000_GLOBAL_REG_VIRT_BASE | 0x0800 | (x))) -#define RING_RX_PENDING_BASE SCRATCH_REG(0x00) -#define RING_RX_PENDING_HEAD SCRATCH_REG(0x04) -#define RING_RX_PENDING_TAIL SCRATCH_REG(0x08) -#define RING_RX_DONE_BASE SCRATCH_REG(0x10) -#define RING_RX_DONE_HEAD SCRATCH_REG(0x14) -#define RING_RX_DONE_TAIL SCRATCH_REG(0x18) -#define RING_TX_PENDING_BASE SCRATCH_REG(0x20) -#define RING_TX_PENDING_HEAD SCRATCH_REG(0x24) -#define RING_TX_PENDING_TAIL SCRATCH_REG(0x28) -#define RING_TX_DONE_BASE SCRATCH_REG(0x30) -#define RING_TX_DONE_HEAD SCRATCH_REG(0x34) -#define RING_TX_DONE_TAIL SCRATCH_REG(0x38) - -struct ixpdev_rx_desc -{ - u32 buf_addr; - u32 buf_length; - u32 channel; - u32 pkt_length; -}; - -struct ixpdev_tx_desc -{ - u32 buf_addr; - u32 pkt_length; - u32 channel; - u32 unused; -}; - - -#endif diff --git a/drivers/net/ethernet/xscale/ixp2000/pm3386.c b/drivers/net/ethernet/xscale/ixp2000/pm3386.c deleted file mode 100644 index e08d3f9863b..00000000000 --- a/drivers/net/ethernet/xscale/ixp2000/pm3386.c +++ /dev/null @@ -1,351 +0,0 @@ -/* - * Helper functions for the PM3386s on the Radisys ENP2611 - * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org> - * Dedicated to Marija Kulikova. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <linux/module.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <asm/io.h> -#include "pm3386.h" - -/* - * Read from register 'reg' of PM3386 device 'pm'. 
- */ -static u16 pm3386_reg_read(int pm, int reg) -{ - void *_reg; - u16 value; - - _reg = (void *)ENP2611_PM3386_0_VIRT_BASE; - if (pm == 1) - _reg = (void *)ENP2611_PM3386_1_VIRT_BASE; - - value = *((volatile u16 *)(_reg + (reg << 1))); - -// printk(KERN_INFO "pm3386_reg_read(%d, %.3x) = %.8x\n", pm, reg, value); - - return value; -} - -/* - * Write to register 'reg' of PM3386 device 'pm', and perform - * a readback from the identification register. - */ -static void pm3386_reg_write(int pm, int reg, u16 value) -{ - void *_reg; - u16 dummy; - -// printk(KERN_INFO "pm3386_reg_write(%d, %.3x, %.8x)\n", pm, reg, value); - - _reg = (void *)ENP2611_PM3386_0_VIRT_BASE; - if (pm == 1) - _reg = (void *)ENP2611_PM3386_1_VIRT_BASE; - - *((volatile u16 *)(_reg + (reg << 1))) = value; - - dummy = *((volatile u16 *)_reg); - __asm__ __volatile__("mov %0, %0" : "+r" (dummy)); -} - -/* - * Read from port 'port' register 'reg', where the registers - * for the different ports are 'spacing' registers apart. - */ -static u16 pm3386_port_reg_read(int port, int _reg, int spacing) -{ - int reg; - - reg = _reg; - if (port & 1) - reg += spacing; - - return pm3386_reg_read(port >> 1, reg); -} - -/* - * Write to port 'port' register 'reg', where the registers - * for the different ports are 'spacing' registers apart. - */ -static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value) -{ - int reg; - - reg = _reg; - if (port & 1) - reg += spacing; - - pm3386_reg_write(port >> 1, reg, value); -} - -int pm3386_secondary_present(void) -{ - return pm3386_reg_read(1, 0) == 0x3386; -} - -void pm3386_reset(void) -{ - u8 mac[3][6]; - int secondary; - - secondary = pm3386_secondary_present(); - - /* Save programmed MAC addresses. */ - pm3386_get_mac(0, mac[0]); - pm3386_get_mac(1, mac[1]); - if (secondary) - pm3386_get_mac(2, mac[2]); - - /* Assert analog and digital reset. */ - pm3386_reg_write(0, 0x002, 0x0060); - if (secondary) - pm3386_reg_write(1, 0x002, 0x0060); - mdelay(1); - - /* Deassert analog reset. */ - pm3386_reg_write(0, 0x002, 0x0062); - if (secondary) - pm3386_reg_write(1, 0x002, 0x0062); - mdelay(10); - - /* Deassert digital reset. */ - pm3386_reg_write(0, 0x002, 0x0063); - if (secondary) - pm3386_reg_write(1, 0x002, 0x0063); - mdelay(10); - - /* Restore programmed MAC addresses. */ - pm3386_set_mac(0, mac[0]); - pm3386_set_mac(1, mac[1]); - if (secondary) - pm3386_set_mac(2, mac[2]); - - /* Disable carrier on all ports. */ - pm3386_set_carrier(0, 0); - pm3386_set_carrier(1, 0); - if (secondary) - pm3386_set_carrier(2, 0); -} - -static u16 swaph(u16 x) -{ - return ((x << 8) | (x >> 8)) & 0xffff; -} - -int pm3386_port_count(void) -{ - return 2 + pm3386_secondary_present(); -} - -void pm3386_init_port(int port) -{ - int pm = port >> 1; - - /* - * Work around ENP2611 bootloader programming MAC address - * in reverse. - */ - if (pm3386_port_reg_read(port, 0x30a, 0x100) == 0x0000 && - (pm3386_port_reg_read(port, 0x309, 0x100) & 0xff00) == 0x5000) { - u16 temp[3]; - - temp[0] = pm3386_port_reg_read(port, 0x308, 0x100); - temp[1] = pm3386_port_reg_read(port, 0x309, 0x100); - temp[2] = pm3386_port_reg_read(port, 0x30a, 0x100); - pm3386_port_reg_write(port, 0x308, 0x100, swaph(temp[2])); - pm3386_port_reg_write(port, 0x309, 0x100, swaph(temp[1])); - pm3386_port_reg_write(port, 0x30a, 0x100, swaph(temp[0])); - } - - /* - * Initialise narrowbanding mode. See application note 2010486 - * for more information. (@@@ We also need to issue a reset - * when ROOL or DOOL are detected.) 
- */ - pm3386_port_reg_write(port, 0x708, 0x10, 0xd055); - udelay(500); - pm3386_port_reg_write(port, 0x708, 0x10, 0x5055); - - /* - * SPI-3 ingress block. Set 64 bytes SPI-3 burst size - * towards SPI-3 bridge. - */ - pm3386_port_reg_write(port, 0x122, 0x20, 0x0002); - - /* - * Enable ingress protocol checking, and soft reset the - * SPI-3 ingress block. - */ - pm3386_reg_write(pm, 0x103, 0x0003); - while (!(pm3386_reg_read(pm, 0x103) & 0x80)) - ; - - /* - * SPI-3 egress block. Gather 12288 bytes of the current - * packet in the TX fifo before initiating transmit on the - * SERDES interface. (Prevents TX underflows.) - */ - pm3386_port_reg_write(port, 0x221, 0x20, 0x0007); - - /* - * Enforce odd parity from the SPI-3 bridge, and soft reset - * the SPI-3 egress block. - */ - pm3386_reg_write(pm, 0x203, 0x000d & ~(4 << (port & 1))); - while ((pm3386_reg_read(pm, 0x203) & 0x000c) != 0x000c) - ; - - /* - * EGMAC block. Set this channels to reject long preambles, - * not send or transmit PAUSE frames, enable preamble checking, - * disable frame length checking, enable FCS appending, enable - * TX frame padding. - */ - pm3386_port_reg_write(port, 0x302, 0x100, 0x0113); - - /* - * Soft reset the EGMAC block. - */ - pm3386_port_reg_write(port, 0x301, 0x100, 0x8000); - pm3386_port_reg_write(port, 0x301, 0x100, 0x0000); - - /* - * Auto-sense autonegotiation status. - */ - pm3386_port_reg_write(port, 0x306, 0x100, 0x0100); - - /* - * Allow reception of jumbo frames. - */ - pm3386_port_reg_write(port, 0x310, 0x100, 9018); - - /* - * Allow transmission of jumbo frames. - */ - pm3386_port_reg_write(port, 0x336, 0x100, 9018); - - /* @@@ Should set 0x337/0x437 (RX forwarding threshold.) */ - - /* - * Set autonegotiation parameters to 'no PAUSE, full duplex.' - */ - pm3386_port_reg_write(port, 0x31c, 0x100, 0x0020); - - /* - * Enable and restart autonegotiation. - */ - pm3386_port_reg_write(port, 0x318, 0x100, 0x0003); - pm3386_port_reg_write(port, 0x318, 0x100, 0x0002); -} - -void pm3386_get_mac(int port, u8 *mac) -{ - u16 temp; - - temp = pm3386_port_reg_read(port, 0x308, 0x100); - mac[0] = temp & 0xff; - mac[1] = (temp >> 8) & 0xff; - - temp = pm3386_port_reg_read(port, 0x309, 0x100); - mac[2] = temp & 0xff; - mac[3] = (temp >> 8) & 0xff; - - temp = pm3386_port_reg_read(port, 0x30a, 0x100); - mac[4] = temp & 0xff; - mac[5] = (temp >> 8) & 0xff; -} - -void pm3386_set_mac(int port, u8 *mac) -{ - pm3386_port_reg_write(port, 0x308, 0x100, (mac[1] << 8) | mac[0]); - pm3386_port_reg_write(port, 0x309, 0x100, (mac[3] << 8) | mac[2]); - pm3386_port_reg_write(port, 0x30a, 0x100, (mac[5] << 8) | mac[4]); -} - -static u32 pm3386_get_stat(int port, u16 base) -{ - u32 value; - - value = pm3386_port_reg_read(port, base, 0x100); - value |= pm3386_port_reg_read(port, base + 1, 0x100) << 16; - - return value; -} - -void pm3386_get_stats(int port, struct net_device_stats *stats) -{ - /* - * Snapshot statistics counters. - */ - pm3386_port_reg_write(port, 0x500, 0x100, 0x0001); - while (pm3386_port_reg_read(port, 0x500, 0x100) & 0x0001) - ; - - memset(stats, 0, sizeof(*stats)); - - stats->rx_packets = pm3386_get_stat(port, 0x510); - stats->tx_packets = pm3386_get_stat(port, 0x590); - stats->rx_bytes = pm3386_get_stat(port, 0x514); - stats->tx_bytes = pm3386_get_stat(port, 0x594); - /* @@@ Add other stats. */ -} - -void pm3386_set_carrier(int port, int state) -{ - pm3386_port_reg_write(port, 0x703, 0x10, state ? 
0x1001 : 0x0000); -} - -int pm3386_is_link_up(int port) -{ - u16 temp; - - temp = pm3386_port_reg_read(port, 0x31a, 0x100); - temp = pm3386_port_reg_read(port, 0x31a, 0x100); - - return !!(temp & 0x0002); -} - -void pm3386_enable_rx(int port) -{ - u16 temp; - - temp = pm3386_port_reg_read(port, 0x303, 0x100); - temp |= 0x1000; - pm3386_port_reg_write(port, 0x303, 0x100, temp); -} - -void pm3386_disable_rx(int port) -{ - u16 temp; - - temp = pm3386_port_reg_read(port, 0x303, 0x100); - temp &= 0xefff; - pm3386_port_reg_write(port, 0x303, 0x100, temp); -} - -void pm3386_enable_tx(int port) -{ - u16 temp; - - temp = pm3386_port_reg_read(port, 0x303, 0x100); - temp |= 0x4000; - pm3386_port_reg_write(port, 0x303, 0x100, temp); -} - -void pm3386_disable_tx(int port) -{ - u16 temp; - - temp = pm3386_port_reg_read(port, 0x303, 0x100); - temp &= 0xbfff; - pm3386_port_reg_write(port, 0x303, 0x100, temp); -} - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/xscale/ixp2000/pm3386.h b/drivers/net/ethernet/xscale/ixp2000/pm3386.h deleted file mode 100644 index cc4183dca91..00000000000 --- a/drivers/net/ethernet/xscale/ixp2000/pm3386.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Helper functions for the PM3386s on the Radisys ENP2611 - * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org> - * Dedicated to Marija Kulikova. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#ifndef __PM3386_H -#define __PM3386_H - -void pm3386_reset(void); -int pm3386_port_count(void); -void pm3386_init_port(int port); -void pm3386_get_mac(int port, u8 *mac); -void pm3386_set_mac(int port, u8 *mac); -void pm3386_get_stats(int port, struct net_device_stats *stats); -void pm3386_set_carrier(int port, int state); -int pm3386_is_link_up(int port); -void pm3386_enable_rx(int port); -void pm3386_disable_rx(int port); -void pm3386_enable_tx(int port); -void pm3386_disable_tx(int port); - - -#endif diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 41a8b5a9849..482648fcf0b 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1002,12 +1002,41 @@ static int ixp4xx_nway_reset(struct net_device *dev) return phy_start_aneg(port->phydev); } +int ixp46x_phc_index = -1; + +static int ixp4xx_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + if (!cpu_is_ixp46x()) { + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + return 0; + } + info->so_timestamping = + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->phc_index = ixp46x_phc_index; + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ); + return 0; +} + static const struct ethtool_ops ixp4xx_ethtool_ops = { .get_drvinfo = ixp4xx_get_drvinfo, .get_settings = ixp4xx_get_settings, .set_settings = ixp4xx_set_settings, .nway_reset = ixp4xx_nway_reset, .get_link = ethtool_op_get_link, + .get_ts_info = ixp4xx_get_ts_info, }; diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 168c8f41d09..d4719632ffc 100644 --- 
a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -113,10 +113,9 @@ static int __devinit rr_init_one(struct pci_dev *pdev, SET_NETDEV_DEV(dev, &pdev->dev); - if (pci_request_regions(pdev, "rrunner")) { - ret = -EIO; + ret = pci_request_regions(pdev, "rrunner"); + if (ret < 0) goto out; - } pci_set_drvdata(pdev, dev); @@ -124,11 +123,8 @@ static int __devinit rr_init_one(struct pci_dev *pdev, spin_lock_init(&rrpriv->lock); - dev->irq = pdev->irq; dev->netdev_ops = &rr_netdev_ops; - dev->base_addr = pci_resource_start(pdev, 0); - /* display version info if adapter is found */ if (!version_disp) { /* set display flag to TRUE so that */ @@ -146,16 +142,15 @@ static int __devinit rr_init_one(struct pci_dev *pdev, pci_set_master(pdev); printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI " - "at 0x%08lx, irq %i, PCI latency %i\n", dev->name, - dev->base_addr, dev->irq, pci_latency); + "at 0x%llx, irq %i, PCI latency %i\n", dev->name, + (unsigned long long)pci_resource_start(pdev, 0), + pdev->irq, pci_latency); /* - * Remap the regs into kernel space. + * Remap the MMIO regs into kernel space. */ - - rrpriv->regs = ioremap(dev->base_addr, 0x1000); - - if (!rrpriv->regs){ + rrpriv->regs = pci_iomap(pdev, 0, 0x1000); + if (!rrpriv->regs) { printk(KERN_ERR "%s: Unable to map I/O register, " "RoadRunner will be disabled.\n", dev->name); ret = -EIO; @@ -202,8 +197,6 @@ static int __devinit rr_init_one(struct pci_dev *pdev, rr_init(dev); - dev->base_addr = 0; - ret = register_netdev(dev); if (ret) goto out; @@ -217,7 +210,7 @@ static int __devinit rr_init_one(struct pci_dev *pdev, pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring, rrpriv->tx_ring_dma); if (rrpriv->regs) - iounmap(rrpriv->regs); + pci_iounmap(pdev, rrpriv->regs); if (pdev) { pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); @@ -231,29 +224,26 @@ static int __devinit rr_init_one(struct pci_dev *pdev, static void __devexit rr_remove_one (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); + struct rr_private *rr = netdev_priv(dev); - if (dev) { - struct rr_private *rr = netdev_priv(dev); - - if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)){ - printk(KERN_ERR "%s: trying to unload running NIC\n", - dev->name); - writel(HALT_NIC, &rr->regs->HostCtrl); - } - - pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring, - rr->evt_ring_dma); - pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring, - rr->rx_ring_dma); - pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring, - rr->tx_ring_dma); - unregister_netdev(dev); - iounmap(rr->regs); - free_netdev(dev); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); + if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)) { + printk(KERN_ERR "%s: trying to unload running NIC\n", + dev->name); + writel(HALT_NIC, &rr->regs->HostCtrl); } + + unregister_netdev(dev); + pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring, + rr->evt_ring_dma); + pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring, + rr->rx_ring_dma); + pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring, + rr->tx_ring_dma); + pci_iounmap(pdev, rr->regs); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + free_netdev(dev); } @@ -1229,9 +1219,9 @@ static int rr_open(struct net_device *dev) readl(®s->HostCtrl); spin_unlock_irqrestore(&rrpriv->lock, flags); - if (request_irq(dev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) { + if (request_irq(pdev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) { printk(KERN_WARNING "%s: 
Requested IRQ %d is busy\n", - dev->name, dev->irq); + dev->name, pdev->irq); ecode = -EAGAIN; goto error; } @@ -1338,16 +1328,15 @@ static void rr_dump(struct net_device *dev) static int rr_close(struct net_device *dev) { - struct rr_private *rrpriv; - struct rr_regs __iomem *regs; + struct rr_private *rrpriv = netdev_priv(dev); + struct rr_regs __iomem *regs = rrpriv->regs; + struct pci_dev *pdev = rrpriv->pci_dev; unsigned long flags; u32 tmp; short i; netif_stop_queue(dev); - rrpriv = netdev_priv(dev); - regs = rrpriv->regs; /* * Lock to make sure we are not cleaning up while another CPU @@ -1386,15 +1375,15 @@ static int rr_close(struct net_device *dev) rr_raz_tx(rrpriv, dev); rr_raz_rx(rrpriv, dev); - pci_free_consistent(rrpriv->pci_dev, 256 * sizeof(struct ring_ctrl), + pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl), rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma); rrpriv->rx_ctrl = NULL; - pci_free_consistent(rrpriv->pci_dev, sizeof(struct rr_info), - rrpriv->info, rrpriv->info_dma); + pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info, + rrpriv->info_dma); rrpriv->info = NULL; - free_irq(dev->irq, dev); + free_irq(pdev->irq, dev); spin_unlock_irqrestore(&rrpriv->lock, flags); return 0; diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index c3582455279..4ffcd57b011 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -27,6 +27,7 @@ #include <linux/list.h> #include <linux/hyperv.h> +#include <linux/rndis.h> /* Fwd declaration */ struct hv_netvsc_packet; @@ -506,295 +507,6 @@ struct netvsc_device { void *extension; }; - -/* Status codes */ - - -#ifndef STATUS_SUCCESS -#define STATUS_SUCCESS (0x00000000L) -#endif - -#ifndef STATUS_UNSUCCESSFUL -#define STATUS_UNSUCCESSFUL (0xC0000001L) -#endif - -#ifndef STATUS_PENDING -#define STATUS_PENDING (0x00000103L) -#endif - -#ifndef STATUS_INSUFFICIENT_RESOURCES -#define STATUS_INSUFFICIENT_RESOURCES (0xC000009AL) -#endif - -#ifndef STATUS_BUFFER_OVERFLOW -#define STATUS_BUFFER_OVERFLOW (0x80000005L) -#endif - -#ifndef STATUS_NOT_SUPPORTED -#define STATUS_NOT_SUPPORTED (0xC00000BBL) -#endif - -#define RNDIS_STATUS_SUCCESS (STATUS_SUCCESS) -#define RNDIS_STATUS_PENDING (STATUS_PENDING) -#define RNDIS_STATUS_NOT_RECOGNIZED (0x00010001L) -#define RNDIS_STATUS_NOT_COPIED (0x00010002L) -#define RNDIS_STATUS_NOT_ACCEPTED (0x00010003L) -#define RNDIS_STATUS_CALL_ACTIVE (0x00010007L) - -#define RNDIS_STATUS_ONLINE (0x40010003L) -#define RNDIS_STATUS_RESET_START (0x40010004L) -#define RNDIS_STATUS_RESET_END (0x40010005L) -#define RNDIS_STATUS_RING_STATUS (0x40010006L) -#define RNDIS_STATUS_CLOSED (0x40010007L) -#define RNDIS_STATUS_WAN_LINE_UP (0x40010008L) -#define RNDIS_STATUS_WAN_LINE_DOWN (0x40010009L) -#define RNDIS_STATUS_WAN_FRAGMENT (0x4001000AL) -#define RNDIS_STATUS_MEDIA_CONNECT (0x4001000BL) -#define RNDIS_STATUS_MEDIA_DISCONNECT (0x4001000CL) -#define RNDIS_STATUS_HARDWARE_LINE_UP (0x4001000DL) -#define RNDIS_STATUS_HARDWARE_LINE_DOWN (0x4001000EL) -#define RNDIS_STATUS_INTERFACE_UP (0x4001000FL) -#define RNDIS_STATUS_INTERFACE_DOWN (0x40010010L) -#define RNDIS_STATUS_MEDIA_BUSY (0x40010011L) -#define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION (0x40010012L) -#define RNDIS_STATUS_WW_INDICATION RDIA_SPECIFIC_INDICATION -#define RNDIS_STATUS_LINK_SPEED_CHANGE (0x40010013L) - -#define RNDIS_STATUS_NOT_RESETTABLE (0x80010001L) -#define RNDIS_STATUS_SOFT_ERRORS (0x80010003L) -#define RNDIS_STATUS_HARD_ERRORS (0x80010004L) -#define RNDIS_STATUS_BUFFER_OVERFLOW 
(STATUS_BUFFER_OVERFLOW) - -#define RNDIS_STATUS_FAILURE (STATUS_UNSUCCESSFUL) -#define RNDIS_STATUS_RESOURCES (STATUS_INSUFFICIENT_RESOURCES) -#define RNDIS_STATUS_CLOSING (0xC0010002L) -#define RNDIS_STATUS_BAD_VERSION (0xC0010004L) -#define RNDIS_STATUS_BAD_CHARACTERISTICS (0xC0010005L) -#define RNDIS_STATUS_ADAPTER_NOT_FOUND (0xC0010006L) -#define RNDIS_STATUS_OPEN_FAILED (0xC0010007L) -#define RNDIS_STATUS_DEVICE_FAILED (0xC0010008L) -#define RNDIS_STATUS_MULTICAST_FULL (0xC0010009L) -#define RNDIS_STATUS_MULTICAST_EXISTS (0xC001000AL) -#define RNDIS_STATUS_MULTICAST_NOT_FOUND (0xC001000BL) -#define RNDIS_STATUS_REQUEST_ABORTED (0xC001000CL) -#define RNDIS_STATUS_RESET_IN_PROGRESS (0xC001000DL) -#define RNDIS_STATUS_CLOSING_INDICATING (0xC001000EL) -#define RNDIS_STATUS_NOT_SUPPORTED (STATUS_NOT_SUPPORTED) -#define RNDIS_STATUS_INVALID_PACKET (0xC001000FL) -#define RNDIS_STATUS_OPEN_LIST_FULL (0xC0010010L) -#define RNDIS_STATUS_ADAPTER_NOT_READY (0xC0010011L) -#define RNDIS_STATUS_ADAPTER_NOT_OPEN (0xC0010012L) -#define RNDIS_STATUS_NOT_INDICATING (0xC0010013L) -#define RNDIS_STATUS_INVALID_LENGTH (0xC0010014L) -#define RNDIS_STATUS_INVALID_DATA (0xC0010015L) -#define RNDIS_STATUS_BUFFER_TOO_SHORT (0xC0010016L) -#define RNDIS_STATUS_INVALID_OID (0xC0010017L) -#define RNDIS_STATUS_ADAPTER_REMOVED (0xC0010018L) -#define RNDIS_STATUS_UNSUPPORTED_MEDIA (0xC0010019L) -#define RNDIS_STATUS_GROUP_ADDRESS_IN_USE (0xC001001AL) -#define RNDIS_STATUS_FILE_NOT_FOUND (0xC001001BL) -#define RNDIS_STATUS_ERROR_READING_FILE (0xC001001CL) -#define RNDIS_STATUS_ALREADY_MAPPED (0xC001001DL) -#define RNDIS_STATUS_RESOURCE_CONFLICT (0xC001001EL) -#define RNDIS_STATUS_NO_CABLE (0xC001001FL) - -#define RNDIS_STATUS_INVALID_SAP (0xC0010020L) -#define RNDIS_STATUS_SAP_IN_USE (0xC0010021L) -#define RNDIS_STATUS_INVALID_ADDRESS (0xC0010022L) -#define RNDIS_STATUS_VC_NOT_ACTIVATED (0xC0010023L) -#define RNDIS_STATUS_DEST_OUT_OF_ORDER (0xC0010024L) -#define RNDIS_STATUS_VC_NOT_AVAILABLE (0xC0010025L) -#define RNDIS_STATUS_CELLRATE_NOT_AVAILABLE (0xC0010026L) -#define RNDIS_STATUS_INCOMPATABLE_QOS (0xC0010027L) -#define RNDIS_STATUS_AAL_PARAMS_UNSUPPORTED (0xC0010028L) -#define RNDIS_STATUS_NO_ROUTE_TO_DESTINATION (0xC0010029L) - -#define RNDIS_STATUS_TOKEN_RING_OPEN_ERROR (0xC0011000L) - -/* Object Identifiers used by NdisRequest Query/Set Information */ -/* General Objects */ -#define RNDIS_OID_GEN_SUPPORTED_LIST 0x00010101 -#define RNDIS_OID_GEN_HARDWARE_STATUS 0x00010102 -#define RNDIS_OID_GEN_MEDIA_SUPPORTED 0x00010103 -#define RNDIS_OID_GEN_MEDIA_IN_USE 0x00010104 -#define RNDIS_OID_GEN_MAXIMUM_LOOKAHEAD 0x00010105 -#define RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE 0x00010106 -#define RNDIS_OID_GEN_LINK_SPEED 0x00010107 -#define RNDIS_OID_GEN_TRANSMIT_BUFFER_SPACE 0x00010108 -#define RNDIS_OID_GEN_RECEIVE_BUFFER_SPACE 0x00010109 -#define RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE 0x0001010A -#define RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE 0x0001010B -#define RNDIS_OID_GEN_VENDOR_ID 0x0001010C -#define RNDIS_OID_GEN_VENDOR_DESCRIPTION 0x0001010D -#define RNDIS_OID_GEN_CURRENT_PACKET_FILTER 0x0001010E -#define RNDIS_OID_GEN_CURRENT_LOOKAHEAD 0x0001010F -#define RNDIS_OID_GEN_DRIVER_VERSION 0x00010110 -#define RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE 0x00010111 -#define RNDIS_OID_GEN_PROTOCOL_OPTIONS 0x00010112 -#define RNDIS_OID_GEN_MAC_OPTIONS 0x00010113 -#define RNDIS_OID_GEN_MEDIA_CONNECT_STATUS 0x00010114 -#define RNDIS_OID_GEN_MAXIMUM_SEND_PACKETS 0x00010115 -#define RNDIS_OID_GEN_VENDOR_DRIVER_VERSION 0x00010116 -#define 
RNDIS_OID_GEN_NETWORK_LAYER_ADDRESSES 0x00010118 -#define RNDIS_OID_GEN_TRANSPORT_HEADER_OFFSET 0x00010119 -#define RNDIS_OID_GEN_MACHINE_NAME 0x0001021A -#define RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER 0x0001021B - -#define RNDIS_OID_GEN_XMIT_OK 0x00020101 -#define RNDIS_OID_GEN_RCV_OK 0x00020102 -#define RNDIS_OID_GEN_XMIT_ERROR 0x00020103 -#define RNDIS_OID_GEN_RCV_ERROR 0x00020104 -#define RNDIS_OID_GEN_RCV_NO_BUFFER 0x00020105 - -#define RNDIS_OID_GEN_DIRECTED_BYTES_XMIT 0x00020201 -#define RNDIS_OID_GEN_DIRECTED_FRAMES_XMIT 0x00020202 -#define RNDIS_OID_GEN_MULTICAST_BYTES_XMIT 0x00020203 -#define RNDIS_OID_GEN_MULTICAST_FRAMES_XMIT 0x00020204 -#define RNDIS_OID_GEN_BROADCAST_BYTES_XMIT 0x00020205 -#define RNDIS_OID_GEN_BROADCAST_FRAMES_XMIT 0x00020206 -#define RNDIS_OID_GEN_DIRECTED_BYTES_RCV 0x00020207 -#define RNDIS_OID_GEN_DIRECTED_FRAMES_RCV 0x00020208 -#define RNDIS_OID_GEN_MULTICAST_BYTES_RCV 0x00020209 -#define RNDIS_OID_GEN_MULTICAST_FRAMES_RCV 0x0002020A -#define RNDIS_OID_GEN_BROADCAST_BYTES_RCV 0x0002020B -#define RNDIS_OID_GEN_BROADCAST_FRAMES_RCV 0x0002020C - -#define RNDIS_OID_GEN_RCV_CRC_ERROR 0x0002020D -#define RNDIS_OID_GEN_TRANSMIT_QUEUE_LENGTH 0x0002020E - -#define RNDIS_OID_GEN_GET_TIME_CAPS 0x0002020F -#define RNDIS_OID_GEN_GET_NETCARD_TIME 0x00020210 - -/* These are connection-oriented general OIDs. */ -/* These replace the above OIDs for connection-oriented media. */ -#define RNDIS_OID_GEN_CO_SUPPORTED_LIST 0x00010101 -#define RNDIS_OID_GEN_CO_HARDWARE_STATUS 0x00010102 -#define RNDIS_OID_GEN_CO_MEDIA_SUPPORTED 0x00010103 -#define RNDIS_OID_GEN_CO_MEDIA_IN_USE 0x00010104 -#define RNDIS_OID_GEN_CO_LINK_SPEED 0x00010105 -#define RNDIS_OID_GEN_CO_VENDOR_ID 0x00010106 -#define RNDIS_OID_GEN_CO_VENDOR_DESCRIPTION 0x00010107 -#define RNDIS_OID_GEN_CO_DRIVER_VERSION 0x00010108 -#define RNDIS_OID_GEN_CO_PROTOCOL_OPTIONS 0x00010109 -#define RNDIS_OID_GEN_CO_MAC_OPTIONS 0x0001010A -#define RNDIS_OID_GEN_CO_MEDIA_CONNECT_STATUS 0x0001010B -#define RNDIS_OID_GEN_CO_VENDOR_DRIVER_VERSION 0x0001010C -#define RNDIS_OID_GEN_CO_MINIMUM_LINK_SPEED 0x0001010D - -#define RNDIS_OID_GEN_CO_GET_TIME_CAPS 0x00010201 -#define RNDIS_OID_GEN_CO_GET_NETCARD_TIME 0x00010202 - -/* These are connection-oriented statistics OIDs. */ -#define RNDIS_OID_GEN_CO_XMIT_PDUS_OK 0x00020101 -#define RNDIS_OID_GEN_CO_RCV_PDUS_OK 0x00020102 -#define RNDIS_OID_GEN_CO_XMIT_PDUS_ERROR 0x00020103 -#define RNDIS_OID_GEN_CO_RCV_PDUS_ERROR 0x00020104 -#define RNDIS_OID_GEN_CO_RCV_PDUS_NO_BUFFER 0x00020105 - - -#define RNDIS_OID_GEN_CO_RCV_CRC_ERROR 0x00020201 -#define RNDIS_OID_GEN_CO_TRANSMIT_QUEUE_LENGTH 0x00020202 -#define RNDIS_OID_GEN_CO_BYTES_XMIT 0x00020203 -#define RNDIS_OID_GEN_CO_BYTES_RCV 0x00020204 -#define RNDIS_OID_GEN_CO_BYTES_XMIT_OUTSTANDING 0x00020205 -#define RNDIS_OID_GEN_CO_NETCARD_LOAD 0x00020206 - -/* These are objects for Connection-oriented media call-managers. 
*/ -#define RNDIS_OID_CO_ADD_PVC 0xFF000001 -#define RNDIS_OID_CO_DELETE_PVC 0xFF000002 -#define RNDIS_OID_CO_GET_CALL_INFORMATION 0xFF000003 -#define RNDIS_OID_CO_ADD_ADDRESS 0xFF000004 -#define RNDIS_OID_CO_DELETE_ADDRESS 0xFF000005 -#define RNDIS_OID_CO_GET_ADDRESSES 0xFF000006 -#define RNDIS_OID_CO_ADDRESS_CHANGE 0xFF000007 -#define RNDIS_OID_CO_SIGNALING_ENABLED 0xFF000008 -#define RNDIS_OID_CO_SIGNALING_DISABLED 0xFF000009 - -/* 802.3 Objects (Ethernet) */ -#define RNDIS_OID_802_3_PERMANENT_ADDRESS 0x01010101 -#define RNDIS_OID_802_3_CURRENT_ADDRESS 0x01010102 -#define RNDIS_OID_802_3_MULTICAST_LIST 0x01010103 -#define RNDIS_OID_802_3_MAXIMUM_LIST_SIZE 0x01010104 -#define RNDIS_OID_802_3_MAC_OPTIONS 0x01010105 - -#define NDIS_802_3_MAC_OPTION_PRIORITY 0x00000001 - -#define RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101 -#define RNDIS_OID_802_3_XMIT_ONE_COLLISION 0x01020102 -#define RNDIS_OID_802_3_XMIT_MORE_COLLISIONS 0x01020103 - -#define RNDIS_OID_802_3_XMIT_DEFERRED 0x01020201 -#define RNDIS_OID_802_3_XMIT_MAX_COLLISIONS 0x01020202 -#define RNDIS_OID_802_3_RCV_OVERRUN 0x01020203 -#define RNDIS_OID_802_3_XMIT_UNDERRUN 0x01020204 -#define RNDIS_OID_802_3_XMIT_HEARTBEAT_FAILURE 0x01020205 -#define RNDIS_OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206 -#define RNDIS_OID_802_3_XMIT_LATE_COLLISIONS 0x01020207 - -/* Remote NDIS message types */ -#define REMOTE_NDIS_PACKET_MSG 0x00000001 -#define REMOTE_NDIS_INITIALIZE_MSG 0x00000002 -#define REMOTE_NDIS_HALT_MSG 0x00000003 -#define REMOTE_NDIS_QUERY_MSG 0x00000004 -#define REMOTE_NDIS_SET_MSG 0x00000005 -#define REMOTE_NDIS_RESET_MSG 0x00000006 -#define REMOTE_NDIS_INDICATE_STATUS_MSG 0x00000007 -#define REMOTE_NDIS_KEEPALIVE_MSG 0x00000008 - -#define REMOTE_CONDIS_MP_CREATE_VC_MSG 0x00008001 -#define REMOTE_CONDIS_MP_DELETE_VC_MSG 0x00008002 -#define REMOTE_CONDIS_MP_ACTIVATE_VC_MSG 0x00008005 -#define REMOTE_CONDIS_MP_DEACTIVATE_VC_MSG 0x00008006 -#define REMOTE_CONDIS_INDICATE_STATUS_MSG 0x00008007 - -/* Remote NDIS message completion types */ -#define REMOTE_NDIS_INITIALIZE_CMPLT 0x80000002 -#define REMOTE_NDIS_QUERY_CMPLT 0x80000004 -#define REMOTE_NDIS_SET_CMPLT 0x80000005 -#define REMOTE_NDIS_RESET_CMPLT 0x80000006 -#define REMOTE_NDIS_KEEPALIVE_CMPLT 0x80000008 - -#define REMOTE_CONDIS_MP_CREATE_VC_CMPLT 0x80008001 -#define REMOTE_CONDIS_MP_DELETE_VC_CMPLT 0x80008002 -#define REMOTE_CONDIS_MP_ACTIVATE_VC_CMPLT 0x80008005 -#define REMOTE_CONDIS_MP_DEACTIVATE_VC_CMPLT 0x80008006 - -/* - * Reserved message type for private communication between lower-layer host - * driver and remote device, if necessary. - */ -#define REMOTE_NDIS_BUS_MSG 0xff000001 - -/* Defines for DeviceFlags in struct rndis_initialize_complete */ -#define RNDIS_DF_CONNECTIONLESS 0x00000001 -#define RNDIS_DF_CONNECTION_ORIENTED 0x00000002 -#define RNDIS_DF_RAW_DATA 0x00000004 - -/* Remote NDIS medium types. */ -#define RNDIS_MEDIUM_802_3 0x00000000 -#define RNDIS_MEDIUM_802_5 0x00000001 -#define RNDIS_MEDIUM_FDDI 0x00000002 -#define RNDIS_MEDIUM_WAN 0x00000003 -#define RNDIS_MEDIUM_LOCAL_TALK 0x00000004 -#define RNDIS_MEDIUM_ARCNET_RAW 0x00000006 -#define RNDIS_MEDIUM_ARCNET_878_2 0x00000007 -#define RNDIS_MEDIUM_ATM 0x00000008 -#define RNDIS_MEDIUM_WIRELESS_WAN 0x00000009 -#define RNDIS_MEDIUM_IRDA 0x0000000a -#define RNDIS_MEDIUM_CO_WAN 0x0000000b -/* Not a real medium, defined as an upper-bound */ -#define RNDIS_MEDIUM_MAX 0x0000000d - - -/* Remote NDIS medium connection states. 
*/ -#define RNDIS_MEDIA_STATE_CONNECTED 0x00000000 -#define RNDIS_MEDIA_STATE_DISCONNECTED 0x00000001 - -/* Remote NDIS version numbers */ -#define RNDIS_MAJOR_VERSION 0x00000001 -#define RNDIS_MINOR_VERSION 0x00000000 - - /* NdisInitialize message */ struct rndis_initialize_request { u32 req_id; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index d025c83cd12..8b919471472 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -428,6 +428,24 @@ int netvsc_device_remove(struct hv_device *device) return 0; } + +#define RING_AVAIL_PERCENT_HIWATER 20 +#define RING_AVAIL_PERCENT_LOWATER 10 + +/* + * Get the percentage of available bytes to write in the ring. + * The return value is in range from 0 to 100. + */ +static inline u32 hv_ringbuf_avail_percent( + struct hv_ring_buffer_info *ring_info) +{ + u32 avail_read, avail_write; + + hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write); + + return avail_write * 100 / ring_info->ring_datasize; +} + static void netvsc_send_completion(struct hv_device *device, struct vmpacket_descriptor *packet) { @@ -455,6 +473,8 @@ static void netvsc_send_completion(struct hv_device *device, complete(&net_device->channel_init_wait); } else if (nvsp_packet->hdr.msg_type == NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) { + int num_outstanding_sends; + /* Get the send context */ nvsc_packet = (struct hv_netvsc_packet *)(unsigned long) packet->trans_id; @@ -463,10 +483,14 @@ static void netvsc_send_completion(struct hv_device *device, nvsc_packet->completion.send.send_completion( nvsc_packet->completion.send.send_completion_ctx); - atomic_dec(&net_device->num_outstanding_sends); + num_outstanding_sends = + atomic_dec_return(&net_device->num_outstanding_sends); - if (netif_queue_stopped(ndev) && !net_device->start_remove) - netif_wake_queue(ndev); + if (netif_queue_stopped(ndev) && !net_device->start_remove && + (hv_ringbuf_avail_percent(&device->channel->outbound) + > RING_AVAIL_PERCENT_HIWATER || + num_outstanding_sends < 1)) + netif_wake_queue(ndev); } else { netdev_err(ndev, "Unknown send completion packet type- " "%d received!!\n", nvsp_packet->hdr.msg_type); @@ -519,10 +543,19 @@ int netvsc_send(struct hv_device *device, if (ret == 0) { atomic_inc(&net_device->num_outstanding_sends); + if (hv_ringbuf_avail_percent(&device->channel->outbound) < + RING_AVAIL_PERCENT_LOWATER) { + netif_stop_queue(ndev); + if (atomic_read(&net_device-> + num_outstanding_sends) < 1) + netif_wake_queue(ndev); + } } else if (ret == -EAGAIN) { netif_stop_queue(ndev); - if (atomic_read(&net_device->num_outstanding_sends) < 1) + if (atomic_read(&net_device->num_outstanding_sends) < 1) { netif_wake_queue(ndev); + ret = -ENOSPC; + } } else { netdev_err(ndev, "Unable to send packet %p ret %d\n", packet, ret); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 2d59138db7f..8f8ed332042 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -211,9 +211,13 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) net->stats.tx_packets++; } else { kfree(packet); + if (ret != -EAGAIN) { + dev_kfree_skb_any(skb); + net->stats.tx_dropped++; + } } - return ret ? NETDEV_TX_BUSY : NETDEV_TX_OK; + return (ret == -EAGAIN) ? 
NETDEV_TX_BUSY : NETDEV_TX_OK; } /* diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index d6be64bcefd..981ebb11563 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -129,8 +129,8 @@ static void dump_rndis_message(struct hv_device *hv_dev, netdev = net_device->ndev; switch (rndis_msg->ndis_msg_type) { - case REMOTE_NDIS_PACKET_MSG: - netdev_dbg(netdev, "REMOTE_NDIS_PACKET_MSG (len %u, " + case RNDIS_MSG_PACKET: + netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, " "data offset %u data len %u, # oob %u, " "oob offset %u, oob len %u, pkt offset %u, " "pkt len %u\n", @@ -144,8 +144,8 @@ static void dump_rndis_message(struct hv_device *hv_dev, rndis_msg->msg.pkt.per_pkt_info_len); break; - case REMOTE_NDIS_INITIALIZE_CMPLT: - netdev_dbg(netdev, "REMOTE_NDIS_INITIALIZE_CMPLT " + case RNDIS_MSG_INIT_C: + netdev_dbg(netdev, "RNDIS_MSG_INIT_C " "(len %u, id 0x%x, status 0x%x, major %d, minor %d, " "device flags %d, max xfer size 0x%x, max pkts %u, " "pkt aligned %u)\n", @@ -162,8 +162,8 @@ static void dump_rndis_message(struct hv_device *hv_dev, pkt_alignment_factor); break; - case REMOTE_NDIS_QUERY_CMPLT: - netdev_dbg(netdev, "REMOTE_NDIS_QUERY_CMPLT " + case RNDIS_MSG_QUERY_C: + netdev_dbg(netdev, "RNDIS_MSG_QUERY_C " "(len %u, id 0x%x, status 0x%x, buf len %u, " "buf offset %u)\n", rndis_msg->msg_len, @@ -175,16 +175,16 @@ static void dump_rndis_message(struct hv_device *hv_dev, info_buf_offset); break; - case REMOTE_NDIS_SET_CMPLT: + case RNDIS_MSG_SET_C: netdev_dbg(netdev, - "REMOTE_NDIS_SET_CMPLT (len %u, id 0x%x, status 0x%x)\n", + "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n", rndis_msg->msg_len, rndis_msg->msg.set_complete.req_id, rndis_msg->msg.set_complete.status); break; - case REMOTE_NDIS_INDICATE_STATUS_MSG: - netdev_dbg(netdev, "REMOTE_NDIS_INDICATE_STATUS_MSG " + case RNDIS_MSG_INDICATE: + netdev_dbg(netdev, "RNDIS_MSG_INDICATE " "(len %u, status 0x%x, buf len %u, buf offset %u)\n", rndis_msg->msg_len, rndis_msg->msg.indicate_status.status, @@ -264,14 +264,14 @@ static void rndis_filter_receive_response(struct rndis_device *dev, sizeof(struct rndis_filter_packet)); if (resp->ndis_msg_type == - REMOTE_NDIS_RESET_CMPLT) { + RNDIS_MSG_RESET_C) { /* does not have a request id field */ request->response_msg.msg.reset_complete. - status = STATUS_BUFFER_OVERFLOW; + status = RNDIS_STATUS_BUFFER_OVERFLOW; } else { request->response_msg.msg. 
init_complete.status = - STATUS_BUFFER_OVERFLOW; + RNDIS_STATUS_BUFFER_OVERFLOW; } } @@ -415,19 +415,19 @@ int rndis_filter_receive(struct hv_device *dev, dump_rndis_message(dev, rndis_msg); switch (rndis_msg->ndis_msg_type) { - case REMOTE_NDIS_PACKET_MSG: + case RNDIS_MSG_PACKET: /* data msg */ rndis_filter_receive_data(rndis_dev, rndis_msg, pkt); break; - case REMOTE_NDIS_INITIALIZE_CMPLT: - case REMOTE_NDIS_QUERY_CMPLT: - case REMOTE_NDIS_SET_CMPLT: + case RNDIS_MSG_INIT_C: + case RNDIS_MSG_QUERY_C: + case RNDIS_MSG_SET_C: /* completion msgs */ rndis_filter_receive_response(rndis_dev, rndis_msg); break; - case REMOTE_NDIS_INDICATE_STATUS_MSG: + case RNDIS_MSG_INDICATE: /* notification msgs */ rndis_filter_receive_indicate_status(rndis_dev, rndis_msg); break; @@ -456,7 +456,7 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, return -EINVAL; *result_size = 0; - request = get_rndis_request(dev, REMOTE_NDIS_QUERY_MSG, + request = get_rndis_request(dev, RNDIS_MSG_QUERY, RNDIS_MESSAGE_SIZE(struct rndis_query_request)); if (!request) { ret = -ENOMEM; @@ -536,7 +536,7 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) ndev = dev->net_dev->ndev; - request = get_rndis_request(dev, REMOTE_NDIS_SET_MSG, + request = get_rndis_request(dev, RNDIS_MSG_SET, RNDIS_MESSAGE_SIZE(struct rndis_set_request) + sizeof(u32)); if (!request) { @@ -588,7 +588,7 @@ static int rndis_filter_init_device(struct rndis_device *dev) u32 status; int ret, t; - request = get_rndis_request(dev, REMOTE_NDIS_INITIALIZE_MSG, + request = get_rndis_request(dev, RNDIS_MSG_INIT, RNDIS_MESSAGE_SIZE(struct rndis_initialize_request)); if (!request) { ret = -ENOMEM; @@ -641,7 +641,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev) struct rndis_halt_request *halt; /* Attempt to do a rndis device halt */ - request = get_rndis_request(dev, REMOTE_NDIS_HALT_MSG, + request = get_rndis_request(dev, RNDIS_MSG_HALT, RNDIS_MESSAGE_SIZE(struct rndis_halt_request)); if (!request) goto cleanup; @@ -805,7 +805,7 @@ int rndis_filter_send(struct hv_device *dev, if (isvlan) rndis_msg_size += NDIS_VLAN_PPI_SIZE; - rndis_msg->ndis_msg_type = REMOTE_NDIS_PACKET_MSG; + rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET; rndis_msg->msg_len = pkt->total_data_buflen + rndis_msg_size; diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig index 468047866c8..35758445297 100644 --- a/drivers/net/irda/Kconfig +++ b/drivers/net/irda/Kconfig @@ -321,8 +321,8 @@ config AU1000_FIR Say M to build a module; it will be called au1k_ir.ko config SMC_IRCC_FIR - tristate "SMSC IrCC (EXPERIMENTAL)" - depends on EXPERIMENTAL && IRDA && ISA_DMA_API + tristate "SMSC IrCC" + depends on IRDA && ISA_DMA_API help Say Y here if you want to build support for the SMC Infrared Communications Controller. 
It is used in a wide variety of diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c index 4351296dde3..510b9c8d23a 100644 --- a/drivers/net/irda/donauboe.c +++ b/drivers/net/irda/donauboe.c @@ -1710,7 +1710,7 @@ toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap) /* Flush all packets */ while ((i--) && (self->txpending)) - udelay (10000); + msleep(10); spin_lock_irqsave(&self->spinlock, flags); diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c index 725d6b36782..eb315b8d07a 100644 --- a/drivers/net/irda/sh_irda.c +++ b/drivers/net/irda/sh_irda.c @@ -737,7 +737,7 @@ static int sh_irda_stop(struct net_device *ndev) netif_stop_queue(ndev); pm_runtime_put_sync(&self->pdev->dev); - dev_info(&ndev->dev, "stoped\n"); + dev_info(&ndev->dev, "stopped\n"); return 0; } diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c index e6661b5c1f8..256eddf1f75 100644 --- a/drivers/net/irda/sh_sir.c +++ b/drivers/net/irda/sh_sir.c @@ -685,7 +685,7 @@ static int sh_sir_stop(struct net_device *ndev) netif_stop_queue(ndev); - dev_info(&ndev->dev, "stoped\n"); + dev_info(&ndev->dev, "stopped\n"); return 0; } diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index 6c95d4087b2..a926813ee91 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c @@ -1,7 +1,6 @@ /********************************************************************* * * Description: Driver for the SMC Infrared Communications Controller - * Status: Experimental. * Author: Daniele Peri (peri@csai.unipa.it) * Created at: * Modified at: diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 025367a94ad..66a9bfe7b1c 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -57,7 +57,7 @@ static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port, struct hlist_node *n; hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[addr[5]], hlist) { - if (!compare_ether_addr_64bits(vlan->dev->dev_addr, addr)) + if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr)) return vlan; } return NULL; @@ -96,7 +96,7 @@ static int macvlan_addr_busy(const struct macvlan_port *port, * currently in use by the underlying device or * another macvlan. 
*/ - if (!compare_ether_addr_64bits(port->dev->dev_addr, addr)) + if (ether_addr_equal_64bits(port->dev->dev_addr, addr)) return 1; if (macvlan_hash_lookup(port, addr)) @@ -118,8 +118,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb, return vlan->forward(dev, skb); skb->dev = dev; - if (!compare_ether_addr_64bits(eth->h_dest, - dev->broadcast)) + if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast)) skb->pkt_type = PACKET_BROADCAST; else skb->pkt_type = PACKET_MULTICAST; @@ -312,7 +311,8 @@ static int macvlan_open(struct net_device *dev) int err; if (vlan->port->passthru) { - dev_set_promiscuity(lowerdev, 1); + if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) + dev_set_promiscuity(lowerdev, 1); goto hash_add; } @@ -344,12 +344,15 @@ static int macvlan_stop(struct net_device *dev) struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; + dev_uc_unsync(lowerdev, dev); + dev_mc_unsync(lowerdev, dev); + if (vlan->port->passthru) { - dev_set_promiscuity(lowerdev, -1); + if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) + dev_set_promiscuity(lowerdev, -1); goto hash_del; } - dev_mc_unsync(lowerdev, dev); if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(lowerdev, -1); @@ -399,10 +402,11 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change) dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); } -static void macvlan_set_multicast_list(struct net_device *dev) +static void macvlan_set_mac_lists(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); + dev_uc_sync(vlan->lowerdev, dev); dev_mc_sync(vlan->lowerdev, dev); } @@ -542,6 +546,43 @@ static int macvlan_vlan_rx_kill_vid(struct net_device *dev, return 0; } +static int macvlan_fdb_add(struct ndmsg *ndm, + struct net_device *dev, + unsigned char *addr, + u16 flags) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + int err = -EINVAL; + + if (!vlan->port->passthru) + return -EOPNOTSUPP; + + if (is_unicast_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + + return err; +} + +static int macvlan_fdb_del(struct ndmsg *ndm, + struct net_device *dev, + unsigned char *addr) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + int err = -EINVAL; + + if (!vlan->port->passthru) + return -EOPNOTSUPP; + + if (is_unicast_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + + return err; +} + static void macvlan_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { @@ -572,11 +613,14 @@ static const struct net_device_ops macvlan_netdev_ops = { .ndo_change_mtu = macvlan_change_mtu, .ndo_change_rx_flags = macvlan_change_rx_flags, .ndo_set_mac_address = macvlan_set_mac_address, - .ndo_set_rx_mode = macvlan_set_multicast_list, + .ndo_set_rx_mode = macvlan_set_mac_lists, .ndo_get_stats64 = macvlan_dev_get_stats64, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = macvlan_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = macvlan_vlan_rx_kill_vid, + .ndo_fdb_add = macvlan_fdb_add, + .ndo_fdb_del = macvlan_fdb_del, + .ndo_fdb_dump = ndo_dflt_fdb_dump, }; void macvlan_common_setup(struct net_device *dev) @@ -711,6 +755,9 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, if (data && data[IFLA_MACVLAN_MODE]) vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); + if (data && data[IFLA_MACVLAN_FLAGS]) + vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); + if (vlan->mode == 
MACVLAN_MODE_PASSTHRU) { if (port->count) return -EINVAL; @@ -760,6 +807,16 @@ static int macvlan_changelink(struct net_device *dev, struct macvlan_dev *vlan = netdev_priv(dev); if (data && data[IFLA_MACVLAN_MODE]) vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); + if (data && data[IFLA_MACVLAN_FLAGS]) { + __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); + bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC; + + if (promisc && (flags & MACVLAN_FLAG_NOPROMISC)) + dev_set_promiscuity(vlan->lowerdev, -1); + else if (promisc && !(flags & MACVLAN_FLAG_NOPROMISC)) + dev_set_promiscuity(vlan->lowerdev, 1); + vlan->flags = flags; + } return 0; } @@ -773,7 +830,10 @@ static int macvlan_fill_info(struct sk_buff *skb, { struct macvlan_dev *vlan = netdev_priv(dev); - NLA_PUT_U32(skb, IFLA_MACVLAN_MODE, vlan->mode); + if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode)) + goto nla_put_failure; + if (nla_put_u16(skb, IFLA_MACVLAN_FLAGS, vlan->flags)) + goto nla_put_failure; return 0; nla_put_failure: @@ -781,7 +841,8 @@ nla_put_failure: } static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = { - [IFLA_MACVLAN_MODE] = { .type = NLA_U32 }, + [IFLA_MACVLAN_MODE] = { .type = NLA_U32 }, + [IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 }, }; int macvlan_link_register(struct rtnl_link_ops *ops) diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index cb8fd5069db..2ee56de7b0c 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -506,10 +506,11 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, if (copy > size) { ++from; --count; - } + offset = 0; + } else + offset += size; copy -= size; offset1 += size; - offset = 0; } if (len == offset1) @@ -519,24 +520,29 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, struct page *page[MAX_SKB_FRAGS]; int num_pages; unsigned long base; + unsigned long truesize; - len = from->iov_len - offset1; + len = from->iov_len - offset; if (!len) { - offset1 = 0; + offset = 0; ++from; continue; } - base = (unsigned long)from->iov_base + offset1; + base = (unsigned long)from->iov_base + offset; size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT; + if (i + size > MAX_SKB_FRAGS) + return -EMSGSIZE; num_pages = get_user_pages_fast(base, size, 0, &page[i]); - if ((num_pages != size) || - (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags)) - /* put_page is in skb free */ + if (num_pages != size) { + for (i = 0; i < num_pages; i++) + put_page(page[i]); return -EFAULT; + } + truesize = size * PAGE_SIZE; skb->data_len += len; skb->len += len; - skb->truesize += len; - atomic_add(len, &skb->sk->sk_wmem_alloc); + skb->truesize += truesize; + atomic_add(truesize, &skb->sk->sk_wmem_alloc); while (len) { int off = base & ~PAGE_MASK; int size = min_t(int, len, PAGE_SIZE - off); @@ -547,7 +553,7 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, len -= size; i++; } - offset1 = 0; + offset = 0; ++from; } return 0; @@ -647,7 +653,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, int err; struct virtio_net_hdr vnet_hdr = { 0 }; int vnet_hdr_len = 0; - int copylen; + int copylen = 0; bool zerocopy = false; if (q->flags & IFF_VNET_HDR) { @@ -676,15 +682,31 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, if (unlikely(len < ETH_HLEN)) goto err; + err = -EMSGSIZE; + if (unlikely(count > UIO_MAXIOV)) + goto err; + if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) zerocopy = true; 
if (zerocopy) { + /* Userspace may produce vectors with count greater than + * MAX_SKB_FRAGS, so we need to linearize parts of the skb + * to let the rest of data to be fit in the frags. + */ + if (count > MAX_SKB_FRAGS) { + copylen = iov_length(iv, count - MAX_SKB_FRAGS); + if (copylen < vnet_hdr_len) + copylen = 0; + else + copylen -= vnet_hdr_len; + } /* There are 256 bytes to be copied in skb, so there is enough * room for skb expand head in case it is used. * The rest buffer is mapped from userspace. */ - copylen = vnet_hdr.hdr_len; + if (copylen < vnet_hdr.hdr_len) + copylen = vnet_hdr.hdr_len; if (!copylen) copylen = GOODCOPY_LEN; } else @@ -695,10 +717,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, if (!skb) goto err; - if (zerocopy) { + if (zerocopy) err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count); - skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; - } else + else err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len, len); if (err) @@ -717,8 +738,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, rcu_read_lock_bh(); vlan = rcu_dereference_bh(q->vlan); /* copy skb_ubuf_info for callback when skb has no error */ - if (zerocopy) + if (zerocopy) { skb_shinfo(skb)->destructor_arg = m->msg_control; + skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; + } if (vlan) macvlan_start_xmit(skb, vlan->dev); else diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 0e01f4e5cd6..944cdfb80fe 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -135,6 +135,25 @@ config MDIO_OCTEON If in doubt, say Y. +config MDIO_BUS_MUX + tristate + depends on OF_MDIO + help + This module provides a driver framework for MDIO bus + multiplexers which connect one of several child MDIO busses + to a parent bus. Switching between child busses is done by + device specific drivers. + +config MDIO_BUS_MUX_GPIO + tristate "Support for GPIO controlled MDIO bus multiplexers" + depends on OF_GPIO && OF_MDIO + select MDIO_BUS_MUX + help + This module provides a driver for MDIO bus multiplexers that + are controlled via GPIO lines. The multiplexer connects one of + several child MDIO busses to a parent bus. Child bus + selection is under the control of GPIO lines. 
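A minimal sketch of the selection scheme this help text describes: the desired child bus number is driven onto the select lines in binary, least-significant bit on the first GPIO, so with two lines child bus 2 (binary 10) means the first line low and the second high. The function and parameter names below (example_mux_select, gpios, num_gpios) are illustrative only, not part of this patch.

#include <linux/gpio.h>

/*
 * Present 'desired_child' on the select lines, LSB on gpios[0].
 * gpio_set_value_cansleep() is used because the select lines may sit
 * behind a GPIO controller that can sleep (e.g. an I2C expander).
 */
static int example_mux_select(int desired_child, const int *gpios,
			      unsigned int num_gpios)
{
	unsigned int n;

	for (n = 0; n < num_gpios; n++)
		gpio_set_value_cansleep(gpios[n], (desired_child >> n) & 1);

	return 0;
}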
+ endif # PHYLIB config MICREL_KS8995MA diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index b7438b1b94b..f51af688ef8 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -25,3 +25,5 @@ obj-$(CONFIG_MICREL_PHY) += micrel.o obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o obj-$(CONFIG_AMD_PHY) += amd.o +obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o +obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c index e16f98cb4f0..cd802eb25fd 100644 --- a/drivers/net/phy/bcm63xx.c +++ b/drivers/net/phy/bcm63xx.c @@ -39,10 +39,7 @@ static int bcm63xx_config_init(struct phy_device *phydev) MII_BCM63XX_IR_SPEED | MII_BCM63XX_IR_LINK) | MII_BCM63XX_IR_EN; - err = phy_write(phydev, MII_BCM63XX_IR, reg); - if (err < 0) - return err; - return 0; + return phy_write(phydev, MII_BCM63XX_IR, reg); } static int bcm63xx_ack_interrupt(struct phy_device *phydev) diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c index 2f774acdb55..5f59cc06477 100644 --- a/drivers/net/phy/davicom.c +++ b/drivers/net/phy/davicom.c @@ -134,12 +134,7 @@ static int dm9161_config_init(struct phy_device *phydev) return err; /* Reconnect the PHY, and enable Autonegotiation */ - err = phy_write(phydev, MII_BMCR, BMCR_ANENABLE); - - if (err < 0) - return err; - - return 0; + return phy_write(phydev, MII_BMCR, BMCR_ANENABLE); } static int dm9161_ack_interrupt(struct phy_device *phydev) diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index dd7ae19579d..940b29022d0 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1215,6 +1215,36 @@ static void dp83640_txtstamp(struct phy_device *phydev, } } +static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info) +{ + struct dp83640_private *dp83640 = dev->priv; + + info->so_timestamping = + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->phc_index = ptp_clock_index(dp83640->clock->ptp_clock); + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON) | + (1 << HWTSTAMP_TX_ONESTEP_SYNC); + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + return 0; +} + static struct phy_driver dp83640_driver = { .phy_id = DP83640_PHY_ID, .phy_id_mask = 0xfffffff0, @@ -1225,6 +1255,7 @@ static struct phy_driver dp83640_driver = { .remove = dp83640_remove, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, + .ts_info = dp83640_ts_info, .hwtstamp = dp83640_hwtstamp, .rxtstamp = dp83640_rxtstamp, .txtstamp = dp83640_txtstamp, diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index e8b9c53c304..418928d644b 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -455,11 +455,7 @@ static int m88e1111_config_init(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, MII_BMCR, BMCR_RESET); - if (err < 0) - return err; - - return 0; + return phy_write(phydev, 
MII_BMCR, BMCR_RESET); } static int m88e1118_config_aneg(struct phy_device *phydev) @@ -515,11 +511,7 @@ static int m88e1118_config_init(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, MII_BMCR, BMCR_RESET); - if (err < 0) - return err; - - return 0; + return phy_write(phydev, MII_BMCR, BMCR_RESET); } static int m88e1149_config_init(struct phy_device *phydev) @@ -545,11 +537,7 @@ static int m88e1149_config_init(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, MII_BMCR, BMCR_RESET); - if (err < 0) - return err; - - return 0; + return phy_write(phydev, MII_BMCR, BMCR_RESET); } static int m88e1145_config_init(struct phy_device *phydev) diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c new file mode 100644 index 00000000000..e0cc4ef33de --- /dev/null +++ b/drivers/net/phy/mdio-mux-gpio.c @@ -0,0 +1,142 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2011, 2012 Cavium, Inc. + */ + +#include <linux/platform_device.h> +#include <linux/device.h> +#include <linux/of_mdio.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/phy.h> +#include <linux/mdio-mux.h> +#include <linux/of_gpio.h> + +#define DRV_VERSION "1.0" +#define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver" + +#define MDIO_MUX_GPIO_MAX_BITS 8 + +struct mdio_mux_gpio_state { + int gpio[MDIO_MUX_GPIO_MAX_BITS]; + unsigned int num_gpios; + void *mux_handle; +}; + +static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, + void *data) +{ + int change; + unsigned int n; + struct mdio_mux_gpio_state *s = data; + + if (current_child == desired_child) + return 0; + + change = current_child == -1 ? -1 : current_child ^ desired_child; + + for (n = 0; n < s->num_gpios; n++) { + if (change & 1) + gpio_set_value_cansleep(s->gpio[n], + (desired_child & 1) != 0); + change >>= 1; + desired_child >>= 1; + } + + return 0; +} + +static int __devinit mdio_mux_gpio_probe(struct platform_device *pdev) +{ + enum of_gpio_flags f; + struct mdio_mux_gpio_state *s; + unsigned int num_gpios; + unsigned int n; + int r; + + if (!pdev->dev.of_node) + return -ENODEV; + + num_gpios = of_gpio_count(pdev->dev.of_node); + if (num_gpios == 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS) + return -ENODEV; + + s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + s->num_gpios = num_gpios; + + for (n = 0; n < num_gpios; ) { + int gpio = of_get_gpio_flags(pdev->dev.of_node, n, &f); + if (gpio < 0) { + r = (gpio == -ENODEV) ? -EPROBE_DEFER : gpio; + goto err; + } + s->gpio[n] = gpio; + + n++; + + r = gpio_request(gpio, "mdio_mux_gpio"); + if (r) + goto err; + + r = gpio_direction_output(gpio, 0); + if (r) + goto err; + } + + r = mdio_mux_init(&pdev->dev, + mdio_mux_gpio_switch_fn, &s->mux_handle, s); + + if (r == 0) { + pdev->dev.platform_data = s; + return 0; + } +err: + while (n) { + n--; + gpio_free(s->gpio[n]); + } + devm_kfree(&pdev->dev, s); + return r; +} + +static int __devexit mdio_mux_gpio_remove(struct platform_device *pdev) +{ + struct mdio_mux_gpio_state *s = pdev->dev.platform_data; + mdio_mux_uninit(s->mux_handle); + return 0; +} + +static struct of_device_id mdio_mux_gpio_match[] = { + { + .compatible = "mdio-mux-gpio", + }, + { + /* Legacy compatible property. 
*/ + .compatible = "cavium,mdio-mux-sn74cbtlv3253", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, mdio_mux_gpio_match); + +static struct platform_driver mdio_mux_gpio_driver = { + .driver = { + .name = "mdio-mux-gpio", + .owner = THIS_MODULE, + .of_match_table = mdio_mux_gpio_match, + }, + .probe = mdio_mux_gpio_probe, + .remove = __devexit_p(mdio_mux_gpio_remove), +}; + +module_platform_driver(mdio_mux_gpio_driver); + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR("David Daney"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c new file mode 100644 index 00000000000..39ea0674dcd --- /dev/null +++ b/drivers/net/phy/mdio-mux.c @@ -0,0 +1,192 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2011, 2012 Cavium, Inc. + */ + +#include <linux/platform_device.h> +#include <linux/mdio-mux.h> +#include <linux/of_mdio.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/phy.h> + +#define DRV_VERSION "1.0" +#define DRV_DESCRIPTION "MDIO bus multiplexer driver" + +struct mdio_mux_child_bus; + +struct mdio_mux_parent_bus { + struct mii_bus *mii_bus; + int current_child; + int parent_id; + void *switch_data; + int (*switch_fn)(int current_child, int desired_child, void *data); + + /* List of our children linked through their next fields. */ + struct mdio_mux_child_bus *children; +}; + +struct mdio_mux_child_bus { + struct mii_bus *mii_bus; + struct mdio_mux_parent_bus *parent; + struct mdio_mux_child_bus *next; + int bus_number; + int phy_irq[PHY_MAX_ADDR]; +}; + +/* + * The parent bus' lock is used to order access to the switch_fn. + */ +static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum) +{ + struct mdio_mux_child_bus *cb = bus->priv; + struct mdio_mux_parent_bus *pb = cb->parent; + int r; + + mutex_lock(&pb->mii_bus->mdio_lock); + r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data); + if (r) + goto out; + + pb->current_child = cb->bus_number; + + r = pb->mii_bus->read(pb->mii_bus, phy_id, regnum); +out: + mutex_unlock(&pb->mii_bus->mdio_lock); + + return r; +} + +/* + * The parent bus' lock is used to order access to the switch_fn. 
+ */ +static int mdio_mux_write(struct mii_bus *bus, int phy_id, + int regnum, u16 val) +{ + struct mdio_mux_child_bus *cb = bus->priv; + struct mdio_mux_parent_bus *pb = cb->parent; + + int r; + + mutex_lock(&pb->mii_bus->mdio_lock); + r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data); + if (r) + goto out; + + pb->current_child = cb->bus_number; + + r = pb->mii_bus->write(pb->mii_bus, phy_id, regnum, val); +out: + mutex_unlock(&pb->mii_bus->mdio_lock); + + return r; +} + +static int parent_count; + +int mdio_mux_init(struct device *dev, + int (*switch_fn)(int cur, int desired, void *data), + void **mux_handle, + void *data) +{ + struct device_node *parent_bus_node; + struct device_node *child_bus_node; + int r, ret_val; + struct mii_bus *parent_bus; + struct mdio_mux_parent_bus *pb; + struct mdio_mux_child_bus *cb; + + if (!dev->of_node) + return -ENODEV; + + parent_bus_node = of_parse_phandle(dev->of_node, "mdio-parent-bus", 0); + + if (!parent_bus_node) + return -ENODEV; + + parent_bus = of_mdio_find_bus(parent_bus_node); + if (parent_bus == NULL) { + ret_val = -EPROBE_DEFER; + goto err_parent_bus; + } + + pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); + if (pb == NULL) { + ret_val = -ENOMEM; + goto err_parent_bus; + } + + pb->switch_data = data; + pb->switch_fn = switch_fn; + pb->current_child = -1; + pb->parent_id = parent_count++; + pb->mii_bus = parent_bus; + + ret_val = -ENODEV; + for_each_child_of_node(dev->of_node, child_bus_node) { + u32 v; + + r = of_property_read_u32(child_bus_node, "reg", &v); + if (r) + continue; + + cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL); + if (cb == NULL) { + dev_err(dev, + "Error: Failed to allocate memory for child\n"); + ret_val = -ENOMEM; + break; + } + cb->bus_number = v; + cb->parent = pb; + cb->mii_bus = mdiobus_alloc(); + cb->mii_bus->priv = cb; + + cb->mii_bus->irq = cb->phy_irq; + cb->mii_bus->name = "mdio_mux"; + snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x", + pb->parent_id, v); + cb->mii_bus->parent = dev; + cb->mii_bus->read = mdio_mux_read; + cb->mii_bus->write = mdio_mux_write; + r = of_mdiobus_register(cb->mii_bus, child_bus_node); + if (r) { + mdiobus_free(cb->mii_bus); + devm_kfree(dev, cb); + } else { + of_node_get(child_bus_node); + cb->next = pb->children; + pb->children = cb; + } + } + if (pb->children) { + *mux_handle = pb; + dev_info(dev, "Version " DRV_VERSION "\n"); + return 0; + } +err_parent_bus: + of_node_put(parent_bus_node); + return ret_val; +} +EXPORT_SYMBOL_GPL(mdio_mux_init); + +void mdio_mux_uninit(void *mux_handle) +{ + struct mdio_mux_parent_bus *pb = mux_handle; + struct mdio_mux_child_bus *cb = pb->children; + + while (cb) { + mdiobus_unregister(cb->mii_bus); + mdiobus_free(cb->mii_bus); + cb = cb->next; + } +} +EXPORT_SYMBOL_GPL(mdio_mux_uninit); + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR("David Daney"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 8985cc62cf4..683ef1ce551 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -88,6 +88,38 @@ static struct class mdio_bus_class = { .dev_release = mdiobus_release, }; +#if IS_ENABLED(CONFIG_OF_MDIO) +/* Helper function for of_mdio_find_bus */ +static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np) +{ + return dev->of_node == mdio_bus_np; +} +/** + * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. + * @mdio_np: Pointer to the mii_bus. 
+ * + * Returns a pointer to the mii_bus, or NULL if none found. + * + * Because the association of a device_node and mii_bus is made via + * of_mdiobus_register(), the mii_bus cannot be found before it is + * registered with of_mdiobus_register(). + * + */ +struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np) +{ + struct device *d; + + if (!mdio_bus_np) + return NULL; + + d = class_find_device(&mdio_bus_class, NULL, mdio_bus_np, + of_mdio_bus_match); + + return d ? to_mii_bus(d) : NULL; +} +EXPORT_SYMBOL(of_mdio_find_bus); +#endif + /** * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus * @bus: target mii_bus diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index e8c42d6a7d1..de86a558222 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -207,7 +207,7 @@ static struct phy_device* phy_device_create(struct mii_bus *bus, * Description: Reads the ID registers of the PHY at @addr on the * @bus, stores it in @phy_id and returns zero on success. */ -int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id) +static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id) { int phy_reg; @@ -230,7 +230,6 @@ int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id) return 0; } -EXPORT_SYMBOL(get_phy_id); /** * get_phy_device - reads the specified PHY device and returns its @phy_device struct diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index 116a2dd7c87..4eb98bc52a0 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -348,7 +348,6 @@ static int __devexit ks8995_remove(struct spi_device *spi) static struct spi_driver ks8995_driver = { .driver = { .name = "spi-ks8995", - .bus = &spi_bus_type, .owner = THIS_MODULE, }, .probe = ks8995_probe, diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c index af95a98fd86..a031f6b456b 100644 --- a/drivers/net/ppp/ppp_async.c +++ b/drivers/net/ppp/ppp_async.c @@ -613,7 +613,7 @@ ppp_async_encode(struct asyncppp *ap) *buf++ = PPP_FLAG; ap->olim = buf; - kfree_skb(ap->tpkt); + consume_skb(ap->tpkt); ap->tpkt = NULL; return 1; } diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 21d7151fb0a..5c0557222f2 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1092,13 +1092,13 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb) new_skb->data, skb->len + 2, compressor_skb_size); if (len > 0 && (ppp->flags & SC_CCP_UP)) { - kfree_skb(skb); + consume_skb(skb); skb = new_skb; skb_put(skb, len); skb_pull(skb, 2); /* pull off A/C bytes */ } else if (len == 0) { /* didn't compress, or CCP not up yet */ - kfree_skb(new_skb); + consume_skb(new_skb); new_skb = skb; } else { /* @@ -1112,7 +1112,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb) if (net_ratelimit()) netdev_err(ppp->dev, "ppp: compressor dropped pkt\n"); kfree_skb(skb); - kfree_skb(new_skb); + consume_skb(new_skb); new_skb = NULL; } return new_skb; @@ -1178,7 +1178,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) !(ppp->flags & SC_NO_TCP_CCID)); if (cp == skb->data + 2) { /* didn't compress */ - kfree_skb(new_skb); + consume_skb(new_skb); } else { if (cp[0] & SL_TYPE_COMPRESSED_TCP) { proto = PPP_VJC_COMP; @@ -1187,7 +1187,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) proto = PPP_VJC_UNCOMP; cp[0] = skb->data[2]; } - kfree_skb(skb); + consume_skb(skb); skb = new_skb; cp = skb_put(skb, len + 2); cp[0] = 0; @@ -1703,7 +1703,7 @@ 
ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) } skb_reserve(ns, 2); skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len); - kfree_skb(skb); + consume_skb(skb); skb = ns; } else @@ -1851,7 +1851,7 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb) goto err; } - kfree_skb(skb); + consume_skb(skb); skb = ns; skb_put(skb, len); skb_pull(skb, 2); /* pull off the A/C bytes */ diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c index 55e466c511d..1a12033d2ef 100644 --- a/drivers/net/ppp/ppp_synctty.c +++ b/drivers/net/ppp/ppp_synctty.c @@ -588,7 +588,7 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb) skb_reserve(npkt,2); skb_copy_from_linear_data(skb, skb_put(npkt, skb->len), skb->len); - kfree_skb(skb); + consume_skb(skb); skb = npkt; } skb_push(skb,2); @@ -656,7 +656,7 @@ ppp_sync_push(struct syncppp *ap) if (sent < ap->tpkt->len) { tty_stuffed = 1; } else { - kfree_skb(ap->tpkt); + consume_skb(ap->tpkt); ap->tpkt = NULL; clear_bit(XMIT_FULL, &ap->xmit_flags); done = 1; diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 2fa1a9b6f49..cbf7047decc 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -201,7 +201,7 @@ static int __set_item(struct pppoe_net *pn, struct pppox_sock *po) return 0; } -static struct pppox_sock *__delete_item(struct pppoe_net *pn, __be16 sid, +static void __delete_item(struct pppoe_net *pn, __be16 sid, char *addr, int ifindex) { int hash = hash_item(sid, addr); @@ -220,8 +220,6 @@ static struct pppox_sock *__delete_item(struct pppoe_net *pn, __be16 sid, src = &ret->next; ret = ret->next; } - - return ret; } /********************************************************************** @@ -264,16 +262,12 @@ static inline struct pppox_sock *get_item_by_addr(struct net *net, return pppox_sock; } -static inline struct pppox_sock *delete_item(struct pppoe_net *pn, __be16 sid, +static inline void delete_item(struct pppoe_net *pn, __be16 sid, char *addr, int ifindex) { - struct pppox_sock *ret; - write_lock_bh(&pn->hash_lock); - ret = __delete_item(pn, sid, addr, ifindex); + __delete_item(pn, sid, addr, ifindex); write_unlock_bh(&pn->hash_lock); - - return ret; } /*************************************************************************** @@ -990,8 +984,10 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, if (skb) { total_len = min_t(size_t, total_len, skb->len); error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len); - if (error == 0) - error = total_len; + if (error == 0) { + consume_skb(skb); + return total_len; + } } kfree_skb(skb); diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 885dbdd9c39..1c98321b56c 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -116,8 +116,8 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr) int i; rcu_read_lock(); - for (i = find_next_bit(callid_bitmap, MAX_CALLID, 1); i < MAX_CALLID; - i = find_next_bit(callid_bitmap, MAX_CALLID, i + 1)) { + i = 1; + for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) { sock = rcu_dereference(callid_sock[i]); if (!sock) continue; @@ -209,7 +209,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) } if (skb->sk) skb_set_owner_w(new_skb, skb->sk); - kfree_skb(skb); + consume_skb(skb); skb = new_skb; } diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig index 248a144033c..89024d5fc33 100644 --- a/drivers/net/team/Kconfig +++ b/drivers/net/team/Kconfig @@ -40,4 +40,15 @@ config NET_TEAM_MODE_ACTIVEBACKUP To compile this team 
mode as a module, choose M here: the module will be called team_mode_activebackup. +config NET_TEAM_MODE_LOADBALANCE + tristate "Load-balance mode support" + depends on NET_TEAM + ---help--- + This mode provides load balancing functionality. Tx port selection + is done using BPF function set up from userspace (bpf_hash_func + option) + + To compile this team mode as a module, choose M here: the module + will be called team_mode_loadbalance. + endif # NET_TEAM diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile index 85f2028a87a..fb9f4c1c51f 100644 --- a/drivers/net/team/Makefile +++ b/drivers/net/team/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_NET_TEAM) += team.o obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o +obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 8f81805c682..c61ae35a53c 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -65,7 +65,7 @@ static int __set_port_mac(struct net_device *port_dev, return dev_set_mac_address(port_dev, &addr); } -int team_port_set_orig_mac(struct team_port *port) +static int team_port_set_orig_mac(struct team_port *port) { return __set_port_mac(port->dev, port->orig.dev_addr); } @@ -76,12 +76,26 @@ int team_port_set_team_mac(struct team_port *port) } EXPORT_SYMBOL(team_port_set_team_mac); +static void team_refresh_port_linkup(struct team_port *port) +{ + port->linkup = port->user.linkup_enabled ? port->user.linkup : + port->state.linkup; +} /******************* * Options handling *******************/ -struct team_option *__team_find_option(struct team *team, const char *opt_name) +struct team_option_inst { /* One for each option instance */ + struct list_head list; + struct team_option *option; + struct team_port *port; /* != NULL if per-port */ + bool changed; + bool removed; +}; + +static struct team_option *__team_find_option(struct team *team, + const char *opt_name) { struct team_option *option; @@ -92,9 +106,121 @@ struct team_option *__team_find_option(struct team *team, const char *opt_name) return NULL; } -int __team_options_register(struct team *team, - const struct team_option *option, - size_t option_count) +static int __team_option_inst_add(struct team *team, struct team_option *option, + struct team_port *port) +{ + struct team_option_inst *opt_inst; + + opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL); + if (!opt_inst) + return -ENOMEM; + opt_inst->option = option; + opt_inst->port = port; + opt_inst->changed = true; + opt_inst->removed = false; + list_add_tail(&opt_inst->list, &team->option_inst_list); + return 0; +} + +static void __team_option_inst_del(struct team_option_inst *opt_inst) +{ + list_del(&opt_inst->list); + kfree(opt_inst); +} + +static void __team_option_inst_del_option(struct team *team, + struct team_option *option) +{ + struct team_option_inst *opt_inst, *tmp; + + list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) { + if (opt_inst->option == option) + __team_option_inst_del(opt_inst); + } +} + +static int __team_option_inst_add_option(struct team *team, + struct team_option *option) +{ + struct team_port *port; + int err; + + if (!option->per_port) + return __team_option_inst_add(team, option, 0); + + list_for_each_entry(port, &team->port_list, list) { + err = __team_option_inst_add(team, option, port); + if (err) + goto inst_del_option; + } + return 0; + +inst_del_option: + 
__team_option_inst_del_option(team, option); + return err; +} + +static void __team_option_inst_mark_removed_option(struct team *team, + struct team_option *option) +{ + struct team_option_inst *opt_inst; + + list_for_each_entry(opt_inst, &team->option_inst_list, list) { + if (opt_inst->option == option) { + opt_inst->changed = true; + opt_inst->removed = true; + } + } +} + +static void __team_option_inst_del_port(struct team *team, + struct team_port *port) +{ + struct team_option_inst *opt_inst, *tmp; + + list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) { + if (opt_inst->option->per_port && + opt_inst->port == port) + __team_option_inst_del(opt_inst); + } +} + +static int __team_option_inst_add_port(struct team *team, + struct team_port *port) +{ + struct team_option *option; + int err; + + list_for_each_entry(option, &team->option_list, list) { + if (!option->per_port) + continue; + err = __team_option_inst_add(team, option, port); + if (err) + goto inst_del_port; + } + return 0; + +inst_del_port: + __team_option_inst_del_port(team, port); + return err; +} + +static void __team_option_inst_mark_removed_port(struct team *team, + struct team_port *port) +{ + struct team_option_inst *opt_inst; + + list_for_each_entry(opt_inst, &team->option_inst_list, list) { + if (opt_inst->port == port) { + opt_inst->changed = true; + opt_inst->removed = true; + } + } +} + +static int __team_options_register(struct team *team, + const struct team_option *option, + size_t option_count) { int i; struct team_option **dst_opts; @@ -107,26 +233,32 @@ int __team_options_register(struct team *team, for (i = 0; i < option_count; i++, option++) { if (__team_find_option(team, option->name)) { err = -EEXIST; - goto rollback; + goto alloc_rollback; } dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL); if (!dst_opts[i]) { err = -ENOMEM; - goto rollback; + goto alloc_rollback; } } for (i = 0; i < option_count; i++) { - dst_opts[i]->changed = true; - dst_opts[i]->removed = false; + err = __team_option_inst_add_option(team, dst_opts[i]); + if (err) + goto inst_rollback; list_add_tail(&dst_opts[i]->list, &team->option_list); } kfree(dst_opts); return 0; -rollback: - for (i = 0; i < option_count; i++) +inst_rollback: + for (i--; i >= 0; i--) + __team_option_inst_del_option(team, dst_opts[i]); + + i = option_count - 1; +alloc_rollback: + for (i--; i >= 0; i--) kfree(dst_opts[i]); kfree(dst_opts); @@ -143,10 +275,8 @@ static void __team_options_mark_removed(struct team *team, struct team_option *del_opt; del_opt = __team_find_option(team, option->name); - if (del_opt) { - del_opt->changed = true; - del_opt->removed = true; - } + if (del_opt) + __team_option_inst_mark_removed_option(team, del_opt); } } @@ -161,6 +291,7 @@ static void __team_options_unregister(struct team *team, del_opt = __team_find_option(team, option->name); if (del_opt) { + __team_option_inst_del_option(team, del_opt); list_del(&del_opt->list); kfree(del_opt); } @@ -193,22 +324,42 @@ void team_options_unregister(struct team *team, } EXPORT_SYMBOL(team_options_unregister); -static int team_option_get(struct team *team, struct team_option *option, - void *arg) +static int team_option_port_add(struct team *team, struct team_port *port) +{ + int err; + + err = __team_option_inst_add_port(team, port); + if (err) + return err; + __team_options_change_check(team); + return 0; +} + +static void team_option_port_del(struct team *team, struct team_port *port) +{ + __team_option_inst_mark_removed_port(team, port); + 
__team_options_change_check(team); + __team_option_inst_del_port(team, port); +} + +static int team_option_get(struct team *team, + struct team_option_inst *opt_inst, + struct team_gsetter_ctx *ctx) { - return option->getter(team, arg); + return opt_inst->option->getter(team, ctx); } -static int team_option_set(struct team *team, struct team_option *option, - void *arg) +static int team_option_set(struct team *team, + struct team_option_inst *opt_inst, + struct team_gsetter_ctx *ctx) { int err; - err = option->setter(team, arg); + err = opt_inst->option->setter(team, ctx); if (err) return err; - option->changed = true; + opt_inst->changed = true; __team_options_change_check(team); return err; } @@ -408,6 +559,8 @@ static int team_change_mode(struct team *team, const char *kind) * Rx path frame handler ************************/ +static bool team_port_enabled(struct team_port *port); + /* note: already called with rcu_read_lock */ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) { @@ -424,8 +577,12 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) port = team_port_get_rcu(skb->dev); team = port->team; - - res = team->ops.receive(team, port, skb); + if (!team_port_enabled(port)) { + /* allow exact match delivery for disabled ports */ + res = RX_HANDLER_EXACT; + } else { + res = team->ops.receive(team, port, skb); + } if (res == RX_HANDLER_ANOTHER) { struct team_pcpu_stats *pcpu_stats; @@ -461,17 +618,25 @@ static bool team_port_find(const struct team *team, return false; } +static bool team_port_enabled(struct team_port *port) +{ + return port->index != -1; +} + /* - * Add/delete port to the team port list. Write guarded by rtnl_lock. - * Takes care of correct port->index setup (might be racy). + * Enable/disable port by adding to enabled port hashlist and setting + * port->index (Might be racy so reader could see incorrect ifindex when + * processing a flying packet, but that is not a problem). Write guarded + * by team->lock. 
*/ -static void team_port_list_add_port(struct team *team, - struct team_port *port) +static void team_port_enable(struct team *team, + struct team_port *port) { - port->index = team->port_count++; + if (team_port_enabled(port)) + return; + port->index = team->en_port_count++; hlist_add_head_rcu(&port->hlist, team_port_index_hash(team, port->index)); - list_add_tail_rcu(&port->list, &team->port_list); } static void __reconstruct_port_hlist(struct team *team, int rm_index) @@ -479,7 +644,7 @@ static void __reconstruct_port_hlist(struct team *team, int rm_index) int i; struct team_port *port; - for (i = rm_index + 1; i < team->port_count; i++) { + for (i = rm_index + 1; i < team->en_port_count; i++) { port = team_get_port_by_index(team, i); hlist_del_rcu(&port->hlist); port->index--; @@ -488,15 +653,17 @@ static void __reconstruct_port_hlist(struct team *team, int rm_index) } } -static void team_port_list_del_port(struct team *team, - struct team_port *port) +static void team_port_disable(struct team *team, + struct team_port *port) { int rm_index = port->index; + if (!team_port_enabled(port)) + return; hlist_del_rcu(&port->hlist); - list_del_rcu(&port->list); __reconstruct_port_hlist(team, rm_index); - team->port_count--; + team->en_port_count--; + port->index = -1; } #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ @@ -642,7 +809,16 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_handler_register; } - team_port_list_add_port(team, port); + err = team_option_port_add(team, port); + if (err) { + netdev_err(dev, "Device %s failed to add per-port options\n", + portname); + goto err_option_port_add; + } + + port->index = -1; + team_port_enable(team, port); + list_add_tail_rcu(&port->list, &team->port_list); team_adjust_ops(team); __team_compute_features(team); __team_port_change_check(port, !!netif_carrier_ok(port_dev)); @@ -651,6 +827,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev) return 0; +err_option_port_add: + netdev_rx_handler_unregister(port_dev); + err_handler_register: netdev_set_master(port_dev, NULL); @@ -688,8 +867,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev) port->removed = true; __team_port_change_check(port, false); - team_port_list_del_port(team, port); + team_port_disable(team, port); + list_del_rcu(&port->list); team_adjust_ops(team); + team_option_port_del(team, port); netdev_rx_handler_unregister(port_dev); netdev_set_master(port_dev, NULL); vlan_vids_del_by_dev(port_dev, dev); @@ -712,19 +893,66 @@ static int team_port_del(struct team *team, struct net_device *port_dev) static const char team_no_mode_kind[] = "*NOMODE*"; -static int team_mode_option_get(struct team *team, void *arg) +static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx) +{ + ctx->data.str_val = team->mode ? 
team->mode->kind : team_no_mode_kind; + return 0; +} + +static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx) +{ + return team_change_mode(team, ctx->data.str_val); +} + +static int team_port_en_option_get(struct team *team, + struct team_gsetter_ctx *ctx) +{ + ctx->data.bool_val = team_port_enabled(ctx->port); + return 0; +} + +static int team_port_en_option_set(struct team *team, + struct team_gsetter_ctx *ctx) +{ + if (ctx->data.bool_val) + team_port_enable(team, ctx->port); + else + team_port_disable(team, ctx->port); + return 0; +} + +static int team_user_linkup_option_get(struct team *team, + struct team_gsetter_ctx *ctx) +{ + ctx->data.bool_val = ctx->port->user.linkup; + return 0; +} + +static int team_user_linkup_option_set(struct team *team, + struct team_gsetter_ctx *ctx) +{ + ctx->port->user.linkup = ctx->data.bool_val; + team_refresh_port_linkup(ctx->port); + return 0; +} + +static int team_user_linkup_en_option_get(struct team *team, + struct team_gsetter_ctx *ctx) { - const char **str = arg; + struct team_port *port = ctx->port; - *str = team->mode ? team->mode->kind : team_no_mode_kind; + ctx->data.bool_val = port->user.linkup_enabled; return 0; } -static int team_mode_option_set(struct team *team, void *arg) +static int team_user_linkup_en_option_set(struct team *team, + struct team_gsetter_ctx *ctx) { - const char **str = arg; + struct team_port *port = ctx->port; - return team_change_mode(team, *str); + port->user.linkup_enabled = ctx->data.bool_val; + team_refresh_port_linkup(ctx->port); + return 0; } static const struct team_option team_options[] = { @@ -734,6 +962,27 @@ static const struct team_option team_options[] = { .getter = team_mode_option_get, .setter = team_mode_option_set, }, + { + .name = "enabled", + .type = TEAM_OPTION_TYPE_BOOL, + .per_port = true, + .getter = team_port_en_option_get, + .setter = team_port_en_option_set, + }, + { + .name = "user_linkup", + .type = TEAM_OPTION_TYPE_BOOL, + .per_port = true, + .getter = team_user_linkup_option_get, + .setter = team_user_linkup_option_set, + }, + { + .name = "user_linkup_enabled", + .type = TEAM_OPTION_TYPE_BOOL, + .per_port = true, + .getter = team_user_linkup_en_option_get, + .setter = team_user_linkup_en_option_set, + }, }; static int team_init(struct net_device *dev) @@ -750,12 +999,13 @@ static int team_init(struct net_device *dev) return -ENOMEM; for (i = 0; i < TEAM_PORT_HASHENTRIES; i++) - INIT_HLIST_HEAD(&team->port_hlist[i]); + INIT_HLIST_HEAD(&team->en_port_hlist[i]); INIT_LIST_HEAD(&team->port_list); team_adjust_ops(team); INIT_LIST_HEAD(&team->option_list); + INIT_LIST_HEAD(&team->option_inst_list); err = team_options_register(team, team_options, ARRAY_SIZE(team_options)); if (err) goto err_options_register; @@ -1145,10 +1395,7 @@ team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = { }, [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG }, [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 }, - [TEAM_ATTR_OPTION_DATA] = { - .type = NLA_BINARY, - .len = TEAM_STRING_MAX_LEN, - }, + [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY }, }; static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) @@ -1241,46 +1488,86 @@ static int team_nl_fill_options_get(struct sk_buff *skb, { struct nlattr *option_list; void *hdr; - struct team_option *option; + struct team_option_inst *opt_inst; + int err; hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, TEAM_CMD_OPTIONS_GET); if (IS_ERR(hdr)) return PTR_ERR(hdr); - NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex); + 
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) + goto nla_put_failure; option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION); if (!option_list) return -EMSGSIZE; - list_for_each_entry(option, &team->option_list, list) { + list_for_each_entry(opt_inst, &team->option_inst_list, list) { struct nlattr *option_item; - long arg; + struct team_option *option = opt_inst->option; + struct team_gsetter_ctx ctx; /* Include only changed options if fill all mode is not on */ - if (!fillall && !option->changed) + if (!fillall && !opt_inst->changed) continue; option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION); if (!option_item) goto nla_put_failure; - NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name); - if (option->changed) { - NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED); - option->changed = false; + if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name)) + goto nla_put_failure; + if (opt_inst->changed) { + if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED)) + goto nla_put_failure; + opt_inst->changed = false; } - if (option->removed) - NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_REMOVED); + if (opt_inst->removed && + nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED)) + goto nla_put_failure; + if (opt_inst->port && + nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX, + opt_inst->port->dev->ifindex)) + goto nla_put_failure; + ctx.port = opt_inst->port; switch (option->type) { case TEAM_OPTION_TYPE_U32: - NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32); - team_option_get(team, option, &arg); - NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg); + if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32)) + goto nla_put_failure; + err = team_option_get(team, opt_inst, &ctx); + if (err) + goto errout; + if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, + ctx.data.u32_val)) + goto nla_put_failure; break; case TEAM_OPTION_TYPE_STRING: - NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING); - team_option_get(team, option, &arg); - NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA, - (char *) arg); + if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING)) + goto nla_put_failure; + err = team_option_get(team, opt_inst, &ctx); + if (err) + goto errout; + if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA, + ctx.data.str_val)) + goto nla_put_failure; + break; + case TEAM_OPTION_TYPE_BINARY: + if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY)) + goto nla_put_failure; + err = team_option_get(team, opt_inst, &ctx); + if (err) + goto errout; + if (nla_put(skb, TEAM_ATTR_OPTION_DATA, + ctx.data.bin_val.len, ctx.data.bin_val.ptr)) + goto nla_put_failure; + break; + case TEAM_OPTION_TYPE_BOOL: + if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG)) + goto nla_put_failure; + err = team_option_get(team, opt_inst, &ctx); + if (err) + goto errout; + if (ctx.data.bool_val && + nla_put_flag(skb, TEAM_ATTR_OPTION_DATA)) + goto nla_put_failure; break; default: BUG(); @@ -1292,8 +1579,10 @@ static int team_nl_fill_options_get(struct sk_buff *skb, return genlmsg_end(skb, hdr); nla_put_failure: + err = -EMSGSIZE; +errout: genlmsg_cancel(skb, hdr); - return -EMSGSIZE; + return err; } static int team_nl_fill_options_get_all(struct sk_buff *skb, @@ -1339,9 +1628,12 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) } nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) { - struct nlattr *mode_attrs[TEAM_ATTR_OPTION_MAX + 1]; + struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; + struct nlattr *attr_port_ifindex; + struct nlattr *attr_data; enum team_option_type opt_type; - struct team_option 
*option; + int opt_port_ifindex = 0; /* != 0 for per-port options */ + struct team_option_inst *opt_inst; char *opt_name; bool opt_found = false; @@ -1349,48 +1641,78 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) err = -EINVAL; goto team_put; } - err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX, + err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX, nl_option, team_nl_option_policy); if (err) goto team_put; - if (!mode_attrs[TEAM_ATTR_OPTION_NAME] || - !mode_attrs[TEAM_ATTR_OPTION_TYPE] || - !mode_attrs[TEAM_ATTR_OPTION_DATA]) { + if (!opt_attrs[TEAM_ATTR_OPTION_NAME] || + !opt_attrs[TEAM_ATTR_OPTION_TYPE]) { err = -EINVAL; goto team_put; } - switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) { + switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) { case NLA_U32: opt_type = TEAM_OPTION_TYPE_U32; break; case NLA_STRING: opt_type = TEAM_OPTION_TYPE_STRING; break; + case NLA_BINARY: + opt_type = TEAM_OPTION_TYPE_BINARY; + break; + case NLA_FLAG: + opt_type = TEAM_OPTION_TYPE_BOOL; + break; default: goto team_put; } - opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]); - list_for_each_entry(option, &team->option_list, list) { - long arg; - struct nlattr *opt_data_attr; + attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA]; + if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) { + err = -EINVAL; + goto team_put; + } + + opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]); + attr_port_ifindex = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX]; + if (attr_port_ifindex) + opt_port_ifindex = nla_get_u32(attr_port_ifindex); + + list_for_each_entry(opt_inst, &team->option_inst_list, list) { + struct team_option *option = opt_inst->option; + struct team_gsetter_ctx ctx; + int tmp_ifindex; + tmp_ifindex = opt_inst->port ? + opt_inst->port->dev->ifindex : 0; if (option->type != opt_type || - strcmp(option->name, opt_name)) + strcmp(option->name, opt_name) || + tmp_ifindex != opt_port_ifindex) continue; opt_found = true; - opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA]; + ctx.port = opt_inst->port; switch (opt_type) { case TEAM_OPTION_TYPE_U32: - arg = nla_get_u32(opt_data_attr); + ctx.data.u32_val = nla_get_u32(attr_data); break; case TEAM_OPTION_TYPE_STRING: - arg = (long) nla_data(opt_data_attr); + if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) { + err = -EINVAL; + goto team_put; + } + ctx.data.str_val = nla_data(attr_data); + break; + case TEAM_OPTION_TYPE_BINARY: + ctx.data.bin_val.len = nla_len(attr_data); + ctx.data.bin_val.ptr = nla_data(attr_data); + break; + case TEAM_OPTION_TYPE_BOOL: + ctx.data.bool_val = attr_data ? 
true : false; break; default: BUG(); } - err = team_option_set(team, option, &arg); + err = team_option_set(team, opt_inst, &ctx); if (err) goto team_put; } @@ -1420,7 +1742,8 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb, if (IS_ERR(hdr)) return PTR_ERR(hdr); - NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex); + if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) + goto nla_put_failure; port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT); if (!port_list) return -EMSGSIZE; @@ -1434,17 +1757,20 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb, port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT); if (!port_item) goto nla_put_failure; - NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex); + if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex)) + goto nla_put_failure; if (port->changed) { - NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED); + if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED)) + goto nla_put_failure; port->changed = false; } - if (port->removed) - NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_REMOVED); - if (port->linkup) - NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP); - NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed); - NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex); + if ((port->removed && + nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) || + (port->state.linkup && + nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) || + nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) || + nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex)) + goto nla_put_failure; nla_nest_end(skb, port_item); } @@ -1603,23 +1929,24 @@ static void __team_port_change_check(struct team_port *port, bool linkup) { int err; - if (!port->removed && port->linkup == linkup) + if (!port->removed && port->state.linkup == linkup) return; port->changed = true; - port->linkup = linkup; + port->state.linkup = linkup; + team_refresh_port_linkup(port); if (linkup) { struct ethtool_cmd ecmd; err = __ethtool_get_settings(port->dev, &ecmd); if (!err) { - port->speed = ethtool_cmd_speed(&ecmd); - port->duplex = ecmd.duplex; + port->state.speed = ethtool_cmd_speed(&ecmd); + port->state.duplex = ecmd.duplex; goto send_event; } } - port->speed = 0; - port->duplex = 0; + port->state.speed = 0; + port->state.duplex = 0; send_event: err = team_nl_send_event_port_list_get(port->team); diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c index f4d960e82e2..fd6bd03aaa8 100644 --- a/drivers/net/team/team_mode_activebackup.c +++ b/drivers/net/team/team_mode_activebackup.c @@ -59,23 +59,21 @@ static void ab_port_leave(struct team *team, struct team_port *port) RCU_INIT_POINTER(ab_priv(team)->active_port, NULL); } -static int ab_active_port_get(struct team *team, void *arg) +static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx) { - u32 *ifindex = arg; - - *ifindex = 0; if (ab_priv(team)->active_port) - *ifindex = ab_priv(team)->active_port->dev->ifindex; + ctx->data.u32_val = ab_priv(team)->active_port->dev->ifindex; + else + ctx->data.u32_val = 0; return 0; } -static int ab_active_port_set(struct team *team, void *arg) +static int ab_active_port_set(struct team *team, struct team_gsetter_ctx *ctx) { - u32 *ifindex = arg; struct team_port *port; - list_for_each_entry_rcu(port, &team->port_list, list) { - if (port->dev->ifindex == *ifindex) { + list_for_each_entry(port, &team->port_list, list) { + if (port->dev->ifindex == ctx->data.u32_val) { rcu_assign_pointer(ab_priv(team)->active_port, port); return 0; } @@ 
-92,12 +90,12 @@ static const struct team_option ab_options[] = { }, }; -int ab_init(struct team *team) +static int ab_init(struct team *team) { return team_options_register(team, ab_options, ARRAY_SIZE(ab_options)); } -void ab_exit(struct team *team) +static void ab_exit(struct team *team) { team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options)); } diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c new file mode 100644 index 00000000000..86e8183c8e3 --- /dev/null +++ b/drivers/net/team/team_mode_loadbalance.c @@ -0,0 +1,174 @@ +/* + * drivers/net/team/team_mode_loadbalance.c - Load-balancing mode for team + * Copyright (c) 2012 Jiri Pirko <jpirko@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/netdevice.h> +#include <linux/filter.h> +#include <linux/if_team.h> + +struct lb_priv { + struct sk_filter __rcu *fp; + struct sock_fprog *orig_fprog; +}; + +static struct lb_priv *lb_priv(struct team *team) +{ + return (struct lb_priv *) &team->mode_priv; +} + +static bool lb_transmit(struct team *team, struct sk_buff *skb) +{ + struct sk_filter *fp; + struct team_port *port; + unsigned int hash; + int port_index; + + fp = rcu_dereference(lb_priv(team)->fp); + if (unlikely(!fp)) + goto drop; + hash = SK_RUN_FILTER(fp, skb); + port_index = hash % team->en_port_count; + port = team_get_port_by_index_rcu(team, port_index); + if (unlikely(!port)) + goto drop; + skb->dev = port->dev; + if (dev_queue_xmit(skb)) + return false; + return true; + +drop: + dev_kfree_skb_any(skb); + return false; +} + +static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx) +{ + if (!lb_priv(team)->orig_fprog) { + ctx->data.bin_val.len = 0; + ctx->data.bin_val.ptr = NULL; + return 0; + } + ctx->data.bin_val.len = lb_priv(team)->orig_fprog->len * + sizeof(struct sock_filter); + ctx->data.bin_val.ptr = lb_priv(team)->orig_fprog->filter; + return 0; +} + +static int __fprog_create(struct sock_fprog **pfprog, u32 data_len, + const void *data) +{ + struct sock_fprog *fprog; + struct sock_filter *filter = (struct sock_filter *) data; + + if (data_len % sizeof(struct sock_filter)) + return -EINVAL; + fprog = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL); + if (!fprog) + return -ENOMEM; + fprog->filter = kmemdup(filter, data_len, GFP_KERNEL); + if (!fprog->filter) { + kfree(fprog); + return -ENOMEM; + } + fprog->len = data_len / sizeof(struct sock_filter); + *pfprog = fprog; + return 0; +} + +static void __fprog_destroy(struct sock_fprog *fprog) +{ + kfree(fprog->filter); + kfree(fprog); +} + +static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx) +{ + struct sk_filter *fp = NULL; + struct sock_fprog *fprog = NULL; + int err; + + if (ctx->data.bin_val.len) { + err = __fprog_create(&fprog, ctx->data.bin_val.len, + ctx->data.bin_val.ptr); + if (err) + return err; + err = sk_unattached_filter_create(&fp, fprog); + if (err) { + __fprog_destroy(fprog); + return err; + } + } + + if (lb_priv(team)->orig_fprog) { + /* Clear old filter data */ + __fprog_destroy(lb_priv(team)->orig_fprog); + sk_unattached_filter_destroy(lb_priv(team)->fp); + } + + 
rcu_assign_pointer(lb_priv(team)->fp, fp); + lb_priv(team)->orig_fprog = fprog; + return 0; +} + +static const struct team_option lb_options[] = { + { + .name = "bpf_hash_func", + .type = TEAM_OPTION_TYPE_BINARY, + .getter = lb_bpf_func_get, + .setter = lb_bpf_func_set, + }, +}; + +static int lb_init(struct team *team) +{ + return team_options_register(team, lb_options, + ARRAY_SIZE(lb_options)); +} + +static void lb_exit(struct team *team) +{ + team_options_unregister(team, lb_options, + ARRAY_SIZE(lb_options)); +} + +static const struct team_mode_ops lb_mode_ops = { + .init = lb_init, + .exit = lb_exit, + .transmit = lb_transmit, +}; + +static struct team_mode lb_mode = { + .kind = "loadbalance", + .owner = THIS_MODULE, + .priv_size = sizeof(struct lb_priv), + .ops = &lb_mode_ops, +}; + +static int __init lb_init_module(void) +{ + return team_mode_register(&lb_mode); +} + +static void __exit lb_cleanup_module(void) +{ + team_mode_unregister(&lb_mode); +} + +module_init(lb_init_module); +module_exit(lb_cleanup_module); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>"); +MODULE_DESCRIPTION("Load-balancing mode for team"); +MODULE_ALIAS("team-mode-loadbalance"); diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c index a0e8f806331..6abfbdc96be 100644 --- a/drivers/net/team/team_mode_roundrobin.c +++ b/drivers/net/team/team_mode_roundrobin.c @@ -50,7 +50,7 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb) struct team_port *port; int port_index; - port_index = rr_priv(team)->sent_packets++ % team->port_count; + port_index = rr_priv(team)->sent_packets++ % team->en_port_count; port = team_get_port_by_index_rcu(team, port_index); port = __get_first_port_up(team, port); if (unlikely(!port)) diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c deleted file mode 100644 index b15ac81d46f..00000000000 --- a/drivers/net/tokenring/3c359.c +++ /dev/null @@ -1,1843 +0,0 @@ -/* - * 3c359.c (c) 2000 Mike Phillips (mikep@linuxtr.net) All Rights Reserved - * - * Linux driver for 3Com 3c359 Tokenlink Velocity XL PCI NIC - * - * Base Driver Olympic: - * Written 1999 Peter De Schrijver & Mike Phillips - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * 7/17/00 - Clean up, version number 0.9.0. Ready to release to the world. - * - * 2/16/01 - Port up to kernel 2.4.2 ready for submission into the kernel. - * 3/05/01 - Last clean up stuff before submission. - * 2/15/01 - Finally, update to new pci api. - * - * To Do: - */ - -/* - * Technical Card Details - * - * All access to data is done with 16/8 bit transfers. The transfer - * method really sucks. You can only read or write one location at a time. - * - * Also, the microcode for the card must be uploaded if the card does not have - * the flashrom on board. This is a 28K bloat in the driver when compiled - * as a module. - * - * Rx is very simple, status into a ring of descriptors, dma data transfer, - * interrupts to tell us when a packet is received. - * - * Tx is a little more interesting. Similar scenario, descriptor and dma data - * transfers, but we don't have to interrupt the card to tell it another packet - * is ready for transmission, we are just doing simple memory writes, not io or mmio - * writes. 
The card can be set up to simply poll on the next - * descriptor pointer and when this value is non-zero will automatically download - * the next packet. The card then interrupts us when the packet is done. - * - */ - -#define XL_DEBUG 0 - -#include <linux/jiffies.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/timer.h> -#include <linux/in.h> -#include <linux/ioport.h> -#include <linux/string.h> -#include <linux/proc_fs.h> -#include <linux/ptrace.h> -#include <linux/skbuff.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/trdevice.h> -#include <linux/stddef.h> -#include <linux/init.h> -#include <linux/pci.h> -#include <linux/spinlock.h> -#include <linux/bitops.h> -#include <linux/firmware.h> -#include <linux/slab.h> - -#include <net/checksum.h> - -#include <asm/io.h> - -#include "3c359.h" - -static char version[] __devinitdata = -"3c359.c v1.2.0 2/17/01 - Mike Phillips (mikep@linuxtr.net)" ; - -#define FW_NAME "3com/3C359.bin" -MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ; -MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver\n") ; -MODULE_FIRMWARE(FW_NAME); - -/* Module parameters */ - -/* Ring Speed 0,4,16 - * 0 = Autosense - * 4,16 = Selected speed only, no autosense - * This allows the card to be the first on the ring - * and become the active monitor. - * - * WARNING: Some hubs will allow you to insert - * at the wrong speed. - * - * The adapter will _not_ fail to open if there are no - * active monitors on the ring, it will simply open up in - * its last known ringspeed if no ringspeed is specified. - */ - -static int ringspeed[XL_MAX_ADAPTERS] = {0,} ; - -module_param_array(ringspeed, int, NULL, 0); -MODULE_PARM_DESC(ringspeed,"3c359: Ringspeed selection - 4,16 or 0") ; - -/* Packet buffer size */ - -static int pkt_buf_sz[XL_MAX_ADAPTERS] = {0,} ; - -module_param_array(pkt_buf_sz, int, NULL, 0) ; -MODULE_PARM_DESC(pkt_buf_sz,"3c359: Initial buffer size") ; -/* Message Level */ - -static int message_level[XL_MAX_ADAPTERS] = {0,} ; - -module_param_array(message_level, int, NULL, 0) ; -MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ; -/* - * This is a real nasty way of doing this, but otherwise you - * will be stuck with 1555 lines of hex #'s in the code. 
- */ - -static DEFINE_PCI_DEVICE_TABLE(xl_pci_tbl) = -{ - {PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, }, - { } /* terminate list */ -}; -MODULE_DEVICE_TABLE(pci,xl_pci_tbl) ; - -static int xl_init(struct net_device *dev); -static int xl_open(struct net_device *dev); -static int xl_open_hw(struct net_device *dev) ; -static int xl_hw_reset(struct net_device *dev); -static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev); -static void xl_dn_comp(struct net_device *dev); -static int xl_close(struct net_device *dev); -static void xl_set_rx_mode(struct net_device *dev); -static irqreturn_t xl_interrupt(int irq, void *dev_id); -static int xl_set_mac_address(struct net_device *dev, void *addr) ; -static void xl_arb_cmd(struct net_device *dev); -static void xl_asb_cmd(struct net_device *dev) ; -static void xl_srb_cmd(struct net_device *dev, int srb_cmd) ; -static void xl_wait_misr_flags(struct net_device *dev) ; -static int xl_change_mtu(struct net_device *dev, int mtu); -static void xl_srb_bh(struct net_device *dev) ; -static void xl_asb_bh(struct net_device *dev) ; -static void xl_reset(struct net_device *dev) ; -static void xl_freemem(struct net_device *dev) ; - - -/* EEProm Access Functions */ -static u16 xl_ee_read(struct net_device *dev, int ee_addr) ; -static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value) ; - -/* Debugging functions */ -#if XL_DEBUG -static void print_tx_state(struct net_device *dev) ; -static void print_rx_state(struct net_device *dev) ; - -static void print_tx_state(struct net_device *dev) -{ - - struct xl_private *xl_priv = netdev_priv(dev); - struct xl_tx_desc *txd ; - u8 __iomem *xl_mmio = xl_priv->xl_mmio ; - int i ; - - printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d\n",xl_priv->tx_ring_head, - xl_priv->tx_ring_tail, xl_priv->free_ring_entries) ; - printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len\n"); - for (i = 0; i < 16; i++) { - txd = &(xl_priv->xl_tx_ring[i]) ; - printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(txd), - txd->framestartheader, txd->dnnextptr, txd->buffer, txd->buffer_length ) ; - } - - printk("DNLISTPTR = %04x\n", readl(xl_mmio + MMIO_DNLISTPTR) ); - - printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL) ); - printk("Queue status = %0x\n",netif_running(dev) ) ; -} - -static void print_rx_state(struct net_device *dev) -{ - - struct xl_private *xl_priv = netdev_priv(dev); - struct xl_rx_desc *rxd ; - u8 __iomem *xl_mmio = xl_priv->xl_mmio ; - int i ; - - printk("rx_ring_tail: %d\n", xl_priv->rx_ring_tail); - printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len\n"); - for (i = 0; i < 16; i++) { - /* rxd = (struct xl_rx_desc *)xl_priv->rx_ring_dma_addr + (i * sizeof(struct xl_rx_desc)) ; */ - rxd = &(xl_priv->xl_rx_ring[i]) ; - printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(rxd), - rxd->framestatus, rxd->upnextptr, rxd->upfragaddr, rxd->upfraglen ) ; - } - - printk("UPLISTPTR = %04x\n", readl(xl_mmio + MMIO_UPLISTPTR)); - - printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL)); - printk("Queue status = %0x\n",netif_running(dev)); -} -#endif - -/* - * Read values from the on-board EEProm. This looks very strange - * but you have to wait for the EEProm to get/set the value before - * passing/getting the next value from the nic. As with all requests - * on this nic it has to be done in two stages, a) tell the nic which - * memory address you want to access and b) pass/get the value from the nic. 
- * With the EEProm, you have to wait before and between access a) and b). - * As this is only read at initialization time and the wait period is very - * small we shouldn't have to worry about scheduling issues. - */ - -static u16 xl_ee_read(struct net_device *dev, int ee_addr) -{ - struct xl_private *xl_priv = netdev_priv(dev); - u8 __iomem *xl_mmio = xl_priv->xl_mmio ; - - /* Wait for EEProm to not be busy */ - writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ; - - /* Tell EEProm what we want to do and where */ - writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writew(EEREAD + ee_addr, xl_mmio + MMIO_MACDATA) ; - - /* Wait for EEProm to not be busy */ - writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ; - - /* Tell EEProm what we want to do and where */ - writel(IO_WORD_WRITE | EECONTROL , xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writew(EEREAD + ee_addr, xl_mmio + MMIO_MACDATA) ; - - /* Finally read the value from the EEProm */ - writel(IO_WORD_READ | EEDATA , xl_mmio + MMIO_MAC_ACCESS_CMD) ; - return readw(xl_mmio + MMIO_MACDATA) ; -} - -/* - * Write values to the onboard eeprom. As with eeprom read you need to - * set which location to write, wait, value to write, wait, with the - * added twist of having to enable eeprom writes as well. - */ - -static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value) -{ - struct xl_private *xl_priv = netdev_priv(dev); - u8 __iomem *xl_mmio = xl_priv->xl_mmio ; - - /* Wait for EEProm to not be busy */ - writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ; - - /* Enable write/erase */ - writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writew(EE_ENABLE_WRITE, xl_mmio + MMIO_MACDATA) ; - - /* Wait for EEProm to not be busy */ - writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ; - - /* Put the value we want to write into EEDATA */ - writel(IO_WORD_WRITE | EEDATA, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writew(ee_value, xl_mmio + MMIO_MACDATA) ; - - /* Tell EEProm to write eevalue into ee_addr */ - writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writew(EEWRITE + ee_addr, xl_mmio + MMIO_MACDATA) ; - - /* Wait for EEProm to not be busy, to ensure write gets done */ - writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ; - - return ; -} - -static const struct net_device_ops xl_netdev_ops = { - .ndo_open = xl_open, - .ndo_stop = xl_close, - .ndo_start_xmit = xl_xmit, - .ndo_change_mtu = xl_change_mtu, - .ndo_set_rx_mode = xl_set_rx_mode, - .ndo_set_mac_address = xl_set_mac_address, -}; - -static int __devinit xl_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct net_device *dev ; - struct xl_private *xl_priv ; - static int card_no = -1 ; - int i ; - - card_no++ ; - - if (pci_enable_device(pdev)) { - return -ENODEV ; - } - - pci_set_master(pdev); - - if ((i = pci_request_regions(pdev,"3c359"))) { - return i ; - } - - /* - * Allowing init_trdev to allocate the private data will align - * xl_private on a 32 bytes boundary which we need for the rx/tx - * descriptors - */ - - dev = alloc_trdev(sizeof(struct xl_private)) ; - if (!dev) { - pci_release_regions(pdev) ; - return -ENOMEM ; - } - xl_priv = netdev_priv(dev); - -#if XL_DEBUG - 
printk("pci_device: %p, dev:%p, dev->priv: %p, ba[0]: %10x, ba[1]:%10x\n", - pdev, dev, netdev_priv(dev), (unsigned int)pdev->resource[0].start, (unsigned int)pdev->resource[1].start); -#endif - - dev->irq=pdev->irq; - dev->base_addr=pci_resource_start(pdev,0) ; - xl_priv->xl_card_name = pci_name(pdev); - xl_priv->xl_mmio=ioremap(pci_resource_start(pdev,1), XL_IO_SPACE); - xl_priv->pdev = pdev ; - - if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) ) - xl_priv->pkt_buf_sz = PKT_BUF_SZ ; - else - xl_priv->pkt_buf_sz = pkt_buf_sz[card_no] ; - - dev->mtu = xl_priv->pkt_buf_sz - TR_HLEN ; - xl_priv->xl_ring_speed = ringspeed[card_no] ; - xl_priv->xl_message_level = message_level[card_no] ; - xl_priv->xl_functional_addr[0] = xl_priv->xl_functional_addr[1] = xl_priv->xl_functional_addr[2] = xl_priv->xl_functional_addr[3] = 0 ; - xl_priv->xl_copy_all_options = 0 ; - - if((i = xl_init(dev))) { - iounmap(xl_priv->xl_mmio) ; - free_netdev(dev) ; - pci_release_regions(pdev) ; - return i ; - } - - dev->netdev_ops = &xl_netdev_ops; - SET_NETDEV_DEV(dev, &pdev->dev); - - pci_set_drvdata(pdev,dev) ; - if ((i = register_netdev(dev))) { - printk(KERN_ERR "3C359, register netdev failed\n") ; - pci_set_drvdata(pdev,NULL) ; - iounmap(xl_priv->xl_mmio) ; - free_netdev(dev) ; - pci_release_regions(pdev) ; - return i ; - } - - printk(KERN_INFO "3C359: %s registered as: %s\n",xl_priv->xl_card_name,dev->name) ; - - return 0; -} - -static int xl_init_firmware(struct xl_private *xl_priv) -{ - int err; - - err = request_firmware(&xl_priv->fw, FW_NAME, &xl_priv->pdev->dev); - if (err) { - printk(KERN_ERR "Failed to load firmware \"%s\"\n", FW_NAME); - return err; - } - - if (xl_priv->fw->size < 16) { - printk(KERN_ERR "Bogus length %zu in \"%s\"\n", - xl_priv->fw->size, FW_NAME); - release_firmware(xl_priv->fw); - err = -EINVAL; - } - - return err; -} - -static int __devinit xl_init(struct net_device *dev) -{ - struct xl_private *xl_priv = netdev_priv(dev); - int err; - - printk(KERN_INFO "%s\n", version); - printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n", - xl_priv->xl_card_name, (unsigned int)dev->base_addr ,xl_priv->xl_mmio, dev->irq); - - spin_lock_init(&xl_priv->xl_lock) ; - - err = xl_init_firmware(xl_priv); - if (err == 0) - err = xl_hw_reset(dev); - - return err; -} - - -/* - * Hardware reset. This needs to be a separate entity as we need to reset the card - * when we change the EEProm settings. - */ - -static int xl_hw_reset(struct net_device *dev) -{ - struct xl_private *xl_priv = netdev_priv(dev); - u8 __iomem *xl_mmio = xl_priv->xl_mmio ; - unsigned long t ; - u16 i ; - u16 result_16 ; - u8 result_8 ; - u16 start ; - int j ; - - if (xl_priv->fw == NULL) - return -EINVAL; - - /* - * Reset the card. If the card has got the microcode on board, we have - * missed the initialization interrupt, so we must always do this. - */ - - writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ; - - /* - * Must wait for cmdInProgress bit (12) to clear before continuing with - * card configuration. 
- */ - - t=jiffies; - while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { - schedule(); - if (time_after(jiffies, t + 40 * HZ)) { - printk(KERN_ERR "%s: 3COM 3C359 Velocity XL card not responding to global reset.\n", dev->name); - return -ENODEV; - } - } - - /* - * Enable pmbar by setting bit in CPAttention - */ - - writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ; - result_8 = readb(xl_mmio + MMIO_MACDATA) ; - result_8 = result_8 | CPA_PMBARVIS ; - writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(result_8, xl_mmio + MMIO_MACDATA) ; - - /* - * Read cpHold bit in pmbar, if cleared we have got Flashrom on board. - * If not, we need to upload the microcode to the card - */ - - writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD); - -#if XL_DEBUG - printk(KERN_INFO "Read from PMBAR = %04x\n", readw(xl_mmio + MMIO_MACDATA)); -#endif - - if ( readw( (xl_mmio + MMIO_MACDATA)) & PMB_CPHOLD ) { - - /* Set PmBar, privateMemoryBase bits (8:2) to 0 */ - - writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD); - result_16 = readw(xl_mmio + MMIO_MACDATA) ; - result_16 = result_16 & ~((0x7F) << 2) ; - writel( (IO_WORD_WRITE | PMBAR), xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writew(result_16,xl_mmio + MMIO_MACDATA) ; - - /* Set CPAttention, memWrEn bit */ - - writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ; - result_8 = readb(xl_mmio + MMIO_MACDATA) ; - result_8 = result_8 | CPA_MEMWREN ; - writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(result_8, xl_mmio + MMIO_MACDATA) ; - - /* - * Now to write the microcode into the shared ram - * The microcode must finish at position 0xFFFF, - * so we must subtract to get the start position for the code - * - * Looks strange but ensures compiler only uses - * 16 bit unsigned int - */ - start = (0xFFFF - (xl_priv->fw->size) + 1) ; - - printk(KERN_INFO "3C359: Uploading Microcode: "); - - for (i = start, j = 0; j < xl_priv->fw->size; i++, j++) { - writel(MEM_BYTE_WRITE | 0XD0000 | i, - xl_mmio + MMIO_MAC_ACCESS_CMD); - writeb(xl_priv->fw->data[j], xl_mmio + MMIO_MACDATA); - if (j % 1024 == 0) - printk("."); - } - printk("\n") ; - - for (i = 0; i < 16; i++) { - writel((MEM_BYTE_WRITE | 0xDFFF0) + i, - xl_mmio + MMIO_MAC_ACCESS_CMD); - writeb(xl_priv->fw->data[xl_priv->fw->size - 16 + i], - xl_mmio + MMIO_MACDATA); - } - - /* - * Have to write the start address of the upload to FFF4, but - * the address must be >> 4. You do not want to know how long - * it took me to discover this. - */ - - writel(MEM_WORD_WRITE | 0xDFFF4, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writew(start >> 4, xl_mmio + MMIO_MACDATA); - - /* Clear the CPAttention, memWrEn Bit */ - - writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ; - result_8 = readb(xl_mmio + MMIO_MACDATA) ; - result_8 = result_8 & ~CPA_MEMWREN ; - writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(result_8, xl_mmio + MMIO_MACDATA) ; - - /* Clear the cpHold bit in pmbar */ - - writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD); - result_16 = readw(xl_mmio + MMIO_MACDATA) ; - result_16 = result_16 & ~PMB_CPHOLD ; - writel( (IO_WORD_WRITE | PMBAR), xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writew(result_16,xl_mmio + MMIO_MACDATA) ; - - - } /* If microcode upload required */ - - /* - * The card should now go though a self test procedure and get itself ready - * to be opened, we must wait for an srb response with the initialization - * information. 
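Editor's aside, not part of the patch hunk above: the start-address arithmetic in the microcode upload loop a few lines up is easier to follow with a concrete number; the firmware size used here is purely hypothetical.

/*
 * Worked example, assuming a hypothetical fw->size of 0x5000 bytes:
 *
 *	start            = 0xFFFF - 0x5000 + 1 = 0xB000
 *	bytes written to   shared-RAM offsets 0xB000 .. 0xFFFF
 *	word at 0xDFFF4  = start >> 4           = 0x0B00
 *
 * So the image always ends at offset 0xFFFF, and the card is handed the
 * start offset shifted right by four (i.e. divided by 16).
 */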
- */ - -#if XL_DEBUG - printk(KERN_INFO "%s: Microcode uploaded, must wait for the self test to complete\n", dev->name); -#endif - - writew(SETINDENABLE | 0xFFF, xl_mmio + MMIO_COMMAND) ; - - t=jiffies; - while ( !(readw(xl_mmio + MMIO_INTSTATUS_AUTO) & INTSTAT_SRB) ) { - schedule(); - if (time_after(jiffies, t + 15 * HZ)) { - printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n"); - return -ENODEV; - } - } - - /* - * Write the RxBufArea with D000, RxEarlyThresh, TxStartThresh, - * DnPriReqThresh, read the tech docs if you want to know what - * values they need to be. - */ - - writel(MMIO_WORD_WRITE | RXBUFAREA, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writew(0xD000, xl_mmio + MMIO_MACDATA) ; - - writel(MMIO_WORD_WRITE | RXEARLYTHRESH, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writew(0X0020, xl_mmio + MMIO_MACDATA) ; - - writew( SETTXSTARTTHRESH | 0x40 , xl_mmio + MMIO_COMMAND) ; - - writeb(0x04, xl_mmio + MMIO_DNBURSTTHRESH) ; - writeb(0x04, xl_mmio + DNPRIREQTHRESH) ; - - /* - * Read WRBR to provide the location of the srb block, have to use byte reads not word reads. - * Tech docs have this wrong !!!! - */ - - writel(MMIO_BYTE_READ | WRBR, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - xl_priv->srb = readb(xl_mmio + MMIO_MACDATA) << 8 ; - writel( (MMIO_BYTE_READ | WRBR) + 1, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - xl_priv->srb = xl_priv->srb | readb(xl_mmio + MMIO_MACDATA) ; - -#if XL_DEBUG - writel(IO_WORD_READ | SWITCHSETTINGS, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - if ( readw(xl_mmio + MMIO_MACDATA) & 2) { - printk(KERN_INFO "Default ring speed 4 mbps\n"); - } else { - printk(KERN_INFO "Default ring speed 16 mbps\n"); - } - printk(KERN_INFO "%s: xl_priv->srb = %04x\n",xl_priv->xl_card_name, xl_priv->srb); -#endif - - return 0; -} - -static int xl_open(struct net_device *dev) -{ - struct xl_private *xl_priv=netdev_priv(dev); - u8 __iomem *xl_mmio = xl_priv->xl_mmio ; - u8 i ; - __le16 hwaddr[3] ; /* Should be u8[6] but we get word return values */ - int open_err ; - - u16 switchsettings, switchsettings_eeprom ; - - if (request_irq(dev->irq, xl_interrupt, IRQF_SHARED , "3c359", dev)) - return -EAGAIN; - - /* - * Read the information from the EEPROM that we need. - */ - - hwaddr[0] = cpu_to_le16(xl_ee_read(dev,0x10)); - hwaddr[1] = cpu_to_le16(xl_ee_read(dev,0x11)); - hwaddr[2] = cpu_to_le16(xl_ee_read(dev,0x12)); - - /* Ring speed */ - - switchsettings_eeprom = xl_ee_read(dev,0x08) ; - switchsettings = switchsettings_eeprom ; - - if (xl_priv->xl_ring_speed != 0) { - if (xl_priv->xl_ring_speed == 4) - switchsettings = switchsettings | 0x02 ; - else - switchsettings = switchsettings & ~0x02 ; - } - - /* Only write EEProm if there has been a change */ - if (switchsettings != switchsettings_eeprom) { - xl_ee_write(dev,0x08,switchsettings) ; - /* Hardware reset after changing EEProm */ - xl_hw_reset(dev) ; - } - - memcpy(dev->dev_addr,hwaddr,dev->addr_len) ; - - open_err = xl_open_hw(dev) ; - - /* - * This really needs to be cleaned up with better error reporting. 
- */ - - if (open_err != 0) { /* Something went wrong with the open command */ - if (open_err & 0x07) { /* Wrong speed, retry at different speed */ - printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed\n", dev->name); - switchsettings = switchsettings ^ 2 ; - xl_ee_write(dev,0x08,switchsettings) ; - xl_hw_reset(dev) ; - open_err = xl_open_hw(dev) ; - if (open_err != 0) { - printk(KERN_WARNING "%s: Open error returned a second time, we're bombing out now\n", dev->name); - free_irq(dev->irq,dev) ; - return -ENODEV ; - } - } else { - printk(KERN_WARNING "%s: Open Error = %04x\n", dev->name, open_err) ; - free_irq(dev->irq,dev) ; - return -ENODEV ; - } - } - - /* - * Now to set up the Rx and Tx buffer structures - */ - /* These MUST be on 8 byte boundaries */ - xl_priv->xl_tx_ring = kzalloc((sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) + 7, GFP_DMA | GFP_KERNEL); - if (xl_priv->xl_tx_ring == NULL) { - free_irq(dev->irq,dev); - return -ENOMEM; - } - xl_priv->xl_rx_ring = kzalloc((sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) +7, GFP_DMA | GFP_KERNEL); - if (xl_priv->xl_rx_ring == NULL) { - free_irq(dev->irq,dev); - kfree(xl_priv->xl_tx_ring); - return -ENOMEM; - } - - /* Setup Rx Ring */ - for (i=0 ; i < XL_RX_RING_SIZE ; i++) { - struct sk_buff *skb ; - - skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ; - if (skb==NULL) - break ; - - skb->dev = dev ; - xl_priv->xl_rx_ring[i].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)); - xl_priv->xl_rx_ring[i].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG; - xl_priv->rx_ring_skb[i] = skb ; - } - - if (i==0) { - printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name); - free_irq(dev->irq,dev) ; - kfree(xl_priv->xl_tx_ring); - kfree(xl_priv->xl_rx_ring); - return -EIO ; - } - - xl_priv->rx_ring_no = i ; - xl_priv->rx_ring_tail = 0 ; - xl_priv->rx_ring_dma_addr = pci_map_single(xl_priv->pdev,xl_priv->xl_rx_ring, sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE, PCI_DMA_TODEVICE) ; - for (i=0;i<(xl_priv->rx_ring_no-1);i++) { - xl_priv->xl_rx_ring[i].upnextptr = cpu_to_le32(xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * (i+1))); - } - xl_priv->xl_rx_ring[i].upnextptr = 0 ; - - writel(xl_priv->rx_ring_dma_addr, xl_mmio + MMIO_UPLISTPTR) ; - - /* Setup Tx Ring */ - - xl_priv->tx_ring_dma_addr = pci_map_single(xl_priv->pdev,xl_priv->xl_tx_ring, sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE,PCI_DMA_TODEVICE) ; - - xl_priv->tx_ring_head = 1 ; - xl_priv->tx_ring_tail = 255 ; /* Special marker for first packet */ - xl_priv->free_ring_entries = XL_TX_RING_SIZE ; - - /* - * Setup the first dummy DPD entry for polling to start working. 
- */ - - xl_priv->xl_tx_ring[0].framestartheader = TXDPDEMPTY; - xl_priv->xl_tx_ring[0].buffer = 0 ; - xl_priv->xl_tx_ring[0].buffer_length = 0 ; - xl_priv->xl_tx_ring[0].dnnextptr = 0 ; - - writel(xl_priv->tx_ring_dma_addr, xl_mmio + MMIO_DNLISTPTR) ; - writel(DNUNSTALL, xl_mmio + MMIO_COMMAND) ; - writel(UPUNSTALL, xl_mmio + MMIO_COMMAND) ; - writel(DNENABLE, xl_mmio + MMIO_COMMAND) ; - writeb(0x40, xl_mmio + MMIO_DNPOLL) ; - - /* - * Enable interrupts on the card - */ - - writel(SETINTENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ; - writel(SETINDENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ; - - netif_start_queue(dev) ; - return 0; - -} - -static int xl_open_hw(struct net_device *dev) -{ - struct xl_private *xl_priv=netdev_priv(dev); - u8 __iomem *xl_mmio = xl_priv->xl_mmio ; - u16 vsoff ; - char ver_str[33]; - int open_err ; - int i ; - unsigned long t ; - - /* - * Okay, let's build up the Open.NIC srb command - * - */ - - writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb), xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(OPEN_NIC, xl_mmio + MMIO_MACDATA) ; - - /* - * Use this as a test byte, if it comes back with the same value, the command didn't work - */ - - writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb)+ 2, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(0xff,xl_mmio + MMIO_MACDATA) ; - - /* Open options */ - writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + 8, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(0x00, xl_mmio + MMIO_MACDATA) ; - writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + 9, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(0x00, xl_mmio + MMIO_MACDATA) ; - - /* - * Node address, be careful here, the docs say you can just put zeros here and it will use - * the hardware address, it doesn't, you must include the node address in the open command. - */ - - if (xl_priv->xl_laa[0]) { /* If using a LAA address */ - for (i=10;i<16;i++) { - writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(xl_priv->xl_laa[i-10],xl_mmio + MMIO_MACDATA) ; - } - memcpy(dev->dev_addr,xl_priv->xl_laa,dev->addr_len) ; - } else { /* Regular hardware address */ - for (i=10;i<16;i++) { - writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(dev->dev_addr[i-10], xl_mmio + MMIO_MACDATA) ; - } - } - - /* Default everything else to 0 */ - for (i = 16; i < 34; i++) { - writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(0x00,xl_mmio + MMIO_MACDATA) ; - } - - /* - * Set the csrb bit in the MISR register - */ - - xl_wait_misr_flags(dev) ; - writel(MEM_BYTE_WRITE | MF_CSRB, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(0xFF, xl_mmio + MMIO_MACDATA) ; - writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(MISR_CSRB , xl_mmio + MMIO_MACDATA) ; - - /* - * Now wait for the command to run - */ - - t=jiffies; - while (! 
(readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) { - schedule(); - if (time_after(jiffies, t + 40 * HZ)) { - printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n"); - break ; - } - } - - /* - * Let's interpret the open response - */ - - writel( (MEM_BYTE_READ | 0xD0000 | xl_priv->srb)+2, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - if (readb(xl_mmio + MMIO_MACDATA)!=0) { - open_err = readb(xl_mmio + MMIO_MACDATA) << 8 ; - writel( (MEM_BYTE_READ | 0xD0000 | xl_priv->srb) + 7, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - open_err |= readb(xl_mmio + MMIO_MACDATA) ; - return open_err ; - } else { - writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 8, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - xl_priv->asb = swab16(readw(xl_mmio + MMIO_MACDATA)) ; - printk(KERN_INFO "%s: Adapter Opened Details: ",dev->name) ; - printk("ASB: %04x",xl_priv->asb ) ; - writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 10, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - printk(", SRB: %04x",swab16(readw(xl_mmio + MMIO_MACDATA)) ) ; - - writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - xl_priv->arb = swab16(readw(xl_mmio + MMIO_MACDATA)) ; - printk(", ARB: %04x\n",xl_priv->arb ); - writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - vsoff = swab16(readw(xl_mmio + MMIO_MACDATA)) ; - - /* - * Interesting, sending the individual characters directly to printk was causing klogd to use - * use 100% of processor time, so we build up the string and print that instead. - */ - - for (i=0;i<0x20;i++) { - writel( (MEM_BYTE_READ | 0xD0000 | vsoff) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - ver_str[i] = readb(xl_mmio + MMIO_MACDATA) ; - } - ver_str[i] = '\0' ; - printk(KERN_INFO "%s: Microcode version String: %s\n",dev->name,ver_str); - } - - /* - * Issue the AckInterrupt - */ - writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; - - return 0 ; -} - -/* - * There are two ways of implementing rx on the 359 NIC, either - * interrupt driven or polling. We are going to uses interrupts, - * it is the easier way of doing things. - * - * The Rx works with a ring of Rx descriptors. At initialise time the ring - * entries point to the next entry except for the last entry in the ring - * which points to 0. The card is programmed with the location of the first - * available descriptor and keeps reading the next_ptr until next_ptr is set - * to 0. Hopefully with a ring size of 16 the card will never get to read a next_ptr - * of 0. As the Rx interrupt is received we copy the frame up to the protocol layers - * and then point the end of the ring to our current position and point our current - * position to 0, therefore making the current position the last position on the ring. - * The last position on the ring therefore loops continually loops around the rx ring. - * - * rx_ring_tail is the position on the ring to process next. (Think of a snake, the head - * expands as the card adds new packets and we go around eating the tail processing the - * packets.) - * - * Undoubtably it could be streamlined and improved upon, but at the moment it works - * and the fast path through the routine is fine. - * - * adv_rx_ring could be inlined to increase performance, but its called a *lot* of times - * in xl_rx so would increase the size of the function significantly. 
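Editor's aside, not part of the patch hunk above: the rx and tx paths below wrap their ring indices with a bitmask rather than a modulo, which only works because the ring sizes are powers of two. A minimal standalone check of that equivalence, with RING_SIZE standing in for XL_RX_RING_SIZE / XL_TX_RING_SIZE:

/* Sketch only; RING_SIZE must be a power of two, as the driver requires. */
#include <assert.h>

#define RING_SIZE 16

int main(void)
{
	unsigned int n;

	for (n = 0; n < 4 * RING_SIZE; n++) {
		/* "next" and "previous" slot, computed the way adv_rx_ring()
		 * and xl_xmit() below compute them */
		unsigned int next = (n + 1) & (RING_SIZE - 1);
		unsigned int prev = (n + RING_SIZE - 1) & (RING_SIZE - 1);

		assert(next == (n + 1) % RING_SIZE);
		assert(prev == (n + RING_SIZE - 1) % RING_SIZE);
	}
	return 0;
}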
- */ - -static void adv_rx_ring(struct net_device *dev) /* Advance rx_ring, cut down on bloat in xl_rx */ -{ - struct xl_private *xl_priv=netdev_priv(dev); - int n = xl_priv->rx_ring_tail; - int prev_ring_loc; - - prev_ring_loc = (n + XL_RX_RING_SIZE - 1) & (XL_RX_RING_SIZE - 1); - xl_priv->xl_rx_ring[prev_ring_loc].upnextptr = cpu_to_le32(xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * n)); - xl_priv->xl_rx_ring[n].framestatus = 0; - xl_priv->xl_rx_ring[n].upnextptr = 0; - xl_priv->rx_ring_tail++; - xl_priv->rx_ring_tail &= (XL_RX_RING_SIZE-1); -} - -static void xl_rx(struct net_device *dev) -{ - struct xl_private *xl_priv=netdev_priv(dev); - u8 __iomem * xl_mmio = xl_priv->xl_mmio ; - struct sk_buff *skb, *skb2 ; - int frame_length = 0, copy_len = 0 ; - int temp_ring_loc ; - - /* - * Receive the next frame, loop around the ring until all frames - * have been received. - */ - - while (xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & (RXUPDCOMPLETE | RXUPDFULL) ) { /* Descriptor to process */ - - if (xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & RXUPDFULL ) { /* UpdFull, Multiple Descriptors used for the frame */ - - /* - * This is a pain, you need to go through all the descriptors until the last one - * for this frame to find the framelength - */ - - temp_ring_loc = xl_priv->rx_ring_tail ; - - while (xl_priv->xl_rx_ring[temp_ring_loc].framestatus & RXUPDFULL ) { - temp_ring_loc++ ; - temp_ring_loc &= (XL_RX_RING_SIZE-1) ; - } - - frame_length = le32_to_cpu(xl_priv->xl_rx_ring[temp_ring_loc].framestatus) & 0x7FFF; - - skb = dev_alloc_skb(frame_length) ; - - if (skb==NULL) { /* No memory for frame, still need to roll forward the rx ring */ - printk(KERN_WARNING "%s: dev_alloc_skb failed - multi buffer !\n", dev->name) ; - while (xl_priv->rx_ring_tail != temp_ring_loc) - adv_rx_ring(dev) ; - - adv_rx_ring(dev) ; /* One more time just for luck :) */ - dev->stats.rx_dropped++ ; - - writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; - return ; - } - - while (xl_priv->rx_ring_tail != temp_ring_loc) { - copy_len = le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen) & 0x7FFF; - frame_length -= copy_len ; - pci_dma_sync_single_for_cpu(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE); - skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail], - skb_put(skb, copy_len), - copy_len); - pci_dma_sync_single_for_device(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE); - adv_rx_ring(dev) ; - } - - /* Now we have found the last fragment */ - pci_dma_sync_single_for_cpu(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE); - skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail], - skb_put(skb,copy_len), frame_length); -/* memcpy(skb_put(skb,frame_length), bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */ - pci_dma_sync_single_for_device(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE); - adv_rx_ring(dev) ; - skb->protocol = tr_type_trans(skb,dev) ; - netif_rx(skb) ; - - } else { /* Single Descriptor Used, simply swap buffers over, fast path */ - - frame_length = le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus) & 0x7FFF; - - skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ; - - if 
(skb==NULL) { /* Still need to fix the rx ring */ - printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer\n",dev->name); - adv_rx_ring(dev) ; - dev->stats.rx_dropped++ ; - writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; - return ; - } - - skb2 = xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] ; - pci_unmap_single(xl_priv->pdev, le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; - skb_put(skb2, frame_length) ; - skb2->protocol = tr_type_trans(skb2,dev) ; - - xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] = skb ; - xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev,skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)); - xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG; - adv_rx_ring(dev) ; - dev->stats.rx_packets++ ; - dev->stats.rx_bytes += frame_length ; - - netif_rx(skb2) ; - } /* if multiple buffers */ - } /* while packet to do */ - - /* Clear the updComplete interrupt */ - writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; - return ; -} - -/* - * This is ruthless, it doesn't care what state the card is in it will - * completely reset the adapter. - */ - -static void xl_reset(struct net_device *dev) -{ - struct xl_private *xl_priv=netdev_priv(dev); - u8 __iomem * xl_mmio = xl_priv->xl_mmio ; - unsigned long t; - - writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ; - - /* - * Must wait for cmdInProgress bit (12) to clear before continuing with - * card configuration. - */ - - t=jiffies; - while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { - if (time_after(jiffies, t + 40 * HZ)) { - printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n"); - break ; - } - } - -} - -static void xl_freemem(struct net_device *dev) -{ - struct xl_private *xl_priv=netdev_priv(dev); - int i ; - - for (i=0;i<XL_RX_RING_SIZE;i++) { - dev_kfree_skb_irq(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]) ; - pci_unmap_single(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE); - xl_priv->rx_ring_tail++ ; - xl_priv->rx_ring_tail &= XL_RX_RING_SIZE-1; - } - - /* unmap ring */ - pci_unmap_single(xl_priv->pdev,xl_priv->rx_ring_dma_addr, sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE, PCI_DMA_FROMDEVICE) ; - - pci_unmap_single(xl_priv->pdev,xl_priv->tx_ring_dma_addr, sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE, PCI_DMA_TODEVICE) ; - - kfree(xl_priv->xl_rx_ring) ; - kfree(xl_priv->xl_tx_ring) ; - - return ; -} - -static irqreturn_t xl_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = (struct net_device *)dev_id; - struct xl_private *xl_priv =netdev_priv(dev); - u8 __iomem * xl_mmio = xl_priv->xl_mmio ; - u16 intstatus, macstatus ; - - intstatus = readw(xl_mmio + MMIO_INTSTATUS) ; - - if (!(intstatus & 1)) /* We didn't generate the interrupt */ - return IRQ_NONE; - - spin_lock(&xl_priv->xl_lock) ; - - /* - * Process the interrupt - */ - /* - * Something fishy going on here, we shouldn't get 0001 ints, not fatal though. - */ - if (intstatus == 0x0001) { - writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; - printk(KERN_INFO "%s: 00001 int received\n",dev->name); - } else { - if (intstatus & (HOSTERRINT | SRBRINT | ARBCINT | UPCOMPINT | DNCOMPINT | HARDERRINT | (1<<8) | TXUNDERRUN | ASBFINT)) { - - /* - * Host Error. 
- * It may be possible to recover from this, but usually it means something - * is seriously fubar, so we just close the adapter. - */ - - if (intstatus & HOSTERRINT) { - printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x\n",dev->name,intstatus); - writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ; - printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name); - netif_stop_queue(dev) ; - xl_freemem(dev) ; - free_irq(dev->irq,dev); - xl_reset(dev) ; - writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; - spin_unlock(&xl_priv->xl_lock) ; - return IRQ_HANDLED; - } /* Host Error */ - - if (intstatus & SRBRINT ) { /* Srbc interrupt */ - writel(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; - if (xl_priv->srb_queued) - xl_srb_bh(dev) ; - } /* SRBR Interrupt */ - - if (intstatus & TXUNDERRUN) { /* Issue DnReset command */ - writel(DNRESET, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { /* Wait for command to run */ - /* !!! FIX-ME !!!! - Must put a timeout check here ! */ - /* Empty Loop */ - } - printk(KERN_WARNING "%s: TX Underrun received\n",dev->name); - writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; - } /* TxUnderRun */ - - if (intstatus & ARBCINT ) { /* Arbc interrupt */ - xl_arb_cmd(dev) ; - } /* Arbc */ - - if (intstatus & ASBFINT) { - if (xl_priv->asb_queued == 1) { - xl_asb_cmd(dev) ; - } else if (xl_priv->asb_queued == 2) { - xl_asb_bh(dev) ; - } else { - writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ; - } - } /* Asbf */ - - if (intstatus & UPCOMPINT ) /* UpComplete */ - xl_rx(dev) ; - - if (intstatus & DNCOMPINT ) /* DnComplete */ - xl_dn_comp(dev) ; - - if (intstatus & HARDERRINT ) { /* Hardware error */ - writel(MMIO_WORD_READ | MACSTATUS, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - macstatus = readw(xl_mmio + MMIO_MACDATA) ; - printk(KERN_WARNING "%s: MacStatusError, details: ", dev->name); - if (macstatus & (1<<14)) - printk(KERN_WARNING "tchk error: Unrecoverable error\n"); - if (macstatus & (1<<3)) - printk(KERN_WARNING "eint error: Internal watchdog timer expired\n"); - if (macstatus & (1<<2)) - printk(KERN_WARNING "aint error: Host tried to perform invalid operation\n"); - printk(KERN_WARNING "Instatus = %02x, macstatus = %02x\n",intstatus,macstatus) ; - printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name); - netif_stop_queue(dev) ; - xl_freemem(dev) ; - free_irq(dev->irq,dev); - unregister_netdev(dev) ; - free_netdev(dev) ; - xl_reset(dev) ; - writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; - spin_unlock(&xl_priv->xl_lock) ; - return IRQ_HANDLED; - } - } else { - printk(KERN_WARNING "%s: Received Unknown interrupt : %04x\n", dev->name, intstatus); - writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; - } - } - - /* Turn interrupts back on */ - - writel( SETINDENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ; - writel( SETINTENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ; - - spin_unlock(&xl_priv->xl_lock) ; - return IRQ_HANDLED; -} - -/* - * Tx - Polling configuration - */ - -static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct xl_private *xl_priv=netdev_priv(dev); - struct xl_tx_desc *txd ; - int tx_head, tx_tail, tx_prev ; - unsigned long flags ; - - spin_lock_irqsave(&xl_priv->xl_lock,flags) ; - - netif_stop_queue(dev) ; - - if (xl_priv->free_ring_entries > 1 ) { - /* - * Set up the descriptor for the packet - */ - tx_head = xl_priv->tx_ring_head ; - tx_tail = xl_priv->tx_ring_tail ; - - txd = 
&(xl_priv->xl_tx_ring[tx_head]) ; - txd->dnnextptr = 0 ; - txd->framestartheader = cpu_to_le32(skb->len) | TXDNINDICATE; - txd->buffer = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE)); - txd->buffer_length = cpu_to_le32(skb->len) | TXDNFRAGLAST; - xl_priv->tx_ring_skb[tx_head] = skb ; - dev->stats.tx_packets++ ; - dev->stats.tx_bytes += skb->len ; - - /* - * Set the nextptr of the previous descriptor equal to this descriptor, add XL_TX_RING_SIZE -1 - * to ensure no negative numbers in unsigned locations. - */ - - tx_prev = (xl_priv->tx_ring_head + XL_TX_RING_SIZE - 1) & (XL_TX_RING_SIZE - 1) ; - - xl_priv->tx_ring_head++ ; - xl_priv->tx_ring_head &= (XL_TX_RING_SIZE - 1) ; - xl_priv->free_ring_entries-- ; - - xl_priv->xl_tx_ring[tx_prev].dnnextptr = cpu_to_le32(xl_priv->tx_ring_dma_addr + (sizeof (struct xl_tx_desc) * tx_head)); - - /* Sneaky, by doing a read on DnListPtr we can force the card to poll on the DnNextPtr */ - /* readl(xl_mmio + MMIO_DNLISTPTR) ; */ - - netif_wake_queue(dev) ; - - spin_unlock_irqrestore(&xl_priv->xl_lock,flags) ; - - return NETDEV_TX_OK; - } else { - spin_unlock_irqrestore(&xl_priv->xl_lock,flags) ; - return NETDEV_TX_BUSY; - } - -} - -/* - * The NIC has told us that a packet has been downloaded onto the card, we must - * find out which packet it has done, clear the skb and information for the packet - * then advance around the ring for all transmitted packets - */ - -static void xl_dn_comp(struct net_device *dev) -{ - struct xl_private *xl_priv=netdev_priv(dev); - u8 __iomem * xl_mmio = xl_priv->xl_mmio ; - struct xl_tx_desc *txd ; - - - if (xl_priv->tx_ring_tail == 255) {/* First time */ - xl_priv->xl_tx_ring[0].framestartheader = 0 ; - xl_priv->xl_tx_ring[0].dnnextptr = 0 ; - xl_priv->tx_ring_tail = 1 ; - } - - while (xl_priv->xl_tx_ring[xl_priv->tx_ring_tail].framestartheader & TXDNCOMPLETE ) { - txd = &(xl_priv->xl_tx_ring[xl_priv->tx_ring_tail]) ; - pci_unmap_single(xl_priv->pdev, le32_to_cpu(txd->buffer), xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]->len, PCI_DMA_TODEVICE); - txd->framestartheader = 0 ; - txd->buffer = cpu_to_le32(0xdeadbeef); - txd->buffer_length = 0 ; - dev_kfree_skb_irq(xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]) ; - xl_priv->tx_ring_tail++ ; - xl_priv->tx_ring_tail &= (XL_TX_RING_SIZE - 1) ; - xl_priv->free_ring_entries++ ; - } - - netif_wake_queue(dev) ; - - writel(ACK_INTERRUPT | DNCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; -} - -/* - * Close the adapter properly. - * This srb reply cannot be handled from interrupt context as we have - * to free the interrupt from the driver. - */ - -static int xl_close(struct net_device *dev) -{ - struct xl_private *xl_priv = netdev_priv(dev); - u8 __iomem * xl_mmio = xl_priv->xl_mmio ; - unsigned long t ; - - netif_stop_queue(dev) ; - - /* - * Close the adapter, need to stall the rx and tx queues. 
- */ - - writew(DNSTALL, xl_mmio + MMIO_COMMAND) ; - t=jiffies; - while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { - schedule(); - if (time_after(jiffies, t + 10 * HZ)) { - printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNSTALL not responding.\n", dev->name); - break ; - } - } - writew(DNDISABLE, xl_mmio + MMIO_COMMAND) ; - t=jiffies; - while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { - schedule(); - if (time_after(jiffies, t + 10 * HZ)) { - printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNDISABLE not responding.\n", dev->name); - break ; - } - } - writew(UPSTALL, xl_mmio + MMIO_COMMAND) ; - t=jiffies; - while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { - schedule(); - if (time_after(jiffies, t + 10 * HZ)) { - printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPSTALL not responding.\n", dev->name); - break ; - } - } - - /* Turn off interrupts, we will still get the indication though - * so we can trap it - */ - - writel(SETINTENABLE, xl_mmio + MMIO_COMMAND) ; - - xl_srb_cmd(dev,CLOSE_NIC) ; - - t=jiffies; - while (!(readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) { - schedule(); - if (time_after(jiffies, t + 10 * HZ)) { - printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-CLOSENIC not responding.\n", dev->name); - break ; - } - } - /* Read the srb response from the adapter */ - - writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD); - if (readb(xl_mmio + MMIO_MACDATA) != CLOSE_NIC) { - printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response\n",dev->name); - } else { - writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - if (readb(xl_mmio + MMIO_MACDATA)==0) { - printk(KERN_INFO "%s: Adapter has been closed\n",dev->name); - writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; - - xl_freemem(dev) ; - free_irq(dev->irq,dev) ; - } else { - printk(KERN_INFO "%s: Close nic command returned error code %02x\n",dev->name, readb(xl_mmio + MMIO_MACDATA)) ; - } - } - - /* Reset the upload and download logic */ - - writew(UPRESET, xl_mmio + MMIO_COMMAND) ; - t=jiffies; - while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { - schedule(); - if (time_after(jiffies, t + 10 * HZ)) { - printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPRESET not responding.\n", dev->name); - break ; - } - } - writew(DNRESET, xl_mmio + MMIO_COMMAND) ; - t=jiffies; - while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { - schedule(); - if (time_after(jiffies, t + 10 * HZ)) { - printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNRESET not responding.\n", dev->name); - break ; - } - } - xl_hw_reset(dev) ; - return 0 ; -} - -static void xl_set_rx_mode(struct net_device *dev) -{ - struct xl_private *xl_priv = netdev_priv(dev); - struct netdev_hw_addr *ha; - unsigned char dev_mc_address[4] ; - u16 options ; - - if (dev->flags & IFF_PROMISC) - options = 0x0004 ; - else - options = 0x0000 ; - - if (options ^ xl_priv->xl_copy_all_options) { /* Changed, must send command */ - xl_priv->xl_copy_all_options = options ; - xl_srb_cmd(dev, SET_RECEIVE_MODE) ; - return ; - } - - dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; - - netdev_for_each_mc_addr(ha, dev) { - dev_mc_address[0] |= ha->addr[2]; - dev_mc_address[1] |= ha->addr[3]; - dev_mc_address[2] |= ha->addr[4]; - dev_mc_address[3] |= ha->addr[5]; - } - - if (memcmp(xl_priv->xl_functional_addr,dev_mc_address,4) != 0) { /* Options have changed, run the command */ - memcpy(xl_priv->xl_functional_addr, 
dev_mc_address,4) ; - xl_srb_cmd(dev, SET_FUNC_ADDRESS) ; - } - return ; -} - - -/* - * We issued an srb command and now we must read - * the response from the completed command. - */ - -static void xl_srb_bh(struct net_device *dev) -{ - struct xl_private *xl_priv = netdev_priv(dev); - u8 __iomem * xl_mmio = xl_priv->xl_mmio ; - u8 srb_cmd, ret_code ; - int i ; - - writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - srb_cmd = readb(xl_mmio + MMIO_MACDATA) ; - writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - ret_code = readb(xl_mmio + MMIO_MACDATA) ; - - /* Ret_code is standard across all commands */ - - switch (ret_code) { - case 1: - printk(KERN_INFO "%s: Command: %d - Invalid Command code\n",dev->name,srb_cmd) ; - break ; - case 4: - printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command\n",dev->name,srb_cmd); - break ; - - case 6: - printk(KERN_INFO "%s: Command: %d - Options Invalid for command\n",dev->name,srb_cmd); - break ; - - case 0: /* Successful command execution */ - switch (srb_cmd) { - case READ_LOG: /* Returns 14 bytes of data from the NIC */ - if(xl_priv->xl_message_level) - printk(KERN_INFO "%s: READ.LOG 14 bytes of data ",dev->name) ; - /* - * We still have to read the log even if message_level = 0 and we don't want - * to see it - */ - for (i=0;i<14;i++) { - writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - if(xl_priv->xl_message_level) - printk("%02x:",readb(xl_mmio + MMIO_MACDATA)) ; - } - printk("\n") ; - break ; - case SET_FUNC_ADDRESS: - if(xl_priv->xl_message_level) - printk(KERN_INFO "%s: Functional Address Set\n",dev->name); - break ; - case CLOSE_NIC: - if(xl_priv->xl_message_level) - printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler\n",dev->name); - break ; - case SET_MULTICAST_MODE: - if(xl_priv->xl_message_level) - printk(KERN_INFO "%s: Multicast options successfully changed\n",dev->name) ; - break ; - case SET_RECEIVE_MODE: - if(xl_priv->xl_message_level) { - if (xl_priv->xl_copy_all_options == 0x0004) - printk(KERN_INFO "%s: Entering promiscuous mode\n", dev->name); - else - printk(KERN_INFO "%s: Entering normal receive mode\n",dev->name); - } - break ; - - } /* switch */ - break ; - } /* switch */ - return ; -} - -static int xl_set_mac_address (struct net_device *dev, void *addr) -{ - struct sockaddr *saddr = addr ; - struct xl_private *xl_priv = netdev_priv(dev); - - if (netif_running(dev)) { - printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ; - return -EIO ; - } - - memcpy(xl_priv->xl_laa, saddr->sa_data,dev->addr_len) ; - - if (xl_priv->xl_message_level) { - printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, xl_priv->xl_laa[0], - xl_priv->xl_laa[1], xl_priv->xl_laa[2], - xl_priv->xl_laa[3], xl_priv->xl_laa[4], - xl_priv->xl_laa[5]); - } - - return 0 ; -} - -static void xl_arb_cmd(struct net_device *dev) -{ - struct xl_private *xl_priv = netdev_priv(dev); - u8 __iomem * xl_mmio = xl_priv->xl_mmio ; - u8 arb_cmd ; - u16 lan_status, lan_status_diff ; - - writel( ( MEM_BYTE_READ | 0xD0000 | xl_priv->arb), xl_mmio + MMIO_MAC_ACCESS_CMD) ; - arb_cmd = readb(xl_mmio + MMIO_MACDATA) ; - - if (arb_cmd == RING_STATUS_CHANGE) { /* Ring.Status.Change */ - writel( ( (MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ; - - printk(KERN_INFO "%s: Ring Status Change: New Status = %04x\n", dev->name, swab16(readw(xl_mmio + 
MMIO_MACDATA) )) ; - - lan_status = swab16(readw(xl_mmio + MMIO_MACDATA)); - - /* Acknowledge interrupt, this tells nic we are done with the arb */ - writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; - - lan_status_diff = xl_priv->xl_lan_status ^ lan_status ; - - if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) { - if (lan_status_diff & LSC_LWF) - printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name); - if (lan_status_diff & LSC_ARW) - printk(KERN_WARNING "%s: Auto removal error\n",dev->name); - if (lan_status_diff & LSC_FPE) - printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name); - if (lan_status_diff & LSC_RR) - printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name); - - /* Adapter has been closed by the hardware */ - - netif_stop_queue(dev); - xl_freemem(dev) ; - free_irq(dev->irq,dev); - - printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name); - } /* If serious error */ - - if (xl_priv->xl_message_level) { - if (lan_status_diff & LSC_SIG_LOSS) - printk(KERN_WARNING "%s: No receive signal detected\n", dev->name); - if (lan_status_diff & LSC_HARD_ERR) - printk(KERN_INFO "%s: Beaconing\n",dev->name); - if (lan_status_diff & LSC_SOFT_ERR) - printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name); - if (lan_status_diff & LSC_TRAN_BCN) - printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name); - if (lan_status_diff & LSC_SS) - printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); - if (lan_status_diff & LSC_RING_REC) - printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name); - if (lan_status_diff & LSC_FDX_MODE) - printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name); - } - - if (lan_status_diff & LSC_CO) { - if (xl_priv->xl_message_level) - printk(KERN_INFO "%s: Counter Overflow\n", dev->name); - /* Issue READ.LOG command */ - xl_srb_cmd(dev, READ_LOG) ; - } - - /* There is no command in the tech docs to issue the read_sr_counters */ - if (lan_status_diff & LSC_SR_CO) { - if (xl_priv->xl_message_level) - printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name); - } - - xl_priv->xl_lan_status = lan_status ; - - } /* Lan.change.status */ - else if ( arb_cmd == RECEIVE_DATA) { /* Received.Data */ -#if XL_DEBUG - printk(KERN_INFO "Received.Data\n"); -#endif - writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ; - xl_priv->mac_buffer = swab16(readw(xl_mmio + MMIO_MACDATA)) ; - - /* Now we are going to be really basic here and not do anything - * with the data at all. The tech docs do not give me enough - * information to calculate the buffers properly so we're - * just going to tell the nic that we've dealt with the frame - * anyway. - */ - - /* Acknowledge interrupt, this tells nic we are done with the arb */ - writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; - - /* Is the ASB free ? 
*/ - - xl_priv->asb_queued = 0 ; - writel( ((MEM_BYTE_READ | 0xD0000 | xl_priv->asb) + 2), xl_mmio + MMIO_MAC_ACCESS_CMD) ; - if (readb(xl_mmio + MMIO_MACDATA) != 0xff) { - xl_priv->asb_queued = 1 ; - - xl_wait_misr_flags(dev) ; - - writel(MEM_BYTE_WRITE | MF_ASBFR, xl_mmio + MMIO_MAC_ACCESS_CMD); - writeb(0xff, xl_mmio + MMIO_MACDATA) ; - writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(MISR_ASBFR, xl_mmio + MMIO_MACDATA) ; - return ; - /* Drop out and wait for the bottom half to be run */ - } - - xl_asb_cmd(dev) ; - - } else { - printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x\n",dev->name,arb_cmd); - } - - /* Acknowledge the arb interrupt */ - - writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; - - return ; -} - - -/* - * There is only one asb command, but we can get called from different - * places. - */ - -static void xl_asb_cmd(struct net_device *dev) -{ - struct xl_private *xl_priv = netdev_priv(dev); - u8 __iomem * xl_mmio = xl_priv->xl_mmio ; - - if (xl_priv->asb_queued == 1) - writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ; - - writel(MEM_BYTE_WRITE | 0xd0000 | xl_priv->asb, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(0x81, xl_mmio + MMIO_MACDATA) ; - - writel(MEM_WORD_WRITE | 0xd0000 | xl_priv->asb | 6, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writew(swab16(xl_priv->mac_buffer), xl_mmio + MMIO_MACDATA) ; - - xl_wait_misr_flags(dev) ; - - writel(MEM_BYTE_WRITE | MF_RASB, xl_mmio + MMIO_MAC_ACCESS_CMD); - writeb(0xff, xl_mmio + MMIO_MACDATA) ; - - writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(MISR_RASB, xl_mmio + MMIO_MACDATA) ; - - xl_priv->asb_queued = 2 ; - - return ; -} - -/* - * This will only get called if there was an error - * from the asb cmd. 
- */ -static void xl_asb_bh(struct net_device *dev) -{ - struct xl_private *xl_priv = netdev_priv(dev); - u8 __iomem * xl_mmio = xl_priv->xl_mmio ; - u8 ret_code ; - - writel(MMIO_BYTE_READ | 0xd0000 | xl_priv->asb | 2, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - ret_code = readb(xl_mmio + MMIO_MACDATA) ; - switch (ret_code) { - case 0x01: - printk(KERN_INFO "%s: ASB Command, unrecognized command code\n",dev->name); - break ; - case 0x26: - printk(KERN_INFO "%s: ASB Command, unexpected receive buffer\n", dev->name); - break ; - case 0x40: - printk(KERN_INFO "%s: ASB Command, Invalid Station ID\n", dev->name); - break ; - } - xl_priv->asb_queued = 0 ; - writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ; - return ; -} - -/* - * Issue srb commands to the nic - */ - -static void xl_srb_cmd(struct net_device *dev, int srb_cmd) -{ - struct xl_private *xl_priv = netdev_priv(dev); - u8 __iomem * xl_mmio = xl_priv->xl_mmio ; - - switch (srb_cmd) { - case READ_LOG: - writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(READ_LOG, xl_mmio + MMIO_MACDATA) ; - break; - - case CLOSE_NIC: - writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(CLOSE_NIC, xl_mmio + MMIO_MACDATA) ; - break ; - - case SET_RECEIVE_MODE: - writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(SET_RECEIVE_MODE, xl_mmio + MMIO_MACDATA) ; - writel(MEM_WORD_WRITE | 0xD0000 | xl_priv->srb | 4, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writew(xl_priv->xl_copy_all_options, xl_mmio + MMIO_MACDATA) ; - break ; - - case SET_FUNC_ADDRESS: - writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(SET_FUNC_ADDRESS, xl_mmio + MMIO_MACDATA) ; - writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 6 , xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(xl_priv->xl_functional_addr[0], xl_mmio + MMIO_MACDATA) ; - writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 7 , xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(xl_priv->xl_functional_addr[1], xl_mmio + MMIO_MACDATA) ; - writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 8 , xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(xl_priv->xl_functional_addr[2], xl_mmio + MMIO_MACDATA) ; - writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 9 , xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(xl_priv->xl_functional_addr[3], xl_mmio + MMIO_MACDATA) ; - break ; - } /* switch */ - - - xl_wait_misr_flags(dev) ; - - /* Write 0xff to the CSRB flag */ - writel(MEM_BYTE_WRITE | MF_CSRB , xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(0xFF, xl_mmio + MMIO_MACDATA) ; - /* Set csrb bit in MISR register to process command */ - writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(MISR_CSRB, xl_mmio + MMIO_MACDATA) ; - xl_priv->srb_queued = 1 ; - - return ; -} - -/* - * This is nasty, to use the MISR command you have to wait for 6 memory locations - * to be zero. This is the way the driver does on other OS'es so we should be ok with - * the empty loop. 
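Editor's aside, not part of the patch hunk: the comment above concedes that the wait in xl_wait_misr_flags() below can spin forever. Purely for illustration, a bounded variant is sketched here, reusing the jiffies/time_after pattern this driver already uses for its other waits; the function name is made up.

/* Hypothetical bounded variant of xl_wait_misr_flags(); sketch only. */
static int xl_wait_misr_flags_timeout(struct net_device *dev)
{
	struct xl_private *xl_priv = netdev_priv(dev);
	u8 __iomem *xl_mmio = xl_priv->xl_mmio;
	unsigned long t = jiffies;
	int i;

	writel(MMIO_BYTE_READ | MISR_RW, xl_mmio + MMIO_MAC_ACCESS_CMD);
	if (readb(xl_mmio + MMIO_MACDATA) != 0) {	/* MISR not clear yet */
		for (i = 0; i < 6; i++) {
			writel(MEM_BYTE_READ | 0xDFFE0 | i, xl_mmio + MMIO_MAC_ACCESS_CMD);
			while (readb(xl_mmio + MMIO_MACDATA) != 0) {
				if (time_after(jiffies, t + 10 * HZ))
					return -ETIMEDOUT;	/* give up instead of spinning */
			}
		}
	}

	writel(MMIO_BYTE_WRITE | MISR_AND, xl_mmio + MMIO_MAC_ACCESS_CMD);
	writeb(0x80, xl_mmio + MMIO_MACDATA);

	return 0;
}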
- */ - -static void xl_wait_misr_flags(struct net_device *dev) -{ - struct xl_private *xl_priv = netdev_priv(dev); - u8 __iomem * xl_mmio = xl_priv->xl_mmio ; - - int i ; - - writel(MMIO_BYTE_READ | MISR_RW, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - if (readb(xl_mmio + MMIO_MACDATA) != 0) { /* Misr not clear */ - for (i=0; i<6; i++) { - writel(MEM_BYTE_READ | 0xDFFE0 | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - while (readb(xl_mmio + MMIO_MACDATA) != 0) { - ; /* Empty Loop */ - } - } - } - - writel(MMIO_BYTE_WRITE | MISR_AND, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(0x80, xl_mmio + MMIO_MACDATA) ; - - return ; -} - -/* - * Change mtu size, this should work the same as olympic - */ - -static int xl_change_mtu(struct net_device *dev, int mtu) -{ - struct xl_private *xl_priv = netdev_priv(dev); - u16 max_mtu ; - - if (xl_priv->xl_ring_speed == 4) - max_mtu = 4500 ; - else - max_mtu = 18000 ; - - if (mtu > max_mtu) - return -EINVAL ; - if (mtu < 100) - return -EINVAL ; - - dev->mtu = mtu ; - xl_priv->pkt_buf_sz = mtu + TR_HLEN ; - - return 0 ; -} - -static void __devexit xl_remove_one (struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct xl_private *xl_priv=netdev_priv(dev); - - release_firmware(xl_priv->fw); - unregister_netdev(dev); - iounmap(xl_priv->xl_mmio) ; - pci_release_regions(pdev) ; - pci_set_drvdata(pdev,NULL) ; - free_netdev(dev); - return ; -} - -static struct pci_driver xl_3c359_driver = { - .name = "3c359", - .id_table = xl_pci_tbl, - .probe = xl_probe, - .remove = __devexit_p(xl_remove_one), -}; - -static int __init xl_pci_init (void) -{ - return pci_register_driver(&xl_3c359_driver); -} - - -static void __exit xl_pci_cleanup (void) -{ - pci_unregister_driver (&xl_3c359_driver); -} - -module_init(xl_pci_init); -module_exit(xl_pci_cleanup); - -MODULE_LICENSE("GPL") ; diff --git a/drivers/net/tokenring/3c359.h b/drivers/net/tokenring/3c359.h deleted file mode 100644 index bcb1a6b4a4c..00000000000 --- a/drivers/net/tokenring/3c359.h +++ /dev/null @@ -1,291 +0,0 @@ -/* - * 3c359.h (c) 2000 Mike Phillips (mikep@linuxtr.net) All Rights Reserved - * - * Linux driver for 3Com 3C359 Token Link PCI XL cards. - * - * This software may be used and distributed according to the terms - * of the GNU General Public License Version 2 or (at your option) - * any later verion, incorporated herein by reference. 
- */ - -/* Memory Access Commands */ -#define IO_BYTE_READ 0x28 << 24 -#define IO_BYTE_WRITE 0x18 << 24 -#define IO_WORD_READ 0x20 << 24 -#define IO_WORD_WRITE 0x10 << 24 -#define MMIO_BYTE_READ 0x88 << 24 -#define MMIO_BYTE_WRITE 0x48 << 24 -#define MMIO_WORD_READ 0x80 << 24 -#define MMIO_WORD_WRITE 0x40 << 24 -#define MEM_BYTE_READ 0x8C << 24 -#define MEM_BYTE_WRITE 0x4C << 24 -#define MEM_WORD_READ 0x84 << 24 -#define MEM_WORD_WRITE 0x44 << 24 - -#define PMBAR 0x1C80 -#define PMB_CPHOLD (1<<10) - -#define CPATTENTION 0x180D -#define CPA_PMBARVIS (1<<7) -#define CPA_MEMWREN (1<<6) - -#define SWITCHSETTINGS 0x1C88 -#define EECONTROL 0x1C8A -#define EEDATA 0x1C8C -#define EEREAD 0x0080 -#define EEWRITE 0x0040 -#define EEERASE 0x0060 -#define EE_ENABLE_WRITE 0x0030 -#define EEBUSY (1<<15) - -#define WRBR 0xCDE02 -#define WWOR 0xCDE04 -#define WWCR 0xCDE06 -#define MACSTATUS 0xCDE08 -#define MISR_RW 0xCDE0B -#define MISR_AND 0xCDE2B -#define MISR_SET 0xCDE4B -#define RXBUFAREA 0xCDE10 -#define RXEARLYTHRESH 0xCDE12 -#define TXSTARTTHRESH 0x58 -#define DNPRIREQTHRESH 0x2C - -#define MISR_CSRB (1<<5) -#define MISR_RASB (1<<4) -#define MISR_SRBFR (1<<3) -#define MISR_ASBFR (1<<2) -#define MISR_ARBF (1<<1) - -/* MISR Flags memory locations */ -#define MF_SSBF 0xDFFE0 -#define MF_ARBF 0xDFFE1 -#define MF_ASBFR 0xDFFE2 -#define MF_SRBFR 0xDFFE3 -#define MF_RASB 0xDFFE4 -#define MF_CSRB 0xDFFE5 - -#define MMIO_MACDATA 0x10 -#define MMIO_MAC_ACCESS_CMD 0x14 -#define MMIO_TIMER 0x1A -#define MMIO_DMA_CTRL 0x20 -#define MMIO_DNLISTPTR 0x24 -#define MMIO_HASHFILTER 0x28 -#define MMIO_CONFIG 0x29 -#define MMIO_DNPRIREQTHRESH 0x2C -#define MMIO_DNPOLL 0x2D -#define MMIO_UPPKTSTATUS 0x30 -#define MMIO_FREETIMER 0x34 -#define MMIO_COUNTDOWN 0x36 -#define MMIO_UPLISTPTR 0x38 -#define MMIO_UPPOLL 0x3C -#define MMIO_UPBURSTTHRESH 0x40 -#define MMIO_DNBURSTTHRESH 0x41 -#define MMIO_INTSTATUS_AUTO 0x56 -#define MMIO_TXSTARTTHRESH 0x58 -#define MMIO_INTERRUPTENABLE 0x5A -#define MMIO_INDICATIONENABLE 0x5C -#define MMIO_COMMAND 0x5E /* These two are meant to be the same */ -#define MMIO_INTSTATUS 0x5E /* Makes the code more readable this way */ -#define INTSTAT_CMD_IN_PROGRESS (1<<12) -#define INTSTAT_SRB (1<<14) -#define INTSTAT_INTLATCH (1<<0) - -/* Indication / Interrupt Mask - * Annoyingly the bits to be set in the indication and interrupt enable - * do not match with the actual bits received in the interrupt, although - * they are in the same order. - * The mapping for the indication / interrupt are: - * Bit Indication / Interrupt - * 0 HostError - * 1 txcomplete - * 2 updneeded - * 3 rxcomplete - * 4 intrequested - * 5 macerror - * 6 dncomplete - * 7 upcomplete - * 8 txunderrun - * 9 asbf - * 10 srbr - * 11 arbc - * - * The only ones we don't want to receive are txcomplete and rxcomplete - * we use dncomplete and upcomplete instead. 
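Editor's aside, not part of the patch hunk above: the INT_MASK value defined just below follows directly from the bit table in the comment above.

/*
 * Worked derivation: with indication bits 0..11 all set (0xFFF), clearing
 * txcomplete (bit 1) and rxcomplete (bit 3), i.e. the two indications the
 * comment above says we do not want, gives
 *
 *	0xFFF & ~((1 << 1) | (1 << 3)) = 0xFFF & ~0x00A = 0xFF5
 *
 * which is exactly the INT_MASK value defined below.
 */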
- */ - -#define INT_MASK 0xFF5 - -/* Note the subtle difference here, IND and INT */ - -#define SETINDENABLE (8<<12) -#define SETINTENABLE (7<<12) -#define SRBBIT (1<<10) -#define ASBBIT (1<<9) -#define ARBBIT (1<<11) - -#define SRB 0xDFE90 -#define ASB 0xDFED0 -#define ARB 0xD0000 -#define SCRATCH 0xDFEF0 - -#define INT_REQUEST 0x6000 /* (6 << 12) */ -#define ACK_INTERRUPT 0x6800 /* (13 <<11) */ -#define GLOBAL_RESET 0x00 -#define DNDISABLE 0x5000 -#define DNENABLE 0x4800 -#define DNSTALL 0x3002 -#define DNRESET 0x5800 -#define DNUNSTALL 0x3003 -#define UPRESET 0x2800 -#define UPSTALL 0x3000 -#define UPUNSTALL 0x3001 -#define SETCONFIG 0x4000 -#define SETTXSTARTTHRESH 0x9800 - -/* Received Interrupts */ -#define ASBFINT (1<<13) -#define SRBRINT (1<<14) -#define ARBCINT (1<<15) -#define TXUNDERRUN (1<<11) - -#define UPCOMPINT (1<<10) -#define DNCOMPINT (1<<9) -#define HARDERRINT (1<<7) -#define RXCOMPLETE (1<<4) -#define TXCOMPINT (1<<2) -#define HOSTERRINT (1<<1) - -/* Receive descriptor bits */ -#define RXOVERRUN cpu_to_le32(1<<19) -#define RXFC cpu_to_le32(1<<21) -#define RXAR cpu_to_le32(1<<22) -#define RXUPDCOMPLETE cpu_to_le32(1<<23) -#define RXUPDFULL cpu_to_le32(1<<24) -#define RXUPLASTFRAG cpu_to_le32(1<<31) - -/* Transmit descriptor bits */ -#define TXDNCOMPLETE cpu_to_le32(1<<16) -#define TXTXINDICATE cpu_to_le32(1<<27) -#define TXDPDEMPTY cpu_to_le32(1<<29) -#define TXDNINDICATE cpu_to_le32(1<<31) -#define TXDNFRAGLAST cpu_to_le32(1<<31) - -/* Interrupts to Acknowledge */ -#define LATCH_ACK 1 -#define TXCOMPACK (1<<1) -#define INTREQACK (1<<2) -#define DNCOMPACK (1<<3) -#define UPCOMPACK (1<<4) -#define ASBFACK (1<<5) -#define SRBRACK (1<<6) -#define ARBCACK (1<<7) - -#define XL_IO_SPACE 128 -#define SRB_COMMAND_SIZE 50 - -/* Adapter Commands */ -#define REQUEST_INT 0x00 -#define MODIFY_OPEN_PARMS 0x01 -#define RESTORE_OPEN_PARMS 0x02 -#define OPEN_NIC 0x03 -#define CLOSE_NIC 0x04 -#define SET_SLEEP_MODE 0x05 -#define SET_GROUP_ADDRESS 0x06 -#define SET_FUNC_ADDRESS 0x07 -#define READ_LOG 0x08 -#define SET_MULTICAST_MODE 0x0C -#define CHANGE_WAKEUP_PATTERN 0x0D -#define GET_STATISTICS 0x13 -#define SET_RECEIVE_MODE 0x1F - -/* ARB Commands */ -#define RECEIVE_DATA 0x81 -#define RING_STATUS_CHANGE 0x84 - -/* ASB Commands */ -#define ASB_RECEIVE_DATE 0x81 - -/* Defines for LAN STATUS CHANGE reports */ -#define LSC_SIG_LOSS 0x8000 -#define LSC_HARD_ERR 0x4000 -#define LSC_SOFT_ERR 0x2000 -#define LSC_TRAN_BCN 0x1000 -#define LSC_LWF 0x0800 -#define LSC_ARW 0x0400 -#define LSC_FPE 0x0200 -#define LSC_RR 0x0100 -#define LSC_CO 0x0080 -#define LSC_SS 0x0040 -#define LSC_RING_REC 0x0020 -#define LSC_SR_CO 0x0010 -#define LSC_FDX_MODE 0x0004 - -#define XL_MAX_ADAPTERS 8 /* 0x08 __MODULE_STRING can't hand 0xnn */ - -/* 3c359 defaults for buffers */ - -#define XL_RX_RING_SIZE 16 /* must be a power of 2 */ -#define XL_TX_RING_SIZE 16 /* must be a power of 2 */ - -#define PKT_BUF_SZ 4096 /* Default packet size */ - -/* 3c359 data structures */ - -struct xl_tx_desc { - __le32 dnnextptr; - __le32 framestartheader; - __le32 buffer; - __le32 buffer_length; -}; - -struct xl_rx_desc { - __le32 upnextptr; - __le32 framestatus; - __le32 upfragaddr; - __le32 upfraglen; -}; - -struct xl_private { - - - /* These two structures must be aligned on 8 byte boundaries */ - - /* struct xl_rx_desc xl_rx_ring[XL_RX_RING_SIZE]; */ - /* struct xl_tx_desc xl_tx_ring[XL_TX_RING_SIZE]; */ - struct xl_rx_desc *xl_rx_ring ; - struct xl_tx_desc *xl_tx_ring ; - struct sk_buff *tx_ring_skb[XL_TX_RING_SIZE], 
*rx_ring_skb[XL_RX_RING_SIZE]; - int tx_ring_head, tx_ring_tail ; - int rx_ring_tail, rx_ring_no ; - int free_ring_entries ; - - u16 srb; - u16 arb; - u16 asb; - - u8 __iomem *xl_mmio; - const char *xl_card_name; - struct pci_dev *pdev ; - - spinlock_t xl_lock ; - - volatile int srb_queued; - struct wait_queue *srb_wait; - volatile int asb_queued; - - u16 mac_buffer ; - u16 xl_lan_status ; - u8 xl_ring_speed ; - u16 pkt_buf_sz ; - u8 xl_message_level; - u16 xl_copy_all_options ; - unsigned char xl_functional_addr[4] ; - u16 xl_addr_table_addr, xl_parms_addr ; - u8 xl_laa[6] ; - u32 rx_ring_dma_addr ; - u32 tx_ring_dma_addr ; - - /* firmware section */ - const struct firmware *fw; -}; - diff --git a/drivers/net/tokenring/Kconfig b/drivers/net/tokenring/Kconfig deleted file mode 100644 index 45550d42b36..00000000000 --- a/drivers/net/tokenring/Kconfig +++ /dev/null @@ -1,199 +0,0 @@ -# -# Token Ring driver configuration -# - -# So far, we only have PCI, ISA, and MCA token ring devices -menuconfig TR - bool "Token Ring driver support" - depends on NETDEVICES && !UML - depends on (PCI || ISA || MCA || CCW || PCMCIA) - help - Token Ring is IBM's way of communication on a local network; the - rest of the world uses Ethernet. To participate on a Token Ring - network, you need a special Token ring network card. If you are - connected to such a Token Ring network and want to use your Token - Ring card under Linux, say Y here and to the driver for your - particular card below and read the Token-Ring mini-HOWTO, available - from <http://www.tldp.org/docs.html#howto>. Most people can - say N here. - -if TR - -config WANT_LLC - def_bool y - select LLC - -config PCMCIA_IBMTR - tristate "IBM PCMCIA tokenring adapter support" - depends on IBMTR!=y && PCMCIA - ---help--- - Say Y here if you intend to attach this type of Token Ring PCMCIA - card to your computer. You then also need to say Y to "Token Ring - driver support". - - To compile this driver as a module, choose M here: the module will be - called ibmtr_cs. - -config IBMTR - tristate "IBM Tropic chipset based adapter support" - depends on ISA || MCA - ---help--- - This is support for all IBM Token Ring cards that don't use DMA. If - you have such a beast, say Y and read the Token-Ring mini-HOWTO, - available from <http://www.tldp.org/docs.html#howto>. - - Warning: this driver will almost definitely fail if more than one - active Token Ring card is present. - - To compile this driver as a module, choose M here: the module will be - called ibmtr. - -config IBMOL - tristate "IBM Olympic chipset PCI adapter support" - depends on PCI - ---help--- - This is support for all non-Lanstreamer IBM PCI Token Ring Cards. - Specifically this is all IBM PCI, PCI Wake On Lan, PCI II, PCI II - Wake On Lan, and PCI 100/16/4 adapters. - - If you have such an adapter, say Y and read the Token-Ring - mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here: the module will be - called olympic. - - Also read <file:Documentation/networking/olympic.txt> or check the - Linux Token Ring Project site for the latest information at - <http://www.linuxtr.net/>. - -config IBMLS - tristate "IBM Lanstreamer chipset PCI adapter support" - depends on PCI && !64BIT - help - This is support for IBM Lanstreamer PCI Token Ring Cards. - - If you have such an adapter, say Y and read the Token-Ring - mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>. 
- - To compile this driver as a module, choose M here: the module will be - called lanstreamer. - -config 3C359 - tristate "3Com 3C359 Token Link Velocity XL adapter support" - depends on PCI - ---help--- - This is support for the 3Com PCI Velocity XL cards, specifically - the 3Com 3C359, please note this is not for the 3C339 cards, you - should use the tms380 driver instead. - - If you have such an adapter, say Y and read the Token-Ring - mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>. - - To compile this driver as a module, choose M here: the module will be - called 3c359. - - Also read the file <file:Documentation/networking/3c359.txt> or check the - Linux Token Ring Project site for the latest information at - <http://www.linuxtr.net> - -config TMS380TR - tristate "Generic TMS380 Token Ring ISA/PCI adapter support" - depends on PCI || ISA && ISA_DMA_API || MCA - select FW_LOADER - ---help--- - This driver provides generic support for token ring adapters - based on the Texas Instruments TMS380 series chipsets. This - includes the SysKonnect TR4/16(+) ISA (SK-4190), SysKonnect - TR4/16(+) PCI (SK-4590), SysKonnect TR4/16 PCI (SK-4591), - Compaq 4/16 PCI, Thomas-Conrad TC4048 4/16 PCI, and several - Madge adapters. If you say Y here, you will be asked to select - which cards to support below. If you're using modules, each - class of card will be supported by a separate module. - - If you have such an adapter and would like to use it, say Y and - read the Token-Ring mini-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - - Also read the file <file:Documentation/networking/tms380tr.txt> or - check <http://www.auk.cx/tms380tr/>. - - To compile this driver as a module, choose M here: the module will be - called tms380tr. - -config TMSPCI - tristate "Generic TMS380 PCI support" - depends on TMS380TR && PCI - ---help--- - This tms380 module supports generic TMS380-based PCI cards. - - These cards are known to work: - - Compaq 4/16 TR PCI - - SysKonnect TR4/16 PCI (SK-4590/SK-4591) - - Thomas-Conrad TC4048 PCI 4/16 - - 3Com Token Link Velocity - - To compile this driver as a module, choose M here: the module will be - called tmspci. - -config SKISA - tristate "SysKonnect TR4/16 ISA support" - depends on TMS380TR && ISA - help - This tms380 module supports SysKonnect TR4/16 ISA cards. - - These cards are known to work: - - SysKonnect TR4/16 ISA (SK-4190) - - To compile this driver as a module, choose M here: the module will be - called skisa. - -config PROTEON - tristate "Proteon ISA support" - depends on TMS380TR && ISA - help - This tms380 module supports Proteon ISA cards. - - These cards are known to work: - - Proteon 1392 - - Proteon 1392 plus - - To compile this driver as a module, choose M here: the module will be - called proteon. - -config ABYSS - tristate "Madge Smart 16/4 PCI Mk2 support" - depends on TMS380TR && PCI - help - This tms380 module supports the Madge Smart 16/4 PCI Mk2 - cards (51-02). - - To compile this driver as a module, choose M here: the module will be - called abyss. - -config MADGEMC - tristate "Madge Smart 16/4 Ringnode MicroChannel" - depends on TMS380TR && MCA - help - This tms380 module supports the Madge Smart 16/4 MC16 and MC32 - MicroChannel adapters. - - To compile this driver as a module, choose M here: the module will be - called madgemc. 
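As context for the TMS380TR entry above: the generic tms380tr core owns the netdev ops, the interrupt handler and the SIF command protocol, while each bus glue module (tmspci, skisa, proteon, abyss, madgemc) only supplies raw SIF register accessors and thin open/close wrappers. The sketch below is modelled on the abyss driver removed further down in this patch and shows the shape of that glue; it is an illustrative aside, not part of the patch, and the myglue_* names are hypothetical rather than taken from the tree.

    #include <linux/netdevice.h>
    #include <linux/trdevice.h>
    #include <asm/io.h>

    #include "tms380tr.h"

    /* Raw SIF accessors: the only bus-specific I/O the tms380tr core needs. */
    static unsigned short myglue_sifreadw(struct net_device *dev, unsigned short reg)
    {
            return inw(dev->base_addr + reg);
    }

    static void myglue_sifwritew(struct net_device *dev, unsigned short val,
                                 unsigned short reg)
    {
            outw(val, dev->base_addr + reg);
    }

    /*
     * Called from the bus probe routine after alloc_trdev() and after
     * request_irq(..., tms380tr_interrupt, ...), as abyss_attach() below does.
     * "parent" is the bus device, e.g. &pdev->dev for a PCI glue module.
     */
    static int myglue_setup(struct net_device *dev, struct device *parent)
    {
            struct net_local *tp;
            int ret;

            ret = tmsdev_init(dev, parent);         /* core private state */
            if (ret)
                    return ret;

            tp = netdev_priv(dev);
            tp->sifreadw  = myglue_sifreadw;        /* hand the core our accessors */
            tp->sifwritew = myglue_sifwritew;
            /* sifreadb/sifwriteb and setnselout are wired up the same way,
             * and dev->netdev_ops points at a copy of tms380tr_netdev_ops
             * with the module's own ndo_open/ndo_stop, as in abyss_init(). */

            return register_netdev(dev);
    }
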
- -config SMCTR - tristate "SMC ISA/MCA adapter support" - depends on (ISA || MCA_LEGACY) && (BROKEN || !64BIT) - ---help--- - This is support for the ISA and MCA SMC Token Ring cards, - specifically SMC TokenCard Elite (8115T) and SMC TokenCard Elite/A - (8115T/A) adapters. - - If you have such an adapter and would like to use it, say Y or M and - read the Token-Ring mini-HOWTO, available from - <http://www.tldp.org/docs.html#howto> and the file - <file:Documentation/networking/smctr.txt>. - - To compile this driver as a module, choose M here: the module will be - called smctr. - -endif # TR diff --git a/drivers/net/tokenring/Makefile b/drivers/net/tokenring/Makefile deleted file mode 100644 index f1be8d97b7a..00000000000 --- a/drivers/net/tokenring/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -# -# Makefile for drivers/net/tokenring -# - -obj-$(CONFIG_PCMCIA_IBMTR) += ibmtr_cs.o -obj-$(CONFIG_IBMTR) += ibmtr.o -obj-$(CONFIG_IBMOL) += olympic.o -obj-$(CONFIG_IBMLS) += lanstreamer.o -obj-$(CONFIG_TMS380TR) += tms380tr.o -obj-$(CONFIG_ABYSS) += abyss.o -obj-$(CONFIG_MADGEMC) += madgemc.o -obj-$(CONFIG_PROTEON) += proteon.o -obj-$(CONFIG_TMSPCI) += tmspci.o -obj-$(CONFIG_SKISA) += skisa.o -obj-$(CONFIG_SMCTR) += smctr.o -obj-$(CONFIG_3C359) += 3c359.o diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c deleted file mode 100644 index b715e6b444d..00000000000 --- a/drivers/net/tokenring/abyss.c +++ /dev/null @@ -1,468 +0,0 @@ -/* - * abyss.c: Network driver for the Madge Smart 16/4 PCI Mk2 token ring card. - * - * Written 1999-2000 by Adam Fritzler - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * This driver module supports the following cards: - * - Madge Smart 16/4 PCI Mk2 - * - * Maintainer(s): - * AF Adam Fritzler - * - * Modification History: - * 30-Dec-99 AF Split off from the tms380tr driver. - * 22-Jan-00 AF Updated to use indirect read/writes - * 23-Nov-00 JG New PCI API, cleanups - * - * - * TODO: - * 1. See if we can use MMIO instead of inb/outb/inw/outw - * 2. 
Add support for Mk1 (has AT24 attached to the PCI - * config registers) - * - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/pci.h> -#include <linux/init.h> -#include <linux/netdevice.h> -#include <linux/trdevice.h> - -#include <asm/io.h> -#include <asm/irq.h> - -#include "tms380tr.h" -#include "abyss.h" /* Madge-specific constants */ - -static char version[] __devinitdata = -"abyss.c: v1.02 23/11/2000 by Adam Fritzler\n"; - -#define ABYSS_IO_EXTENT 64 - -static DEFINE_PCI_DEVICE_TABLE(abyss_pci_tbl) = { - { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2, - PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, }, - { } /* Terminating entry */ -}; -MODULE_DEVICE_TABLE(pci, abyss_pci_tbl); - -MODULE_LICENSE("GPL"); - -static int abyss_open(struct net_device *dev); -static int abyss_close(struct net_device *dev); -static void abyss_enable(struct net_device *dev); -static int abyss_chipset_init(struct net_device *dev); -static void abyss_read_eeprom(struct net_device *dev); -static unsigned short abyss_setnselout_pins(struct net_device *dev); - -static void at24_writedatabyte(unsigned long regaddr, unsigned char byte); -static int at24_sendfullcmd(unsigned long regaddr, unsigned char cmd, unsigned char addr); -static int at24_sendcmd(unsigned long regaddr, unsigned char cmd); -static unsigned char at24_readdatabit(unsigned long regaddr); -static unsigned char at24_readdatabyte(unsigned long regaddr); -static int at24_waitforack(unsigned long regaddr); -static int at24_waitfornack(unsigned long regaddr); -static void at24_setlines(unsigned long regaddr, unsigned char clock, unsigned char data); -static void at24_start(unsigned long regaddr); -static unsigned char at24_readb(unsigned long regaddr, unsigned char addr); - -static unsigned short abyss_sifreadb(struct net_device *dev, unsigned short reg) -{ - return inb(dev->base_addr + reg); -} - -static unsigned short abyss_sifreadw(struct net_device *dev, unsigned short reg) -{ - return inw(dev->base_addr + reg); -} - -static void abyss_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg) -{ - outb(val, dev->base_addr + reg); -} - -static void abyss_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg) -{ - outw(val, dev->base_addr + reg); -} - -static struct net_device_ops abyss_netdev_ops; - -static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - static int versionprinted; - struct net_device *dev; - struct net_local *tp; - int ret, pci_irq_line; - unsigned long pci_ioaddr; - - if (versionprinted++ == 0) - printk("%s", version); - - if (pci_enable_device(pdev)) - return -EIO; - - /* Remove I/O space marker in bit 0. */ - pci_irq_line = pdev->irq; - pci_ioaddr = pci_resource_start (pdev, 0); - - /* At this point we have found a valid card. */ - - dev = alloc_trdev(sizeof(struct net_local)); - if (!dev) - return -ENOMEM; - - if (!request_region(pci_ioaddr, ABYSS_IO_EXTENT, dev->name)) { - ret = -EBUSY; - goto err_out_trdev; - } - - ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED, - dev->name, dev); - if (ret) - goto err_out_region; - - dev->base_addr = pci_ioaddr; - dev->irq = pci_irq_line; - - printk("%s: Madge Smart 16/4 PCI Mk2 (Abyss)\n", dev->name); - printk("%s: IO: %#4lx IRQ: %d\n", - dev->name, pci_ioaddr, dev->irq); - /* - * The TMS SIF registers lay 0x10 above the card base address. 
- */ - dev->base_addr += 0x10; - - ret = tmsdev_init(dev, &pdev->dev); - if (ret) { - printk("%s: unable to get memory for dev->priv.\n", - dev->name); - goto err_out_irq; - } - - abyss_read_eeprom(dev); - - printk("%s: Ring Station Address: %pM\n", dev->name, dev->dev_addr); - - tp = netdev_priv(dev); - tp->setnselout = abyss_setnselout_pins; - tp->sifreadb = abyss_sifreadb; - tp->sifreadw = abyss_sifreadw; - tp->sifwriteb = abyss_sifwriteb; - tp->sifwritew = abyss_sifwritew; - - memcpy(tp->ProductID, "Madge PCI 16/4 Mk2", PROD_ID_SIZE + 1); - - dev->netdev_ops = &abyss_netdev_ops; - - pci_set_drvdata(pdev, dev); - SET_NETDEV_DEV(dev, &pdev->dev); - - ret = register_netdev(dev); - if (ret) - goto err_out_tmsdev; - return 0; - -err_out_tmsdev: - pci_set_drvdata(pdev, NULL); - tmsdev_term(dev); -err_out_irq: - free_irq(pdev->irq, dev); -err_out_region: - release_region(pci_ioaddr, ABYSS_IO_EXTENT); -err_out_trdev: - free_netdev(dev); - return ret; -} - -static unsigned short abyss_setnselout_pins(struct net_device *dev) -{ - unsigned short val = 0; - struct net_local *tp = netdev_priv(dev); - - if(tp->DataRate == SPEED_4) - val |= 0x01; /* Set 4Mbps */ - else - val |= 0x00; /* Set 16Mbps */ - - return val; -} - -/* - * The following Madge boards should use this code: - * - Smart 16/4 PCI Mk2 (Abyss) - * - Smart 16/4 PCI Mk1 (PCI T) - * - Smart 16/4 Client Plus PnP (Big Apple) - * - Smart 16/4 Cardbus Mk2 - * - * These access an Atmel AT24 SEEPROM using their glue chip registers. - * - */ -static void at24_writedatabyte(unsigned long regaddr, unsigned char byte) -{ - int i; - - for (i = 0; i < 8; i++) { - at24_setlines(regaddr, 0, (byte >> (7-i))&0x01); - at24_setlines(regaddr, 1, (byte >> (7-i))&0x01); - at24_setlines(regaddr, 0, (byte >> (7-i))&0x01); - } -} - -static int at24_sendfullcmd(unsigned long regaddr, unsigned char cmd, unsigned char addr) -{ - if (at24_sendcmd(regaddr, cmd)) { - at24_writedatabyte(regaddr, addr); - return at24_waitforack(regaddr); - } - return 0; -} - -static int at24_sendcmd(unsigned long regaddr, unsigned char cmd) -{ - int i; - - for (i = 0; i < 10; i++) { - at24_start(regaddr); - at24_writedatabyte(regaddr, cmd); - if (at24_waitforack(regaddr)) - return 1; - } - return 0; -} - -static unsigned char at24_readdatabit(unsigned long regaddr) -{ - unsigned char val; - - at24_setlines(regaddr, 0, 1); - at24_setlines(regaddr, 1, 1); - val = (inb(regaddr) & AT24_DATA)?1:0; - at24_setlines(regaddr, 1, 1); - at24_setlines(regaddr, 0, 1); - return val; -} - -static unsigned char at24_readdatabyte(unsigned long regaddr) -{ - unsigned char data = 0; - int i; - - for (i = 0; i < 8; i++) { - data <<= 1; - data |= at24_readdatabit(regaddr); - } - - return data; -} - -static int at24_waitforack(unsigned long regaddr) -{ - int i; - - for (i = 0; i < 10; i++) { - if ((at24_readdatabit(regaddr) & 0x01) == 0x00) - return 1; - } - return 0; -} - -static int at24_waitfornack(unsigned long regaddr) -{ - int i; - for (i = 0; i < 10; i++) { - if ((at24_readdatabit(regaddr) & 0x01) == 0x01) - return 1; - } - return 0; -} - -static void at24_setlines(unsigned long regaddr, unsigned char clock, unsigned char data) -{ - unsigned char val = AT24_ENABLE; - if (clock) - val |= AT24_CLOCK; - if (data) - val |= AT24_DATA; - - outb(val, regaddr); - tms380tr_wait(20); /* Very necessary. 
*/ -} - -static void at24_start(unsigned long regaddr) -{ - at24_setlines(regaddr, 0, 1); - at24_setlines(regaddr, 1, 1); - at24_setlines(regaddr, 1, 0); - at24_setlines(regaddr, 0, 1); -} - -static unsigned char at24_readb(unsigned long regaddr, unsigned char addr) -{ - unsigned char data = 0xff; - - if (at24_sendfullcmd(regaddr, AT24_WRITE, addr)) { - if (at24_sendcmd(regaddr, AT24_READ)) { - data = at24_readdatabyte(regaddr); - if (!at24_waitfornack(regaddr)) - data = 0xff; - } - } - return data; -} - - -/* - * Enable basic functions of the Madge chipset needed - * for initialization. - */ -static void abyss_enable(struct net_device *dev) -{ - unsigned char reset_reg; - unsigned long ioaddr; - - ioaddr = dev->base_addr; - reset_reg = inb(ioaddr + PCIBM2_RESET_REG); - reset_reg |= PCIBM2_RESET_REG_CHIP_NRES; - outb(reset_reg, ioaddr + PCIBM2_RESET_REG); - tms380tr_wait(100); -} - -/* - * Enable the functions of the Madge chipset needed for - * full working order. - */ -static int abyss_chipset_init(struct net_device *dev) -{ - unsigned char reset_reg; - unsigned long ioaddr; - - ioaddr = dev->base_addr; - - reset_reg = inb(ioaddr + PCIBM2_RESET_REG); - - reset_reg |= PCIBM2_RESET_REG_CHIP_NRES; - outb(reset_reg, ioaddr + PCIBM2_RESET_REG); - - reset_reg &= ~(PCIBM2_RESET_REG_CHIP_NRES | - PCIBM2_RESET_REG_FIFO_NRES | - PCIBM2_RESET_REG_SIF_NRES); - outb(reset_reg, ioaddr + PCIBM2_RESET_REG); - - tms380tr_wait(100); - - reset_reg |= PCIBM2_RESET_REG_CHIP_NRES; - outb(reset_reg, ioaddr + PCIBM2_RESET_REG); - - reset_reg |= PCIBM2_RESET_REG_SIF_NRES; - outb(reset_reg, ioaddr + PCIBM2_RESET_REG); - - reset_reg |= PCIBM2_RESET_REG_FIFO_NRES; - outb(reset_reg, ioaddr + PCIBM2_RESET_REG); - - outb(PCIBM2_INT_CONTROL_REG_SINTEN | - PCIBM2_INT_CONTROL_REG_PCI_ERR_ENABLE, - ioaddr + PCIBM2_INT_CONTROL_REG); - - outb(30, ioaddr + PCIBM2_FIFO_THRESHOLD); - - return 0; -} - -static inline void abyss_chipset_close(struct net_device *dev) -{ - unsigned long ioaddr; - - ioaddr = dev->base_addr; - outb(0, ioaddr + PCIBM2_RESET_REG); -} - -/* - * Read configuration data from the AT24 SEEPROM on Madge cards. 
- * - */ -static void abyss_read_eeprom(struct net_device *dev) -{ - struct net_local *tp; - unsigned long ioaddr; - unsigned short val; - int i; - - tp = netdev_priv(dev); - ioaddr = dev->base_addr; - - /* Must enable glue chip first */ - abyss_enable(dev); - - val = at24_readb(ioaddr + PCIBM2_SEEPROM_REG, - PCIBM2_SEEPROM_RING_SPEED); - tp->DataRate = val?SPEED_4:SPEED_16; /* set open speed */ - printk("%s: SEEPROM: ring speed: %dMb/sec\n", dev->name, tp->DataRate); - - val = at24_readb(ioaddr + PCIBM2_SEEPROM_REG, - PCIBM2_SEEPROM_RAM_SIZE) * 128; - printk("%s: SEEPROM: adapter RAM: %dkb\n", dev->name, val); - - dev->addr_len = 6; - for (i = 0; i < 6; i++) - dev->dev_addr[i] = at24_readb(ioaddr + PCIBM2_SEEPROM_REG, - PCIBM2_SEEPROM_BIA+i); -} - -static int abyss_open(struct net_device *dev) -{ - abyss_chipset_init(dev); - tms380tr_open(dev); - return 0; -} - -static int abyss_close(struct net_device *dev) -{ - tms380tr_close(dev); - abyss_chipset_close(dev); - return 0; -} - -static void __devexit abyss_detach (struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - - BUG_ON(!dev); - unregister_netdev(dev); - release_region(dev->base_addr-0x10, ABYSS_IO_EXTENT); - free_irq(dev->irq, dev); - tmsdev_term(dev); - free_netdev(dev); - pci_set_drvdata(pdev, NULL); -} - -static struct pci_driver abyss_driver = { - .name = "abyss", - .id_table = abyss_pci_tbl, - .probe = abyss_attach, - .remove = __devexit_p(abyss_detach), -}; - -static int __init abyss_init (void) -{ - abyss_netdev_ops = tms380tr_netdev_ops; - - abyss_netdev_ops.ndo_open = abyss_open; - abyss_netdev_ops.ndo_stop = abyss_close; - - return pci_register_driver(&abyss_driver); -} - -static void __exit abyss_rmmod (void) -{ - pci_unregister_driver (&abyss_driver); -} - -module_init(abyss_init); -module_exit(abyss_rmmod); - diff --git a/drivers/net/tokenring/abyss.h b/drivers/net/tokenring/abyss.h deleted file mode 100644 index b0a473b8913..00000000000 --- a/drivers/net/tokenring/abyss.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * abyss.h: Header for the abyss tms380tr module - * - * Authors: - * - Adam Fritzler - */ - -#ifndef __LINUX_MADGETR_H -#define __LINUX_MADGETR_H - -#ifdef __KERNEL__ - -/* - * For Madge Smart 16/4 PCI Mk2. Since we increment the base address - * to get everything correct for the TMS SIF, we do these as negatives - * as they fall below the SIF in addressing. - */ -#define PCIBM2_INT_STATUS_REG ((short)-15)/* 0x01 */ -#define PCIBM2_INT_CONTROL_REG ((short)-14)/* 0x02 */ -#define PCIBM2_RESET_REG ((short)-12)/* 0x04 */ -#define PCIBM2_SEEPROM_REG ((short)-9) /* 0x07 */ - -#define PCIBM2_INT_CONTROL_REG_SINTEN 0x02 -#define PCIBM2_INT_CONTROL_REG_PCI_ERR_ENABLE 0x80 -#define PCIBM2_INT_STATUS_REG_PCI_ERR 0x80 - -#define PCIBM2_RESET_REG_CHIP_NRES 0x01 -#define PCIBM2_RESET_REG_FIFO_NRES 0x02 -#define PCIBM2_RESET_REG_SIF_NRES 0x04 - -#define PCIBM2_FIFO_THRESHOLD 0x21 -#define PCIBM2_BURST_LENGTH 0x22 - -/* - * Bits in PCIBM2_SEEPROM_REG. - */ -#define AT24_ENABLE 0x04 -#define AT24_DATA 0x02 -#define AT24_CLOCK 0x01 - -/* - * AT24 Commands. - */ -#define AT24_WRITE 0xA0 -#define AT24_READ 0xA1 - -/* - * Addresses in AT24 SEEPROM. 
- */ -#define PCIBM2_SEEPROM_BIA 0x12 -#define PCIBM2_SEEPROM_RING_SPEED 0x18 -#define PCIBM2_SEEPROM_RAM_SIZE 0x1A -#define PCIBM2_SEEPROM_HWF1 0x1C -#define PCIBM2_SEEPROM_HWF2 0x1E - - -#endif /* __KERNEL__ */ -#endif /* __LINUX_MADGETR_H */ diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c deleted file mode 100644 index b5c8c18f504..00000000000 --- a/drivers/net/tokenring/ibmtr.c +++ /dev/null @@ -1,1964 +0,0 @@ -/* ibmtr.c: A shared-memory IBM Token Ring 16/4 driver for linux - * - * Written 1993 by Mark Swanson and Peter De Schrijver. - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * This device driver should work with Any IBM Token Ring Card that does - * not use DMA. - * - * I used Donald Becker's (becker@scyld.com) device driver work - * as a base for most of my initial work. - * - * Changes by Peter De Schrijver - * (Peter.Deschrijver@linux.cc.kuleuven.ac.be) : - * - * + changed name to ibmtr.c in anticipation of other tr boards. - * + changed reset code and adapter open code. - * + added SAP open code. - * + a first attempt to write interrupt, transmit and receive routines. - * - * Changes by David W. Morris (dwm@shell.portal.com) : - * 941003 dwm: - Restructure tok_probe for multiple adapters, devices. - * + Add comments, misc reorg for clarity. - * + Flatten interrupt handler levels. - * - * Changes by Farzad Farid (farzy@zen.via.ecp.fr) - * and Pascal Andre (andre@chimay.via.ecp.fr) (March 9 1995) : - * + multi ring support clean up. - * + RFC1042 compliance enhanced. - * - * Changes by Pascal Andre (andre@chimay.via.ecp.fr) (September 7 1995) : - * + bug correction in tr_tx - * + removed redundant information display - * + some code reworking - * - * Changes by Michel Lespinasse (walken@via.ecp.fr), - * Yann Doussot (doussot@via.ecp.fr) and Pascal Andre (andre@via.ecp.fr) - * (February 18, 1996) : - * + modified shared memory and mmio access port the driver to - * alpha platform (structure access -> readb/writeb) - * - * Changes by Steve Kipisz (bungy@ibm.net or kipisz@vnet.ibm.com) - * (January 18 1996): - * + swapped WWOR and WWCR in ibmtr.h - * + moved some init code from tok_probe into trdev_init. The - * PCMCIA code can call trdev_init to complete initializing - * the driver. - * + added -DPCMCIA to support PCMCIA - * + detecting PCMCIA Card Removal in interrupt handler. If - * ISRP is FF, then a PCMCIA card has been removed - * 10/2000 Burt needed a new method to avoid crashing the OS - * - * Changes by Paul Norton (pnorton@cts.com) : - * + restructured the READ.LOG logic to prevent the transmit SRB - * from being rudely overwritten before the transmit cycle is - * complete. (August 15 1996) - * + completed multiple adapter support. (November 20 1996) - * + implemented csum_partial_copy in tr_rx and increased receive - * buffer size and count. Minor fixes. (March 15, 1997) - * - * Changes by Christopher Turcksin <wabbit@rtfc.demon.co.uk> - * + Now compiles ok as a module again. - * - * Changes by Paul Norton (pnorton@ieee.org) : - * + moved the header manipulation code in tr_tx and tr_rx to - * net/802/tr.c. (July 12 1997) - * + add retry and timeout on open if cable disconnected. (May 5 1998) - * + lifted 2000 byte mtu limit. now depends on shared-RAM size. - * May 25 1998) - * + can't allocate 2k recv buff at 8k shared-RAM. 
(20 October 1998) - * - * Changes by Joel Sloan (jjs@c-me.com) : - * + disable verbose debug messages by default - to enable verbose - * debugging, edit the IBMTR_DEBUG_MESSAGES define below - * - * Changes by Mike Phillips <phillim@amtrak.com> : - * + Added extra #ifdef's to work with new PCMCIA Token Ring Code. - * The PCMCIA code now just sets up the card so it can be recognized - * by ibmtr_probe. Also checks allocated memory vs. on-board memory - * for correct figure to use. - * - * Changes by Tim Hockin (thockin@isunix.it.ilstu.edu) : - * + added spinlocks for SMP sanity (10 March 1999) - * - * Changes by Jochen Friedrich to enable RFC1469 Option 2 multicasting - * i.e. using functional address C0 00 00 04 00 00 to transmit and - * receive multicast packets. - * - * Changes by Mike Sullivan (based on original sram patch by Dave Grothe - * to support windowing into on adapter shared ram. - * i.e. Use LANAID to setup a PnP configuration with 16K RAM. Paging - * will shift this 16K window over the entire available shared RAM. - * - * Changes by Peter De Schrijver (p2@mind.be) : - * + fixed a problem with PCMCIA card removal - * - * Change by Mike Sullivan et al.: - * + added turbo card support. No need to use lanaid to configure - * the adapter into isa compatibility mode. - * - * Changes by Burt Silverman to allow the computer to behave nicely when - * a cable is pulled or not in place, or a PCMCIA card is removed hot. - */ - -/* change the define of IBMTR_DEBUG_MESSAGES to a nonzero value -in the event that chatty debug messages are desired - jjs 12/30/98 */ - -#define IBMTR_DEBUG_MESSAGES 0 - -#include <linux/module.h> -#include <linux/sched.h> - -#ifdef PCMCIA /* required for ibmtr_cs.c to build */ -#undef MODULE /* yes, really */ -#undef ENABLE_PAGING -#else -#define ENABLE_PAGING 1 -#endif - -/* changes the output format of driver initialization */ -#define TR_VERBOSE 0 - -/* some 95 OS send many non UI frame; this allow removing the warning */ -#define TR_FILTERNONUI 1 - -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/netdevice.h> -#include <linux/ip.h> -#include <linux/trdevice.h> -#include <linux/ibmtr.h> - -#include <net/checksum.h> - -#include <asm/io.h> - -#define DPRINTK(format, args...) printk("%s: " format, dev->name , ## args) -#define DPRINTD(format, args...) 
DummyCall("%s: " format, dev->name , ## args) - -/* version and credits */ -#ifndef PCMCIA -static char version[] __devinitdata = - "\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n" - " v2.1.125 10/20/98 Paul Norton <pnorton@ieee.org>\n" - " v2.2.0 12/30/98 Joel Sloan <jjs@c-me.com>\n" - " v2.2.1 02/08/00 Mike Sullivan <sullivam@us.ibm.com>\n" - " v2.2.2 07/27/00 Burt Silverman <burts@us.ibm.com>\n" - " v2.4.0 03/01/01 Mike Sullivan <sullivan@us.ibm.com>\n"; -#endif - -/* this allows displaying full adapter information */ - -static char *channel_def[] __devinitdata = { "ISA", "MCA", "ISA P&P" }; - -static char pcchannelid[] __devinitdata = { - 0x05, 0x00, 0x04, 0x09, - 0x04, 0x03, 0x04, 0x0f, - 0x03, 0x06, 0x03, 0x01, - 0x03, 0x01, 0x03, 0x00, - 0x03, 0x09, 0x03, 0x09, - 0x03, 0x00, 0x02, 0x00 -}; - -static char mcchannelid[] __devinitdata = { - 0x04, 0x0d, 0x04, 0x01, - 0x05, 0x02, 0x05, 0x03, - 0x03, 0x06, 0x03, 0x03, - 0x05, 0x08, 0x03, 0x04, - 0x03, 0x05, 0x03, 0x01, - 0x03, 0x08, 0x02, 0x00 -}; - -static char __devinit *adapter_def(char type) -{ - switch (type) { - case 0xF: return "PC Adapter | PC Adapter II | Adapter/A"; - case 0xE: return "16/4 Adapter | 16/4 Adapter/A (long)"; - case 0xD: return "16/4 Adapter/A (short) | 16/4 ISA-16 Adapter"; - case 0xC: return "Auto 16/4 Adapter"; - default: return "adapter (unknown type)"; - } -}; - -#define TRC_INIT 0x01 /* Trace initialization & PROBEs */ -#define TRC_INITV 0x02 /* verbose init trace points */ -static unsigned char ibmtr_debug_trace = 0; - -static int ibmtr_probe1(struct net_device *dev, int ioaddr); -static unsigned char get_sram_size(struct tok_info *adapt_info); -static int trdev_init(struct net_device *dev); -static int tok_open(struct net_device *dev); -static int tok_init_card(struct net_device *dev); -static void tok_open_adapter(unsigned long dev_addr); -static void open_sap(unsigned char type, struct net_device *dev); -static void tok_set_multicast_list(struct net_device *dev); -static netdev_tx_t tok_send_packet(struct sk_buff *skb, - struct net_device *dev); -static int tok_close(struct net_device *dev); -static irqreturn_t tok_interrupt(int irq, void *dev_id); -static void initial_tok_int(struct net_device *dev); -static void tr_tx(struct net_device *dev); -static void tr_rx(struct net_device *dev); -static void ibmtr_reset_timer(struct timer_list*tmr,struct net_device *dev); -static void tok_rerun(unsigned long dev_addr); -static void ibmtr_readlog(struct net_device *dev); -static int ibmtr_change_mtu(struct net_device *dev, int mtu); -static void find_turbo_adapters(int *iolist); - -static int ibmtr_portlist[IBMTR_MAX_ADAPTERS+1] __devinitdata = { - 0xa20, 0xa24, 0, 0, 0 -}; -static int __devinitdata turbo_io[IBMTR_MAX_ADAPTERS] = {0}; -static int __devinitdata turbo_irq[IBMTR_MAX_ADAPTERS] = {0}; -static int __devinitdata turbo_searched = 0; - -#ifndef PCMCIA -static __u32 ibmtr_mem_base __devinitdata = 0xd0000; -#endif - -static void __devinit PrtChanID(char *pcid, short stride) -{ - short i, j; - for (i = 0, j = 0; i < 24; i++, j += stride) - printk("%1x", ((int) pcid[j]) & 0x0f); - printk("\n"); -} - -static void __devinit HWPrtChanID(void __iomem *pcid, short stride) -{ - short i, j; - for (i = 0, j = 0; i < 24; i++, j += stride) - printk("%1x", ((int) readb(pcid + j)) & 0x0f); - printk("\n"); -} - -/* We have to ioremap every checked address, because isa_readb is - * going away. 
- */ - -static void __devinit find_turbo_adapters(int *iolist) -{ - int ram_addr; - int index=0; - void __iomem *chanid; - int found_turbo=0; - unsigned char *tchanid, ctemp; - int i, j; - unsigned long jif; - void __iomem *ram_mapped ; - - if (turbo_searched == 1) return; - turbo_searched=1; - for (ram_addr=0xC0000; ram_addr < 0xE0000; ram_addr+=0x2000) { - - __u32 intf_tbl=0; - - found_turbo=1; - ram_mapped = ioremap((u32)ram_addr,0x1fff) ; - if (ram_mapped==NULL) - continue ; - chanid=(CHANNEL_ID + ram_mapped); - tchanid=pcchannelid; - ctemp=readb(chanid) & 0x0f; - if (ctemp != *tchanid) continue; - for (i=2,j=1; i<=46; i=i+2,j++) { - if ((readb(chanid+i) & 0x0f) != tchanid[j]){ - found_turbo=0; - break; - } - } - if (!found_turbo) continue; - - writeb(0x90, ram_mapped+0x1E01); - for(i=2; i<0x0f; i++) { - writeb(0x00, ram_mapped+0x1E01+i); - } - writeb(0x00, ram_mapped+0x1E01); - for(jif=jiffies+TR_BUSY_INTERVAL; time_before_eq(jiffies,jif);); - intf_tbl=ntohs(readw(ram_mapped+ACA_OFFSET+ACA_RW+WRBR_EVEN)); - if (intf_tbl) { -#if IBMTR_DEBUG_MESSAGES - printk("ibmtr::find_turbo_adapters, Turbo found at " - "ram_addr %x\n",ram_addr); - printk("ibmtr::find_turbo_adapters, interface_table "); - for(i=0; i<6; i++) { - printk("%x:",readb(ram_addr+intf_tbl+i)); - } - printk("\n"); -#endif - turbo_io[index]=ntohs(readw(ram_mapped+intf_tbl+4)); - turbo_irq[index]=readb(ram_mapped+intf_tbl+3); - outb(0, turbo_io[index] + ADAPTRESET); - for(jif=jiffies+TR_RST_TIME;time_before_eq(jiffies,jif);); - outb(0, turbo_io[index] + ADAPTRESETREL); - index++; - continue; - } -#if IBMTR_DEBUG_MESSAGES - printk("ibmtr::find_turbo_adapters, ibmtr card found at" - " %x but not a Turbo model\n",ram_addr); -#endif - iounmap(ram_mapped) ; - } /* for */ - for(i=0; i<IBMTR_MAX_ADAPTERS; i++) { - if(!turbo_io[i]) break; - for (j=0; j<IBMTR_MAX_ADAPTERS; j++) { - if ( iolist[j] && iolist[j] != turbo_io[i]) continue; - iolist[j]=turbo_io[i]; - break; - } - } -} - -static void ibmtr_cleanup_card(struct net_device *dev) -{ - if (dev->base_addr) { - outb(0,dev->base_addr+ADAPTRESET); - - schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */ - - outb(0,dev->base_addr+ADAPTRESETREL); - } - -#ifndef PCMCIA - free_irq(dev->irq, dev); - release_region(dev->base_addr, IBMTR_IO_EXTENT); - - { - struct tok_info *ti = netdev_priv(dev); - iounmap(ti->mmio); - iounmap(ti->sram_virt); - } -#endif -} - -/**************************************************************************** - * ibmtr_probe(): Routine specified in the network device structure - * to probe for an IBM Token Ring Adapter. Routine outline: - * I. Interrogate hardware to determine if an adapter exists - * and what the speeds and feeds are - * II. Setup data structures to control execution based upon - * adapter characteristics. - * - * We expect ibmtr_probe to be called once for each device entry - * which references it. - ****************************************************************************/ - -static int __devinit ibmtr_probe(struct net_device *dev) -{ - int i; - int base_addr = dev->base_addr; - - if (base_addr && base_addr <= 0x1ff) /* Don't probe at all. */ - return -ENXIO; - if (base_addr > 0x1ff) { /* Check a single specified location. 
*/ - if (!ibmtr_probe1(dev, base_addr)) return 0; - return -ENODEV; - } - find_turbo_adapters(ibmtr_portlist); - for (i = 0; ibmtr_portlist[i]; i++) { - int ioaddr = ibmtr_portlist[i]; - - if (!ibmtr_probe1(dev, ioaddr)) return 0; - } - return -ENODEV; -} - -int __devinit ibmtr_probe_card(struct net_device *dev) -{ - int err = ibmtr_probe(dev); - if (!err) { - err = register_netdev(dev); - if (err) - ibmtr_cleanup_card(dev); - } - return err; -} - -/*****************************************************************************/ - -static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) -{ - - unsigned char segment, intr=0, irq=0, i, j, cardpresent=NOTOK, temp=0; - void __iomem * t_mmio = NULL; - struct tok_info *ti = netdev_priv(dev); - void __iomem *cd_chanid; - unsigned char *tchanid, ctemp; -#ifndef PCMCIA - unsigned char t_irq=0; - unsigned long timeout; - static int version_printed; -#endif - - /* Query the adapter PIO base port which will return - * indication of where MMIO was placed. We also have a - * coded interrupt number. - */ - segment = inb(PIOaddr); - if (segment < 0x40 || segment > 0xe0) { - /* Out of range values so we'll assume non-existent IO device - * but this is not necessarily a problem, esp if a turbo - * adapter is being used. */ -#if IBMTR_DEBUG_MESSAGES - DPRINTK("ibmtr_probe1(): unhappy that inb(0x%X) == 0x%X, " - "Hardware Problem?\n",PIOaddr,segment); -#endif - return -ENODEV; - } - /* - * Compute the linear base address of the MMIO area - * as LINUX doesn't care about segments - */ - t_mmio = ioremap(((__u32) (segment & 0xfc) << 11) + 0x80000,2048); - if (!t_mmio) { - DPRINTK("Cannot remap mmiobase memory area") ; - return -ENODEV ; - } - intr = segment & 0x03; /* low bits is coded interrupt # */ - if (ibmtr_debug_trace & TRC_INIT) - DPRINTK("PIOaddr: %4hx seg/intr: %2x mmio base: %p intr: %d\n" - , PIOaddr, (int) segment, t_mmio, (int) intr); - - /* - * Now we will compare expected 'channelid' strings with - * what we is there to learn of ISA/MCA or not TR card - */ -#ifdef PCMCIA - iounmap(t_mmio); - t_mmio = ti->mmio; /*BMS to get virtual address */ - irq = ti->irq; /*BMS to display the irq! */ -#endif - cd_chanid = (CHANNEL_ID + t_mmio); /* for efficiency */ - tchanid = pcchannelid; - cardpresent = TR_ISA; /* try ISA */ - - /* Suboptimize knowing first byte different */ - ctemp = readb(cd_chanid) & 0x0f; - if (ctemp != *tchanid) { /* NOT ISA card, try MCA */ - tchanid = mcchannelid; - cardpresent = TR_MCA; - if (ctemp != *tchanid) /* Neither ISA nor MCA */ - cardpresent = NOTOK; - } - if (cardpresent != NOTOK) { - /* Know presumed type, try rest of ID */ - for (i = 2, j = 1; i <= 46; i = i + 2, j++) { - if( (readb(cd_chanid+i)&0x0f) == tchanid[j]) continue; - /* match failed, not TR card */ - cardpresent = NOTOK; - break; - } - } - /* - * If we have an ISA board check for the ISA P&P version, - * as it has different IRQ settings - */ - if (cardpresent == TR_ISA && (readb(AIPFID + t_mmio) == 0x0e)) - cardpresent = TR_ISAPNP; - if (cardpresent == NOTOK) { /* "channel_id" did not match, report */ - if (!(ibmtr_debug_trace & TRC_INIT)) { -#ifndef PCMCIA - iounmap(t_mmio); -#endif - return -ENODEV; - } - DPRINTK( "Channel ID string not found for PIOaddr: %4hx\n", - PIOaddr); - DPRINTK("Expected for ISA: "); - PrtChanID(pcchannelid, 1); - DPRINTK(" found: "); -/* BMS Note that this can be misleading, when hardware is flaky, because you - are reading it a second time here. 
So with my flaky hardware, I'll see my- - self in this block, with the HW ID matching the ISA ID exactly! */ - HWPrtChanID(cd_chanid, 2); - DPRINTK("Expected for MCA: "); - PrtChanID(mcchannelid, 1); - } - /* Now, setup some of the pl0 buffers for this driver.. */ - /* If called from PCMCIA, it is already set up, so no need to - waste the memory, just use the existing structure */ -#ifndef PCMCIA - ti->mmio = t_mmio; - for (i = 0; i < IBMTR_MAX_ADAPTERS; i++) { - if (turbo_io[i] != PIOaddr) - continue; -#if IBMTR_DEBUG_MESSAGES - printk("ibmtr::tr_probe1, setting PIOaddr %x to Turbo\n", - PIOaddr); -#endif - ti->turbo = 1; - t_irq = turbo_irq[i]; - } -#endif /* !PCMCIA */ - ti->readlog_pending = 0; - init_waitqueue_head(&ti->wait_for_reset); - - /* if PCMCIA, the card can be recognized as either TR_ISA or TR_ISAPNP - * depending which card is inserted. */ - -#ifndef PCMCIA - switch (cardpresent) { - case TR_ISA: - if (intr == 0) irq = 9; /* irq2 really is irq9 */ - if (intr == 1) irq = 3; - if (intr == 2) irq = 6; - if (intr == 3) irq = 7; - ti->adapter_int_enable = PIOaddr + ADAPTINTREL; - break; - case TR_MCA: - if (intr == 0) irq = 9; - if (intr == 1) irq = 3; - if (intr == 2) irq = 10; - if (intr == 3) irq = 11; - ti->global_int_enable = 0; - ti->adapter_int_enable = 0; - ti->sram_phys=(__u32)(inb(PIOaddr+ADAPTRESETREL) & 0xfe) << 12; - break; - case TR_ISAPNP: - if (!t_irq) { - if (intr == 0) irq = 9; - if (intr == 1) irq = 3; - if (intr == 2) irq = 10; - if (intr == 3) irq = 11; - } else - irq=t_irq; - timeout = jiffies + TR_SPIN_INTERVAL; - while (!readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)){ - if (!time_after(jiffies, timeout)) continue; - DPRINTK( "Hardware timeout during initialization.\n"); - iounmap(t_mmio); - return -ENODEV; - } - ti->sram_phys = - ((__u32)readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_EVEN)<<12); - ti->adapter_int_enable = PIOaddr + ADAPTINTREL; - break; - } /*end switch (cardpresent) */ -#endif /*not PCMCIA */ - - if (ibmtr_debug_trace & TRC_INIT) { /* just report int */ - DPRINTK("irq=%d", irq); - printk(", sram_phys=0x%x", ti->sram_phys); - if(ibmtr_debug_trace&TRC_INITV){ /* full chat in verbose only */ - DPRINTK(", ti->mmio=%p", ti->mmio); - printk(", segment=%02X", segment); - } - printk(".\n"); - } - - /* Get hw address of token ring card */ - j = 0; - for (i = 0; i < 0x18; i = i + 2) { - /* technical reference states to do this */ - temp = readb(ti->mmio + AIP + i) & 0x0f; - ti->hw_address[j] = temp; - if (j & 1) - dev->dev_addr[(j / 2)] = - ti->hw_address[j]+ (ti->hw_address[j - 1] << 4); - ++j; - } - /* get Adapter type: 'F' = Adapter/A, 'E' = 16/4 Adapter II,... */ - ti->adapter_type = readb(ti->mmio + AIPADAPTYPE); - - /* get Data Rate: F=4Mb, E=16Mb, D=4Mb & 16Mb ?? */ - ti->data_rate = readb(ti->mmio + AIPDATARATE); - - /* Get Early Token Release support?: F=no, E=4Mb, D=16Mb, C=4&16Mb */ - ti->token_release = readb(ti->mmio + AIPEARLYTOKEN); - - /* How much shared RAM is on adapter ? */ - if (ti->turbo) { - ti->avail_shared_ram=127; - } else { - ti->avail_shared_ram = get_sram_size(ti);/*in 512 byte units */ - } - /* We need to set or do a bunch of work here based on previous results*/ - /* Support paging? 
What sizes?: F=no, E=16k, D=32k, C=16 & 32k */ - ti->shared_ram_paging = readb(ti->mmio + AIPSHRAMPAGE); - - /* Available DHB 4Mb size: F=2048, E=4096, D=4464 */ - switch (readb(ti->mmio + AIP4MBDHB)) { - case 0xe: ti->dhb_size4mb = 4096; break; - case 0xd: ti->dhb_size4mb = 4464; break; - default: ti->dhb_size4mb = 2048; break; - } - - /* Available DHB 16Mb size: F=2048, E=4096, D=8192, C=16384, B=17960 */ - switch (readb(ti->mmio + AIP16MBDHB)) { - case 0xe: ti->dhb_size16mb = 4096; break; - case 0xd: ti->dhb_size16mb = 8192; break; - case 0xc: ti->dhb_size16mb = 16384; break; - case 0xb: ti->dhb_size16mb = 17960; break; - default: ti->dhb_size16mb = 2048; break; - } - - /* We must figure out how much shared memory space this adapter - * will occupy so that if there are two adapters we can fit both - * in. Given a choice, we will limit this adapter to 32K. The - * maximum space will will use for two adapters is 64K so if the - * adapter we are working on demands 64K (it also doesn't support - * paging), then only one adapter can be supported. - */ - - /* - * determine how much of total RAM is mapped into PC space - */ - ti->mapped_ram_size= /*sixteen to onehundredtwentyeight 512byte blocks*/ - 1<< ((readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_ODD) >> 2 & 0x03) + 4); - ti->page_mask = 0; - if (ti->turbo) ti->page_mask=0xf0; - else if (ti->shared_ram_paging == 0xf); /* No paging in adapter */ - else { -#ifdef ENABLE_PAGING - unsigned char pg_size = 0; - /* BMS: page size: PCMCIA, use configuration register; - ISAPNP, use LANAIDC config tool from www.ibm.com */ - switch (ti->shared_ram_paging) { - case 0xf: - break; - case 0xe: - ti->page_mask = (ti->mapped_ram_size == 32) ? 0xc0 : 0; - pg_size = 32; /* 16KB page size */ - break; - case 0xd: - ti->page_mask = (ti->mapped_ram_size == 64) ? 
0x80 : 0; - pg_size = 64; /* 32KB page size */ - break; - case 0xc: - switch (ti->mapped_ram_size) { - case 32: - ti->page_mask = 0xc0; - pg_size = 32; - break; - case 64: - ti->page_mask = 0x80; - pg_size = 64; - break; - } - break; - default: - DPRINTK("Unknown shared ram paging info %01X\n", - ti->shared_ram_paging); - iounmap(t_mmio); - return -ENODEV; - break; - } /*end switch shared_ram_paging */ - - if (ibmtr_debug_trace & TRC_INIT) - DPRINTK("Shared RAM paging code: %02X, " - "mapped RAM size: %dK, shared RAM size: %dK, " - "page mask: %02X\n:", - ti->shared_ram_paging, ti->mapped_ram_size / 2, - ti->avail_shared_ram / 2, ti->page_mask); -#endif /*ENABLE_PAGING */ - } - -#ifndef PCMCIA - /* finish figuring the shared RAM address */ - if (cardpresent == TR_ISA) { - static const __u32 ram_bndry_mask[] = { - 0xffffe000, 0xffffc000, 0xffff8000, 0xffff0000 - }; - __u32 new_base, rrr_32, chk_base, rbm; - - rrr_32=readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_ODD) >> 2 & 0x03; - rbm = ram_bndry_mask[rrr_32]; - new_base = (ibmtr_mem_base + (~rbm)) & rbm;/* up to boundary */ - chk_base = new_base + (ti->mapped_ram_size << 9); - if (chk_base > (ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE)) { - DPRINTK("Shared RAM for this adapter (%05x) exceeds " - "driver limit (%05x), adapter not started.\n", - chk_base, ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE); - iounmap(t_mmio); - return -ENODEV; - } else { /* seems cool, record what we have figured out */ - ti->sram_base = new_base >> 12; - ibmtr_mem_base = chk_base; - } - } - else ti->sram_base = ti->sram_phys >> 12; - - /* The PCMCIA has already got the interrupt line and the io port, - so no chance of anybody else getting it - MLP */ - if (request_irq(dev->irq = irq, tok_interrupt, 0, "ibmtr", dev) != 0) { - DPRINTK("Could not grab irq %d. Halting Token Ring driver.\n", - irq); - iounmap(t_mmio); - return -ENODEV; - } - /*?? Now, allocate some of the PIO PORTs for this driver.. */ - /* record PIOaddr range as busy */ - if (!request_region(PIOaddr, IBMTR_IO_EXTENT, "ibmtr")) { - DPRINTK("Could not grab PIO range. Halting driver.\n"); - free_irq(dev->irq, dev); - iounmap(t_mmio); - return -EBUSY; - } - - if (!version_printed++) { - printk(version); - } -#endif /* !PCMCIA */ - DPRINTK("%s %s found\n", - channel_def[cardpresent - 1], adapter_def(ti->adapter_type)); - DPRINTK("using irq %d, PIOaddr %hx, %dK shared RAM.\n", - irq, PIOaddr, ti->mapped_ram_size / 2); - DPRINTK("Hardware address : %pM\n", dev->dev_addr); - if (ti->page_mask) - DPRINTK("Shared RAM paging enabled. " - "Page size: %uK Shared Ram size %dK\n", - ((ti->page_mask^0xff)+1) >>2, ti->avail_shared_ram / 2); - else - DPRINTK("Shared RAM paging disabled. ti->page_mask %x\n", - ti->page_mask); - - /* Calculate the maximum DHB we can use */ - /* two cases where avail_shared_ram doesn't equal mapped_ram_size: - 1. avail_shared_ram is 127 but mapped_ram_size is 128 (typical) - 2. 
user has configured adapter for less than avail_shared_ram - but is not using paging (she should use paging, I believe) - */ - if (!ti->page_mask) { - ti->avail_shared_ram= - min(ti->mapped_ram_size,ti->avail_shared_ram); - } - - switch (ti->avail_shared_ram) { - case 16: /* 8KB shared RAM */ - ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)2048); - ti->rbuf_len4 = 1032; - ti->rbuf_cnt4=2; - ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)2048); - ti->rbuf_len16 = 1032; - ti->rbuf_cnt16=2; - break; - case 32: /* 16KB shared RAM */ - ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464); - ti->rbuf_len4 = 1032; - ti->rbuf_cnt4=4; - ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)4096); - ti->rbuf_len16 = 1032; /*1024 usable */ - ti->rbuf_cnt16=4; - break; - case 64: /* 32KB shared RAM */ - ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464); - ti->rbuf_len4 = 1032; - ti->rbuf_cnt4=6; - ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)10240); - ti->rbuf_len16 = 1032; - ti->rbuf_cnt16=6; - break; - case 127: /* 63.5KB shared RAM */ - ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464); - ti->rbuf_len4 = 1032; - ti->rbuf_cnt4=6; - ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)16384); - ti->rbuf_len16 = 1032; - ti->rbuf_cnt16=16; - break; - case 128: /* 64KB shared RAM */ - ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464); - ti->rbuf_len4 = 1032; - ti->rbuf_cnt4=6; - ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)17960); - ti->rbuf_len16 = 1032; - ti->rbuf_cnt16=16; - break; - default: - ti->dhb_size4mb = 2048; - ti->rbuf_len4 = 1032; - ti->rbuf_cnt4=2; - ti->dhb_size16mb = 2048; - ti->rbuf_len16 = 1032; - ti->rbuf_cnt16=2; - break; - } - /* this formula is not smart enough for the paging case - ti->rbuf_cnt<x> = (ti->avail_shared_ram * BLOCKSZ - ADAPT_PRIVATE - - ARBLENGTH - SSBLENGTH - DLC_MAX_SAP * SAPLENGTH - - DLC_MAX_STA * STALENGTH - ti->dhb_size<x>mb * NUM_DHB - - SRBLENGTH - ASBLENGTH) / ti->rbuf_len<x>; - */ - ti->maxmtu16 = (ti->rbuf_len16 - 8) * ti->rbuf_cnt16 - TR_HLEN; - ti->maxmtu4 = (ti->rbuf_len4 - 8) * ti->rbuf_cnt4 - TR_HLEN; - /*BMS assuming 18 bytes of Routing Information (usually works) */ - DPRINTK("Maximum Receive Internet Protocol MTU 16Mbps: %d, 4Mbps: %d\n", - ti->maxmtu16, ti->maxmtu4); - - dev->base_addr = PIOaddr; /* set the value for device */ - dev->mem_start = ti->sram_base << 12; - dev->mem_end = dev->mem_start + (ti->mapped_ram_size << 9) - 1; - trdev_init(dev); - return 0; /* Return 0 to indicate we have found a Token Ring card. */ -} /*ibmtr_probe1() */ - -/*****************************************************************************/ - -/* query the adapter for the size of shared RAM */ -/* the function returns the RAM size in units of 512 bytes */ - -static unsigned char __devinit get_sram_size(struct tok_info *adapt_info) -{ - unsigned char avail_sram_code; - static unsigned char size_code[] = { 0, 16, 32, 64, 127, 128 }; - /* Adapter gives - 'F' -- use RRR bits 3,2 - 'E' -- 8kb 'D' -- 16kb - 'C' -- 32kb 'A' -- 64KB - 'B' - 64KB less 512 bytes at top - (WARNING ... 
must zero top bytes in INIT */ - - avail_sram_code = 0xf - readb(adapt_info->mmio + AIPAVAILSHRAM); - if (avail_sram_code) return size_code[avail_sram_code]; - else /* for code 'F', must compute size from RRR(3,2) bits */ - return 1 << - ((readb(adapt_info->mmio+ACA_OFFSET+ACA_RW+RRR_ODD)>>2&3)+4); -} - -/*****************************************************************************/ - -static const struct net_device_ops trdev_netdev_ops = { - .ndo_open = tok_open, - .ndo_stop = tok_close, - .ndo_start_xmit = tok_send_packet, - .ndo_set_rx_mode = tok_set_multicast_list, - .ndo_change_mtu = ibmtr_change_mtu, -}; - -static int __devinit trdev_init(struct net_device *dev) -{ - struct tok_info *ti = netdev_priv(dev); - - SET_PAGE(ti->srb_page); - ti->open_failure = NO ; - dev->netdev_ops = &trdev_netdev_ops; - - return 0; -} - -/*****************************************************************************/ - -static int tok_init_card(struct net_device *dev) -{ - struct tok_info *ti; - short PIOaddr; - unsigned long i; - - PIOaddr = dev->base_addr; - ti = netdev_priv(dev); - /* Special processing for first interrupt after reset */ - ti->do_tok_int = FIRST_INT; - /* Reset adapter */ - writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN); - outb(0, PIOaddr + ADAPTRESET); - - schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */ - - outb(0, PIOaddr + ADAPTRESETREL); -#ifdef ENABLE_PAGING - if (ti->page_mask) - writeb(SRPR_ENABLE_PAGING,ti->mmio+ACA_OFFSET+ACA_RW+SRPR_EVEN); -#endif - writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); - i = sleep_on_timeout(&ti->wait_for_reset, 4 * HZ); - return i? 0 : -EAGAIN; -} - -/*****************************************************************************/ -static int tok_open(struct net_device *dev) -{ - struct tok_info *ti = netdev_priv(dev); - int i; - - /*the case we were left in a failure state during a previous open */ - if (ti->open_failure == YES) { - DPRINTK("Last time you were disconnected, how about now?\n"); - printk("You can't insert with an ICS connector half-cocked.\n"); - } - - ti->open_status = CLOSED; /* CLOSED or OPEN */ - ti->sap_status = CLOSED; /* CLOSED or OPEN */ - ti->open_failure = NO; /* NO or YES */ - ti->open_mode = MANUAL; /* MANUAL or AUTOMATIC */ - - ti->sram_phys &= ~1; /* to reverse what we do in tok_close */ - /* init the spinlock */ - spin_lock_init(&ti->lock); - init_timer(&ti->tr_timer); - - i = tok_init_card(dev); - if (i) return i; - - while (1){ - tok_open_adapter((unsigned long) dev); - i= interruptible_sleep_on_timeout(&ti->wait_for_reset, 25 * HZ); - /* sig catch: estimate opening adapter takes more than .5 sec*/ - if (i>(245*HZ)/10) break; /* fancier than if (i==25*HZ) */ - if (i==0) break; - if (ti->open_status == OPEN && ti->sap_status==OPEN) { - netif_start_queue(dev); - DPRINTK("Adapter is up and running\n"); - return 0; - } - i=schedule_timeout_interruptible(TR_RETRY_INTERVAL); - /* wait 30 seconds */ - if(i!=0) break; /*prob. 
a signal, like the i>24*HZ case above */ - } - outb(0, dev->base_addr + ADAPTRESET);/* kill pending interrupts*/ - DPRINTK("TERMINATED via signal\n"); /*BMS useful */ - return -EAGAIN; -} - -/*****************************************************************************/ - -#define COMMAND_OFST 0 -#define OPEN_OPTIONS_OFST 8 -#define NUM_RCV_BUF_OFST 24 -#define RCV_BUF_LEN_OFST 26 -#define DHB_LENGTH_OFST 28 -#define NUM_DHB_OFST 30 -#define DLC_MAX_SAP_OFST 32 -#define DLC_MAX_STA_OFST 33 - -static void tok_open_adapter(unsigned long dev_addr) -{ - struct net_device *dev = (struct net_device *) dev_addr; - struct tok_info *ti; - int i; - - ti = netdev_priv(dev); - SET_PAGE(ti->init_srb_page); - writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); - for (i = 0; i < sizeof(struct dir_open_adapter); i++) - writeb(0, ti->init_srb + i); - writeb(DIR_OPEN_ADAPTER, ti->init_srb + COMMAND_OFST); - writew(htons(OPEN_PASS_BCON_MAC), ti->init_srb + OPEN_OPTIONS_OFST); - if (ti->ring_speed == 16) { - writew(htons(ti->dhb_size16mb), ti->init_srb + DHB_LENGTH_OFST); - writew(htons(ti->rbuf_cnt16), ti->init_srb + NUM_RCV_BUF_OFST); - writew(htons(ti->rbuf_len16), ti->init_srb + RCV_BUF_LEN_OFST); - } else { - writew(htons(ti->dhb_size4mb), ti->init_srb + DHB_LENGTH_OFST); - writew(htons(ti->rbuf_cnt4), ti->init_srb + NUM_RCV_BUF_OFST); - writew(htons(ti->rbuf_len4), ti->init_srb + RCV_BUF_LEN_OFST); - } - writeb(NUM_DHB, /* always 2 */ ti->init_srb + NUM_DHB_OFST); - writeb(DLC_MAX_SAP, ti->init_srb + DLC_MAX_SAP_OFST); - writeb(DLC_MAX_STA, ti->init_srb + DLC_MAX_STA_OFST); - ti->srb = ti->init_srb; /* We use this one in the interrupt handler */ - ti->srb_page = ti->init_srb_page; - DPRINTK("Opening adapter: Xmit bfrs: %d X %d, Rcv bfrs: %d X %d\n", - readb(ti->init_srb + NUM_DHB_OFST), - ntohs(readw(ti->init_srb + DHB_LENGTH_OFST)), - ntohs(readw(ti->init_srb + NUM_RCV_BUF_OFST)), - ntohs(readw(ti->init_srb + RCV_BUF_LEN_OFST))); - writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); - writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); -} - -/*****************************************************************************/ - -static void open_sap(unsigned char type, struct net_device *dev) -{ - int i; - struct tok_info *ti = netdev_priv(dev); - - SET_PAGE(ti->srb_page); - for (i = 0; i < sizeof(struct dlc_open_sap); i++) - writeb(0, ti->srb + i); - -#define MAX_I_FIELD_OFST 14 -#define SAP_VALUE_OFST 16 -#define SAP_OPTIONS_OFST 17 -#define STATION_COUNT_OFST 18 - - writeb(DLC_OPEN_SAP, ti->srb + COMMAND_OFST); - writew(htons(MAX_I_FIELD), ti->srb + MAX_I_FIELD_OFST); - writeb(SAP_OPEN_IND_SAP | SAP_OPEN_PRIORITY, ti->srb+ SAP_OPTIONS_OFST); - writeb(SAP_OPEN_STATION_CNT, ti->srb + STATION_COUNT_OFST); - writeb(type, ti->srb + SAP_VALUE_OFST); - writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); -} - - -/*****************************************************************************/ - -static void tok_set_multicast_list(struct net_device *dev) -{ - struct tok_info *ti = netdev_priv(dev); - struct netdev_hw_addr *ha; - unsigned char address[4]; - - int i; - - /*BMS the next line is CRUCIAL or you may be sad when you */ - /*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/ - if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return; - address[0] = address[1] = address[2] = address[3] = 0; - netdev_for_each_mc_addr(ha, dev) { - address[0] |= ha->addr[2]; - address[1] |= ha->addr[3]; - address[2] |= ha->addr[4]; - address[3] |= 
ha->addr[5]; - } - SET_PAGE(ti->srb_page); - for (i = 0; i < sizeof(struct srb_set_funct_addr); i++) - writeb(0, ti->srb + i); - -#define FUNCT_ADDRESS_OFST 6 - - writeb(DIR_SET_FUNC_ADDR, ti->srb + COMMAND_OFST); - for (i = 0; i < 4; i++) - writeb(address[i], ti->srb + FUNCT_ADDRESS_OFST + i); - writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); -#if TR_VERBOSE - DPRINTK("Setting functional address: "); - for (i=0;i<4;i++) printk("%02X ", address[i]); - printk("\n"); -#endif -} - -/*****************************************************************************/ - -#define STATION_ID_OFST 4 - -static netdev_tx_t tok_send_packet(struct sk_buff *skb, - struct net_device *dev) -{ - struct tok_info *ti; - unsigned long flags; - ti = netdev_priv(dev); - - netif_stop_queue(dev); - - /* lock against other CPUs */ - spin_lock_irqsave(&(ti->lock), flags); - - /* Save skb; we'll need it when the adapter asks for the data */ - ti->current_skb = skb; - SET_PAGE(ti->srb_page); - writeb(XMIT_UI_FRAME, ti->srb + COMMAND_OFST); - writew(ti->exsap_station_id, ti->srb + STATION_ID_OFST); - writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); - spin_unlock_irqrestore(&(ti->lock), flags); - return NETDEV_TX_OK; -} - -/*****************************************************************************/ - -static int tok_close(struct net_device *dev) -{ - struct tok_info *ti = netdev_priv(dev); - - /* Important for PCMCIA hot unplug, otherwise, we'll pull the card, */ - /* unloading the module from memory, and then if a timer pops, ouch */ - del_timer_sync(&ti->tr_timer); - outb(0, dev->base_addr + ADAPTRESET); - ti->sram_phys |= 1; - ti->open_status = CLOSED; - - netif_stop_queue(dev); - DPRINTK("Adapter is closed.\n"); - return 0; -} - -/*****************************************************************************/ - -#define RETCODE_OFST 2 -#define OPEN_ERROR_CODE_OFST 6 -#define ASB_ADDRESS_OFST 8 -#define SRB_ADDRESS_OFST 10 -#define ARB_ADDRESS_OFST 12 -#define SSB_ADDRESS_OFST 14 - -static char *printphase[]= {"Lobe media test","Physical insertion", - "Address verification","Roll call poll","Request Parameters"}; -static char *printerror[]={"Function failure","Signal loss","Reserved", - "Frequency error","Timeout","Ring failure","Ring beaconing", - "Duplicate node address", - "Parameter request-retry count exceeded","Remove received", - "IMPL force received","Duplicate modifier", - "No monitor detected","Monitor contention failed for RPL"}; - -static void __iomem *map_address(struct tok_info *ti, unsigned index, __u8 *page) -{ - if (ti->page_mask) { - *page = (index >> 8) & ti->page_mask; - index &= ~(ti->page_mask << 8); - } - return ti->sram_virt + index; -} - -static void dir_open_adapter (struct net_device *dev) -{ - struct tok_info *ti = netdev_priv(dev); - unsigned char ret_code; - __u16 err; - - ti->srb = map_address(ti, - ntohs(readw(ti->init_srb + SRB_ADDRESS_OFST)), - &ti->srb_page); - ti->ssb = map_address(ti, - ntohs(readw(ti->init_srb + SSB_ADDRESS_OFST)), - &ti->ssb_page); - ti->arb = map_address(ti, - ntohs(readw(ti->init_srb + ARB_ADDRESS_OFST)), - &ti->arb_page); - ti->asb = map_address(ti, - ntohs(readw(ti->init_srb + ASB_ADDRESS_OFST)), - &ti->asb_page); - ti->current_skb = NULL; - ret_code = readb(ti->init_srb + RETCODE_OFST); - err = ntohs(readw(ti->init_srb + OPEN_ERROR_CODE_OFST)); - if (!ret_code) { - ti->open_status = OPEN; /* TR adapter is now available */ - if (ti->open_mode == AUTOMATIC) { - DPRINTK("Adapter reopened.\n"); - } - writeb(~SRB_RESP_INT, 
ti->mmio+ACA_OFFSET+ACA_RESET+ISRP_ODD); - open_sap(EXTENDED_SAP, dev); - return; - } - ti->open_failure = YES; - if (ret_code == 7){ - if (err == 0x24) { - if (!ti->auto_speedsave) { - DPRINTK("Open failed: Adapter speed must match " - "ring speed if Automatic Ring Speed Save is " - "disabled.\n"); - ti->open_action = FAIL; - }else - DPRINTK("Retrying open to adjust to " - "ring speed, "); - } else if (err == 0x2d) { - DPRINTK("Physical Insertion: No Monitor Detected, "); - printk("retrying after %ds delay...\n", - TR_RETRY_INTERVAL/HZ); - } else if (err == 0x11) { - DPRINTK("Lobe Media Function Failure (0x11), "); - printk(" retrying after %ds delay...\n", - TR_RETRY_INTERVAL/HZ); - } else { - char **prphase = printphase; - char **prerror = printerror; - int pnr = err / 16 - 1; - int enr = err % 16 - 1; - DPRINTK("TR Adapter misc open failure, error code = "); - if (pnr < 0 || pnr >= ARRAY_SIZE(printphase) || - enr < 0 || - enr >= ARRAY_SIZE(printerror)) - printk("0x%x, invalid Phase/Error.", err); - else - printk("0x%x, Phase: %s, Error: %s\n", err, - prphase[pnr], prerror[enr]); - printk(" retrying after %ds delay...\n", - TR_RETRY_INTERVAL/HZ); - } - } else DPRINTK("open failed: ret_code = %02X..., ", ret_code); - if (ti->open_action != FAIL) { - if (ti->open_mode==AUTOMATIC){ - ti->open_action = REOPEN; - ibmtr_reset_timer(&(ti->tr_timer), dev); - return; - } - wake_up(&ti->wait_for_reset); - return; - } - DPRINTK("FAILURE, CAPUT\n"); -} - -/******************************************************************************/ - -static irqreturn_t tok_interrupt(int irq, void *dev_id) -{ - unsigned char status; - /* unsigned char status_even ; */ - struct tok_info *ti; - struct net_device *dev; -#ifdef ENABLE_PAGING - unsigned char save_srpr; -#endif - - dev = dev_id; -#if TR_VERBOSE - DPRINTK("Int from tok_driver, dev : %p irq%d\n", dev,irq); -#endif - ti = netdev_priv(dev); - if (ti->sram_phys & 1) - return IRQ_NONE; /* PCMCIA card extraction flag */ - spin_lock(&(ti->lock)); -#ifdef ENABLE_PAGING - save_srpr = readb(ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN); -#endif - - /* Disable interrupts till processing is finished */ - writeb((~INT_ENABLE), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN); - - /* Reset interrupt for ISA boards */ - if (ti->adapter_int_enable) - outb(0, ti->adapter_int_enable); - else /* used for PCMCIA cards */ - outb(0, ti->global_int_enable); - if (ti->do_tok_int == FIRST_INT){ - initial_tok_int(dev); -#ifdef ENABLE_PAGING - writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN); -#endif - spin_unlock(&(ti->lock)); - return IRQ_HANDLED; - } - /* Begin interrupt handler HERE inline to avoid the extra - levels of logic and call depth for the original solution. 
*/ - status = readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_ODD); - /*BMSstatus_even = readb (ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) */ - /*BMSdebugprintk("tok_interrupt: ISRP_ODD = 0x%x ISRP_EVEN = 0x%x\n", */ - /*BMS status,status_even); */ - - if (status & ADAP_CHK_INT) { - int i; - void __iomem *check_reason; - __u8 check_reason_page = 0; - check_reason = map_address(ti, - ntohs(readw(ti->mmio+ ACA_OFFSET+ACA_RW + WWCR_EVEN)), - &check_reason_page); - SET_PAGE(check_reason_page); - - DPRINTK("Adapter check interrupt\n"); - DPRINTK("8 reason bytes follow: "); - for (i = 0; i < 8; i++, check_reason++) - printk("%02X ", (int) readb(check_reason)); - printk("\n"); - writeb(~ADAP_CHK_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD); - status = readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRA_EVEN); - DPRINTK("ISRA_EVEN == 0x02%x\n",status); - ti->open_status = CLOSED; - ti->sap_status = CLOSED; - ti->open_mode = AUTOMATIC; - netif_carrier_off(dev); - netif_stop_queue(dev); - ti->open_action = RESTART; - outb(0, dev->base_addr + ADAPTRESET); - ibmtr_reset_timer(&(ti->tr_timer), dev);/*BMS try to reopen*/ - spin_unlock(&(ti->lock)); - return IRQ_HANDLED; - } - if (readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) - & (TCR_INT | ERR_INT | ACCESS_INT)) { - DPRINTK("adapter error: ISRP_EVEN : %02x\n", - (int)readb(ti->mmio+ ACA_OFFSET + ACA_RW + ISRP_EVEN)); - writeb(~(TCR_INT | ERR_INT | ACCESS_INT), - ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN); - status= readb(ti->mmio+ ACA_OFFSET + ACA_RW + ISRA_EVEN);/*BMS*/ - DPRINTK("ISRA_EVEN == 0x02%x\n",status);/*BMS*/ - writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); -#ifdef ENABLE_PAGING - writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN); -#endif - spin_unlock(&(ti->lock)); - return IRQ_HANDLED; - } - if (status & SRB_RESP_INT) { /* SRB response */ - SET_PAGE(ti->srb_page); -#if TR_VERBOSE - DPRINTK("SRB resp: cmd=%02X rsp=%02X\n", - readb(ti->srb), readb(ti->srb + RETCODE_OFST)); -#endif - switch (readb(ti->srb)) { /* SRB command check */ - case XMIT_DIR_FRAME:{ - unsigned char xmit_ret_code; - xmit_ret_code = readb(ti->srb + RETCODE_OFST); - if (xmit_ret_code == 0xff) break; - DPRINTK("error on xmit_dir_frame request: %02X\n", - xmit_ret_code); - if (ti->current_skb) { - dev_kfree_skb_irq(ti->current_skb); - ti->current_skb = NULL; - } - /*dev->tbusy = 0;*/ - netif_wake_queue(dev); - if (ti->readlog_pending) - ibmtr_readlog(dev); - break; - } - case XMIT_UI_FRAME:{ - unsigned char xmit_ret_code; - - xmit_ret_code = readb(ti->srb + RETCODE_OFST); - if (xmit_ret_code == 0xff) break; - DPRINTK("error on xmit_ui_frame request: %02X\n", - xmit_ret_code); - if (ti->current_skb) { - dev_kfree_skb_irq(ti->current_skb); - ti->current_skb = NULL; - } - netif_wake_queue(dev); - if (ti->readlog_pending) - ibmtr_readlog(dev); - break; - } - case DIR_OPEN_ADAPTER: - dir_open_adapter(dev); - break; - case DLC_OPEN_SAP: - if (readb(ti->srb + RETCODE_OFST)) { - DPRINTK("open_sap failed: ret_code = %02X, " - "retrying\n", - (int) readb(ti->srb + RETCODE_OFST)); - ti->open_action = REOPEN; - ibmtr_reset_timer(&(ti->tr_timer), dev); - break; - } - ti->exsap_station_id = readw(ti->srb + STATION_ID_OFST); - ti->sap_status = OPEN;/* TR adapter is now available */ - if (ti->open_mode==MANUAL){ - wake_up(&ti->wait_for_reset); - break; - } - netif_wake_queue(dev); - netif_carrier_on(dev); - break; - case DIR_INTERRUPT: - case DIR_MOD_OPEN_PARAMS: - case DIR_SET_GRP_ADDR: - case DIR_SET_FUNC_ADDR: - case DLC_CLOSE_SAP: - if (readb(ti->srb + 
RETCODE_OFST)) - DPRINTK("error on %02X: %02X\n", - (int) readb(ti->srb + COMMAND_OFST), - (int) readb(ti->srb + RETCODE_OFST)); - break; - case DIR_READ_LOG: - if (readb(ti->srb + RETCODE_OFST)){ - DPRINTK("error on dir_read_log: %02X\n", - (int) readb(ti->srb + RETCODE_OFST)); - netif_wake_queue(dev); - break; - } -#if IBMTR_DEBUG_MESSAGES - -#define LINE_ERRORS_OFST 0 -#define INTERNAL_ERRORS_OFST 1 -#define BURST_ERRORS_OFST 2 -#define AC_ERRORS_OFST 3 -#define ABORT_DELIMITERS_OFST 4 -#define LOST_FRAMES_OFST 6 -#define RECV_CONGEST_COUNT_OFST 7 -#define FRAME_COPIED_ERRORS_OFST 8 -#define FREQUENCY_ERRORS_OFST 9 -#define TOKEN_ERRORS_OFST 10 - - DPRINTK("Line errors %02X, Internal errors %02X, " - "Burst errors %02X\n" "A/C errors %02X, " - "Abort delimiters %02X, Lost frames %02X\n" - "Receive congestion count %02X, " - "Frame copied errors %02X\nFrequency errors %02X, " - "Token errors %02X\n", - (int) readb(ti->srb + LINE_ERRORS_OFST), - (int) readb(ti->srb + INTERNAL_ERRORS_OFST), - (int) readb(ti->srb + BURST_ERRORS_OFST), - (int) readb(ti->srb + AC_ERRORS_OFST), - (int) readb(ti->srb + ABORT_DELIMITERS_OFST), - (int) readb(ti->srb + LOST_FRAMES_OFST), - (int) readb(ti->srb + RECV_CONGEST_COUNT_OFST), - (int) readb(ti->srb + FRAME_COPIED_ERRORS_OFST), - (int) readb(ti->srb + FREQUENCY_ERRORS_OFST), - (int) readb(ti->srb + TOKEN_ERRORS_OFST)); -#endif - netif_wake_queue(dev); - break; - default: - DPRINTK("Unknown command %02X encountered\n", - (int) readb(ti->srb)); - } /* end switch SRB command check */ - writeb(~SRB_RESP_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD); - } /* if SRB response */ - if (status & ASB_FREE_INT) { /* ASB response */ - SET_PAGE(ti->asb_page); -#if TR_VERBOSE - DPRINTK("ASB resp: cmd=%02X\n", readb(ti->asb)); -#endif - - switch (readb(ti->asb)) { /* ASB command check */ - case REC_DATA: - case XMIT_UI_FRAME: - case XMIT_DIR_FRAME: - break; - default: - DPRINTK("unknown command in asb %02X\n", - (int) readb(ti->asb)); - } /* switch ASB command check */ - if (readb(ti->asb + 2) != 0xff) /* checks ret_code */ - DPRINTK("ASB error %02X in cmd %02X\n", - (int) readb(ti->asb + 2), (int) readb(ti->asb)); - writeb(~ASB_FREE_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD); - } /* if ASB response */ - -#define STATUS_OFST 6 -#define NETW_STATUS_OFST 6 - - if (status & ARB_CMD_INT) { /* ARB response */ - SET_PAGE(ti->arb_page); -#if TR_VERBOSE - DPRINTK("ARB resp: cmd=%02X\n", readb(ti->arb)); -#endif - - switch (readb(ti->arb)) { /* ARB command check */ - case DLC_STATUS: - DPRINTK("DLC_STATUS new status: %02X on station %02X\n", - ntohs(readw(ti->arb + STATUS_OFST)), - ntohs(readw(ti->arb+ STATION_ID_OFST))); - break; - case REC_DATA: - tr_rx(dev); - break; - case RING_STAT_CHANGE:{ - unsigned short ring_status; - ring_status= ntohs(readw(ti->arb + NETW_STATUS_OFST)); - if (ibmtr_debug_trace & TRC_INIT) - DPRINTK("Ring Status Change...(0x%x)\n", - ring_status); - if(ring_status& (REMOVE_RECV|AUTO_REMOVAL|LOBE_FAULT)){ - netif_stop_queue(dev); - netif_carrier_off(dev); - DPRINTK("Remove received, or Auto-removal error" - ", or Lobe fault\n"); - DPRINTK("We'll try to reopen the closed adapter" - " after a %d second delay.\n", - TR_RETRY_INTERVAL/HZ); - /*I was confused: I saw the TR reopening but */ - /*forgot:with an RJ45 in an RJ45/ICS adapter */ - /*but adapter not in the ring, the TR will */ - /* open, and then soon close and come here. 
*/ - ti->open_mode = AUTOMATIC; - ti->open_status = CLOSED; /*12/2000 BMS*/ - ti->open_action = REOPEN; - ibmtr_reset_timer(&(ti->tr_timer), dev); - } else if (ring_status & LOG_OVERFLOW) { - if(netif_queue_stopped(dev)) - ti->readlog_pending = 1; - else - ibmtr_readlog(dev); - } - break; - } - case XMIT_DATA_REQ: - tr_tx(dev); - break; - default: - DPRINTK("Unknown command %02X in arb\n", - (int) readb(ti->arb)); - break; - } /* switch ARB command check */ - writeb(~ARB_CMD_INT, ti->mmio+ ACA_OFFSET+ACA_RESET + ISRP_ODD); - writeb(ARB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); - } /* if ARB response */ - if (status & SSB_RESP_INT) { /* SSB response */ - unsigned char retcode; - SET_PAGE(ti->ssb_page); -#if TR_VERBOSE - DPRINTK("SSB resp: cmd=%02X rsp=%02X\n", - readb(ti->ssb), readb(ti->ssb + 2)); -#endif - - switch (readb(ti->ssb)) { /* SSB command check */ - case XMIT_DIR_FRAME: - case XMIT_UI_FRAME: - retcode = readb(ti->ssb + 2); - if (retcode && (retcode != 0x22))/* checks ret_code */ - DPRINTK("xmit ret_code: %02X xmit error code: " - "%02X\n", - (int)retcode, (int)readb(ti->ssb + 6)); - else - dev->stats.tx_packets++; - break; - case XMIT_XID_CMD: - DPRINTK("xmit xid ret_code: %02X\n", - (int) readb(ti->ssb + 2)); - default: - DPRINTK("Unknown command %02X in ssb\n", - (int) readb(ti->ssb)); - } /* SSB command check */ - writeb(~SSB_RESP_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD); - writeb(SSB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); - } /* if SSB response */ -#ifdef ENABLE_PAGING - writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN); -#endif - writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); - spin_unlock(&(ti->lock)); - return IRQ_HANDLED; -} /*tok_interrupt */ - -/*****************************************************************************/ - -#define INIT_STATUS_OFST 1 -#define INIT_STATUS_2_OFST 2 -#define ENCODED_ADDRESS_OFST 8 - -static void initial_tok_int(struct net_device *dev) -{ - - __u32 encoded_addr, hw_encoded_addr; - struct tok_info *ti; - unsigned char init_status; /*BMS 12/2000*/ - - ti = netdev_priv(dev); - - ti->do_tok_int = NOT_FIRST; - - /* we assign the shared-ram address for ISA devices */ - writeb(ti->sram_base, ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN); -#ifndef PCMCIA - ti->sram_virt = ioremap(((__u32)ti->sram_base << 12), ti->avail_shared_ram); -#endif - ti->init_srb = map_address(ti, - ntohs(readw(ti->mmio + ACA_OFFSET + WRBR_EVEN)), - &ti->init_srb_page); - if (ti->page_mask && ti->avail_shared_ram == 127) { - void __iomem *last_512; - __u8 last_512_page=0; - int i; - last_512 = map_address(ti, 0xfe00, &last_512_page); - /* initialize high section of ram (if necessary) */ - SET_PAGE(last_512_page); - for (i = 0; i < 512; i++) - writeb(0, last_512 + i); - } - SET_PAGE(ti->init_srb_page); - -#if TR_VERBOSE - { - int i; - - DPRINTK("ti->init_srb_page=0x%x\n", ti->init_srb_page); - DPRINTK("init_srb(%p):", ti->init_srb ); - for (i = 0; i < 20; i++) - printk("%02X ", (int) readb(ti->init_srb + i)); - printk("\n"); - } -#endif - - hw_encoded_addr = readw(ti->init_srb + ENCODED_ADDRESS_OFST); - encoded_addr = ntohs(hw_encoded_addr); - init_status= /*BMS 12/2000 check for shallow mode possibility (Turbo)*/ - readb(ti->init_srb+offsetof(struct srb_init_response,init_status)); - /*printk("Initial interrupt: init_status= 0x%02x\n",init_status);*/ - ti->ring_speed = init_status & 0x01 ? 
16 : 4; - DPRINTK("Initial interrupt : %d Mbps, shared RAM base %08x.\n", - ti->ring_speed, (unsigned int)dev->mem_start); - ti->auto_speedsave = (readb(ti->init_srb+INIT_STATUS_2_OFST) & 4) != 0; - - if (ti->open_mode == MANUAL) wake_up(&ti->wait_for_reset); - else tok_open_adapter((unsigned long)dev); - -} /*initial_tok_int() */ - -/*****************************************************************************/ - -#define CMD_CORRELATE_OFST 1 -#define DHB_ADDRESS_OFST 6 - -#define FRAME_LENGTH_OFST 6 -#define HEADER_LENGTH_OFST 8 -#define RSAP_VALUE_OFST 9 - -static void tr_tx(struct net_device *dev) -{ - struct tok_info *ti = netdev_priv(dev); - struct trh_hdr *trhdr = (struct trh_hdr *) ti->current_skb->data; - unsigned int hdr_len; - __u32 dhb=0,dhb_base; - void __iomem *dhbuf = NULL; - unsigned char xmit_command; - int i,dhb_len=0x4000,src_len,src_offset; - struct trllc *llc; - struct srb_xmit xsrb; - __u8 dhb_page = 0; - __u8 llc_ssap; - - SET_PAGE(ti->asb_page); - - if (readb(ti->asb+RETCODE_OFST) != 0xFF) DPRINTK("ASB not free !!!\n"); - - /* in providing the transmit interrupts, is telling us it is ready for - data and providing a shared memory address for us to stuff with data. - Here we compute the effective address where we will place data. - */ - SET_PAGE(ti->arb_page); - dhb=dhb_base=ntohs(readw(ti->arb + DHB_ADDRESS_OFST)); - if (ti->page_mask) { - dhb_page = (dhb_base >> 8) & ti->page_mask; - dhb=dhb_base & ~(ti->page_mask << 8); - } - dhbuf = ti->sram_virt + dhb; - - /* Figure out the size of the 802.5 header */ - if (!(trhdr->saddr[0] & 0x80)) /* RIF present? */ - hdr_len = sizeof(struct trh_hdr) - TR_MAXRIFLEN; - else - hdr_len = ((ntohs(trhdr->rcf) & TR_RCF_LEN_MASK) >> 8) - + sizeof(struct trh_hdr) - TR_MAXRIFLEN; - - llc = (struct trllc *) (ti->current_skb->data + hdr_len); - - llc_ssap = llc->ssap; - SET_PAGE(ti->srb_page); - memcpy_fromio(&xsrb, ti->srb, sizeof(xsrb)); - SET_PAGE(ti->asb_page); - xmit_command = xsrb.command; - - writeb(xmit_command, ti->asb + COMMAND_OFST); - writew(xsrb.station_id, ti->asb + STATION_ID_OFST); - writeb(llc_ssap, ti->asb + RSAP_VALUE_OFST); - writeb(xsrb.cmd_corr, ti->asb + CMD_CORRELATE_OFST); - writeb(0, ti->asb + RETCODE_OFST); - if ((xmit_command == XMIT_XID_CMD) || (xmit_command == XMIT_TEST_CMD)) { - writew(htons(0x11), ti->asb + FRAME_LENGTH_OFST); - writeb(0x0e, ti->asb + HEADER_LENGTH_OFST); - SET_PAGE(dhb_page); - writeb(AC, dhbuf); - writeb(LLC_FRAME, dhbuf + 1); - for (i = 0; i < TR_ALEN; i++) - writeb((int) 0x0FF, dhbuf + i + 2); - for (i = 0; i < TR_ALEN; i++) - writeb(0, dhbuf + i + TR_ALEN + 2); - writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); - return; - } - /* - * the token ring packet is copied from sk_buff to the adapter - * buffer identified in the command data received with the interrupt. 
- */ - writeb(hdr_len, ti->asb + HEADER_LENGTH_OFST); - writew(htons(ti->current_skb->len), ti->asb + FRAME_LENGTH_OFST); - src_len=ti->current_skb->len; - src_offset=0; - dhb=dhb_base; - while(1) { - if (ti->page_mask) { - dhb_page=(dhb >> 8) & ti->page_mask; - dhb=dhb & ~(ti->page_mask << 8); - dhb_len=0x4000-dhb; /* remaining size of this page */ - } - dhbuf = ti->sram_virt + dhb; - SET_PAGE(dhb_page); - if (src_len > dhb_len) { - memcpy_toio(dhbuf,&ti->current_skb->data[src_offset], - dhb_len); - src_len -= dhb_len; - src_offset += dhb_len; - dhb_base+=dhb_len; - dhb=dhb_base; - continue; - } - memcpy_toio(dhbuf, &ti->current_skb->data[src_offset], src_len); - break; - } - writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); - dev->stats.tx_bytes += ti->current_skb->len; - dev_kfree_skb_irq(ti->current_skb); - ti->current_skb = NULL; - netif_wake_queue(dev); - if (ti->readlog_pending) - ibmtr_readlog(dev); -} /*tr_tx */ - -/*****************************************************************************/ - - -#define RECEIVE_BUFFER_OFST 6 -#define LAN_HDR_LENGTH_OFST 8 -#define DLC_HDR_LENGTH_OFST 9 - -#define DSAP_OFST 0 -#define SSAP_OFST 1 -#define LLC_OFST 2 -#define PROTID_OFST 3 -#define ETHERTYPE_OFST 6 - -static void tr_rx(struct net_device *dev) -{ - struct tok_info *ti = netdev_priv(dev); - __u32 rbuffer; - void __iomem *rbuf, *rbufdata, *llc; - __u8 rbuffer_page = 0; - unsigned char *data; - unsigned int rbuffer_len, lan_hdr_len, hdr_len, ip_len, length; - unsigned char dlc_hdr_len; - struct sk_buff *skb; - unsigned int skb_size = 0; - int IPv4_p = 0; - unsigned int chksum = 0; - struct iphdr *iph; - struct arb_rec_req rarb; - - SET_PAGE(ti->arb_page); - memcpy_fromio(&rarb, ti->arb, sizeof(rarb)); - rbuffer = ntohs(rarb.rec_buf_addr) ; - rbuf = map_address(ti, rbuffer, &rbuffer_page); - - SET_PAGE(ti->asb_page); - - if (readb(ti->asb + RETCODE_OFST) !=0xFF) DPRINTK("ASB not free !!!\n"); - - writeb(REC_DATA, ti->asb + COMMAND_OFST); - writew(rarb.station_id, ti->asb + STATION_ID_OFST); - writew(rarb.rec_buf_addr, ti->asb + RECEIVE_BUFFER_OFST); - - lan_hdr_len = rarb.lan_hdr_len; - if (lan_hdr_len > sizeof(struct trh_hdr)) { - DPRINTK("Linux cannot handle greater than 18 bytes RIF\n"); - return; - } /*BMS I added this above just to be very safe */ - dlc_hdr_len = readb(ti->arb + DLC_HDR_LENGTH_OFST); - hdr_len = lan_hdr_len + sizeof(struct trllc) + sizeof(struct iphdr); - - SET_PAGE(rbuffer_page); - llc = rbuf + offsetof(struct rec_buf, data) + lan_hdr_len; - -#if TR_VERBOSE - DPRINTK("offsetof data: %02X lan_hdr_len: %02X\n", - (__u32) offsetof(struct rec_buf, data), (unsigned int) lan_hdr_len); - DPRINTK("llc: %08X rec_buf_addr: %04X dev->mem_start: %lX\n", - llc, ntohs(rarb.rec_buf_addr), dev->mem_start); - DPRINTK("dsap: %02X, ssap: %02X, llc: %02X, protid: %02X%02X%02X, " - "ethertype: %04X\n", - (int) readb(llc + DSAP_OFST), (int) readb(llc + SSAP_OFST), - (int) readb(llc + LLC_OFST), (int) readb(llc + PROTID_OFST), - (int) readb(llc+PROTID_OFST+1),(int)readb(llc+PROTID_OFST + 2), - (int) ntohs(readw(llc + ETHERTYPE_OFST))); -#endif - if (readb(llc + offsetof(struct trllc, llc)) != UI_CMD) { - SET_PAGE(ti->asb_page); - writeb(DATA_LOST, ti->asb + RETCODE_OFST); - dev->stats.rx_dropped++; - writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); - return; - } - length = ntohs(rarb.frame_len); - if (readb(llc + DSAP_OFST) == EXTENDED_SAP && - readb(llc + SSAP_OFST) == EXTENDED_SAP && - length >= hdr_len) IPv4_p = 1; -#if TR_VERBOSE -#define SADDR_OFST 
8 -#define DADDR_OFST 2 - - if (!IPv4_p) { - - void __iomem *trhhdr = rbuf + offsetof(struct rec_buf, data); - u8 saddr[6]; - u8 daddr[6]; - int i; - for (i = 0 ; i < 6 ; i++) - saddr[i] = readb(trhhdr + SADDR_OFST + i); - for (i = 0 ; i < 6 ; i++) - daddr[i] = readb(trhhdr + DADDR_OFST + i); - DPRINTK("Probably non-IP frame received.\n"); - DPRINTK("ssap: %02X dsap: %02X " - "saddr: %pM daddr: %pM\n", - readb(llc + SSAP_OFST), readb(llc + DSAP_OFST), - saddr, daddr); - } -#endif - - /*BMS handle the case she comes in with few hops but leaves with many */ - skb_size=length-lan_hdr_len+sizeof(struct trh_hdr)+sizeof(struct trllc); - - if (!(skb = dev_alloc_skb(skb_size))) { - DPRINTK("out of memory. frame dropped.\n"); - dev->stats.rx_dropped++; - SET_PAGE(ti->asb_page); - writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code)); - writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); - return; - } - /*BMS again, if she comes in with few but leaves with many */ - skb_reserve(skb, sizeof(struct trh_hdr) - lan_hdr_len); - skb_put(skb, length); - data = skb->data; - rbuffer_len = ntohs(readw(rbuf + offsetof(struct rec_buf, buf_len))); - rbufdata = rbuf + offsetof(struct rec_buf, data); - - if (IPv4_p) { - /* Copy the headers without checksumming */ - memcpy_fromio(data, rbufdata, hdr_len); - - /* Watch for padded packets and bogons */ - iph= (struct iphdr *)(data+ lan_hdr_len + sizeof(struct trllc)); - ip_len = ntohs(iph->tot_len) - sizeof(struct iphdr); - length -= hdr_len; - if ((ip_len <= length) && (ip_len > 7)) - length = ip_len; - data += hdr_len; - rbuffer_len -= hdr_len; - rbufdata += hdr_len; - } - /* Copy the payload... */ -#define BUFFER_POINTER_OFST 2 -#define BUFFER_LENGTH_OFST 6 - for (;;) { - if (ibmtr_debug_trace&TRC_INITV && length < rbuffer_len) - DPRINTK("CURIOUS, length=%d < rbuffer_len=%d\n", - length,rbuffer_len); - if (IPv4_p) - chksum=csum_partial_copy_nocheck((void*)rbufdata, - data,length<rbuffer_len?length:rbuffer_len,chksum); - else - memcpy_fromio(data, rbufdata, rbuffer_len); - rbuffer = ntohs(readw(rbuf+BUFFER_POINTER_OFST)) ; - if (!rbuffer) - break; - rbuffer -= 2; - length -= rbuffer_len; - data += rbuffer_len; - rbuf = map_address(ti, rbuffer, &rbuffer_page); - SET_PAGE(rbuffer_page); - rbuffer_len = ntohs(readw(rbuf + BUFFER_LENGTH_OFST)); - rbufdata = rbuf + offsetof(struct rec_buf, data); - } - - SET_PAGE(ti->asb_page); - writeb(0, ti->asb + offsetof(struct asb_rec, ret_code)); - - writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); - - dev->stats.rx_bytes += skb->len; - dev->stats.rx_packets++; - - skb->protocol = tr_type_trans(skb, dev); - if (IPv4_p) { - skb->csum = chksum; - skb->ip_summed = CHECKSUM_COMPLETE; - } - netif_rx(skb); -} /*tr_rx */ - -/*****************************************************************************/ - -static void ibmtr_reset_timer(struct timer_list *tmr, struct net_device *dev) -{ - tmr->expires = jiffies + TR_RETRY_INTERVAL; - tmr->data = (unsigned long) dev; - tmr->function = tok_rerun; - init_timer(tmr); - add_timer(tmr); -} - -/*****************************************************************************/ - -static void tok_rerun(unsigned long dev_addr) -{ - struct net_device *dev = (struct net_device *)dev_addr; - struct tok_info *ti = netdev_priv(dev); - - if ( ti->open_action == RESTART){ - ti->do_tok_int = FIRST_INT; - outb(0, dev->base_addr + ADAPTRESETREL); -#ifdef ENABLE_PAGING - if (ti->page_mask) - writeb(SRPR_ENABLE_PAGING, - ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN); -#endif - - 
writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); - } else - tok_open_adapter(dev_addr); -} - -/*****************************************************************************/ - -static void ibmtr_readlog(struct net_device *dev) -{ - struct tok_info *ti; - - ti = netdev_priv(dev); - - ti->readlog_pending = 0; - SET_PAGE(ti->srb_page); - writeb(DIR_READ_LOG, ti->srb); - writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN); - writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); - - netif_stop_queue(dev); - -} - -/*****************************************************************************/ - -static int ibmtr_change_mtu(struct net_device *dev, int mtu) -{ - struct tok_info *ti = netdev_priv(dev); - - if (ti->ring_speed == 16 && mtu > ti->maxmtu16) - return -EINVAL; - if (ti->ring_speed == 4 && mtu > ti->maxmtu4) - return -EINVAL; - dev->mtu = mtu; - return 0; -} - -/*****************************************************************************/ -#ifdef MODULE - -/* 3COM 3C619C supports 8 interrupts, 32 I/O ports */ -static struct net_device *dev_ibmtr[IBMTR_MAX_ADAPTERS]; -static int io[IBMTR_MAX_ADAPTERS] = { 0xa20, 0xa24 }; -static int irq[IBMTR_MAX_ADAPTERS]; -static int mem[IBMTR_MAX_ADAPTERS]; - -MODULE_LICENSE("GPL"); - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(mem, int, NULL, 0); - -static int __init ibmtr_init(void) -{ - int i; - int count=0; - - find_turbo_adapters(io); - - for (i = 0; i < IBMTR_MAX_ADAPTERS && io[i]; i++) { - struct net_device *dev; - irq[i] = 0; - mem[i] = 0; - dev = alloc_trdev(sizeof(struct tok_info)); - if (dev == NULL) { - if (i == 0) - return -ENOMEM; - break; - } - dev->base_addr = io[i]; - dev->irq = irq[i]; - dev->mem_start = mem[i]; - - if (ibmtr_probe_card(dev)) { - free_netdev(dev); - continue; - } - dev_ibmtr[i] = dev; - count++; - } - if (count) return 0; - printk("ibmtr: register_netdev() returned non-zero.\n"); - return -EIO; -} -module_init(ibmtr_init); - -static void __exit ibmtr_cleanup(void) -{ - int i; - - for (i = 0; i < IBMTR_MAX_ADAPTERS; i++){ - if (!dev_ibmtr[i]) - continue; - unregister_netdev(dev_ibmtr[i]); - ibmtr_cleanup_card(dev_ibmtr[i]); - free_netdev(dev_ibmtr[i]); - } -} -module_exit(ibmtr_cleanup); -#endif diff --git a/drivers/net/tokenring/ibmtr_cs.c b/drivers/net/tokenring/ibmtr_cs.c deleted file mode 100644 index 356e28e4881..00000000000 --- a/drivers/net/tokenring/ibmtr_cs.c +++ /dev/null @@ -1,370 +0,0 @@ -/*====================================================================== - - A PCMCIA token-ring driver for IBM-based cards - - This driver supports the IBM PCMCIA Token-Ring Card. - Written by Steve Kipisz, kipisz@vnet.ibm.com or - bungy@ibm.net - - Written 1995,1996. - - This code is based on pcnet_cs.c from David Hinds. - - V2.2.0 February 1999 - Mike Phillips phillim@amtrak.com - - Linux V2.2.x presented significant changes to the underlying - ibmtr.c code. Mainly the code became a lot more organized and - modular. - - This caused the old PCMCIA Token Ring driver to give up and go - home early. Instead of just patching the old code to make it - work, the PCMCIA code has been streamlined, updated and possibly - improved. - - This code now only contains code required for the Card Services. - All we do here is set the card up enough so that the real ibmtr.c - driver can find it and work with it properly. - - i.e. We set up the io port, irq, mmio memory and shared ram - memory. 
This enables ibmtr_probe in ibmtr.c to find the card and - configure it as though it was a normal ISA and/or PnP card. - - CHANGES - - v2.2.5 April 1999 Mike Phillips (phillim@amtrak.com) - Obscure bug fix, required changed to ibmtr.c not ibmtr_cs.c - - v2.2.7 May 1999 Mike Phillips (phillim@amtrak.com) - Updated to version 2.2.7 to match the first version of the kernel - that the modification to ibmtr.c were incorporated into. - - v2.2.17 July 2000 Burt Silverman (burts@us.ibm.com) - Address translation feature of PCMCIA controller is usable so - memory windows can be placed in High memory (meaning above - 0xFFFFF.) - -======================================================================*/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/ptrace.h> -#include <linux/slab.h> -#include <linux/string.h> -#include <linux/timer.h> -#include <linux/module.h> -#include <linux/netdevice.h> -#include <linux/trdevice.h> -#include <linux/ibmtr.h> - -#include <pcmcia/cistpl.h> -#include <pcmcia/ds.h> - -#include <asm/uaccess.h> -#include <asm/io.h> - -#define PCMCIA -#include "ibmtr.c" - - -/*====================================================================*/ - -/* Parameters that can be set with 'insmod' */ - -/* MMIO base address */ -static u_long mmiobase = 0xce000; - -/* SRAM base address */ -static u_long srambase = 0xd0000; - -/* SRAM size 8,16,32,64 */ -static u_long sramsize = 64; - -/* Ringspeed 4,16 */ -static int ringspeed = 16; - -module_param(mmiobase, ulong, 0); -module_param(srambase, ulong, 0); -module_param(sramsize, ulong, 0); -module_param(ringspeed, int, 0); -MODULE_LICENSE("GPL"); - -/*====================================================================*/ - -static int ibmtr_config(struct pcmcia_device *link); -static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase); -static void ibmtr_release(struct pcmcia_device *link); -static void ibmtr_detach(struct pcmcia_device *p_dev); - -/*====================================================================*/ - -typedef struct ibmtr_dev_t { - struct pcmcia_device *p_dev; - struct net_device *dev; - struct tok_info *ti; -} ibmtr_dev_t; - -static irqreturn_t ibmtr_interrupt(int irq, void *dev_id) { - ibmtr_dev_t *info = dev_id; - struct net_device *dev = info->dev; - return tok_interrupt(irq, dev); -}; - -static int __devinit ibmtr_attach(struct pcmcia_device *link) -{ - ibmtr_dev_t *info; - struct net_device *dev; - - dev_dbg(&link->dev, "ibmtr_attach()\n"); - - /* Create new token-ring device */ - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) return -ENOMEM; - dev = alloc_trdev(sizeof(struct tok_info)); - if (!dev) { - kfree(info); - return -ENOMEM; - } - - info->p_dev = link; - link->priv = info; - info->ti = netdev_priv(dev); - - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; - link->resource[0]->end = 4; - link->config_flags |= CONF_ENABLE_IRQ; - link->config_regs = PRESENT_OPTION; - - info->dev = dev; - - return ibmtr_config(link); -} /* ibmtr_attach */ - -static void ibmtr_detach(struct pcmcia_device *link) -{ - struct ibmtr_dev_t *info = link->priv; - struct net_device *dev = info->dev; - struct tok_info *ti = netdev_priv(dev); - - dev_dbg(&link->dev, "ibmtr_detach\n"); - - /* - * When the card removal interrupt hits tok_interrupt(), - * bail out early, so we don't crash the machine - */ - ti->sram_phys |= 1; - - unregister_netdev(dev); - - del_timer_sync(&(ti->tr_timer)); - - ibmtr_release(link); - - free_netdev(dev); - kfree(info); -} 
/* ibmtr_detach */ - -static int __devinit ibmtr_config(struct pcmcia_device *link) -{ - ibmtr_dev_t *info = link->priv; - struct net_device *dev = info->dev; - struct tok_info *ti = netdev_priv(dev); - int i, ret; - - dev_dbg(&link->dev, "ibmtr_config\n"); - - link->io_lines = 16; - link->config_index = 0x61; - - /* Determine if this is PRIMARY or ALTERNATE. */ - - /* Try PRIMARY card at 0xA20-0xA23 */ - link->resource[0]->start = 0xA20; - i = pcmcia_request_io(link); - if (i != 0) { - /* Couldn't get 0xA20-0xA23. Try ALTERNATE at 0xA24-0xA27. */ - link->resource[0]->start = 0xA24; - ret = pcmcia_request_io(link); - if (ret) - goto failed; - } - dev->base_addr = link->resource[0]->start; - - ret = pcmcia_request_exclusive_irq(link, ibmtr_interrupt); - if (ret) - goto failed; - dev->irq = link->irq; - ti->irq = link->irq; - ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq); - - /* Allocate the MMIO memory window */ - link->resource[2]->flags |= WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE; - link->resource[2]->flags |= WIN_USE_WAIT; - link->resource[2]->start = 0; - link->resource[2]->end = 0x2000; - ret = pcmcia_request_window(link, link->resource[2], 250); - if (ret) - goto failed; - - ret = pcmcia_map_mem_page(link, link->resource[2], mmiobase); - if (ret) - goto failed; - ti->mmio = ioremap(link->resource[2]->start, - resource_size(link->resource[2])); - - /* Allocate the SRAM memory window */ - link->resource[3]->flags = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE; - link->resource[3]->flags |= WIN_USE_WAIT; - link->resource[3]->start = 0; - link->resource[3]->end = sramsize * 1024; - ret = pcmcia_request_window(link, link->resource[3], 250); - if (ret) - goto failed; - - ret = pcmcia_map_mem_page(link, link->resource[3], srambase); - if (ret) - goto failed; - - ti->sram_base = srambase >> 12; - ti->sram_virt = ioremap(link->resource[3]->start, - resource_size(link->resource[3])); - ti->sram_phys = link->resource[3]->start; - - ret = pcmcia_enable_device(link); - if (ret) - goto failed; - - /* Set up the Token-Ring Controller Configuration Register and - turn on the card. Check the "Local Area Network Credit Card - Adapters Technical Reference" SC30-3585 for this info. */ - ibmtr_hw_setup(dev, mmiobase); - - SET_NETDEV_DEV(dev, &link->dev); - - i = ibmtr_probe_card(dev); - if (i != 0) { - pr_notice("register_netdev() failed\n"); - goto failed; - } - - netdev_info(dev, "port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n", - dev->base_addr, dev->irq, - (u_long)ti->mmio, (u_long)(ti->sram_base << 12), - dev->dev_addr); - return 0; - -failed: - ibmtr_release(link); - return -ENODEV; -} /* ibmtr_config */ - -static void ibmtr_release(struct pcmcia_device *link) -{ - ibmtr_dev_t *info = link->priv; - struct net_device *dev = info->dev; - - dev_dbg(&link->dev, "ibmtr_release\n"); - - if (link->resource[2]->end) { - struct tok_info *ti = netdev_priv(dev); - iounmap(ti->mmio); - } - pcmcia_disable_device(link); -} - -static int ibmtr_suspend(struct pcmcia_device *link) -{ - ibmtr_dev_t *info = link->priv; - struct net_device *dev = info->dev; - - if (link->open) - netif_device_detach(dev); - - return 0; -} - -static int __devinit ibmtr_resume(struct pcmcia_device *link) -{ - ibmtr_dev_t *info = link->priv; - struct net_device *dev = info->dev; - - if (link->open) { - ibmtr_probe(dev); /* really? 
*/ - netif_device_attach(dev); - } - - return 0; -} - - -/*====================================================================*/ - -static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase) -{ - int i; - - /* Bizarre IBM behavior, there are 16 bits of information we - need to set, but the card only allows us to send 4 bits at a - time. For each byte sent to base_addr, bits 7-4 tell the - card which part of the 16 bits we are setting, bits 3-0 contain - the actual information */ - - /* First nibble provides 4 bits of mmio */ - i = (mmiobase >> 16) & 0x0F; - outb(i, dev->base_addr); - - /* Second nibble provides 3 bits of mmio */ - i = 0x10 | ((mmiobase >> 12) & 0x0E); - outb(i, dev->base_addr); - - /* Third nibble, hard-coded values */ - i = 0x26; - outb(i, dev->base_addr); - - /* Fourth nibble sets shared ram page size */ - - /* 8 = 00, 16 = 01, 32 = 10, 64 = 11 */ - i = (sramsize >> 4) & 0x07; - i = ((i == 4) ? 3 : i) << 2; - i |= 0x30; - - if (ringspeed == 16) - i |= 2; - if (dev->base_addr == 0xA24) - i |= 1; - outb(i, dev->base_addr); - - /* 0x40 will release the card for use */ - outb(0x40, dev->base_addr); -} - -static const struct pcmcia_device_id ibmtr_ids[] = { - PCMCIA_DEVICE_PROD_ID12("3Com", "TokenLink Velocity PC Card", 0x41240e5b, 0x82c3734e), - PCMCIA_DEVICE_PROD_ID12("IBM", "TOKEN RING", 0xb569a6e5, 0xbf8eed47), - PCMCIA_DEVICE_NULL, -}; -MODULE_DEVICE_TABLE(pcmcia, ibmtr_ids); - -static struct pcmcia_driver ibmtr_cs_driver = { - .owner = THIS_MODULE, - .name = "ibmtr_cs", - .probe = ibmtr_attach, - .remove = ibmtr_detach, - .id_table = ibmtr_ids, - .suspend = ibmtr_suspend, - .resume = ibmtr_resume, -}; - -static int __init init_ibmtr_cs(void) -{ - return pcmcia_register_driver(&ibmtr_cs_driver); -} - -static void __exit exit_ibmtr_cs(void) -{ - pcmcia_unregister_driver(&ibmtr_cs_driver); -} - -module_init(init_ibmtr_cs); -module_exit(exit_ibmtr_cs); diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c deleted file mode 100644 index 3e4b4f09111..00000000000 --- a/drivers/net/tokenring/lanstreamer.c +++ /dev/null @@ -1,1917 +0,0 @@ -/* - * lanstreamer.c -- driver for the IBM Auto LANStreamer PCI Adapter - * - * Written By: Mike Sullivan, IBM Corporation - * - * Copyright (C) 1999 IBM Corporation - * - * Linux driver for IBM PCI tokenring cards based on the LanStreamer MPC - * chipset. - * - * This driver is based on the olympic driver for IBM PCI TokenRing cards (Pit/Pit-Phy/Olympic - * chipsets) written by: - * 1999 Peter De Schrijver All Rights Reserved - * 1999 Mike Phillips (phillim@amtrak.com) - * - * Base Driver Skeleton: - * Written 1993-94 by Donald Becker. - * - * Copyright 1993 United States Government as represented by the - * Director, National Security Agency. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * NO WARRANTY - * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT - * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is - * solely responsible for determining the appropriateness of using and - * distributing the Program and assumes all risks associated with its - * exercise of rights under this Agreement, including but not limited to - * the risks and costs of program errors, damage to or loss of data, - * programs or equipment, and unavailability or interruption of operations. - * - * DISCLAIMER OF LIABILITY - * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR - * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED - * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * - * 12/10/99 - Alpha Release 0.1.0 - * First release to the public - * 03/03/00 - Merged to kernel, indented -kr -i8 -bri0, fixed some missing - * malloc free checks, reviewed code. <alan@redhat.com> - * 03/13/00 - Added spinlocks for smp - * 03/08/01 - Added support for module_init() and module_exit() - * 08/15/01 - Added ioctl() functionality for debugging, changed netif_*_queue - * calls and other incorrectness - Kent Yoder <yoder1@us.ibm.com> - * 11/05/01 - Restructured the interrupt function, added delays, reduced the - * the number of TX descriptors to 1, which together can prevent - * the card from locking up the box - <yoder1@us.ibm.com> - * 09/27/02 - New PCI interface + bug fix. - <yoder1@us.ibm.com> - * 11/13/02 - Removed free_irq calls which could cause a hang, added - * netif_carrier_{on|off} - <yoder1@us.ibm.com> - * - * To Do: - * - * - * If Problems do Occur - * Most problems can be rectified by either closing and opening the interface - * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult - * if compiled into the kernel). - */ - -/* Change STREAMER_DEBUG to 1 to get verbose, and I mean really verbose, messages */ - -#define STREAMER_DEBUG 0 -#define STREAMER_DEBUG_PACKETS 0 - -/* Change STREAMER_NETWORK_MONITOR to receive mac frames through the arb channel. - * Will also create a /proc/net/streamer_tr entry if proc_fs is compiled into the - * kernel. - * Intended to be used to create a ring-error reporting network module - * i.e. 
it will give you the source address of beaconers on the ring - */ - -#define STREAMER_NETWORK_MONITOR 0 - -/* #define CONFIG_PROC_FS */ - -/* - * Allow or disallow ioctl's for debugging - */ - -#define STREAMER_IOCTL 0 - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/timer.h> -#include <linux/in.h> -#include <linux/ioport.h> -#include <linux/string.h> -#include <linux/proc_fs.h> -#include <linux/ptrace.h> -#include <linux/skbuff.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/trdevice.h> -#include <linux/stddef.h> -#include <linux/init.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> -#include <linux/spinlock.h> -#include <linux/bitops.h> -#include <linux/jiffies.h> -#include <linux/slab.h> - -#include <net/net_namespace.h> -#include <net/checksum.h> - -#include <asm/io.h> - -#include "lanstreamer.h" - -#if (BITS_PER_LONG == 64) -#error broken on 64-bit: stores pointer to rx_ring->buffer in 32-bit int -#endif - - -/* I've got to put some intelligence into the version number so that Peter and I know - * which version of the code somebody has got. - * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author. - * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike - * - * Official releases will only have an a.b.c version number format. - */ - -static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n" - " v0.5.3 11/13/02 - Kent Yoder"; - -static DEFINE_PCI_DEVICE_TABLE(streamer_pci_tbl) = { - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,}, - {} /* terminating entry */ -}; -MODULE_DEVICE_TABLE(pci,streamer_pci_tbl); - - -static char *open_maj_error[] = { - "No error", "Lobe Media Test", "Physical Insertion", - "Address Verification", "Neighbor Notification (Ring Poll)", - "Request Parameters", "FDX Registration Request", - "FDX Lobe Media Test", "FDX Duplicate Address Check", - "Unknown stage" -}; - -static char *open_min_error[] = { - "No error", "Function Failure", "Signal Lost", "Wire Fault", - "Ring Speed Mismatch", "Timeout", "Ring Failure", "Ring Beaconing", - "Duplicate Node Address", "Request Parameters", "Remove Received", - "Reserved", "Reserved", "No Monitor Detected for RPL", - "Monitor Contention failer for RPL", "FDX Protocol Error" -}; - -/* Module parameters */ - -/* Ring Speed 0,4,16 - * 0 = Autosense - * 4,16 = Selected speed only, no autosense - * This allows the card to be the first on the ring - * and become the active monitor. 
- * - * WARNING: Some hubs will allow you to insert - * at the wrong speed - */ - -static int ringspeed[STREAMER_MAX_ADAPTERS] = { 0, }; - -module_param_array(ringspeed, int, NULL, 0); - -/* Packet buffer size */ - -static int pkt_buf_sz[STREAMER_MAX_ADAPTERS] = { 0, }; - -module_param_array(pkt_buf_sz, int, NULL, 0); - -/* Message Level */ - -static int message_level[STREAMER_MAX_ADAPTERS] = { 1, }; - -module_param_array(message_level, int, NULL, 0); - -#if STREAMER_IOCTL -static int streamer_ioctl(struct net_device *, struct ifreq *, int); -#endif - -static int streamer_reset(struct net_device *dev); -static int streamer_open(struct net_device *dev); -static netdev_tx_t streamer_xmit(struct sk_buff *skb, - struct net_device *dev); -static int streamer_close(struct net_device *dev); -static void streamer_set_rx_mode(struct net_device *dev); -static irqreturn_t streamer_interrupt(int irq, void *dev_id); -static int streamer_set_mac_address(struct net_device *dev, void *addr); -static void streamer_arb_cmd(struct net_device *dev); -static int streamer_change_mtu(struct net_device *dev, int mtu); -static void streamer_srb_bh(struct net_device *dev); -static void streamer_asb_bh(struct net_device *dev); -#if STREAMER_NETWORK_MONITOR -#ifdef CONFIG_PROC_FS -static int streamer_proc_info(char *buffer, char **start, off_t offset, - int length, int *eof, void *data); -static int sprintf_info(char *buffer, struct net_device *dev); -struct streamer_private *dev_streamer=NULL; -#endif -#endif - -static const struct net_device_ops streamer_netdev_ops = { - .ndo_open = streamer_open, - .ndo_stop = streamer_close, - .ndo_start_xmit = streamer_xmit, - .ndo_change_mtu = streamer_change_mtu, -#if STREAMER_IOCTL - .ndo_do_ioctl = streamer_ioctl, -#endif - .ndo_set_rx_mode = streamer_set_rx_mode, - .ndo_set_mac_address = streamer_set_mac_address, -}; - -static int __devinit streamer_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct net_device *dev; - struct streamer_private *streamer_priv; - unsigned long pio_start, pio_end, pio_flags, pio_len; - unsigned long mmio_start, mmio_end, mmio_flags, mmio_len; - int rc = 0; - static int card_no=-1; - u16 pcr; - -#if STREAMER_DEBUG - printk("lanstreamer::streamer_init_one, entry pdev %p\n",pdev); -#endif - - card_no++; - dev = alloc_trdev(sizeof(*streamer_priv)); - if (dev==NULL) { - printk(KERN_ERR "lanstreamer: out of memory.\n"); - return -ENOMEM; - } - - streamer_priv = netdev_priv(dev); - -#if STREAMER_NETWORK_MONITOR -#ifdef CONFIG_PROC_FS - if (!dev_streamer) - create_proc_read_entry("streamer_tr", 0, init_net.proc_net, - streamer_proc_info, NULL); - streamer_priv->next = dev_streamer; - dev_streamer = streamer_priv; -#endif -#endif - - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc) { - printk(KERN_ERR "%s: No suitable PCI mapping available.\n", - dev->name); - rc = -ENODEV; - goto err_out; - } - - rc = pci_enable_device(pdev); - if (rc) { - printk(KERN_ERR "lanstreamer: unable to enable pci device\n"); - rc=-EIO; - goto err_out; - } - - pci_set_master(pdev); - - rc = pci_set_mwi(pdev); - if (rc) { - printk(KERN_ERR "lanstreamer: unable to enable MWI on pci device\n"); - goto err_out_disable; - } - - pio_start = pci_resource_start(pdev, 0); - pio_end = pci_resource_end(pdev, 0); - pio_flags = pci_resource_flags(pdev, 0); - pio_len = pci_resource_len(pdev, 0); - - mmio_start = pci_resource_start(pdev, 1); - mmio_end = pci_resource_end(pdev, 1); - mmio_flags = pci_resource_flags(pdev, 1); - mmio_len = 
pci_resource_len(pdev, 1); - -#if STREAMER_DEBUG - printk("lanstreamer: pio_start %x pio_end %x pio_len %x pio_flags %x\n", - pio_start, pio_end, pio_len, pio_flags); - printk("lanstreamer: mmio_start %x mmio_end %x mmio_len %x mmio_flags %x\n", - mmio_start, mmio_end, mmio_flags, mmio_len); -#endif - - if (!request_region(pio_start, pio_len, "lanstreamer")) { - printk(KERN_ERR "lanstreamer: unable to get pci io addr %lx\n", - pio_start); - rc= -EBUSY; - goto err_out_mwi; - } - - if (!request_mem_region(mmio_start, mmio_len, "lanstreamer")) { - printk(KERN_ERR "lanstreamer: unable to get pci mmio addr %lx\n", - mmio_start); - rc= -EBUSY; - goto err_out_free_pio; - } - - streamer_priv->streamer_mmio=ioremap(mmio_start, mmio_len); - if (streamer_priv->streamer_mmio == NULL) { - printk(KERN_ERR "lanstreamer: unable to remap MMIO %lx\n", - mmio_start); - rc= -EIO; - goto err_out_free_mmio; - } - - init_waitqueue_head(&streamer_priv->srb_wait); - init_waitqueue_head(&streamer_priv->trb_wait); - - dev->netdev_ops = &streamer_netdev_ops; - dev->irq = pdev->irq; - dev->base_addr=pio_start; - SET_NETDEV_DEV(dev, &pdev->dev); - - streamer_priv->streamer_card_name = (char *)pdev->resource[0].name; - streamer_priv->pci_dev = pdev; - - if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000)) - streamer_priv->pkt_buf_sz = PKT_BUF_SZ; - else - streamer_priv->pkt_buf_sz = pkt_buf_sz[card_no]; - - streamer_priv->streamer_ring_speed = ringspeed[card_no]; - streamer_priv->streamer_message_level = message_level[card_no]; - - pci_set_drvdata(pdev, dev); - - spin_lock_init(&streamer_priv->streamer_lock); - - pci_read_config_word (pdev, PCI_COMMAND, &pcr); - pcr |= PCI_COMMAND_SERR; - pci_write_config_word (pdev, PCI_COMMAND, pcr); - - printk("%s\n", version); - printk("%s: %s. 
I/O at %hx, MMIO at %p, using irq %d\n",dev->name, - streamer_priv->streamer_card_name, - (unsigned int) dev->base_addr, - streamer_priv->streamer_mmio, - dev->irq); - - if (streamer_reset(dev)) - goto err_out_unmap; - - rc = register_netdev(dev); - if (rc) - goto err_out_unmap; - return 0; - -err_out_unmap: - iounmap(streamer_priv->streamer_mmio); -err_out_free_mmio: - release_mem_region(mmio_start, mmio_len); -err_out_free_pio: - release_region(pio_start, pio_len); -err_out_mwi: - pci_clear_mwi(pdev); -err_out_disable: - pci_disable_device(pdev); -err_out: - free_netdev(dev); -#if STREAMER_DEBUG - printk("lanstreamer: Exit error %x\n",rc); -#endif - return rc; -} - -static void __devexit streamer_remove_one(struct pci_dev *pdev) -{ - struct net_device *dev=pci_get_drvdata(pdev); - struct streamer_private *streamer_priv; - -#if STREAMER_DEBUG - printk("lanstreamer::streamer_remove_one entry pdev %p\n",pdev); -#endif - - if (dev == NULL) { - printk(KERN_ERR "lanstreamer::streamer_remove_one, ERROR dev is NULL\n"); - return; - } - - streamer_priv=netdev_priv(dev); - if (streamer_priv == NULL) { - printk(KERN_ERR "lanstreamer::streamer_remove_one, ERROR dev->priv is NULL\n"); - return; - } - -#if STREAMER_NETWORK_MONITOR -#ifdef CONFIG_PROC_FS - { - struct streamer_private **p, **next; - - for (p = &dev_streamer; *p; p = next) { - next = &(*p)->next; - if (*p == streamer_priv) { - *p = *next; - break; - } - } - if (!dev_streamer) - remove_proc_entry("streamer_tr", init_net.proc_net); - } -#endif -#endif - - unregister_netdev(dev); - iounmap(streamer_priv->streamer_mmio); - release_mem_region(pci_resource_start(pdev, 1), pci_resource_len(pdev,1)); - release_region(pci_resource_start(pdev, 0), pci_resource_len(pdev,0)); - pci_clear_mwi(pdev); - pci_disable_device(pdev); - free_netdev(dev); - pci_set_drvdata(pdev, NULL); -} - - -static int streamer_reset(struct net_device *dev) -{ - struct streamer_private *streamer_priv; - __u8 __iomem *streamer_mmio; - unsigned long t; - unsigned int uaa_addr; - struct sk_buff *skb = NULL; - __u16 misr; - - streamer_priv = netdev_priv(dev); - streamer_mmio = streamer_priv->streamer_mmio; - - writew(readw(streamer_mmio + BCTL) | BCTL_SOFTRESET, streamer_mmio + BCTL); - t = jiffies; - /* Hold soft reset bit for a while */ - ssleep(1); - - writew(readw(streamer_mmio + BCTL) & ~BCTL_SOFTRESET, - streamer_mmio + BCTL); - -#if STREAMER_DEBUG - printk("BCTL: %x\n", readw(streamer_mmio + BCTL)); - printk("GPR: %x\n", readw(streamer_mmio + GPR)); - printk("SISRMASK: %x\n", readw(streamer_mmio + SISR_MASK)); -#endif - writew(readw(streamer_mmio + BCTL) | (BCTL_RX_FIFO_8 | BCTL_TX_FIFO_8), streamer_mmio + BCTL ); - - if (streamer_priv->streamer_ring_speed == 0) { /* Autosense */ - writew(readw(streamer_mmio + GPR) | GPR_AUTOSENSE, - streamer_mmio + GPR); - if (streamer_priv->streamer_message_level) - printk(KERN_INFO "%s: Ringspeed autosense mode on\n", - dev->name); - } else if (streamer_priv->streamer_ring_speed == 16) { - if (streamer_priv->streamer_message_level) - printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", - dev->name); - writew(GPR_16MBPS, streamer_mmio + GPR); - } else if (streamer_priv->streamer_ring_speed == 4) { - if (streamer_priv->streamer_message_level) - printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", - dev->name); - writew(0, streamer_mmio + GPR); - } - - skb = dev_alloc_skb(streamer_priv->pkt_buf_sz); - if (!skb) { - printk(KERN_INFO "%s: skb allocation for diagnostics failed...proceeding\n", - dev->name); - } else { 
- struct streamer_rx_desc *rx_ring; - u8 *data; - - rx_ring=(struct streamer_rx_desc *)skb->data; - data=((u8 *)skb->data)+sizeof(struct streamer_rx_desc); - rx_ring->forward=0; - rx_ring->status=0; - rx_ring->buffer=cpu_to_le32(pci_map_single(streamer_priv->pci_dev, data, - 512, PCI_DMA_FROMDEVICE)); - rx_ring->framelen_buflen=512; - writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, rx_ring, 512, PCI_DMA_FROMDEVICE)), - streamer_mmio+RXBDA); - } - -#if STREAMER_DEBUG - printk("GPR = %x\n", readw(streamer_mmio + GPR)); -#endif - /* start solo init */ - writew(SISR_MI, streamer_mmio + SISR_MASK_SUM); - - while (!((readw(streamer_mmio + SISR)) & SISR_SRB_REPLY)) { - msleep_interruptible(100); - if (time_after(jiffies, t + 40 * HZ)) { - printk(KERN_ERR - "IBM PCI tokenring card not responding\n"); - release_region(dev->base_addr, STREAMER_IO_SPACE); - if (skb) - dev_kfree_skb(skb); - return -1; - } - } - writew(~SISR_SRB_REPLY, streamer_mmio + SISR_RUM); - misr = readw(streamer_mmio + MISR_RUM); - writew(~misr, streamer_mmio + MISR_RUM); - - if (skb) - dev_kfree_skb(skb); /* release skb used for diagnostics */ - -#if STREAMER_DEBUG - printk("LAPWWO: %x, LAPA: %x LAPE: %x\n", - readw(streamer_mmio + LAPWWO), readw(streamer_mmio + LAPA), - readw(streamer_mmio + LAPE)); -#endif - -#if STREAMER_DEBUG - { - int i; - writew(readw(streamer_mmio + LAPWWO), - streamer_mmio + LAPA); - printk("initialization response srb dump: "); - for (i = 0; i < 10; i++) - printk("%x:", - ntohs(readw(streamer_mmio + LAPDINC))); - printk("\n"); - } -#endif - - writew(readw(streamer_mmio + LAPWWO) + 6, streamer_mmio + LAPA); - if (readw(streamer_mmio + LAPD)) { - printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n", - ntohs(readw(streamer_mmio + LAPD))); - release_region(dev->base_addr, STREAMER_IO_SPACE); - return -1; - } - - writew(readw(streamer_mmio + LAPWWO) + 8, streamer_mmio + LAPA); - uaa_addr = ntohs(readw(streamer_mmio + LAPDINC)); - readw(streamer_mmio + LAPDINC); /* skip over Level.Addr field */ - streamer_priv->streamer_addr_table_addr = ntohs(readw(streamer_mmio + LAPDINC)); - streamer_priv->streamer_parms_addr = ntohs(readw(streamer_mmio + LAPDINC)); - -#if STREAMER_DEBUG - printk("UAA resides at %x\n", uaa_addr); -#endif - - /* setup uaa area for access with LAPD */ - { - int i; - __u16 addr; - writew(uaa_addr, streamer_mmio + LAPA); - for (i = 0; i < 6; i += 2) { - addr=ntohs(readw(streamer_mmio+LAPDINC)); - dev->dev_addr[i]= (addr >> 8) & 0xff; - dev->dev_addr[i+1]= addr & 0xff; - } -#if STREAMER_DEBUG - printk("Adapter address: %pM\n", dev->dev_addr); -#endif - } - return 0; -} - -static int streamer_open(struct net_device *dev) -{ - struct streamer_private *streamer_priv = netdev_priv(dev); - __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; - unsigned long flags; - char open_error[255]; - int i, open_finished = 1; - __u16 srb_word; - __u16 srb_open; - int rc; - - if (readw(streamer_mmio+BMCTL_SUM) & BMCTL_RX_ENABLED) { - rc=streamer_reset(dev); - } - - if (request_irq(dev->irq, streamer_interrupt, IRQF_SHARED, "lanstreamer", dev)) { - return -EAGAIN; - } -#if STREAMER_DEBUG - printk("BMCTL: %x\n", readw(streamer_mmio + BMCTL_SUM)); - printk("pending ints: %x\n", readw(streamer_mmio + SISR)); -#endif - - writew(SISR_MI | SISR_SRB_REPLY, streamer_mmio + SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */ - writew(LISR_LIE, streamer_mmio + LISR); /* more ints later */ - - /* adapter is closed, so SRB is pointed to by LAPWWO */ - 
writew(readw(streamer_mmio + LAPWWO), streamer_mmio + LAPA); - -#if STREAMER_DEBUG - printk("LAPWWO: %x, LAPA: %x\n", readw(streamer_mmio + LAPWWO), - readw(streamer_mmio + LAPA)); - printk("LAPE: %x\n", readw(streamer_mmio + LAPE)); - printk("SISR Mask = %04x\n", readw(streamer_mmio + SISR_MASK)); -#endif - do { - for (i = 0; i < SRB_COMMAND_SIZE; i += 2) { - writew(0, streamer_mmio + LAPDINC); - } - - writew(readw(streamer_mmio+LAPWWO),streamer_mmio+LAPA); - writew(htons(SRB_OPEN_ADAPTER<<8),streamer_mmio+LAPDINC) ; /* open */ - writew(htons(STREAMER_CLEAR_RET_CODE<<8),streamer_mmio+LAPDINC); - writew(STREAMER_CLEAR_RET_CODE, streamer_mmio + LAPDINC); - - writew(readw(streamer_mmio + LAPWWO) + 8, streamer_mmio + LAPA); -#if STREAMER_NETWORK_MONITOR - /* If Network Monitor, instruct card to copy MAC frames through the ARB */ - writew(htons(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), streamer_mmio + LAPDINC); /* offset 8 word contains open options */ -#else - writew(htons(OPEN_ADAPTER_ENABLE_FDX), streamer_mmio + LAPDINC); /* Offset 8 word contains Open.Options */ -#endif - - if (streamer_priv->streamer_laa[0]) { - writew(readw(streamer_mmio + LAPWWO) + 12, streamer_mmio + LAPA); - writew(htons((streamer_priv->streamer_laa[0] << 8) | - streamer_priv->streamer_laa[1]),streamer_mmio+LAPDINC); - writew(htons((streamer_priv->streamer_laa[2] << 8) | - streamer_priv->streamer_laa[3]),streamer_mmio+LAPDINC); - writew(htons((streamer_priv->streamer_laa[4] << 8) | - streamer_priv->streamer_laa[5]),streamer_mmio+LAPDINC); - memcpy(dev->dev_addr, streamer_priv->streamer_laa, dev->addr_len); - } - - /* save off srb open offset */ - srb_open = readw(streamer_mmio + LAPWWO); -#if STREAMER_DEBUG - writew(readw(streamer_mmio + LAPWWO), - streamer_mmio + LAPA); - printk("srb open request:\n"); - for (i = 0; i < 16; i++) { - printk("%x:", ntohs(readw(streamer_mmio + LAPDINC))); - } - printk("\n"); -#endif - spin_lock_irqsave(&streamer_priv->streamer_lock, flags); - streamer_priv->srb_queued = 1; - - /* signal solo that SRB command has been issued */ - writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM); - spin_unlock_irqrestore(&streamer_priv->streamer_lock, flags); - - while (streamer_priv->srb_queued) { - interruptible_sleep_on_timeout(&streamer_priv->srb_wait, 5 * HZ); - if (signal_pending(current)) { - printk(KERN_WARNING "%s: SRB timed out.\n", dev->name); - printk(KERN_WARNING "SISR=%x MISR=%x, LISR=%x\n", - readw(streamer_mmio + SISR), - readw(streamer_mmio + MISR_RUM), - readw(streamer_mmio + LISR)); - streamer_priv->srb_queued = 0; - break; - } - } - -#if STREAMER_DEBUG - printk("SISR_MASK: %x\n", readw(streamer_mmio + SISR_MASK)); - printk("srb open response:\n"); - writew(srb_open, streamer_mmio + LAPA); - for (i = 0; i < 10; i++) { - printk("%x:", - ntohs(readw(streamer_mmio + LAPDINC))); - } -#endif - - /* If we get the same return response as we set, the interrupt wasn't raised and the open - * timed out. 
- */ - writew(srb_open + 2, streamer_mmio + LAPA); - srb_word = ntohs(readw(streamer_mmio + LAPD)) >> 8; - if (srb_word == STREAMER_CLEAR_RET_CODE) { - printk(KERN_WARNING "%s: Adapter Open time out or error.\n", - dev->name); - return -EIO; - } - - if (srb_word != 0) { - if (srb_word == 0x07) { - if (!streamer_priv->streamer_ring_speed && open_finished) { /* Autosense , first time around */ - printk(KERN_WARNING "%s: Retrying at different ring speed\n", - dev->name); - open_finished = 0; - } else { - __u16 error_code; - - writew(srb_open + 6, streamer_mmio + LAPA); - error_code = ntohs(readw(streamer_mmio + LAPD)); - strcpy(open_error, open_maj_error[(error_code & 0xf0) >> 4]); - strcat(open_error, " - "); - strcat(open_error, open_min_error[(error_code & 0x0f)]); - - if (!streamer_priv->streamer_ring_speed && - ((error_code & 0x0f) == 0x0d)) - { - printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name); - printk(KERN_WARNING "%s: Please try again with a specified ring speed\n", dev->name); - free_irq(dev->irq, dev); - return -EIO; - } - - printk(KERN_WARNING "%s: %s\n", - dev->name, open_error); - free_irq(dev->irq, dev); - return -EIO; - - } /* if autosense && open_finished */ - } else { - printk(KERN_WARNING "%s: Bad OPEN response: %x\n", - dev->name, srb_word); - free_irq(dev->irq, dev); - return -EIO; - } - } else - open_finished = 1; - } while (!(open_finished)); /* Will only loop if ring speed mismatch re-open attempted && autosense is on */ - - writew(srb_open + 18, streamer_mmio + LAPA); - srb_word=ntohs(readw(streamer_mmio+LAPD)) >> 8; - if (srb_word & (1 << 3)) - if (streamer_priv->streamer_message_level) - printk(KERN_INFO "%s: Opened in FDX Mode\n", dev->name); - - if (srb_word & 1) - streamer_priv->streamer_ring_speed = 16; - else - streamer_priv->streamer_ring_speed = 4; - - if (streamer_priv->streamer_message_level) - printk(KERN_INFO "%s: Opened in %d Mbps mode\n", - dev->name, - streamer_priv->streamer_ring_speed); - - writew(srb_open + 8, streamer_mmio + LAPA); - streamer_priv->asb = ntohs(readw(streamer_mmio + LAPDINC)); - streamer_priv->srb = ntohs(readw(streamer_mmio + LAPDINC)); - streamer_priv->arb = ntohs(readw(streamer_mmio + LAPDINC)); - readw(streamer_mmio + LAPDINC); /* offset 14 word is rsvd */ - streamer_priv->trb = ntohs(readw(streamer_mmio + LAPDINC)); - - streamer_priv->streamer_receive_options = 0x00; - streamer_priv->streamer_copy_all_options = 0; - - /* setup rx ring */ - /* enable rx channel */ - writew(~BMCTL_RX_DIS, streamer_mmio + BMCTL_RUM); - - /* setup rx descriptors */ - streamer_priv->streamer_rx_ring= - kmalloc( sizeof(struct streamer_rx_desc)* - STREAMER_RX_RING_SIZE,GFP_KERNEL); - if (!streamer_priv->streamer_rx_ring) { - printk(KERN_WARNING "%s ALLOC of streamer rx ring FAILED!!\n",dev->name); - return -EIO; - } - - for (i = 0; i < STREAMER_RX_RING_SIZE; i++) { - struct sk_buff *skb; - - skb = dev_alloc_skb(streamer_priv->pkt_buf_sz); - if (skb == NULL) - break; - - skb->dev = dev; - - streamer_priv->streamer_rx_ring[i].forward = - cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[i + 1], - sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE)); - streamer_priv->streamer_rx_ring[i].status = 0; - streamer_priv->streamer_rx_ring[i].buffer = - cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, - streamer_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)); - streamer_priv->streamer_rx_ring[i].framelen_buflen = streamer_priv->pkt_buf_sz; - streamer_priv->rx_ring_skb[i] = 
skb; - } - streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1].forward = - cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0], - sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE)); - - if (i == 0) { - printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n", dev->name); - free_irq(dev->irq, dev); - return -EIO; - } - - streamer_priv->rx_ring_last_received = STREAMER_RX_RING_SIZE - 1; /* last processed rx status */ - - writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0], - sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)), - streamer_mmio + RXBDA); - writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1], - sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)), - streamer_mmio + RXLBDA); - - /* set bus master interrupt event mask */ - writew(MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK); - - - /* setup tx ring */ - streamer_priv->streamer_tx_ring=kmalloc(sizeof(struct streamer_tx_desc)* - STREAMER_TX_RING_SIZE,GFP_KERNEL); - if (!streamer_priv->streamer_tx_ring) { - printk(KERN_WARNING "%s ALLOC of streamer_tx_ring FAILED\n",dev->name); - return -EIO; - } - - writew(~BMCTL_TX2_DIS, streamer_mmio + BMCTL_RUM); /* Enables TX channel 2 */ - for (i = 0; i < STREAMER_TX_RING_SIZE; i++) { - streamer_priv->streamer_tx_ring[i].forward = cpu_to_le32(pci_map_single(streamer_priv->pci_dev, - &streamer_priv->streamer_tx_ring[i + 1], - sizeof(struct streamer_tx_desc), - PCI_DMA_TODEVICE)); - streamer_priv->streamer_tx_ring[i].status = 0; - streamer_priv->streamer_tx_ring[i].bufcnt_framelen = 0; - streamer_priv->streamer_tx_ring[i].buffer = 0; - streamer_priv->streamer_tx_ring[i].buflen = 0; - streamer_priv->streamer_tx_ring[i].rsvd1 = 0; - streamer_priv->streamer_tx_ring[i].rsvd2 = 0; - streamer_priv->streamer_tx_ring[i].rsvd3 = 0; - } - streamer_priv->streamer_tx_ring[STREAMER_TX_RING_SIZE - 1].forward = - cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_tx_ring[0], - sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE)); - - streamer_priv->free_tx_ring_entries = STREAMER_TX_RING_SIZE; - streamer_priv->tx_ring_free = 0; /* next entry in tx ring to use */ - streamer_priv->tx_ring_last_status = STREAMER_TX_RING_SIZE - 1; - - /* set Busmaster interrupt event mask (handle receives on interrupt only */ - writew(MISR_TX2_EOF | MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK); - /* set system event interrupt mask */ - writew(SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE, streamer_mmio + SISR_MASK_SUM); - -#if STREAMER_DEBUG - printk("BMCTL: %x\n", readw(streamer_mmio + BMCTL_SUM)); - printk("SISR MASK: %x\n", readw(streamer_mmio + SISR_MASK)); -#endif - -#if STREAMER_NETWORK_MONITOR - - writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA); - printk("%s: Node Address: %04x:%04x:%04x\n", dev->name, - ntohs(readw(streamer_mmio + LAPDINC)), - ntohs(readw(streamer_mmio + LAPDINC)), - ntohs(readw(streamer_mmio + LAPDINC))); - readw(streamer_mmio + LAPDINC); - readw(streamer_mmio + LAPDINC); - printk("%s: Functional Address: %04x:%04x\n", dev->name, - ntohs(readw(streamer_mmio + LAPDINC)), - ntohs(readw(streamer_mmio + LAPDINC))); - - writew(streamer_priv->streamer_parms_addr + 4, - streamer_mmio + LAPA); - printk("%s: NAUN Address: %04x:%04x:%04x\n", dev->name, - ntohs(readw(streamer_mmio + LAPDINC)), - ntohs(readw(streamer_mmio + LAPDINC)), - 
ntohs(readw(streamer_mmio + LAPDINC))); -#endif - - netif_start_queue(dev); - netif_carrier_on(dev); - return 0; -} - -/* - * When we enter the rx routine we do not know how many frames have been - * queued on the rx channel. Therefore we start at the next rx status - * position and travel around the receive ring until we have completed - * all the frames. - * - * This means that we may process the frame before we receive the end - * of frame interrupt. This is why we always test the status instead - * of blindly processing the next frame. - * - */ -static void streamer_rx(struct net_device *dev) -{ - struct streamer_private *streamer_priv = - netdev_priv(dev); - __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; - struct streamer_rx_desc *rx_desc; - int rx_ring_last_received, length, frame_length, buffer_cnt = 0; - struct sk_buff *skb, *skb2; - - /* setup the next rx descriptor to be received */ - rx_desc = &streamer_priv->streamer_rx_ring[(streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1)]; - rx_ring_last_received = streamer_priv->rx_ring_last_received; - - while (rx_desc->status & 0x01000000) { /* While processed descriptors are available */ - if (rx_ring_last_received != streamer_priv->rx_ring_last_received) - { - printk(KERN_WARNING "RX Error 1 rx_ring_last_received not the same %x %x\n", - rx_ring_last_received, streamer_priv->rx_ring_last_received); - } - streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1); - rx_ring_last_received = streamer_priv->rx_ring_last_received; - - length = rx_desc->framelen_buflen & 0xffff; /* buffer length */ - frame_length = (rx_desc->framelen_buflen >> 16) & 0xffff; - - if (rx_desc->status & 0x7E830000) { /* errors */ - if (streamer_priv->streamer_message_level) { - printk(KERN_WARNING "%s: Rx Error %x\n", - dev->name, rx_desc->status); - } - } else { /* received without errors */ - if (rx_desc->status & 0x80000000) { /* frame complete */ - buffer_cnt = 1; - skb = dev_alloc_skb(streamer_priv->pkt_buf_sz); - } else { - skb = dev_alloc_skb(frame_length); - } - - if (skb == NULL) - { - printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n", dev->name); - dev->stats.rx_dropped++; - } else { /* we allocated an skb OK */ - if (buffer_cnt == 1) { - /* release the DMA mapping */ - pci_unmap_single(streamer_priv->pci_dev, - le32_to_cpu(streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer), - streamer_priv->pkt_buf_sz, - PCI_DMA_FROMDEVICE); - skb2 = streamer_priv->rx_ring_skb[rx_ring_last_received]; -#if STREAMER_DEBUG_PACKETS - { - int i; - printk("streamer_rx packet print: skb->data2 %p skb->head %p\n", skb2->data, skb2->head); - for (i = 0; i < frame_length; i++) - { - printk("%x:", skb2->data[i]); - if (((i + 1) % 16) == 0) - printk("\n"); - } - printk("\n"); - } -#endif - skb_put(skb2, length); - skb2->protocol = tr_type_trans(skb2, dev); - /* recycle this descriptor */ - streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0; - streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz; - streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer = - cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, streamer_priv->pkt_buf_sz, - PCI_DMA_FROMDEVICE)); - streamer_priv->rx_ring_skb[rx_ring_last_received] = skb; - /* place recycled descriptor back on the adapter */ - writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, - 
&streamer_priv->streamer_rx_ring[rx_ring_last_received], - sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE)), - streamer_mmio + RXLBDA); - /* pass the received skb up to the protocol */ - netif_rx(skb2); - } else { - do { /* Walk the buffers */ - pci_unmap_single(streamer_priv->pci_dev, le32_to_cpu(rx_desc->buffer), length, PCI_DMA_FROMDEVICE), - memcpy(skb_put(skb, length), (void *)rx_desc->buffer, length); /* copy this fragment */ - streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0; - streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz; - - /* give descriptor back to the adapter */ - writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, - &streamer_priv->streamer_rx_ring[rx_ring_last_received], - length, PCI_DMA_FROMDEVICE)), - streamer_mmio + RXLBDA); - - if (rx_desc->status & 0x80000000) - break; /* this descriptor completes the frame */ - - /* else get the next pending descriptor */ - if (rx_ring_last_received!= streamer_priv->rx_ring_last_received) - { - printk("RX Error rx_ring_last_received not the same %x %x\n", - rx_ring_last_received, - streamer_priv->rx_ring_last_received); - } - rx_desc = &streamer_priv->streamer_rx_ring[(streamer_priv->rx_ring_last_received+1) & (STREAMER_RX_RING_SIZE-1)]; - - length = rx_desc->framelen_buflen & 0xffff; /* buffer length */ - streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received+1) & (STREAMER_RX_RING_SIZE - 1); - rx_ring_last_received = streamer_priv->rx_ring_last_received; - } while (1); - - skb->protocol = tr_type_trans(skb, dev); - /* send up to the protocol */ - netif_rx(skb); - } - dev->stats.rx_packets++; - dev->stats.rx_bytes += length; - } /* if skb == null */ - } /* end received without errors */ - - /* try the next one */ - rx_desc = &streamer_priv->streamer_rx_ring[(rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1)]; - } /* end for all completed rx descriptors */ -} - -static irqreturn_t streamer_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = (struct net_device *) dev_id; - struct streamer_private *streamer_priv = - netdev_priv(dev); - __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; - __u16 sisr; - __u16 misr; - u8 max_intr = MAX_INTR; - - spin_lock(&streamer_priv->streamer_lock); - sisr = readw(streamer_mmio + SISR); - - while((sisr & (SISR_MI | SISR_SRB_REPLY | SISR_ADAPTER_CHECK | SISR_ASB_FREE | - SISR_ARB_CMD | SISR_TRB_REPLY | SISR_PAR_ERR | SISR_SERR_ERR)) && - (max_intr > 0)) { - - if(sisr & SISR_PAR_ERR) { - writew(~SISR_PAR_ERR, streamer_mmio + SISR_RUM); - (void)readw(streamer_mmio + SISR_RUM); - } - - else if(sisr & SISR_SERR_ERR) { - writew(~SISR_SERR_ERR, streamer_mmio + SISR_RUM); - (void)readw(streamer_mmio + SISR_RUM); - } - - else if(sisr & SISR_MI) { - misr = readw(streamer_mmio + MISR_RUM); - - if (misr & MISR_TX2_EOF) { - while(streamer_priv->streamer_tx_ring[(streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1)].status) { - streamer_priv->tx_ring_last_status = (streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1); - streamer_priv->free_tx_ring_entries++; - dev->stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len; - dev->stats.tx_packets++; - dev_kfree_skb_irq(streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]); - streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buffer = 0xdeadbeef; - streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].status = 0; - 
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].bufcnt_framelen = 0; - streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buflen = 0; - streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd1 = 0; - streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd2 = 0; - streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd3 = 0; - } - netif_wake_queue(dev); - } - - if (misr & MISR_RX_EOF) { - streamer_rx(dev); - } - /* MISR_RX_EOF */ - - if (misr & MISR_RX_NOBUF) { - /* According to the documentation, we don't have to do anything, - * but trapping it keeps it out of /var/log/messages. - */ - } /* SISR_RX_NOBUF */ - - writew(~misr, streamer_mmio + MISR_RUM); - (void)readw(streamer_mmio + MISR_RUM); - } - - else if (sisr & SISR_SRB_REPLY) { - if (streamer_priv->srb_queued == 1) { - wake_up_interruptible(&streamer_priv->srb_wait); - } else if (streamer_priv->srb_queued == 2) { - streamer_srb_bh(dev); - } - streamer_priv->srb_queued = 0; - - writew(~SISR_SRB_REPLY, streamer_mmio + SISR_RUM); - (void)readw(streamer_mmio + SISR_RUM); - } - - else if (sisr & SISR_ADAPTER_CHECK) { - printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name); - writel(readl(streamer_mmio + LAPWWO), streamer_mmio + LAPA); - printk(KERN_WARNING "%s: Words %x:%x:%x:%x:\n", - dev->name, readw(streamer_mmio + LAPDINC), - ntohs(readw(streamer_mmio + LAPDINC)), - ntohs(readw(streamer_mmio + LAPDINC)), - ntohs(readw(streamer_mmio + LAPDINC))); - netif_stop_queue(dev); - netif_carrier_off(dev); - printk(KERN_WARNING "%s: Adapter must be manually reset.\n", dev->name); - } - - /* SISR_ADAPTER_CHECK */ - else if (sisr & SISR_ASB_FREE) { - /* Wake up anything that is waiting for the asb response */ - if (streamer_priv->asb_queued) { - streamer_asb_bh(dev); - } - writew(~SISR_ASB_FREE, streamer_mmio + SISR_RUM); - (void)readw(streamer_mmio + SISR_RUM); - } - /* SISR_ASB_FREE */ - else if (sisr & SISR_ARB_CMD) { - streamer_arb_cmd(dev); - writew(~SISR_ARB_CMD, streamer_mmio + SISR_RUM); - (void)readw(streamer_mmio + SISR_RUM); - } - /* SISR_ARB_CMD */ - else if (sisr & SISR_TRB_REPLY) { - /* Wake up anything that is waiting for the trb response */ - if (streamer_priv->trb_queued) { - wake_up_interruptible(&streamer_priv-> - trb_wait); - } - streamer_priv->trb_queued = 0; - writew(~SISR_TRB_REPLY, streamer_mmio + SISR_RUM); - (void)readw(streamer_mmio + SISR_RUM); - } - /* SISR_TRB_REPLY */ - - sisr = readw(streamer_mmio + SISR); - max_intr--; - } /* while() */ - - spin_unlock(&streamer_priv->streamer_lock) ; - return IRQ_HANDLED; -} - -static netdev_tx_t streamer_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct streamer_private *streamer_priv = - netdev_priv(dev); - __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; - unsigned long flags ; - - spin_lock_irqsave(&streamer_priv->streamer_lock, flags); - - if (streamer_priv->free_tx_ring_entries) { - streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].status = 0; - streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].bufcnt_framelen = 0x00020000 | skb->len; - streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].buffer = - cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE)); - streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd1 = skb->len; - streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd2 = 0; - 
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd3 = 0; - streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].buflen = skb->len; - - streamer_priv->tx_ring_skb[streamer_priv->tx_ring_free] = skb; - streamer_priv->free_tx_ring_entries--; -#if STREAMER_DEBUG_PACKETS - { - int i; - printk("streamer_xmit packet print:\n"); - for (i = 0; i < skb->len; i++) { - printk("%x:", skb->data[i]); - if (((i + 1) % 16) == 0) - printk("\n"); - } - printk("\n"); - } -#endif - - writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, - &streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free], - sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE)), - streamer_mmio + TX2LFDA); - (void)readl(streamer_mmio + TX2LFDA); - - streamer_priv->tx_ring_free = (streamer_priv->tx_ring_free + 1) & (STREAMER_TX_RING_SIZE - 1); - spin_unlock_irqrestore(&streamer_priv->streamer_lock,flags); - return NETDEV_TX_OK; - } else { - netif_stop_queue(dev); - spin_unlock_irqrestore(&streamer_priv->streamer_lock,flags); - return NETDEV_TX_BUSY; - } -} - - -static int streamer_close(struct net_device *dev) -{ - struct streamer_private *streamer_priv = - netdev_priv(dev); - __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; - unsigned long flags; - int i; - - netif_stop_queue(dev); - netif_carrier_off(dev); - writew(streamer_priv->srb, streamer_mmio + LAPA); - writew(htons(SRB_CLOSE_ADAPTER << 8),streamer_mmio+LAPDINC); - writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC); - - spin_lock_irqsave(&streamer_priv->streamer_lock, flags); - - streamer_priv->srb_queued = 1; - writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM); - - spin_unlock_irqrestore(&streamer_priv->streamer_lock, flags); - - while (streamer_priv->srb_queued) - { - interruptible_sleep_on_timeout(&streamer_priv->srb_wait, - jiffies + 60 * HZ); - if (signal_pending(current)) - { - printk(KERN_WARNING "%s: SRB timed out.\n", dev->name); - printk(KERN_WARNING "SISR=%x MISR=%x LISR=%x\n", - readw(streamer_mmio + SISR), - readw(streamer_mmio + MISR_RUM), - readw(streamer_mmio + LISR)); - streamer_priv->srb_queued = 0; - break; - } - } - - streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1); - - for (i = 0; i < STREAMER_RX_RING_SIZE; i++) { - if (streamer_priv->rx_ring_skb[streamer_priv->rx_ring_last_received]) { - dev_kfree_skb(streamer_priv->rx_ring_skb[streamer_priv->rx_ring_last_received]); - } - streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1); - } - - /* reset tx/rx fifo's and busmaster logic */ - - /* TBD. Add graceful way to reset the LLC channel without doing a soft reset. 
- writel(readl(streamer_mmio+BCTL)|(3<<13),streamer_mmio+BCTL); - udelay(1); - writel(readl(streamer_mmio+BCTL)&~(3<<13),streamer_mmio+BCTL); - */ - -#if STREAMER_DEBUG - writew(streamer_priv->srb, streamer_mmio + LAPA); - printk("srb): "); - for (i = 0; i < 2; i++) { - printk("%x ", ntohs(readw(streamer_mmio + LAPDINC))); - } - printk("\n"); -#endif - free_irq(dev->irq, dev); - return 0; -} - -static void streamer_set_rx_mode(struct net_device *dev) -{ - struct streamer_private *streamer_priv = - netdev_priv(dev); - __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; - __u8 options = 0; - struct netdev_hw_addr *ha; - unsigned char dev_mc_address[5]; - - writel(streamer_priv->srb, streamer_mmio + LAPA); - options = streamer_priv->streamer_copy_all_options; - - if (dev->flags & IFF_PROMISC) - options |= (3 << 5); /* All LLC and MAC frames, all through the main rx channel */ - else - options &= ~(3 << 5); - - /* Only issue the srb if there is a change in options */ - - if ((options ^ streamer_priv->streamer_copy_all_options)) - { - /* Now to issue the srb command to alter the copy.all.options */ - writew(htons(SRB_MODIFY_RECEIVE_OPTIONS << 8), streamer_mmio+LAPDINC); - writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC); - writew(htons((streamer_priv->streamer_receive_options << 8) | options),streamer_mmio+LAPDINC); - writew(htons(0x4a41),streamer_mmio+LAPDINC); - writew(htons(0x4d45),streamer_mmio+LAPDINC); - writew(htons(0x5320),streamer_mmio+LAPDINC); - writew(0x2020, streamer_mmio + LAPDINC); - - streamer_priv->srb_queued = 2; /* Can't sleep, use srb_bh */ - - writel(LISR_SRB_CMD, streamer_mmio + LISR_SUM); - - streamer_priv->streamer_copy_all_options = options; - return; - } - - /* Set the functional addresses we need for multicast */ - writel(streamer_priv->srb,streamer_mmio+LAPA); - dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; - - netdev_for_each_mc_addr(ha, dev) { - dev_mc_address[0] |= ha->addr[2]; - dev_mc_address[1] |= ha->addr[3]; - dev_mc_address[2] |= ha->addr[4]; - dev_mc_address[3] |= ha->addr[5]; - } - - writew(htons(SRB_SET_FUNC_ADDRESS << 8),streamer_mmio+LAPDINC); - writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC); - writew(0,streamer_mmio+LAPDINC); - writew(htons( (dev_mc_address[0] << 8) | dev_mc_address[1]),streamer_mmio+LAPDINC); - writew(htons( (dev_mc_address[2] << 8) | dev_mc_address[3]),streamer_mmio+LAPDINC); - streamer_priv->srb_queued = 2 ; - writel(LISR_SRB_CMD,streamer_mmio+LISR_SUM); -} - -static void streamer_srb_bh(struct net_device *dev) -{ - struct streamer_private *streamer_priv = netdev_priv(dev); - __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; - __u16 srb_word; - - writew(streamer_priv->srb, streamer_mmio + LAPA); - srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; - - switch (srb_word) { - - /* SRB_MODIFY_RECEIVE_OPTIONS i.e. 
set_multicast_list options (promiscuous) - * At some point we should do something if we get an error, such as - * resetting the IFF_PROMISC flag in dev - */ - - case SRB_MODIFY_RECEIVE_OPTIONS: - srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; - - switch (srb_word) { - case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name); - break; - case 0x04: - printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); - break; - default: - if (streamer_priv->streamer_message_level) - printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n", - dev->name, - streamer_priv->streamer_copy_all_options, - streamer_priv->streamer_receive_options); - break; - } /* switch srb[2] */ - break; - - - /* SRB_SET_GROUP_ADDRESS - Multicast group setting - */ - case SRB_SET_GROUP_ADDRESS: - srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; - switch (srb_word) { - case 0x00: - break; - case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name); - break; - case 0x04: - printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); - break; - case 0x3c: - printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n", dev->name); - break; - case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */ - printk(KERN_WARNING "%s: Group address registers full\n", dev->name); - break; - case 0x55: - printk(KERN_INFO "%s: Group Address already set.\n", dev->name); - break; - default: - break; - } /* switch srb[2] */ - break; - - - /* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list - */ - case SRB_RESET_GROUP_ADDRESS: - srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; - switch (srb_word) { - case 0x00: - break; - case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name); - break; - case 0x04: - printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); - break; - case 0x39: /* Must deal with this if individual multicast addresses used */ - printk(KERN_INFO "%s: Group address not found\n", dev->name); - break; - default: - break; - } /* switch srb[2] */ - break; - - - /* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode - */ - - case SRB_SET_FUNC_ADDRESS: - srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; - switch (srb_word) { - case 0x00: - if (streamer_priv->streamer_message_level) - printk(KERN_INFO "%s: Functional Address Mask Set\n", dev->name); - break; - case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name); - break; - case 0x04: - printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); - break; - default: - break; - } /* switch srb[2] */ - break; - - /* SRB_READ_LOG - Read and reset the adapter error counters - */ - - case SRB_READ_LOG: - srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; - switch (srb_word) { - case 0x00: - { - int i; - if (streamer_priv->streamer_message_level) - printk(KERN_INFO "%s: Read Log command complete\n", dev->name); - printk("Read Log statistics: "); - writew(streamer_priv->srb + 6, - streamer_mmio + LAPA); - for (i = 0; i < 5; i++) { - printk("%x:", ntohs(readw(streamer_mmio + LAPDINC))); - } - printk("\n"); - } - break; - case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name); - break; - case 0x04: - printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); - break; - - } /* switch srb[2] */ - break; - - /* SRB_READ_SR_COUNTERS - Read and reset the 
source routing bridge related counters */ - - case SRB_READ_SR_COUNTERS: - srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; - switch (srb_word) { - case 0x00: - if (streamer_priv->streamer_message_level) - printk(KERN_INFO "%s: Read Source Routing Counters issued\n", dev->name); - break; - case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name); - break; - case 0x04: - printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); - break; - default: - break; - } /* switch srb[2] */ - break; - - default: - printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n", dev->name); - break; - } /* switch srb[0] */ -} - -static int streamer_set_mac_address(struct net_device *dev, void *addr) -{ - struct sockaddr *saddr = addr; - struct streamer_private *streamer_priv = netdev_priv(dev); - - if (netif_running(dev)) - { - printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name); - return -EIO; - } - - memcpy(streamer_priv->streamer_laa, saddr->sa_data, dev->addr_len); - - if (streamer_priv->streamer_message_level) { - printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n", - dev->name, streamer_priv->streamer_laa[0], - streamer_priv->streamer_laa[1], - streamer_priv->streamer_laa[2], - streamer_priv->streamer_laa[3], - streamer_priv->streamer_laa[4], - streamer_priv->streamer_laa[5]); - } - return 0; -} - -static void streamer_arb_cmd(struct net_device *dev) -{ - struct streamer_private *streamer_priv = - netdev_priv(dev); - __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; - __u8 header_len; - __u16 frame_len, buffer_len; - struct sk_buff *mac_frame; - __u8 frame_data[256]; - __u16 buff_off; - __u16 lan_status = 0, lan_status_diff; /* Initialize to stop compiler warning */ - __u8 fdx_prot_error; - __u16 next_ptr; - __u16 arb_word; - -#if STREAMER_NETWORK_MONITOR - struct trh_hdr *mac_hdr; -#endif - - writew(streamer_priv->arb, streamer_mmio + LAPA); - arb_word=ntohs(readw(streamer_mmio+LAPD)) >> 8; - - if (arb_word == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */ - writew(streamer_priv->arb + 6, streamer_mmio + LAPA); - streamer_priv->mac_rx_buffer = buff_off = ntohs(readw(streamer_mmio + LAPDINC)); - header_len=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; /* 802.5 Token-Ring Header Length */ - frame_len = ntohs(readw(streamer_mmio + LAPDINC)); - -#if STREAMER_DEBUG - { - int i; - __u16 next; - __u8 status; - __u16 len; - - writew(ntohs(buff_off), streamer_mmio + LAPA); /*setup window to frame data */ - next = htons(readw(streamer_mmio + LAPDINC)); - status = - ntohs(readw(streamer_mmio + LAPDINC)) & 0xff; - len = ntohs(readw(streamer_mmio + LAPDINC)); - - /* print out 1st 14 bytes of frame data */ - for (i = 0; i < 7; i++) { - printk("Loc %d = %04x\n", i, - ntohs(readw - (streamer_mmio + LAPDINC))); - } - - printk("next %04x, fs %02x, len %04x\n", next, - status, len); - } -#endif - if (!(mac_frame = dev_alloc_skb(frame_len))) { - printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", - dev->name); - goto drop_frame; - } - /* Walk the buffer chain, creating the frame */ - - do { - int i; - __u16 rx_word; - - writew(htons(buff_off), streamer_mmio + LAPA); /* setup window to frame data */ - next_ptr = ntohs(readw(streamer_mmio + LAPDINC)); - readw(streamer_mmio + LAPDINC); /* read thru status word */ - buffer_len = ntohs(readw(streamer_mmio + LAPDINC)); - - if (buffer_len > 256) - break; - - i = 0; - while (i < buffer_len) { - rx_word=ntohs(readw(streamer_mmio+LAPDINC)); - frame_data[i]=rx_word 
>> 8; - frame_data[i+1]=rx_word & 0xff; - i += 2; - } - - memcpy(skb_put(mac_frame, buffer_len), - frame_data, buffer_len); - } while (next_ptr && (buff_off = next_ptr)); - - mac_frame->protocol = tr_type_trans(mac_frame, dev); -#if STREAMER_NETWORK_MONITOR - printk(KERN_WARNING "%s: Received MAC Frame, details:\n", - dev->name); - mac_hdr = tr_hdr(mac_frame); - printk(KERN_WARNING - "%s: MAC Frame Dest. Addr: %pM\n", - dev->name, mac_hdr->daddr); - printk(KERN_WARNING - "%s: MAC Frame Srce. Addr: %pM\n", - dev->name, mac_hdr->saddr); -#endif - netif_rx(mac_frame); - - /* Now tell the card we have dealt with the received frame */ -drop_frame: - /* Set LISR Bit 1 */ - writel(LISR_ARB_FREE, streamer_priv->streamer_mmio + LISR_SUM); - - /* Is the ASB free ? */ - - if (!(readl(streamer_priv->streamer_mmio + SISR) & SISR_ASB_FREE)) - { - streamer_priv->asb_queued = 1; - writel(LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM); - return; - /* Drop out and wait for the bottom half to be run */ - } - - - writew(streamer_priv->asb, streamer_mmio + LAPA); - writew(htons(ASB_RECEIVE_DATA << 8), streamer_mmio+LAPDINC); - writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC); - writew(0, streamer_mmio + LAPDINC); - writew(htons(streamer_priv->mac_rx_buffer), streamer_mmio + LAPD); - - writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM); - - streamer_priv->asb_queued = 2; - return; - - } else if (arb_word == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */ - writew(streamer_priv->arb + 6, streamer_mmio + LAPA); - lan_status = ntohs(readw(streamer_mmio + LAPDINC)); - fdx_prot_error = ntohs(readw(streamer_mmio+LAPD)) >> 8; - - /* Issue ARB Free */ - writew(LISR_ARB_FREE, streamer_priv->streamer_mmio + LISR_SUM); - - lan_status_diff = (streamer_priv->streamer_lan_status ^ lan_status) & - lan_status; - - if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR)) - { - if (lan_status_diff & LSC_LWF) - printk(KERN_WARNING "%s: Short circuit detected on the lobe\n", dev->name); - if (lan_status_diff & LSC_ARW) - printk(KERN_WARNING "%s: Auto removal error\n", dev->name); - if (lan_status_diff & LSC_FPE) - printk(KERN_WARNING "%s: FDX Protocol Error\n", dev->name); - if (lan_status_diff & LSC_RR) - printk(KERN_WARNING "%s: Force remove MAC frame received\n", dev->name); - - /* Adapter has been closed by the hardware */ - - /* reset tx/rx fifo's and busmaster logic */ - - /* @TBD. 
no llc reset on autostreamer writel(readl(streamer_mmio+BCTL)|(3<<13),streamer_mmio+BCTL); - udelay(1); - writel(readl(streamer_mmio+BCTL)&~(3<<13),streamer_mmio+BCTL); */ - - netif_stop_queue(dev); - netif_carrier_off(dev); - printk(KERN_WARNING "%s: Adapter must be manually reset.\n", dev->name); - } - /* If serious error */ - if (streamer_priv->streamer_message_level) { - if (lan_status_diff & LSC_SIG_LOSS) - printk(KERN_WARNING "%s: No receive signal detected\n", dev->name); - if (lan_status_diff & LSC_HARD_ERR) - printk(KERN_INFO "%s: Beaconing\n", dev->name); - if (lan_status_diff & LSC_SOFT_ERR) - printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name); - if (lan_status_diff & LSC_TRAN_BCN) - printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name); - if (lan_status_diff & LSC_SS) - printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); - if (lan_status_diff & LSC_RING_REC) - printk(KERN_INFO "%s: Ring recovery ongoing\n", dev->name); - if (lan_status_diff & LSC_FDX_MODE) - printk(KERN_INFO "%s: Operating in FDX mode\n", dev->name); - } - - if (lan_status_diff & LSC_CO) { - if (streamer_priv->streamer_message_level) - printk(KERN_INFO "%s: Counter Overflow\n", dev->name); - - /* Issue READ.LOG command */ - - writew(streamer_priv->srb, streamer_mmio + LAPA); - writew(htons(SRB_READ_LOG << 8),streamer_mmio+LAPDINC); - writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC); - writew(0, streamer_mmio + LAPDINC); - streamer_priv->srb_queued = 2; /* Can't sleep, use srb_bh */ - - writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM); - } - - if (lan_status_diff & LSC_SR_CO) { - if (streamer_priv->streamer_message_level) - printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name); - - /* Issue a READ.SR.COUNTERS */ - writew(streamer_priv->srb, streamer_mmio + LAPA); - writew(htons(SRB_READ_SR_COUNTERS << 8), - streamer_mmio+LAPDINC); - writew(htons(STREAMER_CLEAR_RET_CODE << 8), - streamer_mmio+LAPDINC); - streamer_priv->srb_queued = 2; /* Can't sleep, use srb_bh */ - writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM); - - } - streamer_priv->streamer_lan_status = lan_status; - } /* Lan.change.status */ - else - printk(KERN_WARNING "%s: Unknown arb command\n", dev->name); -} - -static void streamer_asb_bh(struct net_device *dev) -{ - struct streamer_private *streamer_priv = - netdev_priv(dev); - __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; - - if (streamer_priv->asb_queued == 1) - { - /* Dropped through the first time */ - - writew(streamer_priv->asb, streamer_mmio + LAPA); - writew(htons(ASB_RECEIVE_DATA << 8),streamer_mmio+LAPDINC); - writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC); - writew(0, streamer_mmio + LAPDINC); - writew(htons(streamer_priv->mac_rx_buffer), streamer_mmio + LAPD); - - writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM); - streamer_priv->asb_queued = 2; - - return; - } - - if (streamer_priv->asb_queued == 2) { - __u8 rc; - writew(streamer_priv->asb + 2, streamer_mmio + LAPA); - rc=ntohs(readw(streamer_mmio+LAPD)) >> 8; - switch (rc) { - case 0x01: - printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name); - break; - case 0x26: - printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name); - break; - case 0xFF: - /* Valid response, everything should be ok again */ - break; - default: - printk(KERN_WARNING "%s: Invalid return code in asb\n", dev->name); - break; - } - } - streamer_priv->asb_queued = 0; -} - -static 
int streamer_change_mtu(struct net_device *dev, int mtu) -{ - struct streamer_private *streamer_priv = - netdev_priv(dev); - __u16 max_mtu; - - if (streamer_priv->streamer_ring_speed == 4) - max_mtu = 4500; - else - max_mtu = 18000; - - if (mtu > max_mtu) - return -EINVAL; - if (mtu < 100) - return -EINVAL; - - dev->mtu = mtu; - streamer_priv->pkt_buf_sz = mtu + TR_HLEN; - - return 0; -} - -#if STREAMER_NETWORK_MONITOR -#ifdef CONFIG_PROC_FS -static int streamer_proc_info(char *buffer, char **start, off_t offset, - int length, int *eof, void *data) -{ - struct streamer_private *sdev=NULL; - struct pci_dev *pci_device = NULL; - int len = 0; - off_t begin = 0; - off_t pos = 0; - int size; - - struct net_device *dev; - - size = sprintf(buffer, "IBM LanStreamer/MPC Chipset Token Ring Adapters\n"); - - pos += size; - len += size; - - for(sdev=dev_streamer; sdev; sdev=sdev->next) { - pci_device=sdev->pci_dev; - dev=pci_get_drvdata(pci_device); - - size = sprintf_info(buffer + len, dev); - len += size; - pos = begin + len; - - if (pos < offset) { - len = 0; - begin = pos; - } - if (pos > offset + length) - break; - } /* for */ - - *start = buffer + (offset - begin); /* Start of wanted data */ - len -= (offset - begin); /* Start slop */ - if (len > length) - len = length; /* Ending slop */ - return len; -} - -static int sprintf_info(char *buffer, struct net_device *dev) -{ - struct streamer_private *streamer_priv = - netdev_priv(dev); - __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; - struct streamer_adapter_addr_table sat; - struct streamer_parameters_table spt; - int size = 0; - int i; - - writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA); - for (i = 0; i < 14; i += 2) { - __u16 io_word; - __u8 *datap = (__u8 *) & sat; - io_word=ntohs(readw(streamer_mmio+LAPDINC)); - datap[size]=io_word >> 8; - datap[size+1]=io_word & 0xff; - } - writew(streamer_priv->streamer_parms_addr, streamer_mmio + LAPA); - for (i = 0; i < 68; i += 2) { - __u16 io_word; - __u8 *datap = (__u8 *) & spt; - io_word=ntohs(readw(streamer_mmio+LAPDINC)); - datap[size]=io_word >> 8; - datap[size+1]=io_word & 0xff; - } - - size = sprintf(buffer, "\n%6s: Adapter Address : Node Address : Functional Addr\n", dev->name); - - size += sprintf(buffer + size, - "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n", - dev->name, dev->dev_addr, sat.node_addr, - sat.func_addr[0], sat.func_addr[1], - sat.func_addr[2], sat.func_addr[3]); - - size += sprintf(buffer + size, "\n%6s: Token Ring Parameters Table:\n", dev->name); - - size += sprintf(buffer + size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n", dev->name); - - size += sprintf(buffer + size, - "%6s: %02x:%02x:%02x:%02x : %pM : %pM : %04x : %04x : %04x :\n", - dev->name, spt.phys_addr[0], spt.phys_addr[1], - spt.phys_addr[2], spt.phys_addr[3], - spt.up_node_addr, spt.poll_addr, - ntohs(spt.acc_priority), ntohs(spt.auth_source_class), - ntohs(spt.att_code)); - - size += sprintf(buffer + size, "%6s: Source Address : Bcn T : Maj. 
V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n", dev->name); - - size += sprintf(buffer + size, - "%6s: %pM : %04x : %04x : %04x : %04x : %04x : %04x : \n", - dev->name, spt.source_addr, - ntohs(spt.beacon_type), ntohs(spt.major_vector), - ntohs(spt.lan_status), ntohs(spt.local_ring), - ntohs(spt.mon_error), ntohs(spt.frame_correl)); - - size += sprintf(buffer + size, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n", - dev->name); - - size += sprintf(buffer + size, - "%6s: : %02x : %02x : %pM : %02x:%02x:%02x:%02x : \n", - dev->name, ntohs(spt.beacon_transmit), - ntohs(spt.beacon_receive), - spt.beacon_naun, - spt.beacon_phys[0], spt.beacon_phys[1], - spt.beacon_phys[2], spt.beacon_phys[3]); - return size; -} -#endif -#endif - -static struct pci_driver streamer_pci_driver = { - .name = "lanstreamer", - .id_table = streamer_pci_tbl, - .probe = streamer_init_one, - .remove = __devexit_p(streamer_remove_one), -}; - -static int __init streamer_init_module(void) { - return pci_register_driver(&streamer_pci_driver); -} - -static void __exit streamer_cleanup_module(void) { - pci_unregister_driver(&streamer_pci_driver); -} - -module_init(streamer_init_module); -module_exit(streamer_cleanup_module); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/tokenring/lanstreamer.h b/drivers/net/tokenring/lanstreamer.h deleted file mode 100644 index 3c58d6a3fbc..00000000000 --- a/drivers/net/tokenring/lanstreamer.h +++ /dev/null @@ -1,343 +0,0 @@ -/* - * lanstreamer.h -- driver for the IBM Auto LANStreamer PCI Adapter - * - * Written By: Mike Sullivan, IBM Corporation - * - * Copyright (C) 1999 IBM Corporation - * - * Linux driver for IBM PCI tokenring cards based on the LanStreamer MPC - * chipset. - * - * This driver is based on the olympic driver for IBM PCI TokenRing cards (Pit/Pit-Phy/Olympic - * chipsets) written by: - * 1999 Peter De Schrijver All Rights Reserved - * 1999 Mike Phillips (phillim@amtrak.com) - * - * Base Driver Skeleton: - * Written 1993-94 by Donald Becker. - * - * Copyright 1993 United States Government as represented by the - * Director, National Security Agency. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * NO WARRANTY - * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT - * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is - * solely responsible for determining the appropriateness of using and - * distributing the Program and assumes all risks associated with its - * exercise of rights under this Agreement, including but not limited to - * the risks and costs of program errors, damage to or loss of data, - * programs or equipment, and unavailability or interruption of operations. 
- * - * DISCLAIMER OF LIABILITY - * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR - * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED - * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * - * 12/10/99 - Alpha Release 0.1.0 - * First release to the public - * 08/15/01 - Added ioctl() definitions and others - Kent Yoder <yoder1@us.ibm.com> - * - */ - -/* MAX_INTR - the maximum number of times we can loop - * inside the interrupt function before returning - * control to the OS (maximum value is 256) - */ -#define MAX_INTR 5 - -#define CLS 0x0C -#define MLR 0x86 -#define LTR 0x0D - -#define BCTL 0x60 -#define BCTL_SOFTRESET (1<<15) -#define BCTL_RX_FIFO_8 (1<<1) -#define BCTL_TX_FIFO_8 (1<<3) - -#define GPR 0x4a -#define GPR_AUTOSENSE (1<<2) -#define GPR_16MBPS (1<<3) - -#define LISR 0x10 -#define LISR_SUM 0x12 -#define LISR_RUM 0x14 - -#define LISR_LIE (1<<15) -#define LISR_SLIM (1<<13) -#define LISR_SLI (1<<12) -#define LISR_BPEI (1<<9) -#define LISR_BPE (1<<8) -#define LISR_SRB_CMD (1<<5) -#define LISR_ASB_REPLY (1<<4) -#define LISR_ASB_FREE_REQ (1<<2) -#define LISR_ARB_FREE (1<<1) -#define LISR_TRB_FRAME (1<<0) - -#define SISR 0x16 -#define SISR_SUM 0x18 -#define SISR_RUM 0x1A -#define SISR_MASK 0x54 -#define SISR_MASK_SUM 0x56 -#define SISR_MASK_RUM 0x58 - -#define SISR_MI (1<<15) -#define SISR_SERR_ERR (1<<14) -#define SISR_TIMER (1<<11) -#define SISR_LAP_PAR_ERR (1<<10) -#define SISR_LAP_ACC_ERR (1<<9) -#define SISR_PAR_ERR (1<<8) -#define SISR_ADAPTER_CHECK (1<<6) -#define SISR_SRB_REPLY (1<<5) -#define SISR_ASB_FREE (1<<4) -#define SISR_ARB_CMD (1<<3) -#define SISR_TRB_REPLY (1<<2) - -#define MISR_RUM 0x5A -#define MISR_MASK 0x5C -#define MISR_MASK_RUM 0x5E - -#define MISR_TX2_IDLE (1<<15) -#define MISR_TX2_NO_STATUS (1<<14) -#define MISR_TX2_HALT (1<<13) -#define MISR_TX2_EOF (1<<12) -#define MISR_TX1_IDLE (1<<11) -#define MISR_TX1_NO_STATUS (1<<10) -#define MISR_TX1_HALT (1<<9) -#define MISR_TX1_EOF (1<<8) -#define MISR_RX_NOBUF (1<<5) -#define MISR_RX_EOB (1<<4) -#define MISR_RX_NO_STATUS (1<<2) -#define MISR_RX_HALT (1<<1) -#define MISR_RX_EOF (1<<0) - -#define LAPA 0x62 -#define LAPE 0x64 -#define LAPD 0x66 -#define LAPDINC 0x68 -#define LAPWWO 0x6A -#define LAPWWC 0x6C -#define LAPCTL 0x6E - -#define TIMER 0x4E4 - -#define BMCTL_SUM 0x50 -#define BMCTL_RUM 0x52 -#define BMCTL_TX1_DIS (1<<14) -#define BMCTL_TX2_DIS (1<<10) -#define BMCTL_RX_DIS (1<<6) -#define BMCTL_RX_ENABLED (1<<5) - -#define RXLBDA 0x90 -#define RXBDA 0x94 -#define RXSTAT 0x98 -#define RXDBA 0x9C - -#define TX1LFDA 0xA0 -#define TX1FDA 0xA4 -#define TX1STAT 0xA8 -#define TX1DBA 0xAC -#define TX2LFDA 0xB0 -#define TX2FDA 0xB4 -#define TX2STAT 0xB8 -#define TX2DBA 0xBC - -#define STREAMER_IO_SPACE 256 - -#define SRB_COMMAND_SIZE 50 - -#define STREAMER_MAX_ADAPTERS 8 /* 0x08 __MODULE_STRING can't hand 0xnn */ - -/* Defines for LAN STATUS CHANGE reports */ -#define LSC_SIG_LOSS 0x8000 -#define LSC_HARD_ERR 0x4000 -#define LSC_SOFT_ERR 0x2000 
-#define LSC_TRAN_BCN 0x1000 -#define LSC_LWF 0x0800 -#define LSC_ARW 0x0400 -#define LSC_FPE 0x0200 -#define LSC_RR 0x0100 -#define LSC_CO 0x0080 -#define LSC_SS 0x0040 -#define LSC_RING_REC 0x0020 -#define LSC_SR_CO 0x0010 -#define LSC_FDX_MODE 0x0004 - -/* Defines for OPEN ADAPTER command */ - -#define OPEN_ADAPTER_EXT_WRAP (1<<15) -#define OPEN_ADAPTER_DIS_HARDEE (1<<14) -#define OPEN_ADAPTER_DIS_SOFTERR (1<<13) -#define OPEN_ADAPTER_PASS_ADC_MAC (1<<12) -#define OPEN_ADAPTER_PASS_ATT_MAC (1<<11) -#define OPEN_ADAPTER_ENABLE_EC (1<<10) -#define OPEN_ADAPTER_CONTENDER (1<<8) -#define OPEN_ADAPTER_PASS_BEACON (1<<7) -#define OPEN_ADAPTER_ENABLE_FDX (1<<6) -#define OPEN_ADAPTER_ENABLE_RPL (1<<5) -#define OPEN_ADAPTER_INHIBIT_ETR (1<<4) -#define OPEN_ADAPTER_INTERNAL_WRAP (1<<3) - - -/* Defines for SRB Commands */ -#define SRB_CLOSE_ADAPTER 0x04 -#define SRB_CONFIGURE_BRIDGE 0x0c -#define SRB_CONFIGURE_HP_CHANNEL 0x13 -#define SRB_MODIFY_BRIDGE_PARMS 0x15 -#define SRB_MODIFY_OPEN_OPTIONS 0x01 -#define SRB_MODIFY_RECEIVE_OPTIONS 0x17 -#define SRB_NO_OPERATION 0x00 -#define SRB_OPEN_ADAPTER 0x03 -#define SRB_READ_LOG 0x08 -#define SRB_READ_SR_COUNTERS 0x16 -#define SRB_RESET_GROUP_ADDRESS 0x02 -#define SRB_RESET_TARGET_SEGMETN 0x14 -#define SRB_SAVE_CONFIGURATION 0x1b -#define SRB_SET_BRIDGE_PARMS 0x09 -#define SRB_SET_FUNC_ADDRESS 0x07 -#define SRB_SET_GROUP_ADDRESS 0x06 -#define SRB_SET_TARGET_SEGMENT 0x05 - -/* Clear return code */ -#define STREAMER_CLEAR_RET_CODE 0xfe - -/* ARB Commands */ -#define ARB_RECEIVE_DATA 0x81 -#define ARB_LAN_CHANGE_STATUS 0x84 - -/* ASB Response commands */ -#define ASB_RECEIVE_DATA 0x81 - - -/* Streamer defaults for buffers */ - -#define STREAMER_RX_RING_SIZE 16 /* should be a power of 2 */ -/* Setting the number of TX descriptors to 1 is a workaround for an - * undocumented hardware problem with the lanstreamer board. Setting - * this to something higher may slightly increase the throughput you - * can get from the card, but at the risk of locking up the box. 
- - * <yoder1@us.ibm.com> - */ -#define STREAMER_TX_RING_SIZE 1 /* should be a power of 2 */ - -#define PKT_BUF_SZ 4096 /* Default packet size */ - -/* Streamer data structures */ - -struct streamer_tx_desc { - __u32 forward; - __u32 status; - __u32 bufcnt_framelen; - __u32 buffer; - __u32 buflen; - __u32 rsvd1; - __u32 rsvd2; - __u32 rsvd3; -}; - -struct streamer_rx_desc { - __u32 forward; - __u32 status; - __u32 buffer; - __u32 framelen_buflen; -}; - -struct mac_receive_buffer { - __u16 next; - __u8 padding; - __u8 frame_status; - __u16 buffer_length; - __u8 frame_data; -}; - -struct streamer_private { - - __u16 srb; - __u16 trb; - __u16 arb; - __u16 asb; - - struct streamer_private *next; - struct pci_dev *pci_dev; - __u8 __iomem *streamer_mmio; - char *streamer_card_name; - - spinlock_t streamer_lock; - - volatile int srb_queued; /* True if an SRB is still posted */ - wait_queue_head_t srb_wait; - - volatile int asb_queued; /* True if an ASB is posted */ - - volatile int trb_queued; /* True if a TRB is posted */ - wait_queue_head_t trb_wait; - - struct streamer_rx_desc *streamer_rx_ring; - struct streamer_tx_desc *streamer_tx_ring; - struct sk_buff *tx_ring_skb[STREAMER_TX_RING_SIZE], - *rx_ring_skb[STREAMER_RX_RING_SIZE]; - int tx_ring_free, tx_ring_last_status, rx_ring_last_received, - free_tx_ring_entries; - - __u16 streamer_lan_status; - __u8 streamer_ring_speed; - __u16 pkt_buf_sz; - __u8 streamer_receive_options, streamer_copy_all_options, - streamer_message_level; - __u16 streamer_addr_table_addr, streamer_parms_addr; - __u16 mac_rx_buffer; - __u8 streamer_laa[6]; -}; - -struct streamer_adapter_addr_table { - - __u8 node_addr[6]; - __u8 reserved[4]; - __u8 func_addr[4]; -}; - -struct streamer_parameters_table { - - __u8 phys_addr[4]; - __u8 up_node_addr[6]; - __u8 up_phys_addr[4]; - __u8 poll_addr[6]; - __u16 reserved; - __u16 acc_priority; - __u16 auth_source_class; - __u16 att_code; - __u8 source_addr[6]; - __u16 beacon_type; - __u16 major_vector; - __u16 lan_status; - __u16 soft_error_time; - __u16 reserved1; - __u16 local_ring; - __u16 mon_error; - __u16 beacon_transmit; - __u16 beacon_receive; - __u16 frame_correl; - __u8 beacon_naun[6]; - __u32 reserved2; - __u8 beacon_phys[4]; -}; diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c deleted file mode 100644 index 28adcdf3b14..00000000000 --- a/drivers/net/tokenring/madgemc.c +++ /dev/null @@ -1,761 +0,0 @@ -/* - * madgemc.c: Driver for the Madge Smart 16/4 MC16 MCA token ring card. - * - * Written 2000 by Adam Fritzler - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * This driver module supports the following cards: - * - Madge Smart 16/4 Ringnode MC16 - * - Madge Smart 16/4 Ringnode MC32 (??) - * - * Maintainer(s): - * AF Adam Fritzler - * - * Modification History: - * 16-Jan-00 AF Created - * - */ -static const char version[] = "madgemc.c: v0.91 23/01/2000 by Adam Fritzler\n"; - -#include <linux/module.h> -#include <linux/mca.h> -#include <linux/slab.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/netdevice.h> -#include <linux/trdevice.h> - -#include <asm/io.h> -#include <asm/irq.h> - -#include "tms380tr.h" -#include "madgemc.h" /* Madge-specific constants */ - -#define MADGEMC_IO_EXTENT 32 -#define MADGEMC_SIF_OFFSET 0x08 - -struct card_info { - /* - * These are read from the BIA ROM. 
- */ - unsigned int manid; - unsigned int cardtype; - unsigned int cardrev; - unsigned int ramsize; - - /* - * These are read from the MCA POS registers. - */ - unsigned int burstmode:2; - unsigned int fairness:1; /* 0 = Fair, 1 = Unfair */ - unsigned int arblevel:4; - unsigned int ringspeed:2; /* 0 = 4mb, 1 = 16, 2 = Auto/none */ - unsigned int cabletype:1; /* 0 = RJ45, 1 = DB9 */ -}; - -static int madgemc_open(struct net_device *dev); -static int madgemc_close(struct net_device *dev); -static int madgemc_chipset_init(struct net_device *dev); -static void madgemc_read_rom(struct net_device *dev, struct card_info *card); -static unsigned short madgemc_setnselout_pins(struct net_device *dev); -static void madgemc_setcabletype(struct net_device *dev, int type); - -static int madgemc_mcaproc(char *buf, int slot, void *d); - -static void madgemc_setregpage(struct net_device *dev, int page); -static void madgemc_setsifsel(struct net_device *dev, int val); -static void madgemc_setint(struct net_device *dev, int val); - -static irqreturn_t madgemc_interrupt(int irq, void *dev_id); - -/* - * These work around paging, however they don't guarantee you're on the - * right page. - */ -#define SIFREADB(reg) (inb(dev->base_addr + ((reg<0x8)?reg:reg-0x8))) -#define SIFWRITEB(val, reg) (outb(val, dev->base_addr + ((reg<0x8)?reg:reg-0x8))) -#define SIFREADW(reg) (inw(dev->base_addr + ((reg<0x8)?reg:reg-0x8))) -#define SIFWRITEW(val, reg) (outw(val, dev->base_addr + ((reg<0x8)?reg:reg-0x8))) - -/* - * Read a byte-length value from the register. - */ -static unsigned short madgemc_sifreadb(struct net_device *dev, unsigned short reg) -{ - unsigned short ret; - if (reg<0x8) - ret = SIFREADB(reg); - else { - madgemc_setregpage(dev, 1); - ret = SIFREADB(reg); - madgemc_setregpage(dev, 0); - } - return ret; -} - -/* - * Write a byte-length value to a register. - */ -static void madgemc_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg) -{ - if (reg<0x8) - SIFWRITEB(val, reg); - else { - madgemc_setregpage(dev, 1); - SIFWRITEB(val, reg); - madgemc_setregpage(dev, 0); - } -} - -/* - * Read a word-length value from a register - */ -static unsigned short madgemc_sifreadw(struct net_device *dev, unsigned short reg) -{ - unsigned short ret; - if (reg<0x8) - ret = SIFREADW(reg); - else { - madgemc_setregpage(dev, 1); - ret = SIFREADW(reg); - madgemc_setregpage(dev, 0); - } - return ret; -} - -/* - * Write a word-length value to a register. - */ -static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg) -{ - if (reg<0x8) - SIFWRITEW(val, reg); - else { - madgemc_setregpage(dev, 1); - SIFWRITEW(val, reg); - madgemc_setregpage(dev, 0); - } -} - -static struct net_device_ops madgemc_netdev_ops __read_mostly; - -static int __devinit madgemc_probe(struct device *device) -{ - static int versionprinted; - struct net_device *dev; - struct net_local *tp; - struct card_info *card; - struct mca_device *mdev = to_mca_device(device); - int ret = 0; - - if (versionprinted++ == 0) - printk("%s", version); - - if(mca_device_claimed(mdev)) - return -EBUSY; - mca_device_set_claim(mdev, 1); - - dev = alloc_trdev(sizeof(struct net_local)); - if (!dev) { - printk("madgemc: unable to allocate dev space\n"); - mca_device_set_claim(mdev, 0); - ret = -ENOMEM; - goto getout; - } - - dev->netdev_ops = &madgemc_netdev_ops; - - card = kmalloc(sizeof(struct card_info), GFP_KERNEL); - if (card==NULL) { - ret = -ENOMEM; - goto getout1; - } - - /* - * Parse configuration information. 
This all comes - * directly from the publicly available @002d.ADF. - * Get it from Madge or your local ADF library. - */ - - /* - * Base address - */ - dev->base_addr = 0x0a20 + - ((mdev->pos[2] & MC16_POS2_ADDR2)?0x0400:0) + - ((mdev->pos[0] & MC16_POS0_ADDR1)?0x1000:0) + - ((mdev->pos[3] & MC16_POS3_ADDR3)?0x2000:0); - - /* - * Interrupt line - */ - switch(mdev->pos[0] >> 6) { /* upper two bits */ - case 0x1: dev->irq = 3; break; - case 0x2: dev->irq = 9; break; /* IRQ 2 = IRQ 9 */ - case 0x3: dev->irq = 10; break; - default: dev->irq = 0; break; - } - - if (dev->irq == 0) { - printk("%s: invalid IRQ\n", dev->name); - ret = -EBUSY; - goto getout2; - } - - if (!request_region(dev->base_addr, MADGEMC_IO_EXTENT, - "madgemc")) { - printk(KERN_INFO "madgemc: unable to setup Smart MC in slot %d because of I/O base conflict at 0x%04lx\n", mdev->slot, dev->base_addr); - dev->base_addr += MADGEMC_SIF_OFFSET; - ret = -EBUSY; - goto getout2; - } - dev->base_addr += MADGEMC_SIF_OFFSET; - - /* - * Arbitration Level - */ - card->arblevel = ((mdev->pos[0] >> 1) & 0x7) + 8; - - /* - * Burst mode and Fairness - */ - card->burstmode = ((mdev->pos[2] >> 6) & 0x3); - card->fairness = ((mdev->pos[2] >> 4) & 0x1); - - /* - * Ring Speed - */ - if ((mdev->pos[1] >> 2)&0x1) - card->ringspeed = 2; /* not selected */ - else if ((mdev->pos[2] >> 5) & 0x1) - card->ringspeed = 1; /* 16Mb */ - else - card->ringspeed = 0; /* 4Mb */ - - /* - * Cable type - */ - if ((mdev->pos[1] >> 6)&0x1) - card->cabletype = 1; /* STP/DB9 */ - else - card->cabletype = 0; /* UTP/RJ-45 */ - - - /* - * ROM Info. This requires us to actually twiddle - * bits on the card, so we must ensure above that - * the base address is free of conflict (request_region above). - */ - madgemc_read_rom(dev, card); - - if (card->manid != 0x4d) { /* something went wrong */ - printk(KERN_INFO "%s: Madge MC ROM read failed (unknown manufacturer ID %02x)\n", dev->name, card->manid); - goto getout3; - } - - if ((card->cardtype != 0x08) && (card->cardtype != 0x0d)) { - printk(KERN_INFO "%s: Madge MC ROM read failed (unknown card ID %02x)\n", dev->name, card->cardtype); - ret = -EIO; - goto getout3; - } - - /* All cards except Rev 0 and 1 MC16's have 256kb of RAM */ - if ((card->cardtype == 0x08) && (card->cardrev <= 0x01)) - card->ramsize = 128; - else - card->ramsize = 256; - - printk("%s: %s Rev %d at 0x%04lx IRQ %d\n", - dev->name, - (card->cardtype == 0x08)?MADGEMC16_CARDNAME: - MADGEMC32_CARDNAME, card->cardrev, - dev->base_addr, dev->irq); - - if (card->cardtype == 0x0d) - printk("%s: Warning: MC32 support is experimental and highly untested\n", dev->name); - - if (card->ringspeed==2) { /* Unknown */ - printk("%s: Warning: Ring speed not set in POS -- Please run the reference disk and set it!\n", dev->name); - card->ringspeed = 1; /* default to 16mb */ - } - - printk("%s: RAM Size: %dKB\n", dev->name, card->ramsize); - - printk("%s: Ring Speed: %dMb/sec on %s\n", dev->name, - (card->ringspeed)?16:4, - card->cabletype?"STP/DB9":"UTP/RJ-45"); - printk("%s: Arbitration Level: %d\n", dev->name, - card->arblevel); - - printk("%s: Burst Mode: ", dev->name); - switch(card->burstmode) { - case 0: printk("Cycle steal"); break; - case 1: printk("Limited burst"); break; - case 2: printk("Delayed release"); break; - case 3: printk("Immediate release"); break; - } - printk(" (%s)\n", (card->fairness)?"Unfair":"Fair"); - - - /* - * Enable SIF before we assign the interrupt handler, - * just in case we get spurious interrupts that need - * handling. 
- */ - outb(0, dev->base_addr + MC_CONTROL_REG0); /* sanity */ - madgemc_setsifsel(dev, 1); - if (request_irq(dev->irq, madgemc_interrupt, IRQF_SHARED, - "madgemc", dev)) { - ret = -EBUSY; - goto getout3; - } - - madgemc_chipset_init(dev); /* enables interrupts! */ - madgemc_setcabletype(dev, card->cabletype); - - /* Setup MCA structures */ - mca_device_set_name(mdev, (card->cardtype == 0x08)?MADGEMC16_CARDNAME:MADGEMC32_CARDNAME); - mca_set_adapter_procfn(mdev->slot, madgemc_mcaproc, dev); - - printk("%s: Ring Station Address: %pM\n", - dev->name, dev->dev_addr); - - if (tmsdev_init(dev, device)) { - printk("%s: unable to get memory for dev->priv.\n", - dev->name); - ret = -ENOMEM; - goto getout4; - } - tp = netdev_priv(dev); - - /* - * The MC16 is physically a 32bit card. However, Madge - * insists on calling it 16bit, so I'll assume here that - * they know what they're talking about. Cut off DMA - * at 16mb. - */ - tp->setnselout = madgemc_setnselout_pins; - tp->sifwriteb = madgemc_sifwriteb; - tp->sifreadb = madgemc_sifreadb; - tp->sifwritew = madgemc_sifwritew; - tp->sifreadw = madgemc_sifreadw; - tp->DataRate = (card->ringspeed)?SPEED_16:SPEED_4; - - memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1); - - tp->tmspriv = card; - dev_set_drvdata(device, dev); - - if (register_netdev(dev) == 0) - return 0; - - dev_set_drvdata(device, NULL); - ret = -ENOMEM; -getout4: - free_irq(dev->irq, dev); -getout3: - release_region(dev->base_addr-MADGEMC_SIF_OFFSET, - MADGEMC_IO_EXTENT); -getout2: - kfree(card); -getout1: - free_netdev(dev); -getout: - mca_device_set_claim(mdev, 0); - return ret; -} - -/* - * Handle interrupts generated by the card - * - * The MicroChannel Madge cards need slightly more handling - * after an interrupt than other TMS380 cards do. - * - * First we must make sure it was this card that generated the - * interrupt (since interrupt sharing is allowed). Then, - * because we're using level-triggered interrupts (as is - * standard on MCA), we must toggle the interrupt line - * on the card in order to claim and acknowledge the interrupt. - * Once that is done, the interrupt should be handlable in - * the normal tms380tr_interrupt() routine. - * - * There's two ways we can check to see if the interrupt is ours, - * both with their own disadvantages... - * - * 1) Read in the SIFSTS register from the TMS controller. This - * is guaranteed to be accurate, however, there's a fairly - * large performance penalty for doing so: the Madge chips - * must request the register from the Eagle, the Eagle must - * read them from its internal bus, and then take the route - * back out again, for a 16bit read. - * - * 2) Use the MC_CONTROL_REG0_SINTR bit from the Madge ASICs. - * The major disadvantage here is that the accuracy of the - * bit is in question. However, it cuts out the extra read - * cycles it takes to read the Eagle's SIF, as its only an - * 8bit read, and theoretically the Madge bit is directly - * connected to the interrupt latch coming out of the Eagle - * hardware (that statement is not verified). - * - * I can't determine which of these methods has the best win. For now, - * we make a compromise. Use the Madge way for the first interrupt, - * which should be the fast-path, and then once we hit the first - * interrupt, keep on trying using the SIF method until we've - * exhausted all contiguous interrupts. 
- * - */ -static irqreturn_t madgemc_interrupt(int irq, void *dev_id) -{ - int pending,reg1; - struct net_device *dev; - - if (!dev_id) { - printk("madgemc_interrupt: was not passed a dev_id!\n"); - return IRQ_NONE; - } - - dev = dev_id; - - /* Make sure its really us. -- the Madge way */ - pending = inb(dev->base_addr + MC_CONTROL_REG0); - if (!(pending & MC_CONTROL_REG0_SINTR)) - return IRQ_NONE; /* not our interrupt */ - - /* - * Since we're level-triggered, we may miss the rising edge - * of the next interrupt while we're off handling this one, - * so keep checking until the SIF verifies that it has nothing - * left for us to do. - */ - pending = STS_SYSTEM_IRQ; - do { - if (pending & STS_SYSTEM_IRQ) { - - /* Toggle the interrupt to reset the latch on card */ - reg1 = inb(dev->base_addr + MC_CONTROL_REG1); - outb(reg1 ^ MC_CONTROL_REG1_SINTEN, - dev->base_addr + MC_CONTROL_REG1); - outb(reg1, dev->base_addr + MC_CONTROL_REG1); - - /* Continue handling as normal */ - tms380tr_interrupt(irq, dev_id); - - pending = SIFREADW(SIFSTS); /* restart - the SIF way */ - - } else - return IRQ_HANDLED; - } while (1); - - return IRQ_HANDLED; /* not reachable */ -} - -/* - * Set the card to the preferred ring speed. - * - * Unlike newer cards, the MC16/32 have their speed selection - * circuit connected to the Madge ASICs and not to the TMS380 - * NSELOUT pins. Set the ASIC bits correctly here, and return - * zero to leave the TMS NSELOUT bits unaffected. - * - */ -static unsigned short madgemc_setnselout_pins(struct net_device *dev) -{ - unsigned char reg1; - struct net_local *tp = netdev_priv(dev); - - reg1 = inb(dev->base_addr + MC_CONTROL_REG1); - - if(tp->DataRate == SPEED_16) - reg1 |= MC_CONTROL_REG1_SPEED_SEL; /* add for 16mb */ - else if (reg1 & MC_CONTROL_REG1_SPEED_SEL) - reg1 ^= MC_CONTROL_REG1_SPEED_SEL; /* remove for 4mb */ - outb(reg1, dev->base_addr + MC_CONTROL_REG1); - - return 0; /* no change */ -} - -/* - * Set the register page. This equates to the SRSX line - * on the TMS380Cx6. - * - * Register selection is normally done via three contiguous - * bits. However, some boards (such as the MC16/32) use only - * two bits, plus a separate bit in the glue chip. This - * sets the SRSX bit (the top bit). See page 4-17 in the - * Yellow Book for which registers are affected. - * - */ -static void madgemc_setregpage(struct net_device *dev, int page) -{ - static int reg1; - - reg1 = inb(dev->base_addr + MC_CONTROL_REG1); - if ((page == 0) && (reg1 & MC_CONTROL_REG1_SRSX)) { - outb(reg1 ^ MC_CONTROL_REG1_SRSX, - dev->base_addr + MC_CONTROL_REG1); - } - else if (page == 1) { - outb(reg1 | MC_CONTROL_REG1_SRSX, - dev->base_addr + MC_CONTROL_REG1); - } - reg1 = inb(dev->base_addr + MC_CONTROL_REG1); -} - -/* - * The SIF registers are not mapped into register space by default - * Set this to 1 to map them, 0 to map the BIA ROM. - * - */ -static void madgemc_setsifsel(struct net_device *dev, int val) -{ - unsigned int reg0; - - reg0 = inb(dev->base_addr + MC_CONTROL_REG0); - if ((val == 0) && (reg0 & MC_CONTROL_REG0_SIFSEL)) { - outb(reg0 ^ MC_CONTROL_REG0_SIFSEL, - dev->base_addr + MC_CONTROL_REG0); - } else if (val == 1) { - outb(reg0 | MC_CONTROL_REG0_SIFSEL, - dev->base_addr + MC_CONTROL_REG0); - } - reg0 = inb(dev->base_addr + MC_CONTROL_REG0); -} - -/* - * Enable SIF interrupts - * - * This does not enable interrupts in the SIF, but rather - * enables SIF interrupts to be passed onto the host. 
- * - */ -static void madgemc_setint(struct net_device *dev, int val) -{ - unsigned int reg1; - - reg1 = inb(dev->base_addr + MC_CONTROL_REG1); - if ((val == 0) && (reg1 & MC_CONTROL_REG1_SINTEN)) { - outb(reg1 ^ MC_CONTROL_REG1_SINTEN, - dev->base_addr + MC_CONTROL_REG1); - } else if (val == 1) { - outb(reg1 | MC_CONTROL_REG1_SINTEN, - dev->base_addr + MC_CONTROL_REG1); - } -} - -/* - * Cable type is set via control register 7. Bit zero high - * for UTP, low for STP. - */ -static void madgemc_setcabletype(struct net_device *dev, int type) -{ - outb((type==0)?MC_CONTROL_REG7_CABLEUTP:MC_CONTROL_REG7_CABLESTP, - dev->base_addr + MC_CONTROL_REG7); -} - -/* - * Enable the functions of the Madge chipset needed for - * full working order. - */ -static int madgemc_chipset_init(struct net_device *dev) -{ - outb(0, dev->base_addr + MC_CONTROL_REG1); /* pull SRESET low */ - tms380tr_wait(100); /* wait for card to reset */ - - /* bring back into normal operating mode */ - outb(MC_CONTROL_REG1_NSRESET, dev->base_addr + MC_CONTROL_REG1); - - /* map SIF registers */ - madgemc_setsifsel(dev, 1); - - /* enable SIF interrupts */ - madgemc_setint(dev, 1); - - return 0; -} - -/* - * Disable the board, and put back into power-up state. - */ -static void madgemc_chipset_close(struct net_device *dev) -{ - /* disable interrupts */ - madgemc_setint(dev, 0); - /* unmap SIF registers */ - madgemc_setsifsel(dev, 0); -} - -/* - * Read the card type (MC16 or MC32) from the card. - * - * The configuration registers are stored in two separate - * pages. Pages are flipped by clearing bit 3 of CONTROL_REG0 (PAGE) - * for page zero, or setting bit 3 for page one. - * - * Page zero contains the following data: - * Byte 0: Manufacturer ID (0x4D -- ASCII "M") - * Byte 1: Card type: - * 0x08 for MC16 - * 0x0D for MC32 - * Byte 2: Card revision - * Byte 3: Mirror of POS config register 0 - * Byte 4: Mirror of POS 1 - * Byte 5: Mirror of POS 2 - * - * Page one contains the following data: - * Byte 0: Unused - * Byte 1-6: BIA, MSB to LSB. - * - * Note that to read the BIA, we must unmap the SIF registers - * by clearing bit 2 of CONTROL_REG0 (SIFSEL), as the data - * will reside in the same logical location. For this reason, - * _never_ read the BIA while the Eagle processor is running! - * The SIF will be completely inaccessible until the BIA operation - * is complete. - * - */ -static void madgemc_read_rom(struct net_device *dev, struct card_info *card) -{ - unsigned long ioaddr; - unsigned char reg0, reg1, tmpreg0, i; - - ioaddr = dev->base_addr; - - reg0 = inb(ioaddr + MC_CONTROL_REG0); - reg1 = inb(ioaddr + MC_CONTROL_REG1); - - /* Switch to page zero and unmap SIF */ - tmpreg0 = reg0 & ~(MC_CONTROL_REG0_PAGE + MC_CONTROL_REG0_SIFSEL); - outb(tmpreg0, ioaddr + MC_CONTROL_REG0); - - card->manid = inb(ioaddr + MC_ROM_MANUFACTURERID); - card->cardtype = inb(ioaddr + MC_ROM_ADAPTERID); - card->cardrev = inb(ioaddr + MC_ROM_REVISION); - - /* Switch to rom page one */ - outb(tmpreg0 | MC_CONTROL_REG0_PAGE, ioaddr + MC_CONTROL_REG0); - - /* Read BIA */ - dev->addr_len = 6; - for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + MC_ROM_BIA_START + i); - - /* Restore original register values */ - outb(reg0, ioaddr + MC_CONTROL_REG0); - outb(reg1, ioaddr + MC_CONTROL_REG1); -} - -static int madgemc_open(struct net_device *dev) -{ - /* - * Go ahead and reinitialize the chipset again, just to - * make sure we didn't get left in a bad state. 
- */ - madgemc_chipset_init(dev); - tms380tr_open(dev); - return 0; -} - -static int madgemc_close(struct net_device *dev) -{ - tms380tr_close(dev); - madgemc_chipset_close(dev); - return 0; -} - -/* - * Give some details available from /proc/mca/slotX - */ -static int madgemc_mcaproc(char *buf, int slot, void *d) -{ - struct net_device *dev = (struct net_device *)d; - struct net_local *tp = netdev_priv(dev); - struct card_info *curcard = tp->tmspriv; - int len = 0; - - len += sprintf(buf+len, "-------\n"); - if (curcard) { - len += sprintf(buf+len, "Card Revision: %d\n", curcard->cardrev); - len += sprintf(buf+len, "RAM Size: %dkb\n", curcard->ramsize); - len += sprintf(buf+len, "Cable type: %s\n", (curcard->cabletype)?"STP/DB9":"UTP/RJ-45"); - len += sprintf(buf+len, "Configured ring speed: %dMb/sec\n", (curcard->ringspeed)?16:4); - len += sprintf(buf+len, "Running ring speed: %dMb/sec\n", (tp->DataRate==SPEED_16)?16:4); - len += sprintf(buf+len, "Device: %s\n", dev->name); - len += sprintf(buf+len, "IO Port: 0x%04lx\n", dev->base_addr); - len += sprintf(buf+len, "IRQ: %d\n", dev->irq); - len += sprintf(buf+len, "Arbitration Level: %d\n", curcard->arblevel); - len += sprintf(buf+len, "Burst Mode: "); - switch(curcard->burstmode) { - case 0: len += sprintf(buf+len, "Cycle steal"); break; - case 1: len += sprintf(buf+len, "Limited burst"); break; - case 2: len += sprintf(buf+len, "Delayed release"); break; - case 3: len += sprintf(buf+len, "Immediate release"); break; - } - len += sprintf(buf+len, " (%s)\n", (curcard->fairness)?"Unfair":"Fair"); - - len += sprintf(buf+len, "Ring Station Address: %pM\n", - dev->dev_addr); - } else - len += sprintf(buf+len, "Card not configured\n"); - - return len; -} - -static int __devexit madgemc_remove(struct device *device) -{ - struct net_device *dev = dev_get_drvdata(device); - struct net_local *tp; - struct card_info *card; - - BUG_ON(!dev); - - tp = netdev_priv(dev); - card = tp->tmspriv; - kfree(card); - tp->tmspriv = NULL; - - unregister_netdev(dev); - release_region(dev->base_addr-MADGEMC_SIF_OFFSET, MADGEMC_IO_EXTENT); - free_irq(dev->irq, dev); - tmsdev_term(dev); - free_netdev(dev); - dev_set_drvdata(device, NULL); - - return 0; -} - -static short madgemc_adapter_ids[] __initdata = { - 0x002d, - 0x0000 -}; - -static struct mca_driver madgemc_driver = { - .id_table = madgemc_adapter_ids, - .driver = { - .name = "madgemc", - .bus = &mca_bus_type, - .probe = madgemc_probe, - .remove = __devexit_p(madgemc_remove), - }, -}; - -static int __init madgemc_init (void) -{ - madgemc_netdev_ops = tms380tr_netdev_ops; - madgemc_netdev_ops.ndo_open = madgemc_open; - madgemc_netdev_ops.ndo_stop = madgemc_close; - - return mca_register_driver (&madgemc_driver); -} - -static void __exit madgemc_exit (void) -{ - mca_unregister_driver (&madgemc_driver); -} - -module_init(madgemc_init); -module_exit(madgemc_exit); - -MODULE_LICENSE("GPL"); - diff --git a/drivers/net/tokenring/madgemc.h b/drivers/net/tokenring/madgemc.h deleted file mode 100644 index fe88e272c53..00000000000 --- a/drivers/net/tokenring/madgemc.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * madgemc.h: Header for the madgemc tms380tr module - * - * Authors: - * - Adam Fritzler - */ - -#ifndef __LINUX_MADGEMC_H -#define __LINUX_MADGEMC_H - -#ifdef __KERNEL__ - -#define MADGEMC16_CARDNAME "Madge Smart 16/4 MC16 Ringnode" -#define MADGEMC32_CARDNAME "Madge Smart 16/4 MC32 Ringnode" - -/* - * Bit definitions for the POS config registers - */ -#define MC16_POS0_ADDR1 0x20 -#define MC16_POS2_ADDR2 0x04 -#define 
MC16_POS3_ADDR3 0x20 - -#define MC_CONTROL_REG0 ((long)-8) /* 0x00 */ -#define MC_CONTROL_REG1 ((long)-7) /* 0x01 */ -#define MC_ADAPTER_POS_REG0 ((long)-6) /* 0x02 */ -#define MC_ADAPTER_POS_REG1 ((long)-5) /* 0x03 */ -#define MC_ADAPTER_POS_REG2 ((long)-4) /* 0x04 */ -#define MC_ADAPTER_REG5_UNUSED ((long)-3) /* 0x05 */ -#define MC_ADAPTER_REG6_UNUSED ((long)-2) /* 0x06 */ -#define MC_CONTROL_REG7 ((long)-1) /* 0x07 */ - -#define MC_CONTROL_REG0_UNKNOWN1 0x01 -#define MC_CONTROL_REG0_UNKNOWN2 0x02 -#define MC_CONTROL_REG0_SIFSEL 0x04 -#define MC_CONTROL_REG0_PAGE 0x08 -#define MC_CONTROL_REG0_TESTINTERRUPT 0x10 -#define MC_CONTROL_REG0_UNKNOWN20 0x20 -#define MC_CONTROL_REG0_SINTR 0x40 -#define MC_CONTROL_REG0_UNKNOWN80 0x80 - -#define MC_CONTROL_REG1_SINTEN 0x01 -#define MC_CONTROL_REG1_BITOFDEATH 0x02 -#define MC_CONTROL_REG1_NSRESET 0x04 -#define MC_CONTROL_REG1_UNKNOWN8 0x08 -#define MC_CONTROL_REG1_UNKNOWN10 0x10 -#define MC_CONTROL_REG1_UNKNOWN20 0x20 -#define MC_CONTROL_REG1_SRSX 0x40 -#define MC_CONTROL_REG1_SPEED_SEL 0x80 - -#define MC_CONTROL_REG7_CABLESTP 0x00 -#define MC_CONTROL_REG7_CABLEUTP 0x01 - -/* - * ROM Page Zero - */ -#define MC_ROM_MANUFACTURERID 0x00 -#define MC_ROM_ADAPTERID 0x01 -#define MC_ROM_REVISION 0x02 -#define MC_ROM_CONFIG0 0x03 -#define MC_ROM_CONFIG1 0x04 -#define MC_ROM_CONFIG2 0x05 - -/* - * ROM Page One - */ -#define MC_ROM_UNUSED_BYTE 0x00 -#define MC_ROM_BIA_START 0x01 - -#endif /* __KERNEL__ */ -#endif /* __LINUX_MADGEMC_H */ diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c deleted file mode 100644 index 0e234741cc7..00000000000 --- a/drivers/net/tokenring/olympic.c +++ /dev/null @@ -1,1749 +0,0 @@ -/* - * olympic.c (c) 1999 Peter De Schrijver All Rights Reserved - * 1999/2000 Mike Phillips (mikep@linuxtr.net) - * - * Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic - * chipset. - * - * Base Driver Skeleton: - * Written 1993-94 by Donald Becker. - * - * Copyright 1993 United States Government as represented by the - * Director, National Security Agency. - * - * Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their - * assistance and perserverance with the testing of this driver. - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * 4/27/99 - Alpha Release 0.1.0 - * First release to the public - * - * 6/8/99 - Official Release 0.2.0 - * Merged into the kernel code - * 8/18/99 - Updated driver for 2.3.13 kernel to use new pci - * resource. Driver also reports the card name returned by - * the pci resource. - * 1/11/00 - Added spinlocks for smp - * 2/23/00 - Updated to dev_kfree_irq - * 3/10/00 - Fixed FDX enable which triggered other bugs also - * squashed. - * 5/20/00 - Changes to handle Olympic on LinuxPPC. Endian changes. - * The odd thing about the changes is that the fix for - * endian issues with the big-endian data in the arb, asb... - * was to always swab() the bytes, no matter what CPU. - * That's because the read[wl]() functions always swap the - * bytes on the way in on PPC. - * Fixing the hardware descriptors was another matter, - * because they weren't going through read[wl](), there all - * the results had to be in memory in le32 values. kdaaker - * - * 12/23/00 - Added minimal Cardbus support (Thanks Donald). - * - * 03/09/01 - Add new pci api, dev_base_lock, general clean up. 
- * - * 03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev - * Change proc_fs behaviour, now one entry per adapter. - * - * 04/09/01 - Couple of bug fixes to the dma unmaps and ejecting the - * adapter when live does not take the system down with it. - * - * 06/02/01 - Clean up, copy skb for small packets - * - * 06/22/01 - Add EISR error handling routines - * - * 07/19/01 - Improve bad LAA reporting, strip out freemem - * into a separate function, its called from 3 - * different places now. - * 02/09/02 - Replaced sleep_on. - * 03/01/02 - Replace access to several registers from 32 bit to - * 16 bit. Fixes alignment errors on PPC 64 bit machines. - * Thanks to Al Trautman for this one. - * 03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was - * silently ignored until the error checking code - * went into version 1.0.0 - * 06/04/02 - Add correct start up sequence for the cardbus adapters. - * Required for strict compliance with pci power mgmt specs. - * To Do: - * - * Wake on lan - * - * If Problems do Occur - * Most problems can be rectified by either closing and opening the interface - * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult - * if compiled into the kernel). - */ - -/* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */ - -#define OLYMPIC_DEBUG 0 - - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/timer.h> -#include <linux/in.h> -#include <linux/ioport.h> -#include <linux/seq_file.h> -#include <linux/string.h> -#include <linux/proc_fs.h> -#include <linux/ptrace.h> -#include <linux/skbuff.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/trdevice.h> -#include <linux/stddef.h> -#include <linux/init.h> -#include <linux/pci.h> -#include <linux/spinlock.h> -#include <linux/bitops.h> -#include <linux/jiffies.h> - -#include <net/checksum.h> -#include <net/net_namespace.h> - -#include <asm/io.h> - -#include "olympic.h" - -/* I've got to put some intelligence into the version number so that Peter and I know - * which version of the code somebody has got. - * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author. - * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike - * - * Official releases will only have an a.b.c version number format. - */ - -static char version[] = -"Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips" ; - -static char *open_maj_error[] = {"No error", "Lobe Media Test", "Physical Insertion", - "Address Verification", "Neighbor Notification (Ring Poll)", - "Request Parameters","FDX Registration Request", - "FDX Duplicate Address Check", "Station registration Query Wait", - "Unknown stage"}; - -static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault", - "Ring Speed Mismatch", "Timeout","Ring Failure","Ring Beaconing", - "Duplicate Node Address","Request Parameters","Remove Received", - "Reserved", "Reserved", "No Monitor Detected for RPL", - "Monitor Contention failer for RPL", "FDX Protocol Error"}; - -/* Module parameters */ - -MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ; -MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ; - -/* Ring Speed 0,4,16,100 - * 0 = Autosense - * 4,16 = Selected speed only, no autosense - * This allows the card to be the first on the ring - * and become the active monitor. - * 100 = Nothing at present, 100mbps is autodetected - * if FDX is turned on. 
May be implemented in the future to - * fail if 100mpbs is not detected. - * - * WARNING: Some hubs will allow you to insert - * at the wrong speed - */ - -static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,} ; -module_param_array(ringspeed, int, NULL, 0); - -/* Packet buffer size */ - -static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,} ; -module_param_array(pkt_buf_sz, int, NULL, 0) ; - -/* Message Level */ - -static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,} ; -module_param_array(message_level, int, NULL, 0) ; - -/* Change network_monitor to receive mac frames through the arb channel. - * Will also create a /proc/net/olympic_tr%d entry, where %d is the tr - * device, i.e. tr0, tr1 etc. - * Intended to be used to create a ring-error reporting network module - * i.e. it will give you the source address of beaconers on the ring - */ -static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,}; -module_param_array(network_monitor, int, NULL, 0); - -static DEFINE_PCI_DEVICE_TABLE(olympic_pci_tbl) = { - {PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,}, - { } /* Terminating Entry */ -}; -MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ; - - -static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); -static int olympic_init(struct net_device *dev); -static int olympic_open(struct net_device *dev); -static netdev_tx_t olympic_xmit(struct sk_buff *skb, - struct net_device *dev); -static int olympic_close(struct net_device *dev); -static void olympic_set_rx_mode(struct net_device *dev); -static void olympic_freemem(struct net_device *dev) ; -static irqreturn_t olympic_interrupt(int irq, void *dev_id); -static int olympic_set_mac_address(struct net_device *dev, void *addr) ; -static void olympic_arb_cmd(struct net_device *dev); -static int olympic_change_mtu(struct net_device *dev, int mtu); -static void olympic_srb_bh(struct net_device *dev) ; -static void olympic_asb_bh(struct net_device *dev) ; -static const struct file_operations olympic_proc_ops; - -static const struct net_device_ops olympic_netdev_ops = { - .ndo_open = olympic_open, - .ndo_stop = olympic_close, - .ndo_start_xmit = olympic_xmit, - .ndo_change_mtu = olympic_change_mtu, - .ndo_set_rx_mode = olympic_set_rx_mode, - .ndo_set_mac_address = olympic_set_mac_address, -}; - -static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct net_device *dev ; - struct olympic_private *olympic_priv; - static int card_no = -1 ; - int i ; - - card_no++ ; - - if ((i = pci_enable_device(pdev))) { - return i ; - } - - pci_set_master(pdev); - - if ((i = pci_request_regions(pdev,"olympic"))) { - goto op_disable_dev; - } - - dev = alloc_trdev(sizeof(struct olympic_private)) ; - if (!dev) { - i = -ENOMEM; - goto op_release_dev; - } - - olympic_priv = netdev_priv(dev) ; - - spin_lock_init(&olympic_priv->olympic_lock) ; - - init_waitqueue_head(&olympic_priv->srb_wait); - init_waitqueue_head(&olympic_priv->trb_wait); -#if OLYMPIC_DEBUG - printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, netdev_priv(dev)); -#endif - dev->irq=pdev->irq; - dev->base_addr=pci_resource_start(pdev, 0); - olympic_priv->olympic_card_name = pci_name(pdev); - olympic_priv->pdev = pdev; - olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev,1),256); - olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev,2),2048); - if (!olympic_priv->olympic_mmio || !olympic_priv->olympic_lap) { - goto op_free_iomap; - } - - if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) ) - 
olympic_priv->pkt_buf_sz = PKT_BUF_SZ ; - else - olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no] ; - - dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN ; - olympic_priv->olympic_ring_speed = ringspeed[card_no] ; - olympic_priv->olympic_message_level = message_level[card_no] ; - olympic_priv->olympic_network_monitor = network_monitor[card_no]; - - if ((i = olympic_init(dev))) { - goto op_free_iomap; - } - - dev->netdev_ops = &olympic_netdev_ops; - SET_NETDEV_DEV(dev, &pdev->dev); - - pci_set_drvdata(pdev,dev) ; - register_netdev(dev) ; - printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name); - if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */ - char proc_name[20] ; - strcpy(proc_name,"olympic_") ; - strcat(proc_name,dev->name) ; - proc_create_data(proc_name, 0, init_net.proc_net, &olympic_proc_ops, dev); - printk("Olympic: Network Monitor information: /proc/%s\n",proc_name); - } - return 0 ; - -op_free_iomap: - if (olympic_priv->olympic_mmio) - iounmap(olympic_priv->olympic_mmio); - if (olympic_priv->olympic_lap) - iounmap(olympic_priv->olympic_lap); - - free_netdev(dev); -op_release_dev: - pci_release_regions(pdev); - -op_disable_dev: - pci_disable_device(pdev); - return i; -} - -static int olympic_init(struct net_device *dev) -{ - struct olympic_private *olympic_priv; - u8 __iomem *olympic_mmio, *init_srb,*adapter_addr; - unsigned long t; - unsigned int uaa_addr; - - olympic_priv=netdev_priv(dev); - olympic_mmio=olympic_priv->olympic_mmio; - - printk("%s\n", version); - printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq); - - writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL); - t=jiffies; - while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) { - schedule(); - if(time_after(jiffies, t + 40*HZ)) { - printk(KERN_ERR "IBM PCI tokenring card not responding.\n"); - return -ENODEV; - } - } - - - /* Needed for cardbus */ - if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) { - writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK); - } - -#if OLYMPIC_DEBUG - printk("BCTL: %x\n",readl(olympic_mmio+BCTL)); - printk("GPR: %x\n",readw(olympic_mmio+GPR)); - printk("SISRMASK: %x\n",readl(olympic_mmio+SISR_MASK)); -#endif - /* Aaaahhh, You have got to be real careful setting GPR, the card - holds the previous values from flash memory, including autosense - and ring speed */ - - writel(readl(olympic_mmio+BCTL)|BCTL_MIMREB,olympic_mmio+BCTL); - - if (olympic_priv->olympic_ring_speed == 0) { /* Autosense */ - writew(readw(olympic_mmio+GPR)|GPR_AUTOSENSE,olympic_mmio+GPR); - if (olympic_priv->olympic_message_level) - printk(KERN_INFO "%s: Ringspeed autosense mode on\n",olympic_priv->olympic_card_name); - } else if (olympic_priv->olympic_ring_speed == 16) { - if (olympic_priv->olympic_message_level) - printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name); - writew(GPR_16MBPS, olympic_mmio+GPR); - } else if (olympic_priv->olympic_ring_speed == 4) { - if (olympic_priv->olympic_message_level) - printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name) ; - writew(0, olympic_mmio+GPR); - } - - writew(readw(olympic_mmio+GPR)|GPR_NEPTUNE_BF,olympic_mmio+GPR); - -#if OLYMPIC_DEBUG - printk("GPR = %x\n",readw(olympic_mmio + GPR) ) ; -#endif - /* Solo has been 
paused to meet the Cardbus power - * specs if the adapter is cardbus. Check to - * see its been paused and then restart solo. The - * adapter should set the pause bit within 1 second. - */ - - if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) { - t=jiffies; - while (!(readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE)) { - schedule() ; - if(time_after(jiffies, t + 2*HZ)) { - printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ; - return -ENODEV; - } - } - writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL) ; - } - - /* start solo init */ - writel((1<<15),olympic_mmio+SISR_MASK_SUM); - - t=jiffies; - while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) { - schedule(); - if(time_after(jiffies, t + 15*HZ)) { - printk(KERN_ERR "IBM PCI tokenring card not responding.\n"); - return -ENODEV; - } - } - - writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA); - -#if OLYMPIC_DEBUG - printk("LAPWWO: %x, LAPA: %x\n",readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA)); -#endif - - init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800)); - -#if OLYMPIC_DEBUG -{ - int i; - printk("init_srb(%p): ",init_srb); - for(i=0;i<20;i++) - printk("%x ",readb(init_srb+i)); - printk("\n"); -} -#endif - if(readw(init_srb+6)) { - printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",readw(init_srb+6)); - return -ENODEV; - } - - if (olympic_priv->olympic_message_level) { - if ( readb(init_srb +2) & 0x40) { - printk(KERN_INFO "Olympic: Adapter is FDX capable.\n") ; - } else { - printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n"); - } - } - - uaa_addr=swab16(readw(init_srb+8)); - -#if OLYMPIC_DEBUG - printk("UAA resides at %x\n",uaa_addr); -#endif - - writel(uaa_addr,olympic_mmio+LAPA); - adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800)); - - memcpy_fromio(&dev->dev_addr[0], adapter_addr,6); - -#if OLYMPIC_DEBUG - printk("adapter address: %pM\n", dev->dev_addr); -#endif - - olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12)); - olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14)); - - return 0; - -} - -static int olympic_open(struct net_device *dev) -{ - struct olympic_private *olympic_priv=netdev_priv(dev); - u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb; - unsigned long flags, t; - int i, open_finished = 1 ; - u8 resp, err; - - DECLARE_WAITQUEUE(wait,current) ; - - olympic_init(dev); - - if (request_irq(dev->irq, olympic_interrupt, IRQF_SHARED , "olympic", - dev)) - return -EAGAIN; - -#if OLYMPIC_DEBUG - printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM)); - printk("pending ints: %x\n",readl(olympic_mmio+SISR_RR)); -#endif - - writel(SISR_MI,olympic_mmio+SISR_MASK_SUM); - - writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */ - - writel(LISR_LIE,olympic_mmio+LISR); /* more ints later */ - - /* adapter is closed, so SRB is pointed to by LAPWWO */ - - writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA); - init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800)); - -#if OLYMPIC_DEBUG - printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA)); - printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK)); - printk("Before the open command\n"); -#endif - do { - memset_io(init_srb,0,SRB_COMMAND_SIZE); - - writeb(SRB_OPEN_ADAPTER,init_srb) ; /* open */ - writeb(OLYMPIC_CLEAR_RET_CODE,init_srb+2); - - /* If Network Monitor, instruct card to copy MAC frames through the ARB */ - if 
(olympic_priv->olympic_network_monitor) - writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8); - else - writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8); - - /* Test OR of first 3 bytes as its totally possible for - * someone to set the first 2 bytes to be zero, although this - * is an error, the first byte must have bit 6 set to 1 */ - - if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) { - writeb(olympic_priv->olympic_laa[0],init_srb+12); - writeb(olympic_priv->olympic_laa[1],init_srb+13); - writeb(olympic_priv->olympic_laa[2],init_srb+14); - writeb(olympic_priv->olympic_laa[3],init_srb+15); - writeb(olympic_priv->olympic_laa[4],init_srb+16); - writeb(olympic_priv->olympic_laa[5],init_srb+17); - memcpy(dev->dev_addr,olympic_priv->olympic_laa,dev->addr_len) ; - } - writeb(1,init_srb+30); - - spin_lock_irqsave(&olympic_priv->olympic_lock,flags); - olympic_priv->srb_queued=1; - - writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM); - spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags); - - t = jiffies ; - - add_wait_queue(&olympic_priv->srb_wait,&wait) ; - set_current_state(TASK_INTERRUPTIBLE) ; - - while(olympic_priv->srb_queued) { - schedule() ; - if(signal_pending(current)) { - printk(KERN_WARNING "%s: Signal received in open.\n", - dev->name); - printk(KERN_WARNING "SISR=%x LISR=%x\n", - readl(olympic_mmio+SISR), - readl(olympic_mmio+LISR)); - olympic_priv->srb_queued=0; - break; - } - if (time_after(jiffies, t + 10*HZ)) { - printk(KERN_WARNING "%s: SRB timed out.\n",dev->name); - olympic_priv->srb_queued=0; - break ; - } - set_current_state(TASK_INTERRUPTIBLE) ; - } - remove_wait_queue(&olympic_priv->srb_wait,&wait) ; - set_current_state(TASK_RUNNING) ; - olympic_priv->srb_queued = 0 ; -#if OLYMPIC_DEBUG - printk("init_srb(%p): ",init_srb); - for(i=0;i<20;i++) - printk("%02x ",readb(init_srb+i)); - printk("\n"); -#endif - - /* If we get the same return response as we set, the interrupt wasn't raised and the open - * timed out. 
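The LAA test a few lines above (OR of the first three bytes, plus the rule that bit 6 of the first byte must be set) can be expressed as two small predicates. This is a stand-alone sketch with invented helper names, not code from the driver.

        #include <stdbool.h>
        #include <stdio.h>

        /* A locally administered address was configured at all if any of the
         * first three bytes is non-zero; that is all the open path needs in
         * order to decide whether to program the adapter with it. */
        static bool laa_is_set(const unsigned char laa[6])
        {
                return (laa[0] | laa[1] | laa[2]) != 0;
        }

        /* A well-formed LAA must additionally have bit 6 of the first byte
         * set, as stated in the comment above. */
        static bool laa_is_valid(const unsigned char laa[6])
        {
                return (laa[0] & 0x40) != 0;
        }

        int main(void)
        {
                const unsigned char laa[6] = { 0x40, 0x00, 0x52, 0x00, 0x00, 0x01 };

                printf("set=%d valid=%d\n", laa_is_set(laa), laa_is_valid(laa));
                return 0;
        }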
- */ - - switch (resp = readb(init_srb+2)) { - case OLYMPIC_CLEAR_RET_CODE: - printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name) ; - goto out; - case 0: - open_finished = 1; - break; - case 0x07: - if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */ - printk(KERN_WARNING "%s: Retrying at different ring speed\n", dev->name); - open_finished = 0 ; - continue; - } - - err = readb(init_srb+7); - - if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) { - printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name); - printk(KERN_WARNING "%s: Please try again with a specified ring speed\n",dev->name); - } else { - printk(KERN_WARNING "%s: %s - %s\n", dev->name, - open_maj_error[(err & 0xf0) >> 4], - open_min_error[(err & 0x0f)]); - } - goto out; - - case 0x32: - printk(KERN_WARNING "%s: Invalid LAA: %pM\n", - dev->name, olympic_priv->olympic_laa); - goto out; - - default: - printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name, resp); - goto out; - - } - } while (!(open_finished)) ; /* Will only loop if ring speed mismatch re-open attempted && autosense is on */ - - if (readb(init_srb+18) & (1<<3)) - if (olympic_priv->olympic_message_level) - printk(KERN_INFO "%s: Opened in FDX Mode\n",dev->name); - - if (readb(init_srb+18) & (1<<1)) - olympic_priv->olympic_ring_speed = 100 ; - else if (readb(init_srb+18) & 1) - olympic_priv->olympic_ring_speed = 16 ; - else - olympic_priv->olympic_ring_speed = 4 ; - - if (olympic_priv->olympic_message_level) - printk(KERN_INFO "%s: Opened in %d Mbps mode\n",dev->name, olympic_priv->olympic_ring_speed); - - olympic_priv->asb = swab16(readw(init_srb+8)); - olympic_priv->srb = swab16(readw(init_srb+10)); - olympic_priv->arb = swab16(readw(init_srb+12)); - olympic_priv->trb = swab16(readw(init_srb+16)); - - olympic_priv->olympic_receive_options = 0x01 ; - olympic_priv->olympic_copy_all_options = 0 ; - - /* setup rx ring */ - - writel((3<<16),olympic_mmio+BMCTL_RWM); /* Ensure end of frame generated interrupts */ - - writel(BMCTL_RX_DIS|3,olympic_mmio+BMCTL_RWM); /* Yes, this the enables RX channel */ - - for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) { - - struct sk_buff *skb; - - skb=dev_alloc_skb(olympic_priv->pkt_buf_sz); - if(skb == NULL) - break; - - skb->dev = dev; - - olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev, - skb->data,olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)) ; - olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz); - olympic_priv->rx_ring_skb[i]=skb; - } - - if (i==0) { - printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. 
Adapter disabled\n",dev->name); - goto out; - } - - olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_rx_ring, - sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE); - writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ); - writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA); - writew(i, olympic_mmio+RXDESCQCNT); - - olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring, - sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE); - writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ); - writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA); - - olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1; /* last processed rx status */ - olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1; - - writew(i, olympic_mmio+RXSTATQCNT); - -#if OLYMPIC_DEBUG - printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ)); - printk("RXCSA: %x, rx_status_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]); - printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]) ); - printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]) ); - printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7]) ); - - printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]); - printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n", - olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ; -#endif - - writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ); - -#if OLYMPIC_DEBUG - printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ)); - printk("RXCSA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]); - printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]); -#endif - - writel(SISR_RX_STATUS | SISR_RX_NOBUF,olympic_mmio+SISR_MASK_SUM); - - /* setup tx ring */ - - writel(BMCTL_TX1_DIS,olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */ - for(i=0;i<OLYMPIC_TX_RING_SIZE;i++) - olympic_priv->olympic_tx_ring[i].buffer=cpu_to_le32(0xdeadbeef); - - olympic_priv->free_tx_ring_entries=OLYMPIC_TX_RING_SIZE; - olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_tx_ring, - sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE,PCI_DMA_TODEVICE) ; - writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1); - writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1); - writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1); - - olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring, - sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE); - writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXSTATQ_1); - writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXCSA_1); - writew(OLYMPIC_TX_RING_SIZE,olympic_mmio+TXSTATQCNT_1); - - olympic_priv->tx_ring_free=0; /* next entry in tx ring to use */ - olympic_priv->tx_ring_last_status=OLYMPIC_TX_RING_SIZE-1; /* last processed tx status */ - - 
writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* clean the eisr */ - writel(0,olympic_mmio+EISR) ; - writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */ - writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM); - -#if OLYMPIC_DEBUG - printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM)); - printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK)); -#endif - - if (olympic_priv->olympic_network_monitor) { - u8 __iomem *oat; - u8 __iomem *opt; - u8 addr[6]; - oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr); - opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr); - - for (i = 0; i < 6; i++) - addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+i); - printk("%s: Node Address: %pM\n", dev->name, addr); - printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name, - readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)), - readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1), - readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2), - readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3)); - - for (i = 0; i < 6; i++) - addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+i); - printk("%s: NAUN Address: %pM\n", dev->name, addr); - } - - netif_start_queue(dev); - return 0; - -out: - free_irq(dev->irq, dev); - return -EIO; -} - -/* - * When we enter the rx routine we do not know how many frames have been - * queued on the rx channel. Therefore we start at the next rx status - * position and travel around the receive ring until we have completed - * all the frames. - * - * This means that we may process the frame before we receive the end - * of frame interrupt. This is why we always test the status instead - * of blindly processing the next frame. - * - * We also remove the last 4 bytes from the packet as well, these are - * just token ring trailer info and upset protocols that don't check - * their own length, i.e. SNA. 
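Two details of the receive path just described are easy to show in isolation: ring indices wrap with a power-of-two mask rather than a modulo, and the 4-byte token-ring trailer is stripped before the frame is handed up. The sketch below is illustrative only; the ring size and frame length are made up, and only the arithmetic matches the driver.

        #include <stdio.h>

        #define RING_SIZE 16            /* must be a power of two for the mask */
        #define TR_TRAILER_LEN 4        /* token-ring trailer stripped from frames */

        /* Advance a ring index the way the receive loop below does:
         * increment, then wrap with (RING_SIZE - 1) instead of a divide. */
        static unsigned int ring_advance(unsigned int idx)
        {
                return (idx + 1) & (RING_SIZE - 1);
        }

        /* Length handed to the stack once the trailer has been removed. */
        static unsigned int payload_len(unsigned int frame_len)
        {
                return frame_len > TR_TRAILER_LEN ? frame_len - TR_TRAILER_LEN : 0;
        }

        int main(void)
        {
                unsigned int idx = RING_SIZE - 1;       /* "last received" starts at the end */

                idx = ring_advance(idx);                /* wraps back to slot 0 */
                printf("next slot %u, payload %u bytes\n", idx, payload_len(68));
                return 0;
        }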
- * - */ -static void olympic_rx(struct net_device *dev) -{ - struct olympic_private *olympic_priv=netdev_priv(dev); - u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio; - struct olympic_rx_status *rx_status; - struct olympic_rx_desc *rx_desc ; - int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len; - struct sk_buff *skb, *skb2; - int i; - - rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ; - - while (rx_status->status_buffercnt) { - u32 l_status_buffercnt; - - olympic_priv->rx_status_last_received++ ; - olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1); -#if OLYMPIC_DEBUG - printk("rx status: %x rx len: %x\n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen)); -#endif - length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff; - buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff; - i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */ - frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16; - -#if OLYMPIC_DEBUG - printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt); -#endif - l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt); - if(l_status_buffercnt & 0xC0000000) { - if (l_status_buffercnt & 0x3B000000) { - if (olympic_priv->olympic_message_level) { - if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */ - printk(KERN_WARNING "%s: Rx Frame Truncated\n",dev->name); - if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */ - printk(KERN_WARNING "%s: Rx Frame Receive overrun\n",dev->name); - if (l_status_buffercnt & (1<<27)) /* No receive buffers */ - printk(KERN_WARNING "%s: No receive buffers\n",dev->name); - if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */ - printk(KERN_WARNING "%s: Receive frame error detect\n",dev->name); - if (l_status_buffercnt & (1<<24)) /* Received Error Detect */ - printk(KERN_WARNING "%s: Received Error Detect\n",dev->name); - } - olympic_priv->rx_ring_last_received += i ; - olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; - dev->stats.rx_errors++; - } else { - - if (buffer_cnt == 1) { - skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ; - } else { - skb = dev_alloc_skb(length) ; - } - - if (skb == NULL) { - printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n",dev->name) ; - dev->stats.rx_dropped++; - /* Update counters even though we don't transfer the frame */ - olympic_priv->rx_ring_last_received += i ; - olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; - } else { - /* Optimise based upon number of buffers used. - If only one buffer is used we can simply swap the buffers around. - If more than one then we must use the new buffer and copy the information - first. Ideally all frames would be in a single buffer, this can be tuned by - altering the buffer size. If the length of the packet is less than - 1500 bytes we're going to copy it over anyway to stop packets getting - dropped from sockets with buffers smaller than our pkt_buf_sz. 
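The single-buffer case described above is a copybreak decision: frames at or below 1500 bytes are copied into a fresh skb so the large receive buffer can be reposted, while larger frames have their buffer handed to the stack and replaced. A rough stand-alone sketch of just that decision follows; the 1500-byte threshold comes from the code below, everything else is invented for the example.

        #include <stdbool.h>
        #include <stdio.h>

        #define COPYBREAK 1500          /* threshold used by the receive path below */

        /* Returns true when the frame should be copied into a new buffer and
         * the original receive buffer reposted, false when the buffer itself
         * should be handed to the stack and replaced with a fresh one. */
        static bool should_copy(unsigned int frame_len, unsigned int buffer_cnt)
        {
                if (buffer_cnt > 1)             /* fragmented frames are always copied */
                        return true;
                return frame_len <= COPYBREAK;  /* small frames: copy, repost buffer */
        }

        int main(void)
        {
                printf("64-byte frame, 1 buffer  : copy=%d\n", should_copy(64, 1));
                printf("4000-byte frame, 1 buffer: copy=%d\n", should_copy(4000, 1));
                printf("4000-byte frame, 3 buffers: copy=%d\n", should_copy(4000, 3));
                return 0;
        }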
*/ - - if (buffer_cnt==1) { - olympic_priv->rx_ring_last_received++ ; - olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1); - rx_ring_last_received = olympic_priv->rx_ring_last_received ; - if (length > 1500) { - skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ; - /* unmap buffer */ - pci_unmap_single(olympic_priv->pdev, - le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), - olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; - skb_put(skb2,length-4); - skb2->protocol = tr_type_trans(skb2,dev); - olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer = - cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, - olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)); - olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length = - cpu_to_le32(olympic_priv->pkt_buf_sz); - olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ; - netif_rx(skb2) ; - } else { - pci_dma_sync_single_for_cpu(olympic_priv->pdev, - le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), - olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; - skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received], - skb_put(skb,length - 4), - length - 4); - pci_dma_sync_single_for_device(olympic_priv->pdev, - le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), - olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; - skb->protocol = tr_type_trans(skb,dev) ; - netif_rx(skb) ; - } - } else { - do { /* Walk the buffers */ - olympic_priv->rx_ring_last_received++ ; - olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1); - rx_ring_last_received = olympic_priv->rx_ring_last_received ; - pci_dma_sync_single_for_cpu(olympic_priv->pdev, - le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), - olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; - rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]); - cpy_length = (i == 1 ? 
frag_len : le32_to_cpu(rx_desc->res_length)); - skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received], - skb_put(skb, cpy_length), - cpy_length); - pci_dma_sync_single_for_device(olympic_priv->pdev, - le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), - olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; - } while (--i) ; - skb_trim(skb,skb->len-4) ; - skb->protocol = tr_type_trans(skb,dev); - netif_rx(skb) ; - } - dev->stats.rx_packets++ ; - dev->stats.rx_bytes += length ; - } /* if skb == null */ - } /* If status & 0x3b */ - - } else { /*if buffercnt & 0xC */ - olympic_priv->rx_ring_last_received += i ; - olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ; - } - - rx_status->fragmentcnt_framelen = 0 ; - rx_status->status_buffercnt = 0 ; - rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]); - - writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | buffer_cnt , olympic_mmio+RXENQ); - } /* while */ - -} - -static void olympic_freemem(struct net_device *dev) -{ - struct olympic_private *olympic_priv=netdev_priv(dev); - int i; - - for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) { - if (olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] != NULL) { - dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]); - olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] = NULL; - } - if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != cpu_to_le32(0xdeadbeef)) { - pci_unmap_single(olympic_priv->pdev, - le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer), - olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE); - } - olympic_priv->rx_status_last_received++; - olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1; - } - /* unmap rings */ - pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr, - sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE); - pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr, - sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE); - - pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr, - sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE); - pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr, - sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE); - - return ; -} - -static irqreturn_t olympic_interrupt(int irq, void *dev_id) -{ - struct net_device *dev= (struct net_device *)dev_id; - struct olympic_private *olympic_priv=netdev_priv(dev); - u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio; - u32 sisr; - u8 __iomem *adapter_check_area ; - - /* - * Read sisr but don't reset it yet. - * The indication bit may have been set but the interrupt latch - * bit may not be set, so we'd lose the interrupt later. 
- */ - sisr=readl(olympic_mmio+SISR) ; - if (!(sisr & SISR_MI)) /* Interrupt isn't for us */ - return IRQ_NONE; - sisr=readl(olympic_mmio+SISR_RR) ; /* Read & Reset sisr */ - - spin_lock(&olympic_priv->olympic_lock); - - /* Hotswap gives us this on removal */ - if (sisr == 0xffffffff) { - printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ; - spin_unlock(&olympic_priv->olympic_lock) ; - return IRQ_NONE; - } - - if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK | - SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) { - - /* If we ever get this the adapter is seriously dead. Only a reset is going to - * bring it back to life. We're talking pci bus errors and such like :( */ - if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) { - printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ; - printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ; - printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ; - printk(KERN_ERR "or the linux-tr mailing list.\n") ; - wake_up_interruptible(&olympic_priv->srb_wait); - spin_unlock(&olympic_priv->olympic_lock) ; - return IRQ_HANDLED; - } /* SISR_ERR */ - - if(sisr & SISR_SRB_REPLY) { - if(olympic_priv->srb_queued==1) { - wake_up_interruptible(&olympic_priv->srb_wait); - } else if (olympic_priv->srb_queued==2) { - olympic_srb_bh(dev) ; - } - olympic_priv->srb_queued=0; - } /* SISR_SRB_REPLY */ - - /* We shouldn't ever miss the Tx interrupt, but the you never know, hence the loop to ensure - we get all tx completions. */ - if (sisr & SISR_TX1_EOF) { - while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) { - olympic_priv->tx_ring_last_status++; - olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1); - olympic_priv->free_tx_ring_entries++; - dev->stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len; - dev->stats.tx_packets++ ; - pci_unmap_single(olympic_priv->pdev, - le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer), - olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE); - dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]); - olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=cpu_to_le32(0xdeadbeef); - olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0; - } - netif_wake_queue(dev); - } /* SISR_TX1_EOF */ - - if (sisr & SISR_RX_STATUS) { - olympic_rx(dev); - } /* SISR_RX_STATUS */ - - if (sisr & SISR_ADAPTER_CHECK) { - netif_stop_queue(dev); - printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name); - writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA); - adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ; - printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ; - spin_unlock(&olympic_priv->olympic_lock) ; - return IRQ_HANDLED; - } /* SISR_ADAPTER_CHECK */ - - if (sisr & SISR_ASB_FREE) { - /* Wake up anything that is waiting for the asb response */ - if (olympic_priv->asb_queued) { - olympic_asb_bh(dev) ; - } - } /* 
SISR_ASB_FREE */ - - if (sisr & SISR_ARB_CMD) { - olympic_arb_cmd(dev) ; - } /* SISR_ARB_CMD */ - - if (sisr & SISR_TRB_REPLY) { - /* Wake up anything that is waiting for the trb response */ - if (olympic_priv->trb_queued) { - wake_up_interruptible(&olympic_priv->trb_wait); - } - olympic_priv->trb_queued = 0 ; - } /* SISR_TRB_REPLY */ - - if (sisr & SISR_RX_NOBUF) { - /* According to the documentation, we don't have to do anything, but trapping it keeps it out of - /var/log/messages. */ - } /* SISR_RX_NOBUF */ - } else { - printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr); - printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ; - } /* One if the interrupts we want */ - writel(SISR_MI,olympic_mmio+SISR_MASK_SUM); - - spin_unlock(&olympic_priv->olympic_lock) ; - return IRQ_HANDLED; -} - -static netdev_tx_t olympic_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct olympic_private *olympic_priv=netdev_priv(dev); - u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio; - unsigned long flags ; - - spin_lock_irqsave(&olympic_priv->olympic_lock, flags); - - netif_stop_queue(dev); - - if(olympic_priv->free_tx_ring_entries) { - olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer = - cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE)); - olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000)); - olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb; - olympic_priv->free_tx_ring_entries--; - - olympic_priv->tx_ring_free++; - olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1); - writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1); - netif_wake_queue(dev); - spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags); - return NETDEV_TX_OK; - } else { - spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags); - return NETDEV_TX_BUSY; - } - -} - - -static int olympic_close(struct net_device *dev) -{ - struct olympic_private *olympic_priv=netdev_priv(dev); - u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*srb; - unsigned long t,flags; - - DECLARE_WAITQUEUE(wait,current) ; - - netif_stop_queue(dev); - - writel(olympic_priv->srb,olympic_mmio+LAPA); - srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800)); - - writeb(SRB_CLOSE_ADAPTER,srb+0); - writeb(0,srb+1); - writeb(OLYMPIC_CLEAR_RET_CODE,srb+2); - - add_wait_queue(&olympic_priv->srb_wait,&wait) ; - set_current_state(TASK_INTERRUPTIBLE) ; - - spin_lock_irqsave(&olympic_priv->olympic_lock,flags); - olympic_priv->srb_queued=1; - - writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM); - spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags); - - while(olympic_priv->srb_queued) { - - t = schedule_timeout_interruptible(60*HZ); - - if(signal_pending(current)) { - printk(KERN_WARNING "%s: SRB timed out.\n",dev->name); - printk(KERN_WARNING "SISR=%x MISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR)); - olympic_priv->srb_queued=0; - break; - } - - if (t == 0) { - printk(KERN_WARNING "%s: SRB timed out. 
May not be fatal.\n",dev->name); - } - olympic_priv->srb_queued=0; - } - remove_wait_queue(&olympic_priv->srb_wait,&wait) ; - - olympic_priv->rx_status_last_received++; - olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1; - - olympic_freemem(dev) ; - - /* reset tx/rx fifo's and busmaster logic */ - - writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL); - udelay(1); - writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL); - -#if OLYMPIC_DEBUG - { - int i ; - printk("srb(%p): ",srb); - for(i=0;i<4;i++) - printk("%x ",readb(srb+i)); - printk("\n"); - } -#endif - free_irq(dev->irq,dev); - - return 0; - -} - -static void olympic_set_rx_mode(struct net_device *dev) -{ - struct olympic_private *olympic_priv = netdev_priv(dev); - u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ; - u8 options = 0; - u8 __iomem *srb; - struct netdev_hw_addr *ha; - unsigned char dev_mc_address[4] ; - - writel(olympic_priv->srb,olympic_mmio+LAPA); - srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800)); - options = olympic_priv->olympic_copy_all_options; - - if (dev->flags&IFF_PROMISC) - options |= 0x61 ; - else - options &= ~0x61 ; - - /* Only issue the srb if there is a change in options */ - - if ((options ^ olympic_priv->olympic_copy_all_options)) { - - /* Now to issue the srb command to alter the copy.all.options */ - - writeb(SRB_MODIFY_RECEIVE_OPTIONS,srb); - writeb(0,srb+1); - writeb(OLYMPIC_CLEAR_RET_CODE,srb+2); - writeb(0,srb+3); - writeb(olympic_priv->olympic_receive_options,srb+4); - writeb(options,srb+5); - - olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */ - - writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM); - - olympic_priv->olympic_copy_all_options = options ; - - return ; - } - - /* Set the functional addresses we need for multicast */ - - dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; - - netdev_for_each_mc_addr(ha, dev) { - dev_mc_address[0] |= ha->addr[2]; - dev_mc_address[1] |= ha->addr[3]; - dev_mc_address[2] |= ha->addr[4]; - dev_mc_address[3] |= ha->addr[5]; - } - - writeb(SRB_SET_FUNC_ADDRESS,srb+0); - writeb(0,srb+1); - writeb(OLYMPIC_CLEAR_RET_CODE,srb+2); - writeb(0,srb+3); - writeb(0,srb+4); - writeb(0,srb+5); - writeb(dev_mc_address[0],srb+6); - writeb(dev_mc_address[1],srb+7); - writeb(dev_mc_address[2],srb+8); - writeb(dev_mc_address[3],srb+9); - - olympic_priv->srb_queued = 2 ; - writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM); - -} - -static void olympic_srb_bh(struct net_device *dev) -{ - struct olympic_private *olympic_priv = netdev_priv(dev); - u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ; - u8 __iomem *srb; - - writel(olympic_priv->srb,olympic_mmio+LAPA); - srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800)); - - switch (readb(srb)) { - - /* SRB_MODIFY_RECEIVE_OPTIONS i.e. 
set_multicast_list options (promiscuous) - * At some point we should do something if we get an error, such as - * resetting the IFF_PROMISC flag in dev - */ - - case SRB_MODIFY_RECEIVE_OPTIONS: - switch (readb(srb+2)) { - case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name) ; - break ; - case 0x04: - printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name); - break ; - default: - if (olympic_priv->olympic_message_level) - printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",dev->name,olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options) ; - break ; - } /* switch srb[2] */ - break ; - - /* SRB_SET_GROUP_ADDRESS - Multicast group setting - */ - - case SRB_SET_GROUP_ADDRESS: - switch (readb(srb+2)) { - case 0x00: - break ; - case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name); - break ; - case 0x04: - printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name); - break ; - case 0x3c: - printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n",dev->name) ; - break ; - case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */ - printk(KERN_WARNING "%s: Group address registers full\n",dev->name) ; - break ; - case 0x55: - printk(KERN_INFO "%s: Group Address already set.\n",dev->name) ; - break ; - default: - break ; - } /* switch srb[2] */ - break ; - - /* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list - */ - - case SRB_RESET_GROUP_ADDRESS: - switch (readb(srb+2)) { - case 0x00: - break ; - case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name); - break ; - case 0x04: - printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; - break ; - case 0x39: /* Must deal with this if individual multicast addresses used */ - printk(KERN_INFO "%s: Group address not found\n",dev->name); - break ; - default: - break ; - } /* switch srb[2] */ - break ; - - - /* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode - */ - - case SRB_SET_FUNC_ADDRESS: - switch (readb(srb+2)) { - case 0x00: - if (olympic_priv->olympic_message_level) - printk(KERN_INFO "%s: Functional Address Mask Set\n",dev->name); - break ; - case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name); - break ; - case 0x04: - printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; - break ; - default: - break ; - } /* switch srb[2] */ - break ; - - /* SRB_READ_LOG - Read and reset the adapter error counters - */ - - case SRB_READ_LOG: - switch (readb(srb+2)) { - case 0x00: - if (olympic_priv->olympic_message_level) - printk(KERN_INFO "%s: Read Log issued\n",dev->name) ; - break ; - case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name); - break ; - case 0x04: - printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; - break ; - - } /* switch srb[2] */ - break ; - - /* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */ - - case SRB_READ_SR_COUNTERS: - switch (readb(srb+2)) { - case 0x00: - if (olympic_priv->olympic_message_level) - printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ; - break ; - case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name); - break ; - case 0x04: - printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; - break ; - default: - break ; - } /* 
switch srb[2] */ - break ; - - default: - printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n",dev->name); - break ; - } /* switch srb[0] */ - -} - -static int olympic_set_mac_address (struct net_device *dev, void *addr) -{ - struct sockaddr *saddr = addr ; - struct olympic_private *olympic_priv = netdev_priv(dev); - - if (netif_running(dev)) { - printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ; - return -EIO ; - } - - memcpy(olympic_priv->olympic_laa, saddr->sa_data,dev->addr_len) ; - - if (olympic_priv->olympic_message_level) { - printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, olympic_priv->olympic_laa[0], - olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2], - olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4], - olympic_priv->olympic_laa[5]); - } - - return 0 ; -} - -static void olympic_arb_cmd(struct net_device *dev) -{ - struct olympic_private *olympic_priv = netdev_priv(dev); - u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio; - u8 __iomem *arb_block, *asb_block, *srb ; - u8 header_len ; - u16 frame_len, buffer_len ; - struct sk_buff *mac_frame ; - u8 __iomem *buf_ptr ; - u8 __iomem *frame_data ; - u16 buff_off ; - u16 lan_status = 0, lan_status_diff ; /* Initialize to stop compiler warning */ - u8 fdx_prot_error ; - u16 next_ptr; - - arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ; - asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ; - srb = (olympic_priv->olympic_lap + olympic_priv->srb) ; - - if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */ - - header_len = readb(arb_block+8) ; /* 802.5 Token-Ring Header Length */ - frame_len = swab16(readw(arb_block + 10)) ; - - buff_off = swab16(readw(arb_block + 6)) ; - - buf_ptr = olympic_priv->olympic_lap + buff_off ; - -#if OLYMPIC_DEBUG -{ - int i; - frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ; - - for (i=0 ; i < 14 ; i++) { - printk("Loc %d = %02x\n",i,readb(frame_data + i)); - } - - printk("next %04x, fs %02x, len %04x\n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length))); -} -#endif - mac_frame = dev_alloc_skb(frame_len) ; - if (!mac_frame) { - printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name); - goto drop_frame; - } - - /* Walk the buffer chain, creating the frame */ - - do { - frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ; - buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length))); - memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data , buffer_len ) ; - next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next)); - } while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + swab16(next_ptr))); - - mac_frame->protocol = tr_type_trans(mac_frame, dev); - - if (olympic_priv->olympic_network_monitor) { - struct trh_hdr *mac_hdr; - printk(KERN_WARNING "%s: Received MAC Frame, details:\n",dev->name); - mac_hdr = tr_hdr(mac_frame); - printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n", - dev->name, mac_hdr->daddr); - printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %pM\n", - dev->name, mac_hdr->saddr); - } - netif_rx(mac_frame); - -drop_frame: - /* Now tell the card we have dealt with the received frame */ - - /* Set LISR Bit 1 */ - writel(LISR_ARB_FREE,olympic_priv->olympic_mmio + LISR_SUM); - - /* Is the ASB free ? 
*/ - - if (readb(asb_block + 2) != 0xff) { - olympic_priv->asb_queued = 1 ; - writel(LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM); - return ; - /* Drop out and wait for the bottom half to be run */ - } - - writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */ - writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */ - writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */ - writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */ - - writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM); - - olympic_priv->asb_queued = 2 ; - - return ; - - } else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */ - lan_status = swab16(readw(arb_block+6)); - fdx_prot_error = readb(arb_block+8) ; - - /* Issue ARB Free */ - writel(LISR_ARB_FREE,olympic_priv->olympic_mmio+LISR_SUM); - - lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status ; - - if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) { - if (lan_status_diff & LSC_LWF) - printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name); - if (lan_status_diff & LSC_ARW) - printk(KERN_WARNING "%s: Auto removal error\n",dev->name); - if (lan_status_diff & LSC_FPE) - printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name); - if (lan_status_diff & LSC_RR) - printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name); - - /* Adapter has been closed by the hardware */ - - /* reset tx/rx fifo's and busmaster logic */ - - writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL); - udelay(1); - writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL); - netif_stop_queue(dev); - olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ; - printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name); - } /* If serious error */ - - if (olympic_priv->olympic_message_level) { - if (lan_status_diff & LSC_SIG_LOSS) - printk(KERN_WARNING "%s: No receive signal detected\n", dev->name); - if (lan_status_diff & LSC_HARD_ERR) - printk(KERN_INFO "%s: Beaconing\n",dev->name); - if (lan_status_diff & LSC_SOFT_ERR) - printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name); - if (lan_status_diff & LSC_TRAN_BCN) - printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name); - if (lan_status_diff & LSC_SS) - printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); - if (lan_status_diff & LSC_RING_REC) - printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name); - if (lan_status_diff & LSC_FDX_MODE) - printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name); - } - - if (lan_status_diff & LSC_CO) { - - if (olympic_priv->olympic_message_level) - printk(KERN_INFO "%s: Counter Overflow\n", dev->name); - - /* Issue READ.LOG command */ - - writeb(SRB_READ_LOG, srb); - writeb(0,srb+1); - writeb(OLYMPIC_CLEAR_RET_CODE,srb+2); - writeb(0,srb+3); - writeb(0,srb+4); - writeb(0,srb+5); - - olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */ - - writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM); - - } - - if (lan_status_diff & LSC_SR_CO) { - - if (olympic_priv->olympic_message_level) - printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name); - - /* Issue a READ.SR.COUNTERS */ - - writeb(SRB_READ_SR_COUNTERS,srb); - writeb(0,srb+1); - writeb(OLYMPIC_CLEAR_RET_CODE,srb+2); - writeb(0,srb+3); - - olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */ - - writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM); - - } - - olympic_priv->olympic_lan_status = 
lan_status ; - - } /* Lan.change.status */ - else - printk(KERN_WARNING "%s: Unknown arb command\n", dev->name); -} - -static void olympic_asb_bh(struct net_device *dev) -{ - struct olympic_private *olympic_priv = netdev_priv(dev); - u8 __iomem *arb_block, *asb_block ; - - arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ; - asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ; - - if (olympic_priv->asb_queued == 1) { /* Dropped through the first time */ - - writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */ - writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */ - writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */ - writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */ - - writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM); - olympic_priv->asb_queued = 2 ; - - return ; - } - - if (olympic_priv->asb_queued == 2) { - switch (readb(asb_block+2)) { - case 0x01: - printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name); - break ; - case 0x26: - printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name); - break ; - case 0xFF: - /* Valid response, everything should be ok again */ - break ; - default: - printk(KERN_WARNING "%s: Invalid return code in asb\n",dev->name); - break ; - } - } - olympic_priv->asb_queued = 0 ; -} - -static int olympic_change_mtu(struct net_device *dev, int mtu) -{ - struct olympic_private *olympic_priv = netdev_priv(dev); - u16 max_mtu ; - - if (olympic_priv->olympic_ring_speed == 4) - max_mtu = 4500 ; - else - max_mtu = 18000 ; - - if (mtu > max_mtu) - return -EINVAL ; - if (mtu < 100) - return -EINVAL ; - - dev->mtu = mtu ; - olympic_priv->pkt_buf_sz = mtu + TR_HLEN ; - - return 0 ; -} - -static int olympic_proc_show(struct seq_file *m, void *v) -{ - struct net_device *dev = m->private; - struct olympic_private *olympic_priv=netdev_priv(dev); - u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ; - u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ; - u8 addr[6]; - u8 addr2[6]; - int i; - - seq_printf(m, - "IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name); - seq_printf(m, "\n%6s: Adapter Address : Node Address : Functional Addr\n", - dev->name); - - for (i = 0 ; i < 6 ; i++) - addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr) + i); - - seq_printf(m, "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n", - dev->name, - dev->dev_addr, addr, - readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)), - readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1), - readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2), - readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3)); - - seq_printf(m, "\n%6s: Token Ring Parameters Table:\n", dev->name); - - seq_printf(m, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n", - dev->name) ; - - for (i = 0 ; i < 6 ; i++) - addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr) + i); - for (i = 0 ; i < 6 ; i++) - addr2[i] = readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i); - - seq_printf(m, "%6s: %02x:%02x:%02x:%02x : %pM : %pM : %04x : %04x : %04x :\n", - dev->name, - readb(opt+offsetof(struct olympic_parameters_table, phys_addr)), - readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1), - readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2), - 
readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3), - addr, addr2, - swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))), - swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))), - swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code)))); - - seq_printf(m, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n", - dev->name) ; - - for (i = 0 ; i < 6 ; i++) - addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i); - seq_printf(m, "%6s: %pM : %04x : %04x : %04x : %04x : %04x : %04x : \n", - dev->name, addr, - swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))), - swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))), - swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))), - swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))), - swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))), - swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl)))); - - seq_printf(m, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n", - dev->name) ; - - for (i = 0 ; i < 6 ; i++) - addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i); - seq_printf(m, "%6s: : %02x : %02x : %pM : %02x:%02x:%02x:%02x : \n", - dev->name, - swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))), - swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))), - addr, - readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)), - readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1), - readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2), - readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3)); - - return 0; -} - -static int olympic_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, olympic_proc_show, PDE(inode)->data); -} - -static const struct file_operations olympic_proc_ops = { - .open = olympic_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static void __devexit olympic_remove_one(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev) ; - struct olympic_private *olympic_priv=netdev_priv(dev); - - if (olympic_priv->olympic_network_monitor) { - char proc_name[20] ; - strcpy(proc_name,"olympic_") ; - strcat(proc_name,dev->name) ; - remove_proc_entry(proc_name,init_net.proc_net); - } - unregister_netdev(dev) ; - iounmap(olympic_priv->olympic_mmio) ; - iounmap(olympic_priv->olympic_lap) ; - pci_release_regions(pdev) ; - pci_set_drvdata(pdev,NULL) ; - free_netdev(dev) ; -} - -static struct pci_driver olympic_driver = { - .name = "olympic", - .id_table = olympic_pci_tbl, - .probe = olympic_probe, - .remove = __devexit_p(olympic_remove_one), -}; - -static int __init olympic_pci_init(void) -{ - return pci_register_driver(&olympic_driver) ; -} - -static void __exit olympic_pci_cleanup(void) -{ - pci_unregister_driver(&olympic_driver) ; -} - - -module_init(olympic_pci_init) ; -module_exit(olympic_pci_cleanup) ; - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/tokenring/olympic.h b/drivers/net/tokenring/olympic.h deleted file mode 100644 index 30631bae4c9..00000000000 --- a/drivers/net/tokenring/olympic.h +++ /dev/null @@ -1,321 +0,0 @@ -/* - * olympic.h (c) 1999 Peter De Schrijver All Rights Reserved - * 1999,2000 Mike Phillips (mikep@linuxtr.net) - * - * Linux 
driver for IBM PCI tokenring cards based on the olympic and the PIT/PHY chipset. - * - * Base Driver Skeleton: - * Written 1993-94 by Donald Becker. - * - * Copyright 1993 United States Government as represented by the - * Director, National Security Agency. - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - */ - -#define CID 0x4e - -#define BCTL 0x70 -#define BCTL_SOFTRESET (1<<15) -#define BCTL_MIMREB (1<<6) -#define BCTL_MODE_INDICATOR (1<<5) - -#define GPR 0x4a -#define GPR_OPTI_BF (1<<6) -#define GPR_NEPTUNE_BF (1<<4) -#define GPR_AUTOSENSE (1<<2) -#define GPR_16MBPS (1<<3) - -#define PAG 0x85 -#define LBC 0x8e - -#define LISR 0x10 -#define LISR_SUM 0x14 -#define LISR_RWM 0x18 - -#define LISR_LIE (1<<15) -#define LISR_SLIM (1<<13) -#define LISR_SLI (1<<12) -#define LISR_PCMSRMASK (1<<11) -#define LISR_PCMSRINT (1<<10) -#define LISR_WOLMASK (1<<9) -#define LISR_WOL (1<<8) -#define LISR_SRB_CMD (1<<5) -#define LISR_ASB_REPLY (1<<4) -#define LISR_ASB_FREE_REQ (1<<2) -#define LISR_ARB_FREE (1<<1) -#define LISR_TRB_FRAME (1<<0) - -#define SISR 0x20 -#define SISR_SUM 0x24 -#define SISR_RWM 0x28 -#define SISR_RR 0x2C -#define SISR_RESMASK 0x30 -#define SISR_MASK 0x54 -#define SISR_MASK_SUM 0x58 -#define SISR_MASK_RWM 0x5C - -#define SISR_TX2_IDLE (1<<31) -#define SISR_TX2_HALT (1<<29) -#define SISR_TX2_EOF (1<<28) -#define SISR_TX1_IDLE (1<<27) -#define SISR_TX1_HALT (1<<25) -#define SISR_TX1_EOF (1<<24) -#define SISR_TIMEOUT (1<<23) -#define SISR_RX_NOBUF (1<<22) -#define SISR_RX_STATUS (1<<21) -#define SISR_RX_HALT (1<<18) -#define SISR_RX_EOF_EARLY (1<<16) -#define SISR_MI (1<<15) -#define SISR_PI (1<<13) -#define SISR_ERR (1<<9) -#define SISR_ADAPTER_CHECK (1<<6) -#define SISR_SRB_REPLY (1<<5) -#define SISR_ASB_FREE (1<<4) -#define SISR_ARB_CMD (1<<3) -#define SISR_TRB_REPLY (1<<2) - -#define EISR 0x34 -#define EISR_RWM 0x38 -#define EISR_MASK 0x3c -#define EISR_MASK_OPTIONS 0x001FFF7F - -#define LAPA 0x60 -#define LAPWWO 0x64 -#define LAPWWC 0x68 -#define LAPCTL 0x6C -#define LAIPD 0x78 -#define LAIPDDINC 0x7C - -#define TIMER 0x50 - -#define CLKCTL 0x74 -#define CLKCTL_PAUSE (1<<15) - -#define PM_CON 0x4 - -#define BMCTL_SUM 0x40 -#define BMCTL_RWM 0x44 -#define BMCTL_TX2_DIS (1<<30) -#define BMCTL_TX1_DIS (1<<26) -#define BMCTL_RX_DIS (1<<22) - -#define BMASR 0xcc - -#define RXDESCQ 0x90 -#define RXDESCQCNT 0x94 -#define RXCDA 0x98 -#define RXENQ 0x9C -#define RXSTATQ 0xA0 -#define RXSTATQCNT 0xA4 -#define RXCSA 0xA8 -#define RXCLEN 0xAC -#define RXHLEN 0xAE - -#define TXDESCQ_1 0xb0 -#define TXDESCQ_2 0xd0 -#define TXDESCQCNT_1 0xb4 -#define TXDESCQCNT_2 0xd4 -#define TXCDA_1 0xb8 -#define TXCDA_2 0xd8 -#define TXENQ_1 0xbc -#define TXENQ_2 0xdc -#define TXSTATQ_1 0xc0 -#define TXSTATQ_2 0xe0 -#define TXSTATQCNT_1 0xc4 -#define TXSTATQCNT_2 0xe4 -#define TXCSA_1 0xc8 -#define TXCSA_2 0xe8 -/* Cardbus */ -#define FERMASK 0xf4 -#define FERMASK_INT_BIT (1<<15) - -#define OLYMPIC_IO_SPACE 256 - -#define SRB_COMMAND_SIZE 50 - -#define OLYMPIC_MAX_ADAPTERS 8 /* 0x08 __MODULE_STRING can't hand 0xnn */ - -/* Defines for LAN STATUS CHANGE reports */ -#define LSC_SIG_LOSS 0x8000 -#define LSC_HARD_ERR 0x4000 -#define LSC_SOFT_ERR 0x2000 -#define LSC_TRAN_BCN 0x1000 -#define LSC_LWF 0x0800 -#define LSC_ARW 0x0400 -#define LSC_FPE 0x0200 -#define LSC_RR 0x0100 -#define LSC_CO 0x0080 -#define LSC_SS 0x0040 -#define LSC_RING_REC 0x0020 -#define LSC_SR_CO 0x0010 -#define LSC_FDX_MODE 0x0004 - -/* Defines 
for OPEN ADAPTER command */ - -#define OPEN_ADAPTER_EXT_WRAP (1<<15) -#define OPEN_ADAPTER_DIS_HARDEE (1<<14) -#define OPEN_ADAPTER_DIS_SOFTERR (1<<13) -#define OPEN_ADAPTER_PASS_ADC_MAC (1<<12) -#define OPEN_ADAPTER_PASS_ATT_MAC (1<<11) -#define OPEN_ADAPTER_ENABLE_EC (1<<10) -#define OPEN_ADAPTER_CONTENDER (1<<8) -#define OPEN_ADAPTER_PASS_BEACON (1<<7) -#define OPEN_ADAPTER_ENABLE_FDX (1<<6) -#define OPEN_ADAPTER_ENABLE_RPL (1<<5) -#define OPEN_ADAPTER_INHIBIT_ETR (1<<4) -#define OPEN_ADAPTER_INTERNAL_WRAP (1<<3) -#define OPEN_ADAPTER_USE_OPTS2 (1<<0) - -#define OPEN_ADAPTER_2_ENABLE_ONNOW (1<<15) - -/* Defines for SRB Commands */ - -#define SRB_ACCESS_REGISTER 0x1f -#define SRB_CLOSE_ADAPTER 0x04 -#define SRB_CONFIGURE_BRIDGE 0x0c -#define SRB_CONFIGURE_WAKEUP_EVENT 0x1a -#define SRB_MODIFY_BRIDGE_PARMS 0x15 -#define SRB_MODIFY_OPEN_OPTIONS 0x01 -#define SRB_MODIFY_RECEIVE_OPTIONS 0x17 -#define SRB_NO_OPERATION 0x00 -#define SRB_OPEN_ADAPTER 0x03 -#define SRB_READ_LOG 0x08 -#define SRB_READ_SR_COUNTERS 0x16 -#define SRB_RESET_GROUP_ADDRESS 0x02 -#define SRB_SAVE_CONFIGURATION 0x1b -#define SRB_SET_BRIDGE_PARMS 0x09 -#define SRB_SET_BRIDGE_TARGETS 0x10 -#define SRB_SET_FUNC_ADDRESS 0x07 -#define SRB_SET_GROUP_ADDRESS 0x06 -#define SRB_SET_GROUP_ADDR_OPTIONS 0x11 -#define SRB_UPDATE_WAKEUP_PATTERN 0x19 - -/* Clear return code */ - -#define OLYMPIC_CLEAR_RET_CODE 0xfe - -/* ARB Commands */ -#define ARB_RECEIVE_DATA 0x81 -#define ARB_LAN_CHANGE_STATUS 0x84 -/* ASB Response commands */ - -#define ASB_RECEIVE_DATA 0x81 - - -/* Olympic defaults for buffers */ - -#define OLYMPIC_RX_RING_SIZE 16 /* should be a power of 2 */ -#define OLYMPIC_TX_RING_SIZE 8 /* should be a power of 2 */ - -#define PKT_BUF_SZ 4096 /* Default packet size */ - -/* Olympic data structures */ - -/* xxxx These structures are all little endian in hardware. */ - -struct olympic_tx_desc { - __le32 buffer; - __le32 status_length; -}; - -struct olympic_tx_status { - __le32 status; -}; - -struct olympic_rx_desc { - __le32 buffer; - __le32 res_length; -}; - -struct olympic_rx_status { - __le32 fragmentcnt_framelen; - __le32 status_buffercnt; -}; -/* xxxx END These structures are all little endian in hardware. */ -/* xxxx There may be more, but I'm pretty sure about these */ - -struct mac_receive_buffer { - __le16 next ; - u8 padding ; - u8 frame_status ; - __le16 buffer_length ; - u8 frame_data ; -}; - -struct olympic_private { - - u16 srb; /* be16 */ - u16 trb; /* be16 */ - u16 arb; /* be16 */ - u16 asb; /* be16 */ - - u8 __iomem *olympic_mmio; - u8 __iomem *olympic_lap; - struct pci_dev *pdev ; - const char *olympic_card_name; - - spinlock_t olympic_lock ; - - volatile int srb_queued; /* True if an SRB is still posted */ - wait_queue_head_t srb_wait; - - volatile int asb_queued; /* True if an ASB is posted */ - - volatile int trb_queued; /* True if a TRB is posted */ - wait_queue_head_t trb_wait ; - - /* These must be on a 4 byte boundary. 
*/ - struct olympic_rx_desc olympic_rx_ring[OLYMPIC_RX_RING_SIZE]; - struct olympic_tx_desc olympic_tx_ring[OLYMPIC_TX_RING_SIZE]; - struct olympic_rx_status olympic_rx_status_ring[OLYMPIC_RX_RING_SIZE]; - struct olympic_tx_status olympic_tx_status_ring[OLYMPIC_TX_RING_SIZE]; - - struct sk_buff *tx_ring_skb[OLYMPIC_TX_RING_SIZE], *rx_ring_skb[OLYMPIC_RX_RING_SIZE]; - int tx_ring_free, tx_ring_last_status, rx_ring_last_received,rx_status_last_received, free_tx_ring_entries; - - u16 olympic_lan_status ; - u8 olympic_ring_speed ; - u16 pkt_buf_sz ; - u8 olympic_receive_options, olympic_copy_all_options,olympic_message_level, olympic_network_monitor; - u16 olympic_addr_table_addr, olympic_parms_addr ; - u8 olympic_laa[6] ; - u32 rx_ring_dma_addr; - u32 rx_status_ring_dma_addr; - u32 tx_ring_dma_addr; - u32 tx_status_ring_dma_addr; -}; - -struct olympic_adapter_addr_table { - - u8 node_addr[6] ; - u8 reserved[4] ; - u8 func_addr[4] ; -} ; - -struct olympic_parameters_table { - - u8 phys_addr[4] ; - u8 up_node_addr[6] ; - u8 up_phys_addr[4] ; - u8 poll_addr[6] ; - u16 reserved ; - u16 acc_priority ; - u16 auth_source_class ; - u16 att_code ; - u8 source_addr[6] ; - u16 beacon_type ; - u16 major_vector ; - u16 lan_status ; - u16 soft_error_time ; - u16 reserved1 ; - u16 local_ring ; - u16 mon_error ; - u16 beacon_transmit ; - u16 beacon_receive ; - u16 frame_correl ; - u8 beacon_naun[6] ; - u32 reserved2 ; - u8 beacon_phys[4] ; -}; diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c deleted file mode 100644 index 62d90e40f9e..00000000000 --- a/drivers/net/tokenring/proteon.c +++ /dev/null @@ -1,422 +0,0 @@ -/* - * proteon.c: A network driver for Proteon ISA token ring cards. - * - * Based on tmspci written 1999 by Adam Fritzler - * - * Written 2003 by Jochen Friedrich - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * This driver module supports the following cards: - * - Proteon 1392, 1392+ - * - * Maintainer(s): - * AF Adam Fritzler - * JF Jochen Friedrich jochen@scram.de - * - * Modification History: - * 02-Jan-03 JF Created - * - */ -static const char version[] = "proteon.c: v1.00 02/01/2003 by Jochen Friedrich\n"; - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/delay.h> -#include <linux/errno.h> -#include <linux/pci.h> -#include <linux/init.h> -#include <linux/netdevice.h> -#include <linux/trdevice.h> -#include <linux/platform_device.h> - -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/pci.h> -#include <asm/dma.h> - -#include "tms380tr.h" - -#define PROTEON_IO_EXTENT 32 - -/* A zero-terminated list of I/O addresses to be probed. */ -static unsigned int portlist[] __initdata = { - 0x0A20, 0x0E20, 0x1A20, 0x1E20, 0x2A20, 0x2E20, 0x3A20, 0x3E20,// Prot. - 0x4A20, 0x4E20, 0x5A20, 0x5E20, 0x6A20, 0x6E20, 0x7A20, 0x7E20,// Prot. - 0x8A20, 0x8E20, 0x9A20, 0x9E20, 0xAA20, 0xAE20, 0xBA20, 0xBE20,// Prot. - 0xCA20, 0xCE20, 0xDA20, 0xDE20, 0xEA20, 0xEE20, 0xFA20, 0xFE20,// Prot. - 0 -}; - -/* A zero-terminated list of IRQs to be probed. */ -static unsigned short irqlist[] = { - 7, 6, 5, 4, 3, 12, 11, 10, 9, - 0 -}; - -/* A zero-terminated list of DMAs to be probed. 
*/ -static int dmalist[] __initdata = { - 5, 6, 7, - 0 -}; - -static char cardname[] = "Proteon 1392\0"; -static u64 dma_mask = ISA_MAX_ADDRESS; -static int proteon_open(struct net_device *dev); -static void proteon_read_eeprom(struct net_device *dev); -static unsigned short proteon_setnselout_pins(struct net_device *dev); - -static unsigned short proteon_sifreadb(struct net_device *dev, unsigned short reg) -{ - return inb(dev->base_addr + reg); -} - -static unsigned short proteon_sifreadw(struct net_device *dev, unsigned short reg) -{ - return inw(dev->base_addr + reg); -} - -static void proteon_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg) -{ - outb(val, dev->base_addr + reg); -} - -static void proteon_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg) -{ - outw(val, dev->base_addr + reg); -} - -static int __init proteon_probe1(struct net_device *dev, int ioaddr) -{ - unsigned char chk1, chk2; - int i; - - if (!request_region(ioaddr, PROTEON_IO_EXTENT, cardname)) - return -ENODEV; - - - chk1 = inb(ioaddr + 0x1f); /* Get Proteon ID reg 1 */ - if (chk1 != 0x1f) - goto nodev; - - chk1 = inb(ioaddr + 0x1e) & 0x07; /* Get Proteon ID reg 0 */ - for (i=0; i<16; i++) { - chk2 = inb(ioaddr + 0x1e) & 0x07; - if (((chk1 + 1) & 0x07) != chk2) - goto nodev; - chk1 = chk2; - } - - dev->base_addr = ioaddr; - return 0; -nodev: - release_region(ioaddr, PROTEON_IO_EXTENT); - return -ENODEV; -} - -static struct net_device_ops proteon_netdev_ops __read_mostly; - -static int __init setup_card(struct net_device *dev, struct device *pdev) -{ - struct net_local *tp; - static int versionprinted; - const unsigned *port; - int j,err = 0; - - if (!dev) - return -ENOMEM; - - if (dev->base_addr) /* probe specific location */ - err = proteon_probe1(dev, dev->base_addr); - else { - for (port = portlist; *port; port++) { - err = proteon_probe1(dev, *port); - if (!err) - break; - } - } - if (err) - goto out5; - - /* At this point we have found a valid card. 
*/ - - if (versionprinted++ == 0) - printk(KERN_DEBUG "%s", version); - - err = -EIO; - pdev->dma_mask = &dma_mask; - if (tmsdev_init(dev, pdev)) - goto out4; - - dev->base_addr &= ~3; - - proteon_read_eeprom(dev); - - printk(KERN_DEBUG "proteon.c: Ring Station Address: %pM\n", - dev->dev_addr); - - tp = netdev_priv(dev); - tp->setnselout = proteon_setnselout_pins; - - tp->sifreadb = proteon_sifreadb; - tp->sifreadw = proteon_sifreadw; - tp->sifwriteb = proteon_sifwriteb; - tp->sifwritew = proteon_sifwritew; - - memcpy(tp->ProductID, cardname, PROD_ID_SIZE + 1); - - tp->tmspriv = NULL; - - dev->netdev_ops = &proteon_netdev_ops; - - if (dev->irq == 0) - { - for(j = 0; irqlist[j] != 0; j++) - { - dev->irq = irqlist[j]; - if (!request_irq(dev->irq, tms380tr_interrupt, 0, - cardname, dev)) - break; - } - - if(irqlist[j] == 0) - { - printk(KERN_INFO "proteon.c: AutoSelect no IRQ available\n"); - goto out3; - } - } - else - { - for(j = 0; irqlist[j] != 0; j++) - if (irqlist[j] == dev->irq) - break; - if (irqlist[j] == 0) - { - printk(KERN_INFO "proteon.c: Illegal IRQ %d specified\n", - dev->irq); - goto out3; - } - if (request_irq(dev->irq, tms380tr_interrupt, 0, - cardname, dev)) - { - printk(KERN_INFO "proteon.c: Selected IRQ %d not available\n", - dev->irq); - goto out3; - } - } - - if (dev->dma == 0) - { - for(j = 0; dmalist[j] != 0; j++) - { - dev->dma = dmalist[j]; - if (!request_dma(dev->dma, cardname)) - break; - } - - if(dmalist[j] == 0) - { - printk(KERN_INFO "proteon.c: AutoSelect no DMA available\n"); - goto out2; - } - } - else - { - for(j = 0; dmalist[j] != 0; j++) - if (dmalist[j] == dev->dma) - break; - if (dmalist[j] == 0) - { - printk(KERN_INFO "proteon.c: Illegal DMA %d specified\n", - dev->dma); - goto out2; - } - if (request_dma(dev->dma, cardname)) - { - printk(KERN_INFO "proteon.c: Selected DMA %d not available\n", - dev->dma); - goto out2; - } - } - - err = register_netdev(dev); - if (err) - goto out; - - printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n", - dev->name, dev->base_addr, dev->irq, dev->dma); - - return 0; -out: - free_dma(dev->dma); -out2: - free_irq(dev->irq, dev); -out3: - tmsdev_term(dev); -out4: - release_region(dev->base_addr, PROTEON_IO_EXTENT); -out5: - return err; -} - -/* - * Reads MAC address from adapter RAM, which should've read it from - * the onboard ROM. - * - * Calling this on a board that does not support it can be a very - * dangerous thing. The Madge board, for instance, will lock your - * machine hard when this is called. Luckily, its supported in a - * separate driver. 
--ASF - */ -static void proteon_read_eeprom(struct net_device *dev) -{ - int i; - - /* Address: 0000:0000 */ - proteon_sifwritew(dev, 0, SIFADX); - proteon_sifwritew(dev, 0, SIFADR); - - /* Read six byte MAC address data */ - dev->addr_len = 6; - for(i = 0; i < 6; i++) - dev->dev_addr[i] = proteon_sifreadw(dev, SIFINC) >> 8; -} - -static unsigned short proteon_setnselout_pins(struct net_device *dev) -{ - return 0; -} - -static int proteon_open(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned short val = 0; - int i; - - /* Proteon reset sequence */ - outb(0, dev->base_addr + 0x11); - mdelay(20); - outb(0x04, dev->base_addr + 0x11); - mdelay(20); - outb(0, dev->base_addr + 0x11); - mdelay(100); - - /* set control/status reg */ - val = inb(dev->base_addr + 0x11); - val |= 0x78; - val &= 0xf9; - if(tp->DataRate == SPEED_4) - val |= 0x20; - else - val &= ~0x20; - - outb(val, dev->base_addr + 0x11); - outb(0xff, dev->base_addr + 0x12); - for(i = 0; irqlist[i] != 0; i++) - { - if(irqlist[i] == dev->irq) - break; - } - val = i; - i = (7 - dev->dma) << 4; - val |= i; - outb(val, dev->base_addr + 0x13); - - return tms380tr_open(dev); -} - -#define ISATR_MAX_ADAPTERS 3 - -static int io[ISATR_MAX_ADAPTERS]; -static int irq[ISATR_MAX_ADAPTERS]; -static int dma[ISATR_MAX_ADAPTERS]; - -MODULE_LICENSE("GPL"); - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(dma, int, NULL, 0); - -static struct platform_device *proteon_dev[ISATR_MAX_ADAPTERS]; - -static struct platform_driver proteon_driver = { - .driver = { - .name = "proteon", - }, -}; - -static int __init proteon_init(void) -{ - struct net_device *dev; - struct platform_device *pdev; - int i, num = 0, err = 0; - - proteon_netdev_ops = tms380tr_netdev_ops; - proteon_netdev_ops.ndo_open = proteon_open; - proteon_netdev_ops.ndo_stop = tms380tr_close; - - err = platform_driver_register(&proteon_driver); - if (err) - return err; - - for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { - dev = alloc_trdev(sizeof(struct net_local)); - if (!dev) - continue; - - dev->base_addr = io[i]; - dev->irq = irq[i]; - dev->dma = dma[i]; - pdev = platform_device_register_simple("proteon", - i, NULL, 0); - if (IS_ERR(pdev)) { - free_netdev(dev); - continue; - } - err = setup_card(dev, &pdev->dev); - if (!err) { - proteon_dev[i] = pdev; - platform_set_drvdata(pdev, dev); - ++num; - } else { - platform_device_unregister(pdev); - free_netdev(dev); - } - } - - printk(KERN_NOTICE "proteon.c: %d cards found.\n", num); - /* Probe for cards. 
*/ - if (num == 0) { - printk(KERN_NOTICE "proteon.c: No cards found.\n"); - platform_driver_unregister(&proteon_driver); - return -ENODEV; - } - return 0; -} - -static void __exit proteon_cleanup(void) -{ - struct net_device *dev; - int i; - - for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { - struct platform_device *pdev = proteon_dev[i]; - - if (!pdev) - continue; - dev = platform_get_drvdata(pdev); - unregister_netdev(dev); - release_region(dev->base_addr, PROTEON_IO_EXTENT); - free_irq(dev->irq, dev); - free_dma(dev->dma); - tmsdev_term(dev); - free_netdev(dev); - platform_set_drvdata(pdev, NULL); - platform_device_unregister(pdev); - } - platform_driver_unregister(&proteon_driver); -} - -module_init(proteon_init); -module_exit(proteon_cleanup); diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c deleted file mode 100644 index ee11e93dc30..00000000000 --- a/drivers/net/tokenring/skisa.c +++ /dev/null @@ -1,432 +0,0 @@ -/* - * skisa.c: A network driver for SK-NET TMS380-based ISA token ring cards. - * - * Based on tmspci written 1999 by Adam Fritzler - * - * Written 2000 by Jochen Friedrich - * Dedicated to my girlfriend Steffi Bopp - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * This driver module supports the following cards: - * - SysKonnect TR4/16(+) ISA (SK-4190) - * - * Maintainer(s): - * AF Adam Fritzler - * JF Jochen Friedrich jochen@scram.de - * - * Modification History: - * 14-Jan-01 JF Created - * 28-Oct-02 JF Fixed probe of card for static compilation. - * Fixed module init to not make hotplug go wild. - * 09-Nov-02 JF Fixed early bail out on out of memory - * situations if multiple cards are found. - * Cleaned up some unnecessary console SPAM. - * 09-Dec-02 JF Fixed module reference counting. - * 02-Jan-03 JF Renamed to skisa.c - * - */ -static const char version[] = "skisa.c: v1.03 09/12/2002 by Jochen Friedrich\n"; - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/pci.h> -#include <linux/init.h> -#include <linux/netdevice.h> -#include <linux/trdevice.h> -#include <linux/platform_device.h> - -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/pci.h> -#include <asm/dma.h> - -#include "tms380tr.h" - -#define SK_ISA_IO_EXTENT 32 - -/* A zero-terminated list of I/O addresses to be probed. */ -static unsigned int portlist[] __initdata = { - 0x0A20, 0x1A20, 0x0B20, 0x1B20, 0x0980, 0x1980, 0x0900, 0x1900,// SK - 0 -}; - -/* A zero-terminated list of IRQs to be probed. - * Used again after initial probe for sktr_chipset_init, called from sktr_open. - */ -static const unsigned short irqlist[] = { - 3, 5, 9, 10, 11, 12, 15, - 0 -}; - -/* A zero-terminated list of DMAs to be probed. 
*/ -static int dmalist[] __initdata = { - 5, 6, 7, - 0 -}; - -static char isa_cardname[] = "SK NET TR 4/16 ISA\0"; -static u64 dma_mask = ISA_MAX_ADDRESS; -static int sk_isa_open(struct net_device *dev); -static void sk_isa_read_eeprom(struct net_device *dev); -static unsigned short sk_isa_setnselout_pins(struct net_device *dev); - -static unsigned short sk_isa_sifreadb(struct net_device *dev, unsigned short reg) -{ - return inb(dev->base_addr + reg); -} - -static unsigned short sk_isa_sifreadw(struct net_device *dev, unsigned short reg) -{ - return inw(dev->base_addr + reg); -} - -static void sk_isa_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg) -{ - outb(val, dev->base_addr + reg); -} - -static void sk_isa_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg) -{ - outw(val, dev->base_addr + reg); -} - - -static int __init sk_isa_probe1(struct net_device *dev, int ioaddr) -{ - unsigned char old, chk1, chk2; - - if (!request_region(ioaddr, SK_ISA_IO_EXTENT, isa_cardname)) - return -ENODEV; - - old = inb(ioaddr + SIFADR); /* Get the old SIFADR value */ - - chk1 = 0; /* Begin with check value 0 */ - do { - /* Write new SIFADR value */ - outb(chk1, ioaddr + SIFADR); - - /* Read, invert and write */ - chk2 = inb(ioaddr + SIFADD); - chk2 ^= 0x0FE; - outb(chk2, ioaddr + SIFADR); - - /* Read, invert and compare */ - chk2 = inb(ioaddr + SIFADD); - chk2 ^= 0x0FE; - - if(chk1 != chk2) { - release_region(ioaddr, SK_ISA_IO_EXTENT); - return -ENODEV; - } - - chk1 -= 2; - } while(chk1 != 0); /* Repeat 128 times (all byte values) */ - - /* Restore the SIFADR value */ - outb(old, ioaddr + SIFADR); - - dev->base_addr = ioaddr; - return 0; -} - -static struct net_device_ops sk_isa_netdev_ops __read_mostly; - -static int __init setup_card(struct net_device *dev, struct device *pdev) -{ - struct net_local *tp; - static int versionprinted; - const unsigned *port; - int j, err = 0; - - if (!dev) - return -ENOMEM; - - if (dev->base_addr) /* probe specific location */ - err = sk_isa_probe1(dev, dev->base_addr); - else { - for (port = portlist; *port; port++) { - err = sk_isa_probe1(dev, *port); - if (!err) - break; - } - } - if (err) - goto out5; - - /* At this point we have found a valid card. 
*/ - - if (versionprinted++ == 0) - printk(KERN_DEBUG "%s", version); - - err = -EIO; - pdev->dma_mask = &dma_mask; - if (tmsdev_init(dev, pdev)) - goto out4; - - dev->base_addr &= ~3; - - sk_isa_read_eeprom(dev); - - printk(KERN_DEBUG "skisa.c: Ring Station Address: %pM\n", - dev->dev_addr); - - tp = netdev_priv(dev); - tp->setnselout = sk_isa_setnselout_pins; - - tp->sifreadb = sk_isa_sifreadb; - tp->sifreadw = sk_isa_sifreadw; - tp->sifwriteb = sk_isa_sifwriteb; - tp->sifwritew = sk_isa_sifwritew; - - memcpy(tp->ProductID, isa_cardname, PROD_ID_SIZE + 1); - - tp->tmspriv = NULL; - - dev->netdev_ops = &sk_isa_netdev_ops; - - if (dev->irq == 0) - { - for(j = 0; irqlist[j] != 0; j++) - { - dev->irq = irqlist[j]; - if (!request_irq(dev->irq, tms380tr_interrupt, 0, - isa_cardname, dev)) - break; - } - - if(irqlist[j] == 0) - { - printk(KERN_INFO "skisa.c: AutoSelect no IRQ available\n"); - goto out3; - } - } - else - { - for(j = 0; irqlist[j] != 0; j++) - if (irqlist[j] == dev->irq) - break; - if (irqlist[j] == 0) - { - printk(KERN_INFO "skisa.c: Illegal IRQ %d specified\n", - dev->irq); - goto out3; - } - if (request_irq(dev->irq, tms380tr_interrupt, 0, - isa_cardname, dev)) - { - printk(KERN_INFO "skisa.c: Selected IRQ %d not available\n", - dev->irq); - goto out3; - } - } - - if (dev->dma == 0) - { - for(j = 0; dmalist[j] != 0; j++) - { - dev->dma = dmalist[j]; - if (!request_dma(dev->dma, isa_cardname)) - break; - } - - if(dmalist[j] == 0) - { - printk(KERN_INFO "skisa.c: AutoSelect no DMA available\n"); - goto out2; - } - } - else - { - for(j = 0; dmalist[j] != 0; j++) - if (dmalist[j] == dev->dma) - break; - if (dmalist[j] == 0) - { - printk(KERN_INFO "skisa.c: Illegal DMA %d specified\n", - dev->dma); - goto out2; - } - if (request_dma(dev->dma, isa_cardname)) - { - printk(KERN_INFO "skisa.c: Selected DMA %d not available\n", - dev->dma); - goto out2; - } - } - - err = register_netdev(dev); - if (err) - goto out; - - printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n", - dev->name, dev->base_addr, dev->irq, dev->dma); - - return 0; -out: - free_dma(dev->dma); -out2: - free_irq(dev->irq, dev); -out3: - tmsdev_term(dev); -out4: - release_region(dev->base_addr, SK_ISA_IO_EXTENT); -out5: - return err; -} - -/* - * Reads MAC address from adapter RAM, which should've read it from - * the onboard ROM. - * - * Calling this on a board that does not support it can be a very - * dangerous thing. The Madge board, for instance, will lock your - * machine hard when this is called. Luckily, its supported in a - * separate driver. 
--ASF - */ -static void sk_isa_read_eeprom(struct net_device *dev) -{ - int i; - - /* Address: 0000:0000 */ - sk_isa_sifwritew(dev, 0, SIFADX); - sk_isa_sifwritew(dev, 0, SIFADR); - - /* Read six byte MAC address data */ - dev->addr_len = 6; - for(i = 0; i < 6; i++) - dev->dev_addr[i] = sk_isa_sifreadw(dev, SIFINC) >> 8; -} - -static unsigned short sk_isa_setnselout_pins(struct net_device *dev) -{ - return 0; -} - -static int sk_isa_open(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned short val = 0; - unsigned short oldval; - int i; - - val = 0; - for(i = 0; irqlist[i] != 0; i++) - { - if(irqlist[i] == dev->irq) - break; - } - - val |= CYCLE_TIME << 2; - val |= i << 4; - i = dev->dma - 5; - val |= i; - if(tp->DataRate == SPEED_4) - val |= LINE_SPEED_BIT; - else - val &= ~LINE_SPEED_BIT; - oldval = sk_isa_sifreadb(dev, POSREG); - /* Leave cycle bits alone */ - oldval |= 0xf3; - val &= oldval; - sk_isa_sifwriteb(dev, val, POSREG); - - return tms380tr_open(dev); -} - -#define ISATR_MAX_ADAPTERS 3 - -static int io[ISATR_MAX_ADAPTERS]; -static int irq[ISATR_MAX_ADAPTERS]; -static int dma[ISATR_MAX_ADAPTERS]; - -MODULE_LICENSE("GPL"); - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(dma, int, NULL, 0); - -static struct platform_device *sk_isa_dev[ISATR_MAX_ADAPTERS]; - -static struct platform_driver sk_isa_driver = { - .driver = { - .name = "skisa", - }, -}; - -static int __init sk_isa_init(void) -{ - struct net_device *dev; - struct platform_device *pdev; - int i, num = 0, err = 0; - - sk_isa_netdev_ops = tms380tr_netdev_ops; - sk_isa_netdev_ops.ndo_open = sk_isa_open; - sk_isa_netdev_ops.ndo_stop = tms380tr_close; - - err = platform_driver_register(&sk_isa_driver); - if (err) - return err; - - for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { - dev = alloc_trdev(sizeof(struct net_local)); - if (!dev) - continue; - - dev->base_addr = io[i]; - dev->irq = irq[i]; - dev->dma = dma[i]; - pdev = platform_device_register_simple("skisa", - i, NULL, 0); - if (IS_ERR(pdev)) { - free_netdev(dev); - continue; - } - err = setup_card(dev, &pdev->dev); - if (!err) { - sk_isa_dev[i] = pdev; - platform_set_drvdata(sk_isa_dev[i], dev); - ++num; - } else { - platform_device_unregister(pdev); - free_netdev(dev); - } - } - - printk(KERN_NOTICE "skisa.c: %d cards found.\n", num); - /* Probe for cards. */ - if (num == 0) { - printk(KERN_NOTICE "skisa.c: No cards found.\n"); - platform_driver_unregister(&sk_isa_driver); - return -ENODEV; - } - return 0; -} - -static void __exit sk_isa_cleanup(void) -{ - struct net_device *dev; - int i; - - for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { - struct platform_device *pdev = sk_isa_dev[i]; - - if (!pdev) - continue; - dev = platform_get_drvdata(pdev); - unregister_netdev(dev); - release_region(dev->base_addr, SK_ISA_IO_EXTENT); - free_irq(dev->irq, dev); - free_dma(dev->dma); - tmsdev_term(dev); - free_netdev(dev); - platform_set_drvdata(pdev, NULL); - platform_device_unregister(pdev); - } - platform_driver_unregister(&sk_isa_driver); -} - -module_init(sk_isa_init); -module_exit(sk_isa_cleanup); diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c deleted file mode 100644 index cb35fb79e01..00000000000 --- a/drivers/net/tokenring/smctr.c +++ /dev/null @@ -1,5717 +0,0 @@ -/* - * smctr.c: A network driver for the SMC Token Ring Adapters. 
- * - * Written by Jay Schulist <jschlst@samba.org> - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * This device driver works with the following SMC adapters: - * - SMC TokenCard Elite (8115T, chips 825/584) - * - SMC TokenCard Elite/A MCA (8115T/A, chips 825/594) - * - * Source(s): - * - SMC TokenCard SDK. - * - * Maintainer(s): - * JS Jay Schulist <jschlst@samba.org> - * - * Changes: - * 07102000 JS Fixed a timing problem in smctr_wait_cmd(); - * Also added a bit more discriptive error msgs. - * 07122000 JS Fixed problem with detecting a card with - * module io/irq/mem specified. - * - * To do: - * 1. Multicast support. - * - * Initial 2.5 cleanup Alan Cox <alan@lxorguk.ukuu.org.uk> 2002/10/28 - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/interrupt.h> -#include <linux/ptrace.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/string.h> -#include <linux/time.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/mca-legacy.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/trdevice.h> -#include <linux/bitops.h> -#include <linux/firmware.h> - -#include <asm/io.h> -#include <asm/dma.h> -#include <asm/irq.h> - -#if BITS_PER_LONG == 64 -#error FIXME: driver does not support 64-bit platforms -#endif - -#include "smctr.h" /* Our Stuff */ - -static const char version[] __initdata = - KERN_INFO "smctr.c: v1.4 7/12/00 by jschlst@samba.org\n"; -static const char cardname[] = "smctr"; - - -#define SMCTR_IO_EXTENT 20 - -#ifdef CONFIG_MCA_LEGACY -static unsigned int smctr_posid = 0x6ec6; -#endif - -static int ringspeed; - -/* SMC Name of the Adapter. */ -static char smctr_name[] = "SMC TokenCard"; -static char *smctr_model = "Unknown"; - -/* Use 0 for production, 1 for verification, 2 for debug, and - * 3 for very verbose debug. - */ -#ifndef SMCTR_DEBUG -#define SMCTR_DEBUG 1 -#endif -static unsigned int smctr_debug = SMCTR_DEBUG; - -/* smctr.c prototypes and functions are arranged alphabeticly - * for clearity, maintainability and pure old fashion fun. 
- */ -/* A */ -static int smctr_alloc_shared_memory(struct net_device *dev); - -/* B */ -static int smctr_bypass_state(struct net_device *dev); - -/* C */ -static int smctr_checksum_firmware(struct net_device *dev); -static int __init smctr_chk_isa(struct net_device *dev); -static int smctr_chg_rx_mask(struct net_device *dev); -static int smctr_clear_int(struct net_device *dev); -static int smctr_clear_trc_reset(int ioaddr); -static int smctr_close(struct net_device *dev); - -/* D */ -static int smctr_decode_firmware(struct net_device *dev, - const struct firmware *fw); -static int smctr_disable_16bit(struct net_device *dev); -static int smctr_disable_adapter_ctrl_store(struct net_device *dev); -static int smctr_disable_bic_int(struct net_device *dev); - -/* E */ -static int smctr_enable_16bit(struct net_device *dev); -static int smctr_enable_adapter_ctrl_store(struct net_device *dev); -static int smctr_enable_adapter_ram(struct net_device *dev); -static int smctr_enable_bic_int(struct net_device *dev); - -/* G */ -static int __init smctr_get_boardid(struct net_device *dev, int mca); -static int smctr_get_group_address(struct net_device *dev); -static int smctr_get_functional_address(struct net_device *dev); -static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev); -static int smctr_get_physical_drop_number(struct net_device *dev); -static __u8 *smctr_get_rx_pointer(struct net_device *dev, short queue); -static int smctr_get_station_id(struct net_device *dev); -static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue, - __u16 bytes_count); -static int smctr_get_upstream_neighbor_addr(struct net_device *dev); - -/* H */ -static int smctr_hardware_send_packet(struct net_device *dev, - struct net_local *tp); -/* I */ -static int smctr_init_acbs(struct net_device *dev); -static int smctr_init_adapter(struct net_device *dev); -static int smctr_init_card_real(struct net_device *dev); -static int smctr_init_rx_bdbs(struct net_device *dev); -static int smctr_init_rx_fcbs(struct net_device *dev); -static int smctr_init_shared_memory(struct net_device *dev); -static int smctr_init_tx_bdbs(struct net_device *dev); -static int smctr_init_tx_fcbs(struct net_device *dev); -static int smctr_internal_self_test(struct net_device *dev); -static irqreturn_t smctr_interrupt(int irq, void *dev_id); -static int smctr_issue_enable_int_cmd(struct net_device *dev, - __u16 interrupt_enable_mask); -static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, - __u16 ibits); -static int smctr_issue_init_timers_cmd(struct net_device *dev); -static int smctr_issue_init_txrx_cmd(struct net_device *dev); -static int smctr_issue_insert_cmd(struct net_device *dev); -static int smctr_issue_read_ring_status_cmd(struct net_device *dev); -static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt); -static int smctr_issue_remove_cmd(struct net_device *dev); -static int smctr_issue_resume_acb_cmd(struct net_device *dev); -static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue); -static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue); -static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue); -static int smctr_issue_test_internal_rom_cmd(struct net_device *dev); -static int smctr_issue_test_hic_cmd(struct net_device *dev); -static int smctr_issue_test_mac_reg_cmd(struct net_device *dev); -static int smctr_issue_trc_loopback_cmd(struct net_device *dev); -static int smctr_issue_tri_loopback_cmd(struct net_device 
*dev); -static int smctr_issue_write_byte_cmd(struct net_device *dev, - short aword_cnt, void *byte); -static int smctr_issue_write_word_cmd(struct net_device *dev, - short aword_cnt, void *word); - -/* J */ -static int smctr_join_complete_state(struct net_device *dev); - -/* L */ -static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev); -static int smctr_load_firmware(struct net_device *dev); -static int smctr_load_node_addr(struct net_device *dev); -static int smctr_lobe_media_test(struct net_device *dev); -static int smctr_lobe_media_test_cmd(struct net_device *dev); -static int smctr_lobe_media_test_state(struct net_device *dev); - -/* M */ -static int smctr_make_8025_hdr(struct net_device *dev, - MAC_HEADER *rmf, MAC_HEADER *tmf, __u16 ac_fc); -static int smctr_make_access_pri(struct net_device *dev, - MAC_SUB_VECTOR *tsv); -static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv); -static int smctr_make_auth_funct_class(struct net_device *dev, - MAC_SUB_VECTOR *tsv); -static int smctr_make_corr(struct net_device *dev, - MAC_SUB_VECTOR *tsv, __u16 correlator); -static int smctr_make_funct_addr(struct net_device *dev, - MAC_SUB_VECTOR *tsv); -static int smctr_make_group_addr(struct net_device *dev, - MAC_SUB_VECTOR *tsv); -static int smctr_make_phy_drop_num(struct net_device *dev, - MAC_SUB_VECTOR *tsv); -static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv); -static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv); -static int smctr_make_ring_station_status(struct net_device *dev, - MAC_SUB_VECTOR *tsv); -static int smctr_make_ring_station_version(struct net_device *dev, - MAC_SUB_VECTOR *tsv); -static int smctr_make_tx_status_code(struct net_device *dev, - MAC_SUB_VECTOR *tsv, __u16 tx_fstatus); -static int smctr_make_upstream_neighbor_addr(struct net_device *dev, - MAC_SUB_VECTOR *tsv); -static int smctr_make_wrap_data(struct net_device *dev, - MAC_SUB_VECTOR *tsv); - -/* O */ -static int smctr_open(struct net_device *dev); -static int smctr_open_tr(struct net_device *dev); - -/* P */ -struct net_device *smctr_probe(int unit); -static int __init smctr_probe1(struct net_device *dev, int ioaddr); -static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size, - struct net_device *dev, __u16 rx_status); - -/* R */ -static int smctr_ram_memory_test(struct net_device *dev); -static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf, - __u16 *correlator); -static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf, - __u16 *correlator); -static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf); -static int smctr_rcv_rq_addr_state_attch(struct net_device *dev, - MAC_HEADER *rmf, __u16 *correlator); -static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf, - __u16 *correlator); -static int smctr_reset_adapter(struct net_device *dev); -static int smctr_restart_tx_chain(struct net_device *dev, short queue); -static int smctr_ring_status_chg(struct net_device *dev); -static int smctr_rx_frame(struct net_device *dev); - -/* S */ -static int smctr_send_dat(struct net_device *dev); -static netdev_tx_t smctr_send_packet(struct sk_buff *skb, - struct net_device *dev); -static int smctr_send_lobe_media_test(struct net_device *dev); -static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf, - __u16 correlator); -static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf, - __u16 correlator); -static int smctr_send_rpt_state(struct net_device *dev, 
MAC_HEADER *rmf, - __u16 correlator); -static int smctr_send_rpt_tx_forward(struct net_device *dev, - MAC_HEADER *rmf, __u16 tx_fstatus); -static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf, - __u16 rcode, __u16 correlator); -static int smctr_send_rq_init(struct net_device *dev); -static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf, - __u16 *tx_fstatus); -static int smctr_set_auth_access_pri(struct net_device *dev, - MAC_SUB_VECTOR *rsv); -static int smctr_set_auth_funct_class(struct net_device *dev, - MAC_SUB_VECTOR *rsv); -static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv, - __u16 *correlator); -static int smctr_set_error_timer_value(struct net_device *dev, - MAC_SUB_VECTOR *rsv); -static int smctr_set_frame_forward(struct net_device *dev, - MAC_SUB_VECTOR *rsv, __u8 dc_sc); -static int smctr_set_local_ring_num(struct net_device *dev, - MAC_SUB_VECTOR *rsv); -static unsigned short smctr_set_ctrl_attention(struct net_device *dev); -static void smctr_set_multicast_list(struct net_device *dev); -static int smctr_set_page(struct net_device *dev, __u8 *buf); -static int smctr_set_phy_drop(struct net_device *dev, - MAC_SUB_VECTOR *rsv); -static int smctr_set_ring_speed(struct net_device *dev); -static int smctr_set_rx_look_ahead(struct net_device *dev); -static int smctr_set_trc_reset(int ioaddr); -static int smctr_setup_single_cmd(struct net_device *dev, - __u16 command, __u16 subcommand); -static int smctr_setup_single_cmd_w_data(struct net_device *dev, - __u16 command, __u16 subcommand); -static char *smctr_malloc(struct net_device *dev, __u16 size); -static int smctr_status_chg(struct net_device *dev); - -/* T */ -static void smctr_timeout(struct net_device *dev); -static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb, - __u16 queue); -static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue); -static unsigned short smctr_tx_move_frame(struct net_device *dev, - struct sk_buff *skb, __u8 *pbuff, unsigned int bytes); - -/* U */ -static int smctr_update_err_stats(struct net_device *dev); -static int smctr_update_rx_chain(struct net_device *dev, __u16 queue); -static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb, - __u16 queue); - -/* W */ -static int smctr_wait_cmd(struct net_device *dev); -static int smctr_wait_while_cbusy(struct net_device *dev); - -#define TO_256_BYTE_BOUNDRY(X) (((X + 0xff) & 0xff00) - X) -#define TO_PARAGRAPH_BOUNDRY(X) (((X + 0x0f) & 0xfff0) - X) -#define PARAGRAPH_BOUNDRY(X) smctr_malloc(dev, TO_PARAGRAPH_BOUNDRY(X)) - -/* Allocate Adapter Shared Memory. - * IMPORTANT NOTE: Any changes to this function MUST be mirrored in the - * function "get_num_rx_bdbs" below!!! - * - * Order of memory allocation: - * - * 0. Initial System Configuration Block Pointer - * 1. System Configuration Block - * 2. System Control Block - * 3. Action Command Block - * 4. Interrupt Status Block - * - * 5. MAC TX FCB'S - * 6. NON-MAC TX FCB'S - * 7. MAC TX BDB'S - * 8. NON-MAC TX BDB'S - * 9. MAC RX FCB'S - * 10. NON-MAC RX FCB'S - * 11. MAC RX BDB'S - * 12. NON-MAC RX BDB'S - * 13. MAC TX Data Buffer( 1, 256 byte buffer) - * 14. MAC RX Data Buffer( 1, 256 byte buffer) - * - * 15. NON-MAC TX Data Buffer - * 16. NON-MAC RX Data Buffer - */ -static int smctr_alloc_shared_memory(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_alloc_shared_memory\n", dev->name); - - /* Allocate initial System Control Block pointer. 
- * This pointer is located in the last page, last offset - 4. - */ - tp->iscpb_ptr = (ISCPBlock *)(tp->ram_access + ((__u32)64 * 0x400) - - (long)ISCP_BLOCK_SIZE); - - /* Allocate System Control Blocks. */ - tp->scgb_ptr = (SCGBlock *)smctr_malloc(dev, sizeof(SCGBlock)); - PARAGRAPH_BOUNDRY(tp->sh_mem_used); - - tp->sclb_ptr = (SCLBlock *)smctr_malloc(dev, sizeof(SCLBlock)); - PARAGRAPH_BOUNDRY(tp->sh_mem_used); - - tp->acb_head = (ACBlock *)smctr_malloc(dev, - sizeof(ACBlock)*tp->num_acbs); - PARAGRAPH_BOUNDRY(tp->sh_mem_used); - - tp->isb_ptr = (ISBlock *)smctr_malloc(dev, sizeof(ISBlock)); - PARAGRAPH_BOUNDRY(tp->sh_mem_used); - - tp->misc_command_data = (__u16 *)smctr_malloc(dev, MISC_DATA_SIZE); - PARAGRAPH_BOUNDRY(tp->sh_mem_used); - - /* Allocate transmit FCBs. */ - tp->tx_fcb_head[MAC_QUEUE] = (FCBlock *)smctr_malloc(dev, - sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE]); - - tp->tx_fcb_head[NON_MAC_QUEUE] = (FCBlock *)smctr_malloc(dev, - sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE]); - - tp->tx_fcb_head[BUG_QUEUE] = (FCBlock *)smctr_malloc(dev, - sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE]); - - /* Allocate transmit BDBs. */ - tp->tx_bdb_head[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, - sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE]); - - tp->tx_bdb_head[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, - sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE]); - - tp->tx_bdb_head[BUG_QUEUE] = (BDBlock *)smctr_malloc(dev, - sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE]); - - /* Allocate receive FCBs. */ - tp->rx_fcb_head[MAC_QUEUE] = (FCBlock *)smctr_malloc(dev, - sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE]); - - tp->rx_fcb_head[NON_MAC_QUEUE] = (FCBlock *)smctr_malloc(dev, - sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE]); - - /* Allocate receive BDBs. */ - tp->rx_bdb_head[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, - sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE]); - - tp->rx_bdb_end[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0); - - tp->rx_bdb_head[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, - sizeof(BDBlock) * tp->num_rx_bdbs[NON_MAC_QUEUE]); - - tp->rx_bdb_end[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0); - - /* Allocate MAC transmit buffers. - * MAC Tx Buffers doen't have to be on an ODD Boundary. - */ - tp->tx_buff_head[MAC_QUEUE] - = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[MAC_QUEUE]); - tp->tx_buff_curr[MAC_QUEUE] = tp->tx_buff_head[MAC_QUEUE]; - tp->tx_buff_end [MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0); - - /* Allocate BUG transmit buffers. */ - tp->tx_buff_head[BUG_QUEUE] - = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[BUG_QUEUE]); - tp->tx_buff_curr[BUG_QUEUE] = tp->tx_buff_head[BUG_QUEUE]; - tp->tx_buff_end[BUG_QUEUE] = (__u16 *)smctr_malloc(dev, 0); - - /* Allocate MAC receive data buffers. - * MAC Rx buffer doesn't have to be on a 256 byte boundary. - */ - tp->rx_buff_head[MAC_QUEUE] = (__u16 *)smctr_malloc(dev, - RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE]); - tp->rx_buff_end[MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0); - - /* Allocate Non-MAC transmit buffers. - * ?? For maximum Netware performance, put Tx Buffers on - * ODD Boundary and then restore malloc to Even Boundrys. - */ - smctr_malloc(dev, 1L); - tp->tx_buff_head[NON_MAC_QUEUE] - = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[NON_MAC_QUEUE]); - tp->tx_buff_curr[NON_MAC_QUEUE] = tp->tx_buff_head[NON_MAC_QUEUE]; - tp->tx_buff_end [NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0); - smctr_malloc(dev, 1L); - - /* Allocate Non-MAC receive data buffers. 
- * To guarantee a minimum of 256 contiguous memory to - * UM_Receive_Packet's lookahead pointer, before a page - * change or ring end is encountered, place each rx buffer on - * a 256 byte boundary. - */ - smctr_malloc(dev, TO_256_BYTE_BOUNDRY(tp->sh_mem_used)); - tp->rx_buff_head[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, - RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[NON_MAC_QUEUE]); - tp->rx_buff_end[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0); - - return 0; -} - -/* Enter Bypass state. */ -static int smctr_bypass_state(struct net_device *dev) -{ - int err; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_bypass_state\n", dev->name); - - err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, JS_BYPASS_STATE); - - return err; -} - -static int smctr_checksum_firmware(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - __u16 i, checksum = 0; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_checksum_firmware\n", dev->name); - - smctr_enable_adapter_ctrl_store(dev); - - for(i = 0; i < CS_RAM_SIZE; i += 2) - checksum += *((__u16 *)(tp->ram_access + i)); - - tp->microcode_version = *(__u16 *)(tp->ram_access - + CS_RAM_VERSION_OFFSET); - tp->microcode_version >>= 8; - - smctr_disable_adapter_ctrl_store(dev); - - if(checksum) - return checksum; - - return 0; -} - -static int __init smctr_chk_mca(struct net_device *dev) -{ -#ifdef CONFIG_MCA_LEGACY - struct net_local *tp = netdev_priv(dev); - int current_slot; - __u8 r1, r2, r3, r4, r5; - - current_slot = mca_find_unused_adapter(smctr_posid, 0); - if(current_slot == MCA_NOTFOUND) - return -ENODEV; - - mca_set_adapter_name(current_slot, smctr_name); - mca_mark_as_used(current_slot); - tp->slot_num = current_slot; - - r1 = mca_read_stored_pos(tp->slot_num, 2); - r2 = mca_read_stored_pos(tp->slot_num, 3); - - if(tp->slot_num) - outb(CNFG_POS_CONTROL_REG, (__u8)((tp->slot_num - 1) | CNFG_SLOT_ENABLE_BIT)); - else - outb(CNFG_POS_CONTROL_REG, (__u8)((tp->slot_num) | CNFG_SLOT_ENABLE_BIT)); - - r1 = inb(CNFG_POS_REG1); - r2 = inb(CNFG_POS_REG0); - - tp->bic_type = BIC_594_CHIP; - - /* IO */ - r2 = mca_read_stored_pos(tp->slot_num, 2); - r2 &= 0xF0; - dev->base_addr = ((__u16)r2 << 8) + (__u16)0x800; - request_region(dev->base_addr, SMCTR_IO_EXTENT, smctr_name); - - /* IRQ */ - r5 = mca_read_stored_pos(tp->slot_num, 5); - r5 &= 0xC; - switch(r5) - { - case 0: - dev->irq = 3; - break; - - case 0x4: - dev->irq = 4; - break; - - case 0x8: - dev->irq = 10; - break; - - default: - dev->irq = 15; - break; - } - if (request_irq(dev->irq, smctr_interrupt, IRQF_SHARED, smctr_name, dev)) { - release_region(dev->base_addr, SMCTR_IO_EXTENT); - return -ENODEV; - } - - /* Get RAM base */ - r3 = mca_read_stored_pos(tp->slot_num, 3); - tp->ram_base = ((__u32)(r3 & 0x7) << 13) + 0x0C0000; - if (r3 & 0x8) - tp->ram_base += 0x010000; - if (r3 & 0x80) - tp->ram_base += 0xF00000; - - /* Get Ram Size */ - r3 &= 0x30; - r3 >>= 4; - - tp->ram_usable = (__u16)CNFG_SIZE_8KB << r3; - tp->ram_size = (__u16)CNFG_SIZE_64KB; - tp->board_id |= TOKEN_MEDIA; - - r4 = mca_read_stored_pos(tp->slot_num, 4); - tp->rom_base = ((__u32)(r4 & 0x7) << 13) + 0x0C0000; - if (r4 & 0x8) - tp->rom_base += 0x010000; - - /* Get ROM size. */ - r4 >>= 4; - switch (r4) { - case 0: - tp->rom_size = CNFG_SIZE_8KB; - break; - case 1: - tp->rom_size = CNFG_SIZE_16KB; - break; - case 2: - tp->rom_size = CNFG_SIZE_32KB; - break; - default: - tp->rom_size = ROM_DISABLE; - } - - /* Get Media Type. 
*/ - r5 = mca_read_stored_pos(tp->slot_num, 5); - r5 &= CNFG_MEDIA_TYPE_MASK; - switch(r5) - { - case (0): - tp->media_type = MEDIA_STP_4; - break; - - case (1): - tp->media_type = MEDIA_STP_16; - break; - - case (3): - tp->media_type = MEDIA_UTP_16; - break; - - default: - tp->media_type = MEDIA_UTP_4; - break; - } - tp->media_menu = 14; - - r2 = mca_read_stored_pos(tp->slot_num, 2); - if(!(r2 & 0x02)) - tp->mode_bits |= EARLY_TOKEN_REL; - - /* Disable slot */ - outb(CNFG_POS_CONTROL_REG, 0); - - tp->board_id = smctr_get_boardid(dev, 1); - switch(tp->board_id & 0xffff) - { - case WD8115TA: - smctr_model = "8115T/A"; - break; - - case WD8115T: - if(tp->extra_info & CHIP_REV_MASK) - smctr_model = "8115T rev XE"; - else - smctr_model = "8115T rev XD"; - break; - - default: - smctr_model = "Unknown"; - break; - } - - return 0; -#else - return -1; -#endif /* CONFIG_MCA_LEGACY */ -} - -static int smctr_chg_rx_mask(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - int err = 0; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_chg_rx_mask\n", dev->name); - - smctr_enable_16bit(dev); - smctr_set_page(dev, (__u8 *)tp->ram_access); - - if(tp->mode_bits & LOOPING_MODE_MASK) - tp->config_word0 |= RX_OWN_BIT; - else - tp->config_word0 &= ~RX_OWN_BIT; - - if(tp->receive_mask & PROMISCUOUS_MODE) - tp->config_word0 |= PROMISCUOUS_BIT; - else - tp->config_word0 &= ~PROMISCUOUS_BIT; - - if(tp->receive_mask & ACCEPT_ERR_PACKETS) - tp->config_word0 |= SAVBAD_BIT; - else - tp->config_word0 &= ~SAVBAD_BIT; - - if(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES) - tp->config_word0 |= RXATMAC; - else - tp->config_word0 &= ~RXATMAC; - - if(tp->receive_mask & ACCEPT_MULTI_PROM) - tp->config_word1 |= MULTICAST_ADDRESS_BIT; - else - tp->config_word1 &= ~MULTICAST_ADDRESS_BIT; - - if(tp->receive_mask & ACCEPT_SOURCE_ROUTING_SPANNING) - tp->config_word1 |= SOURCE_ROUTING_SPANNING_BITS; - else - { - if(tp->receive_mask & ACCEPT_SOURCE_ROUTING) - tp->config_word1 |= SOURCE_ROUTING_EXPLORER_BIT; - else - tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS; - } - - if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_0, - &tp->config_word0))) - { - return err; - } - - if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_1, - &tp->config_word1))) - { - return err; - } - - smctr_disable_16bit(dev); - - return 0; -} - -static int smctr_clear_int(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - - outb((tp->trc_mask | CSR_CLRTINT), dev->base_addr + CSR); - - return 0; -} - -static int smctr_clear_trc_reset(int ioaddr) -{ - __u8 r; - - r = inb(ioaddr + MSR); - outb(~MSR_RST & r, ioaddr + MSR); - - return 0; -} - -/* - * The inverse routine to smctr_open(). - */ -static int smctr_close(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - struct sk_buff *skb; - int err; - - netif_stop_queue(dev); - - tp->cleanup = 1; - - /* Check to see if adapter is already in a closed state. 
*/ - if(tp->status != OPEN) - return 0; - - smctr_enable_16bit(dev); - smctr_set_page(dev, (__u8 *)tp->ram_access); - - if((err = smctr_issue_remove_cmd(dev))) - { - smctr_disable_16bit(dev); - return err; - } - - for(;;) - { - skb = skb_dequeue(&tp->SendSkbQueue); - if(skb == NULL) - break; - tp->QueueSkb++; - dev_kfree_skb(skb); - } - - - return 0; -} - -static int smctr_decode_firmware(struct net_device *dev, - const struct firmware *fw) -{ - struct net_local *tp = netdev_priv(dev); - short bit = 0x80, shift = 12; - DECODE_TREE_NODE *tree; - short branch, tsize; - __u16 buff = 0; - long weight; - __u8 *ucode; - __u16 *mem; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_decode_firmware\n", dev->name); - - weight = *(long *)(fw->data + WEIGHT_OFFSET); - tsize = *(__u8 *)(fw->data + TREE_SIZE_OFFSET); - tree = (DECODE_TREE_NODE *)(fw->data + TREE_OFFSET); - ucode = (__u8 *)(fw->data + TREE_OFFSET - + (tsize * sizeof(DECODE_TREE_NODE))); - mem = (__u16 *)(tp->ram_access); - - while(weight) - { - branch = ROOT; - while((tree + branch)->tag != LEAF && weight) - { - branch = *ucode & bit ? (tree + branch)->llink - : (tree + branch)->rlink; - - bit >>= 1; - weight--; - - if(bit == 0) - { - bit = 0x80; - ucode++; - } - } - - buff |= (tree + branch)->info << shift; - shift -= 4; - - if(shift < 0) - { - *(mem++) = SWAP_BYTES(buff); - buff = 0; - shift = 12; - } - } - - /* The following assumes the Control Store Memory has - * been initialized to zero. If the last partial word - * is zero, it will not be written. - */ - if(buff) - *(mem++) = SWAP_BYTES(buff); - - return 0; -} - -static int smctr_disable_16bit(struct net_device *dev) -{ - return 0; -} - -/* - * On Exit, Adapter is: - * 1. TRC is in a reset state and un-initialized. - * 2. Adapter memory is enabled. - * 3. Control Store memory is out of context (-WCSS is 1). - */ -static int smctr_disable_adapter_ctrl_store(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_disable_adapter_ctrl_store\n", dev->name); - - tp->trc_mask |= CSR_WCSS; - outb(tp->trc_mask, ioaddr + CSR); - - return 0; -} - -static int smctr_disable_bic_int(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - tp->trc_mask = CSR_MSK_ALL | CSR_MSKCBUSY - | CSR_MSKTINT | CSR_WCSS; - outb(tp->trc_mask, ioaddr + CSR); - - return 0; -} - -static int smctr_enable_16bit(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - __u8 r; - - if(tp->adapter_bus == BUS_ISA16_TYPE) - { - r = inb(dev->base_addr + LAAR); - outb((r | LAAR_MEM16ENB), dev->base_addr + LAAR); - } - - return 0; -} - -/* - * To enable the adapter control store memory: - * 1. Adapter must be in a RESET state. - * 2. Adapter memory must be enabled. - * 3. Control Store Memory is in context (-WCSS is 0). 
- */ -static int smctr_enable_adapter_ctrl_store(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_enable_adapter_ctrl_store\n", dev->name); - - smctr_set_trc_reset(ioaddr); - smctr_enable_adapter_ram(dev); - - tp->trc_mask &= ~CSR_WCSS; - outb(tp->trc_mask, ioaddr + CSR); - - return 0; -} - -static int smctr_enable_adapter_ram(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - __u8 r; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_enable_adapter_ram\n", dev->name); - - r = inb(ioaddr + MSR); - outb(MSR_MEMB | r, ioaddr + MSR); - - return 0; -} - -static int smctr_enable_bic_int(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - int ioaddr = dev->base_addr; - __u8 r; - - switch(tp->bic_type) - { - case (BIC_584_CHIP): - tp->trc_mask = CSR_MSKCBUSY | CSR_WCSS; - outb(tp->trc_mask, ioaddr + CSR); - r = inb(ioaddr + IRR); - outb(r | IRR_IEN, ioaddr + IRR); - break; - - case (BIC_594_CHIP): - tp->trc_mask = CSR_MSKCBUSY | CSR_WCSS; - outb(tp->trc_mask, ioaddr + CSR); - r = inb(ioaddr + IMCCR); - outb(r | IMCCR_EIL, ioaddr + IMCCR); - break; - } - - return 0; -} - -static int __init smctr_chk_isa(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - int ioaddr = dev->base_addr; - __u8 r1, r2, b, chksum = 0; - __u16 r; - int i; - int err = -ENODEV; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_chk_isa %#4x\n", dev->name, ioaddr); - - if((ioaddr & 0x1F) != 0) - goto out; - - /* Grab the region so that no one else tries to probe our ioports. */ - if (!request_region(ioaddr, SMCTR_IO_EXTENT, smctr_name)) { - err = -EBUSY; - goto out; - } - - /* Checksum SMC node address */ - for(i = 0; i < 8; i++) - { - b = inb(ioaddr + LAR0 + i); - chksum += b; - } - - if (chksum != NODE_ADDR_CKSUM) - goto out2; - - b = inb(ioaddr + BDID); - if(b != BRD_ID_8115T) - { - printk(KERN_ERR "%s: The adapter found is not supported\n", dev->name); - goto out2; - } - - /* Check for 8115T Board ID */ - r2 = 0; - for(r = 0; r < 8; r++) - { - r1 = inb(ioaddr + 0x8 + r); - r2 += r1; - } - - /* value of RegF adds up the sum to 0xFF */ - if((r2 != 0xFF) && (r2 != 0xEE)) - goto out2; - - /* Get adapter ID */ - tp->board_id = smctr_get_boardid(dev, 0); - switch(tp->board_id & 0xffff) - { - case WD8115TA: - smctr_model = "8115T/A"; - break; - - case WD8115T: - if(tp->extra_info & CHIP_REV_MASK) - smctr_model = "8115T rev XE"; - else - smctr_model = "8115T rev XD"; - break; - - default: - smctr_model = "Unknown"; - break; - } - - /* Store BIC type. 
*/ - tp->bic_type = BIC_584_CHIP; - tp->nic_type = NIC_825_CHIP; - - /* Copy Ram Size */ - tp->ram_usable = CNFG_SIZE_16KB; - tp->ram_size = CNFG_SIZE_64KB; - - /* Get 58x Ram Base */ - r1 = inb(ioaddr); - r1 &= 0x3F; - - r2 = inb(ioaddr + CNFG_LAAR_584); - r2 &= CNFG_LAAR_MASK; - r2 <<= 3; - r2 |= ((r1 & 0x38) >> 3); - - tp->ram_base = ((__u32)r2 << 16) + (((__u32)(r1 & 0x7)) << 13); - - /* Get 584 Irq */ - r1 = 0; - r1 = inb(ioaddr + CNFG_ICR_583); - r1 &= CNFG_ICR_IR2_584; - - r2 = inb(ioaddr + CNFG_IRR_583); - r2 &= CNFG_IRR_IRQS; /* 0x60 */ - r2 >>= 5; - - switch(r2) - { - case 0: - if(r1 == 0) - dev->irq = 2; - else - dev->irq = 10; - break; - - case 1: - if(r1 == 0) - dev->irq = 3; - else - dev->irq = 11; - break; - - case 2: - if(r1 == 0) - { - if(tp->extra_info & ALTERNATE_IRQ_BIT) - dev->irq = 5; - else - dev->irq = 4; - } - else - dev->irq = 15; - break; - - case 3: - if(r1 == 0) - dev->irq = 7; - else - dev->irq = 4; - break; - - default: - printk(KERN_ERR "%s: No IRQ found aborting\n", dev->name); - goto out2; - } - - if (request_irq(dev->irq, smctr_interrupt, IRQF_SHARED, smctr_name, dev)) - goto out2; - - /* Get 58x Rom Base */ - r1 = inb(ioaddr + CNFG_BIO_583); - r1 &= 0x3E; - r1 |= 0x40; - - tp->rom_base = (__u32)r1 << 13; - - /* Get 58x Rom Size */ - r1 = inb(ioaddr + CNFG_BIO_583); - r1 &= 0xC0; - if(r1 == 0) - tp->rom_size = ROM_DISABLE; - else - { - r1 >>= 6; - tp->rom_size = (__u16)CNFG_SIZE_8KB << r1; - } - - /* Get 58x Boot Status */ - r1 = inb(ioaddr + CNFG_GP2); - - tp->mode_bits &= (~BOOT_STATUS_MASK); - - if(r1 & CNFG_GP2_BOOT_NIBBLE) - tp->mode_bits |= BOOT_TYPE_1; - - /* Get 58x Zero Wait State */ - tp->mode_bits &= (~ZERO_WAIT_STATE_MASK); - - r1 = inb(ioaddr + CNFG_IRR_583); - - if(r1 & CNFG_IRR_ZWS) - tp->mode_bits |= ZERO_WAIT_STATE_8_BIT; - - if(tp->board_id & BOARD_16BIT) - { - r1 = inb(ioaddr + CNFG_LAAR_584); - - if(r1 & CNFG_LAAR_ZWS) - tp->mode_bits |= ZERO_WAIT_STATE_16_BIT; - } - - /* Get 584 Media Menu */ - tp->media_menu = 14; - r1 = inb(ioaddr + CNFG_IRR_583); - - tp->mode_bits &= 0xf8ff; /* (~CNFG_INTERFACE_TYPE_MASK) */ - if((tp->board_id & TOKEN_MEDIA) == TOKEN_MEDIA) - { - /* Get Advanced Features */ - if(((r1 & 0x6) >> 1) == 0x3) - tp->media_type |= MEDIA_UTP_16; - else - { - if(((r1 & 0x6) >> 1) == 0x2) - tp->media_type |= MEDIA_STP_16; - else - { - if(((r1 & 0x6) >> 1) == 0x1) - tp->media_type |= MEDIA_UTP_4; - - else - tp->media_type |= MEDIA_STP_4; - } - } - - r1 = inb(ioaddr + CNFG_GP2); - if(!(r1 & 0x2) ) /* GP2_ETRD */ - tp->mode_bits |= EARLY_TOKEN_REL; - - /* see if the chip is corrupted - if(smctr_read_584_chksum(ioaddr)) - { - printk(KERN_ERR "%s: EEPROM Checksum Failure\n", dev->name); - free_irq(dev->irq, dev); - goto out2; - } - */ - } - - return 0; - -out2: - release_region(ioaddr, SMCTR_IO_EXTENT); -out: - return err; -} - -static int __init smctr_get_boardid(struct net_device *dev, int mca) -{ - struct net_local *tp = netdev_priv(dev); - int ioaddr = dev->base_addr; - __u8 r, r1, IdByte; - __u16 BoardIdMask; - - tp->board_id = BoardIdMask = 0; - - if(mca) - { - BoardIdMask |= (MICROCHANNEL+INTERFACE_CHIP+TOKEN_MEDIA+PAGED_RAM+BOARD_16BIT); - tp->extra_info |= (INTERFACE_594_CHIP+RAM_SIZE_64K+NIC_825_BIT+ALTERNATE_IRQ_BIT+SLOT_16BIT); - } - else - { - BoardIdMask|=(INTERFACE_CHIP+TOKEN_MEDIA+PAGED_RAM+BOARD_16BIT); - tp->extra_info |= (INTERFACE_584_CHIP + RAM_SIZE_64K - + NIC_825_BIT + ALTERNATE_IRQ_BIT); - } - - if(!mca) - { - r = inb(ioaddr + BID_REG_1); - r &= 0x0c; - outb(r, ioaddr + BID_REG_1); - r = inb(ioaddr + 
BID_REG_1); - - if(r & BID_SIXTEEN_BIT_BIT) - { - tp->extra_info |= SLOT_16BIT; - tp->adapter_bus = BUS_ISA16_TYPE; - } - else - tp->adapter_bus = BUS_ISA8_TYPE; - } - else - tp->adapter_bus = BUS_MCA_TYPE; - - /* Get Board Id Byte */ - IdByte = inb(ioaddr + BID_BOARD_ID_BYTE); - - /* if Major version > 1.0 then - * return; - */ - if(IdByte & 0xF8) - return -1; - - r1 = inb(ioaddr + BID_REG_1); - r1 &= BID_ICR_MASK; - r1 |= BID_OTHER_BIT; - - outb(r1, ioaddr + BID_REG_1); - r1 = inb(ioaddr + BID_REG_3); - - r1 &= BID_EAR_MASK; - r1 |= BID_ENGR_PAGE; - - outb(r1, ioaddr + BID_REG_3); - r1 = inb(ioaddr + BID_REG_1); - r1 &= BID_ICR_MASK; - r1 |= (BID_RLA | BID_OTHER_BIT); - - outb(r1, ioaddr + BID_REG_1); - - r1 = inb(ioaddr + BID_REG_1); - while(r1 & BID_RECALL_DONE_MASK) - r1 = inb(ioaddr + BID_REG_1); - - r = inb(ioaddr + BID_LAR_0 + BID_REG_6); - - /* clear chip rev bits */ - tp->extra_info &= ~CHIP_REV_MASK; - tp->extra_info |= ((r & BID_EEPROM_CHIP_REV_MASK) << 6); - - r1 = inb(ioaddr + BID_REG_1); - r1 &= BID_ICR_MASK; - r1 |= BID_OTHER_BIT; - - outb(r1, ioaddr + BID_REG_1); - r1 = inb(ioaddr + BID_REG_3); - - r1 &= BID_EAR_MASK; - r1 |= BID_EA6; - - outb(r1, ioaddr + BID_REG_3); - r1 = inb(ioaddr + BID_REG_1); - - r1 &= BID_ICR_MASK; - r1 |= BID_RLA; - - outb(r1, ioaddr + BID_REG_1); - r1 = inb(ioaddr + BID_REG_1); - - while(r1 & BID_RECALL_DONE_MASK) - r1 = inb(ioaddr + BID_REG_1); - - return BoardIdMask; -} - -static int smctr_get_group_address(struct net_device *dev) -{ - smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_GROUP_ADDR); - - return smctr_wait_cmd(dev); -} - -static int smctr_get_functional_address(struct net_device *dev) -{ - smctr_issue_read_word_cmd(dev, RW_FUNCTIONAL_ADDR); - - return smctr_wait_cmd(dev); -} - -/* Calculate number of Non-MAC receive BDB's and data buffers. - * This function must simulate allocateing shared memory exactly - * as the allocate_shared_memory function above. - */ -static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int mem_used = 0; - - /* Allocate System Control Blocks. */ - mem_used += sizeof(SCGBlock); - - mem_used += TO_PARAGRAPH_BOUNDRY(mem_used); - mem_used += sizeof(SCLBlock); - - mem_used += TO_PARAGRAPH_BOUNDRY(mem_used); - mem_used += sizeof(ACBlock) * tp->num_acbs; - - mem_used += TO_PARAGRAPH_BOUNDRY(mem_used); - mem_used += sizeof(ISBlock); - - mem_used += TO_PARAGRAPH_BOUNDRY(mem_used); - mem_used += MISC_DATA_SIZE; - - /* Allocate transmit FCB's. */ - mem_used += TO_PARAGRAPH_BOUNDRY(mem_used); - - mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE]; - mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE]; - mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE]; - - /* Allocate transmit BDBs. */ - mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE]; - mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE]; - mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE]; - - /* Allocate receive FCBs. */ - mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE]; - mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE]; - - /* Allocate receive BDBs. */ - mem_used += sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE]; - - /* Allocate MAC transmit buffers. - * MAC transmit buffers don't have to be on an ODD Boundary. - */ - mem_used += tp->tx_buff_size[MAC_QUEUE]; - - /* Allocate BUG transmit buffers. */ - mem_used += tp->tx_buff_size[BUG_QUEUE]; - - /* Allocate MAC receive data buffers. 
- * MAC receive buffers don't have to be on a 256 byte boundary. - */ - mem_used += RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE]; - - /* Allocate Non-MAC transmit buffers. - * For maximum Netware performance, put Tx Buffers on - * ODD Boundary,and then restore malloc to Even Boundrys. - */ - mem_used += 1L; - mem_used += tp->tx_buff_size[NON_MAC_QUEUE]; - mem_used += 1L; - - /* CALCULATE NUMBER OF NON-MAC RX BDB'S - * AND NON-MAC RX DATA BUFFERS - * - * Make sure the mem_used offset at this point is the - * same as in allocate_shared memory or the following - * boundary adjustment will be incorrect (i.e. not allocating - * the non-mac receive buffers above cannot change the 256 - * byte offset). - * - * Since this cannot be guaranteed, adding the full 256 bytes - * to the amount of shared memory used at this point will guaranteed - * that the rx data buffers do not overflow shared memory. - */ - mem_used += 0x100; - - return (0xffff - mem_used) / (RX_DATA_BUFFER_SIZE + sizeof(BDBlock)); -} - -static int smctr_get_physical_drop_number(struct net_device *dev) -{ - smctr_issue_read_word_cmd(dev, RW_PHYSICAL_DROP_NUMBER); - - return smctr_wait_cmd(dev); -} - -static __u8 * smctr_get_rx_pointer(struct net_device *dev, short queue) -{ - struct net_local *tp = netdev_priv(dev); - BDBlock *bdb; - - bdb = (BDBlock *)((__u32)tp->ram_access - + (__u32)(tp->rx_fcb_curr[queue]->trc_bdb_ptr)); - - tp->rx_fcb_curr[queue]->bdb_ptr = bdb; - - return (__u8 *)bdb->data_block_ptr; -} - -static int smctr_get_station_id(struct net_device *dev) -{ - smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_MAC_ADDRESS); - - return smctr_wait_cmd(dev); -} - -/* - * Get the current statistics. This may be called with the card open - * or closed. - */ -static struct net_device_stats *smctr_get_stats(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - - return (struct net_device_stats *)&tp->MacStat; -} - -static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue, - __u16 bytes_count) -{ - struct net_local *tp = netdev_priv(dev); - FCBlock *pFCB; - BDBlock *pbdb; - unsigned short alloc_size; - unsigned short *temp; - - if(smctr_debug > 20) - printk(KERN_DEBUG "smctr_get_tx_fcb\n"); - - /* check if there is enough FCB blocks */ - if(tp->num_tx_fcbs_used[queue] >= tp->num_tx_fcbs[queue]) - return (FCBlock *)(-1L); - - /* round off the input pkt size to the nearest even number */ - alloc_size = (bytes_count + 1) & 0xfffe; - - /* check if enough mem */ - if((tp->tx_buff_used[queue] + alloc_size) > tp->tx_buff_size[queue]) - return (FCBlock *)(-1L); - - /* check if past the end ; - * if exactly enough mem to end of ring, alloc from front. 
- * this avoids update of curr when curr = end - */ - if(((unsigned long)(tp->tx_buff_curr[queue]) + alloc_size) - >= (unsigned long)(tp->tx_buff_end[queue])) - { - /* check if enough memory from ring head */ - alloc_size = alloc_size + - (__u16)((__u32)tp->tx_buff_end[queue] - - (__u32)tp->tx_buff_curr[queue]); - - if((tp->tx_buff_used[queue] + alloc_size) - > tp->tx_buff_size[queue]) - { - return (FCBlock *)(-1L); - } - - /* ring wrap */ - tp->tx_buff_curr[queue] = tp->tx_buff_head[queue]; - } - - tp->tx_buff_used[queue] += alloc_size; - tp->num_tx_fcbs_used[queue]++; - tp->tx_fcb_curr[queue]->frame_length = bytes_count; - tp->tx_fcb_curr[queue]->memory_alloc = alloc_size; - temp = tp->tx_buff_curr[queue]; - tp->tx_buff_curr[queue] - = (__u16 *)((__u32)temp + (__u32)((bytes_count + 1) & 0xfffe)); - - pbdb = tp->tx_fcb_curr[queue]->bdb_ptr; - pbdb->buffer_length = bytes_count; - pbdb->data_block_ptr = temp; - pbdb->trc_data_block_ptr = TRC_POINTER(temp); - - pFCB = tp->tx_fcb_curr[queue]; - tp->tx_fcb_curr[queue] = tp->tx_fcb_curr[queue]->next_ptr; - - return pFCB; -} - -static int smctr_get_upstream_neighbor_addr(struct net_device *dev) -{ - smctr_issue_read_word_cmd(dev, RW_UPSTREAM_NEIGHBOR_ADDRESS); - - return smctr_wait_cmd(dev); -} - -static int smctr_hardware_send_packet(struct net_device *dev, - struct net_local *tp) -{ - struct tr_statistics *tstat = &tp->MacStat; - struct sk_buff *skb; - FCBlock *fcb; - - if(smctr_debug > 10) - printk(KERN_DEBUG"%s: smctr_hardware_send_packet\n", dev->name); - - if(tp->status != OPEN) - return -1; - - if(tp->monitor_state_ready != 1) - return -1; - - for(;;) - { - /* Send first buffer from queue */ - skb = skb_dequeue(&tp->SendSkbQueue); - if(skb == NULL) - return -1; - - tp->QueueSkb++; - - if(skb->len < SMC_HEADER_SIZE || skb->len > tp->max_packet_size) - return -1; - - smctr_enable_16bit(dev); - smctr_set_page(dev, (__u8 *)tp->ram_access); - - if((fcb = smctr_get_tx_fcb(dev, NON_MAC_QUEUE, skb->len)) - == (FCBlock *)(-1L)) - { - smctr_disable_16bit(dev); - return -1; - } - - smctr_tx_move_frame(dev, skb, - (__u8 *)fcb->bdb_ptr->data_block_ptr, skb->len); - - smctr_set_page(dev, (__u8 *)fcb); - - smctr_trc_send_packet(dev, fcb, NON_MAC_QUEUE); - dev_kfree_skb(skb); - - tstat->tx_packets++; - - smctr_disable_16bit(dev); - } - - return 0; -} - -static int smctr_init_acbs(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int i; - ACBlock *acb; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_init_acbs\n", dev->name); - - acb = tp->acb_head; - acb->cmd_done_status = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL); - acb->cmd_info = ACB_CHAIN_END; - acb->cmd = 0; - acb->subcmd = 0; - acb->data_offset_lo = 0; - acb->data_offset_hi = 0; - acb->next_ptr - = (ACBlock *)(((char *)acb) + sizeof(ACBlock)); - acb->trc_next_ptr = TRC_POINTER(acb->next_ptr); - - for(i = 1; i < tp->num_acbs; i++) - { - acb = acb->next_ptr; - acb->cmd_done_status - = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL); - acb->cmd_info = ACB_CHAIN_END; - acb->cmd = 0; - acb->subcmd = 0; - acb->data_offset_lo = 0; - acb->data_offset_hi = 0; - acb->next_ptr - = (ACBlock *)(((char *)acb) + sizeof(ACBlock)); - acb->trc_next_ptr = TRC_POINTER(acb->next_ptr); - } - - acb->next_ptr = tp->acb_head; - acb->trc_next_ptr = TRC_POINTER(tp->acb_head); - tp->acb_next = tp->acb_head->next_ptr; - tp->acb_curr = tp->acb_head->next_ptr; - tp->num_acbs_used = 0; - - return 0; -} - -static int smctr_init_adapter(struct net_device *dev) -{ - struct net_local *tp = 
netdev_priv(dev); - int err; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_init_adapter\n", dev->name); - - tp->status = CLOSED; - tp->page_offset_mask = (tp->ram_usable * 1024) - 1; - skb_queue_head_init(&tp->SendSkbQueue); - tp->QueueSkb = MAX_TX_QUEUE; - - if(!(tp->group_address_0 & 0x0080)) - tp->group_address_0 |= 0x00C0; - - if(!(tp->functional_address_0 & 0x00C0)) - tp->functional_address_0 |= 0x00C0; - - tp->functional_address[0] &= 0xFF7F; - - if(tp->authorized_function_classes == 0) - tp->authorized_function_classes = 0x7FFF; - - if(tp->authorized_access_priority == 0) - tp->authorized_access_priority = 0x06; - - smctr_disable_bic_int(dev); - smctr_set_trc_reset(dev->base_addr); - - smctr_enable_16bit(dev); - smctr_set_page(dev, (__u8 *)tp->ram_access); - - if(smctr_checksum_firmware(dev)) - { - printk(KERN_ERR "%s: Previously loaded firmware is missing\n",dev->name); - return -ENOENT; - } - - if((err = smctr_ram_memory_test(dev))) - { - printk(KERN_ERR "%s: RAM memory test failed.\n", dev->name); - return -EIO; - } - - smctr_set_rx_look_ahead(dev); - smctr_load_node_addr(dev); - - /* Initialize adapter for Internal Self Test. */ - smctr_reset_adapter(dev); - if((err = smctr_init_card_real(dev))) - { - printk(KERN_ERR "%s: Initialization of card failed (%d)\n", - dev->name, err); - return -EINVAL; - } - - /* This routine clobbers the TRC's internal registers. */ - if((err = smctr_internal_self_test(dev))) - { - printk(KERN_ERR "%s: Card failed internal self test (%d)\n", - dev->name, err); - return -EINVAL; - } - - /* Re-Initialize adapter's internal registers */ - smctr_reset_adapter(dev); - if((err = smctr_init_card_real(dev))) - { - printk(KERN_ERR "%s: Initialization of card failed (%d)\n", - dev->name, err); - return -EINVAL; - } - - smctr_enable_bic_int(dev); - - if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK))) - return err; - - smctr_disable_16bit(dev); - - return 0; -} - -static int smctr_init_card_real(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - int err = 0; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_init_card_real\n", dev->name); - - tp->sh_mem_used = 0; - tp->num_acbs = NUM_OF_ACBS; - - /* Range Check Max Packet Size */ - if(tp->max_packet_size < 256) - tp->max_packet_size = 256; - else - { - if(tp->max_packet_size > NON_MAC_TX_BUFFER_MEMORY) - tp->max_packet_size = NON_MAC_TX_BUFFER_MEMORY; - } - - tp->num_of_tx_buffs = (NON_MAC_TX_BUFFER_MEMORY - / tp->max_packet_size) - 1; - - if(tp->num_of_tx_buffs > NUM_NON_MAC_TX_FCBS) - tp->num_of_tx_buffs = NUM_NON_MAC_TX_FCBS; - else - { - if(tp->num_of_tx_buffs == 0) - tp->num_of_tx_buffs = 1; - } - - /* Tx queue constants */ - tp->num_tx_fcbs [BUG_QUEUE] = NUM_BUG_TX_FCBS; - tp->num_tx_bdbs [BUG_QUEUE] = NUM_BUG_TX_BDBS; - tp->tx_buff_size [BUG_QUEUE] = BUG_TX_BUFFER_MEMORY; - tp->tx_buff_used [BUG_QUEUE] = 0; - tp->tx_queue_status [BUG_QUEUE] = NOT_TRANSMITING; - - tp->num_tx_fcbs [MAC_QUEUE] = NUM_MAC_TX_FCBS; - tp->num_tx_bdbs [MAC_QUEUE] = NUM_MAC_TX_BDBS; - tp->tx_buff_size [MAC_QUEUE] = MAC_TX_BUFFER_MEMORY; - tp->tx_buff_used [MAC_QUEUE] = 0; - tp->tx_queue_status [MAC_QUEUE] = NOT_TRANSMITING; - - tp->num_tx_fcbs [NON_MAC_QUEUE] = NUM_NON_MAC_TX_FCBS; - tp->num_tx_bdbs [NON_MAC_QUEUE] = NUM_NON_MAC_TX_BDBS; - tp->tx_buff_size [NON_MAC_QUEUE] = NON_MAC_TX_BUFFER_MEMORY; - tp->tx_buff_used [NON_MAC_QUEUE] = 0; - tp->tx_queue_status [NON_MAC_QUEUE] = NOT_TRANSMITING; - - /* Receive Queue Constants */ - tp->num_rx_fcbs[MAC_QUEUE] = NUM_MAC_RX_FCBS; 
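The transmit-buffer management deleted in this hunk (smctr_get_tx_fcb above) is a bump allocator over a fixed ring: the request is rounded up to an even size, and if it would run past the end of the ring the unusable tail is charged to the queue and the allocation wraps back to the head. A minimal, self-contained sketch of that pattern, using hypothetical names (struct tx_ring, tx_alloc) rather than the driver's FCB/BDB types:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical stand-in for one of the driver's per-queue Tx buffer rings. */
    struct tx_ring {
            uint8_t *head;   /* start of the ring                    */
            uint8_t *end;    /* one past the last usable byte        */
            uint8_t *curr;   /* next allocation point                */
            size_t   used;   /* bytes currently charged to the ring  */
            size_t   size;   /* total capacity                       */
    };

    /* Return a pointer to 'len' bytes (rounded up to an even count), or NULL
     * if the ring is full.  When the request would cross the end of the ring,
     * the tail gap is also charged to 'used' and the allocation wraps to the
     * head, mirroring the tx_buff_curr/tx_buff_end handling removed above.
     */
    static uint8_t *tx_alloc(struct tx_ring *r, size_t len)
    {
            size_t alloc = (len + 1) & ~(size_t)1;
            uint8_t *p;

            if (r->used + alloc > r->size)
                    return NULL;

            if (r->curr + alloc >= r->end) {
                    alloc += (size_t)(r->end - r->curr);    /* waste the tail */
                    if (r->used + alloc > r->size)
                            return NULL;
                    r->curr = r->head;                      /* wrap to head   */
            }

            r->used += alloc;
            p = r->curr;
            r->curr += (len + 1) & ~(size_t)1;
            return p;
    }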
- tp->num_rx_bdbs[MAC_QUEUE] = NUM_MAC_RX_BDBS; - - if(tp->extra_info & CHIP_REV_MASK) - tp->num_rx_fcbs[NON_MAC_QUEUE] = 78; /* 825 Rev. XE */ - else - tp->num_rx_fcbs[NON_MAC_QUEUE] = 7; /* 825 Rev. XD */ - - tp->num_rx_bdbs[NON_MAC_QUEUE] = smctr_get_num_rx_bdbs(dev); - - smctr_alloc_shared_memory(dev); - smctr_init_shared_memory(dev); - - if((err = smctr_issue_init_timers_cmd(dev))) - return err; - - if((err = smctr_issue_init_txrx_cmd(dev))) - { - printk(KERN_ERR "%s: Hardware failure\n", dev->name); - return err; - } - - return 0; -} - -static int smctr_init_rx_bdbs(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int i, j; - BDBlock *bdb; - __u16 *buf; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_init_rx_bdbs\n", dev->name); - - for(i = 0; i < NUM_RX_QS_USED; i++) - { - bdb = tp->rx_bdb_head[i]; - buf = tp->rx_buff_head[i]; - bdb->info = (BDB_CHAIN_END | BDB_NO_WARNING); - bdb->buffer_length = RX_DATA_BUFFER_SIZE; - bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock)); - bdb->data_block_ptr = buf; - bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr); - - if(i == NON_MAC_QUEUE) - bdb->trc_data_block_ptr = RX_BUFF_TRC_POINTER(buf); - else - bdb->trc_data_block_ptr = TRC_POINTER(buf); - - for(j = 1; j < tp->num_rx_bdbs[i]; j++) - { - bdb->next_ptr->back_ptr = bdb; - bdb = bdb->next_ptr; - buf = (__u16 *)((char *)buf + RX_DATA_BUFFER_SIZE); - bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING); - bdb->buffer_length = RX_DATA_BUFFER_SIZE; - bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock)); - bdb->data_block_ptr = buf; - bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr); - - if(i == NON_MAC_QUEUE) - bdb->trc_data_block_ptr = RX_BUFF_TRC_POINTER(buf); - else - bdb->trc_data_block_ptr = TRC_POINTER(buf); - } - - bdb->next_ptr = tp->rx_bdb_head[i]; - bdb->trc_next_ptr = TRC_POINTER(tp->rx_bdb_head[i]); - - tp->rx_bdb_head[i]->back_ptr = bdb; - tp->rx_bdb_curr[i] = tp->rx_bdb_head[i]->next_ptr; - } - - return 0; -} - -static int smctr_init_rx_fcbs(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int i, j; - FCBlock *fcb; - - for(i = 0; i < NUM_RX_QS_USED; i++) - { - fcb = tp->rx_fcb_head[i]; - fcb->frame_status = 0; - fcb->frame_length = 0; - fcb->info = FCB_CHAIN_END; - fcb->next_ptr = (FCBlock *)(((char*)fcb) + sizeof(FCBlock)); - if(i == NON_MAC_QUEUE) - fcb->trc_next_ptr = RX_FCB_TRC_POINTER(fcb->next_ptr); - else - fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr); - - for(j = 1; j < tp->num_rx_fcbs[i]; j++) - { - fcb->next_ptr->back_ptr = fcb; - fcb = fcb->next_ptr; - fcb->frame_status = 0; - fcb->frame_length = 0; - fcb->info = FCB_WARNING; - fcb->next_ptr - = (FCBlock *)(((char *)fcb) + sizeof(FCBlock)); - - if(i == NON_MAC_QUEUE) - fcb->trc_next_ptr - = RX_FCB_TRC_POINTER(fcb->next_ptr); - else - fcb->trc_next_ptr - = TRC_POINTER(fcb->next_ptr); - } - - fcb->next_ptr = tp->rx_fcb_head[i]; - - if(i == NON_MAC_QUEUE) - fcb->trc_next_ptr = RX_FCB_TRC_POINTER(fcb->next_ptr); - else - fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr); - - tp->rx_fcb_head[i]->back_ptr = fcb; - tp->rx_fcb_curr[i] = tp->rx_fcb_head[i]->next_ptr; - } - - return 0; -} - -static int smctr_init_shared_memory(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int i; - __u32 *iscpb; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_init_shared_memory\n", dev->name); - - smctr_set_page(dev, (__u8 *)(unsigned int)tp->iscpb_ptr); - - /* Initialize Initial System Configuration Point. 
(ISCP) */ - iscpb = (__u32 *)PAGE_POINTER(&tp->iscpb_ptr->trc_scgb_ptr); - *iscpb = (__u32)(SWAP_WORDS(TRC_POINTER(tp->scgb_ptr))); - - smctr_set_page(dev, (__u8 *)tp->ram_access); - - /* Initialize System Configuration Pointers. (SCP) */ - tp->scgb_ptr->config = (SCGB_ADDRESS_POINTER_FORMAT - | SCGB_MULTI_WORD_CONTROL | SCGB_DATA_FORMAT - | SCGB_BURST_LENGTH); - - tp->scgb_ptr->trc_sclb_ptr = TRC_POINTER(tp->sclb_ptr); - tp->scgb_ptr->trc_acb_ptr = TRC_POINTER(tp->acb_head); - tp->scgb_ptr->trc_isb_ptr = TRC_POINTER(tp->isb_ptr); - tp->scgb_ptr->isbsiz = (sizeof(ISBlock)) - 2; - - /* Initialize System Control Block. (SCB) */ - tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_NOP; - tp->sclb_ptr->iack_code = 0; - tp->sclb_ptr->resume_control = 0; - tp->sclb_ptr->int_mask_control = 0; - tp->sclb_ptr->int_mask_state = 0; - - /* Initialize Interrupt Status Block. (ISB) */ - for(i = 0; i < NUM_OF_INTERRUPTS; i++) - { - tp->isb_ptr->IStatus[i].IType = 0xf0; - tp->isb_ptr->IStatus[i].ISubtype = 0; - } - - tp->current_isb_index = 0; - - /* Initialize Action Command Block. (ACB) */ - smctr_init_acbs(dev); - - /* Initialize transmit FCB's and BDB's. */ - smctr_link_tx_fcbs_to_bdbs(dev); - smctr_init_tx_bdbs(dev); - smctr_init_tx_fcbs(dev); - - /* Initialize receive FCB's and BDB's. */ - smctr_init_rx_bdbs(dev); - smctr_init_rx_fcbs(dev); - - return 0; -} - -static int smctr_init_tx_bdbs(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int i, j; - BDBlock *bdb; - - for(i = 0; i < NUM_TX_QS_USED; i++) - { - bdb = tp->tx_bdb_head[i]; - bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING); - bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock)); - bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr); - - for(j = 1; j < tp->num_tx_bdbs[i]; j++) - { - bdb->next_ptr->back_ptr = bdb; - bdb = bdb->next_ptr; - bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING); - bdb->next_ptr - = (BDBlock *)(((char *)bdb) + sizeof( BDBlock)); bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr); - } - - bdb->next_ptr = tp->tx_bdb_head[i]; - bdb->trc_next_ptr = TRC_POINTER(tp->tx_bdb_head[i]); - tp->tx_bdb_head[i]->back_ptr = bdb; - } - - return 0; -} - -static int smctr_init_tx_fcbs(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int i, j; - FCBlock *fcb; - - for(i = 0; i < NUM_TX_QS_USED; i++) - { - fcb = tp->tx_fcb_head[i]; - fcb->frame_status = 0; - fcb->frame_length = 0; - fcb->info = FCB_CHAIN_END; - fcb->next_ptr = (FCBlock *)(((char *)fcb) + sizeof(FCBlock)); - fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr); - - for(j = 1; j < tp->num_tx_fcbs[i]; j++) - { - fcb->next_ptr->back_ptr = fcb; - fcb = fcb->next_ptr; - fcb->frame_status = 0; - fcb->frame_length = 0; - fcb->info = FCB_CHAIN_END; - fcb->next_ptr - = (FCBlock *)(((char *)fcb) + sizeof(FCBlock)); - fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr); - } - - fcb->next_ptr = tp->tx_fcb_head[i]; - fcb->trc_next_ptr = TRC_POINTER(tp->tx_fcb_head[i]); - - tp->tx_fcb_head[i]->back_ptr = fcb; - tp->tx_fcb_end[i] = tp->tx_fcb_head[i]->next_ptr; - tp->tx_fcb_curr[i] = tp->tx_fcb_head[i]->next_ptr; - tp->num_tx_fcbs_used[i] = 0; - } - - return 0; -} - -static int smctr_internal_self_test(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - int err; - - if((err = smctr_issue_test_internal_rom_cmd(dev))) - return err; - - if((err = smctr_wait_cmd(dev))) - return err; - - if(tp->acb_head->cmd_done_status & 0xff) - return -1; - - if((err = smctr_issue_test_hic_cmd(dev))) - return err; - - if((err = 
smctr_wait_cmd(dev))) - return err; - - if(tp->acb_head->cmd_done_status & 0xff) - return -1; - - if((err = smctr_issue_test_mac_reg_cmd(dev))) - return err; - - if((err = smctr_wait_cmd(dev))) - return err; - - if(tp->acb_head->cmd_done_status & 0xff) - return -1; - - return 0; -} - -/* - * The typical workload of the driver: Handle the network interface interrupts. - */ -static irqreturn_t smctr_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct net_local *tp; - int ioaddr; - __u16 interrupt_unmask_bits = 0, interrupt_ack_code = 0xff00; - __u16 err1, err = NOT_MY_INTERRUPT; - __u8 isb_type, isb_subtype; - __u16 isb_index; - - ioaddr = dev->base_addr; - tp = netdev_priv(dev); - - if(tp->status == NOT_INITIALIZED) - return IRQ_NONE; - - spin_lock(&tp->lock); - - smctr_disable_bic_int(dev); - smctr_enable_16bit(dev); - - smctr_clear_int(dev); - - /* First read the LSB */ - while((tp->isb_ptr->IStatus[tp->current_isb_index].IType & 0xf0) == 0) - { - isb_index = tp->current_isb_index; - isb_type = tp->isb_ptr->IStatus[isb_index].IType; - isb_subtype = tp->isb_ptr->IStatus[isb_index].ISubtype; - - (tp->current_isb_index)++; - if(tp->current_isb_index == NUM_OF_INTERRUPTS) - tp->current_isb_index = 0; - - if(isb_type >= 0x10) - { - smctr_disable_16bit(dev); - spin_unlock(&tp->lock); - return IRQ_HANDLED; - } - - err = HARDWARE_FAILED; - interrupt_ack_code = isb_index; - tp->isb_ptr->IStatus[isb_index].IType |= 0xf0; - - interrupt_unmask_bits |= (1 << (__u16)isb_type); - - switch(isb_type) - { - case ISB_IMC_MAC_TYPE_3: - smctr_disable_16bit(dev); - - switch(isb_subtype) - { - case 0: - tp->monitor_state = MS_MONITOR_FSM_INACTIVE; - break; - - case 1: - tp->monitor_state = MS_REPEAT_BEACON_STATE; - break; - - case 2: - tp->monitor_state = MS_REPEAT_CLAIM_TOKEN_STATE; - break; - - case 3: - tp->monitor_state = MS_TRANSMIT_CLAIM_TOKEN_STATE; break; - - case 4: - tp->monitor_state = MS_STANDBY_MONITOR_STATE; - break; - - case 5: - tp->monitor_state = MS_TRANSMIT_BEACON_STATE; - break; - - case 6: - tp->monitor_state = MS_ACTIVE_MONITOR_STATE; - break; - - case 7: - tp->monitor_state = MS_TRANSMIT_RING_PURGE_STATE; - break; - - case 8: /* diagnostic state */ - break; - - case 9: - tp->monitor_state = MS_BEACON_TEST_STATE; - if(smctr_lobe_media_test(dev)) - { - tp->ring_status_flags = RING_STATUS_CHANGED; - tp->ring_status = AUTO_REMOVAL_ERROR; - smctr_ring_status_chg(dev); - smctr_bypass_state(dev); - } - else - smctr_issue_insert_cmd(dev); - break; - - /* case 0x0a-0xff, illegal states */ - default: - break; - } - - tp->ring_status_flags = MONITOR_STATE_CHANGED; - err = smctr_ring_status_chg(dev); - - smctr_enable_16bit(dev); - break; - - /* Type 0x02 - MAC Error Counters Interrupt - * One or more MAC Error Counter is half full - * MAC Error Counters - * Lost_FR_Error_Counter - * RCV_Congestion_Counter - * FR_copied_Error_Counter - * FREQ_Error_Counter - * Token_Error_Counter - * Line_Error_Counter - * Internal_Error_Count - */ - case ISB_IMC_MAC_ERROR_COUNTERS: - /* Read 802.5 Error Counters */ - err = smctr_issue_read_ring_status_cmd(dev); - break; - - /* Type 0x04 - MAC Type 2 Interrupt - * HOST needs to enqueue MAC Frame for transmission - * SubType Bit 15 - RQ_INIT_PDU( Request Initialization) * Changed from RQ_INIT_PDU to - * TRC_Status_Changed_Indicate - */ - case ISB_IMC_MAC_TYPE_2: - err = smctr_issue_read_ring_status_cmd(dev); - break; - - - /* Type 0x05 - TX Frame Interrupt (FI). 
*/ - case ISB_IMC_TX_FRAME: - /* BUG QUEUE for TRC stuck receive BUG */ - if(isb_subtype & TX_PENDING_PRIORITY_2) - { - if((err = smctr_tx_complete(dev, BUG_QUEUE)) != SUCCESS) - break; - } - - /* NON-MAC frames only */ - if(isb_subtype & TX_PENDING_PRIORITY_1) - { - if((err = smctr_tx_complete(dev, NON_MAC_QUEUE)) != SUCCESS) - break; - } - - /* MAC frames only */ - if(isb_subtype & TX_PENDING_PRIORITY_0) - err = smctr_tx_complete(dev, MAC_QUEUE); break; - - /* Type 0x06 - TX END OF QUEUE (FE) */ - case ISB_IMC_END_OF_TX_QUEUE: - /* BUG queue */ - if(isb_subtype & TX_PENDING_PRIORITY_2) - { - /* ok to clear Receive FIFO overrun - * imask send_BUG now completes. - */ - interrupt_unmask_bits |= 0x800; - - tp->tx_queue_status[BUG_QUEUE] = NOT_TRANSMITING; - if((err = smctr_tx_complete(dev, BUG_QUEUE)) != SUCCESS) - break; - if((err = smctr_restart_tx_chain(dev, BUG_QUEUE)) != SUCCESS) - break; - } - - /* NON-MAC queue only */ - if(isb_subtype & TX_PENDING_PRIORITY_1) - { - tp->tx_queue_status[NON_MAC_QUEUE] = NOT_TRANSMITING; - if((err = smctr_tx_complete(dev, NON_MAC_QUEUE)) != SUCCESS) - break; - if((err = smctr_restart_tx_chain(dev, NON_MAC_QUEUE)) != SUCCESS) - break; - } - - /* MAC queue only */ - if(isb_subtype & TX_PENDING_PRIORITY_0) - { - tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING; - if((err = smctr_tx_complete(dev, MAC_QUEUE)) != SUCCESS) - break; - - err = smctr_restart_tx_chain(dev, MAC_QUEUE); - } - break; - - /* Type 0x07 - NON-MAC RX Resource Interrupt - * Subtype bit 12 - (BW) BDB warning - * Subtype bit 13 - (FW) FCB warning - * Subtype bit 14 - (BE) BDB End of chain - * Subtype bit 15 - (FE) FCB End of chain - */ - case ISB_IMC_NON_MAC_RX_RESOURCE: - tp->rx_fifo_overrun_count = 0; - tp->receive_queue_number = NON_MAC_QUEUE; - err1 = smctr_rx_frame(dev); - - if(isb_subtype & NON_MAC_RX_RESOURCE_FE) - { - if((err = smctr_issue_resume_rx_fcb_cmd( dev, NON_MAC_QUEUE)) != SUCCESS) break; - - if(tp->ptr_rx_fcb_overruns) - (*tp->ptr_rx_fcb_overruns)++; - } - - if(isb_subtype & NON_MAC_RX_RESOURCE_BE) - { - if((err = smctr_issue_resume_rx_bdb_cmd( dev, NON_MAC_QUEUE)) != SUCCESS) break; - - if(tp->ptr_rx_bdb_overruns) - (*tp->ptr_rx_bdb_overruns)++; - } - err = err1; - break; - - /* Type 0x08 - MAC RX Resource Interrupt - * Subtype bit 12 - (BW) BDB warning - * Subtype bit 13 - (FW) FCB warning - * Subtype bit 14 - (BE) BDB End of chain - * Subtype bit 15 - (FE) FCB End of chain - */ - case ISB_IMC_MAC_RX_RESOURCE: - tp->receive_queue_number = MAC_QUEUE; - err1 = smctr_rx_frame(dev); - - if(isb_subtype & MAC_RX_RESOURCE_FE) - { - if((err = smctr_issue_resume_rx_fcb_cmd( dev, MAC_QUEUE)) != SUCCESS) - break; - - if(tp->ptr_rx_fcb_overruns) - (*tp->ptr_rx_fcb_overruns)++; - } - - if(isb_subtype & MAC_RX_RESOURCE_BE) - { - if((err = smctr_issue_resume_rx_bdb_cmd( dev, MAC_QUEUE)) != SUCCESS) - break; - - if(tp->ptr_rx_bdb_overruns) - (*tp->ptr_rx_bdb_overruns)++; - } - err = err1; - break; - - /* Type 0x09 - NON_MAC RX Frame Interrupt */ - case ISB_IMC_NON_MAC_RX_FRAME: - tp->rx_fifo_overrun_count = 0; - tp->receive_queue_number = NON_MAC_QUEUE; - err = smctr_rx_frame(dev); - break; - - /* Type 0x0A - MAC RX Frame Interrupt */ - case ISB_IMC_MAC_RX_FRAME: - tp->receive_queue_number = MAC_QUEUE; - err = smctr_rx_frame(dev); - break; - - /* Type 0x0B - TRC status - * TRC has encountered an error condition - * subtype bit 14 - transmit FIFO underrun - * subtype bit 15 - receive FIFO overrun - */ - case ISB_IMC_TRC_FIFO_STATUS: - if(isb_subtype & TRC_FIFO_STATUS_TX_UNDERRUN) - { - 
if(tp->ptr_tx_fifo_underruns) - (*tp->ptr_tx_fifo_underruns)++; - } - - if(isb_subtype & TRC_FIFO_STATUS_RX_OVERRUN) - { - /* update overrun stuck receive counter - * if >= 3, has to clear it by sending - * back to back frames. We pick - * DAT(duplicate address MAC frame) - */ - tp->rx_fifo_overrun_count++; - - if(tp->rx_fifo_overrun_count >= 3) - { - tp->rx_fifo_overrun_count = 0; - - /* delay clearing fifo overrun - * imask till send_BUG tx - * complete posted - */ - interrupt_unmask_bits &= (~0x800); - printk(KERN_CRIT "Jay please send bug\n");// smctr_send_bug(dev); - } - - if(tp->ptr_rx_fifo_overruns) - (*tp->ptr_rx_fifo_overruns)++; - } - - err = SUCCESS; - break; - - /* Type 0x0C - Action Command Status Interrupt - * Subtype bit 14 - CB end of command chain (CE) - * Subtype bit 15 - CB command interrupt (CI) - */ - case ISB_IMC_COMMAND_STATUS: - err = SUCCESS; - if(tp->acb_head->cmd == ACB_CMD_HIC_NOP) - { - printk(KERN_ERR "i1\n"); - smctr_disable_16bit(dev); - - /* XXXXXXXXXXXXXXXXX */ - /* err = UM_Interrupt(dev); */ - - smctr_enable_16bit(dev); - } - else - { - if((tp->acb_head->cmd - == ACB_CMD_READ_TRC_STATUS) && - (tp->acb_head->subcmd - == RW_TRC_STATUS_BLOCK)) - { - if(tp->ptr_bcn_type) - { - *(tp->ptr_bcn_type) - = (__u32)((SBlock *)tp->misc_command_data)->BCN_Type; - } - - if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & ERROR_COUNTERS_CHANGED) - { - smctr_update_err_stats(dev); - } - - if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & TI_NDIS_RING_STATUS_CHANGED) - { - tp->ring_status - = ((SBlock*)tp->misc_command_data)->TI_NDIS_Ring_Status; - smctr_disable_16bit(dev); - err = smctr_ring_status_chg(dev); - smctr_enable_16bit(dev); - if((tp->ring_status & REMOVE_RECEIVED) && - (tp->config_word0 & NO_AUTOREMOVE)) - { - smctr_issue_remove_cmd(dev); - } - - if(err != SUCCESS) - { - tp->acb_pending = 0; - break; - } - } - - if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & UNA_CHANGED) - { - if(tp->ptr_una) - { - tp->ptr_una[0] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[0]); - tp->ptr_una[1] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[1]); - tp->ptr_una[2] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[2]); - } - - } - - if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & READY_TO_SEND_RQ_INIT) { - err = smctr_send_rq_init(dev); - } - } - } - - tp->acb_pending = 0; - break; - - /* Type 0x0D - MAC Type 1 interrupt - * Subtype -- 00 FR_BCN received at S12 - * 01 FR_BCN received at S21 - * 02 FR_DAT(DA=MA, A<>0) received at S21 - * 03 TSM_EXP at S21 - * 04 FR_REMOVE received at S42 - * 05 TBR_EXP, BR_FLAG_SET at S42 - * 06 TBT_EXP at S53 - */ - case ISB_IMC_MAC_TYPE_1: - if(isb_subtype > 8) - { - err = HARDWARE_FAILED; - break; - } - - err = SUCCESS; - switch(isb_subtype) - { - case 0: - tp->join_state = JS_BYPASS_STATE; - if(tp->status != CLOSED) - { - tp->status = CLOSED; - err = smctr_status_chg(dev); - } - break; - - case 1: - tp->join_state = JS_LOBE_TEST_STATE; - break; - - case 2: - tp->join_state = JS_DETECT_MONITOR_PRESENT_STATE; - break; - - case 3: - tp->join_state = JS_AWAIT_NEW_MONITOR_STATE; - break; - - case 4: - tp->join_state = JS_DUPLICATE_ADDRESS_TEST_STATE; - break; - - case 5: - tp->join_state = JS_NEIGHBOR_NOTIFICATION_STATE; - break; - - case 6: - tp->join_state = JS_REQUEST_INITIALIZATION_STATE; - break; - - case 7: - tp->join_state = JS_JOIN_COMPLETE_STATE; - tp->status = OPEN; - err = smctr_status_chg(dev); - break; - - case 8: - tp->join_state = JS_BYPASS_WAIT_STATE; - break; - } - break ; - 
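The ISB_IMC_MAC_TYPE_1 case just removed maps interrupt subtypes 0-8 onto the driver's join states with a long switch, treating anything above 8 as a hardware failure. Where no side effects are needed, the same mapping can be expressed as a small lookup table; a sketch of that alternative follows (the enum only mirrors the JS_* names, and the real handler additionally updated tp->status on subtypes 0 and 7):

    /* Join states as named by the removed driver's MAC Type 1 handling. */
    enum join_state {
            JS_BYPASS_STATE,
            JS_LOBE_TEST_STATE,
            JS_DETECT_MONITOR_PRESENT_STATE,
            JS_AWAIT_NEW_MONITOR_STATE,
            JS_DUPLICATE_ADDRESS_TEST_STATE,
            JS_NEIGHBOR_NOTIFICATION_STATE,
            JS_REQUEST_INITIALIZATION_STATE,
            JS_JOIN_COMPLETE_STATE,
            JS_BYPASS_WAIT_STATE,
            JS_INVALID_STATE
    };

    static enum join_state join_state_from_subtype(unsigned int subtype)
    {
            static const enum join_state map[] = {
                    JS_BYPASS_STATE,                 /* 0: FR_BCN received at S12 */
                    JS_LOBE_TEST_STATE,              /* 1: FR_BCN received at S21 */
                    JS_DETECT_MONITOR_PRESENT_STATE, /* 2 */
                    JS_AWAIT_NEW_MONITOR_STATE,      /* 3 */
                    JS_DUPLICATE_ADDRESS_TEST_STATE, /* 4 */
                    JS_NEIGHBOR_NOTIFICATION_STATE,  /* 5 */
                    JS_REQUEST_INITIALIZATION_STATE, /* 6 */
                    JS_JOIN_COMPLETE_STATE,          /* 7 */
                    JS_BYPASS_WAIT_STATE,            /* 8 */
            };

            if (subtype >= sizeof(map) / sizeof(map[0]))
                    return JS_INVALID_STATE;  /* subtype > 8 was HARDWARE_FAILED */
            return map[subtype];
    }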
- /* Type 0x0E - TRC Initialization Sequence Interrupt - * Subtype -- 00-FF Initializatin sequence complete - */ - case ISB_IMC_TRC_INTRNL_TST_STATUS: - tp->status = INITIALIZED; - smctr_disable_16bit(dev); - err = smctr_status_chg(dev); - smctr_enable_16bit(dev); - break; - - /* other interrupt types, illegal */ - default: - break; - } - - if(err != SUCCESS) - break; - } - - /* Checking the ack code instead of the unmask bits here is because : - * while fixing the stuck receive, DAT frame are sent and mask off - * FIFO overrun interrupt temporarily (interrupt_unmask_bits = 0) - * but we still want to issue ack to ISB - */ - if(!(interrupt_ack_code & 0xff00)) - smctr_issue_int_ack(dev, interrupt_ack_code, interrupt_unmask_bits); - - smctr_disable_16bit(dev); - smctr_enable_bic_int(dev); - spin_unlock(&tp->lock); - - return IRQ_HANDLED; -} - -static int smctr_issue_enable_int_cmd(struct net_device *dev, - __u16 interrupt_enable_mask) -{ - struct net_local *tp = netdev_priv(dev); - int err; - - if((err = smctr_wait_while_cbusy(dev))) - return err; - - tp->sclb_ptr->int_mask_control = interrupt_enable_mask; - tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK; - - smctr_set_ctrl_attention(dev); - - return 0; -} - -static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ibits) -{ - struct net_local *tp = netdev_priv(dev); - - if(smctr_wait_while_cbusy(dev)) - return -1; - - tp->sclb_ptr->int_mask_control = ibits; - tp->sclb_ptr->iack_code = iack_code << 1; /* use the offset from base */ tp->sclb_ptr->resume_control = 0; - tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_IACK_CODE_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK; - - smctr_set_ctrl_attention(dev); - - return 0; -} - -static int smctr_issue_init_timers_cmd(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int i; - int err; - __u16 *pTimer_Struc = (__u16 *)tp->misc_command_data; - - if((err = smctr_wait_while_cbusy(dev))) - return err; - - if((err = smctr_wait_cmd(dev))) - return err; - - tp->config_word0 = THDREN | DMA_TRIGGER | USETPT | NO_AUTOREMOVE; - tp->config_word1 = 0; - - if((tp->media_type == MEDIA_STP_16) || - (tp->media_type == MEDIA_UTP_16) || - (tp->media_type == MEDIA_STP_16_UTP_16)) - { - tp->config_word0 |= FREQ_16MB_BIT; - } - - if(tp->mode_bits & EARLY_TOKEN_REL) - tp->config_word0 |= ETREN; - - if(tp->mode_bits & LOOPING_MODE_MASK) - tp->config_word0 |= RX_OWN_BIT; - else - tp->config_word0 &= ~RX_OWN_BIT; - - if(tp->receive_mask & PROMISCUOUS_MODE) - tp->config_word0 |= PROMISCUOUS_BIT; - else - tp->config_word0 &= ~PROMISCUOUS_BIT; - - if(tp->receive_mask & ACCEPT_ERR_PACKETS) - tp->config_word0 |= SAVBAD_BIT; - else - tp->config_word0 &= ~SAVBAD_BIT; - - if(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES) - tp->config_word0 |= RXATMAC; - else - tp->config_word0 &= ~RXATMAC; - - if(tp->receive_mask & ACCEPT_MULTI_PROM) - tp->config_word1 |= MULTICAST_ADDRESS_BIT; - else - tp->config_word1 &= ~MULTICAST_ADDRESS_BIT; - - if(tp->receive_mask & ACCEPT_SOURCE_ROUTING_SPANNING) - tp->config_word1 |= SOURCE_ROUTING_SPANNING_BITS; - else - { - if(tp->receive_mask & ACCEPT_SOURCE_ROUTING) - tp->config_word1 |= SOURCE_ROUTING_EXPLORER_BIT; - else - tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS; - } - - if((tp->media_type == MEDIA_STP_16) || - (tp->media_type == MEDIA_UTP_16) || - (tp->media_type == MEDIA_STP_16_UTP_16)) - { - tp->config_word1 |= INTERFRAME_SPACING_16; - } - else - tp->config_word1 |= INTERFRAME_SPACING_4; - - *pTimer_Struc++ = 
tp->config_word0; - *pTimer_Struc++ = tp->config_word1; - - if((tp->media_type == MEDIA_STP_4) || - (tp->media_type == MEDIA_UTP_4) || - (tp->media_type == MEDIA_STP_4_UTP_4)) - { - *pTimer_Struc++ = 0x00FA; /* prescale */ - *pTimer_Struc++ = 0x2710; /* TPT_limit */ - *pTimer_Struc++ = 0x2710; /* TQP_limit */ - *pTimer_Struc++ = 0x0A28; /* TNT_limit */ - *pTimer_Struc++ = 0x3E80; /* TBT_limit */ - *pTimer_Struc++ = 0x3A98; /* TSM_limit */ - *pTimer_Struc++ = 0x1B58; /* TAM_limit */ - *pTimer_Struc++ = 0x00C8; /* TBR_limit */ - *pTimer_Struc++ = 0x07D0; /* TER_limit */ - *pTimer_Struc++ = 0x000A; /* TGT_limit */ - *pTimer_Struc++ = 0x1162; /* THT_limit */ - *pTimer_Struc++ = 0x07D0; /* TRR_limit */ - *pTimer_Struc++ = 0x1388; /* TVX_limit */ - *pTimer_Struc++ = 0x0000; /* reserved */ - } - else - { - *pTimer_Struc++ = 0x03E8; /* prescale */ - *pTimer_Struc++ = 0x9C40; /* TPT_limit */ - *pTimer_Struc++ = 0x9C40; /* TQP_limit */ - *pTimer_Struc++ = 0x0A28; /* TNT_limit */ - *pTimer_Struc++ = 0x3E80; /* TBT_limit */ - *pTimer_Struc++ = 0x3A98; /* TSM_limit */ - *pTimer_Struc++ = 0x1B58; /* TAM_limit */ - *pTimer_Struc++ = 0x00C8; /* TBR_limit */ - *pTimer_Struc++ = 0x07D0; /* TER_limit */ - *pTimer_Struc++ = 0x000A; /* TGT_limit */ - *pTimer_Struc++ = 0x4588; /* THT_limit */ - *pTimer_Struc++ = 0x1F40; /* TRR_limit */ - *pTimer_Struc++ = 0x4E20; /* TVX_limit */ - *pTimer_Struc++ = 0x0000; /* reserved */ - } - - /* Set node address. */ - *pTimer_Struc++ = dev->dev_addr[0] << 8 - | (dev->dev_addr[1] & 0xFF); - *pTimer_Struc++ = dev->dev_addr[2] << 8 - | (dev->dev_addr[3] & 0xFF); - *pTimer_Struc++ = dev->dev_addr[4] << 8 - | (dev->dev_addr[5] & 0xFF); - - /* Set group address. */ - *pTimer_Struc++ = tp->group_address_0 << 8 - | tp->group_address_0 >> 8; - *pTimer_Struc++ = tp->group_address[0] << 8 - | tp->group_address[0] >> 8; - *pTimer_Struc++ = tp->group_address[1] << 8 - | tp->group_address[1] >> 8; - - /* Set functional address. */ - *pTimer_Struc++ = tp->functional_address_0 << 8 - | tp->functional_address_0 >> 8; - *pTimer_Struc++ = tp->functional_address[0] << 8 - | tp->functional_address[0] >> 8; - *pTimer_Struc++ = tp->functional_address[1] << 8 - | tp->functional_address[1] >> 8; - - /* Set Bit-Wise group address. */ - *pTimer_Struc++ = tp->bitwise_group_address[0] << 8 - | tp->bitwise_group_address[0] >> 8; - *pTimer_Struc++ = tp->bitwise_group_address[1] << 8 - | tp->bitwise_group_address[1] >> 8; - - /* Set ring number address. */ - *pTimer_Struc++ = tp->source_ring_number; - *pTimer_Struc++ = tp->target_ring_number; - - /* Physical drop number. */ - *pTimer_Struc++ = (unsigned short)0; - *pTimer_Struc++ = (unsigned short)0; - - /* Product instance ID. */ - for(i = 0; i < 9; i++) - *pTimer_Struc++ = (unsigned short)0; - - err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TRC_TIMERS, 0); - - return err; -} - -static int smctr_issue_init_txrx_cmd(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int i; - int err; - void **txrx_ptrs = (void *)tp->misc_command_data; - - if((err = smctr_wait_while_cbusy(dev))) - return err; - - if((err = smctr_wait_cmd(dev))) - { - printk(KERN_ERR "%s: Hardware failure\n", dev->name); - return err; - } - - /* Initialize Transmit Queue Pointers that are used, to point to - * a single FCB. - */ - for(i = 0; i < NUM_TX_QS_USED; i++) - *txrx_ptrs++ = (void *)TRC_POINTER(tp->tx_fcb_head[i]); - - /* Initialize Transmit Queue Pointers that are NOT used to ZERO. 
*/ - for(; i < MAX_TX_QS; i++) - *txrx_ptrs++ = (void *)0; - - /* Initialize Receive Queue Pointers (MAC and Non-MAC) that are - * used, to point to a single FCB and a BDB chain of buffers. - */ - for(i = 0; i < NUM_RX_QS_USED; i++) - { - *txrx_ptrs++ = (void *)TRC_POINTER(tp->rx_fcb_head[i]); - *txrx_ptrs++ = (void *)TRC_POINTER(tp->rx_bdb_head[i]); - } - - /* Initialize Receive Queue Pointers that are NOT used to ZERO. */ - for(; i < MAX_RX_QS; i++) - { - *txrx_ptrs++ = (void *)0; - *txrx_ptrs++ = (void *)0; - } - - err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TX_RX, 0); - - return err; -} - -static int smctr_issue_insert_cmd(struct net_device *dev) -{ - int err; - - err = smctr_setup_single_cmd(dev, ACB_CMD_INSERT, ACB_SUB_CMD_NOP); - - return err; -} - -static int smctr_issue_read_ring_status_cmd(struct net_device *dev) -{ - int err; - - if((err = smctr_wait_while_cbusy(dev))) - return err; - - if((err = smctr_wait_cmd(dev))) - return err; - - err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_READ_TRC_STATUS, - RW_TRC_STATUS_BLOCK); - - return err; -} - -static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt) -{ - int err; - - if((err = smctr_wait_while_cbusy(dev))) - return err; - - if((err = smctr_wait_cmd(dev))) - return err; - - err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_READ_VALUE, - aword_cnt); - - return err; -} - -static int smctr_issue_remove_cmd(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - int err; - - if((err = smctr_wait_while_cbusy(dev))) - return err; - - tp->sclb_ptr->resume_control = 0; - tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_REMOVE; - - smctr_set_ctrl_attention(dev); - - return 0; -} - -static int smctr_issue_resume_acb_cmd(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - int err; - - if((err = smctr_wait_while_cbusy(dev))) - return err; - - tp->sclb_ptr->resume_control = SCLB_RC_ACB; - tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID; - - tp->acb_pending = 1; - - smctr_set_ctrl_attention(dev); - - return 0; -} - -static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue) -{ - struct net_local *tp = netdev_priv(dev); - int err; - - if((err = smctr_wait_while_cbusy(dev))) - return err; - - if(queue == MAC_QUEUE) - tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_BDB; - else - tp->sclb_ptr->resume_control = SCLB_RC_RX_NON_MAC_BDB; - - tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID; - - smctr_set_ctrl_attention(dev); - - return 0; -} - -static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue) -{ - struct net_local *tp = netdev_priv(dev); - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_issue_resume_rx_fcb_cmd\n", dev->name); - - if(smctr_wait_while_cbusy(dev)) - return -1; - - if(queue == MAC_QUEUE) - tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_FCB; - else - tp->sclb_ptr->resume_control = SCLB_RC_RX_NON_MAC_FCB; - - tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID; - - smctr_set_ctrl_attention(dev); - - return 0; -} - -static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue) -{ - struct net_local *tp = netdev_priv(dev); - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_issue_resume_tx_fcb_cmd\n", dev->name); - - if(smctr_wait_while_cbusy(dev)) - return -1; - - tp->sclb_ptr->resume_control = (SCLB_RC_TFCB0 << queue); - tp->sclb_ptr->valid_command = SCLB_RESUME_CONTROL_VALID | SCLB_VALID; - - smctr_set_ctrl_attention(dev); - - return 0; -} 
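All of the smctr_issue_resume_* helpers deleted above share one command shape: spin until the controller is free, fill in the shared System Control Block (SCLB) words, then raise control attention so the adapter picks the command up. A hedged, self-contained sketch of that common pattern with stand-in hooks (the SCLB layout, bit values, and CSR pokes here are illustrative, not the driver's real ones):

    #include <stdint.h>

    /* Simplified model of the shared command block ("SCLB") that both the
     * host and the token-ring controller see. */
    struct sclb {
            volatile uint16_t valid_command;
            volatile uint16_t resume_control;
    };

    #define SCLB_VALID                  0x8000  /* illustrative bit values only */
    #define SCLB_RESUME_CONTROL_VALID   0x0400

    /* Stand-ins for smctr_wait_while_cbusy()/smctr_set_ctrl_attention(); a
     * real driver would poll and write the adapter's CSR here. */
    static int  adapter_wait_not_busy(void)      { return 0; }
    static void adapter_set_ctrl_attention(void) { }

    /* One "resume" command, following the three-step handshake the removed
     * smctr_issue_resume_rx_fcb_cmd()/..._tx_fcb_cmd() helpers all used. */
    static int issue_resume_cmd(struct sclb *sclb, uint16_t resume_bits)
    {
            int err = adapter_wait_not_busy();     /* 1: wait for the controller */
            if (err)
                    return err;

            sclb->resume_control = resume_bits;    /* 2: describe the command    */
            sclb->valid_command  = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;

            adapter_set_ctrl_attention();          /* 3: kick the adapter        */
            return 0;
    }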
- -static int smctr_issue_test_internal_rom_cmd(struct net_device *dev) -{ - int err; - - err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, - TRC_INTERNAL_ROM_TEST); - - return err; -} - -static int smctr_issue_test_hic_cmd(struct net_device *dev) -{ - int err; - - err = smctr_setup_single_cmd(dev, ACB_CMD_HIC_TEST, - TRC_HOST_INTERFACE_REG_TEST); - - return err; -} - -static int smctr_issue_test_mac_reg_cmd(struct net_device *dev) -{ - int err; - - err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, - TRC_MAC_REGISTERS_TEST); - - return err; -} - -static int smctr_issue_trc_loopback_cmd(struct net_device *dev) -{ - int err; - - err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, - TRC_INTERNAL_LOOPBACK); - - return err; -} - -static int smctr_issue_tri_loopback_cmd(struct net_device *dev) -{ - int err; - - err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, - TRC_TRI_LOOPBACK); - - return err; -} - -static int smctr_issue_write_byte_cmd(struct net_device *dev, - short aword_cnt, void *byte) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int iword, ibyte; - int err; - - if((err = smctr_wait_while_cbusy(dev))) - return err; - - if((err = smctr_wait_cmd(dev))) - return err; - - for(iword = 0, ibyte = 0; iword < (unsigned int)(aword_cnt & 0xff); - iword++, ibyte += 2) - { - tp->misc_command_data[iword] = (*((__u8 *)byte + ibyte) << 8) - | (*((__u8 *)byte + ibyte + 1)); - } - - return smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE, - aword_cnt); -} - -static int smctr_issue_write_word_cmd(struct net_device *dev, - short aword_cnt, void *word) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int i, err; - - if((err = smctr_wait_while_cbusy(dev))) - return err; - - if((err = smctr_wait_cmd(dev))) - return err; - - for(i = 0; i < (unsigned int)(aword_cnt & 0xff); i++) - tp->misc_command_data[i] = *((__u16 *)word + i); - - err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE, - aword_cnt); - - return err; -} - -static int smctr_join_complete_state(struct net_device *dev) -{ - int err; - - err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, - JS_JOIN_COMPLETE_STATE); - - return err; -} - -static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int i, j; - FCBlock *fcb; - BDBlock *bdb; - - for(i = 0; i < NUM_TX_QS_USED; i++) - { - fcb = tp->tx_fcb_head[i]; - bdb = tp->tx_bdb_head[i]; - - for(j = 0; j < tp->num_tx_fcbs[i]; j++) - { - fcb->bdb_ptr = bdb; - fcb->trc_bdb_ptr = TRC_POINTER(bdb); - fcb = (FCBlock *)((char *)fcb + sizeof(FCBlock)); - bdb = (BDBlock *)((char *)bdb + sizeof(BDBlock)); - } - } - - return 0; -} - -static int smctr_load_firmware(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - const struct firmware *fw; - __u16 i, checksum = 0; - int err = 0; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_load_firmware\n", dev->name); - - if (request_firmware(&fw, "tr_smctr.bin", &dev->dev)) { - printk(KERN_ERR "%s: firmware not found\n", dev->name); - return UCODE_NOT_PRESENT; - } - - tp->num_of_tx_buffs = 4; - tp->mode_bits |= UMAC; - tp->receive_mask = 0; - tp->max_packet_size = 4177; - - /* Can only upload the firmware once per adapter reset. */ - if (tp->microcode_version != 0) { - err = (UCODE_PRESENT); - goto out; - } - - /* Verify the firmware exists and is there in the right amount. 
*/ - if (!fw->data || - (*(fw->data + UCODE_VERSION_OFFSET) < UCODE_VERSION)) - { - err = (UCODE_NOT_PRESENT); - goto out; - } - - /* UCODE_SIZE is not included in Checksum. */ - for(i = 0; i < *((__u16 *)(fw->data + UCODE_SIZE_OFFSET)); i += 2) - checksum += *((__u16 *)(fw->data + 2 + i)); - if (checksum) { - err = (UCODE_NOT_PRESENT); - goto out; - } - - /* At this point we have a valid firmware image, lets kick it on up. */ - smctr_enable_adapter_ram(dev); - smctr_enable_16bit(dev); - smctr_set_page(dev, (__u8 *)tp->ram_access); - - if((smctr_checksum_firmware(dev)) || - (*(fw->data + UCODE_VERSION_OFFSET) > tp->microcode_version)) - { - smctr_enable_adapter_ctrl_store(dev); - - /* Zero out ram space for firmware. */ - for(i = 0; i < CS_RAM_SIZE; i += 2) - *((__u16 *)(tp->ram_access + i)) = 0; - - smctr_decode_firmware(dev, fw); - - tp->microcode_version = *(fw->data + UCODE_VERSION_OFFSET); *((__u16 *)(tp->ram_access + CS_RAM_VERSION_OFFSET)) - = (tp->microcode_version << 8); - *((__u16 *)(tp->ram_access + CS_RAM_CHECKSUM_OFFSET)) - = ~(tp->microcode_version << 8) + 1; - - smctr_disable_adapter_ctrl_store(dev); - - if(smctr_checksum_firmware(dev)) - err = HARDWARE_FAILED; - } - else - err = UCODE_PRESENT; - - smctr_disable_16bit(dev); - out: - release_firmware(fw); - return err; -} - -static int smctr_load_node_addr(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - unsigned int i; - __u8 r; - - for(i = 0; i < 6; i++) - { - r = inb(ioaddr + LAR0 + i); - dev->dev_addr[i] = (char)r; - } - dev->addr_len = 6; - - return 0; -} - -/* Lobe Media Test. - * During the transmission of the initial 1500 lobe media MAC frames, - * the phase lock loop in the 805 chip may lock, and then un-lock, causing - * the 825 to go into a PURGE state. When performing a PURGE, the MCT - * microcode will not transmit any frames given to it by the host, and - * will consequently cause a timeout. - * - * NOTE 1: If the monitor_state is MS_BEACON_TEST_STATE, all transmit - * queues other than the one used for the lobe_media_test should be - * disabled.!? - * - * NOTE 2: If the monitor_state is MS_BEACON_TEST_STATE and the receive_mask - * has any multi-cast or promiscuous bits set, the receive_mask needs to - * be changed to clear the multi-cast or promiscuous mode bits, the lobe_test - * run, and then the receive mask set back to its original value if the test - * is successful. - */ -static int smctr_lobe_media_test(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int i, perror = 0; - unsigned short saved_rcv_mask; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_lobe_media_test\n", dev->name); - - /* Clear receive mask for lobe test. */ - saved_rcv_mask = tp->receive_mask; - tp->receive_mask = 0; - - smctr_chg_rx_mask(dev); - - /* Setup the lobe media test. */ - smctr_lobe_media_test_cmd(dev); - if(smctr_wait_cmd(dev)) - goto err; - - /* Tx lobe media test frames. */ - for(i = 0; i < 1500; ++i) - { - if(smctr_send_lobe_media_test(dev)) - { - if(perror) - goto err; - else - { - perror = 1; - if(smctr_lobe_media_test_cmd(dev)) - goto err; - } - } - } - - if(smctr_send_dat(dev)) - { - if(smctr_send_dat(dev)) - goto err; - } - - /* Check if any frames received during test. */ - if((tp->rx_fcb_curr[MAC_QUEUE]->frame_status) || - (tp->rx_fcb_curr[NON_MAC_QUEUE]->frame_status)) - goto err; - - /* Set receive mask to "Promisc" mode. 
*/ - tp->receive_mask = saved_rcv_mask; - - smctr_chg_rx_mask(dev); - - return 0; -err: - smctr_reset_adapter(dev); - tp->status = CLOSED; - return LOBE_MEDIA_TEST_FAILED; -} - -static int smctr_lobe_media_test_cmd(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - int err; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_lobe_media_test_cmd\n", dev->name); - - /* Change to lobe media test state. */ - if(tp->monitor_state != MS_BEACON_TEST_STATE) - { - smctr_lobe_media_test_state(dev); - if(smctr_wait_cmd(dev)) - { - printk(KERN_ERR "Lobe Failed test state\n"); - return LOBE_MEDIA_TEST_FAILED; - } - } - - err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, - TRC_LOBE_MEDIA_TEST); - - return err; -} - -static int smctr_lobe_media_test_state(struct net_device *dev) -{ - int err; - - err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, - JS_LOBE_TEST_STATE); - - return err; -} - -static int smctr_make_8025_hdr(struct net_device *dev, - MAC_HEADER *rmf, MAC_HEADER *tmf, __u16 ac_fc) -{ - tmf->ac = MSB(ac_fc); /* msb is access control */ - tmf->fc = LSB(ac_fc); /* lsb is frame control */ - - tmf->sa[0] = dev->dev_addr[0]; - tmf->sa[1] = dev->dev_addr[1]; - tmf->sa[2] = dev->dev_addr[2]; - tmf->sa[3] = dev->dev_addr[3]; - tmf->sa[4] = dev->dev_addr[4]; - tmf->sa[5] = dev->dev_addr[5]; - - switch(tmf->vc) - { - /* Send RQ_INIT to RPS */ - case RQ_INIT: - tmf->da[0] = 0xc0; - tmf->da[1] = 0x00; - tmf->da[2] = 0x00; - tmf->da[3] = 0x00; - tmf->da[4] = 0x00; - tmf->da[5] = 0x02; - break; - - /* Send RPT_TX_FORWARD to CRS */ - case RPT_TX_FORWARD: - tmf->da[0] = 0xc0; - tmf->da[1] = 0x00; - tmf->da[2] = 0x00; - tmf->da[3] = 0x00; - tmf->da[4] = 0x00; - tmf->da[5] = 0x10; - break; - - /* Everything else goes to sender */ - default: - tmf->da[0] = rmf->sa[0]; - tmf->da[1] = rmf->sa[1]; - tmf->da[2] = rmf->sa[2]; - tmf->da[3] = rmf->sa[3]; - tmf->da[4] = rmf->sa[4]; - tmf->da[5] = rmf->sa[5]; - break; - } - - return 0; -} - -static int smctr_make_access_pri(struct net_device *dev, MAC_SUB_VECTOR *tsv) -{ - struct net_local *tp = netdev_priv(dev); - - tsv->svi = AUTHORIZED_ACCESS_PRIORITY; - tsv->svl = S_AUTHORIZED_ACCESS_PRIORITY; - - tsv->svv[0] = MSB(tp->authorized_access_priority); - tsv->svv[1] = LSB(tp->authorized_access_priority); - - return 0; -} - -static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv) -{ - tsv->svi = ADDRESS_MODIFER; - tsv->svl = S_ADDRESS_MODIFER; - - tsv->svv[0] = 0; - tsv->svv[1] = 0; - - return 0; -} - -static int smctr_make_auth_funct_class(struct net_device *dev, - MAC_SUB_VECTOR *tsv) -{ - struct net_local *tp = netdev_priv(dev); - - tsv->svi = AUTHORIZED_FUNCTION_CLASS; - tsv->svl = S_AUTHORIZED_FUNCTION_CLASS; - - tsv->svv[0] = MSB(tp->authorized_function_classes); - tsv->svv[1] = LSB(tp->authorized_function_classes); - - return 0; -} - -static int smctr_make_corr(struct net_device *dev, - MAC_SUB_VECTOR *tsv, __u16 correlator) -{ - tsv->svi = CORRELATOR; - tsv->svl = S_CORRELATOR; - - tsv->svv[0] = MSB(correlator); - tsv->svv[1] = LSB(correlator); - - return 0; -} - -static int smctr_make_funct_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv) -{ - struct net_local *tp = netdev_priv(dev); - - smctr_get_functional_address(dev); - - tsv->svi = FUNCTIONAL_ADDRESS; - tsv->svl = S_FUNCTIONAL_ADDRESS; - - tsv->svv[0] = MSB(tp->misc_command_data[0]); - tsv->svv[1] = LSB(tp->misc_command_data[0]); - - tsv->svv[2] = MSB(tp->misc_command_data[1]); - tsv->svv[3] = LSB(tp->misc_command_data[1]); - - return 0; -} - -static 
int smctr_make_group_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv) -{ - struct net_local *tp = netdev_priv(dev); - - smctr_get_group_address(dev); - - tsv->svi = GROUP_ADDRESS; - tsv->svl = S_GROUP_ADDRESS; - - tsv->svv[0] = MSB(tp->misc_command_data[0]); - tsv->svv[1] = LSB(tp->misc_command_data[0]); - - tsv->svv[2] = MSB(tp->misc_command_data[1]); - tsv->svv[3] = LSB(tp->misc_command_data[1]); - - /* Set Group Address Sub-vector to all zeros if only the - * Group Address/Functional Address Indicator is set. - */ - if(tsv->svv[0] == 0x80 && tsv->svv[1] == 0x00 && - tsv->svv[2] == 0x00 && tsv->svv[3] == 0x00) - tsv->svv[0] = 0x00; - - return 0; -} - -static int smctr_make_phy_drop_num(struct net_device *dev, - MAC_SUB_VECTOR *tsv) -{ - struct net_local *tp = netdev_priv(dev); - - smctr_get_physical_drop_number(dev); - - tsv->svi = PHYSICAL_DROP; - tsv->svl = S_PHYSICAL_DROP; - - tsv->svv[0] = MSB(tp->misc_command_data[0]); - tsv->svv[1] = LSB(tp->misc_command_data[0]); - - tsv->svv[2] = MSB(tp->misc_command_data[1]); - tsv->svv[3] = LSB(tp->misc_command_data[1]); - - return 0; -} - -static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv) -{ - int i; - - tsv->svi = PRODUCT_INSTANCE_ID; - tsv->svl = S_PRODUCT_INSTANCE_ID; - - for(i = 0; i < 18; i++) - tsv->svv[i] = 0xF0; - - return 0; -} - -static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv) -{ - struct net_local *tp = netdev_priv(dev); - - smctr_get_station_id(dev); - - tsv->svi = STATION_IDENTIFER; - tsv->svl = S_STATION_IDENTIFER; - - tsv->svv[0] = MSB(tp->misc_command_data[0]); - tsv->svv[1] = LSB(tp->misc_command_data[0]); - - tsv->svv[2] = MSB(tp->misc_command_data[1]); - tsv->svv[3] = LSB(tp->misc_command_data[1]); - - tsv->svv[4] = MSB(tp->misc_command_data[2]); - tsv->svv[5] = LSB(tp->misc_command_data[2]); - - return 0; -} - -static int smctr_make_ring_station_status(struct net_device *dev, - MAC_SUB_VECTOR * tsv) -{ - tsv->svi = RING_STATION_STATUS; - tsv->svl = S_RING_STATION_STATUS; - - tsv->svv[0] = 0; - tsv->svv[1] = 0; - tsv->svv[2] = 0; - tsv->svv[3] = 0; - tsv->svv[4] = 0; - tsv->svv[5] = 0; - - return 0; -} - -static int smctr_make_ring_station_version(struct net_device *dev, - MAC_SUB_VECTOR *tsv) -{ - struct net_local *tp = netdev_priv(dev); - - tsv->svi = RING_STATION_VERSION_NUMBER; - tsv->svl = S_RING_STATION_VERSION_NUMBER; - - tsv->svv[0] = 0xe2; /* EBCDIC - S */ - tsv->svv[1] = 0xd4; /* EBCDIC - M */ - tsv->svv[2] = 0xc3; /* EBCDIC - C */ - tsv->svv[3] = 0x40; /* EBCDIC - */ - tsv->svv[4] = 0xe5; /* EBCDIC - V */ - tsv->svv[5] = 0xF0 + (tp->microcode_version >> 4); - tsv->svv[6] = 0xF0 + (tp->microcode_version & 0x0f); - tsv->svv[7] = 0x40; /* EBCDIC - */ - tsv->svv[8] = 0xe7; /* EBCDIC - X */ - - if(tp->extra_info & CHIP_REV_MASK) - tsv->svv[9] = 0xc5; /* EBCDIC - E */ - else - tsv->svv[9] = 0xc4; /* EBCDIC - D */ - - return 0; -} - -static int smctr_make_tx_status_code(struct net_device *dev, - MAC_SUB_VECTOR *tsv, __u16 tx_fstatus) -{ - tsv->svi = TRANSMIT_STATUS_CODE; - tsv->svl = S_TRANSMIT_STATUS_CODE; - - tsv->svv[0] = ((tx_fstatus & 0x0100 >> 6) | IBM_PASS_SOURCE_ADDR); - - /* Stripped frame status of Transmitted Frame */ - tsv->svv[1] = tx_fstatus & 0xff; - - return 0; -} - -static int smctr_make_upstream_neighbor_addr(struct net_device *dev, - MAC_SUB_VECTOR *tsv) -{ - struct net_local *tp = netdev_priv(dev); - - smctr_get_upstream_neighbor_addr(dev); - - tsv->svi = UPSTREAM_NEIGHBOR_ADDRESS; - tsv->svl = S_UPSTREAM_NEIGHBOR_ADDRESS; - - tsv->svv[0] = 
MSB(tp->misc_command_data[0]); - tsv->svv[1] = LSB(tp->misc_command_data[0]); - - tsv->svv[2] = MSB(tp->misc_command_data[1]); - tsv->svv[3] = LSB(tp->misc_command_data[1]); - - tsv->svv[4] = MSB(tp->misc_command_data[2]); - tsv->svv[5] = LSB(tp->misc_command_data[2]); - - return 0; -} - -static int smctr_make_wrap_data(struct net_device *dev, MAC_SUB_VECTOR *tsv) -{ - tsv->svi = WRAP_DATA; - tsv->svl = S_WRAP_DATA; - - return 0; -} - -/* - * Open/initialize the board. This is called sometime after - * booting when the 'ifconfig' program is run. - * - * This routine should set everything up anew at each open, even - * registers that "should" only need to be set once at boot, so that - * there is non-reboot way to recover if something goes wrong. - */ -static int smctr_open(struct net_device *dev) -{ - int err; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_open\n", dev->name); - - err = smctr_init_adapter(dev); - if(err < 0) - return err; - - return err; -} - -/* Interrupt driven open of Token card. */ -static int smctr_open_tr(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned long flags; - int err; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_open_tr\n", dev->name); - - /* Now we can actually open the adapter. */ - if(tp->status == OPEN) - return 0; - if(tp->status != INITIALIZED) - return -1; - - /* FIXME: it would work a lot better if we masked the irq sources - on the card here, then we could skip the locking and poll nicely */ - spin_lock_irqsave(&tp->lock, flags); - - smctr_set_page(dev, (__u8 *)tp->ram_access); - - if((err = smctr_issue_resume_rx_fcb_cmd(dev, (short)MAC_QUEUE))) - goto out; - - if((err = smctr_issue_resume_rx_bdb_cmd(dev, (short)MAC_QUEUE))) - goto out; - - if((err = smctr_issue_resume_rx_fcb_cmd(dev, (short)NON_MAC_QUEUE))) - goto out; - - if((err = smctr_issue_resume_rx_bdb_cmd(dev, (short)NON_MAC_QUEUE))) - goto out; - - tp->status = CLOSED; - - /* Insert into the Ring or Enter Loopback Mode. */ - if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_1) - { - tp->status = CLOSED; - - if(!(err = smctr_issue_trc_loopback_cmd(dev))) - { - if(!(err = smctr_wait_cmd(dev))) - tp->status = OPEN; - } - - smctr_status_chg(dev); - } - else - { - if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_2) - { - tp->status = CLOSED; - if(!(err = smctr_issue_tri_loopback_cmd(dev))) - { - if(!(err = smctr_wait_cmd(dev))) - tp->status = OPEN; - } - - smctr_status_chg(dev); - } - else - { - if((tp->mode_bits & LOOPING_MODE_MASK) - == LOOPBACK_MODE_3) - { - tp->status = CLOSED; - if(!(err = smctr_lobe_media_test_cmd(dev))) - { - if(!(err = smctr_wait_cmd(dev))) - tp->status = OPEN; - } - smctr_status_chg(dev); - } - else - { - if(!(err = smctr_lobe_media_test(dev))) - err = smctr_issue_insert_cmd(dev); - else - { - if(err == LOBE_MEDIA_TEST_FAILED) - printk(KERN_WARNING "%s: Lobe Media Test Failure - Check cable?\n", dev->name); - } - } - } - } - -out: - spin_unlock_irqrestore(&tp->lock, flags); - - return err; -} - -/* Check for a network adapter of this type, - * and return device structure if one exists. 
- */ -struct net_device __init *smctr_probe(int unit) -{ - struct net_device *dev = alloc_trdev(sizeof(struct net_local)); - static const unsigned ports[] = { - 0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0, 0x300, - 0x320, 0x340, 0x360, 0x380, 0 - }; - const unsigned *port; - int err = 0; - - if (!dev) - return ERR_PTR(-ENOMEM); - - if (unit >= 0) { - sprintf(dev->name, "tr%d", unit); - netdev_boot_setup_check(dev); - } - - if (dev->base_addr > 0x1ff) /* Check a single specified location. */ - err = smctr_probe1(dev, dev->base_addr); - else if(dev->base_addr != 0) /* Don't probe at all. */ - err =-ENXIO; - else { - for (port = ports; *port; port++) { - err = smctr_probe1(dev, *port); - if (!err) - break; - } - } - if (err) - goto out; - err = register_netdev(dev); - if (err) - goto out1; - return dev; -out1: -#ifdef CONFIG_MCA_LEGACY - { struct net_local *tp = netdev_priv(dev); - if (tp->slot_num) - mca_mark_as_unused(tp->slot_num); - } -#endif - release_region(dev->base_addr, SMCTR_IO_EXTENT); - free_irq(dev->irq, dev); -out: - free_netdev(dev); - return ERR_PTR(err); -} - -static const struct net_device_ops smctr_netdev_ops = { - .ndo_open = smctr_open, - .ndo_stop = smctr_close, - .ndo_start_xmit = smctr_send_packet, - .ndo_tx_timeout = smctr_timeout, - .ndo_get_stats = smctr_get_stats, - .ndo_set_rx_mode = smctr_set_multicast_list, -}; - -static int __init smctr_probe1(struct net_device *dev, int ioaddr) -{ - static unsigned version_printed; - struct net_local *tp = netdev_priv(dev); - int err; - __u32 *ram; - - if(smctr_debug && version_printed++ == 0) - printk(version); - - spin_lock_init(&tp->lock); - dev->base_addr = ioaddr; - - /* Actually detect an adapter now. */ - err = smctr_chk_isa(dev); - if(err < 0) - { - if ((err = smctr_chk_mca(dev)) < 0) { - err = -ENODEV; - goto out; - } - } - - tp = netdev_priv(dev); - dev->mem_start = tp->ram_base; - dev->mem_end = dev->mem_start + 0x10000; - ram = (__u32 *)phys_to_virt(dev->mem_start); - tp->ram_access = *(__u32 *)&ram; - tp->status = NOT_INITIALIZED; - - err = smctr_load_firmware(dev); - if(err != UCODE_PRESENT && err != SUCCESS) - { - printk(KERN_ERR "%s: Firmware load failed (%d)\n", dev->name, err); - err = -EIO; - goto out; - } - - /* Allow user to specify ring speed on module insert. */ - if(ringspeed == 4) - tp->media_type = MEDIA_UTP_4; - else - tp->media_type = MEDIA_UTP_16; - - printk(KERN_INFO "%s: %s %s at Io %#4x, Irq %d, Rom %#4x, Ram %#4x.\n", - dev->name, smctr_name, smctr_model, - (unsigned int)dev->base_addr, - dev->irq, tp->rom_base, tp->ram_base); - - dev->netdev_ops = &smctr_netdev_ops; - dev->watchdog_timeo = HZ; - return 0; - -out: - return err; -} - -static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size, - struct net_device *dev, __u16 rx_status) -{ - struct net_local *tp = netdev_priv(dev); - struct sk_buff *skb; - __u16 rcode, correlator; - int err = 0; - __u8 xframe = 1; - - rmf->vl = SWAP_BYTES(rmf->vl); - if(rx_status & FCB_RX_STATUS_DA_MATCHED) - { - switch(rmf->vc) - { - /* Received MAC Frames Processed by RS. 
*/ - case INIT: - if((rcode = smctr_rcv_init(dev, rmf, &correlator)) == HARDWARE_FAILED) - { - return rcode; - } - - if((err = smctr_send_rsp(dev, rmf, rcode, - correlator))) - { - return err; - } - break; - - case CHG_PARM: - if((rcode = smctr_rcv_chg_param(dev, rmf, - &correlator)) ==HARDWARE_FAILED) - { - return rcode; - } - - if((err = smctr_send_rsp(dev, rmf, rcode, - correlator))) - { - return err; - } - break; - - case RQ_ADDR: - if((rcode = smctr_rcv_rq_addr_state_attch(dev, - rmf, &correlator)) != POSITIVE_ACK) - { - if(rcode == HARDWARE_FAILED) - return rcode; - else - return smctr_send_rsp(dev, rmf, - rcode, correlator); - } - - if((err = smctr_send_rpt_addr(dev, rmf, - correlator))) - { - return err; - } - break; - - case RQ_ATTCH: - if((rcode = smctr_rcv_rq_addr_state_attch(dev, - rmf, &correlator)) != POSITIVE_ACK) - { - if(rcode == HARDWARE_FAILED) - return rcode; - else - return smctr_send_rsp(dev, rmf, - rcode, - correlator); - } - - if((err = smctr_send_rpt_attch(dev, rmf, - correlator))) - { - return err; - } - break; - - case RQ_STATE: - if((rcode = smctr_rcv_rq_addr_state_attch(dev, - rmf, &correlator)) != POSITIVE_ACK) - { - if(rcode == HARDWARE_FAILED) - return rcode; - else - return smctr_send_rsp(dev, rmf, - rcode, - correlator); - } - - if((err = smctr_send_rpt_state(dev, rmf, - correlator))) - { - return err; - } - break; - - case TX_FORWARD: { - __u16 uninitialized_var(tx_fstatus); - - if((rcode = smctr_rcv_tx_forward(dev, rmf)) - != POSITIVE_ACK) - { - if(rcode == HARDWARE_FAILED) - return rcode; - else - return smctr_send_rsp(dev, rmf, - rcode, - correlator); - } - - if((err = smctr_send_tx_forward(dev, rmf, - &tx_fstatus)) == HARDWARE_FAILED) - { - return err; - } - - if(err == A_FRAME_WAS_FORWARDED) - { - if((err = smctr_send_rpt_tx_forward(dev, - rmf, tx_fstatus)) - == HARDWARE_FAILED) - { - return err; - } - } - break; - } - - /* Received MAC Frames Processed by CRS/REM/RPS. */ - case RSP: - case RQ_INIT: - case RPT_NEW_MON: - case RPT_SUA_CHG: - case RPT_ACTIVE_ERR: - case RPT_NN_INCMP: - case RPT_ERROR: - case RPT_ATTCH: - case RPT_STATE: - case RPT_ADDR: - break; - - /* Rcvd Att. MAC Frame (if RXATMAC set) or UNKNOWN */ - default: - xframe = 0; - if(!(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)) - { - rcode = smctr_rcv_unknown(dev, rmf, - &correlator); - if((err = smctr_send_rsp(dev, rmf,rcode, - correlator))) - { - return err; - } - } - - break; - } - } - else - { - /* 1. DA doesn't match (Promiscuous Mode). - * 2. Parse for Extended MAC Frame Type. - */ - switch(rmf->vc) - { - case RSP: - case INIT: - case RQ_INIT: - case RQ_ADDR: - case RQ_ATTCH: - case RQ_STATE: - case CHG_PARM: - case RPT_ADDR: - case RPT_ERROR: - case RPT_ATTCH: - case RPT_STATE: - case RPT_NEW_MON: - case RPT_SUA_CHG: - case RPT_NN_INCMP: - case RPT_ACTIVE_ERR: - break; - - default: - xframe = 0; - break; - } - } - - /* NOTE: UNKNOWN MAC frames will NOT be passed up unless - * ACCEPT_ATT_MAC_FRAMES is set. - */ - if(((tp->receive_mask & ACCEPT_ATT_MAC_FRAMES) && - (xframe == (__u8)0)) || - ((tp->receive_mask & ACCEPT_EXT_MAC_FRAMES) && - (xframe == (__u8)1))) - { - rmf->vl = SWAP_BYTES(rmf->vl); - - if (!(skb = dev_alloc_skb(size))) - return -ENOMEM; - skb->len = size; - - /* Slide data into a sleek skb. */ - skb_put(skb, skb->len); - skb_copy_to_linear_data(skb, rmf, skb->len); - - /* Update Counters */ - tp->MacStat.rx_packets++; - tp->MacStat.rx_bytes += skb->len; - - /* Kick the packet on up. 
*/ - skb->protocol = tr_type_trans(skb, dev); - netif_rx(skb); - err = 0; - } - - return err; -} - -/* Adapter RAM test. Incremental word ODD boundary data test. */ -static int smctr_ram_memory_test(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - __u16 page, pages_of_ram, start_pattern = 0, word_pattern = 0, - word_read = 0, err_word = 0, err_pattern = 0; - unsigned int err_offset; - __u32 j, pword; - __u8 err = 0; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_ram_memory_test\n", dev->name); - - start_pattern = 0x0001; - pages_of_ram = tp->ram_size / tp->ram_usable; - pword = tp->ram_access; - - /* Incremental word ODD boundary test. */ - for(page = 0; (page < pages_of_ram) && (~err); - page++, start_pattern += 0x8000) - { - smctr_set_page(dev, (__u8 *)(tp->ram_access - + (page * tp->ram_usable * 1024) + 1)); - word_pattern = start_pattern; - - for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1; j += 2) - *(__u16 *)(pword + j) = word_pattern++; - - word_pattern = start_pattern; - - for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1 && (~err); - j += 2, word_pattern++) - { - word_read = *(__u16 *)(pword + j); - if(word_read != word_pattern) - { - err = (__u8)1; - err_offset = j; - err_word = word_read; - err_pattern = word_pattern; - return RAM_TEST_FAILED; - } - } - } - - /* Zero out memory. */ - for(page = 0; page < pages_of_ram && (~err); page++) - { - smctr_set_page(dev, (__u8 *)(tp->ram_access - + (page * tp->ram_usable * 1024))); - word_pattern = 0; - - for(j = 0; j < (__u32)tp->ram_usable * 1024; j +=2) - *(__u16 *)(pword + j) = word_pattern; - - for(j =0; j < (__u32)tp->ram_usable * 1024 && (~err); j += 2) - { - word_read = *(__u16 *)(pword + j); - if(word_read != word_pattern) - { - err = (__u8)1; - err_offset = j; - err_word = word_read; - err_pattern = word_pattern; - return RAM_TEST_FAILED; - } - } - } - - smctr_set_page(dev, (__u8 *)tp->ram_access); - - return 0; -} - -static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf, - __u16 *correlator) -{ - MAC_SUB_VECTOR *rsv; - signed short vlen; - __u16 rcode = POSITIVE_ACK; - unsigned int svectors = F_NO_SUB_VECTORS_FOUND; - - /* This Frame can only come from a CRS */ - if((rmf->dc_sc & SC_MASK) != SC_CRS) - return E_INAPPROPRIATE_SOURCE_CLASS; - - /* Remove MVID Length from total length. */ - vlen = (signed short)rmf->vl - 4; - - /* Point to First SVID */ - rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER)); - - /* Search for Appropriate SVID's. 
*/ - while((vlen > 0) && (rcode == POSITIVE_ACK)) - { - switch(rsv->svi) - { - case CORRELATOR: - svectors |= F_CORRELATOR; - rcode = smctr_set_corr(dev, rsv, correlator); - break; - - case LOCAL_RING_NUMBER: - svectors |= F_LOCAL_RING_NUMBER; - rcode = smctr_set_local_ring_num(dev, rsv); - break; - - case ASSIGN_PHYSICAL_DROP: - svectors |= F_ASSIGN_PHYSICAL_DROP; - rcode = smctr_set_phy_drop(dev, rsv); - break; - - case ERROR_TIMER_VALUE: - svectors |= F_ERROR_TIMER_VALUE; - rcode = smctr_set_error_timer_value(dev, rsv); - break; - - case AUTHORIZED_FUNCTION_CLASS: - svectors |= F_AUTHORIZED_FUNCTION_CLASS; - rcode = smctr_set_auth_funct_class(dev, rsv); - break; - - case AUTHORIZED_ACCESS_PRIORITY: - svectors |= F_AUTHORIZED_ACCESS_PRIORITY; - rcode = smctr_set_auth_access_pri(dev, rsv); - break; - - default: - rcode = E_SUB_VECTOR_UNKNOWN; - break; - } - - /* Let Sender Know if SUM of SV length's is - * larger then length in MVID length field - */ - if((vlen -= rsv->svl) < 0) - rcode = E_VECTOR_LENGTH_ERROR; - - rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl); - } - - if(rcode == POSITIVE_ACK) - { - /* Let Sender Know if MVID length field - * is larger then SUM of SV length's - */ - if(vlen != 0) - rcode = E_VECTOR_LENGTH_ERROR; - else - { - /* Let Sender Know if Expected SVID Missing */ - if((svectors & R_CHG_PARM) ^ R_CHG_PARM) - rcode = E_MISSING_SUB_VECTOR; - } - } - - return rcode; -} - -static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf, - __u16 *correlator) -{ - MAC_SUB_VECTOR *rsv; - signed short vlen; - __u16 rcode = POSITIVE_ACK; - unsigned int svectors = F_NO_SUB_VECTORS_FOUND; - - /* This Frame can only come from a RPS */ - if((rmf->dc_sc & SC_MASK) != SC_RPS) - return E_INAPPROPRIATE_SOURCE_CLASS; - - /* Remove MVID Length from total length. 
*/ - vlen = (signed short)rmf->vl - 4; - - /* Point to First SVID */ - rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER)); - - /* Search for Appropriate SVID's */ - while((vlen > 0) && (rcode == POSITIVE_ACK)) - { - switch(rsv->svi) - { - case CORRELATOR: - svectors |= F_CORRELATOR; - rcode = smctr_set_corr(dev, rsv, correlator); - break; - - case LOCAL_RING_NUMBER: - svectors |= F_LOCAL_RING_NUMBER; - rcode = smctr_set_local_ring_num(dev, rsv); - break; - - case ASSIGN_PHYSICAL_DROP: - svectors |= F_ASSIGN_PHYSICAL_DROP; - rcode = smctr_set_phy_drop(dev, rsv); - break; - - case ERROR_TIMER_VALUE: - svectors |= F_ERROR_TIMER_VALUE; - rcode = smctr_set_error_timer_value(dev, rsv); - break; - - default: - rcode = E_SUB_VECTOR_UNKNOWN; - break; - } - - /* Let Sender Know if SUM of SV length's is - * larger then length in MVID length field - */ - if((vlen -= rsv->svl) < 0) - rcode = E_VECTOR_LENGTH_ERROR; - - rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl); - } - - if(rcode == POSITIVE_ACK) - { - /* Let Sender Know if MVID length field - * is larger then SUM of SV length's - */ - if(vlen != 0) - rcode = E_VECTOR_LENGTH_ERROR; - else - { - /* Let Sender Know if Expected SV Missing */ - if((svectors & R_INIT) ^ R_INIT) - rcode = E_MISSING_SUB_VECTOR; - } - } - - return rcode; -} - -static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf) -{ - MAC_SUB_VECTOR *rsv; - signed short vlen; - __u16 rcode = POSITIVE_ACK; - unsigned int svectors = F_NO_SUB_VECTORS_FOUND; - - /* This Frame can only come from a CRS */ - if((rmf->dc_sc & SC_MASK) != SC_CRS) - return E_INAPPROPRIATE_SOURCE_CLASS; - - /* Remove MVID Length from total length */ - vlen = (signed short)rmf->vl - 4; - - /* Point to First SVID */ - rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER)); - - /* Search for Appropriate SVID's */ - while((vlen > 0) && (rcode == POSITIVE_ACK)) - { - switch(rsv->svi) - { - case FRAME_FORWARD: - svectors |= F_FRAME_FORWARD; - rcode = smctr_set_frame_forward(dev, rsv, - rmf->dc_sc); - break; - - default: - rcode = E_SUB_VECTOR_UNKNOWN; - break; - } - - /* Let Sender Know if SUM of SV length's is - * larger then length in MVID length field - */ - if((vlen -= rsv->svl) < 0) - rcode = E_VECTOR_LENGTH_ERROR; - - rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl); - } - - if(rcode == POSITIVE_ACK) - { - /* Let Sender Know if MVID length field - * is larger then SUM of SV length's - */ - if(vlen != 0) - rcode = E_VECTOR_LENGTH_ERROR; - else - { - /* Let Sender Know if Expected SV Missing */ - if((svectors & R_TX_FORWARD) ^ R_TX_FORWARD) - rcode = E_MISSING_SUB_VECTOR; - } - } - - return rcode; -} - -static int smctr_rcv_rq_addr_state_attch(struct net_device *dev, - MAC_HEADER *rmf, __u16 *correlator) -{ - MAC_SUB_VECTOR *rsv; - signed short vlen; - __u16 rcode = POSITIVE_ACK; - unsigned int svectors = F_NO_SUB_VECTORS_FOUND; - - /* Remove MVID Length from total length */ - vlen = (signed short)rmf->vl - 4; - - /* Point to First SVID */ - rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER)); - - /* Search for Appropriate SVID's */ - while((vlen > 0) && (rcode == POSITIVE_ACK)) - { - switch(rsv->svi) - { - case CORRELATOR: - svectors |= F_CORRELATOR; - rcode = smctr_set_corr(dev, rsv, correlator); - break; - - default: - rcode = E_SUB_VECTOR_UNKNOWN; - break; - } - - /* Let Sender Know if SUM of SV length's is - * larger then length in MVID length field - */ - if((vlen -= rsv->svl) < 0) - rcode = E_VECTOR_LENGTH_ERROR; - - rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl); - } - - 
if(rcode == POSITIVE_ACK) - { - /* Let Sender Know if MVID length field - * is larger then SUM of SV length's - */ - if(vlen != 0) - rcode = E_VECTOR_LENGTH_ERROR; - else - { - /* Let Sender Know if Expected SVID Missing */ - if((svectors & R_RQ_ATTCH_STATE_ADDR) - ^ R_RQ_ATTCH_STATE_ADDR) - rcode = E_MISSING_SUB_VECTOR; - } - } - - return rcode; -} - -static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf, - __u16 *correlator) -{ - MAC_SUB_VECTOR *rsv; - signed short vlen; - - *correlator = 0; - - /* Remove MVID Length from total length */ - vlen = (signed short)rmf->vl - 4; - - /* Point to First SVID */ - rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER)); - - /* Search for CORRELATOR for RSP to UNKNOWN */ - while((vlen > 0) && (*correlator == 0)) - { - switch(rsv->svi) - { - case CORRELATOR: - smctr_set_corr(dev, rsv, correlator); - break; - - default: - break; - } - - vlen -= rsv->svl; - rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl); - } - - return E_UNRECOGNIZED_VECTOR_ID; -} - -/* - * Reset the 825 NIC and exit w: - * 1. The NIC reset cleared (non-reset state), halted and un-initialized. - * 2. TINT masked. - * 3. CBUSY masked. - * 4. TINT clear. - * 5. CBUSY clear. - */ -static int smctr_reset_adapter(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - /* Reseting the NIC will put it in a halted and un-initialized state. */ smctr_set_trc_reset(ioaddr); - mdelay(200); /* ~2 ms */ - - smctr_clear_trc_reset(ioaddr); - mdelay(200); /* ~2 ms */ - - /* Remove any latched interrupts that occurred prior to reseting the - * adapter or possibily caused by line glitches due to the reset. - */ - outb(tp->trc_mask | CSR_CLRTINT | CSR_CLRCBUSY, ioaddr + CSR); - - return 0; -} - -static int smctr_restart_tx_chain(struct net_device *dev, short queue) -{ - struct net_local *tp = netdev_priv(dev); - int err = 0; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_restart_tx_chain\n", dev->name); - - if(tp->num_tx_fcbs_used[queue] != 0 && - tp->tx_queue_status[queue] == NOT_TRANSMITING) - { - tp->tx_queue_status[queue] = TRANSMITING; - err = smctr_issue_resume_tx_fcb_cmd(dev, queue); - } - - return err; -} - -static int smctr_ring_status_chg(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_ring_status_chg\n", dev->name); - - /* Check for ring_status_flag: whenever MONITOR_STATE_BIT - * Bit is set, check value of monitor_state, only then we - * enable and start transmit/receive timeout (if and only - * if it is MS_ACTIVE_MONITOR_STATE or MS_STANDBY_MONITOR_STATE) - */ - if(tp->ring_status_flags == MONITOR_STATE_CHANGED) - { - if((tp->monitor_state == MS_ACTIVE_MONITOR_STATE) || - (tp->monitor_state == MS_STANDBY_MONITOR_STATE)) - { - tp->monitor_state_ready = 1; - } - else - { - /* if adapter is NOT in either active monitor - * or standby monitor state => Disable - * transmit/receive timeout. - */ - tp->monitor_state_ready = 0; - - /* Ring speed problem, switching to auto mode. 
*/ - if(tp->monitor_state == MS_MONITOR_FSM_INACTIVE && - !tp->cleanup) - { - printk(KERN_INFO "%s: Incorrect ring speed switching.\n", - dev->name); - smctr_set_ring_speed(dev); - } - } - } - - if(!(tp->ring_status_flags & RING_STATUS_CHANGED)) - return 0; - - switch(tp->ring_status) - { - case RING_RECOVERY: - printk(KERN_INFO "%s: Ring Recovery\n", dev->name); - break; - - case SINGLE_STATION: - printk(KERN_INFO "%s: Single Statinon\n", dev->name); - break; - - case COUNTER_OVERFLOW: - printk(KERN_INFO "%s: Counter Overflow\n", dev->name); - break; - - case REMOVE_RECEIVED: - printk(KERN_INFO "%s: Remove Received\n", dev->name); - break; - - case AUTO_REMOVAL_ERROR: - printk(KERN_INFO "%s: Auto Remove Error\n", dev->name); - break; - - case LOBE_WIRE_FAULT: - printk(KERN_INFO "%s: Lobe Wire Fault\n", dev->name); - break; - - case TRANSMIT_BEACON: - printk(KERN_INFO "%s: Transmit Beacon\n", dev->name); - break; - - case SOFT_ERROR: - printk(KERN_INFO "%s: Soft Error\n", dev->name); - break; - - case HARD_ERROR: - printk(KERN_INFO "%s: Hard Error\n", dev->name); - break; - - case SIGNAL_LOSS: - printk(KERN_INFO "%s: Signal Loss\n", dev->name); - break; - - default: - printk(KERN_INFO "%s: Unknown ring status change\n", - dev->name); - break; - } - - return 0; -} - -static int smctr_rx_frame(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - __u16 queue, status, rx_size, err = 0; - __u8 *pbuff; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_rx_frame\n", dev->name); - - queue = tp->receive_queue_number; - - while((status = tp->rx_fcb_curr[queue]->frame_status) != SUCCESS) - { - err = HARDWARE_FAILED; - - if(((status & 0x007f) == 0) || - ((tp->receive_mask & ACCEPT_ERR_PACKETS) != 0)) - { - /* frame length less the CRC (4 bytes) + FS (1 byte) */ - rx_size = tp->rx_fcb_curr[queue]->frame_length - 5; - - pbuff = smctr_get_rx_pointer(dev, queue); - - smctr_set_page(dev, pbuff); - smctr_disable_16bit(dev); - - /* pbuff points to addr within one page */ - pbuff = (__u8 *)PAGE_POINTER(pbuff); - - if(queue == NON_MAC_QUEUE) - { - struct sk_buff *skb; - - skb = dev_alloc_skb(rx_size); - if (skb) { - skb_put(skb, rx_size); - - skb_copy_to_linear_data(skb, pbuff, rx_size); - - /* Update Counters */ - tp->MacStat.rx_packets++; - tp->MacStat.rx_bytes += skb->len; - - /* Kick the packet on up. */ - skb->protocol = tr_type_trans(skb, dev); - netif_rx(skb); - } else { - } - } - else - smctr_process_rx_packet((MAC_HEADER *)pbuff, - rx_size, dev, status); - } - - smctr_enable_16bit(dev); - smctr_set_page(dev, (__u8 *)tp->ram_access); - smctr_update_rx_chain(dev, queue); - - if(err != SUCCESS) - break; - } - - return err; -} - -static int smctr_send_dat(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int i, err; - MAC_HEADER *tmf; - FCBlock *fcb; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_send_dat\n", dev->name); - - if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, - sizeof(MAC_HEADER))) == (FCBlock *)(-1L)) - { - return OUT_OF_RESOURCES; - } - - /* Initialize DAT Data Fields. */ - tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; - tmf->ac = MSB(AC_FC_DAT); - tmf->fc = LSB(AC_FC_DAT); - - for(i = 0; i < 6; i++) - { - tmf->sa[i] = dev->dev_addr[i]; - tmf->da[i] = dev->dev_addr[i]; - - } - - tmf->vc = DAT; - tmf->dc_sc = DC_RS | SC_RS; - tmf->vl = 4; - tmf->vl = SWAP_BYTES(tmf->vl); - - /* Start Transmit. 
*/ - if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE))) - return err; - - /* Wait for Transmit to Complete */ - for(i = 0; i < 10000; i++) - { - if(fcb->frame_status & FCB_COMMAND_DONE) - break; - mdelay(1); - } - - /* Check if GOOD frame Tx'ed. */ - if(!(fcb->frame_status & FCB_COMMAND_DONE) || - fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS)) - { - return INITIALIZE_FAILED; - } - - /* De-allocated Tx FCB and Frame Buffer - * The FCB must be de-allocated manually if executing with - * interrupts disabled, other wise the ISR (LM_Service_Events) - * will de-allocate it when the interrupt occurs. - */ - tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING; - smctr_update_tx_chain(dev, fcb, MAC_QUEUE); - - return 0; -} - -static void smctr_timeout(struct net_device *dev) -{ - /* - * If we get here, some higher level has decided we are broken. - * There should really be a "kick me" function call instead. - * - * Resetting the token ring adapter takes a long time so just - * fake transmission time and go on trying. Our own timeout - * routine is in sktr_timer_chk() - */ - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue(dev); -} - -/* - * Gets skb from system, queues it and checks if it can be sent - */ -static netdev_tx_t smctr_send_packet(struct sk_buff *skb, - struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_send_packet\n", dev->name); - - /* - * Block a transmit overlap - */ - - netif_stop_queue(dev); - - if(tp->QueueSkb == 0) - return NETDEV_TX_BUSY; /* Return with tbusy set: queue full */ - - tp->QueueSkb--; - skb_queue_tail(&tp->SendSkbQueue, skb); - smctr_hardware_send_packet(dev, tp); - if(tp->QueueSkb > 0) - netif_wake_queue(dev); - - return NETDEV_TX_OK; -} - -static int smctr_send_lobe_media_test(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - MAC_SUB_VECTOR *tsv; - MAC_HEADER *tmf; - FCBlock *fcb; - __u32 i; - int err; - - if(smctr_debug > 15) - printk(KERN_DEBUG "%s: smctr_send_lobe_media_test\n", dev->name); - - if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(struct trh_hdr) - + S_WRAP_DATA + S_WRAP_DATA)) == (FCBlock *)(-1L)) - { - return OUT_OF_RESOURCES; - } - - /* Initialize DAT Data Fields. */ - tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; - tmf->ac = MSB(AC_FC_LOBE_MEDIA_TEST); - tmf->fc = LSB(AC_FC_LOBE_MEDIA_TEST); - - for(i = 0; i < 6; i++) - { - tmf->da[i] = 0; - tmf->sa[i] = dev->dev_addr[i]; - } - - tmf->vc = LOBE_MEDIA_TEST; - tmf->dc_sc = DC_RS | SC_RS; - tmf->vl = 4; - - tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); - smctr_make_wrap_data(dev, tsv); - tmf->vl += tsv->svl; - - tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); - smctr_make_wrap_data(dev, tsv); - tmf->vl += tsv->svl; - - /* Start Transmit. */ - tmf->vl = SWAP_BYTES(tmf->vl); - if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE))) - return err; - - /* Wait for Transmit to Complete. (10 ms). */ - for(i=0; i < 10000; i++) - { - if(fcb->frame_status & FCB_COMMAND_DONE) - break; - mdelay(1); - } - - /* Check if GOOD frame Tx'ed */ - if(!(fcb->frame_status & FCB_COMMAND_DONE) || - fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS)) - { - return LOBE_MEDIA_TEST_FAILED; - } - - /* De-allocated Tx FCB and Frame Buffer - * The FCB must be de-allocated manually if executing with - * interrupts disabled, other wise the ISR (LM_Service_Events) - * will de-allocate it when the interrupt occurs. 
- */ - tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING; - smctr_update_tx_chain(dev, fcb, MAC_QUEUE); - - return 0; -} - -static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf, - __u16 correlator) -{ - MAC_HEADER *tmf; - MAC_SUB_VECTOR *tsv; - FCBlock *fcb; - - if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) - + S_CORRELATOR + S_PHYSICAL_DROP + S_UPSTREAM_NEIGHBOR_ADDRESS - + S_ADDRESS_MODIFER + S_GROUP_ADDRESS + S_FUNCTIONAL_ADDRESS)) - == (FCBlock *)(-1L)) - { - return 0; - } - - tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; - tmf->vc = RPT_ADDR; - tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4; - tmf->vl = 4; - - smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_ADDR); - - tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); - smctr_make_corr(dev, tsv, correlator); - - tmf->vl += tsv->svl; - tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); - smctr_make_phy_drop_num(dev, tsv); - - tmf->vl += tsv->svl; - tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); - smctr_make_upstream_neighbor_addr(dev, tsv); - - tmf->vl += tsv->svl; - tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); - smctr_make_addr_mod(dev, tsv); - - tmf->vl += tsv->svl; - tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); - smctr_make_group_addr(dev, tsv); - - tmf->vl += tsv->svl; - tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); - smctr_make_funct_addr(dev, tsv); - - tmf->vl += tsv->svl; - - /* Subtract out MVID and MVL which is - * include in both vl and MAC_HEADER - */ -/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4; - fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4; -*/ - tmf->vl = SWAP_BYTES(tmf->vl); - - return smctr_trc_send_packet(dev, fcb, MAC_QUEUE); -} - -static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf, - __u16 correlator) -{ - MAC_HEADER *tmf; - MAC_SUB_VECTOR *tsv; - FCBlock *fcb; - - if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) - + S_CORRELATOR + S_PRODUCT_INSTANCE_ID + S_FUNCTIONAL_ADDRESS - + S_AUTHORIZED_FUNCTION_CLASS + S_AUTHORIZED_ACCESS_PRIORITY)) - == (FCBlock *)(-1L)) - { - return 0; - } - - tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; - tmf->vc = RPT_ATTCH; - tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4; - tmf->vl = 4; - - smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_ATTCH); - - tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); - smctr_make_corr(dev, tsv, correlator); - - tmf->vl += tsv->svl; - tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); - smctr_make_product_id(dev, tsv); - - tmf->vl += tsv->svl; - tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); - smctr_make_funct_addr(dev, tsv); - - tmf->vl += tsv->svl; - tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); - smctr_make_auth_funct_class(dev, tsv); - - tmf->vl += tsv->svl; - tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); - smctr_make_access_pri(dev, tsv); - - tmf->vl += tsv->svl; - - /* Subtract out MVID and MVL which is - * include in both vl and MAC_HEADER - */ -/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4; - fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4; -*/ - tmf->vl = SWAP_BYTES(tmf->vl); - - return smctr_trc_send_packet(dev, fcb, MAC_QUEUE); -} - -static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf, - __u16 correlator) -{ - MAC_HEADER *tmf; - MAC_SUB_VECTOR *tsv; - FCBlock *fcb; - - if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) - + S_CORRELATOR + S_RING_STATION_VERSION_NUMBER - + S_RING_STATION_STATUS + S_STATION_IDENTIFER)) - == (FCBlock *)(-1L)) - { - return 0; - } - - tmf = 
(MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; - tmf->vc = RPT_STATE; - tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4; - tmf->vl = 4; - - smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_STATE); - - tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); - smctr_make_corr(dev, tsv, correlator); - - tmf->vl += tsv->svl; - tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); - smctr_make_ring_station_version(dev, tsv); - - tmf->vl += tsv->svl; - tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); - smctr_make_ring_station_status(dev, tsv); - - tmf->vl += tsv->svl; - tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); - smctr_make_station_id(dev, tsv); - - tmf->vl += tsv->svl; - - /* Subtract out MVID and MVL which is - * include in both vl and MAC_HEADER - */ -/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4; - fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4; -*/ - tmf->vl = SWAP_BYTES(tmf->vl); - - return smctr_trc_send_packet(dev, fcb, MAC_QUEUE); -} - -static int smctr_send_rpt_tx_forward(struct net_device *dev, - MAC_HEADER *rmf, __u16 tx_fstatus) -{ - MAC_HEADER *tmf; - MAC_SUB_VECTOR *tsv; - FCBlock *fcb; - - if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) - + S_TRANSMIT_STATUS_CODE)) == (FCBlock *)(-1L)) - { - return 0; - } - - tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; - tmf->vc = RPT_TX_FORWARD; - tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4; - tmf->vl = 4; - - smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_TX_FORWARD); - - tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); - smctr_make_tx_status_code(dev, tsv, tx_fstatus); - - tmf->vl += tsv->svl; - - /* Subtract out MVID and MVL which is - * include in both vl and MAC_HEADER - */ -/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4; - fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4; -*/ - tmf->vl = SWAP_BYTES(tmf->vl); - - return smctr_trc_send_packet(dev, fcb, MAC_QUEUE); -} - -static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf, - __u16 rcode, __u16 correlator) -{ - MAC_HEADER *tmf; - MAC_SUB_VECTOR *tsv; - FCBlock *fcb; - - if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) - + S_CORRELATOR + S_RESPONSE_CODE)) == (FCBlock *)(-1L)) - { - return 0; - } - - tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; - tmf->vc = RSP; - tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4; - tmf->vl = 4; - - smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RSP); - - tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); - smctr_make_corr(dev, tsv, correlator); - - return 0; -} - -static int smctr_send_rq_init(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - MAC_HEADER *tmf; - MAC_SUB_VECTOR *tsv; - FCBlock *fcb; - unsigned int i, count = 0; - __u16 fstatus; - int err; - - do { - if(((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) - + S_PRODUCT_INSTANCE_ID + S_UPSTREAM_NEIGHBOR_ADDRESS - + S_RING_STATION_VERSION_NUMBER + S_ADDRESS_MODIFER)) - == (FCBlock *)(-1L))) - { - return 0; - } - - tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; - tmf->vc = RQ_INIT; - tmf->dc_sc = DC_RPS | SC_RS; - tmf->vl = 4; - - smctr_make_8025_hdr(dev, NULL, tmf, AC_FC_RQ_INIT); - - tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); - smctr_make_product_id(dev, tsv); - - tmf->vl += tsv->svl; - tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); - smctr_make_upstream_neighbor_addr(dev, tsv); - - tmf->vl += tsv->svl; - tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl); - smctr_make_ring_station_version(dev, tsv); - - tmf->vl += tsv->svl; - tsv = (MAC_SUB_VECTOR *)((__u32)tsv + 
tsv->svl); - smctr_make_addr_mod(dev, tsv); - - tmf->vl += tsv->svl; - - /* Subtract out MVID and MVL which is - * include in both vl and MAC_HEADER - */ -/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4; - fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4; -*/ - tmf->vl = SWAP_BYTES(tmf->vl); - - if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE))) - return err; - - /* Wait for Transmit to Complete */ - for(i = 0; i < 10000; i++) - { - if(fcb->frame_status & FCB_COMMAND_DONE) - break; - mdelay(1); - } - - /* Check if GOOD frame Tx'ed */ - fstatus = fcb->frame_status; - - if(!(fstatus & FCB_COMMAND_DONE)) - return HARDWARE_FAILED; - - if(!(fstatus & FCB_TX_STATUS_E)) - count++; - - /* De-allocated Tx FCB and Frame Buffer - * The FCB must be de-allocated manually if executing with - * interrupts disabled, other wise the ISR (LM_Service_Events) - * will de-allocate it when the interrupt occurs. - */ - tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING; - smctr_update_tx_chain(dev, fcb, MAC_QUEUE); - } while(count < 4 && ((fstatus & FCB_TX_AC_BITS) ^ FCB_TX_AC_BITS)); - - return smctr_join_complete_state(dev); -} - -static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf, - __u16 *tx_fstatus) -{ - struct net_local *tp = netdev_priv(dev); - FCBlock *fcb; - unsigned int i; - int err; - - /* Check if this is the END POINT of the Transmit Forward Chain. */ - if(rmf->vl <= 18) - return 0; - - /* Allocate Transmit FCB only by requesting 0 bytes - * of data buffer. - */ - if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, 0)) == (FCBlock *)(-1L)) - return 0; - - /* Set pointer to Transmit Frame Buffer to the data - * portion of the received TX Forward frame, making - * sure to skip over the Vector Code (vc) and Vector - * length (vl). 
- */ - fcb->bdb_ptr->trc_data_block_ptr = TRC_POINTER((__u32)rmf - + sizeof(MAC_HEADER) + 2); - fcb->bdb_ptr->data_block_ptr = (__u16 *)((__u32)rmf - + sizeof(MAC_HEADER) + 2); - - fcb->frame_length = rmf->vl - 4 - 2; - fcb->bdb_ptr->buffer_length = rmf->vl - 4 - 2; - - if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE))) - return err; - - /* Wait for Transmit to Complete */ - for(i = 0; i < 10000; i++) - { - if(fcb->frame_status & FCB_COMMAND_DONE) - break; - mdelay(1); - } - - /* Check if GOOD frame Tx'ed */ - if(!(fcb->frame_status & FCB_COMMAND_DONE)) - { - if((err = smctr_issue_resume_tx_fcb_cmd(dev, MAC_QUEUE))) - return err; - - for(i = 0; i < 10000; i++) - { - if(fcb->frame_status & FCB_COMMAND_DONE) - break; - mdelay(1); - } - - if(!(fcb->frame_status & FCB_COMMAND_DONE)) - return HARDWARE_FAILED; - } - - *tx_fstatus = fcb->frame_status; - - return A_FRAME_WAS_FORWARDED; -} - -static int smctr_set_auth_access_pri(struct net_device *dev, - MAC_SUB_VECTOR *rsv) -{ - struct net_local *tp = netdev_priv(dev); - - if(rsv->svl != S_AUTHORIZED_ACCESS_PRIORITY) - return E_SUB_VECTOR_LENGTH_ERROR; - - tp->authorized_access_priority = (rsv->svv[0] << 8 | rsv->svv[1]); - - return POSITIVE_ACK; -} - -static int smctr_set_auth_funct_class(struct net_device *dev, - MAC_SUB_VECTOR *rsv) -{ - struct net_local *tp = netdev_priv(dev); - - if(rsv->svl != S_AUTHORIZED_FUNCTION_CLASS) - return E_SUB_VECTOR_LENGTH_ERROR; - - tp->authorized_function_classes = (rsv->svv[0] << 8 | rsv->svv[1]); - - return POSITIVE_ACK; -} - -static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv, - __u16 *correlator) -{ - if(rsv->svl != S_CORRELATOR) - return E_SUB_VECTOR_LENGTH_ERROR; - - *correlator = (rsv->svv[0] << 8 | rsv->svv[1]); - - return POSITIVE_ACK; -} - -static int smctr_set_error_timer_value(struct net_device *dev, - MAC_SUB_VECTOR *rsv) -{ - __u16 err_tval; - int err; - - if(rsv->svl != S_ERROR_TIMER_VALUE) - return E_SUB_VECTOR_LENGTH_ERROR; - - err_tval = (rsv->svv[0] << 8 | rsv->svv[1])*10; - - smctr_issue_write_word_cmd(dev, RW_TER_THRESHOLD, &err_tval); - - if((err = smctr_wait_cmd(dev))) - return err; - - return POSITIVE_ACK; -} - -static int smctr_set_frame_forward(struct net_device *dev, - MAC_SUB_VECTOR *rsv, __u8 dc_sc) -{ - if((rsv->svl < 2) || (rsv->svl > S_FRAME_FORWARD)) - return E_SUB_VECTOR_LENGTH_ERROR; - - if((dc_sc & DC_MASK) != DC_CRS) - { - if(rsv->svl >= 2 && rsv->svl < 20) - return E_TRANSMIT_FORWARD_INVALID; - - if((rsv->svv[0] != 0) || (rsv->svv[1] != 0)) - return E_TRANSMIT_FORWARD_INVALID; - } - - return POSITIVE_ACK; -} - -static int smctr_set_local_ring_num(struct net_device *dev, - MAC_SUB_VECTOR *rsv) -{ - struct net_local *tp = netdev_priv(dev); - - if(rsv->svl != S_LOCAL_RING_NUMBER) - return E_SUB_VECTOR_LENGTH_ERROR; - - if(tp->ptr_local_ring_num) - *(__u16 *)(tp->ptr_local_ring_num) - = (rsv->svv[0] << 8 | rsv->svv[1]); - - return POSITIVE_ACK; -} - -static unsigned short smctr_set_ctrl_attention(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - if(tp->bic_type == BIC_585_CHIP) - outb((tp->trc_mask | HWR_CA), ioaddr + HWR); - else - { - outb((tp->trc_mask | CSR_CA), ioaddr + CSR); - outb(tp->trc_mask, ioaddr + CSR); - } - - return 0; -} - -static void smctr_set_multicast_list(struct net_device *dev) -{ - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_set_multicast_list\n", dev->name); -} - -static int smctr_set_page(struct net_device *dev, __u8 *buf) -{ - struct net_local *tp = netdev_priv(dev); - 
__u8 amask; - __u32 tptr; - - tptr = (__u32)buf - (__u32)tp->ram_access; - amask = (__u8)((tptr & PR_PAGE_MASK) >> 8); - outb(amask, dev->base_addr + PR); - - return 0; -} - -static int smctr_set_phy_drop(struct net_device *dev, MAC_SUB_VECTOR *rsv) -{ - int err; - - if(rsv->svl != S_PHYSICAL_DROP) - return E_SUB_VECTOR_LENGTH_ERROR; - - smctr_issue_write_byte_cmd(dev, RW_PHYSICAL_DROP_NUMBER, &rsv->svv[0]); - if((err = smctr_wait_cmd(dev))) - return err; - - return POSITIVE_ACK; -} - -/* Reset the ring speed to the opposite of what it was. This auto-pilot - * mode requires a complete reset and re-init of the adapter. - */ -static int smctr_set_ring_speed(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - int err; - - if(tp->media_type == MEDIA_UTP_16) - tp->media_type = MEDIA_UTP_4; - else - tp->media_type = MEDIA_UTP_16; - - smctr_enable_16bit(dev); - - /* Re-Initialize adapter's internal registers */ - smctr_reset_adapter(dev); - - if((err = smctr_init_card_real(dev))) - return err; - - smctr_enable_bic_int(dev); - - if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK))) - return err; - - smctr_disable_16bit(dev); - - return 0; -} - -static int smctr_set_rx_look_ahead(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - __u16 sword, rword; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_set_rx_look_ahead_flag\n", dev->name); - - tp->adapter_flags &= ~(FORCED_16BIT_MODE); - tp->adapter_flags |= RX_VALID_LOOKAHEAD; - - if(tp->adapter_bus == BUS_ISA16_TYPE) - { - sword = *((__u16 *)(tp->ram_access)); - *((__u16 *)(tp->ram_access)) = 0x1234; - - smctr_disable_16bit(dev); - rword = *((__u16 *)(tp->ram_access)); - smctr_enable_16bit(dev); - - if(rword != 0x1234) - tp->adapter_flags |= FORCED_16BIT_MODE; - - *((__u16 *)(tp->ram_access)) = sword; - } - - return 0; -} - -static int smctr_set_trc_reset(int ioaddr) -{ - __u8 r; - - r = inb(ioaddr + MSR); - outb(MSR_RST | r, ioaddr + MSR); - - return 0; -} - -/* - * This function can be called if the adapter is busy or not. - */ -static int smctr_setup_single_cmd(struct net_device *dev, - __u16 command, __u16 subcommand) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int err; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_setup_single_cmd\n", dev->name); - - if((err = smctr_wait_while_cbusy(dev))) - return err; - - if((err = (unsigned int)smctr_wait_cmd(dev))) - return err; - - tp->acb_head->cmd_done_status = 0; - tp->acb_head->cmd = command; - tp->acb_head->subcmd = subcommand; - - err = smctr_issue_resume_acb_cmd(dev); - - return err; -} - -/* - * This function can not be called with the adapter busy. 
- */ -static int smctr_setup_single_cmd_w_data(struct net_device *dev, - __u16 command, __u16 subcommand) -{ - struct net_local *tp = netdev_priv(dev); - - tp->acb_head->cmd_done_status = ACB_COMMAND_NOT_DONE; - tp->acb_head->cmd = command; - tp->acb_head->subcmd = subcommand; - tp->acb_head->data_offset_lo - = (__u16)TRC_POINTER(tp->misc_command_data); - - return smctr_issue_resume_acb_cmd(dev); -} - -static char *smctr_malloc(struct net_device *dev, __u16 size) -{ - struct net_local *tp = netdev_priv(dev); - char *m; - - m = (char *)(tp->ram_access + tp->sh_mem_used); - tp->sh_mem_used += (__u32)size; - - return m; -} - -static int smctr_status_chg(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_status_chg\n", dev->name); - - switch(tp->status) - { - case OPEN: - break; - - case CLOSED: - break; - - /* Interrupt driven open() completion. XXX */ - case INITIALIZED: - tp->group_address_0 = 0; - tp->group_address[0] = 0; - tp->group_address[1] = 0; - tp->functional_address_0 = 0; - tp->functional_address[0] = 0; - tp->functional_address[1] = 0; - smctr_open_tr(dev); - break; - - default: - printk(KERN_INFO "%s: status change unknown %x\n", - dev->name, tp->status); - break; - } - - return 0; -} - -static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb, - __u16 queue) -{ - struct net_local *tp = netdev_priv(dev); - int err = 0; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_trc_send_packet\n", dev->name); - - fcb->info = FCB_CHAIN_END | FCB_ENABLE_TFS; - if(tp->num_tx_fcbs[queue] != 1) - fcb->back_ptr->info = FCB_INTERRUPT_ENABLE | FCB_ENABLE_TFS; - - if(tp->tx_queue_status[queue] == NOT_TRANSMITING) - { - tp->tx_queue_status[queue] = TRANSMITING; - err = smctr_issue_resume_tx_fcb_cmd(dev, queue); - } - - return err; -} - -static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue) -{ - struct net_local *tp = netdev_priv(dev); - __u16 status, err = 0; - int cstatus; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_tx_complete\n", dev->name); - - while((status = tp->tx_fcb_end[queue]->frame_status) != SUCCESS) - { - if(status & 0x7e00 ) - { - err = HARDWARE_FAILED; - break; - } - - if((err = smctr_update_tx_chain(dev, tp->tx_fcb_end[queue], - queue)) != SUCCESS) - break; - - smctr_disable_16bit(dev); - - if(tp->mode_bits & UMAC) - { - if(!(status & (FCB_TX_STATUS_AR1 | FCB_TX_STATUS_AR2))) - cstatus = NO_SUCH_DESTINATION; - else - { - if(!(status & (FCB_TX_STATUS_CR1 | FCB_TX_STATUS_CR2))) - cstatus = DEST_OUT_OF_RESOURCES; - else - { - if(status & FCB_TX_STATUS_E) - cstatus = MAX_COLLISIONS; - else - cstatus = SUCCESS; - } - } - } - else - cstatus = SUCCESS; - - if(queue == BUG_QUEUE) - err = SUCCESS; - - smctr_enable_16bit(dev); - if(err != SUCCESS) - break; - } - - return err; -} - -static unsigned short smctr_tx_move_frame(struct net_device *dev, - struct sk_buff *skb, __u8 *pbuff, unsigned int bytes) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int ram_usable; - __u32 flen, len, offset = 0; - __u8 *frag, *page; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_tx_move_frame\n", dev->name); - - ram_usable = ((unsigned int)tp->ram_usable) << 10; - frag = skb->data; - flen = skb->len; - - while(flen > 0 && bytes > 0) - { - smctr_set_page(dev, pbuff); - - offset = SMC_PAGE_OFFSET(pbuff); - - if(offset + flen > ram_usable) - len = ram_usable - offset; - else - len = flen; - - if(len > bytes) - len = bytes; - - page = (char *) (offset + tp->ram_access); - 
memcpy(page, frag, len); - - flen -=len; - bytes -= len; - frag += len; - pbuff += len; - } - - return 0; -} - -/* Update the error statistic counters for this adapter. */ -static int smctr_update_err_stats(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - struct tr_statistics *tstat = &tp->MacStat; - - if(tstat->internal_errors) - tstat->internal_errors - += *(tp->misc_command_data + 0) & 0x00ff; - - if(tstat->line_errors) - tstat->line_errors += *(tp->misc_command_data + 0) >> 8; - - if(tstat->A_C_errors) - tstat->A_C_errors += *(tp->misc_command_data + 1) & 0x00ff; - - if(tstat->burst_errors) - tstat->burst_errors += *(tp->misc_command_data + 1) >> 8; - - if(tstat->abort_delimiters) - tstat->abort_delimiters += *(tp->misc_command_data + 2) >> 8; - - if(tstat->recv_congest_count) - tstat->recv_congest_count - += *(tp->misc_command_data + 3) & 0x00ff; - - if(tstat->lost_frames) - tstat->lost_frames - += *(tp->misc_command_data + 3) >> 8; - - if(tstat->frequency_errors) - tstat->frequency_errors += *(tp->misc_command_data + 4) & 0x00ff; - - if(tstat->frame_copied_errors) - tstat->frame_copied_errors - += *(tp->misc_command_data + 4) >> 8; - - if(tstat->token_errors) - tstat->token_errors += *(tp->misc_command_data + 5) >> 8; - - return 0; -} - -static int smctr_update_rx_chain(struct net_device *dev, __u16 queue) -{ - struct net_local *tp = netdev_priv(dev); - FCBlock *fcb; - BDBlock *bdb; - __u16 size, len; - - fcb = tp->rx_fcb_curr[queue]; - len = fcb->frame_length; - - fcb->frame_status = 0; - fcb->info = FCB_CHAIN_END; - fcb->back_ptr->info = FCB_WARNING; - - tp->rx_fcb_curr[queue] = tp->rx_fcb_curr[queue]->next_ptr; - - /* update RX BDBs */ - size = (len >> RX_BDB_SIZE_SHIFT); - if(len & RX_DATA_BUFFER_SIZE_MASK) - size += sizeof(BDBlock); - size &= (~RX_BDB_SIZE_MASK); - - /* check if wrap around */ - bdb = (BDBlock *)((__u32)(tp->rx_bdb_curr[queue]) + (__u32)(size)); - if((__u32)bdb >= (__u32)tp->rx_bdb_end[queue]) - { - bdb = (BDBlock *)((__u32)(tp->rx_bdb_head[queue]) - + (__u32)(bdb) - (__u32)(tp->rx_bdb_end[queue])); - } - - bdb->back_ptr->info = BDB_CHAIN_END; - tp->rx_bdb_curr[queue]->back_ptr->info = BDB_NOT_CHAIN_END; - tp->rx_bdb_curr[queue] = bdb; - - return 0; -} - -static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb, - __u16 queue) -{ - struct net_local *tp = netdev_priv(dev); - - if(smctr_debug > 20) - printk(KERN_DEBUG "smctr_update_tx_chain\n"); - - if(tp->num_tx_fcbs_used[queue] <= 0) - return HARDWARE_FAILED; - else - { - if(tp->tx_buff_used[queue] < fcb->memory_alloc) - { - tp->tx_buff_used[queue] = 0; - return HARDWARE_FAILED; - } - - tp->tx_buff_used[queue] -= fcb->memory_alloc; - - /* if all transmit buffer are cleared - * need to set the tx_buff_curr[] to tx_buff_head[] - * otherwise, tx buffer will be segregate and cannot - * accommodate and buffer greater than (curr - head) and - * (end - curr) since we do not allow wrap around allocation. 
- */ - if(tp->tx_buff_used[queue] == 0) - tp->tx_buff_curr[queue] = tp->tx_buff_head[queue]; - - tp->num_tx_fcbs_used[queue]--; - fcb->frame_status = 0; - tp->tx_fcb_end[queue] = fcb->next_ptr; - netif_wake_queue(dev); - return 0; - } -} - -static int smctr_wait_cmd(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int loop_count = 0x20000; - - if(smctr_debug > 10) - printk(KERN_DEBUG "%s: smctr_wait_cmd\n", dev->name); - - while(loop_count) - { - if(tp->acb_head->cmd_done_status & ACB_COMMAND_DONE) - break; - udelay(1); - loop_count--; - } - - if(loop_count == 0) - return HARDWARE_FAILED; - - if(tp->acb_head->cmd_done_status & 0xff) - return HARDWARE_FAILED; - - return 0; -} - -static int smctr_wait_while_cbusy(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int timeout = 0x20000; - int ioaddr = dev->base_addr; - __u8 r; - - if(tp->bic_type == BIC_585_CHIP) - { - while(timeout) - { - r = inb(ioaddr + HWR); - if((r & HWR_CBUSY) == 0) - break; - timeout--; - } - } - else - { - while(timeout) - { - r = inb(ioaddr + CSR); - if((r & CSR_CBUSY) == 0) - break; - timeout--; - } - } - - if(timeout) - return 0; - else - return HARDWARE_FAILED; -} - -#ifdef MODULE - -static struct net_device* dev_smctr[SMCTR_MAX_ADAPTERS]; -static int io[SMCTR_MAX_ADAPTERS]; -static int irq[SMCTR_MAX_ADAPTERS]; - -MODULE_LICENSE("GPL"); -MODULE_FIRMWARE("tr_smctr.bin"); - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param(ringspeed, int, 0); - -static struct net_device * __init setup_card(int n) -{ - struct net_device *dev = alloc_trdev(sizeof(struct net_local)); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - dev->irq = irq[n]; - err = smctr_probe1(dev, io[n]); - if (err) - goto out; - - err = register_netdev(dev); - if (err) - goto out1; - return dev; - out1: -#ifdef CONFIG_MCA_LEGACY - { struct net_local *tp = netdev_priv(dev); - if (tp->slot_num) - mca_mark_as_unused(tp->slot_num); - } -#endif - release_region(dev->base_addr, SMCTR_IO_EXTENT); - free_irq(dev->irq, dev); -out: - free_netdev(dev); - return ERR_PTR(err); -} - -int __init init_module(void) -{ - int i, found = 0; - struct net_device *dev; - - for(i = 0; i < SMCTR_MAX_ADAPTERS; i++) { - dev = io[0]? setup_card(i) : smctr_probe(-1); - if (!IS_ERR(dev)) { - ++found; - dev_smctr[i] = dev; - } - } - - return found ? 
0 : -ENODEV; -} - -void __exit cleanup_module(void) -{ - int i; - - for(i = 0; i < SMCTR_MAX_ADAPTERS; i++) { - struct net_device *dev = dev_smctr[i]; - - if (dev) { - - unregister_netdev(dev); -#ifdef CONFIG_MCA_LEGACY - { struct net_local *tp = netdev_priv(dev); - if (tp->slot_num) - mca_mark_as_unused(tp->slot_num); - } -#endif - release_region(dev->base_addr, SMCTR_IO_EXTENT); - if (dev->irq) - free_irq(dev->irq, dev); - - free_netdev(dev); - } - } -} -#endif /* MODULE */ diff --git a/drivers/net/tokenring/smctr.h b/drivers/net/tokenring/smctr.h deleted file mode 100644 index 6e5700ab4fc..00000000000 --- a/drivers/net/tokenring/smctr.h +++ /dev/null @@ -1,1585 +0,0 @@ -/* smctr.h: SMC Token Ring driver header for Linux - * - * Authors: - * - Jay Schulist <jschlst@samba.org> - */ - -#ifndef __LINUX_SMCTR_H -#define __LINUX_SMCTR_H - -#ifdef __KERNEL__ - -#define MAX_TX_QUEUE 10 - -#define SMC_HEADER_SIZE 14 - -#define SMC_PAGE_OFFSET(X) (((unsigned long)(X) - tp->ram_access) & tp->page_offset_mask) - -#define INIT 0x0D -#define RQ_ATTCH 0x10 -#define RQ_STATE 0x0F -#define RQ_ADDR 0x0E -#define CHG_PARM 0x0C -#define RSP 0x00 -#define TX_FORWARD 0x09 - -#define AC_FC_DAT ((3<<13) | 1) -#define DAT 0x07 - -#define RPT_NEW_MON 0x25 -#define RPT_SUA_CHG 0x26 -#define RPT_ACTIVE_ERR 0x28 -#define RPT_NN_INCMP 0x27 -#define RPT_ERROR 0x29 - -#define RQ_INIT 0x20 -#define RPT_ATTCH 0x24 -#define RPT_STATE 0x23 -#define RPT_ADDR 0x22 - -#define POSITIVE_ACK 0x0001 -#define A_FRAME_WAS_FORWARDED 0x8888 - -#define GROUP_ADDRESS 0x2B -#define PHYSICAL_DROP 0x0B -#define AUTHORIZED_ACCESS_PRIORITY 0x07 -#define AUTHORIZED_FUNCTION_CLASS 0x06 -#define FUNCTIONAL_ADDRESS 0x2C -#define RING_STATION_STATUS 0x29 -#define TRANSMIT_STATUS_CODE 0x2A -#define IBM_PASS_SOURCE_ADDR 0x01 -#define AC_FC_RPT_TX_FORWARD ((0<<13) | 0) -#define AC_FC_RPT_STATE ((0<<13) | 0) -#define AC_FC_RPT_ADDR ((0<<13) | 0) -#define CORRELATOR 0x09 - -#define POSITIVE_ACK 0x0001 /* */ -#define E_MAC_DATA_INCOMPLETE 0x8001 /* not used */ -#define E_VECTOR_LENGTH_ERROR 0x8002 /* */ -#define E_UNRECOGNIZED_VECTOR_ID 0x8003 /* */ -#define E_INAPPROPRIATE_SOURCE_CLASS 0x8004 /* */ -#define E_SUB_VECTOR_LENGTH_ERROR 0x8005 /* */ -#define E_TRANSMIT_FORWARD_INVALID 0x8006 /* def. 
by IBM */ -#define E_MISSING_SUB_VECTOR 0x8007 /* */ -#define E_SUB_VECTOR_UNKNOWN 0x8008 /* */ -#define E_MAC_HEADER_TOO_LONG 0x8009 /* */ -#define E_FUNCTION_DISABLED 0x800A /* not used */ - -#define A_FRAME_WAS_FORWARDED 0x8888 /* used by send_TX_FORWARD */ - -#define UPSTREAM_NEIGHBOR_ADDRESS 0x02 -#define LOCAL_RING_NUMBER 0x03 -#define ASSIGN_PHYSICAL_DROP 0x04 -#define ERROR_TIMER_VALUE 0x05 -#define AUTHORIZED_FUNCTION_CLASS 0x06 -#define AUTHORIZED_ACCESS_PRIORITY 0x07 -#define CORRELATOR 0x09 -#define PHYSICAL_DROP 0x0B -#define RESPONSE_CODE 0x20 -#define ADDRESS_MODIFER 0x21 -#define PRODUCT_INSTANCE_ID 0x22 -#define RING_STATION_VERSION_NUMBER 0x23 -#define WRAP_DATA 0x26 -#define FRAME_FORWARD 0x27 -#define STATION_IDENTIFER 0x28 -#define RING_STATION_STATUS 0x29 -#define TRANSMIT_STATUS_CODE 0x2A -#define GROUP_ADDRESS 0x2B -#define FUNCTIONAL_ADDRESS 0x2C - -#define F_NO_SUB_VECTORS_FOUND 0x0000 -#define F_UPSTREAM_NEIGHBOR_ADDRESS 0x0001 -#define F_LOCAL_RING_NUMBER 0x0002 -#define F_ASSIGN_PHYSICAL_DROP 0x0004 -#define F_ERROR_TIMER_VALUE 0x0008 -#define F_AUTHORIZED_FUNCTION_CLASS 0x0010 -#define F_AUTHORIZED_ACCESS_PRIORITY 0x0020 -#define F_CORRELATOR 0x0040 -#define F_PHYSICAL_DROP 0x0080 -#define F_RESPONSE_CODE 0x0100 -#define F_PRODUCT_INSTANCE_ID 0x0200 -#define F_RING_STATION_VERSION_NUMBER 0x0400 -#define F_STATION_IDENTIFER 0x0800 -#define F_RING_STATION_STATUS 0x1000 -#define F_GROUP_ADDRESS 0x2000 -#define F_FUNCTIONAL_ADDRESS 0x4000 -#define F_FRAME_FORWARD 0x8000 - -#define R_INIT 0x00 -#define R_RQ_ATTCH_STATE_ADDR 0x00 -#define R_CHG_PARM 0x00 -#define R_TX_FORWARD F_FRAME_FORWARD - - -#define UPSTREAM_NEIGHBOR_ADDRESS 0x02 -#define ADDRESS_MODIFER 0x21 -#define RING_STATION_VERSION_NUMBER 0x23 -#define PRODUCT_INSTANCE_ID 0x22 - -#define RPT_TX_FORWARD 0x2A - -#define AC_FC_INIT (3<<13) | 0 /* */ -#define AC_FC_RQ_INIT ((3<<13) | 0) /* */ -#define AC_FC_RQ_ATTCH (3<<13) | 0 /* DC = SC of rx frame */ -#define AC_FC_RQ_STATE (3<<13) | 0 /* DC = SC of rx frame */ -#define AC_FC_RQ_ADDR (3<<13) | 0 /* DC = SC of rx frame */ -#define AC_FC_CHG_PARM (3<<13) | 0 /* */ -#define AC_FC_RSP (0<<13) | 0 /* DC = SC of rx frame */ -#define AC_FC_RPT_ATTCH (0<<13) | 0 - -#define S_UPSTREAM_NEIGHBOR_ADDRESS 6 + 2 -#define S_LOCAL_RING_NUMBER 2 + 2 -#define S_ASSIGN_PHYSICAL_DROP 4 + 2 -#define S_ERROR_TIMER_VALUE 2 + 2 -#define S_AUTHORIZED_FUNCTION_CLASS 2 + 2 -#define S_AUTHORIZED_ACCESS_PRIORITY 2 + 2 -#define S_CORRELATOR 2 + 2 -#define S_PHYSICAL_DROP 4 + 2 -#define S_RESPONSE_CODE 4 + 2 -#define S_ADDRESS_MODIFER 2 + 2 -#define S_PRODUCT_INSTANCE_ID 18 + 2 -#define S_RING_STATION_VERSION_NUMBER 10 + 2 -#define S_STATION_IDENTIFER 6 + 2 -#define S_RING_STATION_STATUS 6 + 2 -#define S_GROUP_ADDRESS 4 + 2 -#define S_FUNCTIONAL_ADDRESS 4 + 2 -#define S_FRAME_FORWARD 252 + 2 -#define S_TRANSMIT_STATUS_CODE 2 + 2 - -#define ISB_IMC_RES0 0x0000 /* */ -#define ISB_IMC_MAC_TYPE_3 0x0001 /* MAC_ARC_INDICATE */ -#define ISB_IMC_MAC_ERROR_COUNTERS 0x0002 /* */ -#define ISB_IMC_RES1 0x0003 /* */ -#define ISB_IMC_MAC_TYPE_2 0x0004 /* QUE_MAC_INDICATE */ -#define ISB_IMC_TX_FRAME 0x0005 /* */ -#define ISB_IMC_END_OF_TX_QUEUE 0x0006 /* */ -#define ISB_IMC_NON_MAC_RX_RESOURCE 0x0007 /* */ -#define ISB_IMC_MAC_RX_RESOURCE 0x0008 /* */ -#define ISB_IMC_NON_MAC_RX_FRAME 0x0009 /* */ -#define ISB_IMC_MAC_RX_FRAME 0x000A /* */ -#define ISB_IMC_TRC_FIFO_STATUS 0x000B /* */ -#define ISB_IMC_COMMAND_STATUS 0x000C /* */ -#define ISB_IMC_MAC_TYPE_1 0x000D /* Self Removed */ -#define 
ISB_IMC_TRC_INTRNL_TST_STATUS 0x000E /* */ -#define ISB_IMC_RES2 0x000F /* */ - -#define NON_MAC_RX_RESOURCE_BW 0x10 /* shifted right 8 bits */ -#define NON_MAC_RX_RESOURCE_FW 0x20 /* shifted right 8 bits */ -#define NON_MAC_RX_RESOURCE_BE 0x40 /* shifted right 8 bits */ -#define NON_MAC_RX_RESOURCE_FE 0x80 /* shifted right 8 bits */ -#define RAW_NON_MAC_RX_RESOURCE_BW 0x1000 /* */ -#define RAW_NON_MAC_RX_RESOURCE_FW 0x2000 /* */ -#define RAW_NON_MAC_RX_RESOURCE_BE 0x4000 /* */ -#define RAW_NON_MAC_RX_RESOURCE_FE 0x8000 /* */ - -#define MAC_RX_RESOURCE_BW 0x10 /* shifted right 8 bits */ -#define MAC_RX_RESOURCE_FW 0x20 /* shifted right 8 bits */ -#define MAC_RX_RESOURCE_BE 0x40 /* shifted right 8 bits */ -#define MAC_RX_RESOURCE_FE 0x80 /* shifted right 8 bits */ -#define RAW_MAC_RX_RESOURCE_BW 0x1000 /* */ -#define RAW_MAC_RX_RESOURCE_FW 0x2000 /* */ -#define RAW_MAC_RX_RESOURCE_BE 0x4000 /* */ -#define RAW_MAC_RX_RESOURCE_FE 0x8000 /* */ - -#define TRC_FIFO_STATUS_TX_UNDERRUN 0x40 /* shifted right 8 bits */ -#define TRC_FIFO_STATUS_RX_OVERRUN 0x80 /* shifted right 8 bits */ -#define RAW_TRC_FIFO_STATUS_TX_UNDERRUN 0x4000 /* */ -#define RAW_TRC_FIFO_STATUS_RX_OVERRUN 0x8000 /* */ - -#define CSR_CLRTINT 0x08 - -#define MSB(X) ((__u8)((__u16) X >> 8)) -#define LSB(X) ((__u8)((__u16) X & 0xff)) - -#define AC_FC_LOBE_MEDIA_TEST ((3<<13) | 0) -#define S_WRAP_DATA 248 + 2 /* 500 + 2 */ -#define WRAP_DATA 0x26 -#define LOBE_MEDIA_TEST 0x08 - -/* Destination Class (dc) */ - -#define DC_MASK 0xF0 -#define DC_RS 0x00 -#define DC_CRS 0x40 -#define DC_RPS 0x50 -#define DC_REM 0x60 - -/* Source Classes (sc) */ - -#define SC_MASK 0x0F -#define SC_RS 0x00 -#define SC_CRS 0x04 -#define SC_RPS 0x05 -#define SC_REM 0x06 - -#define PR 0x11 -#define PR_PAGE_MASK 0x0C000 - -#define MICROCHANNEL 0x0008 -#define INTERFACE_CHIP 0x0010 -#define BOARD_16BIT 0x0040 -#define PAGED_RAM 0x0080 -#define WD8115TA (TOKEN_MEDIA | MICROCHANNEL | INTERFACE_CHIP | PAGED_RAM) -#define WD8115T (TOKEN_MEDIA | INTERFACE_CHIP | BOARD_16BIT | PAGED_RAM) - -#define BRD_ID_8316 0x50 - -#define r587_SER 0x001 -#define SER_DIN 0x80 -#define SER_DOUT 0x40 -#define SER_CLK 0x20 -#define SER_ECS 0x10 -#define SER_E806 0x08 -#define SER_PNP 0x04 -#define SER_BIO 0x02 -#define SER_16B 0x01 - -#define r587_IDR 0x004 -#define IDR_IRQ_MASK 0x0F0 -#define IDR_DCS_MASK 0x007 -#define IDR_RWS 0x008 - - -#define r587_BIO 0x003 -#define BIO_ENB 0x080 -#define BIO_MASK 0x03F - -#define r587_PCR 0x005 -#define PCR_RAMS 0x040 - - - -#define NUM_ADDR_BITS 8 - -#define ISA_MAX_ADDRESS 0x00ffffff - -#define SMCTR_MAX_ADAPTERS 7 - -#define MC_TABLE_ENTRIES 16 - -#define MAXFRAGMENTS 32 - -#define CHIP_REV_MASK 0x3000 - -#define MAX_TX_QS 8 -#define NUM_TX_QS_USED 3 - -#define MAX_RX_QS 2 -#define NUM_RX_QS_USED 2 - -#define INTEL_DATA_FORMAT 0x4000 -#define INTEL_ADDRESS_POINTER_FORMAT 0x8000 -#define PAGE_POINTER(X) ((((unsigned long)(X) - tp->ram_access) & tp->page_offset_mask) + tp->ram_access) -#define SWAP_WORDS(X) (((X & 0xFFFF) << 16) | (X >> 16)) - -#define INTERFACE_CHIP 0x0010 /* Soft Config Adapter */ -#define ADVANCED_FEATURES 0x0020 /* Adv. netw. 
interface features */ -#define BOARD_16BIT 0x0040 /* 16 bit capability */ -#define PAGED_RAM 0x0080 /* Adapter has paged RAM */ - -#define PAGED_ROM 0x0100 /* Adapter has paged ROM */ - -#define RAM_SIZE_UNKNOWN 0x0000 /* Unknown RAM size */ -#define RAM_SIZE_0K 0x0001 /* 0K RAM */ -#define RAM_SIZE_8K 0x0002 /* 8k RAM */ -#define RAM_SIZE_16K 0x0003 /* 16k RAM */ -#define RAM_SIZE_32K 0x0004 /* 32k RAM */ -#define RAM_SIZE_64K 0x0005 /* 64k RAM */ -#define RAM_SIZE_RESERVED_6 0x0006 /* Reserved RAM size */ -#define RAM_SIZE_RESERVED_7 0x0007 /* Reserved RAM size */ -#define RAM_SIZE_MASK 0x0007 /* Isolates RAM Size */ - -#define TOKEN_MEDIA 0x0005 - -#define BID_REG_0 0x00 -#define BID_REG_1 0x01 -#define BID_REG_2 0x02 -#define BID_REG_3 0x03 -#define BID_REG_4 0x04 -#define BID_REG_5 0x05 -#define BID_REG_6 0x06 -#define BID_REG_7 0x07 -#define BID_LAR_0 0x08 -#define BID_LAR_1 0x09 -#define BID_LAR_2 0x0A -#define BID_LAR_3 0x0B -#define BID_LAR_4 0x0C -#define BID_LAR_5 0x0D - -#define BID_BOARD_ID_BYTE 0x0E -#define BID_CHCKSM_BYTE 0x0F -#define BID_LAR_OFFSET 0x08 - -#define BID_MSZ_583_BIT 0x08 -#define BID_SIXTEEN_BIT_BIT 0x01 - -#define BID_BOARD_REV_MASK 0x1E - -#define BID_MEDIA_TYPE_BIT 0x01 -#define BID_SOFT_CONFIG_BIT 0x20 -#define BID_RAM_SIZE_BIT 0x40 -#define BID_BUS_TYPE_BIT 0x80 - -#define BID_CR 0x10 - -#define BID_TXP 0x04 /* Transmit Packet Command */ - -#define BID_TCR_DIFF 0x0D /* Transmit Configuration Register */ - -#define BID_TCR_VAL 0x18 /* Value to Test 8390 or 690 */ -#define BID_PS0 0x00 /* Register Page Select 0 */ -#define BID_PS1 0x40 /* Register Page Select 1 */ -#define BID_PS2 0x80 /* Register Page Select 2 */ -#define BID_PS_MASK 0x3F /* For Masking Off Page Select Bits */ - -#define BID_EEPROM_0 0x08 -#define BID_EEPROM_1 0x09 -#define BID_EEPROM_2 0x0A -#define BID_EEPROM_3 0x0B -#define BID_EEPROM_4 0x0C -#define BID_EEPROM_5 0x0D -#define BID_EEPROM_6 0x0E -#define BID_EEPROM_7 0x0F - -#define BID_OTHER_BIT 0x02 -#define BID_ICR_MASK 0x0C -#define BID_EAR_MASK 0x0F -#define BID_ENGR_PAGE 0x0A0 -#define BID_RLA 0x10 -#define BID_EA6 0x80 -#define BID_RECALL_DONE_MASK 0x10 -#define BID_BID_EEPROM_OVERRIDE 0xFFB0 -#define BID_EXTRA_EEPROM_OVERRIDE 0xFFD0 -#define BID_EEPROM_MEDIA_MASK 0x07 -#define BID_STARLAN_TYPE 0x00 -#define BID_ETHERNET_TYPE 0x01 -#define BID_TP_TYPE 0x02 -#define BID_EW_TYPE 0x03 -#define BID_TOKEN_RING_TYPE 0x04 -#define BID_UTP2_TYPE 0x05 -#define BID_EEPROM_IRQ_MASK 0x18 -#define BID_PRIMARY_IRQ 0x00 -#define BID_ALTERNATE_IRQ_1 0x08 -#define BID_ALTERNATE_IRQ_2 0x10 -#define BID_ALTERNATE_IRQ_3 0x18 -#define BID_EEPROM_RAM_SIZE_MASK 0xE0 -#define BID_EEPROM_RAM_SIZE_RES1 0x00 -#define BID_EEPROM_RAM_SIZE_RES2 0x20 -#define BID_EEPROM_RAM_SIZE_8K 0x40 -#define BID_EEPROM_RAM_SIZE_16K 0x60 -#define BID_EEPROM_RAM_SIZE_32K 0x80 -#define BID_EEPROM_RAM_SIZE_64K 0xA0 -#define BID_EEPROM_RAM_SIZE_RES3 0xC0 -#define BID_EEPROM_RAM_SIZE_RES4 0xE0 -#define BID_EEPROM_BUS_TYPE_MASK 0x07 -#define BID_EEPROM_BUS_TYPE_AT 0x00 -#define BID_EEPROM_BUS_TYPE_MCA 0x01 -#define BID_EEPROM_BUS_TYPE_EISA 0x02 -#define BID_EEPROM_BUS_TYPE_NEC 0x03 -#define BID_EEPROM_BUS_SIZE_MASK 0x18 -#define BID_EEPROM_BUS_SIZE_8BIT 0x00 -#define BID_EEPROM_BUS_SIZE_16BIT 0x08 -#define BID_EEPROM_BUS_SIZE_32BIT 0x10 -#define BID_EEPROM_BUS_SIZE_64BIT 0x18 -#define BID_EEPROM_BUS_MASTER 0x20 -#define BID_EEPROM_RAM_PAGING 0x40 -#define BID_EEPROM_ROM_PAGING 0x80 -#define BID_EEPROM_PAGING_MASK 0xC0 -#define BID_EEPROM_LOW_COST 0x08 -#define 
BID_EEPROM_IO_MAPPED 0x10 -#define BID_EEPROM_HMI 0x01 -#define BID_EEPROM_AUTO_MEDIA_DETECT 0x01 -#define BID_EEPROM_CHIP_REV_MASK 0x0C - -#define BID_EEPROM_LAN_ADDR 0x30 - -#define BID_EEPROM_MEDIA_OPTION 0x54 -#define BID_EEPROM_MEDIA_UTP 0x01 -#define BID_EEPROM_4MB_RING 0x08 -#define BID_EEPROM_16MB_RING 0x10 -#define BID_EEPROM_MEDIA_STP 0x40 - -#define BID_EEPROM_MISC_DATA 0x56 -#define BID_EEPROM_EARLY_TOKEN_RELEASE 0x02 - -#define CNFG_ID_8003E 0x6fc0 -#define CNFG_ID_8003S 0x6fc1 -#define CNFG_ID_8003W 0x6fc2 -#define CNFG_ID_8115TRA 0x6ec6 -#define CNFG_ID_8013E 0x61C8 -#define CNFG_ID_8013W 0x61C9 -#define CNFG_ID_BISTRO03E 0xEFE5 -#define CNFG_ID_BISTRO13E 0xEFD5 -#define CNFG_ID_BISTRO13W 0xEFD4 -#define CNFG_MSR_583 0x0 -#define CNFG_ICR_583 0x1 -#define CNFG_IAR_583 0x2 -#define CNFG_BIO_583 0x3 -#define CNFG_EAR_583 0x3 -#define CNFG_IRR_583 0x4 -#define CNFG_LAAR_584 0x5 -#define CNFG_GP2 0x7 -#define CNFG_LAAR_MASK 0x1F -#define CNFG_LAAR_ZWS 0x20 -#define CNFG_LAAR_L16E 0x40 -#define CNFG_ICR_IR2_584 0x04 -#define CNFG_ICR_MASK 0x08 -#define CNFG_ICR_MSZ 0x08 -#define CNFG_ICR_RLA 0x10 -#define CNFG_ICR_STO 0x80 -#define CNFG_IRR_IRQS 0x60 -#define CNFG_IRR_IEN 0x80 -#define CNFG_IRR_ZWS 0x01 -#define CNFG_GP2_BOOT_NIBBLE 0x0F -#define CNFG_IRR_OUT2 0x04 -#define CNFG_IRR_OUT1 0x02 - -#define CNFG_SIZE_8KB 8 -#define CNFG_SIZE_16KB 16 -#define CNFG_SIZE_32KB 32 -#define CNFG_SIZE_64KB 64 -#define CNFG_SIZE_128KB 128 -#define CNFG_SIZE_256KB 256 -#define ROM_DISABLE 0x0 - -#define CNFG_SLOT_ENABLE_BIT 0x08 - -#define CNFG_POS_CONTROL_REG 0x096 -#define CNFG_POS_REG0 0x100 -#define CNFG_POS_REG1 0x101 -#define CNFG_POS_REG2 0x102 -#define CNFG_POS_REG3 0x103 -#define CNFG_POS_REG4 0x104 -#define CNFG_POS_REG5 0x105 - -#define CNFG_ADAPTER_TYPE_MASK 0x0e - -#define SLOT_16BIT 0x0008 -#define INTERFACE_5X3_CHIP 0x0000 /* 0000 = 583 or 593 chips */ -#define NIC_690_BIT 0x0010 /* NIC is 690 */ -#define ALTERNATE_IRQ_BIT 0x0020 /* Alternate IRQ is used */ -#define INTERFACE_584_CHIP 0x0040 /* 0001 = 584 chip */ -#define INTERFACE_594_CHIP 0x0080 /* 0010 = 594 chip */ -#define INTERFACE_585_CHIP 0x0100 /* 0100 = 585/790 chip */ -#define INTERFACE_CHIP_MASK 0x03C0 /* Isolates Intfc Chip Type */ - -#define BOARD_16BIT 0x0040 -#define NODE_ADDR_CKSUM 0xEE -#define BRD_ID_8115T 0x04 - -#define NIC_825_BIT 0x0400 /* TRC 83C825 NIC */ -#define NIC_790_BIT 0x0800 /* NIC is 83C790 Ethernet */ - -#define CHIP_REV_MASK 0x3000 - -#define HWR_CBUSY 0x02 -#define HWR_CA 0x01 - -#define MAC_QUEUE 0 -#define NON_MAC_QUEUE 1 -#define BUG_QUEUE 2 /* NO RECEIVE QUEUE, ONLY TX */ - -#define NUM_MAC_TX_FCBS 8 -#define NUM_MAC_TX_BDBS NUM_MAC_TX_FCBS -#define NUM_MAC_RX_FCBS 7 -#define NUM_MAC_RX_BDBS 8 - -#define NUM_NON_MAC_TX_FCBS 6 -#define NUM_NON_MAC_TX_BDBS NUM_NON_MAC_TX_FCBS - -#define NUM_NON_MAC_RX_BDBS 0 /* CALCULATED DYNAMICALLY */ - -#define NUM_BUG_TX_FCBS 8 -#define NUM_BUG_TX_BDBS NUM_BUG_TX_FCBS - -#define MAC_TX_BUFFER_MEMORY 1024 -#define NON_MAC_TX_BUFFER_MEMORY (20 * 1024) -#define BUG_TX_BUFFER_MEMORY (NUM_BUG_TX_FCBS * 32) - -#define RX_BUFFER_MEMORY 0 /* CALCULATED DYNAMICALLY */ -#define RX_DATA_BUFFER_SIZE 256 -#define RX_BDB_SIZE_SHIFT 3 /* log2(RX_DATA_BUFFER_SIZE)-log2(sizeof(BDBlock)) */ -#define RX_BDB_SIZE_MASK (sizeof(BDBlock) - 1) -#define RX_DATA_BUFFER_SIZE_MASK (RX_DATA_BUFFER_SIZE-1) - -#define NUM_OF_INTERRUPTS 0x20 - -#define NOT_TRANSMITING 0 -#define TRANSMITING 1 - -#define TRC_INTERRUPT_ENABLE_MASK 0x7FF6 - -#define UCODE_VERSION 0x58 - -#define 
UCODE_SIZE_OFFSET 0x0000 /* WORD */ -#define UCODE_CHECKSUM_OFFSET 0x0002 /* WORD */ -#define UCODE_VERSION_OFFSET 0x0004 /* BYTE */ - -#define CS_RAM_SIZE 0X2000 -#define CS_RAM_CHECKSUM_OFFSET 0x1FFE /* WORD 1FFE(MSB)-1FFF(LSB)*/ -#define CS_RAM_VERSION_OFFSET 0x1FFC /* WORD 1FFC(MSB)-1FFD(LSB)*/ - -#define MISC_DATA_SIZE 128 -#define NUM_OF_ACBS 1 - -#define ACB_COMMAND_NOT_DONE 0x0000 /* Init, command not done */ -#define ACB_COMMAND_DONE 0x8000 /* TRC says command done */ -#define ACB_COMMAND_STATUS_MASK 0x00FF /* low byte is status */ -#define ACB_COMMAND_SUCCESSFUL 0x0000 /* means cmd was successful */ -#define ACB_NOT_CHAIN_END 0x0000 /* tell TRC more CBs in chain */ -#define ACB_CHAIN_END 0x8000 /* tell TRC last CB in chain */ -#define ACB_COMMAND_NO_INTERRUPT 0x0000 /* tell TRC no INT after CB */ -#define ACB_COMMAND_INTERRUPT 0x2000 /* tell TRC to INT after CB */ -#define ACB_SUB_CMD_NOP 0x0000 -#define ACB_CMD_HIC_NOP 0x0080 -#define ACB_CMD_MCT_NOP 0x0000 -#define ACB_CMD_MCT_TEST 0x0001 -#define ACB_CMD_HIC_TEST 0x0081 -#define ACB_CMD_INSERT 0x0002 -#define ACB_CMD_REMOVE 0x0003 -#define ACB_CMD_MCT_WRITE_VALUE 0x0004 -#define ACB_CMD_HIC_WRITE_VALUE 0x0084 -#define ACB_CMD_MCT_READ_VALUE 0x0005 -#define ACB_CMD_HIC_READ_VALUE 0x0085 -#define ACB_CMD_INIT_TX_RX 0x0086 -#define ACB_CMD_INIT_TRC_TIMERS 0x0006 -#define ACB_CMD_READ_TRC_STATUS 0x0007 -#define ACB_CMD_CHANGE_JOIN_STATE 0x0008 -#define ACB_CMD_RESERVED_9 0x0009 -#define ACB_CMD_RESERVED_A 0x000A -#define ACB_CMD_RESERVED_B 0x000B -#define ACB_CMD_RESERVED_C 0x000C -#define ACB_CMD_RESERVED_D 0x000D -#define ACB_CMD_RESERVED_E 0x000E -#define ACB_CMD_RESERVED_F 0x000F - -#define TRC_MAC_REGISTERS_TEST 0x0000 -#define TRC_INTERNAL_LOOPBACK 0x0001 -#define TRC_TRI_LOOPBACK 0x0002 -#define TRC_INTERNAL_ROM_TEST 0x0003 -#define TRC_LOBE_MEDIA_TEST 0x0004 -#define TRC_ANALOG_TEST 0x0005 -#define TRC_HOST_INTERFACE_REG_TEST 0x0003 - -#define TEST_DMA_1 0x0000 -#define TEST_DMA_2 0x0001 -#define TEST_MCT_ROM 0x0002 -#define HIC_INTERNAL_DIAG 0x0003 - -#define ABORT_TRANSMIT_PRIORITY_0 0x0001 -#define ABORT_TRANSMIT_PRIORITY_1 0x0002 -#define ABORT_TRANSMIT_PRIORITY_2 0x0004 -#define ABORT_TRANSMIT_PRIORITY_3 0x0008 -#define ABORT_TRANSMIT_PRIORITY_4 0x0010 -#define ABORT_TRANSMIT_PRIORITY_5 0x0020 -#define ABORT_TRANSMIT_PRIORITY_6 0x0040 -#define ABORT_TRANSMIT_PRIORITY_7 0x0080 - -#define TX_PENDING_PRIORITY_0 0x0001 -#define TX_PENDING_PRIORITY_1 0x0002 -#define TX_PENDING_PRIORITY_2 0x0004 -#define TX_PENDING_PRIORITY_3 0x0008 -#define TX_PENDING_PRIORITY_4 0x0010 -#define TX_PENDING_PRIORITY_5 0x0020 -#define TX_PENDING_PRIORITY_6 0x0040 -#define TX_PENDING_PRIORITY_7 0x0080 - -#define FCB_FRAME_LENGTH 0x100 -#define FCB_COMMAND_DONE 0x8000 /* FCB Word 0 */ -#define FCB_NOT_CHAIN_END 0x0000 /* FCB Word 1 */ -#define FCB_CHAIN_END 0x8000 -#define FCB_NO_WARNING 0x0000 -#define FCB_WARNING 0x4000 -#define FCB_INTERRUPT_DISABLE 0x0000 -#define FCB_INTERRUPT_ENABLE 0x2000 - -#define FCB_ENABLE_IMA 0x0008 -#define FCB_ENABLE_TES 0x0004 /* Guarantee Tx before Int */ -#define FCB_ENABLE_TFS 0x0002 /* Post Tx Frame Status */ -#define FCB_ENABLE_NTC 0x0001 /* No Tx CRC */ - -#define FCB_TX_STATUS_CR2 0x0004 -#define FCB_TX_STATUS_AR2 0x0008 -#define FCB_TX_STATUS_CR1 0x0040 -#define FCB_TX_STATUS_AR1 0x0080 -#define FCB_TX_AC_BITS (FCB_TX_STATUS_AR1+FCB_TX_STATUS_AR2+FCB_TX_STATUS_CR1+FCB_TX_STATUS_CR2) -#define FCB_TX_STATUS_E 0x0100 - -#define FCB_RX_STATUS_ANY_ERROR 0x0001 -#define FCB_RX_STATUS_FCS_ERROR 0x0002 - -#define 
FCB_RX_STATUS_IA_MATCHED 0x0400 -#define FCB_RX_STATUS_IGA_BSGA_MATCHED 0x0500 -#define FCB_RX_STATUS_FA_MATCHED 0x0600 -#define FCB_RX_STATUS_BA_MATCHED 0x0700 -#define FCB_RX_STATUS_DA_MATCHED 0x0400 -#define FCB_RX_STATUS_SOURCE_ROUTING 0x0800 - -#define BDB_BUFFER_SIZE 0x100 -#define BDB_NOT_CHAIN_END 0x0000 -#define BDB_CHAIN_END 0x8000 -#define BDB_NO_WARNING 0x0000 -#define BDB_WARNING 0x4000 - -#define ERROR_COUNTERS_CHANGED 0x0001 -#define TI_NDIS_RING_STATUS_CHANGED 0x0002 -#define UNA_CHANGED 0x0004 -#define READY_TO_SEND_RQ_INIT 0x0008 - -#define SCGB_ADDRESS_POINTER_FORMAT INTEL_ADDRESS_POINTER_FORMAT -#define SCGB_DATA_FORMAT INTEL_DATA_FORMAT -#define SCGB_MULTI_WORD_CONTROL 0 -#define SCGB_BURST_LENGTH 0x000E /* DMA Burst Length */ - -#define SCGB_CONFIG (INTEL_ADDRESS_POINTER_FORMAT+INTEL_DATA_FORMAT+SCGB_BURST_LENGTH) - -#define ISCP_BLOCK_SIZE 0x0A -#define RAM_SIZE 0x10000 -#define INIT_SYS_CONFIG_PTR_OFFSET (RAM_SIZE-ISCP_BLOCK_SIZE) -#define SCGP_BLOCK_OFFSET 0 - -#define SCLB_NOT_VALID 0x0000 /* Initially, SCLB not valid */ -#define SCLB_VALID 0x8000 /* Host tells TRC SCLB valid */ -#define SCLB_PROCESSED 0x0000 /* TRC says SCLB processed */ -#define SCLB_RESUME_CONTROL_NOT_VALID 0x0000 /* Initially, RC not valid */ -#define SCLB_RESUME_CONTROL_VALID 0x4000 /* Host tells TRC RC valid */ -#define SCLB_IACK_CODE_NOT_VALID 0x0000 /* Initially, IACK not valid */ -#define SCLB_IACK_CODE_VALID 0x2000 /* Host tells TRC IACK valid */ -#define SCLB_CMD_NOP 0x0000 -#define SCLB_CMD_REMOVE 0x0001 -#define SCLB_CMD_SUSPEND_ACB_CHAIN 0x0002 -#define SCLB_CMD_SET_INTERRUPT_MASK 0x0003 -#define SCLB_CMD_CLEAR_INTERRUPT_MASK 0x0004 -#define SCLB_CMD_RESERVED_5 0x0005 -#define SCLB_CMD_RESERVED_6 0x0006 -#define SCLB_CMD_RESERVED_7 0x0007 -#define SCLB_CMD_RESERVED_8 0x0008 -#define SCLB_CMD_RESERVED_9 0x0009 -#define SCLB_CMD_RESERVED_A 0x000A -#define SCLB_CMD_RESERVED_B 0x000B -#define SCLB_CMD_RESERVED_C 0x000C -#define SCLB_CMD_RESERVED_D 0x000D -#define SCLB_CMD_RESERVED_E 0x000E -#define SCLB_CMD_RESERVED_F 0x000F - -#define SCLB_RC_ACB 0x0001 /* Action Command Block Chain */ -#define SCLB_RC_RES0 0x0002 /* Always Zero */ -#define SCLB_RC_RES1 0x0004 /* Always Zero */ -#define SCLB_RC_RES2 0x0008 /* Always Zero */ -#define SCLB_RC_RX_MAC_FCB 0x0010 /* RX_MAC_FCB Chain */ -#define SCLB_RC_RX_MAC_BDB 0x0020 /* RX_MAC_BDB Chain */ -#define SCLB_RC_RX_NON_MAC_FCB 0x0040 /* RX_NON_MAC_FCB Chain */ -#define SCLB_RC_RX_NON_MAC_BDB 0x0080 /* RX_NON_MAC_BDB Chain */ -#define SCLB_RC_TFCB0 0x0100 /* TX Priority 0 FCB Chain */ -#define SCLB_RC_TFCB1 0x0200 /* TX Priority 1 FCB Chain */ -#define SCLB_RC_TFCB2 0x0400 /* TX Priority 2 FCB Chain */ -#define SCLB_RC_TFCB3 0x0800 /* TX Priority 3 FCB Chain */ -#define SCLB_RC_TFCB4 0x1000 /* TX Priority 4 FCB Chain */ -#define SCLB_RC_TFCB5 0x2000 /* TX Priority 5 FCB Chain */ -#define SCLB_RC_TFCB6 0x4000 /* TX Priority 6 FCB Chain */ -#define SCLB_RC_TFCB7 0x8000 /* TX Priority 7 FCB Chain */ - -#define SCLB_IMC_RES0 0x0001 /* */ -#define SCLB_IMC_MAC_TYPE_3 0x0002 /* MAC_ARC_INDICATE */ -#define SCLB_IMC_MAC_ERROR_COUNTERS 0x0004 /* */ -#define SCLB_IMC_RES1 0x0008 /* */ -#define SCLB_IMC_MAC_TYPE_2 0x0010 /* QUE_MAC_INDICATE */ -#define SCLB_IMC_TX_FRAME 0x0020 /* */ -#define SCLB_IMC_END_OF_TX_QUEUE 0x0040 /* */ -#define SCLB_IMC_NON_MAC_RX_RESOURCE 0x0080 /* */ -#define SCLB_IMC_MAC_RX_RESOURCE 0x0100 /* */ -#define SCLB_IMC_NON_MAC_RX_FRAME 0x0200 /* */ -#define SCLB_IMC_MAC_RX_FRAME 0x0400 /* */ -#define SCLB_IMC_TRC_FIFO_STATUS 0x0800 
/* */ -#define SCLB_IMC_COMMAND_STATUS 0x1000 /* */ -#define SCLB_IMC_MAC_TYPE_1 0x2000 /* Self Removed */ -#define SCLB_IMC_TRC_INTRNL_TST_STATUS 0x4000 /* */ -#define SCLB_IMC_RES2 0x8000 /* */ - -#define DMA_TRIGGER 0x0004 -#define FREQ_16MB_BIT 0x0010 -#define THDREN 0x0020 -#define CFG0_RSV1 0x0040 -#define CFG0_RSV2 0x0080 -#define ETREN 0x0100 -#define RX_OWN_BIT 0x0200 -#define RXATMAC 0x0400 -#define PROMISCUOUS_BIT 0x0800 -#define USETPT 0x1000 -#define SAVBAD_BIT 0x2000 -#define ONEQUE 0x4000 -#define NO_AUTOREMOVE 0x8000 - -#define RX_FCB_AREA_8316 0x00000000 -#define RX_BUFF_AREA_8316 0x00000000 - -#define TRC_POINTER(X) ((unsigned long)(X) - tp->ram_access) -#define RX_FCB_TRC_POINTER(X) ((unsigned long)(X) - tp->ram_access + RX_FCB_AREA_8316) -#define RX_BUFF_TRC_POINTER(X) ((unsigned long)(X) - tp->ram_access + RX_BUFF_AREA_8316) - -// Offset 0: MSR - Memory Select Register -// -#define r587_MSR 0x000 // Register Offset -//#define MSR_RST 0x080 // LAN Controller Reset -#define MSR_MENB 0x040 // Shared Memory Enable -#define MSR_RA18 0x020 // Ram Address bit 18 (583, 584, 587) -#define MSR_RA17 0x010 // Ram Address bit 17 (583, 584, 585/790) -#define MSR_RA16 0x008 // Ram Address bit 16 (583, 584, 585/790) -#define MSR_RA15 0x004 // Ram Address bit 15 (583, 584, 585/790) -#define MSR_RA14 0x002 // Ram Address bit 14 (583, 584, 585/790) -#define MSR_RA13 0x001 // Ram Address bit 13 (583, 584, 585/790) - -#define MSR_MASK 0x03F // Mask for Address bits RA18-RA13 (583, 584, 587) - -#define MSR 0x00 -#define IRR 0x04 -#define HWR 0x04 -#define LAAR 0x05 -#define IMCCR 0x05 -#define LAR0 0x08 -#define BDID 0x0E // Adapter ID byte register offset -#define CSR 0x10 -#define PR 0x11 - -#define MSR_RST 0x80 -#define MSR_MEMB 0x40 -#define MSR_0WS 0x20 - -#define FORCED_16BIT_MODE 0x0002 - -#define INTERFRAME_SPACING_16 0x0003 /* 6 bytes */ -#define INTERFRAME_SPACING_4 0x0001 /* 2 bytes */ -#define MULTICAST_ADDRESS_BIT 0x0010 -#define NON_SRC_ROUTING_BIT 0x0020 - -#define LOOPING_MODE_MASK 0x0007 - -/* - * Decode firmware defines. - */ -#define SWAP_BYTES(X) ((X & 0xff) << 8) | (X >> 8) -#define WEIGHT_OFFSET 5 -#define TREE_SIZE_OFFSET 9 -#define TREE_OFFSET 11 - -/* The Huffman Encoding Tree is constructed of these nodes. */ -typedef struct { - __u8 llink; /* Short version of above node. */ - __u8 tag; - __u8 info; /* This node is used on decodes. */ - __u8 rlink; -} DECODE_TREE_NODE; - -#define ROOT 0 /* Branch value. */ -#define LEAF 0 /* Tag field value. */ -#define BRANCH 1 /* Tag field value. 
*/ - -/* - * Multicast Table Structure - */ -typedef struct { - __u8 address[6]; - __u8 instance_count; -} McTable; - -/* - * Fragment Descriptor Definition - */ -typedef struct { - __u8 *fragment_ptr; - __u32 fragment_length; -} FragmentStructure; - -/* - * Data Buffer Structure Definition - */ -typedef struct { - __u32 fragment_count; - FragmentStructure fragment_list[MAXFRAGMENTS]; -} DataBufferStructure; - -#pragma pack(1) -typedef struct { - __u8 IType; - __u8 ISubtype; -} Interrupt_Status_Word; - -#pragma pack(1) -typedef struct BDBlockType { - __u16 info; /* 02 */ - __u32 trc_next_ptr; /* 06 */ - __u32 trc_data_block_ptr; /* 10 */ - __u16 buffer_length; /* 12 */ - - __u16 *data_block_ptr; /* 16 */ - struct BDBlockType *next_ptr; /* 20 */ - struct BDBlockType *back_ptr; /* 24 */ - __u8 filler[8]; /* 32 */ -} BDBlock; - -#pragma pack(1) -typedef struct FCBlockType { - __u16 frame_status; /* 02 */ - __u16 info; /* 04 */ - __u32 trc_next_ptr; /* 08 */ - __u32 trc_bdb_ptr; /* 12 */ - __u16 frame_length; /* 14 */ - - BDBlock *bdb_ptr; /* 18 */ - struct FCBlockType *next_ptr; /* 22 */ - struct FCBlockType *back_ptr; /* 26 */ - __u16 memory_alloc; /* 28 */ - __u8 filler[4]; /* 32 */ - -} FCBlock; - -#pragma pack(1) -typedef struct SBlockType{ - __u8 Internal_Error_Count; - __u8 Line_Error_Count; - __u8 AC_Error_Count; - __u8 Burst_Error_Count; - __u8 RESERVED_COUNTER_0; - __u8 AD_TRANS_Count; - __u8 RCV_Congestion_Count; - __u8 Lost_FR_Error_Count; - __u8 FREQ_Error_Count; - __u8 FR_Copied_Error_Count; - __u8 RESERVED_COUNTER_1; - __u8 Token_Error_Count; - - __u16 TI_NDIS_Ring_Status; - __u16 BCN_Type; - __u16 Error_Code; - __u16 SA_of_Last_AMP_SMP[3]; - __u16 UNA[3]; - __u16 Ucode_Version_Number; - __u16 Status_CHG_Indicate; - __u16 RESERVED_STATUS_0; -} SBlock; - -#pragma pack(1) -typedef struct ACBlockType { - __u16 cmd_done_status; /* 02 */ - __u16 cmd_info; /* 04 */ - __u32 trc_next_ptr; /* 08 */ - __u16 cmd; /* 10 */ - __u16 subcmd; /* 12 */ - __u16 data_offset_lo; /* 14 */ - __u16 data_offset_hi; /* 16 */ - - struct ACBlockType *next_ptr; /* 20 */ - - __u8 filler[12]; /* 32 */ -} ACBlock; - -#define NUM_OF_INTERRUPTS 0x20 - -#pragma pack(1) -typedef struct { - Interrupt_Status_Word IStatus[NUM_OF_INTERRUPTS]; -} ISBlock; - -#pragma pack(1) -typedef struct { - __u16 valid_command; /* 02 */ - __u16 iack_code; /* 04 */ - __u16 resume_control; /* 06 */ - __u16 int_mask_control; /* 08 */ - __u16 int_mask_state; /* 10 */ - - __u8 filler[6]; /* 16 */ -} SCLBlock; - -#pragma pack(1) -typedef struct -{ - __u16 config; /* 02 */ - __u32 trc_sclb_ptr; /* 06 */ - __u32 trc_acb_ptr; /* 10 */ - __u32 trc_isb_ptr; /* 14 */ - __u16 isbsiz; /* 16 */ - - SCLBlock *sclb_ptr; /* 20 */ - ACBlock *acb_ptr; /* 24 */ - ISBlock *isb_ptr; /* 28 */ - - __u16 Non_Mac_Rx_Bdbs; /* 30 DEBUG */ - __u8 filler[2]; /* 32 */ - -} SCGBlock; - -#pragma pack(1) -typedef struct -{ - __u32 trc_scgb_ptr; - SCGBlock *scgb_ptr; -} ISCPBlock; -#pragma pack() - -typedef struct net_local { - ISCPBlock *iscpb_ptr; - SCGBlock *scgb_ptr; - SCLBlock *sclb_ptr; - ISBlock *isb_ptr; - - ACBlock *acb_head; - ACBlock *acb_curr; - ACBlock *acb_next; - - __u8 adapter_name[12]; - - __u16 num_rx_bdbs [NUM_RX_QS_USED]; - __u16 num_rx_fcbs [NUM_RX_QS_USED]; - - __u16 num_tx_bdbs [NUM_TX_QS_USED]; - __u16 num_tx_fcbs [NUM_TX_QS_USED]; - - __u16 num_of_tx_buffs; - - __u16 tx_buff_size [NUM_TX_QS_USED]; - __u16 tx_buff_used [NUM_TX_QS_USED]; - __u16 tx_queue_status [NUM_TX_QS_USED]; - - FCBlock *tx_fcb_head[NUM_TX_QS_USED]; - FCBlock 
*tx_fcb_curr[NUM_TX_QS_USED]; - FCBlock *tx_fcb_end[NUM_TX_QS_USED]; - BDBlock *tx_bdb_head[NUM_TX_QS_USED]; - __u16 *tx_buff_head[NUM_TX_QS_USED]; - __u16 *tx_buff_end[NUM_TX_QS_USED]; - __u16 *tx_buff_curr[NUM_TX_QS_USED]; - __u16 num_tx_fcbs_used[NUM_TX_QS_USED]; - - FCBlock *rx_fcb_head[NUM_RX_QS_USED]; - FCBlock *rx_fcb_curr[NUM_RX_QS_USED]; - BDBlock *rx_bdb_head[NUM_RX_QS_USED]; - BDBlock *rx_bdb_curr[NUM_RX_QS_USED]; - BDBlock *rx_bdb_end[NUM_RX_QS_USED]; - __u16 *rx_buff_head[NUM_RX_QS_USED]; - __u16 *rx_buff_end[NUM_RX_QS_USED]; - - __u32 *ptr_local_ring_num; - - __u32 sh_mem_used; - - __u16 page_offset_mask; - - __u16 authorized_function_classes; - __u16 authorized_access_priority; - - __u16 num_acbs; - __u16 num_acbs_used; - __u16 acb_pending; - - __u16 current_isb_index; - - __u8 monitor_state; - __u8 monitor_state_ready; - __u16 ring_status; - __u8 ring_status_flags; - __u8 state; - - __u8 join_state; - - __u8 slot_num; - __u16 pos_id; - - __u32 *ptr_una; - __u32 *ptr_bcn_type; - __u32 *ptr_tx_fifo_underruns; - __u32 *ptr_rx_fifo_underruns; - __u32 *ptr_rx_fifo_overruns; - __u32 *ptr_tx_fifo_overruns; - __u32 *ptr_tx_fcb_overruns; - __u32 *ptr_rx_fcb_overruns; - __u32 *ptr_tx_bdb_overruns; - __u32 *ptr_rx_bdb_overruns; - - __u16 receive_queue_number; - - __u8 rx_fifo_overrun_count; - __u8 tx_fifo_overrun_count; - - __u16 adapter_flags; - __u16 adapter_flags1; - __u16 *misc_command_data; - __u16 max_packet_size; - - __u16 config_word0; - __u16 config_word1; - - __u8 trc_mask; - - __u16 source_ring_number; - __u16 target_ring_number; - - __u16 microcode_version; - - __u16 bic_type; - __u16 nic_type; - __u16 board_id; - - __u16 rom_size; - __u32 rom_base; - __u16 ram_size; - __u16 ram_usable; - __u32 ram_base; - __u32 ram_access; - - __u16 extra_info; - __u16 mode_bits; - __u16 media_menu; - __u16 media_type; - __u16 adapter_bus; - - __u16 status; - __u16 receive_mask; - - __u16 group_address_0; - __u16 group_address[2]; - __u16 functional_address_0; - __u16 functional_address[2]; - __u16 bitwise_group_address[2]; - - __u8 cleanup; - - struct sk_buff_head SendSkbQueue; - __u16 QueueSkb; - - struct tr_statistics MacStat; /* MAC statistics structure */ - - spinlock_t lock; -} NET_LOCAL; - -/************************************ - * SNMP-ON-BOARD Agent Link Structure - ************************************/ - -typedef struct { - __u8 LnkSigStr[12]; /* signature string "SmcLinkTable" */ - __u8 LnkDrvTyp; /* 1=Redbox ODI, 2=ODI DOS, 3=ODI OS/2, 4=NDIS DOS */ - __u8 LnkFlg; /* 0 if no agent linked, 1 if agent linked */ - void *LnkNfo; /* routine which returns pointer to NIC info */ - void *LnkAgtRcv; /* pointer to agent receive trap entry */ - void *LnkAgtXmt; /* pointer to agent transmit trap -entry */ -void *LnkGet; /* pointer to NIC receive data -copy routine */ - void *LnkSnd; /* pointer to NIC send routine -*/ - void *LnkRst; /* pointer to NIC driver reset -routine */ - void *LnkMib; /* pointer to MIB data base */ - void *LnkMibAct; /* pointer to MIB action routine list */ - __u16 LnkCntOffset; /* offset to error counters */ - __u16 LnkCntNum; /* number of error counters */ - __u16 LnkCntSize; /* size of error counters i.e. 
32 = 32 bits */ - void *LnkISR; /* pointer to interrupt vector */ - __u8 LnkFrmTyp; /* 1=Ethernet, 2=Token Ring */ - __u8 LnkDrvVer1 ; /* driver major version */ - __u8 LnkDrvVer2 ; /* driver minor version */ -} AgentLink; - -/* - * Definitions for pcm_card_flags(bit_mapped) - */ -#define REG_COMPLETE 0x0001 -#define INSERTED 0x0002 -#define PCC_INSERTED 0x0004 /* 1=currently inserted, 0=cur removed */ - -/* - * Adapter RAM test patterns - */ -#define RAM_PATTERN_1 0x55AA -#define RAM_PATTERN_2 0x9249 -#define RAM_PATTERN_3 0xDB6D - -/* - * definitions for RAM test - */ -#define ROM_SIGNATURE 0xAA55 -#define MIN_ROM_SIZE 0x2000 - -/* - * Return Codes - */ -#define SUCCESS 0x0000 -#define ADAPTER_AND_CONFIG 0x0001 -#define ADAPTER_NO_CONFIG 0x0002 -#define NOT_MY_INTERRUPT 0x0003 -#define FRAME_REJECTED 0x0004 -#define EVENTS_DISABLED 0x0005 -#define OUT_OF_RESOURCES 0x0006 -#define INVALID_PARAMETER 0x0007 -#define INVALID_FUNCTION 0x0008 -#define INITIALIZE_FAILED 0x0009 -#define CLOSE_FAILED 0x000A -#define MAX_COLLISIONS 0x000B -#define NO_SUCH_DESTINATION 0x000C -#define BUFFER_TOO_SMALL_ERROR 0x000D -#define ADAPTER_CLOSED 0x000E -#define UCODE_NOT_PRESENT 0x000F -#define FIFO_UNDERRUN 0x0010 -#define DEST_OUT_OF_RESOURCES 0x0011 -#define ADAPTER_NOT_INITIALIZED 0x0012 -#define PENDING 0x0013 -#define UCODE_PRESENT 0x0014 -#define NOT_INIT_BY_BRIDGE 0x0015 - -#define OPEN_FAILED 0x0080 -#define HARDWARE_FAILED 0x0081 -#define SELF_TEST_FAILED 0x0082 -#define RAM_TEST_FAILED 0x0083 -#define RAM_CONFLICT 0x0084 -#define ROM_CONFLICT 0x0085 -#define UNKNOWN_ADAPTER 0x0086 -#define CONFIG_ERROR 0x0087 -#define CONFIG_WARNING 0x0088 -#define NO_FIXED_CNFG 0x0089 -#define EEROM_CKSUM_ERROR 0x008A -#define ROM_SIGNATURE_ERROR 0x008B -#define ROM_CHECKSUM_ERROR 0x008C -#define ROM_SIZE_ERROR 0x008D -#define UNSUPPORTED_NIC_CHIP 0x008E -#define NIC_REG_ERROR 0x008F -#define BIC_REG_ERROR 0x0090 -#define MICROCODE_TEST_ERROR 0x0091 -#define LOBE_MEDIA_TEST_FAILED 0x0092 - -#define ADAPTER_FOUND_LAN_CORRUPT 0x009B - -#define ADAPTER_NOT_FOUND 0xFFFF - -#define ILLEGAL_FUNCTION INVALID_FUNCTION - -/* Errors */ -#define IO_BASE_INVALID 0x0001 -#define IO_BASE_RANGE 0x0002 -#define IRQ_INVALID 0x0004 -#define IRQ_RANGE 0x0008 -#define RAM_BASE_INVALID 0x0010 -#define RAM_BASE_RANGE 0x0020 -#define RAM_SIZE_RANGE 0x0040 -#define MEDIA_INVALID 0x0800 - -/* Warnings */ -#define IRQ_MISMATCH 0x0080 -#define RAM_BASE_MISMATCH 0x0100 -#define RAM_SIZE_MISMATCH 0x0200 -#define BUS_MODE_MISMATCH 0x0400 - -#define RX_CRC_ERROR 0x01 -#define RX_ALIGNMENT_ERROR 0x02 -#define RX_HW_FAILED 0x80 - -/* - * Definitions for the field RING_STATUS_FLAGS - */ -#define RING_STATUS_CHANGED 0X01 -#define MONITOR_STATE_CHANGED 0X02 -#define JOIN_STATE_CHANGED 0X04 - -/* - * Definitions for the field JOIN_STATE - */ -#define JS_BYPASS_STATE 0x00 -#define JS_LOBE_TEST_STATE 0x01 -#define JS_DETECT_MONITOR_PRESENT_STATE 0x02 -#define JS_AWAIT_NEW_MONITOR_STATE 0x03 -#define JS_DUPLICATE_ADDRESS_TEST_STATE 0x04 -#define JS_NEIGHBOR_NOTIFICATION_STATE 0x05 -#define JS_REQUEST_INITIALIZATION_STATE 0x06 -#define JS_JOIN_COMPLETE_STATE 0x07 -#define JS_BYPASS_WAIT_STATE 0x08 - -/* - * Definitions for the field MONITOR_STATE - */ -#define MS_MONITOR_FSM_INACTIVE 0x00 -#define MS_REPEAT_BEACON_STATE 0x01 -#define MS_REPEAT_CLAIM_TOKEN_STATE 0x02 -#define MS_TRANSMIT_CLAIM_TOKEN_STATE 0x03 -#define MS_STANDBY_MONITOR_STATE 0x04 -#define MS_TRANSMIT_BEACON_STATE 0x05 -#define MS_ACTIVE_MONITOR_STATE 0x06 -#define 
MS_TRANSMIT_RING_PURGE_STATE 0x07 -#define MS_BEACON_TEST_STATE 0x09 - -/* - * Definitions for the bit-field RING_STATUS - */ -#define SIGNAL_LOSS 0x8000 -#define HARD_ERROR 0x4000 -#define SOFT_ERROR 0x2000 -#define TRANSMIT_BEACON 0x1000 -#define LOBE_WIRE_FAULT 0x0800 -#define AUTO_REMOVAL_ERROR 0x0400 -#define REMOVE_RECEIVED 0x0100 -#define COUNTER_OVERFLOW 0x0080 -#define SINGLE_STATION 0x0040 -#define RING_RECOVERY 0x0020 - -/* - * Definitions for the field BUS_TYPE - */ -#define AT_BUS 0x00 -#define MCA_BUS 0x01 -#define EISA_BUS 0x02 -#define PCI_BUS 0x03 -#define PCMCIA_BUS 0x04 - -/* - * Definitions for adapter_flags - */ -#define RX_VALID_LOOKAHEAD 0x0001 -#define FORCED_16BIT_MODE 0x0002 -#define ADAPTER_DISABLED 0x0004 -#define TRANSMIT_CHAIN_INT 0x0008 -#define EARLY_RX_FRAME 0x0010 -#define EARLY_TX 0x0020 -#define EARLY_RX_COPY 0x0040 -#define USES_PHYSICAL_ADDR 0x0080 /* Rsvd for DEC PCI and 9232 */ -#define NEEDS_PHYSICAL_ADDR 0x0100 /* Reserved*/ -#define RX_STATUS_PENDING 0x0200 -#define ERX_DISABLED 0x0400 /* EARLY_RX_ENABLE rcv_mask */ -#define ENABLE_TX_PENDING 0x0800 -#define ENABLE_RX_PENDING 0x1000 -#define PERM_CLOSE 0x2000 -#define IO_MAPPED 0x4000 /* IOmapped bus interface 795 */ -#define ETX_DISABLED 0x8000 - - -/* - * Definitions for adapter_flags1 - */ -#define TX_PHY_RX_VIRT 0x0001 -#define NEEDS_HOST_RAM 0x0002 -#define NEEDS_MEDIA_TYPE 0x0004 -#define EARLY_RX_DONE 0x0008 -#define PNP_BOOT_BIT 0x0010 /* activates PnP & config on power-up */ - /* clear => regular PnP operation */ -#define PNP_ENABLE 0x0020 /* regular PnP operation clear => */ - /* no PnP, overrides PNP_BOOT_BIT */ -#define SATURN_ENABLE 0x0040 - -#define ADAPTER_REMOVABLE 0x0080 /* adapter is hot swappable */ -#define TX_PHY 0x0100 /* Uses physical address for tx bufs */ -#define RX_PHY 0x0200 /* Uses physical address for rx bufs */ -#define TX_VIRT 0x0400 /* Uses virtual addr for tx bufs */ -#define RX_VIRT 0x0800 -#define NEEDS_SERVICE 0x1000 - -/* - * Adapter Status Codes - */ -#define OPEN 0x0001 -#define INITIALIZED 0x0002 -#define CLOSED 0x0003 -#define FAILED 0x0005 -#define NOT_INITIALIZED 0x0006 -#define IO_CONFLICT 0x0007 -#define CARD_REMOVED 0x0008 -#define CARD_INSERTED 0x0009 - -/* - * Mode Bit Definitions - */ -#define INTERRUPT_STATUS_BIT 0x8000 /* PC Interrupt Line: 0 = Not Enabled */ -#define BOOT_STATUS_MASK 0x6000 /* Mask to isolate BOOT_STATUS */ -#define BOOT_INHIBIT 0x0000 /* BOOT_STATUS is 'inhibited' */ -#define BOOT_TYPE_1 0x2000 /* Unused BOOT_STATUS value */ -#define BOOT_TYPE_2 0x4000 /* Unused BOOT_STATUS value */ -#define BOOT_TYPE_3 0x6000 /* Unused BOOT_STATUS value */ -#define ZERO_WAIT_STATE_MASK 0x1800 /* Mask to isolate Wait State flags */ -#define ZERO_WAIT_STATE_8_BIT 0x1000 /* 0 = Disabled (Inserts Wait States) */ -#define ZERO_WAIT_STATE_16_BIT 0x0800 /* 0 = Disabled (Inserts Wait States) */ -#define LOOPING_MODE_MASK 0x0007 -#define LOOPBACK_MODE_0 0x0000 -#define LOOPBACK_MODE_1 0x0001 -#define LOOPBACK_MODE_2 0x0002 -#define LOOPBACK_MODE_3 0x0003 -#define LOOPBACK_MODE_4 0x0004 -#define LOOPBACK_MODE_5 0x0005 -#define LOOPBACK_MODE_6 0x0006 -#define LOOPBACK_MODE_7 0x0007 -#define AUTO_MEDIA_DETECT 0x0008 -#define MANUAL_CRC 0x0010 -#define EARLY_TOKEN_REL 0x0020 /* Early Token Release for Token Ring */ -#define UMAC 0x0040 -#define UTP2_PORT 0x0080 /* For 8216T2, 0=port A, 1=Port B. */ -#define BNC_10BT_INTERFACE 0x0600 /* BNC and UTP current media set */ -#define UTP_INTERFACE 0x0500 /* Ethernet UTP Only. 
*/ -#define BNC_INTERFACE 0x0400 -#define AUI_INTERFACE 0x0300 -#define AUI_10BT_INTERFACE 0x0200 -#define STARLAN_10_INTERFACE 0x0100 -#define INTERFACE_TYPE_MASK 0x0700 - -/* - * Media Type Bit Definitions - * - * legend: TP = Twisted Pair - * STP = Shielded twisted pair - * UTP = Unshielded twisted pair - */ - -#define CNFG_MEDIA_TYPE_MASK 0x001e /* POS Register 3 Mask */ - -#define MEDIA_S10 0x0000 /* Ethernet adapter, TP. */ -#define MEDIA_AUI_UTP 0x0001 /* Ethernet adapter, AUI/UTP media */ -#define MEDIA_BNC 0x0002 /* Ethernet adapter, BNC media. */ -#define MEDIA_AUI 0x0003 /* Ethernet Adapter, AUI media. */ -#define MEDIA_STP_16 0x0004 /* TokenRing adap, 16Mbit STP. */ -#define MEDIA_STP_4 0x0005 /* TokenRing adap, 4Mbit STP. */ -#define MEDIA_UTP_16 0x0006 /* TokenRing adap, 16Mbit UTP. */ -#define MEDIA_UTP_4 0x0007 /* TokenRing adap, 4Mbit UTP. */ -#define MEDIA_UTP 0x0008 /* Ethernet adapter, UTP media (no AUI) -*/ -#define MEDIA_BNC_UTP 0x0010 /* Ethernet adapter, BNC/UTP media */ -#define MEDIA_UTPFD 0x0011 /* Ethernet adapter, TP full duplex */ -#define MEDIA_UTPNL 0x0012 /* Ethernet adapter, TP with link integrity test disabled */ -#define MEDIA_AUI_BNC 0x0013 /* Ethernet adapter, AUI/BNC media */ -#define MEDIA_AUI_BNC_UTP 0x0014 /* Ethernet adapter, AUI_BNC/UTP */ -#define MEDIA_UTPA 0x0015 /* Ethernet UTP-10Mbps Ports A */ -#define MEDIA_UTPB 0x0016 /* Ethernet UTP-10Mbps Ports B */ -#define MEDIA_STP_16_UTP_16 0x0017 /* Token Ring STP-16Mbps/UTP-16Mbps */ -#define MEDIA_STP_4_UTP_4 0x0018 /* Token Ring STP-4Mbps/UTP-4Mbps */ - -#define MEDIA_STP100_UTP100 0x0020 /* Ethernet STP-100Mbps/UTP-100Mbps */ -#define MEDIA_UTP100FD 0x0021 /* Ethernet UTP-100Mbps, full duplex */ -#define MEDIA_UTP100 0x0022 /* Ethernet UTP-100Mbps */ - - -#define MEDIA_UNKNOWN 0xFFFF /* Unknown adapter/media type */ - -/* - * Definitions for the field: - * media_type2 - */ -#define MEDIA_TYPE_MII 0x0001 -#define MEDIA_TYPE_UTP 0x0002 -#define MEDIA_TYPE_BNC 0x0004 -#define MEDIA_TYPE_AUI 0x0008 -#define MEDIA_TYPE_S10 0x0010 -#define MEDIA_TYPE_AUTO_SENSE 0x1000 -#define MEDIA_TYPE_AUTO_DETECT 0x4000 -#define MEDIA_TYPE_AUTO_NEGOTIATE 0x8000 - -/* - * Definitions for the field: - * line_speed - */ -#define LINE_SPEED_UNKNOWN 0x0000 -#define LINE_SPEED_4 0x0001 -#define LINE_SPEED_10 0x0002 -#define LINE_SPEED_16 0x0004 -#define LINE_SPEED_100 0x0008 -#define LINE_SPEED_T4 0x0008 /* 100BaseT4 aliased for 9332BVT */ -#define LINE_SPEED_FULL_DUPLEX 0x8000 - -/* - * Definitions for the field: - * bic_type (Bus interface chip type) - */ -#define BIC_NO_CHIP 0x0000 /* Bus interface chip not implemented */ -#define BIC_583_CHIP 0x0001 /* 83C583 bus interface chip */ -#define BIC_584_CHIP 0x0002 /* 83C584 bus interface chip */ -#define BIC_585_CHIP 0x0003 /* 83C585 bus interface chip */ -#define BIC_593_CHIP 0x0004 /* 83C593 bus interface chip */ -#define BIC_594_CHIP 0x0005 /* 83C594 bus interface chip */ -#define BIC_564_CHIP 0x0006 /* PCMCIA Bus interface chip */ -#define BIC_790_CHIP 0x0007 /* 83C790 bus i-face/Ethernet NIC chip */ -#define BIC_571_CHIP 0x0008 /* 83C571 EISA bus master i-face */ -#define BIC_587_CHIP 0x0009 /* Token Ring AT bus master i-face */ -#define BIC_574_CHIP 0x0010 /* FEAST bus interface chip */ -#define BIC_8432_CHIP 0x0011 /* 8432 bus i-face/Ethernet NIC(DEC PCI) */ -#define BIC_9332_CHIP 0x0012 /* 9332 bus i-face/100Mbps Ether NIC(DEC PCI) */ -#define BIC_8432E_CHIP 0x0013 /* 8432 Enhanced bus iface/Ethernet NIC(DEC) */ -#define BIC_EPIC100_CHIP 0x0014 /* EPIC/100 10/100 
Mbps Ethernet BIC/NIC */ -#define BIC_C94_CHIP 0x0015 /* 91C94 bus i-face in PCMCIA mode */ -#define BIC_X8020_CHIP 0x0016 /* Xilinx PCMCIA multi-func i-face */ - -/* - * Definitions for the field: - * nic_type (Bus interface chip type) - */ -#define NIC_UNK_CHIP 0x0000 /* Unknown NIC chip */ -#define NIC_8390_CHIP 0x0001 /* DP8390 Ethernet NIC */ -#define NIC_690_CHIP 0x0002 /* 83C690 Ethernet NIC */ -#define NIC_825_CHIP 0x0003 /* 83C825 Token Ring NIC */ -/* #define NIC_???_CHIP 0x0004 */ /* Not used */ -/* #define NIC_???_CHIP 0x0005 */ /* Not used */ -/* #define NIC_???_CHIP 0x0006 */ /* Not used */ -#define NIC_790_CHIP 0x0007 /* 83C790 bus i-face/Ethernet NIC chip */ -#define NIC_C100_CHIP 0x0010 /* FEAST 100Mbps Ethernet NIC */ -#define NIC_8432_CHIP 0x0011 /* 8432 bus i-face/Ethernet NIC(DEC PCI) */ -#define NIC_9332_CHIP 0x0012 /* 9332 bus i-face/100Mbps Ether NIC(DEC PCI) */ -#define NIC_8432E_CHIP 0x0013 /* 8432 enhanced bus iface/Ethernet NIC(DEC) */ -#define NIC_EPIC100_CHIP 0x0014 /* EPIC/100 10/100 Mbps Ethernet BIC/NIC */ -#define NIC_C94_CHIP 0x0015 /* 91C94 PC Card with multi func */ - -/* - * Definitions for the field: - * adapter_type The adapter_type field describes the adapter/bus - * configuration. - */ -#define BUS_ISA16_TYPE 0x0001 /* 16 bit adap in 16 bit (E)ISA slot */ -#define BUS_ISA8_TYPE 0x0002 /* 8/16b adap in 8 bit XT/(E)ISA slot */ -#define BUS_MCA_TYPE 0x0003 /* Micro Channel adapter */ - -/* - * Receive Mask definitions - */ -#define ACCEPT_MULTICAST 0x0001 -#define ACCEPT_BROADCAST 0x0002 -#define PROMISCUOUS_MODE 0x0004 -#define ACCEPT_SOURCE_ROUTING 0x0008 -#define ACCEPT_ERR_PACKETS 0x0010 -#define ACCEPT_ATT_MAC_FRAMES 0x0020 -#define ACCEPT_MULTI_PROM 0x0040 -#define TRANSMIT_ONLY 0x0080 -#define ACCEPT_EXT_MAC_FRAMES 0x0100 -#define EARLY_RX_ENABLE 0x0200 -#define PKT_SIZE_NOT_NEEDED 0x0400 -#define ACCEPT_SOURCE_ROUTING_SPANNING 0x0808 - -#define ACCEPT_ALL_MAC_FRAMES 0x0120 - -/* - * config_mode defs - */ -#define STORE_EEROM 0x0001 /* Store config in EEROM. */ -#define STORE_REGS 0x0002 /* Store config in register set. */ - -/* - * equates for lmac_flags in adapter structure (Ethernet) - */ -#define MEM_DISABLE 0x0001 -#define RX_STATUS_POLL 0x0002 -#define USE_RE_BIT 0x0004 -/*#define RESERVED 0x0008 */ -/*#define RESERVED 0x0010 */ -/*#define RESERVED 0x0020 */ -/*#define RESERVED 0x0040 */ -/*#define RESERVED 0x0080 */ -/*#define RESERVED 0x0100 */ -/*#define RESERVED 0x0200 */ -/*#define RESERVED 0x0400 */ -/*#define RESERVED 0x0800 */ -/*#define RESERVED 0x1000 */ -/*#define RESERVED 0x2000 */ -/*#define RESERVED 0x4000 */ -/*#define RESERVED 0x8000 */ - -/* media_opts & media_set Fields bit defs for Ethernet ... */ -#define MED_OPT_BNC 0x01 -#define MED_OPT_UTP 0x02 -#define MED_OPT_AUI 0x04 -#define MED_OPT_10MB 0x08 -#define MED_OPT_100MB 0x10 -#define MED_OPT_S10 0x20 - -/* media_opts & media_set Fields bit defs for Token Ring ... */ -#define MED_OPT_4MB 0x08 -#define MED_OPT_16MB 0x10 -#define MED_OPT_STP 0x40 - -#define MAX_8023_SIZE 1500 /* Max 802.3 size of frame. */ -#define DEFAULT_ERX_VALUE 4 /* Number of 16-byte blocks for 790B early Rx. */ -#define DEFAULT_ETX_VALUE 32 /* Number of bytes for 790B early Tx. */ -#define DEFAULT_TX_RETRIES 3 /* Number of transmit retries */ -#define LPBK_FRAME_SIZE 1024 /* Default loopback frame for Rx calibration test. */ -#define MAX_LOOKAHEAD_SIZE 252 /* Max lookahead size for ethernet. 
*/ - -#define RW_MAC_STATE 0x1101 -#define RW_SA_OF_LAST_AMP_OR_SMP 0x2803 -#define RW_PHYSICAL_DROP_NUMBER 0x3B02 -#define RW_UPSTREAM_NEIGHBOR_ADDRESS 0x3E03 -#define RW_PRODUCT_INSTANCE_ID 0x4B09 - -#define RW_TRC_STATUS_BLOCK 0x5412 - -#define RW_MAC_ERROR_COUNTERS_NO_CLEAR 0x8006 -#define RW_MAC_ERROR_COUNTER_CLEAR 0x7A06 -#define RW_CONFIG_REGISTER_0 0xA001 -#define RW_CONFIG_REGISTER_1 0xA101 -#define RW_PRESCALE_TIMER_THRESHOLD 0xA201 -#define RW_TPT_THRESHOLD 0xA301 -#define RW_TQP_THRESHOLD 0xA401 -#define RW_TNT_THRESHOLD 0xA501 -#define RW_TBT_THRESHOLD 0xA601 -#define RW_TSM_THRESHOLD 0xA701 -#define RW_TAM_THRESHOLD 0xA801 -#define RW_TBR_THRESHOLD 0xA901 -#define RW_TER_THRESHOLD 0xAA01 -#define RW_TGT_THRESHOLD 0xAB01 -#define RW_THT_THRESHOLD 0xAC01 -#define RW_TRR_THRESHOLD 0xAD01 -#define RW_TVX_THRESHOLD 0xAE01 -#define RW_INDIVIDUAL_MAC_ADDRESS 0xB003 - -#define RW_INDIVIDUAL_GROUP_ADDRESS 0xB303 /* all of group addr */ -#define RW_INDIVIDUAL_GROUP_ADDR_WORD_0 0xB301 /* 1st word of group addr */ -#define RW_INDIVIDUAL_GROUP_ADDR 0xB402 /* 2nd-3rd word of group addr */ -#define RW_FUNCTIONAL_ADDRESS 0xB603 /* all of functional addr */ -#define RW_FUNCTIONAL_ADDR_WORD_0 0xB601 /* 1st word of func addr */ -#define RW_FUNCTIONAL_ADDR 0xB702 /* 2nd-3rd word func addr */ - -#define RW_BIT_SIGNIFICANT_GROUP_ADDR 0xB902 -#define RW_SOURCE_RING_BRIDGE_NUMBER 0xBB01 -#define RW_TARGET_RING_NUMBER 0xBC01 - -#define RW_HIC_INTERRUPT_MASK 0xC601 - -#define SOURCE_ROUTING_SPANNING_BITS 0x00C0 /* Spanning Tree Frames */ -#define SOURCE_ROUTING_EXPLORER_BIT 0x0040 /* Explorer and Single Route */ - - /* write */ - -#define CSR_MSK_ALL 0x80 // Bic 587 Only -#define CSR_MSKTINT 0x20 -#define CSR_MSKCBUSY 0x10 -#define CSR_CLRTINT 0x08 -#define CSR_CLRCBUSY 0x04 -#define CSR_WCSS 0x02 -#define CSR_CA 0x01 - - /* read */ - -#define CSR_TINT 0x20 -#define CSR_CINT 0x10 -#define CSR_TSTAT 0x08 -#define CSR_CSTAT 0x04 -#define CSR_FAULT 0x02 -#define CSR_CBUSY 0x01 - -#define LAAR_MEM16ENB 0x80 -#define Zws16 0x20 - -#define IRR_IEN 0x80 -#define Zws8 0x01 - -#define IMCCR_EIL 0x04 - -typedef struct { - __u8 ac; /* Access Control */ - __u8 fc; /* Frame Control */ - __u8 da[6]; /* Dest Addr */ - __u8 sa[6]; /* Source Addr */ - - __u16 vl; /* Vector Length */ - __u8 dc_sc; /* Dest/Source Class */ - __u8 vc; /* Vector Code */ - } MAC_HEADER; - -#define MAX_SUB_VECTOR_INFO (RX_DATA_BUFFER_SIZE - sizeof(MAC_HEADER) - 2) - -typedef struct - { - __u8 svl; /* Sub-vector Length */ - __u8 svi; /* Sub-vector Code */ - __u8 svv[MAX_SUB_VECTOR_INFO]; /* Sub-vector Info */ - } MAC_SUB_VECTOR; - -#endif /* __KERNEL__ */ -#endif /* __LINUX_SMCTR_H */ diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c deleted file mode 100644 index be4813e0366..00000000000 --- a/drivers/net/tokenring/tms380tr.c +++ /dev/null @@ -1,2306 +0,0 @@ -/* - * tms380tr.c: A network driver library for Texas Instruments TMS380-based - * Token Ring Adapters. - * - * Originally sktr.c: Written 1997 by Christoph Goos - * - * A fine result of the Linux Systems Network Architecture Project. - * http://www.vanheusden.com/sna/ - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. 
- * - * The following modules are currently available for card support: - * - tmspci (Generic PCI card support) - * - abyss (Madge PCI support) - * - tmsisa (SysKonnect TR4/16 ISA) - * - * Sources: - * - The hardware related parts of this driver are take from - * the SysKonnect Token Ring driver for Windows NT. - * - I used the IBM Token Ring driver 'ibmtr.c' as a base for this - * driver, as well as the 'skeleton.c' driver by Donald Becker. - * - Also various other drivers in the linux source tree were taken - * as samples for some tasks. - * - TI TMS380 Second-Generation Token Ring User's Guide - * - TI datasheets for respective chips - * - David Hein at Texas Instruments - * - Various Madge employees - * - * Maintainer(s): - * JS Jay Schulist jschlst@samba.org - * CG Christoph Goos cgoos@syskonnect.de - * AF Adam Fritzler - * MLP Mike Phillips phillim@amtrak.com - * JF Jochen Friedrich jochen@scram.de - * - * Modification History: - * 29-Aug-97 CG Created - * 04-Apr-98 CG Fixed problems caused by tok_timer_check - * 10-Apr-98 CG Fixed lockups at cable disconnection - * 27-May-98 JS Formated to Linux Kernel Format - * 31-May-98 JS Hacked in PCI support - * 16-Jun-98 JS Modulized for multiple cards with one driver - * Sep-99 AF Renamed to tms380tr (supports more than SK's) - * 23-Sep-99 AF Added Compaq and Thomas-Conrad PCI support - * Fixed a bug causing double copies on PCI - * Fixed for new multicast stuff (2.2/2.3) - * 25-Sep-99 AF Uped TPL_NUM from 3 to 9 - * Removed extraneous 'No free TPL' - * 22-Dec-99 AF Added Madge PCI Mk2 support and generalized - * parts of the initilization procedure. - * 30-Dec-99 AF Turned tms380tr into a library ala 8390. - * Madge support is provided in the abyss module - * Generic PCI support is in the tmspci module. - * 30-Nov-00 JF Updated PCI code to support IO MMU via - * pci_map_static(). Alpha uses this MMU for ISA - * as well. - * 14-Jan-01 JF Fix DMA on ifdown/ifup sequences. Some - * cleanup. - * 13-Jan-02 JF Add spinlock to fix race condition. - * 09-Nov-02 JF Fixed printks to not SPAM the console during - * normal operation. - * 30-Dec-02 JF Removed incorrect __init from - * tms380tr_init_card. - * 22-Jul-05 JF Converted to dma-mapping. - * - * To do: - * 1. Multi/Broadcast packet handling (this may have fixed itself) - * 2. Write a sktrisa module that includes the old ISA support (done) - * 3. Allow modules to load their own microcode - * 4. Speed up the BUD process -- freezing the kernel for 3+sec is - * quite unacceptable. - * 5. Still a few remaining stalls when the cable is unplugged. - */ - -#ifdef MODULE -static const char version[] = "tms380tr.c: v1.10 30/12/2002 by Christoph Goos, Adam Fritzler\n"; -#endif - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/interrupt.h> -#include <linux/ptrace.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/string.h> -#include <linux/time.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/dma-mapping.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/trdevice.h> -#include <linux/firmware.h> -#include <linux/bitops.h> - -#include <asm/io.h> -#include <asm/dma.h> -#include <asm/irq.h> -#include <asm/uaccess.h> - -#include "tms380tr.h" /* Our Stuff */ - -/* Use 0 for production, 1 for verification, 2 for debug, and - * 3 for very verbose debug. 
- */ -#ifndef TMS380TR_DEBUG -#define TMS380TR_DEBUG 0 -#endif -static unsigned int tms380tr_debug = TMS380TR_DEBUG; - -/* Index to functions, as function prototypes. - * Alphabetical by function name. - */ - -/* "A" */ -/* "B" */ -static int tms380tr_bringup_diags(struct net_device *dev); -/* "C" */ -static void tms380tr_cancel_tx_queue(struct net_local* tp); -static int tms380tr_chipset_init(struct net_device *dev); -static void tms380tr_chk_irq(struct net_device *dev); -static void tms380tr_chk_outstanding_cmds(struct net_device *dev); -static void tms380tr_chk_src_addr(unsigned char *frame, unsigned char *hw_addr); -static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqType); -int tms380tr_close(struct net_device *dev); -static void tms380tr_cmd_status_irq(struct net_device *dev); -/* "D" */ -static void tms380tr_disable_interrupts(struct net_device *dev); -#if TMS380TR_DEBUG > 0 -static void tms380tr_dump(unsigned char *Data, int length); -#endif -/* "E" */ -static void tms380tr_enable_interrupts(struct net_device *dev); -static void tms380tr_exec_cmd(struct net_device *dev, unsigned short Command); -static void tms380tr_exec_sifcmd(struct net_device *dev, unsigned int WriteValue); -/* "F" */ -/* "G" */ -static struct net_device_stats *tms380tr_get_stats(struct net_device *dev); -/* "H" */ -static netdev_tx_t tms380tr_hardware_send_packet(struct sk_buff *skb, - struct net_device *dev); -/* "I" */ -static int tms380tr_init_adapter(struct net_device *dev); -static void tms380tr_init_ipb(struct net_local *tp); -static void tms380tr_init_net_local(struct net_device *dev); -static void tms380tr_init_opb(struct net_device *dev); -/* "M" */ -/* "O" */ -int tms380tr_open(struct net_device *dev); -static void tms380tr_open_adapter(struct net_device *dev); -/* "P" */ -/* "R" */ -static void tms380tr_rcv_status_irq(struct net_device *dev); -static int tms380tr_read_ptr(struct net_device *dev); -static void tms380tr_read_ram(struct net_device *dev, unsigned char *Data, - unsigned short Address, int Length); -static int tms380tr_reset_adapter(struct net_device *dev); -static void tms380tr_reset_interrupt(struct net_device *dev); -static void tms380tr_ring_status_irq(struct net_device *dev); -/* "S" */ -static netdev_tx_t tms380tr_send_packet(struct sk_buff *skb, - struct net_device *dev); -static void tms380tr_set_multicast_list(struct net_device *dev); -static int tms380tr_set_mac_address(struct net_device *dev, void *addr); -/* "T" */ -static void tms380tr_timer_chk(unsigned long data); -static void tms380tr_timer_end_wait(unsigned long data); -static void tms380tr_tx_status_irq(struct net_device *dev); -/* "U" */ -static void tms380tr_update_rcv_stats(struct net_local *tp, - unsigned char DataPtr[], unsigned int Length); -/* "W" */ -void tms380tr_wait(unsigned long time); -static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status); -static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status); - -#define SIFREADB(reg) \ - (((struct net_local *)netdev_priv(dev))->sifreadb(dev, reg)) -#define SIFWRITEB(val, reg) \ - (((struct net_local *)netdev_priv(dev))->sifwriteb(dev, val, reg)) -#define SIFREADW(reg) \ - (((struct net_local *)netdev_priv(dev))->sifreadw(dev, reg)) -#define SIFWRITEW(val, reg) \ - (((struct net_local *)netdev_priv(dev))->sifwritew(dev, val, reg)) - - - -#if 0 /* TMS380TR_DEBUG > 0 */ -static int madgemc_sifprobe(struct net_device *dev) -{ - unsigned char old, chk1, chk2; - - old = SIFREADB(SIFADR); /* Get the old SIFADR value */ - - 
chk1 = 0; /* Begin with check value 0 */ - do { - madgemc_setregpage(dev, 0); - /* Write new SIFADR value */ - SIFWRITEB(chk1, SIFADR); - chk2 = SIFREADB(SIFADR); - if (chk2 != chk1) - return -1; - - madgemc_setregpage(dev, 1); - /* Read, invert and write */ - chk2 = SIFREADB(SIFADD); - if (chk2 != chk1) - return -1; - - madgemc_setregpage(dev, 0); - chk2 ^= 0x0FE; - SIFWRITEB(chk2, SIFADR); - - /* Read, invert and compare */ - madgemc_setregpage(dev, 1); - chk2 = SIFREADB(SIFADD); - madgemc_setregpage(dev, 0); - chk2 ^= 0x0FE; - - if(chk1 != chk2) - return -1; /* No adapter */ - chk1 -= 2; - } while(chk1 != 0); /* Repeat 128 times (all byte values) */ - - madgemc_setregpage(dev, 0); /* sanity */ - /* Restore the SIFADR value */ - SIFWRITEB(old, SIFADR); - - return 0; -} -#endif - -/* - * Open/initialize the board. This is called sometime after - * booting when the 'ifconfig' program is run. - * - * This routine should set everything up anew at each open, even - * registers that "should" only need to be set once at boot, so that - * there is non-reboot way to recover if something goes wrong. - */ -int tms380tr_open(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - int err; - - /* init the spinlock */ - spin_lock_init(&tp->lock); - init_timer(&tp->timer); - - /* Reset the hardware here. Don't forget to set the station address. */ - -#ifdef CONFIG_ISA - if(dev->dma > 0) - { - unsigned long flags=claim_dma_lock(); - disable_dma(dev->dma); - set_dma_mode(dev->dma, DMA_MODE_CASCADE); - enable_dma(dev->dma); - release_dma_lock(flags); - } -#endif - - err = tms380tr_chipset_init(dev); - if(err) - { - printk(KERN_INFO "%s: Chipset initialization error\n", - dev->name); - return -1; - } - - tp->timer.expires = jiffies + 30*HZ; - tp->timer.function = tms380tr_timer_end_wait; - tp->timer.data = (unsigned long)dev; - add_timer(&tp->timer); - - printk(KERN_DEBUG "%s: Adapter RAM size: %dK\n", - dev->name, tms380tr_read_ptr(dev)); - - tms380tr_enable_interrupts(dev); - tms380tr_open_adapter(dev); - - netif_start_queue(dev); - - /* Wait for interrupt from hardware. If interrupt does not come, - * there will be a timeout from the timer. 
- */ - tp->Sleeping = 1; - interruptible_sleep_on(&tp->wait_for_tok_int); - del_timer(&tp->timer); - - /* If AdapterVirtOpenFlag is 1, the adapter is now open for use */ - if(tp->AdapterVirtOpenFlag == 0) - { - tms380tr_disable_interrupts(dev); - return -1; - } - - tp->StartTime = jiffies; - - /* Start function control timer */ - tp->timer.expires = jiffies + 2*HZ; - tp->timer.function = tms380tr_timer_chk; - tp->timer.data = (unsigned long)dev; - add_timer(&tp->timer); - - return 0; -} - -/* - * Timeout function while waiting for event - */ -static void tms380tr_timer_end_wait(unsigned long data) -{ - struct net_device *dev = (struct net_device*)data; - struct net_local *tp = netdev_priv(dev); - - if(tp->Sleeping) - { - tp->Sleeping = 0; - wake_up_interruptible(&tp->wait_for_tok_int); - } -} - -/* - * Initialize the chipset - */ -static int tms380tr_chipset_init(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - int err; - - tms380tr_init_ipb(tp); - tms380tr_init_opb(dev); - tms380tr_init_net_local(dev); - - if(tms380tr_debug > 3) - printk(KERN_DEBUG "%s: Resetting adapter...\n", dev->name); - err = tms380tr_reset_adapter(dev); - if(err < 0) - return -1; - - if(tms380tr_debug > 3) - printk(KERN_DEBUG "%s: Bringup diags...\n", dev->name); - err = tms380tr_bringup_diags(dev); - if(err < 0) - return -1; - - if(tms380tr_debug > 3) - printk(KERN_DEBUG "%s: Init adapter...\n", dev->name); - err = tms380tr_init_adapter(dev); - if(err < 0) - return -1; - - if(tms380tr_debug > 3) - printk(KERN_DEBUG "%s: Done!\n", dev->name); - return 0; -} - -/* - * Initializes the net_local structure. - */ -static void tms380tr_init_net_local(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - int i; - dma_addr_t dmabuf; - - tp->scb.CMD = 0; - tp->scb.Parm[0] = 0; - tp->scb.Parm[1] = 0; - - tp->ssb.STS = 0; - tp->ssb.Parm[0] = 0; - tp->ssb.Parm[1] = 0; - tp->ssb.Parm[2] = 0; - - tp->CMDqueue = 0; - - tp->AdapterOpenFlag = 0; - tp->AdapterVirtOpenFlag = 0; - tp->ScbInUse = 0; - tp->OpenCommandIssued = 0; - tp->ReOpenInProgress = 0; - tp->HaltInProgress = 0; - tp->TransmitHaltScheduled = 0; - tp->LobeWireFaultLogged = 0; - tp->LastOpenStatus = 0; - tp->MaxPacketSize = DEFAULT_PACKET_SIZE; - - /* Create circular chain of transmit lists */ - for (i = 0; i < TPL_NUM; i++) - { - tp->Tpl[i].NextTPLAddr = htonl(((char *)(&tp->Tpl[(i+1) % TPL_NUM]) - (char *)tp) + tp->dmabuffer); /* DMA buffer may be MMU driven */ - tp->Tpl[i].Status = 0; - tp->Tpl[i].FrameSize = 0; - tp->Tpl[i].FragList[0].DataCount = 0; - tp->Tpl[i].FragList[0].DataAddr = 0; - tp->Tpl[i].NextTPLPtr = &tp->Tpl[(i+1) % TPL_NUM]; - tp->Tpl[i].MData = NULL; - tp->Tpl[i].TPLIndex = i; - tp->Tpl[i].DMABuff = 0; - tp->Tpl[i].BusyFlag = 0; - } - - tp->TplFree = tp->TplBusy = &tp->Tpl[0]; - - /* Create circular chain of receive lists */ - for (i = 0; i < RPL_NUM; i++) - { - tp->Rpl[i].NextRPLAddr = htonl(((char *)(&tp->Rpl[(i+1) % RPL_NUM]) - (char *)tp) + tp->dmabuffer); /* DMA buffer may be MMU driven */ - tp->Rpl[i].Status = (RX_VALID | RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ); - tp->Rpl[i].FrameSize = 0; - tp->Rpl[i].FragList[0].DataCount = cpu_to_be16((unsigned short)tp->MaxPacketSize); - - /* Alloc skb and point adapter to data area */ - tp->Rpl[i].Skb = dev_alloc_skb(tp->MaxPacketSize); - tp->Rpl[i].DMABuff = 0; - - /* skb == NULL ? 
then use local buffer */ - if(tp->Rpl[i].Skb == NULL) - { - tp->Rpl[i].SkbStat = SKB_UNAVAILABLE; - tp->Rpl[i].FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[i] - (char *)tp) + tp->dmabuffer); - tp->Rpl[i].MData = tp->LocalRxBuffers[i]; - } - else /* SKB != NULL */ - { - tp->Rpl[i].Skb->dev = dev; - skb_put(tp->Rpl[i].Skb, tp->MaxPacketSize); - - /* data unreachable for DMA ? then use local buffer */ - dmabuf = dma_map_single(tp->pdev, tp->Rpl[i].Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE); - if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit)) - { - tp->Rpl[i].SkbStat = SKB_DATA_COPY; - tp->Rpl[i].FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[i] - (char *)tp) + tp->dmabuffer); - tp->Rpl[i].MData = tp->LocalRxBuffers[i]; - } - else /* DMA directly in skb->data */ - { - tp->Rpl[i].SkbStat = SKB_DMA_DIRECT; - tp->Rpl[i].FragList[0].DataAddr = htonl(dmabuf); - tp->Rpl[i].MData = tp->Rpl[i].Skb->data; - tp->Rpl[i].DMABuff = dmabuf; - } - } - - tp->Rpl[i].NextRPLPtr = &tp->Rpl[(i+1) % RPL_NUM]; - tp->Rpl[i].RPLIndex = i; - } - - tp->RplHead = &tp->Rpl[0]; - tp->RplTail = &tp->Rpl[RPL_NUM-1]; - tp->RplTail->Status = (RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ); -} - -/* - * Initializes the initialisation parameter block. - */ -static void tms380tr_init_ipb(struct net_local *tp) -{ - tp->ipb.Init_Options = BURST_MODE; - tp->ipb.CMD_Status_IV = 0; - tp->ipb.TX_IV = 0; - tp->ipb.RX_IV = 0; - tp->ipb.Ring_Status_IV = 0; - tp->ipb.SCB_Clear_IV = 0; - tp->ipb.Adapter_CHK_IV = 0; - tp->ipb.RX_Burst_Size = BURST_SIZE; - tp->ipb.TX_Burst_Size = BURST_SIZE; - tp->ipb.DMA_Abort_Thrhld = DMA_RETRIES; - tp->ipb.SCB_Addr = 0; - tp->ipb.SSB_Addr = 0; -} - -/* - * Initializes the open parameter block. - */ -static void tms380tr_init_opb(struct net_device *dev) -{ - struct net_local *tp; - unsigned long Addr; - unsigned short RplSize = RPL_SIZE; - unsigned short TplSize = TPL_SIZE; - unsigned short BufferSize = BUFFER_SIZE; - int i; - - tp = netdev_priv(dev); - - tp->ocpl.OPENOptions = 0; - tp->ocpl.OPENOptions |= ENABLE_FULL_DUPLEX_SELECTION; - tp->ocpl.FullDuplex = 0; - tp->ocpl.FullDuplex |= OPEN_FULL_DUPLEX_OFF; - - /* - * Set node address - * - * We go ahead and put it in the OPB even though on - * most of the generic adapters this isn't required. - * Its simpler this way. -- ASF - */ - for (i=0;i<6;i++) - tp->ocpl.NodeAddr[i] = ((unsigned char *)dev->dev_addr)[i]; - - tp->ocpl.GroupAddr = 0; - tp->ocpl.FunctAddr = 0; - tp->ocpl.RxListSize = cpu_to_be16((unsigned short)RplSize); - tp->ocpl.TxListSize = cpu_to_be16((unsigned short)TplSize); - tp->ocpl.BufSize = cpu_to_be16((unsigned short)BufferSize); - tp->ocpl.Reserved = 0; - tp->ocpl.TXBufMin = TX_BUF_MIN; - tp->ocpl.TXBufMax = TX_BUF_MAX; - - Addr = htonl(((char *)tp->ProductID - (char *)tp) + tp->dmabuffer); - - tp->ocpl.ProdIDAddr[0] = LOWORD(Addr); - tp->ocpl.ProdIDAddr[1] = HIWORD(Addr); -} - -/* - * Send OPEN command to adapter - */ -static void tms380tr_open_adapter(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - - if(tp->OpenCommandIssued) - return; - - tp->OpenCommandIssued = 1; - tms380tr_exec_cmd(dev, OC_OPEN); -} - -/* - * Clear the adapter's interrupt flag. Clear system interrupt enable - * (SINTEN): disable adapter to system interrupts. - */ -static void tms380tr_disable_interrupts(struct net_device *dev) -{ - SIFWRITEB(0, SIFACL); -} - -/* - * Set the adapter's interrupt flag. Set system interrupt enable - * (SINTEN): enable adapter to system interrupts. 
- */ -static void tms380tr_enable_interrupts(struct net_device *dev) -{ - SIFWRITEB(ACL_SINTEN, SIFACL); -} - -/* - * Put command in command queue, try to execute it. - */ -static void tms380tr_exec_cmd(struct net_device *dev, unsigned short Command) -{ - struct net_local *tp = netdev_priv(dev); - - tp->CMDqueue |= Command; - tms380tr_chk_outstanding_cmds(dev); -} - -static void tms380tr_timeout(struct net_device *dev) -{ - /* - * If we get here, some higher level has decided we are broken. - * There should really be a "kick me" function call instead. - * - * Resetting the token ring adapter takes a long time so just - * fake transmission time and go on trying. Our own timeout - * routine is in tms380tr_timer_chk() - */ - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue(dev); -} - -/* - * Gets skb from system, queues it and checks if it can be sent - */ -static netdev_tx_t tms380tr_send_packet(struct sk_buff *skb, - struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - netdev_tx_t rc; - - rc = tms380tr_hardware_send_packet(skb, dev); - if(tp->TplFree->NextTPLPtr->BusyFlag) - netif_stop_queue(dev); - return rc; -} - -/* - * Move frames into adapter tx queue - */ -static netdev_tx_t tms380tr_hardware_send_packet(struct sk_buff *skb, - struct net_device *dev) -{ - TPL *tpl; - short length; - unsigned char *buf; - unsigned long flags; - int i; - dma_addr_t dmabuf, newbuf; - struct net_local *tp = netdev_priv(dev); - - /* Try to get a free TPL from the chain. - * - * NOTE: We *must* always leave one unused TPL in the chain, - * because otherwise the adapter might send frames twice. - */ - spin_lock_irqsave(&tp->lock, flags); - if(tp->TplFree->NextTPLPtr->BusyFlag) { /* No free TPL */ - if (tms380tr_debug > 0) - printk(KERN_DEBUG "%s: No free TPL\n", dev->name); - spin_unlock_irqrestore(&tp->lock, flags); - return NETDEV_TX_BUSY; - } - - dmabuf = 0; - - /* Is buffer reachable for Busmaster-DMA? */ - - length = skb->len; - dmabuf = dma_map_single(tp->pdev, skb->data, length, DMA_TO_DEVICE); - if(tp->dmalimit && (dmabuf + length > tp->dmalimit)) { - /* Copy frame to local buffer */ - dma_unmap_single(tp->pdev, dmabuf, length, DMA_TO_DEVICE); - dmabuf = 0; - i = tp->TplFree->TPLIndex; - buf = tp->LocalTxBuffers[i]; - skb_copy_from_linear_data(skb, buf, length); - newbuf = ((char *)buf - (char *)tp) + tp->dmabuffer; - } - else { - /* Send direct from skb->data */ - newbuf = dmabuf; - buf = skb->data; - } - /* Source address in packet? */ - tms380tr_chk_src_addr(buf, dev->dev_addr); - tp->LastSendTime = jiffies; - tpl = tp->TplFree; /* Get the "free" TPL */ - tpl->BusyFlag = 1; /* Mark TPL as busy */ - tp->TplFree = tpl->NextTPLPtr; - - /* Save the skb for delayed return of skb to system */ - tpl->Skb = skb; - tpl->DMABuff = dmabuf; - tpl->FragList[0].DataCount = cpu_to_be16((unsigned short)length); - tpl->FragList[0].DataAddr = htonl(newbuf); - - /* Write the data length in the transmit list. */ - tpl->FrameSize = cpu_to_be16((unsigned short)length); - tpl->MData = buf; - - /* Transmit the frame and set the status values. */ - tms380tr_write_tpl_status(tpl, TX_VALID | TX_START_FRAME - | TX_END_FRAME | TX_PASS_SRC_ADDR - | TX_FRAME_IRQ); - - /* Let adapter send the frame. */ - tms380tr_exec_sifcmd(dev, CMD_TX_VALID); - spin_unlock_irqrestore(&tp->lock, flags); - - return NETDEV_TX_OK; -} - -/* - * Write the given value to the 'Status' field of the specified TPL. 
- * NOTE: This function should be used whenever the status of any TPL must be - * modified by the driver, because the compiler may otherwise change the - * order of instructions such that writing the TPL status may be executed at - * an undesirable time. When this function is used, the status is always - * written when the function is called. - */ -static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status) -{ - tpl->Status = Status; -} - -static void tms380tr_chk_src_addr(unsigned char *frame, unsigned char *hw_addr) -{ - unsigned char SRBit; - - if((((unsigned long)frame[8]) & ~0x80) != 0) /* Compare 4 bytes */ - return; - if((unsigned short)frame[12] != 0) /* Compare 2 bytes */ - return; - - SRBit = frame[8] & 0x80; - memcpy(&frame[8], hw_addr, 6); - frame[8] |= SRBit; -} - -/* - * The timer routine: Check if adapter still open and working, reopen if not. - */ -static void tms380tr_timer_chk(unsigned long data) -{ - struct net_device *dev = (struct net_device*)data; - struct net_local *tp = netdev_priv(dev); - - if(tp->HaltInProgress) - return; - - tms380tr_chk_outstanding_cmds(dev); - if(time_before(tp->LastSendTime + SEND_TIMEOUT, jiffies) && - (tp->TplFree != tp->TplBusy)) - { - /* Anything to send, but stalled too long */ - tp->LastSendTime = jiffies; - tms380tr_exec_cmd(dev, OC_CLOSE); /* Does reopen automatically */ - } - - tp->timer.expires = jiffies + 2*HZ; - add_timer(&tp->timer); - - if(tp->AdapterOpenFlag || tp->ReOpenInProgress) - return; - tp->ReOpenInProgress = 1; - tms380tr_open_adapter(dev); -} - -/* - * The typical workload of the driver: Handle the network interface interrupts. - */ -irqreturn_t tms380tr_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct net_local *tp; - unsigned short irq_type; - int handled = 0; - - tp = netdev_priv(dev); - - irq_type = SIFREADW(SIFSTS); - - while(irq_type & STS_SYSTEM_IRQ) { - handled = 1; - irq_type &= STS_IRQ_MASK; - - if(!tms380tr_chk_ssb(tp, irq_type)) { - printk(KERN_DEBUG "%s: DATA LATE occurred\n", dev->name); - break; - } - - switch(irq_type) { - case STS_IRQ_RECEIVE_STATUS: - tms380tr_reset_interrupt(dev); - tms380tr_rcv_status_irq(dev); - break; - - case STS_IRQ_TRANSMIT_STATUS: - /* Check if TRANSMIT.HALT command is complete */ - if(tp->ssb.Parm[0] & COMMAND_COMPLETE) { - tp->TransmitCommandActive = 0; - tp->TransmitHaltScheduled = 0; - - /* Issue a new transmit command. */ - tms380tr_exec_cmd(dev, OC_TRANSMIT); - } - - tms380tr_reset_interrupt(dev); - tms380tr_tx_status_irq(dev); - break; - - case STS_IRQ_COMMAND_STATUS: - /* The SSB contains status of last command - * other than receive/transmit. - */ - tms380tr_cmd_status_irq(dev); - break; - - case STS_IRQ_SCB_CLEAR: - /* The SCB is free for another command. */ - tp->ScbInUse = 0; - tms380tr_chk_outstanding_cmds(dev); - break; - - case STS_IRQ_RING_STATUS: - tms380tr_ring_status_irq(dev); - break; - - case STS_IRQ_ADAPTER_CHECK: - tms380tr_chk_irq(dev); - break; - - case STS_IRQ_LLC_STATUS: - printk(KERN_DEBUG "tms380tr: unexpected LLC status IRQ\n"); - break; - - case STS_IRQ_TIMER: - printk(KERN_DEBUG "tms380tr: unexpected Timer IRQ\n"); - break; - - case STS_IRQ_RECEIVE_PENDING: - printk(KERN_DEBUG "tms380tr: unexpected Receive Pending IRQ\n"); - break; - - default: - printk(KERN_DEBUG "Unknown Token Ring IRQ (0x%04x)\n", irq_type); - break; - } - - /* Reset system interrupt if not already done. 
*/ - if(irq_type != STS_IRQ_TRANSMIT_STATUS && - irq_type != STS_IRQ_RECEIVE_STATUS) { - tms380tr_reset_interrupt(dev); - } - - irq_type = SIFREADW(SIFSTS); - } - - return IRQ_RETVAL(handled); -} - -/* - * Reset the INTERRUPT SYSTEM bit and issue SSB CLEAR command. - */ -static void tms380tr_reset_interrupt(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - SSB *ssb = &tp->ssb; - - /* - * [Workaround for "Data Late"] - * Set all fields of the SSB to well-defined values so we can - * check if the adapter has written the SSB. - */ - - ssb->STS = (unsigned short) -1; - ssb->Parm[0] = (unsigned short) -1; - ssb->Parm[1] = (unsigned short) -1; - ssb->Parm[2] = (unsigned short) -1; - - /* Free SSB by issuing SSB_CLEAR command after reading IRQ code - * and clear STS_SYSTEM_IRQ bit: enable adapter for further interrupts. - */ - tms380tr_exec_sifcmd(dev, CMD_SSB_CLEAR | CMD_CLEAR_SYSTEM_IRQ); -} - -/* - * Check if the SSB has actually been written by the adapter. - */ -static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqType) -{ - SSB *ssb = &tp->ssb; /* The address of the SSB. */ - - /* C 0 1 2 INTERRUPT CODE - * - - - - -------------- - * 1 1 1 1 TRANSMIT STATUS - * 1 1 1 1 RECEIVE STATUS - * 1 ? ? 0 COMMAND STATUS - * 0 0 0 0 SCB CLEAR - * 1 1 0 0 RING STATUS - * 0 0 0 0 ADAPTER CHECK - * - * 0 = SSB field not affected by interrupt - * 1 = SSB field is affected by interrupt - * - * C = SSB ADDRESS +0: COMMAND - * 0 = SSB ADDRESS +2: STATUS 0 - * 1 = SSB ADDRESS +4: STATUS 1 - * 2 = SSB ADDRESS +6: STATUS 2 - */ - - /* Check if this interrupt does use the SSB. */ - - if(IrqType != STS_IRQ_TRANSMIT_STATUS && - IrqType != STS_IRQ_RECEIVE_STATUS && - IrqType != STS_IRQ_COMMAND_STATUS && - IrqType != STS_IRQ_RING_STATUS) - { - return 1; /* SSB not involved. */ - } - - /* Note: All fields of the SSB have been set to all ones (-1) after it - * has last been used by the software (see DriverIsr()). - * - * Check if the affected SSB fields are still unchanged. - */ - - if(ssb->STS == (unsigned short) -1) - return 0; /* Command field not yet available. */ - if(IrqType == STS_IRQ_COMMAND_STATUS) - return 1; /* Status fields not always affected. */ - if(ssb->Parm[0] == (unsigned short) -1) - return 0; /* Status 1 field not yet available. */ - if(IrqType == STS_IRQ_RING_STATUS) - return 1; /* Status 2 & 3 fields not affected. */ - - /* Note: At this point, the interrupt is either TRANSMIT or RECEIVE. */ - if(ssb->Parm[1] == (unsigned short) -1) - return 0; /* Status 2 field not yet available. */ - if(ssb->Parm[2] == (unsigned short) -1) - return 0; /* Status 3 field not yet available. */ - - return 1; /* All SSB fields have been written by the adapter. */ -} - -/* - * Evaluates the command results status in the SSB status field. - */ -static void tms380tr_cmd_status_irq(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned short ssb_cmd, ssb_parm_0; - unsigned short ssb_parm_1; - char *open_err = "Open error -"; - char *code_err = "Open code -"; - - /* Copy the ssb values to local variables */ - ssb_cmd = tp->ssb.STS; - ssb_parm_0 = tp->ssb.Parm[0]; - ssb_parm_1 = tp->ssb.Parm[1]; - - if(ssb_cmd == OPEN) - { - tp->Sleeping = 0; - if(!tp->ReOpenInProgress) - wake_up_interruptible(&tp->wait_for_tok_int); - - tp->OpenCommandIssued = 0; - tp->ScbInUse = 0; - - if((ssb_parm_0 & 0x00FF) == GOOD_COMPLETION) - { - /* Success, the adapter is open. 
*/ - tp->LobeWireFaultLogged = 0; - tp->AdapterOpenFlag = 1; - tp->AdapterVirtOpenFlag = 1; - tp->TransmitCommandActive = 0; - tms380tr_exec_cmd(dev, OC_TRANSMIT); - tms380tr_exec_cmd(dev, OC_RECEIVE); - - if(tp->ReOpenInProgress) - tp->ReOpenInProgress = 0; - - return; - } - else /* The adapter did not open. */ - { - if(ssb_parm_0 & NODE_ADDR_ERROR) - printk(KERN_INFO "%s: Node address error\n", - dev->name); - if(ssb_parm_0 & LIST_SIZE_ERROR) - printk(KERN_INFO "%s: List size error\n", - dev->name); - if(ssb_parm_0 & BUF_SIZE_ERROR) - printk(KERN_INFO "%s: Buffer size error\n", - dev->name); - if(ssb_parm_0 & TX_BUF_COUNT_ERROR) - printk(KERN_INFO "%s: Tx buffer count error\n", - dev->name); - if(ssb_parm_0 & INVALID_OPEN_OPTION) - printk(KERN_INFO "%s: Invalid open option\n", - dev->name); - if(ssb_parm_0 & OPEN_ERROR) - { - /* Show the open phase. */ - switch(ssb_parm_0 & OPEN_PHASES_MASK) - { - case LOBE_MEDIA_TEST: - if(!tp->LobeWireFaultLogged) - { - tp->LobeWireFaultLogged = 1; - printk(KERN_INFO "%s: %s Lobe wire fault (check cable !).\n", dev->name, open_err); - } - tp->ReOpenInProgress = 1; - tp->AdapterOpenFlag = 0; - tp->AdapterVirtOpenFlag = 1; - tms380tr_open_adapter(dev); - return; - - case PHYSICAL_INSERTION: - printk(KERN_INFO "%s: %s Physical insertion.\n", dev->name, open_err); - break; - - case ADDRESS_VERIFICATION: - printk(KERN_INFO "%s: %s Address verification.\n", dev->name, open_err); - break; - - case PARTICIPATION_IN_RING_POLL: - printk(KERN_INFO "%s: %s Participation in ring poll.\n", dev->name, open_err); - break; - - case REQUEST_INITIALISATION: - printk(KERN_INFO "%s: %s Request initialisation.\n", dev->name, open_err); - break; - - case FULLDUPLEX_CHECK: - printk(KERN_INFO "%s: %s Full duplex check.\n", dev->name, open_err); - break; - - default: - printk(KERN_INFO "%s: %s Unknown open phase\n", dev->name, open_err); - break; - } - - /* Show the open errors. 
*/ - switch(ssb_parm_0 & OPEN_ERROR_CODES_MASK) - { - case OPEN_FUNCTION_FAILURE: - printk(KERN_INFO "%s: %s OPEN_FUNCTION_FAILURE", dev->name, code_err); - tp->LastOpenStatus = - OPEN_FUNCTION_FAILURE; - break; - - case OPEN_SIGNAL_LOSS: - printk(KERN_INFO "%s: %s OPEN_SIGNAL_LOSS\n", dev->name, code_err); - tp->LastOpenStatus = - OPEN_SIGNAL_LOSS; - break; - - case OPEN_TIMEOUT: - printk(KERN_INFO "%s: %s OPEN_TIMEOUT\n", dev->name, code_err); - tp->LastOpenStatus = - OPEN_TIMEOUT; - break; - - case OPEN_RING_FAILURE: - printk(KERN_INFO "%s: %s OPEN_RING_FAILURE\n", dev->name, code_err); - tp->LastOpenStatus = - OPEN_RING_FAILURE; - break; - - case OPEN_RING_BEACONING: - printk(KERN_INFO "%s: %s OPEN_RING_BEACONING\n", dev->name, code_err); - tp->LastOpenStatus = - OPEN_RING_BEACONING; - break; - - case OPEN_DUPLICATE_NODEADDR: - printk(KERN_INFO "%s: %s OPEN_DUPLICATE_NODEADDR\n", dev->name, code_err); - tp->LastOpenStatus = - OPEN_DUPLICATE_NODEADDR; - break; - - case OPEN_REQUEST_INIT: - printk(KERN_INFO "%s: %s OPEN_REQUEST_INIT\n", dev->name, code_err); - tp->LastOpenStatus = - OPEN_REQUEST_INIT; - break; - - case OPEN_REMOVE_RECEIVED: - printk(KERN_INFO "%s: %s OPEN_REMOVE_RECEIVED", dev->name, code_err); - tp->LastOpenStatus = - OPEN_REMOVE_RECEIVED; - break; - - case OPEN_FULLDUPLEX_SET: - printk(KERN_INFO "%s: %s OPEN_FULLDUPLEX_SET\n", dev->name, code_err); - tp->LastOpenStatus = - OPEN_FULLDUPLEX_SET; - break; - - default: - printk(KERN_INFO "%s: %s Unknown open err code", dev->name, code_err); - tp->LastOpenStatus = - OPEN_FUNCTION_FAILURE; - break; - } - } - - tp->AdapterOpenFlag = 0; - tp->AdapterVirtOpenFlag = 0; - - return; - } - } - else - { - if(ssb_cmd != READ_ERROR_LOG) - return; - - /* Add values from the error log table to the MAC - * statistics counters and update the errorlogtable - * memory. - */ - tp->MacStat.line_errors += tp->errorlogtable.Line_Error; - tp->MacStat.burst_errors += tp->errorlogtable.Burst_Error; - tp->MacStat.A_C_errors += tp->errorlogtable.ARI_FCI_Error; - tp->MacStat.lost_frames += tp->errorlogtable.Lost_Frame_Error; - tp->MacStat.recv_congest_count += tp->errorlogtable.Rx_Congest_Error; - tp->MacStat.rx_errors += tp->errorlogtable.Rx_Congest_Error; - tp->MacStat.frame_copied_errors += tp->errorlogtable.Frame_Copied_Error; - tp->MacStat.token_errors += tp->errorlogtable.Token_Error; - tp->MacStat.dummy1 += tp->errorlogtable.DMA_Bus_Error; - tp->MacStat.dummy1 += tp->errorlogtable.DMA_Parity_Error; - tp->MacStat.abort_delimiters += tp->errorlogtable.AbortDelimeters; - tp->MacStat.frequency_errors += tp->errorlogtable.Frequency_Error; - tp->MacStat.internal_errors += tp->errorlogtable.Internal_Error; - } -} - -/* - * The inverse routine to tms380tr_open(). - */ -int tms380tr_close(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - netif_stop_queue(dev); - - del_timer(&tp->timer); - - /* Flush the Tx and disable Rx here. 
*/ - - tp->HaltInProgress = 1; - tms380tr_exec_cmd(dev, OC_CLOSE); - tp->timer.expires = jiffies + 1*HZ; - tp->timer.function = tms380tr_timer_end_wait; - tp->timer.data = (unsigned long)dev; - add_timer(&tp->timer); - - tms380tr_enable_interrupts(dev); - - tp->Sleeping = 1; - interruptible_sleep_on(&tp->wait_for_tok_int); - tp->TransmitCommandActive = 0; - - del_timer(&tp->timer); - tms380tr_disable_interrupts(dev); - -#ifdef CONFIG_ISA - if(dev->dma > 0) - { - unsigned long flags=claim_dma_lock(); - disable_dma(dev->dma); - release_dma_lock(flags); - } -#endif - - SIFWRITEW(0xFF00, SIFCMD); -#if 0 - if(dev->dma > 0) /* what the? */ - SIFWRITEB(0xff, POSREG); -#endif - tms380tr_cancel_tx_queue(tp); - - return 0; -} - -/* - * Get the current statistics. This may be called with the card open - * or closed. - */ -static struct net_device_stats *tms380tr_get_stats(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - - return (struct net_device_stats *)&tp->MacStat; -} - -/* - * Set or clear the multicast filter for this adapter. - */ -static void tms380tr_set_multicast_list(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned int OpenOptions; - - OpenOptions = tp->ocpl.OPENOptions & - ~(PASS_ADAPTER_MAC_FRAMES - | PASS_ATTENTION_FRAMES - | PASS_BEACON_MAC_FRAMES - | COPY_ALL_MAC_FRAMES - | COPY_ALL_NON_MAC_FRAMES); - - tp->ocpl.FunctAddr = 0; - - if(dev->flags & IFF_PROMISC) - /* Enable promiscuous mode */ - OpenOptions |= COPY_ALL_NON_MAC_FRAMES | - COPY_ALL_MAC_FRAMES; - else - { - if(dev->flags & IFF_ALLMULTI) - { - /* Disable promiscuous mode, use normal mode. */ - tp->ocpl.FunctAddr = 0xFFFFFFFF; - } - else - { - struct netdev_hw_addr *ha; - - netdev_for_each_mc_addr(ha, dev) { - ((char *)(&tp->ocpl.FunctAddr))[0] |= - ha->addr[2]; - ((char *)(&tp->ocpl.FunctAddr))[1] |= - ha->addr[3]; - ((char *)(&tp->ocpl.FunctAddr))[2] |= - ha->addr[4]; - ((char *)(&tp->ocpl.FunctAddr))[3] |= - ha->addr[5]; - } - } - tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR); - } - - tp->ocpl.OPENOptions = OpenOptions; - tms380tr_exec_cmd(dev, OC_MODIFY_OPEN_PARMS); -} - -/* - * Wait for some time (microseconds) - */ -void tms380tr_wait(unsigned long time) -{ -#if 0 - long tmp; - - tmp = jiffies + time/(1000000/HZ); - do { - tmp = schedule_timeout_interruptible(tmp); - } while(time_after(tmp, jiffies)); -#else - mdelay(time / 1000); -#endif -} - -/* - * Write a command value to the SIFCMD register - */ -static void tms380tr_exec_sifcmd(struct net_device *dev, unsigned int WriteValue) -{ - unsigned short cmd; - unsigned short SifStsValue; - unsigned long loop_counter; - - WriteValue = ((WriteValue ^ CMD_SYSTEM_IRQ) | CMD_INTERRUPT_ADAPTER); - cmd = (unsigned short)WriteValue; - loop_counter = 0,5 * 800000; - do { - SifStsValue = SIFREADW(SIFSTS); - } while((SifStsValue & CMD_INTERRUPT_ADAPTER) && loop_counter--); - SIFWRITEW(cmd, SIFCMD); -} - -/* - * Processes adapter hardware reset, halts adapter and downloads firmware, - * clears the halt bit. 
- */ -static int tms380tr_reset_adapter(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned short *fw_ptr; - unsigned short count, c, count2; - const struct firmware *fw_entry = NULL; - - if (request_firmware(&fw_entry, "tms380tr.bin", tp->pdev) != 0) { - printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n", - dev->name, "tms380tr.bin"); - return -1; - } - - fw_ptr = (unsigned short *)fw_entry->data; - count2 = fw_entry->size / 2; - - /* Hardware adapter reset */ - SIFWRITEW(ACL_ARESET, SIFACL); - tms380tr_wait(40); - - c = SIFREADW(SIFACL); - tms380tr_wait(20); - - if(dev->dma == 0) /* For PCI adapters */ - { - c &= ~(ACL_NSELOUT0 | ACL_NSELOUT1); /* Clear bits */ - if(tp->setnselout) - c |= (*tp->setnselout)(dev); - } - - /* In case a command is pending - forget it */ - tp->ScbInUse = 0; - - c &= ~ACL_ARESET; /* Clear adapter reset bit */ - c |= ACL_CPHALT; /* Halt adapter CPU, allow download */ - c |= ACL_BOOT; - c |= ACL_SINTEN; - c &= ~ACL_PSDMAEN; /* Clear pseudo dma bit */ - SIFWRITEW(c, SIFACL); - tms380tr_wait(40); - - count = 0; - /* Download firmware via DIO interface: */ - do { - if (count2 < 3) continue; - - /* Download first address part */ - SIFWRITEW(*fw_ptr, SIFADX); - fw_ptr++; - count2--; - /* Download second address part */ - SIFWRITEW(*fw_ptr, SIFADD); - fw_ptr++; - count2--; - - if((count = *fw_ptr) != 0) /* Load loop counter */ - { - fw_ptr++; /* Download block data */ - count2--; - if (count > count2) continue; - - for(; count > 0; count--) - { - SIFWRITEW(*fw_ptr, SIFINC); - fw_ptr++; - count2--; - } - } - else /* Stop, if last block downloaded */ - { - c = SIFREADW(SIFACL); - c &= (~ACL_CPHALT | ACL_SINTEN); - - /* Clear CPHALT and start BUD */ - SIFWRITEW(c, SIFACL); - release_firmware(fw_entry); - return 1; - } - } while(count == 0); - - release_firmware(fw_entry); - printk(KERN_INFO "%s: Adapter Download Failed\n", dev->name); - return -1; -} - -MODULE_FIRMWARE("tms380tr.bin"); - -/* - * Starts bring up diagnostics of token ring adapter and evaluates - * diagnostic results. - */ -static int tms380tr_bringup_diags(struct net_device *dev) -{ - int loop_cnt, retry_cnt; - unsigned short Status; - - tms380tr_wait(HALF_SECOND); - tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET); - tms380tr_wait(HALF_SECOND); - - retry_cnt = BUD_MAX_RETRIES; /* maximal number of retrys */ - - do { - retry_cnt--; - if(tms380tr_debug > 3) - printk(KERN_DEBUG "BUD-Status: "); - loop_cnt = BUD_MAX_LOOPCNT; /* maximum: three seconds*/ - do { /* Inspect BUD results */ - loop_cnt--; - tms380tr_wait(HALF_SECOND); - Status = SIFREADW(SIFSTS); - Status &= STS_MASK; - - if(tms380tr_debug > 3) - printk(KERN_DEBUG " %04X\n", Status); - /* BUD successfully completed */ - if(Status == STS_INITIALIZE) - return 1; - /* Unrecoverable hardware error, BUD not completed? */ - } while((loop_cnt > 0) && ((Status & (STS_ERROR | STS_TEST)) - != (STS_ERROR | STS_TEST))); - - /* Error preventing completion of BUD */ - if(retry_cnt > 0) - { - printk(KERN_INFO "%s: Adapter Software Reset.\n", - dev->name); - tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET); - tms380tr_wait(HALF_SECOND); - } - } while(retry_cnt > 0); - - Status = SIFREADW(SIFSTS); - - printk(KERN_INFO "%s: Hardware error\n", dev->name); - /* Hardware error occurred! 
*/ - Status &= 0x001f; - if (Status & 0x0010) - printk(KERN_INFO "%s: BUD Error: Timeout\n", dev->name); - else if ((Status & 0x000f) > 6) - printk(KERN_INFO "%s: BUD Error: Illegal Failure\n", dev->name); - else - printk(KERN_INFO "%s: Bring Up Diagnostics Error (%04X) occurred\n", dev->name, Status & 0x000f); - - return -1; -} - -/* - * Copy initialisation data to adapter memory, beginning at address - * 1:0A00; Starting DMA test and evaluating result bits. - */ -static int tms380tr_init_adapter(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - - const unsigned char SCB_Test[6] = {0x00, 0x00, 0xC1, 0xE2, 0xD4, 0x8B}; - const unsigned char SSB_Test[8] = {0xFF, 0xFF, 0xD1, 0xD7, - 0xC5, 0xD9, 0xC3, 0xD4}; - void *ptr = (void *)&tp->ipb; - unsigned short *ipb_ptr = (unsigned short *)ptr; - unsigned char *cb_ptr = (unsigned char *) &tp->scb; - unsigned char *sb_ptr = (unsigned char *) &tp->ssb; - unsigned short Status; - int i, loop_cnt, retry_cnt; - - /* Normalize: byte order low/high, word order high/low! (only IPB!) */ - tp->ipb.SCB_Addr = SWAPW(((char *)&tp->scb - (char *)tp) + tp->dmabuffer); - tp->ipb.SSB_Addr = SWAPW(((char *)&tp->ssb - (char *)tp) + tp->dmabuffer); - - if(tms380tr_debug > 3) - { - printk(KERN_DEBUG "%s: buffer (real): %lx\n", dev->name, (long) &tp->scb); - printk(KERN_DEBUG "%s: buffer (virt): %lx\n", dev->name, (long) ((char *)&tp->scb - (char *)tp) + (long) tp->dmabuffer); - printk(KERN_DEBUG "%s: buffer (DMA) : %lx\n", dev->name, (long) tp->dmabuffer); - printk(KERN_DEBUG "%s: buffer (tp) : %lx\n", dev->name, (long) tp); - } - /* Maximum: three initialization retries */ - retry_cnt = INIT_MAX_RETRIES; - - do { - retry_cnt--; - - /* Transfer initialization block */ - SIFWRITEW(0x0001, SIFADX); - - /* To address 0001:0A00 of adapter RAM */ - SIFWRITEW(0x0A00, SIFADD); - - /* Write 11 words to adapter RAM */ - for(i = 0; i < 11; i++) - SIFWRITEW(ipb_ptr[i], SIFINC); - - /* Execute SCB adapter command */ - tms380tr_exec_sifcmd(dev, CMD_EXECUTE); - - loop_cnt = INIT_MAX_LOOPCNT; /* Maximum: 11 seconds */ - - /* While remaining retries, no error and not completed */ - do { - Status = 0; - loop_cnt--; - tms380tr_wait(HALF_SECOND); - - /* Mask interesting status bits */ - Status = SIFREADW(SIFSTS); - Status &= STS_MASK; - } while(((Status &(STS_INITIALIZE | STS_ERROR | STS_TEST)) != 0) && - ((Status & STS_ERROR) == 0) && (loop_cnt != 0)); - - if((Status & (STS_INITIALIZE | STS_ERROR | STS_TEST)) == 0) - { - /* Initialization completed without error */ - i = 0; - do { /* Test if contents of SCB is valid */ - if(SCB_Test[i] != *(cb_ptr + i)) - { - printk(KERN_INFO "%s: DMA failed\n", dev->name); - /* DMA data error: wrong data in SCB */ - return -1; - } - i++; - } while(i < 6); - - i = 0; - do { /* Test if contents of SSB is valid */ - if(SSB_Test[i] != *(sb_ptr + i)) - /* DMA data error: wrong data in SSB */ - return -1; - i++; - } while (i < 8); - - return 1; /* Adapter successfully initialized */ - } - else - { - if((Status & STS_ERROR) != 0) - { - /* Initialization error occurred */ - Status = SIFREADW(SIFSTS); - Status &= STS_ERROR_MASK; - /* ShowInitialisationErrorCode(Status); */ - printk(KERN_INFO "%s: Status error: %d\n", dev->name, Status); - return -1; /* Unrecoverable error */ - } - else - { - if(retry_cnt > 0) - { - /* Reset adapter and try init again */ - tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET); - tms380tr_wait(HALF_SECOND); - } - } - } - } while(retry_cnt > 0); - - printk(KERN_INFO "%s: Retry exceeded\n", dev->name); - return -1; -} - 
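
[Editor's note: the initialisation-block transfer in tms380tr_init_adapter() above shows the driver's basic DIO access pattern: select an adapter RAM page in SIFADX, an offset in SIFADD/SIFADR, then stream 16-bit words through the auto-incrementing SIFINC register. The same pattern underlies the firmware download loop in tms380tr_reset_adapter() and the RAM reads in tms380tr_read_ram(). A minimal, self-contained sketch of that pattern follows; it is illustrative only — the sif_write_block() helper, the iowrite16() accessors and the sif_base parameter are assumptions, since the real driver always goes through the per-adapter sifwritew()/sifreadw() hooks installed by the bus-specific front ends (tmspci, abyss, tmsisa).]

	/*
	 * Illustrative sketch of the TMS380 DIO write pattern, assuming a
	 * memory-mapped SIF register window reachable via generic MMIO
	 * accessors.  Register offsets mirror the definitions in tms380tr.h.
	 */
	#include <linux/io.h>
	#include <linux/types.h>

	#define SIF_SIFINC	0x02	/* data register, auto-incrementing  */
	#define SIF_SIFADR	0x04	/* SIF/DMA address (offset in page)  */
	#define SIF_SIFADX	0x0c	/* SIF/DMA address extension (page)  */

	static void sif_write_block(void __iomem *sif_base, u16 page, u16 offset,
				    const u16 *data, int words)
	{
		int i;

		iowrite16(page, sif_base + SIF_SIFADX);	  /* select RAM page      */
		iowrite16(offset, sif_base + SIF_SIFADR); /* starting offset      */
		for (i = 0; i < words; i++)		  /* SIFINC bumps SIFADR  */
			iowrite16(data[i], sif_base + SIF_SIFINC);
	}

[For example, the 11-word initialisation parameter block copied to adapter address 0001:0A00 above corresponds to a call of the form sif_write_block(sif_base, 0x0001, 0x0A00, ipb_words, 11) in this sketch.]
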
-/* - * Check for outstanding commands in command queue and tries to execute - * command immediately. Corresponding command flag in command queue is cleared. - */ -static void tms380tr_chk_outstanding_cmds(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned long Addr = 0; - - if(tp->CMDqueue == 0) - return; /* No command execution */ - - /* If SCB in use: no command */ - if(tp->ScbInUse == 1) - return; - - /* Check if adapter is opened, avoiding COMMAND_REJECT - * interrupt by the adapter! - */ - if (tp->AdapterOpenFlag == 0) { - if (tp->CMDqueue & OC_OPEN) { - /* Execute OPEN command */ - tp->CMDqueue ^= OC_OPEN; - - Addr = htonl(((char *)&tp->ocpl - (char *)tp) + tp->dmabuffer); - tp->scb.Parm[0] = LOWORD(Addr); - tp->scb.Parm[1] = HIWORD(Addr); - tp->scb.CMD = OPEN; - } else - /* No OPEN command queued, but adapter closed. Note: - * We'll try to re-open the adapter in DriverPoll() - */ - return; /* No adapter command issued */ - } else { - /* Adapter is open; evaluate command queue: try to execute - * outstanding commands (depending on priority!) CLOSE - * command queued - */ - if (tp->CMDqueue & OC_CLOSE) { - tp->CMDqueue ^= OC_CLOSE; - tp->AdapterOpenFlag = 0; - tp->scb.Parm[0] = 0; /* Parm[0], Parm[1] are ignored */ - tp->scb.Parm[1] = 0; /* but should be set to zero! */ - tp->scb.CMD = CLOSE; - if(!tp->HaltInProgress) - tp->CMDqueue |= OC_OPEN; /* re-open adapter */ - else - tp->CMDqueue = 0; /* no more commands */ - } else if (tp->CMDqueue & OC_RECEIVE) { - tp->CMDqueue ^= OC_RECEIVE; - Addr = htonl(((char *)tp->RplHead - (char *)tp) + tp->dmabuffer); - tp->scb.Parm[0] = LOWORD(Addr); - tp->scb.Parm[1] = HIWORD(Addr); - tp->scb.CMD = RECEIVE; - } else if (tp->CMDqueue & OC_TRANSMIT_HALT) { - /* NOTE: TRANSMIT.HALT must be checked - * before TRANSMIT. - */ - tp->CMDqueue ^= OC_TRANSMIT_HALT; - tp->scb.CMD = TRANSMIT_HALT; - - /* Parm[0] and Parm[1] are ignored - * but should be set to zero! 
- */ - tp->scb.Parm[0] = 0; - tp->scb.Parm[1] = 0; - } else if (tp->CMDqueue & OC_TRANSMIT) { - /* NOTE: TRANSMIT must be - * checked after TRANSMIT.HALT - */ - if (tp->TransmitCommandActive) { - if (!tp->TransmitHaltScheduled) { - tp->TransmitHaltScheduled = 1; - tms380tr_exec_cmd(dev, OC_TRANSMIT_HALT); - } - tp->TransmitCommandActive = 0; - return; - } - - tp->CMDqueue ^= OC_TRANSMIT; - tms380tr_cancel_tx_queue(tp); - Addr = htonl(((char *)tp->TplBusy - (char *)tp) + tp->dmabuffer); - tp->scb.Parm[0] = LOWORD(Addr); - tp->scb.Parm[1] = HIWORD(Addr); - tp->scb.CMD = TRANSMIT; - tp->TransmitCommandActive = 1; - } else if (tp->CMDqueue & OC_MODIFY_OPEN_PARMS) { - tp->CMDqueue ^= OC_MODIFY_OPEN_PARMS; - tp->scb.Parm[0] = tp->ocpl.OPENOptions; /* new OPEN options*/ - tp->scb.Parm[0] |= ENABLE_FULL_DUPLEX_SELECTION; - tp->scb.Parm[1] = 0; /* is ignored but should be zero */ - tp->scb.CMD = MODIFY_OPEN_PARMS; - } else if (tp->CMDqueue & OC_SET_FUNCT_ADDR) { - tp->CMDqueue ^= OC_SET_FUNCT_ADDR; - tp->scb.Parm[0] = LOWORD(tp->ocpl.FunctAddr); - tp->scb.Parm[1] = HIWORD(tp->ocpl.FunctAddr); - tp->scb.CMD = SET_FUNCT_ADDR; - } else if (tp->CMDqueue & OC_SET_GROUP_ADDR) { - tp->CMDqueue ^= OC_SET_GROUP_ADDR; - tp->scb.Parm[0] = LOWORD(tp->ocpl.GroupAddr); - tp->scb.Parm[1] = HIWORD(tp->ocpl.GroupAddr); - tp->scb.CMD = SET_GROUP_ADDR; - } else if (tp->CMDqueue & OC_READ_ERROR_LOG) { - tp->CMDqueue ^= OC_READ_ERROR_LOG; - Addr = htonl(((char *)&tp->errorlogtable - (char *)tp) + tp->dmabuffer); - tp->scb.Parm[0] = LOWORD(Addr); - tp->scb.Parm[1] = HIWORD(Addr); - tp->scb.CMD = READ_ERROR_LOG; - } else { - printk(KERN_WARNING "CheckForOutstandingCommand: unknown Command\n"); - tp->CMDqueue = 0; - return; - } - } - - tp->ScbInUse = 1; /* Set semaphore: SCB in use. */ - - /* Execute SCB and generate IRQ when done. */ - tms380tr_exec_sifcmd(dev, CMD_EXECUTE | CMD_SCB_REQUEST); -} - -/* - * IRQ conditions: signal loss on the ring, transmit or receive of beacon - * frames (disabled if bit 1 of OPEN option is set); report error MAC - * frame transmit (disabled if bit 2 of OPEN option is set); open or short - * circuit fault on the lobe is detected; remove MAC frame received; - * error counter overflow (255); opened adapter is the only station in ring. - * After some of the IRQs the adapter is closed! 
- */ -static void tms380tr_ring_status_irq(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - - tp->CurrentRingStatus = be16_to_cpu((unsigned short)tp->ssb.Parm[0]); - - /* First: fill up statistics */ - if(tp->ssb.Parm[0] & SIGNAL_LOSS) - { - printk(KERN_INFO "%s: Signal Loss\n", dev->name); - tp->MacStat.line_errors++; - } - - /* Adapter is closed, but initialized */ - if(tp->ssb.Parm[0] & LOBE_WIRE_FAULT) - { - printk(KERN_INFO "%s: Lobe Wire Fault, Reopen Adapter\n", - dev->name); - tp->MacStat.line_errors++; - } - - if(tp->ssb.Parm[0] & RING_RECOVERY) - printk(KERN_INFO "%s: Ring Recovery\n", dev->name); - - /* Counter overflow: read error log */ - if(tp->ssb.Parm[0] & COUNTER_OVERFLOW) - { - printk(KERN_INFO "%s: Counter Overflow\n", dev->name); - tms380tr_exec_cmd(dev, OC_READ_ERROR_LOG); - } - - /* Adapter is closed, but initialized */ - if(tp->ssb.Parm[0] & REMOVE_RECEIVED) - printk(KERN_INFO "%s: Remove Received, Reopen Adapter\n", - dev->name); - - /* Adapter is closed, but initialized */ - if(tp->ssb.Parm[0] & AUTO_REMOVAL_ERROR) - printk(KERN_INFO "%s: Auto Removal Error, Reopen Adapter\n", - dev->name); - - if(tp->ssb.Parm[0] & HARD_ERROR) - printk(KERN_INFO "%s: Hard Error\n", dev->name); - - if(tp->ssb.Parm[0] & SOFT_ERROR) - printk(KERN_INFO "%s: Soft Error\n", dev->name); - - if(tp->ssb.Parm[0] & TRANSMIT_BEACON) - printk(KERN_INFO "%s: Transmit Beacon\n", dev->name); - - if(tp->ssb.Parm[0] & SINGLE_STATION) - printk(KERN_INFO "%s: Single Station\n", dev->name); - - /* Check if adapter has been closed */ - if(tp->ssb.Parm[0] & ADAPTER_CLOSED) - { - printk(KERN_INFO "%s: Adapter closed (Reopening)," - "CurrentRingStat %x\n", - dev->name, tp->CurrentRingStatus); - tp->AdapterOpenFlag = 0; - tms380tr_open_adapter(dev); - } -} - -/* - * Issued if adapter has encountered an unrecoverable hardware - * or software error. - */ -static void tms380tr_chk_irq(struct net_device *dev) -{ - int i; - unsigned short AdapterCheckBlock[4]; - struct net_local *tp = netdev_priv(dev); - - tp->AdapterOpenFlag = 0; /* Adapter closed now */ - - /* Page number of adapter memory */ - SIFWRITEW(0x0001, SIFADX); - /* Address offset */ - SIFWRITEW(CHECKADDR, SIFADR); - - /* Reading 8 byte adapter check block. 
*/ - for(i = 0; i < 4; i++) - AdapterCheckBlock[i] = SIFREADW(SIFINC); - - if(tms380tr_debug > 3) - { - printk(KERN_DEBUG "%s: AdapterCheckBlock: ", dev->name); - for (i = 0; i < 4; i++) - printk("%04X", AdapterCheckBlock[i]); - printk("\n"); - } - - switch(AdapterCheckBlock[0]) - { - case DIO_PARITY: - printk(KERN_INFO "%s: DIO parity error\n", dev->name); - break; - - case DMA_READ_ABORT: - printk(KERN_INFO "%s DMA read operation aborted:\n", - dev->name); - switch (AdapterCheckBlock[1]) - { - case 0: - printk(KERN_INFO "Timeout\n"); - printk(KERN_INFO "Address: %04X %04X\n", - AdapterCheckBlock[2], - AdapterCheckBlock[3]); - break; - - case 1: - printk(KERN_INFO "Parity error\n"); - printk(KERN_INFO "Address: %04X %04X\n", - AdapterCheckBlock[2], - AdapterCheckBlock[3]); - break; - - case 2: - printk(KERN_INFO "Bus error\n"); - printk(KERN_INFO "Address: %04X %04X\n", - AdapterCheckBlock[2], - AdapterCheckBlock[3]); - break; - - default: - printk(KERN_INFO "Unknown error.\n"); - break; - } - break; - - case DMA_WRITE_ABORT: - printk(KERN_INFO "%s: DMA write operation aborted:\n", - dev->name); - switch (AdapterCheckBlock[1]) - { - case 0: - printk(KERN_INFO "Timeout\n"); - printk(KERN_INFO "Address: %04X %04X\n", - AdapterCheckBlock[2], - AdapterCheckBlock[3]); - break; - - case 1: - printk(KERN_INFO "Parity error\n"); - printk(KERN_INFO "Address: %04X %04X\n", - AdapterCheckBlock[2], - AdapterCheckBlock[3]); - break; - - case 2: - printk(KERN_INFO "Bus error\n"); - printk(KERN_INFO "Address: %04X %04X\n", - AdapterCheckBlock[2], - AdapterCheckBlock[3]); - break; - - default: - printk(KERN_INFO "Unknown error.\n"); - break; - } - break; - - case ILLEGAL_OP_CODE: - printk(KERN_INFO "%s: Illegal operation code in firmware\n", - dev->name); - /* Parm[0-3]: adapter internal register R13-R15 */ - break; - - case PARITY_ERRORS: - printk(KERN_INFO "%s: Adapter internal bus parity error\n", - dev->name); - /* Parm[0-3]: adapter internal register R13-R15 */ - break; - - case RAM_DATA_ERROR: - printk(KERN_INFO "%s: RAM data error\n", dev->name); - /* Parm[0-1]: MSW/LSW address of RAM location. */ - break; - - case RAM_PARITY_ERROR: - printk(KERN_INFO "%s: RAM parity error\n", dev->name); - /* Parm[0-1]: MSW/LSW address of RAM location. */ - break; - - case RING_UNDERRUN: - printk(KERN_INFO "%s: Internal DMA underrun detected\n", - dev->name); - break; - - case INVALID_IRQ: - printk(KERN_INFO "%s: Unrecognized interrupt detected\n", - dev->name); - /* Parm[0-3]: adapter internal register R13-R15 */ - break; - - case INVALID_ERROR_IRQ: - printk(KERN_INFO "%s: Unrecognized error interrupt detected\n", - dev->name); - /* Parm[0-3]: adapter internal register R13-R15 */ - break; - - case INVALID_XOP: - printk(KERN_INFO "%s: Unrecognized XOP request detected\n", - dev->name); - /* Parm[0-3]: adapter internal register R13-R15 */ - break; - - default: - printk(KERN_INFO "%s: Unknown status", dev->name); - break; - } - - if(tms380tr_chipset_init(dev) == 1) - { - /* Restart of firmware successful */ - tp->AdapterOpenFlag = 1; - } -} - -/* - * Internal adapter pointer to RAM data are copied from adapter into - * host system. 
- */ -static int tms380tr_read_ptr(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned short adapterram; - - tms380tr_read_ram(dev, (unsigned char *)&tp->intptrs.BurnedInAddrPtr, - ADAPTER_INT_PTRS, 16); - tms380tr_read_ram(dev, (unsigned char *)&adapterram, - cpu_to_be16((unsigned short)tp->intptrs.AdapterRAMPtr), 2); - return be16_to_cpu(adapterram); -} - -/* - * Reads a number of bytes from adapter to system memory. - */ -static void tms380tr_read_ram(struct net_device *dev, unsigned char *Data, - unsigned short Address, int Length) -{ - int i; - unsigned short old_sifadx, old_sifadr, InWord; - - /* Save the current values */ - old_sifadx = SIFREADW(SIFADX); - old_sifadr = SIFREADW(SIFADR); - - /* Page number of adapter memory */ - SIFWRITEW(0x0001, SIFADX); - /* Address offset in adapter RAM */ - SIFWRITEW(Address, SIFADR); - - /* Copy len byte from adapter memory to system data area. */ - i = 0; - for(;;) - { - InWord = SIFREADW(SIFINC); - - *(Data + i) = HIBYTE(InWord); /* Write first byte */ - if(++i == Length) /* All is done break */ - break; - - *(Data + i) = LOBYTE(InWord); /* Write second byte */ - if (++i == Length) /* All is done break */ - break; - } - - /* Restore original values */ - SIFWRITEW(old_sifadx, SIFADX); - SIFWRITEW(old_sifadr, SIFADR); -} - -/* - * Cancel all queued packets in the transmission queue. - */ -static void tms380tr_cancel_tx_queue(struct net_local* tp) -{ - TPL *tpl; - - /* - * NOTE: There must not be an active TRANSMIT command pending, when - * this function is called. - */ - if(tp->TransmitCommandActive) - return; - - for(;;) - { - tpl = tp->TplBusy; - if(!tpl->BusyFlag) - break; - /* "Remove" TPL from busy list. */ - tp->TplBusy = tpl->NextTPLPtr; - tms380tr_write_tpl_status(tpl, 0); /* Clear VALID bit */ - tpl->BusyFlag = 0; /* "free" TPL */ - - printk(KERN_INFO "Cancel tx (%08lXh).\n", (unsigned long)tpl); - if (tpl->DMABuff) - dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE); - dev_kfree_skb_any(tpl->Skb); - } -} - -/* - * This function is called whenever a transmit interrupt is generated by the - * adapter. For a command complete interrupt, it is checked if we have to - * issue a new transmit command or not. - */ -static void tms380tr_tx_status_irq(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned char HighByte, HighAc, LowAc; - TPL *tpl; - - /* NOTE: At this point the SSB from TRANSMIT STATUS is no longer - * available, because the CLEAR SSB command has already been issued. - * - * Process all complete transmissions. - */ - - for(;;) - { - tpl = tp->TplBusy; - if(!tpl->BusyFlag || (tpl->Status - & (TX_VALID | TX_FRAME_COMPLETE)) - != TX_FRAME_COMPLETE) - { - break; - } - - /* "Remove" TPL from busy list. 
*/ - tp->TplBusy = tpl->NextTPLPtr ; - - /* Check the transmit status field only for directed frames*/ - if(DIRECTED_FRAME(tpl) && (tpl->Status & TX_ERROR) == 0) - { - HighByte = GET_TRANSMIT_STATUS_HIGH_BYTE(tpl->Status); - HighAc = GET_FRAME_STATUS_HIGH_AC(HighByte); - LowAc = GET_FRAME_STATUS_LOW_AC(HighByte); - - if((HighAc != LowAc) || (HighAc == AC_NOT_RECOGNIZED)) - { - printk(KERN_DEBUG "%s: (DA=%08lX not recognized)\n", - dev->name, - *(unsigned long *)&tpl->MData[2+2]); - } - else - { - if(tms380tr_debug > 3) - printk(KERN_DEBUG "%s: Directed frame tx'd\n", - dev->name); - } - } - else - { - if(!DIRECTED_FRAME(tpl)) - { - if(tms380tr_debug > 3) - printk(KERN_DEBUG "%s: Broadcast frame tx'd\n", - dev->name); - } - } - - tp->MacStat.tx_packets++; - if (tpl->DMABuff) - dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE); - dev_kfree_skb_irq(tpl->Skb); - tpl->BusyFlag = 0; /* "free" TPL */ - } - - if(!tp->TplFree->NextTPLPtr->BusyFlag) - netif_wake_queue(dev); -} - -/* - * Called if a frame receive interrupt is generated by the adapter. - * Check if the frame is valid and indicate it to system. - */ -static void tms380tr_rcv_status_irq(struct net_device *dev) -{ - struct net_local *tp = netdev_priv(dev); - unsigned char *ReceiveDataPtr; - struct sk_buff *skb; - unsigned int Length, Length2; - RPL *rpl; - RPL *SaveHead; - dma_addr_t dmabuf; - - /* NOTE: At this point the SSB from RECEIVE STATUS is no longer - * available, because the CLEAR SSB command has already been issued. - * - * Process all complete receives. - */ - - for(;;) - { - rpl = tp->RplHead; - if(rpl->Status & RX_VALID) - break; /* RPL still in use by adapter */ - - /* Forward RPLHead pointer to next list. */ - SaveHead = tp->RplHead; - tp->RplHead = rpl->NextRPLPtr; - - /* Get the frame size (Byte swap for Intel). - * Do this early (see workaround comment below) - */ - Length = be16_to_cpu(rpl->FrameSize); - - /* Check if the Frame_Start, Frame_End and - * Frame_Complete bits are set. - */ - if((rpl->Status & VALID_SINGLE_BUFFER_FRAME) - == VALID_SINGLE_BUFFER_FRAME) - { - ReceiveDataPtr = rpl->MData; - - /* Workaround for delayed write of FrameSize on ISA - * (FrameSize is false but valid-bit is reset) - * Frame size is set to zero when the RPL is freed. - * Length2 is there because there have also been - * cases where the FrameSize was partially written - */ - Length2 = be16_to_cpu(rpl->FrameSize); - - if(Length == 0 || Length != Length2) - { - tp->RplHead = SaveHead; - break; /* Return to tms380tr_interrupt */ - } - tms380tr_update_rcv_stats(tp,ReceiveDataPtr,Length); - - if(tms380tr_debug > 3) - printk(KERN_DEBUG "%s: Packet Length %04X (%d)\n", - dev->name, Length, Length); - - /* Indicate the received frame to system the - * adapter does the Source-Routing padding for - * us. See: OpenOptions in tms380tr_init_opb() - */ - skb = rpl->Skb; - if(rpl->SkbStat == SKB_UNAVAILABLE) - { - /* Try again to allocate skb */ - skb = dev_alloc_skb(tp->MaxPacketSize); - if(skb == NULL) - { - /* Update Stats ?? 
*/ - } - else - { - skb_put(skb, tp->MaxPacketSize); - rpl->SkbStat = SKB_DATA_COPY; - ReceiveDataPtr = rpl->MData; - } - } - - if(skb && (rpl->SkbStat == SKB_DATA_COPY || - rpl->SkbStat == SKB_DMA_DIRECT)) - { - if(rpl->SkbStat == SKB_DATA_COPY) - skb_copy_to_linear_data(skb, ReceiveDataPtr, - Length); - - /* Deliver frame to system */ - rpl->Skb = NULL; - skb_trim(skb,Length); - skb->protocol = tr_type_trans(skb,dev); - netif_rx(skb); - } - } - else /* Invalid frame */ - { - if(rpl->Skb != NULL) - dev_kfree_skb_irq(rpl->Skb); - - /* Skip list. */ - if(rpl->Status & RX_START_FRAME) - /* Frame start bit is set -> overflow. */ - tp->MacStat.rx_errors++; - } - if (rpl->DMABuff) - dma_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, DMA_TO_DEVICE); - rpl->DMABuff = 0; - - /* Allocate new skb for rpl */ - rpl->Skb = dev_alloc_skb(tp->MaxPacketSize); - /* skb == NULL ? then use local buffer */ - if(rpl->Skb == NULL) - { - rpl->SkbStat = SKB_UNAVAILABLE; - rpl->FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[rpl->RPLIndex] - (char *)tp) + tp->dmabuffer); - rpl->MData = tp->LocalRxBuffers[rpl->RPLIndex]; - } - else /* skb != NULL */ - { - rpl->Skb->dev = dev; - skb_put(rpl->Skb, tp->MaxPacketSize); - - /* Data unreachable for DMA ? then use local buffer */ - dmabuf = dma_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE); - if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit)) - { - rpl->SkbStat = SKB_DATA_COPY; - rpl->FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[rpl->RPLIndex] - (char *)tp) + tp->dmabuffer); - rpl->MData = tp->LocalRxBuffers[rpl->RPLIndex]; - } - else - { - /* DMA directly in skb->data */ - rpl->SkbStat = SKB_DMA_DIRECT; - rpl->FragList[0].DataAddr = htonl(dmabuf); - rpl->MData = rpl->Skb->data; - rpl->DMABuff = dmabuf; - } - } - - rpl->FragList[0].DataCount = cpu_to_be16((unsigned short)tp->MaxPacketSize); - rpl->FrameSize = 0; - - /* Pass the last RPL back to the adapter */ - tp->RplTail->FrameSize = 0; - - /* Reset the CSTAT field in the list. */ - tms380tr_write_rpl_status(tp->RplTail, RX_VALID | RX_FRAME_IRQ); - - /* Current RPL becomes last one in list. */ - tp->RplTail = tp->RplTail->NextRPLPtr; - - /* Inform adapter about RPL valid. */ - tms380tr_exec_sifcmd(dev, CMD_RX_VALID); - } -} - -/* - * This function should be used whenever the status of any RPL must be - * modified by the driver, because the compiler may otherwise change the - * order of instructions such that writing the RPL status may be executed - * at an undesirable time. When this function is used, the status is - * always written when the function is called. - */ -static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status) -{ - rpl->Status = Status; -} - -/* - * The function updates the statistic counters in mac->MacStat. - * It differtiates between directed and broadcast/multicast ( ==functional) - * frames. 
- */ -static void tms380tr_update_rcv_stats(struct net_local *tp, unsigned char DataPtr[], - unsigned int Length) -{ - tp->MacStat.rx_packets++; - tp->MacStat.rx_bytes += Length; - - /* Test functional bit */ - if(DataPtr[2] & GROUP_BIT) - tp->MacStat.multicast++; -} - -static int tms380tr_set_mac_address(struct net_device *dev, void *addr) -{ - struct net_local *tp = netdev_priv(dev); - struct sockaddr *saddr = addr; - - if (tp->AdapterOpenFlag || tp->AdapterVirtOpenFlag) { - printk(KERN_WARNING "%s: Cannot set MAC/LAA address while card is open\n", dev->name); - return -EIO; - } - memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len); - return 0; -} - -#if TMS380TR_DEBUG > 0 -/* - * Dump Packet (data) - */ -static void tms380tr_dump(unsigned char *Data, int length) -{ - int i, j; - - for (i = 0, j = 0; i < length / 8; i++, j += 8) - { - printk(KERN_DEBUG "%02x %02x %02x %02x %02x %02x %02x %02x\n", - Data[j+0],Data[j+1],Data[j+2],Data[j+3], - Data[j+4],Data[j+5],Data[j+6],Data[j+7]); - } -} -#endif - -void tmsdev_term(struct net_device *dev) -{ - struct net_local *tp; - - tp = netdev_priv(dev); - dma_unmap_single(tp->pdev, tp->dmabuffer, sizeof(struct net_local), - DMA_BIDIRECTIONAL); -} - -const struct net_device_ops tms380tr_netdev_ops = { - .ndo_open = tms380tr_open, - .ndo_stop = tms380tr_close, - .ndo_start_xmit = tms380tr_send_packet, - .ndo_tx_timeout = tms380tr_timeout, - .ndo_get_stats = tms380tr_get_stats, - .ndo_set_rx_mode = tms380tr_set_multicast_list, - .ndo_set_mac_address = tms380tr_set_mac_address, -}; -EXPORT_SYMBOL(tms380tr_netdev_ops); - -int tmsdev_init(struct net_device *dev, struct device *pdev) -{ - struct net_local *tms_local; - - memset(netdev_priv(dev), 0, sizeof(struct net_local)); - tms_local = netdev_priv(dev); - init_waitqueue_head(&tms_local->wait_for_tok_int); - if (pdev->dma_mask) - tms_local->dmalimit = *pdev->dma_mask; - else - return -ENOMEM; - tms_local->pdev = pdev; - tms_local->dmabuffer = dma_map_single(pdev, (void *)tms_local, - sizeof(struct net_local), DMA_BIDIRECTIONAL); - if (tms_local->dmabuffer + sizeof(struct net_local) > - tms_local->dmalimit) - { - printk(KERN_INFO "%s: Memory not accessible for DMA\n", - dev->name); - tmsdev_term(dev); - return -ENOMEM; - } - - dev->netdev_ops = &tms380tr_netdev_ops; - dev->watchdog_timeo = HZ; - - return 0; -} - -EXPORT_SYMBOL(tms380tr_open); -EXPORT_SYMBOL(tms380tr_close); -EXPORT_SYMBOL(tms380tr_interrupt); -EXPORT_SYMBOL(tmsdev_init); -EXPORT_SYMBOL(tmsdev_term); -EXPORT_SYMBOL(tms380tr_wait); - -#ifdef MODULE - -static struct module *TMS380_module = NULL; - -int init_module(void) -{ - printk(KERN_DEBUG "%s", version); - - TMS380_module = &__this_module; - return 0; -} - -void cleanup_module(void) -{ - TMS380_module = NULL; -} -#endif - -MODULE_LICENSE("GPL"); - diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h deleted file mode 100644 index e5a617c586c..00000000000 --- a/drivers/net/tokenring/tms380tr.h +++ /dev/null @@ -1,1141 +0,0 @@ -/* - * tms380tr.h: TI TMS380 Token Ring driver for Linux - * - * Authors: - * - Christoph Goos <cgoos@syskonnect.de> - * - Adam Fritzler - */ - -#ifndef __LINUX_TMS380TR_H -#define __LINUX_TMS380TR_H - -#ifdef __KERNEL__ - -#include <linux/interrupt.h> - -/* module prototypes */ -extern const struct net_device_ops tms380tr_netdev_ops; -int tms380tr_open(struct net_device *dev); -int tms380tr_close(struct net_device *dev); -irqreturn_t tms380tr_interrupt(int irq, void *dev_id); -int tmsdev_init(struct net_device *dev, struct device 
*pdev); -void tmsdev_term(struct net_device *dev); -void tms380tr_wait(unsigned long time); - -#define TMS380TR_MAX_ADAPTERS 7 - -#define SEND_TIMEOUT 10*HZ - -#define TR_RCF_LONGEST_FRAME_MASK 0x0070 -#define TR_RCF_FRAME4K 0x0030 - -/*------------------------------------------------------------------*/ -/* Bit order for adapter communication with DMA */ -/* -------------------------------------------------------------- */ -/* Bit 8 | 9| 10| 11|| 12| 13| 14| 15|| 0| 1| 2| 3|| 4| 5| 6| 7| */ -/* -------------------------------------------------------------- */ -/* The bytes in a word must be byte swapped. Also, if a double */ -/* word is used for storage, then the words, as well as the bytes, */ -/* must be swapped. */ -/* Bit order for adapter communication with DIO */ -/* -------------------------------------------------------------- */ -/* Bit 0 | 1| 2| 3|| 4| 5| 6| 7|| 8| 9| 10| 11|| 12| 13| 14| 15| */ -/* -------------------------------------------------------------- */ -/*------------------------------------------------------------------*/ - -/* Swap words of a long. */ -#define SWAPW(x) (((x) << 16) | ((x) >> 16)) - -/* Get the low byte of a word. */ -#define LOBYTE(w) ((unsigned char)(w)) - -/* Get the high byte of a word. */ -#define HIBYTE(w) ((unsigned char)((unsigned short)(w) >> 8)) - -/* Get the low word of a long. */ -#define LOWORD(l) ((unsigned short)(l)) - -/* Get the high word of a long. */ -#define HIWORD(l) ((unsigned short)((unsigned long)(l) >> 16)) - - - -/* Token ring adapter I/O addresses for normal mode. */ - -/* - * The SIF registers. Common to all adapters. - */ -/* Basic SIF (SRSX = 0) */ -#define SIFDAT 0x00 /* SIF/DMA data. */ -#define SIFINC 0x02 /* IO Word data with auto increment. */ -#define SIFINH 0x03 /* IO Byte data with auto increment. */ -#define SIFADR 0x04 /* SIF/DMA Address. */ -#define SIFCMD 0x06 /* SIF Command. */ -#define SIFSTS 0x06 /* SIF Status. */ - -/* "Extended" SIF (SRSX = 1) */ -#define SIFACL 0x08 /* SIF Adapter Control Register. */ -#define SIFADD 0x0a /* SIF/DMA Address. -- 0x0a */ -#define SIFADX 0x0c /* 0x0c */ -#define DMALEN 0x0e /* SIF DMA length. -- 0x0e */ - -/* - * POS Registers. Only for ISA Adapters. - */ -#define POSREG 0x10 /* Adapter Program Option Select (POS) - * Register: base IO address + 16 byte. - */ -#define POSREG_2 24L /* only for TR4/16+ adapter - * base IO address + 24 byte. -- 0x18 - */ - -/* SIFCMD command codes (high-low) */ -#define CMD_INTERRUPT_ADAPTER 0x8000 /* Cause internal adapter interrupt */ -#define CMD_ADAPTER_RESET 0x4000 /* Hardware reset of adapter */ -#define CMD_SSB_CLEAR 0x2000 /* Acknowledge to adapter to - * system interrupts. - */ -#define CMD_EXECUTE 0x1000 /* Execute SCB command */ -#define CMD_SCB_REQUEST 0x0800 /* Request adapter to interrupt - * system when SCB is available for - * another command. - */ -#define CMD_RX_CONTINUE 0x0400 /* Continue receive after odd pointer - * stop. (odd pointer receive method) - */ -#define CMD_RX_VALID 0x0200 /* Now actual RPL is valid. */ -#define CMD_TX_VALID 0x0100 /* Now actual TPL is valid. (valid - * bit receive/transmit method) - */ -#define CMD_SYSTEM_IRQ 0x0080 /* Adapter-to-attached-system - * interrupt is reset. - */ -#define CMD_CLEAR_SYSTEM_IRQ 0x0080 /* Clear SYSTEM_INTERRUPT bit. - * (write: 1=ignore, 0=reset) - */ -#define EXEC_SOFT_RESET 0xFF00 /* adapter soft reset. (restart - * adapter after hardware reset) - */ - - -/* ACL commands (high-low) */ -#define ACL_SWHLDA 0x0800 /* Software hold acknowledge. 
*/ -#define ACL_SWDDIR 0x0400 /* Data transfer direction. */ -#define ACL_SWHRQ 0x0200 /* Pseudo DMA operation. */ -#define ACL_PSDMAEN 0x0100 /* Enable pseudo system DMA. */ -#define ACL_ARESET 0x0080 /* Adapter hardware reset command. - * (held in reset condition as - * long as bit is set) - */ -#define ACL_CPHALT 0x0040 /* Communication processor halt. - * (can only be set while ACL_ARESET - * bit is set; prevents adapter - * processor from executing code while - * downloading firmware) - */ -#define ACL_BOOT 0x0020 -#define ACL_SINTEN 0x0008 /* System interrupt enable/disable - * (1/0): can be written if ACL_ARESET - * is zero. - */ -#define ACL_PEN 0x0004 - -#define ACL_NSELOUT0 0x0002 -#define ACL_NSELOUT1 0x0001 /* NSELOUTx have a card-specific - * meaning for setting ring speed. - */ - -#define PS_DMA_MASK (ACL_SWHRQ | ACL_PSDMAEN) - - -/* SIFSTS register return codes (high-low) */ -#define STS_SYSTEM_IRQ 0x0080 /* Adapter-to-attached-system - * interrupt is valid. - */ -#define STS_INITIALIZE 0x0040 /* INITIALIZE status. (ready to - * initialize) - */ -#define STS_TEST 0x0020 /* TEST status. (BUD not completed) */ -#define STS_ERROR 0x0010 /* ERROR status. (unrecoverable - * HW error occurred) - */ -#define STS_MASK 0x00F0 /* Mask interesting status bits. */ -#define STS_ERROR_MASK 0x000F /* Get Error Code by masking the - * interrupt code bits. - */ -#define ADAPTER_INT_PTRS 0x0A00 /* Address offset of adapter internal - * pointers 01:0a00 (high-low) have to - * be read after init and before open. - */ - - -/* Interrupt Codes (only MAC IRQs) */ -#define STS_IRQ_ADAPTER_CHECK 0x0000 /* unrecoverable hardware or - * software error. - */ -#define STS_IRQ_RING_STATUS 0x0004 /* SSB is updated with ring status. */ -#define STS_IRQ_LLC_STATUS 0x0005 /* Not used in MAC-only microcode */ -#define STS_IRQ_SCB_CLEAR 0x0006 /* SCB clear, following an - * SCB_REQUEST IRQ. - */ -#define STS_IRQ_TIMER 0x0007 /* Not normally used in MAC ucode */ -#define STS_IRQ_COMMAND_STATUS 0x0008 /* SSB is updated with command - * status. - */ -#define STS_IRQ_RECEIVE_STATUS 0x000A /* SSB is updated with receive - * status. - */ -#define STS_IRQ_TRANSMIT_STATUS 0x000C /* SSB is updated with transmit - * status - */ -#define STS_IRQ_RECEIVE_PENDING 0x000E /* Not used in MAC-only microcode */ -#define STS_IRQ_MASK 0x000F /* = STS_ERROR_MASK. */ - - -/* TRANSMIT_STATUS completion code: (SSB.Parm[0]) */ -#define COMMAND_COMPLETE 0x0080 /* TRANSMIT command completed - * (avoid this!) issue another transmit - * to send additional frames. - */ -#define FRAME_COMPLETE 0x0040 /* Frame has been transmitted; - * INTERRUPT_FRAME bit was set in the - * CSTAT request; indication of possibly - * more than one frame transmissions! - * SSB.Parm[0-1]: 32 bit pointer to - * TPL of last frame. - */ -#define LIST_ERROR 0x0020 /* Error in one of the TPLs that - * compose the frame; TRANSMIT - * terminated; Parm[1-2]: 32bit pointer - * to TPL which starts the error - * frame; error details in bits 8-13. - * (14?) - */ -#define FRAME_SIZE_ERROR 0x8000 /* FRAME_SIZE does not equal the sum of - * the valid DATA_COUNT fields; - * FRAME_SIZE less than header plus - * information field. (15 bytes + - * routing field) Or if FRAME_SIZE - * was specified as zero in one list. - */ -#define TX_THRESHOLD 0x4000 /* FRAME_SIZE greater than (BUFFER_SIZE - * - 9) * TX_BUF_MAX. - */ -#define ODD_ADDRESS 0x2000 /* Odd forward pointer value is - * read on a list without END_FRAME - * indication. 
- */ -#define FRAME_ERROR 0x1000 /* START_FRAME bit (not) anticipated, - * but (not) set. - */ -#define ACCESS_PRIORITY_ERROR 0x0800 /* Access priority requested has not - * been allowed. - */ -#define UNENABLED_MAC_FRAME 0x0400 /* MAC frame has source class of zero - * or MAC frame PCF ATTN field is - * greater than one. - */ -#define ILLEGAL_FRAME_FORMAT 0x0200 /* Bit 0 or FC field was set to one. */ - - -/* - * Since we need to support some functions even if the adapter is in a - * CLOSED state, we have a (pseudo-) command queue which holds commands - * that are outstandig to be executed. - * - * Each time a command completes, an interrupt occurs and the next - * command is executed. The command queue is actually a simple word with - * a bit for each outstandig command. Therefore the commands will not be - * executed in the order they have been queued. - * - * The following defines the command code bits and the command queue: - */ -#define OC_OPEN 0x0001 /* OPEN command */ -#define OC_TRANSMIT 0x0002 /* TRANSMIT command */ -#define OC_TRANSMIT_HALT 0x0004 /* TRANSMIT_HALT command */ -#define OC_RECEIVE 0x0008 /* RECEIVE command */ -#define OC_CLOSE 0x0010 /* CLOSE command */ -#define OC_SET_GROUP_ADDR 0x0020 /* SET_GROUP_ADDR command */ -#define OC_SET_FUNCT_ADDR 0x0040 /* SET_FUNCT_ADDR command */ -#define OC_READ_ERROR_LOG 0x0080 /* READ_ERROR_LOG command */ -#define OC_READ_ADAPTER 0x0100 /* READ_ADAPTER command */ -#define OC_MODIFY_OPEN_PARMS 0x0400 /* MODIFY_OPEN_PARMS command */ -#define OC_RESTORE_OPEN_PARMS 0x0800 /* RESTORE_OPEN_PARMS command */ -#define OC_SET_FIRST_16_GROUP 0x1000 /* SET_FIRST_16_GROUP command */ -#define OC_SET_BRIDGE_PARMS 0x2000 /* SET_BRIDGE_PARMS command */ -#define OC_CONFIG_BRIDGE_PARMS 0x4000 /* CONFIG_BRIDGE_PARMS command */ - -#define OPEN 0x0300 /* C: open command. S: completion. */ -#define TRANSMIT 0x0400 /* C: transmit command. S: completion - * status. (reject: COMMAND_REJECT if - * adapter not opened, TRANSMIT already - * issued or address passed in the SCB - * not word aligned) - */ -#define TRANSMIT_HALT 0x0500 /* C: interrupt TX TPL chain; if no - * TRANSMIT command issued, the command - * is ignored (completion with TRANSMIT - * status (0x0400)!) - */ -#define RECEIVE 0x0600 /* C: receive command. S: completion - * status. (reject: COMMAND_REJECT if - * adapter not opened, RECEIVE already - * issued or address passed in the SCB - * not word aligned) - */ -#define CLOSE 0x0700 /* C: close adapter. S: completion. - * (COMMAND_REJECT if adapter not open) - */ -#define SET_GROUP_ADDR 0x0800 /* C: alter adapter group address after - * OPEN. S: completion. (COMMAND_REJECT - * if adapter not open) - */ -#define SET_FUNCT_ADDR 0x0900 /* C: alter adapter functional address - * after OPEN. S: completion. - * (COMMAND_REJECT if adapter not open) - */ -#define READ_ERROR_LOG 0x0A00 /* C: read adapter error counters. - * S: completion. (command ignored - * if adapter not open!) - */ -#define READ_ADAPTER 0x0B00 /* C: read data from adapter memory. - * (important: after init and before - * open!) S: completion. (ADAPTER_CHECK - * interrupt if undefined storage area - * read) - */ -#define MODIFY_OPEN_PARMS 0x0D00 /* C: modify some adapter operational - * parameters. (bit correspondend to - * WRAP_INTERFACE is ignored) - * S: completion. (reject: - * COMMAND_REJECT) - */ -#define RESTORE_OPEN_PARMS 0x0E00 /* C: modify some adapter operational - * parameters. (bit correspondend - * to WRAP_INTERFACE is ignored) - * S: completion. 
(reject: - * COMMAND_REJECT) - */ -#define SET_FIRST_16_GROUP 0x0F00 /* C: alter the first two bytes in - * adapter group address. - * S: completion. (reject: - * COMMAND_REJECT) - */ -#define SET_BRIDGE_PARMS 0x1000 /* C: values and conditions for the - * adapter hardware to use when frames - * are copied for forwarding. - * S: completion. (reject: - * COMMAND_REJECT) - */ -#define CONFIG_BRIDGE_PARMS 0x1100 /* C: .. - * S: completion. (reject: - * COMMAND_REJECT) - */ - -#define SPEED_4 4 -#define SPEED_16 16 /* Default transmission speed */ - - -/* Initialization Parameter Block (IPB); word alignment necessary! */ -#define BURST_SIZE 0x0018 /* Default burst size */ -#define BURST_MODE 0x9F00 /* Burst mode enable */ -#define DMA_RETRIES 0x0505 /* Magic DMA retry number... */ - -#define CYCLE_TIME 3 /* Default AT-bus cycle time: 500 ns - * (later adapter version: fix cycle time!) - */ -#define LINE_SPEED_BIT 0x80 - -/* Macro definition for the wait function. */ -#define ONE_SECOND_TICKS 1000000 -#define HALF_SECOND (ONE_SECOND_TICKS / 2) -#define ONE_SECOND (ONE_SECOND_TICKS) -#define TWO_SECONDS (ONE_SECOND_TICKS * 2) -#define THREE_SECONDS (ONE_SECOND_TICKS * 3) -#define FOUR_SECONDS (ONE_SECOND_TICKS * 4) -#define FIVE_SECONDS (ONE_SECOND_TICKS * 5) - -#define BUFFER_SIZE 2048 /* Buffers on Adapter */ - -#pragma pack(1) -typedef struct { - unsigned short Init_Options; /* Initialize with burst mode; - * LLC disabled. (MAC only) - */ - - /* Interrupt vectors the adapter places on attached system bus. */ - u_int8_t CMD_Status_IV; /* Interrupt vector: command status. */ - u_int8_t TX_IV; /* Interrupt vector: transmit. */ - u_int8_t RX_IV; /* Interrupt vector: receive. */ - u_int8_t Ring_Status_IV; /* Interrupt vector: ring status. */ - u_int8_t SCB_Clear_IV; /* Interrupt vector: SCB clear. */ - u_int8_t Adapter_CHK_IV; /* Interrupt vector: adapter check. */ - - u_int16_t RX_Burst_Size; /* Max. number of transfer cycles. */ - u_int16_t TX_Burst_Size; /* During DMA burst; even value! */ - u_int16_t DMA_Abort_Thrhld; /* Number of DMA retries. */ - - u_int32_t SCB_Addr; /* SCB address: even, word aligned, high-low */ - u_int32_t SSB_Addr; /* SSB address: even, word aligned, high-low */ -} IPB, *IPB_Ptr; -#pragma pack() - -/* - * OPEN Command Parameter List (OCPL) (can be reused, if the adapter has to - * be reopened) - */ -#define BUFFER_SIZE 2048 /* Buffers on Adapter. */ -#define TPL_SIZE 8+6*TX_FRAG_NUM /* Depending on fragments per TPL. */ -#define RPL_SIZE 14 /* (with TI firmware v2.26 handling - * up to nine fragments possible) - */ -#define TX_BUF_MIN 20 /* ??? (Stephan: calculation with */ -#define TX_BUF_MAX 40 /* BUFFER_SIZE and MAX_FRAME_SIZE) ??? - */ -#define DISABLE_EARLY_TOKEN_RELEASE 0x1000 - -/* OPEN Options (high-low) */ -#define WRAP_INTERFACE 0x0080 /* Inserting omitted for test - * purposes; transmit data appears - * as receive data. (useful for - * testing; change: CLOSE necessary) - */ -#define DISABLE_HARD_ERROR 0x0040 /* On HARD_ERROR & TRANSMIT_BEACON - * no RING.STATUS interrupt. - */ -#define DISABLE_SOFT_ERROR 0x0020 /* On SOFT_ERROR, no RING.STATUS - * interrupt. - */ -#define PASS_ADAPTER_MAC_FRAMES 0x0010 /* Passing unsupported MAC frames - * to system. - */ -#define PASS_ATTENTION_FRAMES 0x0008 /* All changed attention MAC frames are - * passed to the system. - */ -#define PAD_ROUTING_FIELD 0x0004 /* Routing field is padded to 18 - * bytes. 
- */ -#define FRAME_HOLD 0x0002 /*Adapter waits for entire frame before - * initiating DMA transfer; otherwise: - * DMA transfer initiation if internal - * buffer filled. - */ -#define CONTENDER 0x0001 /* Adapter participates in the monitor - * contention process. - */ -#define PASS_BEACON_MAC_FRAMES 0x8000 /* Adapter passes beacon MAC frames - * to the system. - */ -#define EARLY_TOKEN_RELEASE 0x1000 /* Only valid in 16 Mbps operation; - * 0 = ETR. (no effect in 4 Mbps - * operation) - */ -#define COPY_ALL_MAC_FRAMES 0x0400 /* All MAC frames are copied to - * the system. (after OPEN: duplicate - * address test (DAT) MAC frame is - * first received frame copied to the - * system) - */ -#define COPY_ALL_NON_MAC_FRAMES 0x0200 /* All non MAC frames are copied to - * the system. - */ -#define PASS_FIRST_BUF_ONLY 0x0100 /* Passes only first internal buffer - * of each received frame; FrameSize - * of RPLs must contain internal - * BUFFER_SIZE bits for promiscuous mode. - */ -#define ENABLE_FULL_DUPLEX_SELECTION 0x2000 - /* Enable the use of full-duplex - * settings with bits in byte 22 in - * ocpl. (new feature in firmware - * version 3.09) - */ - -/* Full-duplex settings */ -#define OPEN_FULL_DUPLEX_OFF 0x0000 -#define OPEN_FULL_DUPLEX_ON 0x00c0 -#define OPEN_FULL_DUPLEX_AUTO 0x0080 - -#define PROD_ID_SIZE 18 /* Length of product ID. */ - -#define TX_FRAG_NUM 3 /* Number of fragments used in one TPL. */ -#define TX_MORE_FRAGMENTS 0x8000 /* Bit set in DataCount to indicate more - * fragments following. - */ - -/* XXX is there some better way to do this? */ -#define ISA_MAX_ADDRESS 0x00ffffff -#define PCI_MAX_ADDRESS 0xffffffff - -#pragma pack(1) -typedef struct { - u_int16_t OPENOptions; - u_int8_t NodeAddr[6]; /* Adapter node address; use ROM - * address - */ - u_int32_t GroupAddr; /* Multicast: high order - * bytes = 0xC000 - */ - u_int32_t FunctAddr; /* High order bytes = 0xC000 */ - __be16 RxListSize; /* RPL size: 0 (=26), 14, 20 or - * 26 bytes read by the adapter. - * (Depending on the number of - * fragments/list) - */ - __be16 TxListSize; /* TPL size */ - __be16 BufSize; /* Is automatically rounded up to the - * nearest nK boundary. - */ - u_int16_t FullDuplex; - u_int16_t Reserved; - u_int8_t TXBufMin; /* Number of adapter buffers reserved - * for transmission a minimum of 2 - * buffers must be allocated. - */ - u_int8_t TXBufMax; /* Maximum number of adapter buffers - * for transmit; a minimum of 2 buffers - * must be available for receive. - * Default: 6 - */ - u_int16_t ProdIDAddr[2];/* Pointer to product ID. */ -} OPB, *OPB_Ptr; -#pragma pack() - -/* - * SCB: adapter commands enabled by the host system started by writing - * CMD_INTERRUPT_ADAPTER | CMD_EXECUTE (|SCB_REQUEST) to the SIFCMD IO - * register. (special case: | CMD_SYSTEM_IRQ for initialization) - */ -#pragma pack(1) -typedef struct { - u_int16_t CMD; /* Command code */ - u_int16_t Parm[2]; /* Pointer to Command Parameter Block */ -} SCB; /* System Command Block (32 bit physical address; big endian)*/ -#pragma pack() - -/* - * SSB: adapter command return status can be evaluated after COMMAND_STATUS - * adapter to system interrupt after reading SSB, the availability of the SSB - * has to be told the adapter by writing CMD_INTERRUPT_ADAPTER | CMD_SSB_CLEAR - * in the SIFCMD IO register. - */ -#pragma pack(1) -typedef struct { - u_int16_t STS; /* Status code */ - u_int16_t Parm[3]; /* Parameter or pointer to Status Parameter - * Block. 
- */ -} SSB; /* System Status Block (big endian - physical address) */ -#pragma pack() - -typedef struct { - unsigned short BurnedInAddrPtr; /* Pointer to adapter burned in - * address. (BIA) - */ - unsigned short SoftwareLevelPtr;/* Pointer to software level data. */ - unsigned short AdapterAddrPtr; /* Pointer to adapter addresses. */ - unsigned short AdapterParmsPtr; /* Pointer to adapter parameters. */ - unsigned short MACBufferPtr; /* Pointer to MAC buffer. (internal) */ - unsigned short LLCCountersPtr; /* Pointer to LLC counters. */ - unsigned short SpeedFlagPtr; /* Pointer to data rate flag. - * (4/16 Mbps) - */ - unsigned short AdapterRAMPtr; /* Pointer to adapter RAM found. (KB) */ -} INTPTRS; /* Adapter internal pointers */ - -#pragma pack(1) -typedef struct { - u_int8_t Line_Error; /* Line error: code violation in - * frame or in a token, or FCS error. - */ - u_int8_t Internal_Error; /* IBM specific. (Reserved_1) */ - u_int8_t Burst_Error; - u_int8_t ARI_FCI_Error; /* ARI/FCI bit zero in AMP or - * SMP MAC frame. - */ - u_int8_t AbortDelimeters; /* IBM specific. (Reserved_2) */ - u_int8_t Reserved_3; - u_int8_t Lost_Frame_Error; /* Receive of end of transmitted - * frame failed. - */ - u_int8_t Rx_Congest_Error; /* Adapter in repeat mode has not - * enough buffer space to copy incoming - * frame. - */ - u_int8_t Frame_Copied_Error; /* ARI bit not zero in frame - * addressed to adapter. - */ - u_int8_t Frequency_Error; /* IBM specific. (Reserved_4) */ - u_int8_t Token_Error; /* (active only in monitor station) */ - u_int8_t Reserved_5; - u_int8_t DMA_Bus_Error; /* DMA bus errors not exceeding the - * abort thresholds. - */ - u_int8_t DMA_Parity_Error; /* DMA parity errors not exceeding - * the abort thresholds. - */ -} ERRORTAB; /* Adapter error counters */ -#pragma pack() - - -/*--------------------- Send and Receive definitions -------------------*/ -#pragma pack(1) -typedef struct { - __be16 DataCount; /* Value 0, even and odd values are - * permitted; value is unaltered most - * significant bit set: following - * fragments last fragment: most - * significant bit is not evaluated. - * (???) - */ - __be32 DataAddr; /* Pointer to frame data fragment; - * even or odd. - */ -} Fragment; -#pragma pack() - -#define MAX_FRAG_NUMBERS 9 /* Maximal number of fragments possible to use - * in one RPL/TPL. (depending on TI firmware - * version) - */ - -/* - * AC (1), FC (1), Dst (6), Src (6), RIF (18), Data (4472) = 4504 - * The packet size can be one of the follows: 548, 1502, 2084, 4504, 8176, - * 11439, 17832. Refer to TMS380 Second Generation Token Ring User's Guide - * Page 2-27. - */ -#define HEADER_SIZE (1 + 1 + 6 + 6) -#define SRC_SIZE 18 -#define MIN_DATA_SIZE 516 -#define DEFAULT_DATA_SIZE 4472 -#define MAX_DATA_SIZE 17800 - -#define DEFAULT_PACKET_SIZE (HEADER_SIZE + SRC_SIZE + DEFAULT_DATA_SIZE) -#define MIN_PACKET_SIZE (HEADER_SIZE + SRC_SIZE + MIN_DATA_SIZE) -#define MAX_PACKET_SIZE (HEADER_SIZE + SRC_SIZE + MAX_DATA_SIZE) - -/* - * Macros to deal with the frame status field. 
- */ -#define AC_NOT_RECOGNIZED 0x00 -#define GROUP_BIT 0x80 -#define GET_TRANSMIT_STATUS_HIGH_BYTE(Ts) ((unsigned char)((Ts) >> 8)) -#define GET_FRAME_STATUS_HIGH_AC(Fs) ((unsigned char)(((Fs) & 0xC0) >> 6)) -#define GET_FRAME_STATUS_LOW_AC(Fs) ((unsigned char)(((Fs) & 0x0C) >> 2)) -#define DIRECTED_FRAME(Context) (!((Context)->MData[2] & GROUP_BIT)) - - -/*--------------------- Send Functions ---------------------------------*/ -/* define TX_CSTAT _REQUEST (R) and _COMPLETE (C) values (high-low) */ - -#define TX_VALID 0x0080 /* R: set via TRANSMIT.VALID interrupt. - * C: always reset to zero! - */ -#define TX_FRAME_COMPLETE 0x0040 /* R: must be reset to zero. - * C: set to one. - */ -#define TX_START_FRAME 0x0020 /* R: start of a frame: 1 - * C: unchanged. - */ -#define TX_END_FRAME 0x0010 /* R: end of a frame: 1 - * C: unchanged. - */ -#define TX_FRAME_IRQ 0x0008 /* R: request interrupt generation - * after transmission. - * C: unchanged. - */ -#define TX_ERROR 0x0004 /* R: reserved. - * C: set to one if Error occurred. - */ -#define TX_INTERFRAME_WAIT 0x0004 -#define TX_PASS_CRC 0x0002 /* R: set if CRC value is already - * calculated. (valid only in - * FRAME_START TPL) - * C: unchanged. - */ -#define TX_PASS_SRC_ADDR 0x0001 /* R: adapter uses explicit frame - * source address and does not overwrite - * with the adapter node address. - * (valid only in FRAME_START TPL) - * - * C: unchanged. - */ -#define TX_STRIP_FS 0xFF00 /* R: reserved. - * C: if no Transmission Error, - * field contains copy of FS byte after - * stripping of frame. - */ - -/* - * Structure of Transmit Parameter Lists (TPLs) (only one frame every TPL, - * but possibly multiple TPLs for one frame) the length of the TPLs has to be - * initialized in the OPL. (OPEN parameter list) - */ -#define TPL_NUM 3 /* Number of Transmit Parameter Lists. - * !! MUST BE >= 3 !! - */ - -#pragma pack(1) -typedef struct s_TPL TPL; - -struct s_TPL { /* Transmit Parameter List (align on even word boundaries) */ - __be32 NextTPLAddr; /* Pointer to next TPL in chain; if - * pointer is odd: this is the last - * TPL. Pointing to itself can cause - * problems! - */ - volatile u_int16_t Status; /* Initialized by the adapter: - * CSTAT_REQUEST important: update least - * significant bit first! Set by the - * adapter: CSTAT_COMPLETE status. - */ - __be16 FrameSize; /* Number of bytes to be transmitted - * as a frame including AC/FC, - * Destination, Source, Routing field - * not including CRC, FS, End Delimiter - * (valid only if START_FRAME bit in - * CSTAT nonzero) must not be zero in - * any list; maximum value: (BUFFER_SIZE - * - 8) * TX_BUF_MAX sum of DataCount - * values in FragmentList must equal - * Frame_Size value in START_FRAME TPL! - * frame data fragment list. - */ - - /* TPL/RPL size in OPEN parameter list depending on maximal - * numbers of fragments used in one parameter list. - */ - Fragment FragList[TX_FRAG_NUM]; /* Maximum: nine frame fragments in one - * TPL actual version of firmware: 9 - * fragments possible. - */ -#pragma pack() - - /* Special proprietary data and precalculations */ - - TPL *NextTPLPtr; /* Pointer to next TPL in chain. */ - unsigned char *MData; - struct sk_buff *Skb; - unsigned char TPLIndex; - volatile unsigned char BusyFlag;/* Flag: TPL busy? */ - dma_addr_t DMABuff; /* DMA IO bus address from dma_map */ -}; - -/* ---------------------Receive Functions-------------------------------* - * define RECEIVE_CSTAT_REQUEST (R) and RECEIVE_CSTAT_COMPLETE (C) values. 
- * (high-low) - */ -#define RX_VALID 0x0080 /* R: set; tell adapter with - * RECEIVE.VALID interrupt. - * C: reset to zero. - */ -#define RX_FRAME_COMPLETE 0x0040 /* R: must be reset to zero, - * C: set to one. - */ -#define RX_START_FRAME 0x0020 /* R: must be reset to zero. - * C: set to one on the list. - */ -#define RX_END_FRAME 0x0010 /* R: must be reset to zero. - * C: set to one on the list - * that ends the frame. - */ -#define RX_FRAME_IRQ 0x0008 /* R: request interrupt generation - * after receive. - * C: unchanged. - */ -#define RX_INTERFRAME_WAIT 0x0004 /* R: after receiving a frame: - * interrupt and wait for a - * RECEIVE.CONTINUE. - * C: unchanged. - */ -#define RX_PASS_CRC 0x0002 /* R: if set, the adapter includes - * the CRC in data passed. (last four - * bytes; valid only if FRAME_START is - * set) - * C: set, if CRC is included in - * received data. - */ -#define RX_PASS_SRC_ADDR 0x0001 /* R: adapter uses explicit frame - * source address and does not - * overwrite with the adapter node - * address. (valid only if FRAME_START - * is set) - * C: unchanged. - */ -#define RX_RECEIVE_FS 0xFC00 /* R: reserved; must be reset to zero. - * C: on lists with START_FRAME, field - * contains frame status field from - * received frame; otherwise cleared. - */ -#define RX_ADDR_MATCH 0x0300 /* R: reserved; must be reset to zero. - * C: address match code mask. - */ -#define RX_STATUS_MASK 0x00FF /* Mask for receive status bits. */ - -#define RX_INTERN_ADDR_MATCH 0x0100 /* C: internally address match. */ -#define RX_EXTERN_ADDR_MATCH 0x0200 /* C: externally matched via - * XMATCH/XFAIL interface. - */ -#define RX_INTEXT_ADDR_MATCH 0x0300 /* C: internally and externally - * matched. - */ -#define RX_READY (RX_VALID | RX_FRAME_IRQ) /* Ready for receive. */ - -/* Constants for Command Status Interrupt. - * COMMAND_REJECT status field bit functions (SSB.Parm[0]) - */ -#define ILLEGAL_COMMAND 0x0080 /* Set if an unknown command - * is issued to the adapter - */ -#define ADDRESS_ERROR 0x0040 /* Set if any address field in - * the SCB is odd. (not word aligned) - */ -#define ADAPTER_OPEN 0x0020 /* Command issued illegal with - * open adapter. - */ -#define ADAPTER_CLOSE 0x0010 /* Command issued illegal with - * closed adapter. - */ -#define SAME_COMMAND 0x0008 /* Command issued with same command - * already executing. - */ - -/* OPEN_COMPLETION values (SSB.Parm[0], MSB) */ -#define NODE_ADDR_ERROR 0x0040 /* Wrong address or BIA read - * zero address. - */ -#define LIST_SIZE_ERROR 0x0020 /* If List_Size value not in 0, - * 14, 20, 26. - */ -#define BUF_SIZE_ERROR 0x0010 /* Not enough available memory for - * two buffers. - */ -#define TX_BUF_COUNT_ERROR 0x0004 /* Remaining receive buffers less than - * two. - */ -#define OPEN_ERROR 0x0002 /* Error during ring insertion; more - * information in bits 8-15. - */ - -/* Standard return codes */ -#define GOOD_COMPLETION 0x0080 /* =OPEN_SUCCESSFULL */ -#define INVALID_OPEN_OPTION 0x0001 /* OPEN options are not supported by - * the adapter. - */ - -/* OPEN phases; details of OPEN_ERROR (SSB.Parm[0], LSB) */ -#define OPEN_PHASES_MASK 0xF000 /* Check only the bits 8-11. */ -#define LOBE_MEDIA_TEST 0x1000 -#define PHYSICAL_INSERTION 0x2000 -#define ADDRESS_VERIFICATION 0x3000 -#define PARTICIPATION_IN_RING_POLL 0x4000 -#define REQUEST_INITIALISATION 0x5000 -#define FULLDUPLEX_CHECK 0x6000 - -/* OPEN error codes; details of OPEN_ERROR (SSB.Parm[0], LSB) */ -#define OPEN_ERROR_CODES_MASK 0x0F00 /* Check only the bits 12-15. 
*/ -#define OPEN_FUNCTION_FAILURE 0x0100 /* Unable to transmit to itself or - * frames received before insertion. - */ -#define OPEN_SIGNAL_LOSS 0x0200 /* Signal loss condition detected at - * receiver. - */ -#define OPEN_TIMEOUT 0x0500 /* Insertion timer expired before - * logical insertion. - */ -#define OPEN_RING_FAILURE 0x0600 /* Unable to receive own ring purge - * MAC frames. - */ -#define OPEN_RING_BEACONING 0x0700 /* Beacon MAC frame received after - * ring insertion. - */ -#define OPEN_DUPLICATE_NODEADDR 0x0800 /* Other station in ring found - * with the same address. - */ -#define OPEN_REQUEST_INIT 0x0900 /* RPS present but does not respond. */ -#define OPEN_REMOVE_RECEIVED 0x0A00 /* Adapter received a remove adapter - * MAC frame. - */ -#define OPEN_FULLDUPLEX_SET 0x0D00 /* Got this with full duplex on when - * trying to connect to a normal ring. - */ - -/* SET_BRIDGE_PARMS return codes: */ -#define BRIDGE_INVALID_MAX_LEN 0x4000 /* MAX_ROUTING_FIELD_LENGTH odd, - * less than 6 or > 30. - */ -#define BRIDGE_INVALID_SRC_RING 0x2000 /* SOURCE_RING number zero, too large - * or = TARGET_RING. - */ -#define BRIDGE_INVALID_TRG_RING 0x1000 /* TARGET_RING number zero, too large - * or = SOURCE_RING. - */ -#define BRIDGE_INVALID_BRDGE_NO 0x0800 /* BRIDGE_NUMBER too large. */ -#define BRIDGE_INVALID_OPTIONS 0x0400 /* Invalid bridge options. */ -#define BRIDGE_DIAGS_FAILED 0x0200 /* Diagnostics of TMS380SRA failed. */ -#define BRIDGE_NO_SRA 0x0100 /* The TMS380SRA does not exist in HW - * configuration. - */ - -/* - * Bring Up Diagnostics error codes. - */ -#define BUD_INITIAL_ERROR 0x0 -#define BUD_CHECKSUM_ERROR 0x1 -#define BUD_ADAPTER_RAM_ERROR 0x2 -#define BUD_INSTRUCTION_ERROR 0x3 -#define BUD_CONTEXT_ERROR 0x4 -#define BUD_PROTOCOL_ERROR 0x5 -#define BUD_INTERFACE_ERROR 0x6 - -/* BUD constants */ -#define BUD_MAX_RETRIES 3 -#define BUD_MAX_LOOPCNT 6 -#define BUD_TIMEOUT 3000 - -/* Initialization constants */ -#define INIT_MAX_RETRIES 3 /* Maximum three retries. */ -#define INIT_MAX_LOOPCNT 22 /* Maximum loop counts. */ - -/* RING STATUS field values (high/low) */ -#define SIGNAL_LOSS 0x0080 /* Loss of signal on the ring - * detected. - */ -#define HARD_ERROR 0x0040 /* Transmitting or receiving beacon - * frames. - */ -#define SOFT_ERROR 0x0020 /* Report error MAC frame - * transmitted. - */ -#define TRANSMIT_BEACON 0x0010 /* Transmitting beacon frames on the - * ring. - */ -#define LOBE_WIRE_FAULT 0x0008 /* Open or short circuit in the - * cable to concentrator; adapter - * closed. - */ -#define AUTO_REMOVAL_ERROR 0x0004 /* Lobe wrap test failed, deinserted; - * adapter closed. - */ -#define REMOVE_RECEIVED 0x0001 /* Received a remove ring station MAC - * MAC frame request; adapter closed. - */ -#define COUNTER_OVERFLOW 0x8000 /* Overflow of one of the adapters - * error counters; READ.ERROR.LOG. - */ -#define SINGLE_STATION 0x4000 /* Adapter is the only station on the - * ring. - */ -#define RING_RECOVERY 0x2000 /* Claim token MAC frames on the ring; - * reset after ring purge frame. - */ - -#define ADAPTER_CLOSED (LOBE_WIRE_FAULT | AUTO_REMOVAL_ERROR |\ - REMOVE_RECEIVED) - -/* Adapter_check_block.Status field bit assignments: */ -#define DIO_PARITY 0x8000 /* Adapter detects bad parity - * through direct I/O access. - */ -#define DMA_READ_ABORT 0x4000 /* Aborting DMA read operation - * from system Parm[0]: 0=timeout, - * 1=parity error, 2=bus error; - * Parm[1]: 32 bit pointer to host - * system address at failure. 
- */ -#define DMA_WRITE_ABORT 0x2000 /* Aborting DMA write operation - * to system. (parameters analogous to - * DMA_READ_ABORT) - */ -#define ILLEGAL_OP_CODE 0x1000 /* Illegal operation code in the - * the adapters firmware Parm[0]-2: - * communications processor registers - * R13-R15. - */ -#define PARITY_ERRORS 0x0800 /* Adapter detects internal bus - * parity error. - */ -#define RAM_DATA_ERROR 0x0080 /* Valid only during RAM testing; - * RAM data error Parm[0-1]: 32 bit - * pointer to RAM location. - */ -#define RAM_PARITY_ERROR 0x0040 /* Valid only during RAM testing; - * RAM parity error Parm[0-1]: 32 bit - * pointer to RAM location. - */ -#define RING_UNDERRUN 0x0020 /* Internal DMA underrun when - * transmitting onto ring. - */ -#define INVALID_IRQ 0x0008 /* Unrecognized interrupt generated - * internal to adapter Parm[0-2]: - * adapter register R13-R15. - */ -#define INVALID_ERROR_IRQ 0x0004 /* Unrecognized error interrupt - * generated Parm[0-2]: adapter register - * R13-R15. - */ -#define INVALID_XOP 0x0002 /* Unrecognized XOP request in - * communication processor Parm[0-2]: - * adapter register R13-R15. - */ -#define CHECKADDR 0x05E0 /* Adapter check status information - * address offset. - */ -#define ROM_PAGE_0 0x0000 /* Adapter ROM page 0. */ - -/* - * RECEIVE.STATUS interrupt result SSB values: (high-low) - * (RECEIVE_COMPLETE field bit definitions in SSB.Parm[0]) - */ -#define RX_COMPLETE 0x0080 /* SSB.Parm[0]; SSB.Parm[1]: 32 - * bit pointer to last RPL. - */ -#define RX_SUSPENDED 0x0040 /* SSB.Parm[0]; SSB.Parm[1]: 32 - * bit pointer to RPL with odd - * forward pointer. - */ - -/* Valid receive CSTAT: */ -#define RX_FRAME_CONTROL_BITS (RX_VALID | RX_START_FRAME | RX_END_FRAME | \ - RX_FRAME_COMPLETE) -#define VALID_SINGLE_BUFFER_FRAME (RX_START_FRAME | RX_END_FRAME | \ - RX_FRAME_COMPLETE) - -typedef enum SKB_STAT SKB_STAT; -enum SKB_STAT { - SKB_UNAVAILABLE, - SKB_DMA_DIRECT, - SKB_DATA_COPY -}; - -/* Receive Parameter List (RPL) The length of the RPLs has to be initialized - * in the OPL. (OPEN parameter list) - */ -#define RPL_NUM 3 - -#define RX_FRAG_NUM 1 /* Maximal number of used fragments in one RPL. - * (up to firmware v2.24: 3, now: up to 9) - */ - -#pragma pack(1) -typedef struct s_RPL RPL; -struct s_RPL { /* Receive Parameter List */ - __be32 NextRPLAddr; /* Pointer to next RPL in chain - * (normalized = physical 32 bit - * address) if pointer is odd: this - * is last RPL. Pointing to itself can - * cause problems! - */ - volatile u_int16_t Status; /* Set by creation of Receive Parameter - * List RECEIVE_CSTAT_COMPLETE set by - * adapter in lists that start or end - * a frame. - */ - volatile __be16 FrameSize; /* Number of bytes received as a - * frame including AC/FC, Destination, - * Source, Routing field not including - * CRC, FS (Frame Status), End Delimiter - * (valid only if START_FRAME bit in - * CSTAT nonzero) must not be zero in - * any list; maximum value: (BUFFER_SIZE - * - 8) * TX_BUF_MAX sum of DataCount - * values in FragmentList must equal - * Frame_Size value in START_FRAME TPL! - * frame data fragment list - */ - - /* TPL/RPL size in OPEN parameter list depending on maximal numbers - * of fragments used in one parameter list. - */ - Fragment FragList[RX_FRAG_NUM]; /* Maximum: nine frame fragments in - * one TPL. Actual version of firmware: - * 9 fragments possible. - */ -#pragma pack() - - /* Special proprietary data and precalculations. */ - RPL *NextRPLPtr; /* Logical pointer to next RPL in chain. 
*/ - unsigned char *MData; - struct sk_buff *Skb; - SKB_STAT SkbStat; - int RPLIndex; - dma_addr_t DMABuff; /* DMA IO bus address from dma_map */ -}; - -/* Information that need to be kept for each board. */ -typedef struct net_local { -#pragma pack(1) - IPB ipb; /* Initialization Parameter Block. */ - SCB scb; /* System Command Block: system to adapter - * communication. - */ - SSB ssb; /* System Status Block: adapter to system - * communication. - */ - OPB ocpl; /* Open Options Parameter Block. */ - - ERRORTAB errorlogtable; /* Adapter statistic error counters. - * (read from adapter memory) - */ - unsigned char ProductID[PROD_ID_SIZE + 1]; /* Product ID */ -#pragma pack() - - TPL Tpl[TPL_NUM]; - TPL *TplFree; - TPL *TplBusy; - unsigned char LocalTxBuffers[TPL_NUM][DEFAULT_PACKET_SIZE]; - - RPL Rpl[RPL_NUM]; - RPL *RplHead; - RPL *RplTail; - unsigned char LocalRxBuffers[RPL_NUM][DEFAULT_PACKET_SIZE]; - - struct device *pdev; - int DataRate; - unsigned char ScbInUse; - unsigned short CMDqueue; - - unsigned long AdapterOpenFlag:1; - unsigned long AdapterVirtOpenFlag:1; - unsigned long OpenCommandIssued:1; - unsigned long TransmitCommandActive:1; - unsigned long TransmitHaltScheduled:1; - unsigned long HaltInProgress:1; - unsigned long LobeWireFaultLogged:1; - unsigned long ReOpenInProgress:1; - unsigned long Sleeping:1; - - unsigned long LastOpenStatus; - unsigned short CurrentRingStatus; - unsigned long MaxPacketSize; - - unsigned long StartTime; - unsigned long LastSendTime; - - struct tr_statistics MacStat; /* MAC statistics structure */ - - unsigned long dmalimit; /* the max DMA address (ie, ISA) */ - dma_addr_t dmabuffer; /* the DMA bus address corresponding to - priv. Might be different from virt_to_bus() - for architectures with IO MMU (Alpha) */ - - struct timer_list timer; - - wait_queue_head_t wait_for_tok_int; - - INTPTRS intptrs; /* Internal adapter pointer. Must be read - * before OPEN command. - */ - unsigned short (*setnselout)(struct net_device *); - unsigned short (*sifreadb)(struct net_device *, unsigned short); - void (*sifwriteb)(struct net_device *, unsigned short, unsigned short); - unsigned short (*sifreadw)(struct net_device *, unsigned short); - void (*sifwritew)(struct net_device *, unsigned short, unsigned short); - - spinlock_t lock; /* SMP protection */ - void *tmspriv; -} NET_LOCAL; - -#endif /* __KERNEL__ */ -#endif /* __LINUX_TMS380TR_H */ diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c deleted file mode 100644 index fb9918da579..00000000000 --- a/drivers/net/tokenring/tmspci.c +++ /dev/null @@ -1,248 +0,0 @@ -/* - * tmspci.c: A generic network driver for TMS380-based PCI token ring cards. - * - * Written 1999 by Adam Fritzler - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * This driver module supports the following cards: - * - SysKonnect TR4/16(+) PCI (SK-4590) - * - SysKonnect TR4/16 PCI (SK-4591) - * - Compaq TR 4/16 PCI - * - Thomas-Conrad TC4048 4/16 PCI - * - 3Com 3C339 Token Link Velocity - * - * Maintainer(s): - * AF Adam Fritzler - * - * Modification History: - * 30-Dec-99 AF Split off from the tms380tr driver. - * 22-Jan-00 AF Updated to use indirect read/writes - * 23-Nov-00 JG New PCI API, cleanups - * - * TODO: - * 1. 
See if we can use MMIO instead of port accesses - * - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/pci.h> -#include <linux/init.h> -#include <linux/netdevice.h> -#include <linux/trdevice.h> - -#include <asm/io.h> -#include <asm/irq.h> - -#include "tms380tr.h" - -static char version[] __devinitdata = -"tmspci.c: v1.02 23/11/2000 by Adam Fritzler\n"; - -#define TMS_PCI_IO_EXTENT 32 - -struct card_info { - unsigned char nselout[2]; /* NSELOUT vals for 4mb([0]) and 16mb([1]) */ - char *name; -}; - -static struct card_info card_info_table[] = { - { {0x03, 0x01}, "Compaq 4/16 TR PCI"}, - { {0x03, 0x01}, "SK NET TR 4/16 PCI"}, - { {0x03, 0x01}, "Thomas-Conrad TC4048 PCI 4/16"}, - { {0x03, 0x01}, "3Com Token Link Velocity"}, -}; - -static DEFINE_PCI_DEVICE_TABLE(tmspci_pci_tbl) = { - { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, - { PCI_VENDOR_ID_TCONRAD, PCI_DEVICE_ID_TCONRAD_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, - { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C339, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, - { } /* Terminating entry */ -}; -MODULE_DEVICE_TABLE(pci, tmspci_pci_tbl); - -MODULE_LICENSE("GPL"); - -static void tms_pci_read_eeprom(struct net_device *dev); -static unsigned short tms_pci_setnselout_pins(struct net_device *dev); - -static unsigned short tms_pci_sifreadb(struct net_device *dev, unsigned short reg) -{ - return inb(dev->base_addr + reg); -} - -static unsigned short tms_pci_sifreadw(struct net_device *dev, unsigned short reg) -{ - return inw(dev->base_addr + reg); -} - -static void tms_pci_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg) -{ - outb(val, dev->base_addr + reg); -} - -static void tms_pci_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg) -{ - outw(val, dev->base_addr + reg); -} - -static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - static int versionprinted; - struct net_device *dev; - struct net_local *tp; - int ret; - unsigned int pci_irq_line; - unsigned long pci_ioaddr; - struct card_info *cardinfo = &card_info_table[ent->driver_data]; - - if (versionprinted++ == 0) - printk("%s", version); - - if (pci_enable_device(pdev)) - return -EIO; - - /* Remove I/O space marker in bit 0. */ - pci_irq_line = pdev->irq; - pci_ioaddr = pci_resource_start (pdev, 0); - - /* At this point we have found a valid card. 
*/ - dev = alloc_trdev(sizeof(struct net_local)); - if (!dev) - return -ENOMEM; - - if (!request_region(pci_ioaddr, TMS_PCI_IO_EXTENT, dev->name)) { - ret = -EBUSY; - goto err_out_trdev; - } - - dev->base_addr = pci_ioaddr; - dev->irq = pci_irq_line; - dev->dma = 0; - - dev_info(&pdev->dev, "%s\n", cardinfo->name); - dev_info(&pdev->dev, " IO: %#4lx IRQ: %d\n", dev->base_addr, dev->irq); - - tms_pci_read_eeprom(dev); - - dev_info(&pdev->dev, " Ring Station Address: %pM\n", dev->dev_addr); - - ret = tmsdev_init(dev, &pdev->dev); - if (ret) { - dev_info(&pdev->dev, "unable to get memory for dev->priv.\n"); - goto err_out_region; - } - - tp = netdev_priv(dev); - tp->setnselout = tms_pci_setnselout_pins; - - tp->sifreadb = tms_pci_sifreadb; - tp->sifreadw = tms_pci_sifreadw; - tp->sifwriteb = tms_pci_sifwriteb; - tp->sifwritew = tms_pci_sifwritew; - - memcpy(tp->ProductID, cardinfo->name, PROD_ID_SIZE + 1); - - tp->tmspriv = cardinfo; - - dev->netdev_ops = &tms380tr_netdev_ops; - - ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED, - dev->name, dev); - if (ret) - goto err_out_tmsdev; - - pci_set_drvdata(pdev, dev); - SET_NETDEV_DEV(dev, &pdev->dev); - - ret = register_netdev(dev); - if (ret) - goto err_out_irq; - - return 0; - -err_out_irq: - free_irq(pdev->irq, dev); -err_out_tmsdev: - pci_set_drvdata(pdev, NULL); - tmsdev_term(dev); -err_out_region: - release_region(pci_ioaddr, TMS_PCI_IO_EXTENT); -err_out_trdev: - free_netdev(dev); - return ret; -} - -/* - * Reads MAC address from adapter RAM, which should've read it from - * the onboard ROM. - * - * Calling this on a board that does not support it can be a very - * dangerous thing. The Madge board, for instance, will lock your - * machine hard when this is called. Luckily, its supported in a - * separate driver. 
--ASF - */ -static void tms_pci_read_eeprom(struct net_device *dev) -{ - int i; - - /* Address: 0000:0000 */ - tms_pci_sifwritew(dev, 0, SIFADX); - tms_pci_sifwritew(dev, 0, SIFADR); - - /* Read six byte MAC address data */ - dev->addr_len = 6; - for(i = 0; i < 6; i++) - dev->dev_addr[i] = tms_pci_sifreadw(dev, SIFINC) >> 8; -} - -static unsigned short tms_pci_setnselout_pins(struct net_device *dev) -{ - unsigned short val = 0; - struct net_local *tp = netdev_priv(dev); - struct card_info *cardinfo = tp->tmspriv; - - if(tp->DataRate == SPEED_4) - val |= cardinfo->nselout[0]; /* Set 4Mbps */ - else - val |= cardinfo->nselout[1]; /* Set 16Mbps */ - return val; -} - -static void __devexit tms_pci_detach (struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - - BUG_ON(!dev); - unregister_netdev(dev); - release_region(dev->base_addr, TMS_PCI_IO_EXTENT); - free_irq(dev->irq, dev); - tmsdev_term(dev); - free_netdev(dev); - pci_set_drvdata(pdev, NULL); -} - -static struct pci_driver tms_pci_driver = { - .name = "tmspci", - .id_table = tmspci_pci_tbl, - .probe = tms_pci_attach, - .remove = __devexit_p(tms_pci_detach), -}; - -static int __init tms_pci_init (void) -{ - return pci_register_driver(&tms_pci_driver); -} - -static void __exit tms_pci_rmmod (void) -{ - pci_unregister_driver (&tms_pci_driver); -} - -module_init(tms_pci_init); -module_exit(tms_pci_rmmod); - diff --git a/drivers/net/tun.c b/drivers/net/tun.c index bb8c72c79c6..987aeefbc77 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -313,7 +313,7 @@ static int run_filter(struct tap_filter *filter, const struct sk_buff *skb) /* Exact match */ for (i = 0; i < filter->count; i++) - if (!compare_ether_addr(eh->h_dest, filter->addr[i])) + if (ether_addr_equal(eh->h_dest, filter->addr[i])) return 1; /* Inexact match (multicast only) */ diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 425e201f597..fffee6aee8b 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -486,6 +486,7 @@ static const struct driver_info wwan_info = { #define HUAWEI_VENDOR_ID 0x12D1 #define NOVATEL_VENDOR_ID 0x1410 +#define ZTE_VENDOR_ID 0x19D2 static const struct usb_device_id products [] = { /* @@ -618,6 +619,61 @@ static const struct usb_device_id products [] = { .bInterfaceProtocol = USB_CDC_PROTO_NONE, .driver_info = (unsigned long)&wwan_info, }, { + /* ZTE (Vodafone) K3805-Z */ + .match_flags = USB_DEVICE_ID_MATCH_VENDOR + | USB_DEVICE_ID_MATCH_PRODUCT + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = ZTE_VENDOR_ID, + .idProduct = 0x1003, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = (unsigned long)&wwan_info, +}, { + /* ZTE (Vodafone) K3806-Z */ + .match_flags = USB_DEVICE_ID_MATCH_VENDOR + | USB_DEVICE_ID_MATCH_PRODUCT + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = ZTE_VENDOR_ID, + .idProduct = 0x1015, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = (unsigned long)&wwan_info, +}, { + /* ZTE (Vodafone) K4510-Z */ + .match_flags = USB_DEVICE_ID_MATCH_VENDOR + | USB_DEVICE_ID_MATCH_PRODUCT + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = ZTE_VENDOR_ID, + .idProduct = 0x1173, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = (unsigned long)&wwan_info, +}, { + /* ZTE (Vodafone) K3770-Z */ + 
.match_flags = USB_DEVICE_ID_MATCH_VENDOR + | USB_DEVICE_ID_MATCH_PRODUCT + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = ZTE_VENDOR_ID, + .idProduct = 0x1177, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = (unsigned long)&wwan_info, +}, { + /* ZTE (Vodafone) K3772-Z */ + .match_flags = USB_DEVICE_ID_MATCH_VENDOR + | USB_DEVICE_ID_MATCH_PRODUCT + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = ZTE_VENDOR_ID, + .idProduct = 0x1181, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = (unsigned long)&wwan_info, +}, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long) &cdc_info, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index d316503b35d..63cfd0b2c31 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -356,10 +356,19 @@ static const struct driver_info qmi_wwan_gobi = { }; /* ZTE suck at making USB descriptors */ +static const struct driver_info qmi_wwan_force_int1 = { + .description = "Qualcomm WWAN/QMI device", + .flags = FLAG_WWAN, + .bind = qmi_wwan_bind_shared, + .unbind = qmi_wwan_unbind_shared, + .manage_power = qmi_wwan_manage_power, + .data = BIT(1), /* interface whitelist bitmap */ +}; + static const struct driver_info qmi_wwan_force_int4 = { - .description = "Qualcomm Gobi wwan/QMI device", + .description = "Qualcomm WWAN/QMI device", .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_gobi, + .bind = qmi_wwan_bind_shared, .unbind = qmi_wwan_unbind_shared, .manage_power = qmi_wwan_manage_power, .data = BIT(4), /* interface whitelist bitmap */ @@ -401,6 +410,14 @@ static const struct usb_device_id products[] = { .bInterfaceProtocol = 8, /* NOTE: This is the *slave* interface of the CDC Union! */ .driver_info = (unsigned long)&qmi_wwan_info, }, + { /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */ + .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = HUAWEI_VENDOR_ID, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 56, /* NOTE: This is the *slave* interface of the CDC Union! 
*/ + .driver_info = (unsigned long)&qmi_wwan_info, + }, { /* Huawei E392, E398 and possibly others in "Windows mode" * using a combined control and data interface without any CDC * functional descriptors @@ -430,6 +447,15 @@ static const struct usb_device_id products[] = { .bInterfaceProtocol = 0xff, .driver_info = (unsigned long)&qmi_wwan_force_int4, }, + { /* ZTE (Vodafone) K3520-Z */ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x19d2, + .idProduct = 0x0055, + .bInterfaceClass = 0xff, + .bInterfaceSubClass = 0xff, + .bInterfaceProtocol = 0xff, + .driver_info = (unsigned long)&qmi_wwan_force_int1, + }, { /* ZTE (Vodafone) K3565-Z */ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x19d2, @@ -457,6 +483,15 @@ static const struct usb_device_id products[] = { .bInterfaceProtocol = 0xff, .driver_info = (unsigned long)&qmi_wwan_force_int4, }, + { /* ZTE (Vodafone) K3765-Z */ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x19d2, + .idProduct = 0x2002, + .bInterfaceClass = 0xff, + .bInterfaceSubClass = 0xff, + .bInterfaceProtocol = 0xff, + .driver_info = (unsigned long)&qmi_wwan_force_int4, + }, { /* ZTE (Vodafone) K4505-Z */ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x19d2, diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index c8f1b5b3aff..0d746b3fdef 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -77,7 +77,9 @@ static void rndis_msg_indicate(struct usbnet *dev, struct rndis_indicate *msg, if (dev->driver_info->indication) { dev->driver_info->indication(dev, msg, buflen); } else { - switch (msg->status) { + u32 status = le32_to_cpu(msg->status); + + switch (status) { case RNDIS_STATUS_MEDIA_CONNECT: dev_info(udev, "rndis media connect\n"); break; @@ -85,8 +87,7 @@ static void rndis_msg_indicate(struct usbnet *dev, struct rndis_indicate *msg, dev_info(udev, "rndis media disconnect\n"); break; default: - dev_info(udev, "rndis indication: 0x%08x\n", - le32_to_cpu(msg->status)); + dev_info(udev, "rndis indication: 0x%08x\n", status); } } } @@ -109,16 +110,17 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen) int retval; int partial; unsigned count; - __le32 rsp; - u32 xid = 0, msg_len, request_id; + u32 xid = 0, msg_len, request_id, msg_type, rsp, + status; /* REVISIT when this gets called from contexts other than probe() or * disconnect(): either serialize, or dispatch responses on xid */ + msg_type = le32_to_cpu(buf->msg_type); + /* Issue the request; xid is unique, don't bother byteswapping it */ - if (likely(buf->msg_type != RNDIS_MSG_HALT && - buf->msg_type != RNDIS_MSG_RESET)) { + if (likely(msg_type != RNDIS_MSG_HALT && msg_type != RNDIS_MSG_RESET)) { xid = dev->xid++; if (!xid) xid = dev->xid++; @@ -149,7 +151,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen) } /* Poll the control channel; the request probably completed immediately */ - rsp = buf->msg_type | RNDIS_MSG_COMPLETION; + rsp = le32_to_cpu(buf->msg_type) | RNDIS_MSG_COMPLETION; for (count = 0; count < 10; count++) { memset(buf, 0, CONTROL_BUFFER_SIZE); retval = usb_control_msg(dev->udev, @@ -160,35 +162,36 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen) buf, buflen, RNDIS_CONTROL_TIMEOUT_MS); if (likely(retval >= 8)) { + msg_type = le32_to_cpu(buf->msg_type); msg_len = le32_to_cpu(buf->msg_len); + status = 
le32_to_cpu(buf->status); request_id = (__force u32) buf->request_id; - if (likely(buf->msg_type == rsp)) { + if (likely(msg_type == rsp)) { if (likely(request_id == xid)) { if (unlikely(rsp == RNDIS_MSG_RESET_C)) return 0; - if (likely(RNDIS_STATUS_SUCCESS - == buf->status)) + if (likely(RNDIS_STATUS_SUCCESS == + status)) return 0; dev_dbg(&info->control->dev, "rndis reply status %08x\n", - le32_to_cpu(buf->status)); + status); return -EL3RST; } dev_dbg(&info->control->dev, "rndis reply id %d expected %d\n", request_id, xid); /* then likely retry */ - } else switch (buf->msg_type) { - case RNDIS_MSG_INDICATE: /* fault/event */ + } else switch (msg_type) { + case RNDIS_MSG_INDICATE: /* fault/event */ rndis_msg_indicate(dev, (void *)buf, buflen); - break; - case RNDIS_MSG_KEEPALIVE: { /* ping */ + case RNDIS_MSG_KEEPALIVE: { /* ping */ struct rndis_keepalive_c *msg = (void *)buf; - msg->msg_type = RNDIS_MSG_KEEPALIVE_C; + msg->msg_type = cpu_to_le32(RNDIS_MSG_KEEPALIVE_C); msg->msg_len = cpu_to_le32(sizeof *msg); - msg->status = RNDIS_STATUS_SUCCESS; + msg->status = cpu_to_le32(RNDIS_STATUS_SUCCESS); retval = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), USB_CDC_SEND_ENCAPSULATED_COMMAND, @@ -236,7 +239,7 @@ EXPORT_SYMBOL_GPL(rndis_command); * ActiveSync 4.1 Windows driver. */ static int rndis_query(struct usbnet *dev, struct usb_interface *intf, - void *buf, __le32 oid, u32 in_len, + void *buf, u32 oid, u32 in_len, void **reply, int *reply_len) { int retval; @@ -251,9 +254,9 @@ static int rndis_query(struct usbnet *dev, struct usb_interface *intf, u.buf = buf; memset(u.get, 0, sizeof *u.get + in_len); - u.get->msg_type = RNDIS_MSG_QUERY; + u.get->msg_type = cpu_to_le32(RNDIS_MSG_QUERY); u.get->msg_len = cpu_to_le32(sizeof *u.get + in_len); - u.get->oid = oid; + u.get->oid = cpu_to_le32(oid); u.get->len = cpu_to_le32(in_len); u.get->offset = cpu_to_le32(20); @@ -324,7 +327,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) if (retval < 0) goto fail; - u.init->msg_type = RNDIS_MSG_INIT; + u.init->msg_type = cpu_to_le32(RNDIS_MSG_INIT); u.init->msg_len = cpu_to_le32(sizeof *u.init); u.init->major_version = cpu_to_le32(1); u.init->minor_version = cpu_to_le32(0); @@ -395,22 +398,23 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) /* Check physical medium */ phym = NULL; reply_len = sizeof *phym; - retval = rndis_query(dev, intf, u.buf, OID_GEN_PHYSICAL_MEDIUM, - 0, (void **) &phym, &reply_len); + retval = rndis_query(dev, intf, u.buf, + RNDIS_OID_GEN_PHYSICAL_MEDIUM, + 0, (void **) &phym, &reply_len); if (retval != 0 || !phym) { /* OID is optional so don't fail here. 
*/ - phym_unspec = RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED; + phym_unspec = cpu_to_le32(RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED); phym = &phym_unspec; } if ((flags & FLAG_RNDIS_PHYM_WIRELESS) && - *phym != RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) { + le32_to_cpup(phym) != RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) { netif_dbg(dev, probe, dev->net, "driver requires wireless physical medium, but device is not\n"); retval = -ENODEV; goto halt_fail_and_release; } if ((flags & FLAG_RNDIS_PHYM_NOT_WIRELESS) && - *phym == RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) { + le32_to_cpup(phym) == RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) { netif_dbg(dev, probe, dev->net, "driver requires non-wireless physical medium, but device is wireless.\n"); retval = -ENODEV; @@ -419,8 +423,9 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) /* Get designated host ethernet address */ reply_len = ETH_ALEN; - retval = rndis_query(dev, intf, u.buf, OID_802_3_PERMANENT_ADDRESS, - 48, (void **) &bp, &reply_len); + retval = rndis_query(dev, intf, u.buf, + RNDIS_OID_802_3_PERMANENT_ADDRESS, + 48, (void **) &bp, &reply_len); if (unlikely(retval< 0)) { dev_err(&intf->dev, "rndis get ethaddr, %d\n", retval); goto halt_fail_and_release; @@ -430,12 +435,12 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) /* set a nonzero filter to enable data transfers */ memset(u.set, 0, sizeof *u.set); - u.set->msg_type = RNDIS_MSG_SET; + u.set->msg_type = cpu_to_le32(RNDIS_MSG_SET); u.set->msg_len = cpu_to_le32(4 + sizeof *u.set); - u.set->oid = OID_GEN_CURRENT_PACKET_FILTER; + u.set->oid = cpu_to_le32(RNDIS_OID_GEN_CURRENT_PACKET_FILTER); u.set->len = cpu_to_le32(4); u.set->offset = cpu_to_le32((sizeof *u.set) - 8); - *(__le32 *)(u.buf + sizeof *u.set) = RNDIS_DEFAULT_FILTER; + *(__le32 *)(u.buf + sizeof *u.set) = cpu_to_le32(RNDIS_DEFAULT_FILTER); retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE); if (unlikely(retval < 0)) { @@ -450,7 +455,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) halt_fail_and_release: memset(u.halt, 0, sizeof *u.halt); - u.halt->msg_type = RNDIS_MSG_HALT; + u.halt->msg_type = cpu_to_le32(RNDIS_MSG_HALT); u.halt->msg_len = cpu_to_le32(sizeof *u.halt); (void) rndis_command(dev, (void *)u.halt, CONTROL_BUFFER_SIZE); fail_and_release: @@ -475,7 +480,7 @@ void rndis_unbind(struct usbnet *dev, struct usb_interface *intf) /* try to clear any rndis state/activity (no i/o from stack!) 
*/ halt = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL); if (halt) { - halt->msg_type = RNDIS_MSG_HALT; + halt->msg_type = cpu_to_le32(RNDIS_MSG_HALT); halt->msg_len = cpu_to_le32(sizeof *halt); (void) rndis_command(dev, (void *)halt, CONTROL_BUFFER_SIZE); kfree(halt); @@ -494,16 +499,16 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) while (likely(skb->len)) { struct rndis_data_hdr *hdr = (void *)skb->data; struct sk_buff *skb2; - u32 msg_len, data_offset, data_len; + u32 msg_type, msg_len, data_offset, data_len; + msg_type = le32_to_cpu(hdr->msg_type); msg_len = le32_to_cpu(hdr->msg_len); data_offset = le32_to_cpu(hdr->data_offset); data_len = le32_to_cpu(hdr->data_len); /* don't choke if we see oob, per-packet data, etc */ - if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET || - skb->len < msg_len || - (data_offset + data_len + 8) > msg_len)) { + if (unlikely(msg_type != RNDIS_MSG_PACKET || skb->len < msg_len + || (data_offset + data_len + 8) > msg_len)) { dev->net->stats.rx_frame_errors++; netdev_dbg(dev->net, "bad rndis message %d/%d/%d/%d, len %d\n", le32_to_cpu(hdr->msg_type), @@ -569,7 +574,7 @@ rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) fill: hdr = (void *) __skb_push(skb, sizeof *hdr); memset(hdr, 0, sizeof *hdr); - hdr->msg_type = RNDIS_MSG_PACKET; + hdr->msg_type = cpu_to_le32(RNDIS_MSG_PACKET); hdr->msg_len = cpu_to_le32(skb->len); hdr->data_offset = cpu_to_le32(sizeof(*hdr) - 8); hdr->data_len = cpu_to_le32(len); diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 00103a8c5e0..fb1a087b101 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -508,10 +508,9 @@ static int smsc75xx_link_reset(struct usbnet *dev) u16 lcladv, rmtadv; int ret; - /* read and write to clear phy interrupt status */ - ret = smsc75xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC); - check_warn_return(ret, "Error reading PHY_INT_SRC"); - smsc75xx_mdio_write(dev->net, mii->phy_id, PHY_INT_SRC, 0xffff); + /* write to clear phy interrupt status */ + smsc75xx_mdio_write(dev->net, mii->phy_id, PHY_INT_SRC, + PHY_INT_SRC_CLEAR_ALL); ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); check_warn_return(ret, "Error writing INT_STS"); @@ -904,15 +903,20 @@ static int smsc75xx_reset(struct usbnet *dev) netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x", buf); - /* Configure GPIO pins as LED outputs */ - ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf); - check_warn_return(ret, "Failed to read LED_GPIO_CFG: %d", ret); + ret = smsc75xx_read_reg(dev, E2P_CMD, &buf); + check_warn_return(ret, "Failed to read E2P_CMD: %d", ret); - buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL); - buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL; + /* only set default GPIO/LED settings if no EEPROM is detected */ + if (!(buf & E2P_CMD_LOADED)) { + ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf); + check_warn_return(ret, "Failed to read LED_GPIO_CFG: %d", ret); - ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf); - check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d", ret); + buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL); + buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL; + + ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf); + check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d", ret); + } ret = smsc75xx_write_reg(dev, FLOW, 0); check_warn_return(ret, "Failed to write FLOW: %d", ret); diff --git a/drivers/net/usb/smsc75xx.h b/drivers/net/usb/smsc75xx.h index 16e98c77834..67eba39e6ee 100644 
--- a/drivers/net/usb/smsc75xx.h +++ b/drivers/net/usb/smsc75xx.h @@ -388,6 +388,7 @@ #define PHY_INT_SRC_ANEG_COMP ((u16)0x0040) #define PHY_INT_SRC_REMOTE_FAULT ((u16)0x0020) #define PHY_INT_SRC_LINK_DOWN ((u16)0x0010) +#define PHY_INT_SRC_CLEAR_ALL ((u16)0xffff) #define PHY_INT_MASK (30) #define PHY_INT_MASK_ENERGY_ON ((u16)0x0080) diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index b38db48b1ce..9f58330f131 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -909,6 +909,7 @@ static const struct ethtool_ops usbnet_ethtool_ops = { .get_drvinfo = usbnet_get_drvinfo, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, + .get_ts_info = ethtool_op_get_ts_info, }; /*-------------------------------------------------------------------------*/ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index cbefe671bcc..9ce6995e8d0 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -66,12 +66,21 @@ struct virtnet_info { /* Host will merge rx buffers for big packets (shake it! shake it!) */ bool mergeable_rx_bufs; + /* enable config space updates */ + bool config_enable; + /* Active statistics */ struct virtnet_stats __percpu *stats; /* Work struct for refilling if we run low on memory. */ struct delayed_work refill; + /* Work struct for config space updates */ + struct work_struct config_work; + + /* Lock for config space updates */ + struct mutex config_lock; + /* Chain pages by the private ptr. */ struct page *pages; @@ -782,6 +791,16 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, return status == VIRTIO_NET_OK; } +static void virtnet_ack_link_announce(struct virtnet_info *vi) +{ + rtnl_lock(); + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, + VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, + 0, 0)) + dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); + rtnl_unlock(); +} + static int virtnet_close(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); @@ -953,20 +972,31 @@ static const struct net_device_ops virtnet_netdev = { #endif }; -static void virtnet_update_status(struct virtnet_info *vi) +static void virtnet_config_changed_work(struct work_struct *work) { + struct virtnet_info *vi = + container_of(work, struct virtnet_info, config_work); u16 v; + mutex_lock(&vi->config_lock); + if (!vi->config_enable) + goto done; + if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS, offsetof(struct virtio_net_config, status), &v) < 0) - return; + goto done; + + if (v & VIRTIO_NET_S_ANNOUNCE) { + netif_notify_peers(vi->dev); + virtnet_ack_link_announce(vi); + } /* Ignore unknown (future) status bits */ v &= VIRTIO_NET_S_LINK_UP; if (vi->status == v) - return; + goto done; vi->status = v; @@ -977,13 +1007,15 @@ static void virtnet_update_status(struct virtnet_info *vi) netif_carrier_off(vi->dev); netif_stop_queue(vi->dev); } +done: + mutex_unlock(&vi->config_lock); } static void virtnet_config_changed(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; - virtnet_update_status(vi); + queue_work(system_nrt_wq, &vi->config_work); } static int init_vqs(struct virtnet_info *vi) @@ -1077,6 +1109,9 @@ static int virtnet_probe(struct virtio_device *vdev) goto free; INIT_DELAYED_WORK(&vi->refill, refill_work); + mutex_init(&vi->config_lock); + vi->config_enable = true; + INIT_WORK(&vi->config_work, virtnet_config_changed_work); sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg)); sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg)); @@ -1112,7 +1147,7 @@ static int 
virtnet_probe(struct virtio_device *vdev) otherwise get link status from config. */ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { netif_carrier_off(dev); - virtnet_update_status(vi); + queue_work(system_nrt_wq, &vi->config_work); } else { vi->status = VIRTIO_NET_S_LINK_UP; netif_carrier_on(dev); @@ -1171,10 +1206,17 @@ static void __devexit virtnet_remove(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; + /* Prevent config work handler from accessing the device. */ + mutex_lock(&vi->config_lock); + vi->config_enable = false; + mutex_unlock(&vi->config_lock); + unregister_netdev(vi->dev); remove_vq_common(vi); + flush_work(&vi->config_work); + free_percpu(vi->stats); free_netdev(vi->dev); } @@ -1184,6 +1226,11 @@ static int virtnet_freeze(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; + /* Prevent config work handler from accessing the device */ + mutex_lock(&vi->config_lock); + vi->config_enable = false; + mutex_unlock(&vi->config_lock); + virtqueue_disable_cb(vi->rvq); virtqueue_disable_cb(vi->svq); if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) @@ -1197,6 +1244,8 @@ static int virtnet_freeze(struct virtio_device *vdev) remove_vq_common(vi); + flush_work(&vi->config_work); + return 0; } @@ -1217,6 +1266,10 @@ static int virtnet_restore(struct virtio_device *vdev) if (!try_fill_recv(vi, GFP_KERNEL)) queue_delayed_work(system_nrt_wq, &vi->refill, 0); + mutex_lock(&vi->config_lock); + vi->config_enable = true; + mutex_unlock(&vi->config_lock); + return 0; } #endif @@ -1234,6 +1287,7 @@ static unsigned int features[] = { VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, + VIRTIO_NET_F_GUEST_ANNOUNCE, }; static struct virtio_driver virtio_net_driver = { diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index c676de7de02..9eb6479306d 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -2055,15 +2055,4 @@ static struct pci_driver dscc4_driver = { .remove = __devexit_p(dscc4_remove_one), }; -static int __init dscc4_init_module(void) -{ - return pci_register_driver(&dscc4_driver); -} - -static void __exit dscc4_cleanup_module(void) -{ - pci_unregister_driver(&dscc4_driver); -} - -module_init(dscc4_init_module); -module_exit(dscc4_cleanup_module); +module_pci_driver(dscc4_driver); diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 76a8a4a522e..f5d533a706e 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -1120,7 +1120,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/ { lmc_softc_t *sc = dev_to_sc(dev); - lmc_trace(dev, "lmc_runnig_reset in"); + lmc_trace(dev, "lmc_running_reset in"); /* stop interrupts */ /* Clear the interrupt mask */ @@ -1736,18 +1736,7 @@ static struct pci_driver lmc_driver = { .remove = __devexit_p(lmc_remove_one), }; -static int __init init_lmc(void) -{ - return pci_register_driver(&lmc_driver); -} - -static void __exit exit_lmc(void) -{ - pci_unregister_driver(&lmc_driver); -} - -module_init(init_lmc); -module_exit(exit_lmc); +module_pci_driver(lmc_driver); unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/ { diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/net/wimax/i2400m/Kconfig index 3f703384295..672de18a776 100644 --- a/drivers/net/wimax/i2400m/Kconfig +++ b/drivers/net/wimax/i2400m/Kconfig @@ -32,8 +32,9 @@ config WIMAX_I2400M_SDIO If 
unsure, it is safe to select M (module). config WIMAX_IWMC3200_SDIO - bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO" + bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO (EXPERIMENTAL)" depends on WIMAX_I2400M_SDIO + depends on EXPERIMENTAL select IWMC3200TOP help Select if you have a device based on the Intel Multicom WiMAX diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c index e3257681e36..b78ee676e10 100644 --- a/drivers/net/wimax/i2400m/usb-rx.c +++ b/drivers/net/wimax/i2400m/usb-rx.c @@ -277,7 +277,7 @@ retry: d_printf(1, dev, "RX: size changed to %d, received %d, " "copied %d, capacity %ld\n", rx_size, read_size, rx_skb->len, - (long) (skb_end_pointer(new_skb) - new_skb->head)); + (long) skb_end_offset(new_skb)); goto retry; } /* In most cases, it happens due to the hardware scheduling a diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index 29b1e033a10..713d033891e 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c @@ -695,7 +695,7 @@ int i2400mu_resume(struct usb_interface *iface) d_fnstart(3, dev, "(iface %p)\n", iface); rmb(); /* see i2400m->updown's documentation */ if (i2400m->updown == 0) { - d_printf(1, dev, "fw was down, no resume neeed\n"); + d_printf(1, dev, "fw was down, no resume needed\n"); goto out; } d_printf(1, dev, "fw was up, resuming\n"); diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index abd3b71cd4a..5f58fa53238 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -282,8 +282,7 @@ source "drivers/net/wireless/orinoco/Kconfig" source "drivers/net/wireless/p54/Kconfig" source "drivers/net/wireless/rt2x00/Kconfig" source "drivers/net/wireless/rtlwifi/Kconfig" -source "drivers/net/wireless/wl1251/Kconfig" -source "drivers/net/wireless/wl12xx/Kconfig" +source "drivers/net/wireless/ti/Kconfig" source "drivers/net/wireless/zd1211rw/Kconfig" source "drivers/net/wireless/mwifiex/Kconfig" diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 98db76196b5..0ce218b931d 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -51,9 +51,7 @@ obj-$(CONFIG_ATH_COMMON) += ath/ obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o -obj-$(CONFIG_WL1251) += wl1251/ -obj-$(CONFIG_WL12XX) += wl12xx/ -obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx/ +obj-$(CONFIG_WL_TI) += ti/ obj-$(CONFIG_IWM) += iwmc3200wifi/ diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c index f5ce5623da9..0ac09a2bd14 100644 --- a/drivers/net/wireless/adm8211.c +++ b/drivers/net/wireless/adm8211.c @@ -1991,19 +1991,4 @@ static struct pci_driver adm8211_driver = { #endif /* CONFIG_PM */ }; - - -static int __init adm8211_init(void) -{ - return pci_register_driver(&adm8211_driver); -} - - -static void __exit adm8211_exit(void) -{ - pci_unregister_driver(&adm8211_driver); -} - - -module_init(adm8211_init); -module_exit(adm8211_exit); +module_pci_driver(adm8211_driver); diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index 4045e5ab055..3df0146b797 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c @@ -1122,12 +1122,12 @@ exit: static void at76_dump_mib_local(struct at76_priv *priv) { int ret; - struct mib_local *m = kmalloc(sizeof(struct mib_phy), GFP_KERNEL); + struct mib_local *m = kmalloc(sizeof(*m), GFP_KERNEL); if (!m) return; - ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct 
mib_local)); + ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(*m)); if (ret < 0) { wiphy_err(priv->hw->wiphy, "at76_get_mib (LOCAL) failed: %d\n", ret); @@ -1751,7 +1751,7 @@ static void at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb) * following workaround is necessary. If the TX frame is an * authentication frame extract the bssid and send the CMD_JOIN. */ if (mgmt->frame_control & cpu_to_le16(IEEE80211_STYPE_AUTH)) { - if (compare_ether_addr(priv->bssid, mgmt->bssid)) { + if (!ether_addr_equal(priv->bssid, mgmt->bssid)) { memcpy(priv->bssid, mgmt->bssid, ETH_ALEN); ieee80211_queue_work(hw, &priv->work_join_bssid); dev_kfree_skb_any(skb); @@ -2512,10 +2512,8 @@ static void __exit at76_mod_exit(void) printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION " unloading\n"); usb_deregister(&at76_driver); - for (i = 0; i < ARRAY_SIZE(firmwares); i++) { - if (firmwares[i].fw) - release_firmware(firmwares[i].fw); - } + for (i = 0; i < ARRAY_SIZE(firmwares); i++) + release_firmware(firmwares[i].fw); led_trigger_unregister_simple(ledtrig_tx); } diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c index 35e93704c4e..5c008757662 100644 --- a/drivers/net/wireless/ath/ath5k/ani.c +++ b/drivers/net/wireless/ath/ath5k/ani.c @@ -14,6 +14,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include "ath5k.h" #include "reg.h" #include "debug.h" @@ -728,33 +730,25 @@ void ath5k_ani_print_counters(struct ath5k_hw *ah) { /* clears too */ - printk(KERN_NOTICE "ACK fail\t%d\n", - ath5k_hw_reg_read(ah, AR5K_ACK_FAIL)); - printk(KERN_NOTICE "RTS fail\t%d\n", - ath5k_hw_reg_read(ah, AR5K_RTS_FAIL)); - printk(KERN_NOTICE "RTS success\t%d\n", - ath5k_hw_reg_read(ah, AR5K_RTS_OK)); - printk(KERN_NOTICE "FCS error\t%d\n", - ath5k_hw_reg_read(ah, AR5K_FCS_FAIL)); + pr_notice("ACK fail\t%d\n", ath5k_hw_reg_read(ah, AR5K_ACK_FAIL)); + pr_notice("RTS fail\t%d\n", ath5k_hw_reg_read(ah, AR5K_RTS_FAIL)); + pr_notice("RTS success\t%d\n", ath5k_hw_reg_read(ah, AR5K_RTS_OK)); + pr_notice("FCS error\t%d\n", ath5k_hw_reg_read(ah, AR5K_FCS_FAIL)); /* no clear */ - printk(KERN_NOTICE "tx\t%d\n", - ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX)); - printk(KERN_NOTICE "rx\t%d\n", - ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX)); - printk(KERN_NOTICE "busy\t%d\n", - ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR)); - printk(KERN_NOTICE "cycles\t%d\n", - ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE)); - - printk(KERN_NOTICE "AR5K_PHYERR_CNT1\t%d\n", - ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1)); - printk(KERN_NOTICE "AR5K_PHYERR_CNT2\t%d\n", - ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2)); - printk(KERN_NOTICE "AR5K_OFDM_FIL_CNT\t%d\n", - ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT)); - printk(KERN_NOTICE "AR5K_CCK_FIL_CNT\t%d\n", - ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT)); + pr_notice("tx\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX)); + pr_notice("rx\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX)); + pr_notice("busy\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR)); + pr_notice("cycles\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE)); + + pr_notice("AR5K_PHYERR_CNT1\t%d\n", + ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1)); + pr_notice("AR5K_PHYERR_CNT2\t%d\n", + ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2)); + pr_notice("AR5K_OFDM_FIL_CNT\t%d\n", + ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT)); + pr_notice("AR5K_CCK_FIL_CNT\t%d\n", + ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT)); } #endif diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h 
b/drivers/net/wireless/ath/ath5k/ath5k.h index 8d434b8f585..64a453a6dfe 100644 --- a/drivers/net/wireless/ath/ath5k/ath5k.h +++ b/drivers/net/wireless/ath/ath5k/ath5k.h @@ -76,26 +76,29 @@ GENERIC DRIVER DEFINITIONS \****************************/ -#define ATH5K_PRINTF(fmt, ...) \ - printk(KERN_WARNING "%s: " fmt, __func__, ##__VA_ARGS__) +#define ATH5K_PRINTF(fmt, ...) \ + pr_warn("%s: " fmt, __func__, ##__VA_ARGS__) -#define ATH5K_PRINTK(_sc, _level, _fmt, ...) \ - printk(_level "ath5k %s: " _fmt, \ - ((_sc) && (_sc)->hw) ? wiphy_name((_sc)->hw->wiphy) : "", \ - ##__VA_ARGS__) +void __printf(3, 4) +_ath5k_printk(const struct ath5k_hw *ah, const char *level, + const char *fmt, ...); -#define ATH5K_PRINTK_LIMIT(_sc, _level, _fmt, ...) do { \ - if (net_ratelimit()) \ - ATH5K_PRINTK(_sc, _level, _fmt, ##__VA_ARGS__); \ - } while (0) +#define ATH5K_PRINTK(_sc, _level, _fmt, ...) \ + _ath5k_printk(_sc, _level, _fmt, ##__VA_ARGS__) -#define ATH5K_INFO(_sc, _fmt, ...) \ +#define ATH5K_PRINTK_LIMIT(_sc, _level, _fmt, ...) \ +do { \ + if (net_ratelimit()) \ + ATH5K_PRINTK(_sc, _level, _fmt, ##__VA_ARGS__); \ +} while (0) + +#define ATH5K_INFO(_sc, _fmt, ...) \ ATH5K_PRINTK(_sc, KERN_INFO, _fmt, ##__VA_ARGS__) -#define ATH5K_WARN(_sc, _fmt, ...) \ +#define ATH5K_WARN(_sc, _fmt, ...) \ ATH5K_PRINTK_LIMIT(_sc, KERN_WARNING, _fmt, ##__VA_ARGS__) -#define ATH5K_ERR(_sc, _fmt, ...) \ +#define ATH5K_ERR(_sc, _fmt, ...) \ ATH5K_PRINTK_LIMIT(_sc, KERN_ERR, _fmt, ##__VA_ARGS__) /* @@ -1524,7 +1527,7 @@ void ath5k_eeprom_detach(struct ath5k_hw *ah); /* Protocol Control Unit Functions */ /* Helpers */ -int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, +int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band, int len, struct ieee80211_rate *rate, bool shortpre); unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah); unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah); diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c index d7114c75fe9..7106547a14d 100644 --- a/drivers/net/wireless/ath/ath5k/attach.c +++ b/drivers/net/wireless/ath/ath5k/attach.c @@ -20,6 +20,8 @@ * Attach/Detach Functions and helpers * \*************************************/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/pci.h> #include <linux/slab.h> #include "ath5k.h" diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 0e643b016b3..0ba81a66061 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -40,6 +40,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/delay.h> #include <linux/dma-mapping.h> @@ -460,7 +462,7 @@ void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) } if (iter_data->need_set_hw_addr && iter_data->hw_macaddr) - if (compare_ether_addr(iter_data->hw_macaddr, mac) == 0) + if (ether_addr_equal(iter_data->hw_macaddr, mac)) iter_data->need_set_hw_addr = false; if (!iter_data->any_assoc) { @@ -1168,7 +1170,7 @@ ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb, if (ieee80211_is_beacon(mgmt->frame_control) && le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS && - memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) { + ether_addr_equal(mgmt->bssid, common->curbssid)) { /* * Received an IBSS beacon with the same BSSID. Hardware *must* * have updated the local TSF. 
We have to work around various @@ -1232,7 +1234,7 @@ ath5k_update_beacon_rssi(struct ath5k_hw *ah, struct sk_buff *skb, int rssi) /* only beacons from our BSSID */ if (!ieee80211_is_beacon(mgmt->frame_control) || - memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0) + !ether_addr_equal(mgmt->bssid, common->curbssid)) return; ewma_add(&ah->ah_beacon_rssi_avg, rssi); @@ -3038,3 +3040,23 @@ ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable) ath5k_hw_set_rx_filter(ah, rfilt); ah->filter_flags = rfilt; } + +void _ath5k_printk(const struct ath5k_hw *ah, const char *level, + const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + if (ah && ah->hw) + printk("%s" pr_fmt("%s: %pV"), + level, wiphy_name(ah->hw->wiphy), &vaf); + else + printk("%s" pr_fmt("%pV"), level, &vaf); + + va_end(args); +} diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c index e5e8f45d86a..9d00dab666a 100644 --- a/drivers/net/wireless/ath/ath5k/debug.c +++ b/drivers/net/wireless/ath/ath5k/debug.c @@ -57,6 +57,9 @@ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/export.h> #include <linux/moduleparam.h> @@ -247,10 +250,10 @@ static ssize_t write_file_beacon(struct file *file, if (strncmp(buf, "disable", 7) == 0) { AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE); - printk(KERN_INFO "debugfs disable beacons\n"); + pr_info("debugfs disable beacons\n"); } else if (strncmp(buf, "enable", 6) == 0) { AR5K_REG_ENABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE); - printk(KERN_INFO "debugfs enable beacons\n"); + pr_info("debugfs enable beacons\n"); } return count; } @@ -450,19 +453,19 @@ static ssize_t write_file_antenna(struct file *file, if (strncmp(buf, "diversity", 9) == 0) { ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT); - printk(KERN_INFO "ath5k debug: enable diversity\n"); + pr_info("debug: enable diversity\n"); } else if (strncmp(buf, "fixed-a", 7) == 0) { ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A); - printk(KERN_INFO "ath5k debugfs: fixed antenna A\n"); + pr_info("debug: fixed antenna A\n"); } else if (strncmp(buf, "fixed-b", 7) == 0) { ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B); - printk(KERN_INFO "ath5k debug: fixed antenna B\n"); + pr_info("debug: fixed antenna B\n"); } else if (strncmp(buf, "clear", 5) == 0) { for (i = 0; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) { ah->stats.antenna_rx[i] = 0; ah->stats.antenna_tx[i] = 0; } - printk(KERN_INFO "ath5k debug: cleared antenna stats\n"); + pr_info("debug: cleared antenna stats\n"); } return count; } @@ -632,7 +635,7 @@ static ssize_t write_file_frameerrors(struct file *file, st->txerr_fifo = 0; st->txerr_filt = 0; st->tx_all_count = 0; - printk(KERN_INFO "ath5k debug: cleared frameerrors stats\n"); + pr_info("debug: cleared frameerrors stats\n"); } return count; } diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c index f8bfa3ac2af..bd8d4392d68 100644 --- a/drivers/net/wireless/ath/ath5k/desc.c +++ b/drivers/net/wireless/ath/ath5k/desc.c @@ -21,6 +21,8 @@ Hardware Descriptor Functions \******************************/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include "ath5k.h" #include "reg.h" #include "debug.h" @@ -441,10 +443,8 @@ ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah, struct ath5k_desc *desc, struct ath5k_tx_status *ts) { - struct 
ath5k_hw_2w_tx_ctl *tx_ctl; struct ath5k_hw_tx_status *tx_status; - tx_ctl = &desc->ud.ds_tx5210.tx_ctl; tx_status = &desc->ud.ds_tx5210.tx_stat; /* No frame has been send or error */ @@ -495,11 +495,9 @@ ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah, struct ath5k_desc *desc, struct ath5k_tx_status *ts) { - struct ath5k_hw_4w_tx_ctl *tx_ctl; struct ath5k_hw_tx_status *tx_status; u32 txstat0, txstat1; - tx_ctl = &desc->ud.ds_tx5212.tx_ctl; tx_status = &desc->ud.ds_tx5212.tx_stat; txstat1 = ACCESS_ONCE(tx_status->tx_status_1); diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c index 5cc9aa81469..ce86f158423 100644 --- a/drivers/net/wireless/ath/ath5k/dma.c +++ b/drivers/net/wireless/ath/ath5k/dma.c @@ -29,6 +29,8 @@ * status registers (ISR). */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include "ath5k.h" #include "reg.h" #include "debug.h" diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c index cd708c15b77..4026c906cc7 100644 --- a/drivers/net/wireless/ath/ath5k/eeprom.c +++ b/drivers/net/wireless/ath/ath5k/eeprom.c @@ -21,6 +21,8 @@ * EEPROM access functions and helpers * \*************************************/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/slab.h> #include "ath5k.h" diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c index a1ea78e05b4..ee1c2fa8b59 100644 --- a/drivers/net/wireless/ath/ath5k/initvals.c +++ b/drivers/net/wireless/ath/ath5k/initvals.c @@ -19,6 +19,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include "ath5k.h" #include "reg.h" #include "debug.h" @@ -1574,8 +1576,7 @@ ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu) /* AR5K_MODE_11B */ if (mode > 2) { - ATH5K_ERR(ah, - "unsupported channel mode: %d\n", mode); + ATH5K_ERR(ah, "unsupported channel mode: %d\n", mode); return -EINVAL; } diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c index c1151c72371..b9f708a45f4 100644 --- a/drivers/net/wireless/ath/ath5k/led.c +++ b/drivers/net/wireless/ath/ath5k/led.c @@ -39,6 +39,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/pci.h> #include "ath5k.h" diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index 5c532995541..22b80af0f47 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -41,6 +41,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <net/mac80211.h> #include <asm/unaligned.h> diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c index 849fa060ebc..dff48fbc63b 100644 --- a/drivers/net/wireless/ath/ath5k/pci.c +++ b/drivers/net/wireless/ath/ath5k/pci.c @@ -14,6 +14,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/nl80211.h> #include <linux/pci.h> #include <linux/pci-aspm.h> @@ -45,6 +47,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = { { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */ { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */ { PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */ + { PCI_VDEVICE(ATHEROS, 0xff1b) }, /* AR5BXB63 */ { 0 } }; MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table); @@ -337,28 +340,4 @@ static struct pci_driver ath5k_pci_driver = { .driver.pm = ATH5K_PM_OPS, }; -/* - * Module init/exit functions - */ -static int __init -init_ath5k_pci(void) -{ - int ret; - - ret = pci_register_driver(&ath5k_pci_driver); - if (ret) { - printk(KERN_ERR "ath5k_pci: can't register pci driver\n"); - return ret; - } - - return 0; -} - -static void __exit -exit_ath5k_pci(void) -{ - pci_unregister_driver(&ath5k_pci_driver); -} - -module_init(init_ath5k_pci); -module_exit(exit_ath5k_pci); +module_pci_driver(ath5k_pci_driver); diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c index cebfd6fd31d..1f16b4227d8 100644 --- a/drivers/net/wireless/ath/ath5k/pcu.c +++ b/drivers/net/wireless/ath/ath5k/pcu.c @@ -110,7 +110,7 @@ static const unsigned int ack_rates_high[] = * bwmodes. */ int -ath5k_hw_get_frame_duration(struct ath5k_hw *ah, +ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band, int len, struct ieee80211_rate *rate, bool shortpre) { int sifs, preamble, plcp_bits, sym_time; @@ -120,7 +120,7 @@ ath5k_hw_get_frame_duration(struct ath5k_hw *ah, /* Fallback */ if (!ah->ah_bwmode) { __le16 raw_dur = ieee80211_generic_frame_duration(ah->hw, - NULL, len, rate); + NULL, band, len, rate); /* subtract difference between long and short preamble */ dur = le16_to_cpu(raw_dur); @@ -302,14 +302,15 @@ ath5k_hw_write_rate_duration(struct ath5k_hw *ah) * actual rate for this rate. See mac80211 tx.c * ieee80211_duration() for a brief description of * what rate we should choose to TX ACKs. 
*/ - tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false); + tx_time = ath5k_hw_get_frame_duration(ah, band, 10, + rate, false); ath5k_hw_reg_write(ah, tx_time, reg); if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)) continue; - tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, true); + tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, true); ath5k_hw_reg_write(ah, tx_time, reg + (AR5K_SET_SHORT_PREAMBLE << 2)); } diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 3a2845489a1..8b71a2d947e 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -22,6 +22,8 @@ * PHY related functions * \***********************/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/delay.h> #include <linux/slab.h> #include <asm/unaligned.h> diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c index 30b50f93417..65fe929529a 100644 --- a/drivers/net/wireless/ath/ath5k/qcu.c +++ b/drivers/net/wireless/ath/ath5k/qcu.c @@ -20,6 +20,8 @@ Queue Control Unit, DCF Control Unit Functions \********************************************/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include "ath5k.h" #include "reg.h" #include "debug.h" @@ -563,6 +565,7 @@ ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue) int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time) { struct ieee80211_channel *channel = ah->ah_current_channel; + enum ieee80211_band band; struct ieee80211_rate *rate; u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock; u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time); @@ -598,11 +601,12 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time) * Also we have different lowest rate for 802.11a */ if (channel->band == IEEE80211_BAND_5GHZ) - rate = &ah->sbands[IEEE80211_BAND_5GHZ].bitrates[0]; + band = IEEE80211_BAND_5GHZ; else - rate = &ah->sbands[IEEE80211_BAND_2GHZ].bitrates[0]; + band = IEEE80211_BAND_2GHZ; - ack_tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false); + rate = &ah->sbands[band].bitrates[0]; + ack_tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, false); /* ack_tx_time includes an SIFS already */ eifs = ack_tx_time + sifs + 2 * slot_time; diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c index 200f165c0c6..0c2dd4771c3 100644 --- a/drivers/net/wireless/ath/ath5k/reset.c +++ b/drivers/net/wireless/ath/ath5k/reset.c @@ -23,6 +23,8 @@ Reset function and helpers \****************************/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <asm/unaligned.h> #include <linux/pci.h> /* To determine if a card is pci-e */ diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c index 9364da7bd13..04cf0ca7261 100644 --- a/drivers/net/wireless/ath/ath5k/sysfs.c +++ b/drivers/net/wireless/ath/ath5k/sysfs.c @@ -1,3 +1,5 @@ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/device.h> #include <linux/pci.h> diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile index 85746c3eb02..8cae8886f17 100644 --- a/drivers/net/wireless/ath/ath6kl/Makefile +++ b/drivers/net/wireless/ath/ath6kl/Makefile @@ -25,7 +25,8 @@ obj-$(CONFIG_ATH6KL) += ath6kl_core.o ath6kl_core-y += debug.o ath6kl_core-y += hif.o -ath6kl_core-y += htc.o +ath6kl_core-y += htc_mbox.o +ath6kl_core-y += htc_pipe.o ath6kl_core-y += bmi.o ath6kl_core-y += cfg80211.o ath6kl_core-y += init.o diff --git 
a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 00d38952b5f..28a65d3a03d 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -15,6 +15,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/moduleparam.h> #include <linux/inetdevice.h> #include <linux/export.h> @@ -49,6 +51,8 @@ .max_power = 30, \ } +#define DEFAULT_BG_SCAN_PERIOD 60 + static struct ieee80211_rate ath6kl_rates[] = { RATETAB_ENT(10, 0x1, 0), RATETAB_ENT(20, 0x2, 0), @@ -69,7 +73,8 @@ static struct ieee80211_rate ath6kl_rates[] = { #define ath6kl_g_rates (ath6kl_rates + 0) #define ath6kl_g_rates_size 12 -#define ath6kl_g_htcap (IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \ +#define ath6kl_g_htcap IEEE80211_HT_CAP_SGI_20 +#define ath6kl_a_htcap (IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \ IEEE80211_HT_CAP_SGI_20 | \ IEEE80211_HT_CAP_SGI_40) @@ -126,7 +131,7 @@ static struct ieee80211_supported_band ath6kl_band_5ghz = { .channels = ath6kl_5ghz_a_channels, .n_bitrates = ath6kl_a_rates_size, .bitrates = ath6kl_a_rates, - .ht_cap.cap = ath6kl_g_htcap, + .ht_cap.cap = ath6kl_a_htcap, .ht_cap.ht_supported = true, }; @@ -607,6 +612,17 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, vif->req_bssid, vif->ch_hint, ar->connect_ctrl_flags, nw_subtype); + /* disable background scan if period is 0 */ + if (sme->bg_scan_period == 0) + sme->bg_scan_period = 0xffff; + + /* configure default value if not specified */ + if (sme->bg_scan_period == -1) + sme->bg_scan_period = DEFAULT_BG_SCAN_PERIOD; + + ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0, 0, + sme->bg_scan_period, 0, 0, 0, 3, 0, 0, 0); + up(&ar->sem); if (status == -EINVAL) { @@ -941,6 +957,8 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, if (test_bit(CONNECTED, &vif->flags)) force_fg_scan = 1; + vif->scan_req = request; + if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, ar->fw_capabilities)) { /* @@ -963,10 +981,10 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, ATH6KL_FG_SCAN_INTERVAL, n_channels, channels); } - if (ret) + if (ret) { ath6kl_err("wmi_startscan_cmd failed\n"); - else - vif->scan_req = request; + vif->scan_req = NULL; + } kfree(channels); @@ -1436,9 +1454,38 @@ static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy, struct vif_params *params) { struct ath6kl_vif *vif = netdev_priv(ndev); + int i; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type); + /* + * Don't bring up p2p on an interface which is not initialized + * for p2p operation where fw does not have capability to switch + * dynamically between non-p2p and p2p type interface. 
+ */ + if (!test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, + vif->ar->fw_capabilities) && + (type == NL80211_IFTYPE_P2P_CLIENT || + type == NL80211_IFTYPE_P2P_GO)) { + if (vif->ar->vif_max == 1) { + if (vif->fw_vif_idx != 0) + return -EINVAL; + else + goto set_iface_type; + } + + for (i = vif->ar->max_norm_iface; i < vif->ar->vif_max; i++) { + if (i == vif->fw_vif_idx) + break; + } + + if (i == vif->ar->vif_max) { + ath6kl_err("Invalid interface to bring up P2P\n"); + return -EINVAL; + } + } + +set_iface_type: switch (type) { case NL80211_IFTYPE_STATION: vif->next_mode = INFRA_NETWORK; @@ -1924,12 +1971,61 @@ static int ath6kl_wow_sta(struct ath6kl *ar, struct ath6kl_vif *vif) return 0; } +static int is_hsleep_mode_procsed(struct ath6kl_vif *vif) +{ + return test_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags); +} + +static bool is_ctrl_ep_empty(struct ath6kl *ar) +{ + return !ar->tx_pending[ar->ctrl_ep]; +} + +static int ath6kl_cfg80211_host_sleep(struct ath6kl *ar, struct ath6kl_vif *vif) +{ + int ret, left; + + clear_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags); + + ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, + ATH6KL_HOST_MODE_ASLEEP); + if (ret) + return ret; + + left = wait_event_interruptible_timeout(ar->event_wq, + is_hsleep_mode_procsed(vif), + WMI_TIMEOUT); + if (left == 0) { + ath6kl_warn("timeout, didn't get host sleep cmd processed event\n"); + ret = -ETIMEDOUT; + } else if (left < 0) { + ath6kl_warn("error while waiting for host sleep cmd processed event %d\n", + left); + ret = left; + } + + if (ar->tx_pending[ar->ctrl_ep]) { + left = wait_event_interruptible_timeout(ar->event_wq, + is_ctrl_ep_empty(ar), + WMI_TIMEOUT); + if (left == 0) { + ath6kl_warn("clear wmi ctrl data timeout\n"); + ret = -ETIMEDOUT; + } else if (left < 0) { + ath6kl_warn("clear wmi ctrl data failed: %d\n", left); + ret = left; + } + } + + return ret; +} + static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) { struct in_device *in_dev; struct in_ifaddr *ifa; struct ath6kl_vif *vif; - int ret, left; + int ret; u32 filter = 0; u16 i, bmiss_time; u8 index = 0; @@ -2030,39 +2126,11 @@ skip_arp: if (ret) return ret; - clear_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags); - - ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, - ATH6KL_HOST_MODE_ASLEEP); + ret = ath6kl_cfg80211_host_sleep(ar, vif); if (ret) return ret; - left = wait_event_interruptible_timeout(ar->event_wq, - test_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags), - WMI_TIMEOUT); - if (left == 0) { - ath6kl_warn("timeout, didn't get host sleep cmd " - "processed event\n"); - ret = -ETIMEDOUT; - } else if (left < 0) { - ath6kl_warn("error while waiting for host sleep cmd " - "processed event %d\n", left); - ret = left; - } - - if (ar->tx_pending[ar->ctrl_ep]) { - left = wait_event_interruptible_timeout(ar->event_wq, - ar->tx_pending[ar->ctrl_ep] == 0, WMI_TIMEOUT); - if (left == 0) { - ath6kl_warn("clear wmi ctrl data timeout\n"); - ret = -ETIMEDOUT; - } else if (left < 0) { - ath6kl_warn("clear wmi ctrl data failed: %d\n", left); - ret = left; - } - } - - return ret; + return 0; } static int ath6kl_wow_resume(struct ath6kl *ar) @@ -2109,10 +2177,82 @@ static int ath6kl_wow_resume(struct ath6kl *ar) return 0; } +static int ath6kl_cfg80211_deepsleep_suspend(struct ath6kl *ar) +{ + struct ath6kl_vif *vif; + int ret; + + vif = ath6kl_vif_first(ar); + if (!vif) + return -EIO; + + if (!ath6kl_cfg80211_ready(vif)) + return -EIO; + + ath6kl_cfg80211_stop_all(ar); + + /* Save the 
current power mode before enabling power save */ + ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode; + + ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER); + if (ret) + return ret; + + /* Disable WOW mode */ + ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx, + ATH6KL_WOW_MODE_DISABLE, + 0, 0); + if (ret) + return ret; + + /* Flush all non control pkts in TX path */ + ath6kl_tx_data_cleanup(ar); + + ret = ath6kl_cfg80211_host_sleep(ar, vif); + if (ret) + return ret; + + return 0; +} + +static int ath6kl_cfg80211_deepsleep_resume(struct ath6kl *ar) +{ + struct ath6kl_vif *vif; + int ret; + + vif = ath6kl_vif_first(ar); + + if (!vif) + return -EIO; + + if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) { + ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, + ar->wmi->saved_pwr_mode); + if (ret) + return ret; + } + + ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, + ATH6KL_HOST_MODE_AWAKE); + if (ret) + return ret; + + ar->state = ATH6KL_STATE_ON; + + /* Reset scan parameter to default values */ + ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, + 0, 0, 0, 0, 0, 0, 3, 0, 0, 0); + if (ret) + return ret; + + return 0; +} + int ath6kl_cfg80211_suspend(struct ath6kl *ar, enum ath6kl_cfg_suspend_mode mode, struct cfg80211_wowlan *wow) { + struct ath6kl_vif *vif; enum ath6kl_state prev_state; int ret; @@ -2137,15 +2277,12 @@ int ath6kl_cfg80211_suspend(struct ath6kl *ar, case ATH6KL_CFG_SUSPEND_DEEPSLEEP: - ath6kl_cfg80211_stop_all(ar); - - /* save the current power mode before enabling power save */ - ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode; + ath6kl_dbg(ATH6KL_DBG_SUSPEND, "deep sleep suspend\n"); - ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER); + ret = ath6kl_cfg80211_deepsleep_suspend(ar); if (ret) { - ath6kl_warn("wmi powermode command failed during suspend: %d\n", - ret); + ath6kl_err("deepsleep suspend failed: %d\n", ret); + return ret; } ar->state = ATH6KL_STATE_DEEPSLEEP; @@ -2185,6 +2322,9 @@ int ath6kl_cfg80211_suspend(struct ath6kl *ar, break; } + list_for_each_entry(vif, &ar->vif_list, list) + ath6kl_cfg80211_scan_complete_event(vif, true); + return 0; } EXPORT_SYMBOL(ath6kl_cfg80211_suspend); @@ -2206,17 +2346,13 @@ int ath6kl_cfg80211_resume(struct ath6kl *ar) break; case ATH6KL_STATE_DEEPSLEEP: - if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) { - ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, - ar->wmi->saved_pwr_mode); - if (ret) { - ath6kl_warn("wmi powermode command failed during resume: %d\n", - ret); - } - } - - ar->state = ATH6KL_STATE_ON; + ath6kl_dbg(ATH6KL_DBG_SUSPEND, "deep sleep resume\n"); + ret = ath6kl_cfg80211_deepsleep_resume(ar); + if (ret) { + ath6kl_warn("deep sleep resume failed: %d\n", ret); + return ret; + } break; case ATH6KL_STATE_CUTPOWER: @@ -2290,31 +2426,25 @@ void ath6kl_check_wow_status(struct ath6kl *ar) } #endif -static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev, - struct ieee80211_channel *chan, - enum nl80211_channel_type channel_type) +static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band, + bool ht_enable) { - struct ath6kl_vif *vif; - - /* - * 'dev' could be NULL if a channel change is required for the hardware - * device itself, instead of a particular VIF. - * - * FIXME: To be handled properly when monitor mode is supported. 
- */ - if (!dev) - return -EBUSY; - - vif = netdev_priv(dev); + struct ath6kl_htcap *htcap = &vif->htcap; - if (!ath6kl_cfg80211_ready(vif)) - return -EIO; + if (htcap->ht_enable == ht_enable) + return 0; - ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n", - __func__, chan->center_freq, chan->hw_value); - vif->next_chan = chan->center_freq; + if (ht_enable) { + /* Set default ht capabilities */ + htcap->ht_enable = true; + htcap->cap_info = (band == IEEE80211_BAND_2GHZ) ? + ath6kl_g_htcap : ath6kl_a_htcap; + htcap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K; + } else /* Disable ht */ + memset(htcap, 0, sizeof(*htcap)); - return 0; + return ath6kl_wmi_set_htcap_cmd(vif->ar->wmi, vif->fw_vif_idx, + band, htcap); } static bool ath6kl_is_p2p_ie(const u8 *pos) @@ -2391,6 +2521,81 @@ static int ath6kl_set_ies(struct ath6kl_vif *vif, return 0; } +static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type) +{ + struct ath6kl_vif *vif; + + /* + * 'dev' could be NULL if a channel change is required for the hardware + * device itself, instead of a particular VIF. + * + * FIXME: To be handled properly when monitor mode is supported. + */ + if (!dev) + return -EBUSY; + + vif = netdev_priv(dev); + + if (!ath6kl_cfg80211_ready(vif)) + return -EIO; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n", + __func__, chan->center_freq, chan->hw_value); + vif->next_chan = chan->center_freq; + vif->next_ch_type = channel_type; + vif->next_ch_band = chan->band; + + return 0; +} + +static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon, + u8 *rsn_capab) +{ + const u8 *rsn_ie; + size_t rsn_ie_len; + u16 cnt; + + if (!beacon->tail) + return -EINVAL; + + rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, beacon->tail, beacon->tail_len); + if (!rsn_ie) + return -EINVAL; + + rsn_ie_len = *(rsn_ie + 1); + /* skip element id and length */ + rsn_ie += 2; + + /* skip version, group cipher */ + if (rsn_ie_len < 6) + return -EINVAL; + rsn_ie += 6; + rsn_ie_len -= 6; + + /* skip pairwise cipher suite */ + if (rsn_ie_len < 2) + return -EINVAL; + cnt = *((u16 *) rsn_ie); + rsn_ie += (2 + cnt * 4); + rsn_ie_len -= (2 + cnt * 4); + + /* skip akm suite */ + if (rsn_ie_len < 2) + return -EINVAL; + cnt = *((u16 *) rsn_ie); + rsn_ie += (2 + cnt * 4); + rsn_ie_len -= (2 + cnt * 4); + + if (rsn_ie_len < 2) + return -EINVAL; + + memcpy(rsn_capab, rsn_ie, 2); + + return 0; +} + static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ap_settings *info) { @@ -2403,6 +2608,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev, struct wmi_connect_cmd p; int res; int i, ret; + u16 rsn_capab = 0; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s:\n", __func__); @@ -2532,6 +2738,34 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev, p.nw_subtype = SUBTYPE_NONE; } + if (info->inactivity_timeout) { + res = ath6kl_wmi_set_inact_period(ar->wmi, vif->fw_vif_idx, + info->inactivity_timeout); + if (res < 0) + return res; + } + + if (ath6kl_set_htcap(vif, vif->next_ch_band, + vif->next_ch_type != NL80211_CHAN_NO_HT)) + return -EIO; + + /* + * Get the PTKSA replay counter in the RSN IE. Supplicant + * will use the RSN IE in M3 message and firmware has to + * advertise the same in beacon/probe response. 
Send + * the complete RSN IE capability field to firmware + */ + if (!ath6kl_get_rsn_capab(&info->beacon, (u8 *) &rsn_capab) && + test_bit(ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE, + ar->fw_capabilities)) { + res = ath6kl_wmi_set_ie_cmd(ar->wmi, vif->fw_vif_idx, + WLAN_EID_RSN, WMI_RSN_IE_CAPB, + (const u8 *) &rsn_capab, + sizeof(rsn_capab)); + if (res < 0) + return res; + } + res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p); if (res < 0) return res; @@ -2566,6 +2800,13 @@ static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev) ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx); clear_bit(CONNECTED, &vif->flags); + /* Restore ht setting in firmware */ + if (ath6kl_set_htcap(vif, IEEE80211_BAND_2GHZ, true)) + return -EIO; + + if (ath6kl_set_htcap(vif, IEEE80211_BAND_5GHZ, true)) + return -EIO; + return 0; } @@ -2747,6 +2988,21 @@ static bool ath6kl_mgmt_powersave_ap(struct ath6kl_vif *vif, return false; } +/* Check if SSID length is greater than DIRECT- */ +static bool ath6kl_is_p2p_go_ssid(const u8 *buf, size_t len) +{ + const struct ieee80211_mgmt *mgmt; + mgmt = (const struct ieee80211_mgmt *) buf; + + /* variable[1] contains the SSID tag length */ + if (buf + len >= &mgmt->u.probe_resp.variable[1] && + (mgmt->u.probe_resp.variable[1] > P2P_WILDCARD_SSID_LEN)) { + return true; + } + + return false; +} + static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_channel *chan, bool offchan, enum nl80211_channel_type channel_type, @@ -2761,11 +3017,11 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, bool more_data, queued; mgmt = (const struct ieee80211_mgmt *) buf; - if (buf + len >= mgmt->u.probe_resp.variable && - vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) && - ieee80211_is_probe_resp(mgmt->frame_control)) { + if (vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) && + ieee80211_is_probe_resp(mgmt->frame_control) && + ath6kl_is_p2p_go_ssid(buf, len)) { /* - * Send Probe Response frame in AP mode using a separate WMI + * Send Probe Response frame in GO mode using a separate WMI * command to allow the target to fill in the generic IEs. 
*/ *cookie = 0; /* TX status not supported */ @@ -2833,6 +3089,8 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy, if (vif->sme_state != SME_DISCONNECTED) return -EBUSY; + ath6kl_cfg80211_scan_complete_event(vif, true); + for (i = 0; i < ar->wiphy->max_sched_scan_ssids; i++) { ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i, DISABLE_SSID_FLAG, @@ -3094,6 +3352,7 @@ struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name, vif->next_mode = nw_type; vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL; vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME; + vif->htcap.ht_enable = true; memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN); if (fw_vif_idx != 0) @@ -3181,6 +3440,10 @@ int ath6kl_cfg80211_init(struct ath6kl *ar) if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities)) ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; + if (test_bit(ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT, + ar->fw_capabilities)) + ar->wiphy->features = NL80211_FEATURE_INACTIVITY_TIMER; + ar->wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h index a60e78c0472..98a886154d9 100644 --- a/drivers/net/wireless/ath/ath6kl/common.h +++ b/drivers/net/wireless/ath/ath6kl/common.h @@ -22,7 +22,8 @@ #define ATH6KL_MAX_IE 256 -extern int ath6kl_printk(const char *level, const char *fmt, ...); +extern __printf(2, 3) +int ath6kl_printk(const char *level, const char *fmt, ...); /* * Reflects the version of binary interface exposed by ATH6KL target @@ -77,6 +78,7 @@ enum crypto_type { struct htc_endpoint_credit_dist; struct ath6kl; +struct ath6kl_htcap; enum htc_credit_dist_reason; struct ath6kl_htc_credit_info; diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c index 45e641f3a41..fdb3b1decc7 100644 --- a/drivers/net/wireless/ath/ath6kl/core.c +++ b/drivers/net/wireless/ath/ath6kl/core.c @@ -20,9 +20,11 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/export.h> +#include <linux/vmalloc.h> #include "debug.h" #include "hif-ops.h" +#include "htc-ops.h" #include "cfg80211.h" unsigned int debug_mask; @@ -39,12 +41,36 @@ module_param(uart_debug, uint, 0644); module_param(ath6kl_p2p, uint, 0644); module_param(testmode, uint, 0644); -int ath6kl_core_init(struct ath6kl *ar) +void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb) +{ + ath6kl_htc_tx_complete(ar, skb); +} +EXPORT_SYMBOL(ath6kl_core_tx_complete); + +void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe) +{ + ath6kl_htc_rx_complete(ar, skb, pipe); +} +EXPORT_SYMBOL(ath6kl_core_rx_complete); + +int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type) { struct ath6kl_bmi_target_info targ_info; struct net_device *ndev; int ret = 0, i; + switch (htc_type) { + case ATH6KL_HTC_TYPE_MBOX: + ath6kl_htc_mbox_attach(ar); + break; + case ATH6KL_HTC_TYPE_PIPE: + ath6kl_htc_pipe_attach(ar); + break; + default: + WARN_ON(1); + return -ENOMEM; + } + ar->ath6kl_wq = create_singlethread_workqueue("ath6kl"); if (!ar->ath6kl_wq) return -ENOMEM; @@ -280,7 +306,7 @@ void ath6kl_core_cleanup(struct ath6kl *ar) kfree(ar->fw_board); kfree(ar->fw_otp); - kfree(ar->fw); + vfree(ar->fw); kfree(ar->fw_patch); kfree(ar->fw_testscript); diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h index f1dd8906be4..9d67964a51d 100644 --- 
a/drivers/net/wireless/ath/ath6kl/core.h +++ b/drivers/net/wireless/ath/ath6kl/core.h @@ -91,6 +91,15 @@ enum ath6kl_fw_capability { */ ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, + /* + * Firmware has support to cleanup inactive stations + * in AP mode. + */ + ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT, + + /* Firmware has support to override rsn cap of rsn ie */ + ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE, + /* this needs to be last */ ATH6KL_FW_CAPABILITY_MAX, }; @@ -205,6 +214,8 @@ struct ath6kl_fw_ie { #define ATH6KL_CONF_ENABLE_TX_BURST BIT(3) #define ATH6KL_CONF_UART_DEBUG BIT(4) +#define P2P_WILDCARD_SSID_LEN 7 /* DIRECT- */ + enum wlan_low_pwr_state { WLAN_POWER_STATE_ON, WLAN_POWER_STATE_CUT_PWR, @@ -454,6 +465,11 @@ enum ath6kl_hif_type { ATH6KL_HIF_TYPE_USB, }; +enum ath6kl_htc_type { + ATH6KL_HTC_TYPE_MBOX, + ATH6KL_HTC_TYPE_PIPE, +}; + /* Max number of filters that hw supports */ #define ATH6K_MAX_MC_FILTERS_PER_LIST 7 struct ath6kl_mc_filter { @@ -461,6 +477,12 @@ struct ath6kl_mc_filter { char hw_addr[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE]; }; +struct ath6kl_htcap { + bool ht_enable; + u8 ampdu_factor; + unsigned short cap_info; +}; + /* * Driver's maximum limit, note that some firmwares support only one vif * and the runtime (current) limit must be checked from ar->vif_max. @@ -509,6 +531,7 @@ struct ath6kl_vif { struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1]; struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1]; struct aggr_info *aggr_cntxt; + struct ath6kl_htcap htcap; struct timer_list disconnect_timer; struct timer_list sched_scan_timer; @@ -521,6 +544,8 @@ struct ath6kl_vif { u32 send_action_id; bool probe_req_report; u16 next_chan; + enum nl80211_channel_type next_ch_type; + enum ieee80211_band next_ch_band; u16 assoc_bss_beacon_int; u16 listen_intvl_t; u16 bmiss_time_t; @@ -568,6 +593,7 @@ struct ath6kl { struct ath6kl_bmi bmi; const struct ath6kl_hif_ops *hif_ops; + const struct ath6kl_htc_ops *htc_ops; struct wmi *wmi; int tx_pending[ENDPOINT_MAX]; int total_tx_data_pend; @@ -746,7 +772,8 @@ void init_netdev(struct net_device *dev); void ath6kl_cookie_init(struct ath6kl *ar); void ath6kl_cookie_cleanup(struct ath6kl *ar); void ath6kl_rx(struct htc_target *target, struct htc_packet *packet); -void ath6kl_tx_complete(void *context, struct list_head *packet_queue); +void ath6kl_tx_complete(struct htc_target *context, + struct list_head *packet_queue); enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target, struct htc_packet *packet); void ath6kl_stop_txrx(struct ath6kl *ar); @@ -821,8 +848,11 @@ int ath6kl_init_hw_params(struct ath6kl *ar); void ath6kl_check_wow_status(struct ath6kl *ar); +void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb); +void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe); + struct ath6kl *ath6kl_core_create(struct device *dev); -int ath6kl_core_init(struct ath6kl *ar); +int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type); void ath6kl_core_cleanup(struct ath6kl *ar); void ath6kl_core_destroy(struct ath6kl *ar); diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c index d01403a263f..1b76aff7850 100644 --- a/drivers/net/wireless/ath/ath6kl/debug.c +++ b/drivers/net/wireless/ath/ath6kl/debug.c @@ -616,6 +616,12 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf, "Num disconnects", tgt_stats->cs_discon_cnt); len += scnprintf(buf + len, buf_len - len, "%20s %10d\n", "Beacon avg rssi", 
tgt_stats->cs_ave_beacon_rssi); + len += scnprintf(buf + len, buf_len - len, "%20s %10d\n", + "ARP pkt received", tgt_stats->arp_received); + len += scnprintf(buf + len, buf_len - len, "%20s %10d\n", + "ARP pkt matched", tgt_stats->arp_matched); + len += scnprintf(buf + len, buf_len - len, "%20s %10d\n", + "ARP pkt replied", tgt_stats->arp_replied); if (len > buf_len) len = buf_len; diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h index 1803a0baae8..49639d8266c 100644 --- a/drivers/net/wireless/ath/ath6kl/debug.h +++ b/drivers/net/wireless/ath/ath6kl/debug.h @@ -43,6 +43,7 @@ enum ATH6K_DEBUG_MASK { ATH6KL_DBG_WMI_DUMP = BIT(19), ATH6KL_DBG_SUSPEND = BIT(20), ATH6KL_DBG_USB = BIT(21), + ATH6KL_DBG_USB_BULK = BIT(22), ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */ }; diff --git a/drivers/net/wireless/ath/ath6kl/hif-ops.h b/drivers/net/wireless/ath/ath6kl/hif-ops.h index fd84086638e..8c9e72d5250 100644 --- a/drivers/net/wireless/ath/ath6kl/hif-ops.h +++ b/drivers/net/wireless/ath/ath6kl/hif-ops.h @@ -150,4 +150,38 @@ static inline void ath6kl_hif_stop(struct ath6kl *ar) ar->hif_ops->stop(ar); } +static inline int ath6kl_hif_pipe_send(struct ath6kl *ar, + u8 pipe, struct sk_buff *hdr_buf, + struct sk_buff *buf) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe send\n"); + + return ar->hif_ops->pipe_send(ar, pipe, hdr_buf, buf); +} + +static inline void ath6kl_hif_pipe_get_default(struct ath6kl *ar, + u8 *ul_pipe, u8 *dl_pipe) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get default\n"); + + ar->hif_ops->pipe_get_default(ar, ul_pipe, dl_pipe); +} + +static inline int ath6kl_hif_pipe_map_service(struct ath6kl *ar, + u16 service_id, u8 *ul_pipe, + u8 *dl_pipe) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get default\n"); + + return ar->hif_ops->pipe_map_service(ar, service_id, ul_pipe, dl_pipe); +} + +static inline u16 ath6kl_hif_pipe_get_free_queue_number(struct ath6kl *ar, + u8 pipe) +{ + ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get free queue number\n"); + + return ar->hif_ops->pipe_get_free_queue_number(ar, pipe); +} + #endif diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h index 20ed6b73517..61f6b21fb0a 100644 --- a/drivers/net/wireless/ath/ath6kl/hif.h +++ b/drivers/net/wireless/ath/ath6kl/hif.h @@ -256,6 +256,12 @@ struct ath6kl_hif_ops { int (*power_on)(struct ath6kl *ar); int (*power_off)(struct ath6kl *ar); void (*stop)(struct ath6kl *ar); + int (*pipe_send)(struct ath6kl *ar, u8 pipe, struct sk_buff *hdr_buf, + struct sk_buff *buf); + void (*pipe_get_default)(struct ath6kl *ar, u8 *pipe_ul, u8 *pipe_dl); + int (*pipe_map_service)(struct ath6kl *ar, u16 service_id, u8 *pipe_ul, + u8 *pipe_dl); + u16 (*pipe_get_free_queue_number)(struct ath6kl *ar, u8 pipe); }; int ath6kl_hif_setup(struct ath6kl_device *dev); diff --git a/drivers/net/wireless/ath/ath6kl/htc-ops.h b/drivers/net/wireless/ath/ath6kl/htc-ops.h new file mode 100644 index 00000000000..2d4eed55cfd --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/htc-ops.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef HTC_OPS_H +#define HTC_OPS_H + +#include "htc.h" +#include "debug.h" + +static inline void *ath6kl_htc_create(struct ath6kl *ar) +{ + return ar->htc_ops->create(ar); +} + +static inline int ath6kl_htc_wait_target(struct htc_target *target) +{ + return target->dev->ar->htc_ops->wait_target(target); +} + +static inline int ath6kl_htc_start(struct htc_target *target) +{ + return target->dev->ar->htc_ops->start(target); +} + +static inline int ath6kl_htc_conn_service(struct htc_target *target, + struct htc_service_connect_req *req, + struct htc_service_connect_resp *resp) +{ + return target->dev->ar->htc_ops->conn_service(target, req, resp); +} + +static inline int ath6kl_htc_tx(struct htc_target *target, + struct htc_packet *packet) +{ + return target->dev->ar->htc_ops->tx(target, packet); +} + +static inline void ath6kl_htc_stop(struct htc_target *target) +{ + return target->dev->ar->htc_ops->stop(target); +} + +static inline void ath6kl_htc_cleanup(struct htc_target *target) +{ + return target->dev->ar->htc_ops->cleanup(target); +} + +static inline void ath6kl_htc_flush_txep(struct htc_target *target, + enum htc_endpoint_id endpoint, + u16 tag) +{ + return target->dev->ar->htc_ops->flush_txep(target, endpoint, tag); +} + +static inline void ath6kl_htc_flush_rx_buf(struct htc_target *target) +{ + return target->dev->ar->htc_ops->flush_rx_buf(target); +} + +static inline void ath6kl_htc_activity_changed(struct htc_target *target, + enum htc_endpoint_id endpoint, + bool active) +{ + return target->dev->ar->htc_ops->activity_changed(target, endpoint, + active); +} + +static inline int ath6kl_htc_get_rxbuf_num(struct htc_target *target, + enum htc_endpoint_id endpoint) +{ + return target->dev->ar->htc_ops->get_rxbuf_num(target, endpoint); +} + +static inline int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target, + struct list_head *pktq) +{ + return target->dev->ar->htc_ops->add_rxbuf_multiple(target, pktq); +} + +static inline int ath6kl_htc_credit_setup(struct htc_target *target, + struct ath6kl_htc_credit_info *info) +{ + return target->dev->ar->htc_ops->credit_setup(target, info); +} + +static inline void ath6kl_htc_tx_complete(struct ath6kl *ar, + struct sk_buff *skb) +{ + ar->htc_ops->tx_complete(ar, skb); +} + + +static inline void ath6kl_htc_rx_complete(struct ath6kl *ar, + struct sk_buff *skb, u8 pipe) +{ + ar->htc_ops->rx_complete(ar, skb, pipe); +} + + +#endif diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h index 5027ccc36b6..a2c8ff80979 100644 --- a/drivers/net/wireless/ath/ath6kl/htc.h +++ b/drivers/net/wireless/ath/ath6kl/htc.h @@ -25,6 +25,7 @@ /* send direction */ #define HTC_FLAGS_NEED_CREDIT_UPDATE (1 << 0) #define HTC_FLAGS_SEND_BUNDLE (1 << 1) +#define HTC_FLAGS_TX_FIXUP_NETBUF (1 << 2) /* receive direction */ #define HTC_FLG_RX_UNUSED (1 << 0) @@ -56,6 +57,10 @@ #define HTC_CONN_FLGS_THRESH_LVL_THREE_QUAT 0x2 #define HTC_CONN_FLGS_REDUCE_CRED_DRIB 0x4 #define HTC_CONN_FLGS_THRESH_MASK 0x3 +/* disable credit flow control on a specific service */ +#define HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL (1 << 3) +#define HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT 8 +#define 
HTC_CONN_FLGS_SET_RECV_ALLOC_MASK 0xFF00 /* connect response status codes */ #define HTC_SERVICE_SUCCESS 0 @@ -75,6 +80,7 @@ #define HTC_RECORD_LOOKAHEAD_BUNDLE 3 #define HTC_SETUP_COMP_FLG_RX_BNDL_EN (1 << 0) +#define HTC_SETUP_COMP_FLG_DISABLE_TX_CREDIT_FLOW (1 << 1) #define MAKE_SERVICE_ID(group, index) \ (int)(((int)group << 8) | (int)(index)) @@ -109,6 +115,8 @@ /* HTC operational parameters */ #define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */ +#define HTC_TARGET_RESPONSE_POLL_WAIT 10 +#define HTC_TARGET_RESPONSE_POLL_COUNT 200 #define HTC_TARGET_DEBUG_INTR_MASK 0x01 #define HTC_TARGET_CREDIT_INTR_MASK 0xF0 @@ -128,6 +136,7 @@ #define HTC_RECV_WAIT_BUFFERS (1 << 0) #define HTC_OP_STATE_STOPPING (1 << 0) +#define HTC_OP_STATE_SETUP_COMPLETE (1 << 1) /* * The frame header length and message formats defined herein were selected @@ -311,6 +320,14 @@ struct htc_packet { void (*completion) (struct htc_target *, struct htc_packet *); struct htc_target *context; + + /* + * optimization for network-oriented data, the HTC packet + * can pass the network buffer corresponding to the HTC packet + * lower layers may optimized the transfer knowing this is + * a network buffer + */ + struct sk_buff *skb; }; enum htc_send_full_action { @@ -319,12 +336,14 @@ enum htc_send_full_action { }; struct htc_ep_callbacks { + void (*tx_complete) (struct htc_target *, struct htc_packet *); void (*rx) (struct htc_target *, struct htc_packet *); void (*rx_refill) (struct htc_target *, enum htc_endpoint_id endpoint); enum htc_send_full_action (*tx_full) (struct htc_target *, struct htc_packet *); struct htc_packet *(*rx_allocthresh) (struct htc_target *, enum htc_endpoint_id, int); + void (*tx_comp_multi) (struct htc_target *, struct list_head *); int rx_alloc_thresh; int rx_refill_thresh; }; @@ -502,6 +521,13 @@ struct htc_endpoint { u32 conn_flags; struct htc_endpoint_stats ep_st; u16 tx_drop_packet_threshold; + + struct { + u8 pipeid_ul; + u8 pipeid_dl; + struct list_head tx_lookup_queue; + bool tx_credit_flow_enabled; + } pipe; }; struct htc_control_buffer { @@ -509,6 +535,42 @@ struct htc_control_buffer { u8 *buf; }; +struct htc_pipe_txcredit_alloc { + u16 service_id; + u8 credit_alloc; +}; + +enum htc_send_queue_result { + HTC_SEND_QUEUE_OK = 0, /* packet was queued */ + HTC_SEND_QUEUE_DROP = 1, /* this packet should be dropped */ +}; + +struct ath6kl_htc_ops { + void* (*create)(struct ath6kl *ar); + int (*wait_target)(struct htc_target *target); + int (*start)(struct htc_target *target); + int (*conn_service)(struct htc_target *target, + struct htc_service_connect_req *req, + struct htc_service_connect_resp *resp); + int (*tx)(struct htc_target *target, struct htc_packet *packet); + void (*stop)(struct htc_target *target); + void (*cleanup)(struct htc_target *target); + void (*flush_txep)(struct htc_target *target, + enum htc_endpoint_id endpoint, u16 tag); + void (*flush_rx_buf)(struct htc_target *target); + void (*activity_changed)(struct htc_target *target, + enum htc_endpoint_id endpoint, + bool active); + int (*get_rxbuf_num)(struct htc_target *target, + enum htc_endpoint_id endpoint); + int (*add_rxbuf_multiple)(struct htc_target *target, + struct list_head *pktq); + int (*credit_setup)(struct htc_target *target, + struct ath6kl_htc_credit_info *cred_info); + int (*tx_complete)(struct ath6kl *ar, struct sk_buff *skb); + int (*rx_complete)(struct ath6kl *ar, struct sk_buff *skb, u8 pipe); +}; + struct ath6kl_device; /* our HTC target state */ @@ -557,36 +619,19 @@ struct htc_target { /* counts the 
number of Tx without bundling continously per AC */ u32 ac_tx_count[WMM_NUM_AC]; + + struct { + struct htc_packet *htc_packet_pool; + u8 ctrl_response_buf[HTC_MAX_CTRL_MSG_LEN]; + int ctrl_response_len; + bool ctrl_response_valid; + struct htc_pipe_txcredit_alloc txcredit_alloc[ENDPOINT_MAX]; + } pipe; }; -void *ath6kl_htc_create(struct ath6kl *ar); -void ath6kl_htc_set_credit_dist(struct htc_target *target, - struct ath6kl_htc_credit_info *cred_info, - u16 svc_pri_order[], int len); -int ath6kl_htc_wait_target(struct htc_target *target); -int ath6kl_htc_start(struct htc_target *target); -int ath6kl_htc_conn_service(struct htc_target *target, - struct htc_service_connect_req *req, - struct htc_service_connect_resp *resp); -int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet); -void ath6kl_htc_stop(struct htc_target *target); -void ath6kl_htc_cleanup(struct htc_target *target); -void ath6kl_htc_flush_txep(struct htc_target *target, - enum htc_endpoint_id endpoint, u16 tag); -void ath6kl_htc_flush_rx_buf(struct htc_target *target); -void ath6kl_htc_indicate_activity_change(struct htc_target *target, - enum htc_endpoint_id endpoint, - bool active); -int ath6kl_htc_get_rxbuf_num(struct htc_target *target, - enum htc_endpoint_id endpoint); -int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target, - struct list_head *pktq); int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, u32 msg_look_ahead, int *n_pkts); -int ath6kl_credit_setup(void *htc_handle, - struct ath6kl_htc_credit_info *cred_info); - static inline void set_htc_pkt_info(struct htc_packet *packet, void *context, u8 *buf, unsigned int len, enum htc_endpoint_id eid, u16 tag) @@ -626,4 +671,7 @@ static inline int get_queue_depth(struct list_head *queue) return depth; } +void ath6kl_htc_pipe_attach(struct ath6kl *ar); +void ath6kl_htc_mbox_attach(struct ath6kl *ar); + #endif diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c index 4849d99cce7..065e61516d7 100644 --- a/drivers/net/wireless/ath/ath6kl/htc.c +++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c @@ -23,6 +23,14 @@ #define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask)) +static void ath6kl_htc_mbox_cleanup(struct htc_target *target); +static void ath6kl_htc_mbox_stop(struct htc_target *target); +static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target, + struct list_head *pkt_queue); +static void ath6kl_htc_set_credit_dist(struct htc_target *target, + struct ath6kl_htc_credit_info *cred_info, + u16 svc_pri_order[], int len); + /* threshold to re-enable Tx bundling for an AC*/ #define TX_RESUME_BUNDLE_THRESHOLD 1500 @@ -130,8 +138,8 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info, } /* initialize and setup credit distribution */ -int ath6kl_credit_setup(void *htc_handle, - struct ath6kl_htc_credit_info *cred_info) +static int ath6kl_htc_mbox_credit_setup(struct htc_target *htc_target, + struct ath6kl_htc_credit_info *cred_info) { u16 servicepriority[5]; @@ -144,7 +152,7 @@ int ath6kl_credit_setup(void *htc_handle, servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */ /* set priority list */ - ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5); + ath6kl_htc_set_credit_dist(htc_target, cred_info, servicepriority, 5); return 0; } @@ -432,7 +440,7 @@ static void htc_tx_complete(struct htc_endpoint *endpoint, "htc tx complete ep %d pkts %d\n", endpoint->eid, get_queue_depth(txq)); - ath6kl_tx_complete(endpoint->target->dev->ar, 
txq); + ath6kl_tx_complete(endpoint->target, txq); } static void htc_tx_comp_handler(struct htc_target *target, @@ -1065,7 +1073,7 @@ static int htc_setup_tx_complete(struct htc_target *target) return status; } -void ath6kl_htc_set_credit_dist(struct htc_target *target, +static void ath6kl_htc_set_credit_dist(struct htc_target *target, struct ath6kl_htc_credit_info *credit_info, u16 srvc_pri_order[], int list_len) { @@ -1093,7 +1101,8 @@ void ath6kl_htc_set_credit_dist(struct htc_target *target, } } -int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet) +static int ath6kl_htc_mbox_tx(struct htc_target *target, + struct htc_packet *packet) { struct htc_endpoint *endpoint; struct list_head queue; @@ -1121,7 +1130,7 @@ int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet) } /* flush endpoint TX queue */ -void ath6kl_htc_flush_txep(struct htc_target *target, +static void ath6kl_htc_mbox_flush_txep(struct htc_target *target, enum htc_endpoint_id eid, u16 tag) { struct htc_packet *packet, *tmp_pkt; @@ -1173,12 +1182,13 @@ static void ath6kl_htc_flush_txep_all(struct htc_target *target) if (endpoint->svc_id == 0) /* not in use.. */ continue; - ath6kl_htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL); + ath6kl_htc_mbox_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL); } } -void ath6kl_htc_indicate_activity_change(struct htc_target *target, - enum htc_endpoint_id eid, bool active) +static void ath6kl_htc_mbox_activity_changed(struct htc_target *target, + enum htc_endpoint_id eid, + bool active) { struct htc_endpoint *endpoint = &target->endpoint[eid]; bool dist = false; @@ -1246,7 +1256,7 @@ static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet) INIT_LIST_HEAD(&queue); list_add_tail(&packet->list, &queue); - return ath6kl_htc_add_rxbuf_multiple(target, &queue); + return ath6kl_htc_mbox_add_rxbuf_multiple(target, &queue); } static void htc_reclaim_rxbuf(struct htc_target *target, @@ -1353,7 +1363,9 @@ static int ath6kl_htc_rx_setup(struct htc_target *target, sizeof(*htc_hdr)); if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) { - ath6kl_warn("Rx buffer requested with invalid length\n"); + ath6kl_warn("Rx buffer requested with invalid length htc_hdr:eid %d, flags 0x%x, len %d\n", + htc_hdr->eid, htc_hdr->flags, + le16_to_cpu(htc_hdr->payld_len)); return -EINVAL; } @@ -2288,7 +2300,7 @@ fail_ctrl_rx: return NULL; } -int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target, +static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target, struct list_head *pkt_queue) { struct htc_endpoint *endpoint; @@ -2350,7 +2362,7 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target, return status; } -void ath6kl_htc_flush_rx_buf(struct htc_target *target) +static void ath6kl_htc_mbox_flush_rx_buf(struct htc_target *target) { struct htc_endpoint *endpoint; struct htc_packet *packet, *tmp_pkt; @@ -2392,7 +2404,7 @@ void ath6kl_htc_flush_rx_buf(struct htc_target *target) } } -int ath6kl_htc_conn_service(struct htc_target *target, +static int ath6kl_htc_mbox_conn_service(struct htc_target *target, struct htc_service_connect_req *conn_req, struct htc_service_connect_resp *conn_resp) { @@ -2564,7 +2576,7 @@ static void reset_ep_state(struct htc_target *target) INIT_LIST_HEAD(&target->cred_dist_list); } -int ath6kl_htc_get_rxbuf_num(struct htc_target *target, +static int ath6kl_htc_mbox_get_rxbuf_num(struct htc_target *target, enum htc_endpoint_id endpoint) { int num; @@ -2624,7 +2636,7 @@ static void htc_setup_msg_bndl(struct htc_target 
*target) } } -int ath6kl_htc_wait_target(struct htc_target *target) +static int ath6kl_htc_mbox_wait_target(struct htc_target *target) { struct htc_packet *packet = NULL; struct htc_ready_ext_msg *rdy_msg; @@ -2693,12 +2705,12 @@ int ath6kl_htc_wait_target(struct htc_target *target) connect.svc_id = HTC_CTRL_RSVD_SVC; /* connect fake service */ - status = ath6kl_htc_conn_service((void *)target, &connect, &resp); + status = ath6kl_htc_mbox_conn_service((void *)target, &connect, &resp); if (status) /* * FIXME: this call doesn't make sense, the caller should - * call ath6kl_htc_cleanup() when it wants remove htc + * call ath6kl_htc_mbox_cleanup() when it wants remove htc */ ath6kl_hif_cleanup_scatter(target->dev->ar); @@ -2715,7 +2727,7 @@ fail_wait_target: * Start HTC, enable interrupts and let the target know * host has finished setup. */ -int ath6kl_htc_start(struct htc_target *target) +static int ath6kl_htc_mbox_start(struct htc_target *target) { struct htc_packet *packet; int status; @@ -2752,7 +2764,7 @@ int ath6kl_htc_start(struct htc_target *target) status = ath6kl_hif_unmask_intrs(target->dev); if (status) - ath6kl_htc_stop(target); + ath6kl_htc_mbox_stop(target); return status; } @@ -2796,7 +2808,7 @@ static int ath6kl_htc_reset(struct htc_target *target) } /* htc_stop: stop interrupt reception, and flush all queued buffers */ -void ath6kl_htc_stop(struct htc_target *target) +static void ath6kl_htc_mbox_stop(struct htc_target *target) { spin_lock_bh(&target->htc_lock); target->htc_flags |= HTC_OP_STATE_STOPPING; @@ -2811,12 +2823,12 @@ void ath6kl_htc_stop(struct htc_target *target) ath6kl_htc_flush_txep_all(target); - ath6kl_htc_flush_rx_buf(target); + ath6kl_htc_mbox_flush_rx_buf(target); ath6kl_htc_reset(target); } -void *ath6kl_htc_create(struct ath6kl *ar) +static void *ath6kl_htc_mbox_create(struct ath6kl *ar) { struct htc_target *target = NULL; int status = 0; @@ -2857,13 +2869,13 @@ void *ath6kl_htc_create(struct ath6kl *ar) return target; err_htc_cleanup: - ath6kl_htc_cleanup(target); + ath6kl_htc_mbox_cleanup(target); return NULL; } /* cleanup the HTC instance */ -void ath6kl_htc_cleanup(struct htc_target *target) +static void ath6kl_htc_mbox_cleanup(struct htc_target *target) { struct htc_packet *packet, *tmp_packet; @@ -2888,3 +2900,24 @@ void ath6kl_htc_cleanup(struct htc_target *target) kfree(target->dev); kfree(target); } + +static const struct ath6kl_htc_ops ath6kl_htc_mbox_ops = { + .create = ath6kl_htc_mbox_create, + .wait_target = ath6kl_htc_mbox_wait_target, + .start = ath6kl_htc_mbox_start, + .conn_service = ath6kl_htc_mbox_conn_service, + .tx = ath6kl_htc_mbox_tx, + .stop = ath6kl_htc_mbox_stop, + .cleanup = ath6kl_htc_mbox_cleanup, + .flush_txep = ath6kl_htc_mbox_flush_txep, + .flush_rx_buf = ath6kl_htc_mbox_flush_rx_buf, + .activity_changed = ath6kl_htc_mbox_activity_changed, + .get_rxbuf_num = ath6kl_htc_mbox_get_rxbuf_num, + .add_rxbuf_multiple = ath6kl_htc_mbox_add_rxbuf_multiple, + .credit_setup = ath6kl_htc_mbox_credit_setup, +}; + +void ath6kl_htc_mbox_attach(struct ath6kl *ar) +{ + ar->htc_ops = &ath6kl_htc_mbox_ops; +} diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c new file mode 100644 index 00000000000..b277b344688 --- /dev/null +++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c @@ -0,0 +1,1713 @@ +/* + * Copyright (c) 2007-2011 Atheros Communications Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "core.h" +#include "debug.h" +#include "hif-ops.h" + +#define HTC_PACKET_CONTAINER_ALLOCATION 32 +#define HTC_CONTROL_BUFFER_SIZE (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH) + +static int ath6kl_htc_pipe_tx(struct htc_target *handle, + struct htc_packet *packet); +static void ath6kl_htc_pipe_cleanup(struct htc_target *handle); + +/* htc pipe tx path */ +static inline void restore_tx_packet(struct htc_packet *packet) +{ + if (packet->info.tx.flags & HTC_FLAGS_TX_FIXUP_NETBUF) { + skb_pull(packet->skb, sizeof(struct htc_frame_hdr)); + packet->info.tx.flags &= ~HTC_FLAGS_TX_FIXUP_NETBUF; + } +} + +static void do_send_completion(struct htc_endpoint *ep, + struct list_head *queue_to_indicate) +{ + struct htc_packet *packet; + + if (list_empty(queue_to_indicate)) { + /* nothing to indicate */ + return; + } + + if (ep->ep_cb.tx_comp_multi != NULL) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: calling ep %d, send complete multiple callback (%d pkts)\n", + __func__, ep->eid, + get_queue_depth(queue_to_indicate)); + /* + * a multiple send complete handler is being used, + * pass the queue to the handler + */ + ep->ep_cb.tx_comp_multi(ep->target, queue_to_indicate); + /* + * all packets are now owned by the callback, + * reset queue to be safe + */ + INIT_LIST_HEAD(queue_to_indicate); + } else { + /* using legacy EpTxComplete */ + do { + packet = list_first_entry(queue_to_indicate, + struct htc_packet, list); + + list_del(&packet->list); + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: calling ep %d send complete callback on packet 0x%p\n", + __func__, ep->eid, packet); + ep->ep_cb.tx_complete(ep->target, packet); + } while (!list_empty(queue_to_indicate)); + } +} + +static void send_packet_completion(struct htc_target *target, + struct htc_packet *packet) +{ + struct htc_endpoint *ep = &target->endpoint[packet->endpoint]; + struct list_head container; + + restore_tx_packet(packet); + INIT_LIST_HEAD(&container); + list_add_tail(&packet->list, &container); + + /* do completion */ + do_send_completion(ep, &container); +} + +static void get_htc_packet_credit_based(struct htc_target *target, + struct htc_endpoint *ep, + struct list_head *queue) +{ + int credits_required; + int remainder; + u8 send_flags; + struct htc_packet *packet; + unsigned int transfer_len; + + /* NOTE : the TX lock is held when this function is called */ + + /* loop until we can grab as many packets out of the queue as we can */ + while (true) { + send_flags = 0; + if (list_empty(&ep->txq)) + break; + + /* get packet at head, but don't remove it */ + packet = list_first_entry(&ep->txq, struct htc_packet, list); + if (packet == NULL) + break; + + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: got head packet:0x%p , queue depth: %d\n", + __func__, packet, get_queue_depth(&ep->txq)); + + transfer_len = packet->act_len + 
HTC_HDR_LENGTH; + + if (transfer_len <= target->tgt_cred_sz) { + credits_required = 1; + } else { + /* figure out how many credits this message requires */ + credits_required = transfer_len / target->tgt_cred_sz; + remainder = transfer_len % target->tgt_cred_sz; + + if (remainder) + credits_required++; + } + + ath6kl_dbg(ATH6KL_DBG_HTC, "%s: creds required:%d got:%d\n", + __func__, credits_required, ep->cred_dist.credits); + + if (ep->eid == ENDPOINT_0) { + /* + * endpoint 0 is special, it always has a credit and + * does not require credit based flow control + */ + credits_required = 0; + + } else { + + if (ep->cred_dist.credits < credits_required) + break; + + ep->cred_dist.credits -= credits_required; + ep->ep_st.cred_cosumd += credits_required; + + /* check if we need credits back from the target */ + if (ep->cred_dist.credits < + ep->cred_dist.cred_per_msg) { + /* tell the target we need credits ASAP! */ + send_flags |= HTC_FLAGS_NEED_CREDIT_UPDATE; + ep->ep_st.cred_low_indicate += 1; + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: host needs credits\n", + __func__); + } + } + + /* now we can fully dequeue */ + packet = list_first_entry(&ep->txq, struct htc_packet, list); + + list_del(&packet->list); + /* save the number of credits this packet consumed */ + packet->info.tx.cred_used = credits_required; + /* save send flags */ + packet->info.tx.flags = send_flags; + packet->info.tx.seqno = ep->seqno; + ep->seqno++; + /* queue this packet into the caller's queue */ + list_add_tail(&packet->list, queue); + } + +} + +static void get_htc_packet(struct htc_target *target, + struct htc_endpoint *ep, + struct list_head *queue, int resources) +{ + struct htc_packet *packet; + + /* NOTE : the TX lock is held when this function is called */ + + /* loop until we can grab as many packets out of the queue as we can */ + while (resources) { + if (list_empty(&ep->txq)) + break; + + packet = list_first_entry(&ep->txq, struct htc_packet, list); + list_del(&packet->list); + + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: got packet:0x%p , new queue depth: %d\n", + __func__, packet, get_queue_depth(&ep->txq)); + packet->info.tx.seqno = ep->seqno; + packet->info.tx.flags = 0; + packet->info.tx.cred_used = 0; + ep->seqno++; + + /* queue this packet into the caller's queue */ + list_add_tail(&packet->list, queue); + resources--; + } +} + +static int htc_issue_packets(struct htc_target *target, + struct htc_endpoint *ep, + struct list_head *pkt_queue) +{ + int status = 0; + u16 payload_len; + struct sk_buff *skb; + struct htc_frame_hdr *htc_hdr; + struct htc_packet *packet; + + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: queue: 0x%p, pkts %d\n", __func__, + pkt_queue, get_queue_depth(pkt_queue)); + + while (!list_empty(pkt_queue)) { + packet = list_first_entry(pkt_queue, struct htc_packet, list); + list_del(&packet->list); + + skb = packet->skb; + if (!skb) { + WARN_ON_ONCE(1); + status = -EINVAL; + break; + } + + payload_len = packet->act_len; + + /* setup HTC frame header */ + htc_hdr = (struct htc_frame_hdr *) skb_push(skb, + sizeof(*htc_hdr)); + if (!htc_hdr) { + WARN_ON_ONCE(1); + status = -EINVAL; + break; + } + + packet->info.tx.flags |= HTC_FLAGS_TX_FIXUP_NETBUF; + + /* Endianess? 
*/ + put_unaligned((u16) payload_len, &htc_hdr->payld_len); + htc_hdr->flags = packet->info.tx.flags; + htc_hdr->eid = (u8) packet->endpoint; + htc_hdr->ctrl[0] = 0; + htc_hdr->ctrl[1] = (u8) packet->info.tx.seqno; + + spin_lock_bh(&target->tx_lock); + + /* store in look up queue to match completions */ + list_add_tail(&packet->list, &ep->pipe.tx_lookup_queue); + ep->ep_st.tx_issued += 1; + spin_unlock_bh(&target->tx_lock); + + status = ath6kl_hif_pipe_send(target->dev->ar, + ep->pipe.pipeid_ul, NULL, skb); + + if (status != 0) { + if (status != -ENOMEM) { + /* TODO: if more than 1 endpoint maps to the + * same PipeID, it is possible to run out of + * resources in the HIF layer. + * Don't emit the error + */ + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: failed status:%d\n", + __func__, status); + } + spin_lock_bh(&target->tx_lock); + list_del(&packet->list); + + /* reclaim credits */ + ep->cred_dist.credits += packet->info.tx.cred_used; + spin_unlock_bh(&target->tx_lock); + + /* put it back into the callers queue */ + list_add(&packet->list, pkt_queue); + break; + } + + } + + if (status != 0) { + while (!list_empty(pkt_queue)) { + if (status != -ENOMEM) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: failed pkt:0x%p status:%d\n", + __func__, packet, status); + } + + packet = list_first_entry(pkt_queue, + struct htc_packet, list); + list_del(&packet->list); + packet->status = status; + send_packet_completion(target, packet); + } + } + + return status; +} + +static enum htc_send_queue_result htc_try_send(struct htc_target *target, + struct htc_endpoint *ep, + struct list_head *txq) +{ + struct list_head send_queue; /* temp queue to hold packets */ + struct htc_packet *packet, *tmp_pkt; + struct ath6kl *ar = target->dev->ar; + enum htc_send_full_action action; + int tx_resources, overflow, txqueue_depth, i, good_pkts; + u8 pipeid; + + ath6kl_dbg(ATH6KL_DBG_HTC, "%s: (queue:0x%p depth:%d)\n", + __func__, txq, + (txq == NULL) ? 
0 : get_queue_depth(txq)); + + /* init the local send queue */ + INIT_LIST_HEAD(&send_queue); + + /* + * txq equals to NULL means + * caller didn't provide a queue, just wants us to + * check queues and send + */ + if (txq != NULL) { + if (list_empty(txq)) { + /* empty queue */ + return HTC_SEND_QUEUE_DROP; + } + + spin_lock_bh(&target->tx_lock); + txqueue_depth = get_queue_depth(&ep->txq); + spin_unlock_bh(&target->tx_lock); + + if (txqueue_depth >= ep->max_txq_depth) { + /* we've already overflowed */ + overflow = get_queue_depth(txq); + } else { + /* get how much we will overflow by */ + overflow = txqueue_depth; + overflow += get_queue_depth(txq); + /* get how much we will overflow the TX queue by */ + overflow -= ep->max_txq_depth; + } + + /* if overflow is negative or zero, we are okay */ + if (overflow > 0) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: Endpoint %d, TX queue will overflow :%d, Tx Depth:%d, Max:%d\n", + __func__, ep->eid, overflow, txqueue_depth, + ep->max_txq_depth); + } + if ((overflow <= 0) || + (ep->ep_cb.tx_full == NULL)) { + /* + * all packets will fit or caller did not provide send + * full indication handler -- just move all of them + * to the local send_queue object + */ + list_splice_tail_init(txq, &send_queue); + } else { + good_pkts = get_queue_depth(txq) - overflow; + if (good_pkts < 0) { + WARN_ON_ONCE(1); + return HTC_SEND_QUEUE_DROP; + } + + /* we have overflowed, and a callback is provided */ + /* dequeue all non-overflow packets to the sendqueue */ + for (i = 0; i < good_pkts; i++) { + /* pop off caller's queue */ + packet = list_first_entry(txq, + struct htc_packet, + list); + list_del(&packet->list); + /* insert into local queue */ + list_add_tail(&packet->list, &send_queue); + } + + /* + * the caller's queue has all the packets that won't fit + * walk through the caller's queue and indicate each to + * the send full handler + */ + list_for_each_entry_safe(packet, tmp_pkt, + txq, list) { + + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: Indicat overflowed TX pkts: %p\n", + __func__, packet); + action = ep->ep_cb.tx_full(ep->target, packet); + if (action == HTC_SEND_FULL_DROP) { + /* callback wants the packet dropped */ + ep->ep_st.tx_dropped += 1; + + /* leave this one in the caller's queue + * for cleanup */ + } else { + /* callback wants to keep this packet, + * remove from caller's queue */ + list_del(&packet->list); + /* put it in the send queue */ + list_add_tail(&packet->list, + &send_queue); + } + + } + + if (list_empty(&send_queue)) { + /* no packets made it in, caller will cleanup */ + return HTC_SEND_QUEUE_DROP; + } + } + } + + if (!ep->pipe.tx_credit_flow_enabled) { + tx_resources = + ath6kl_hif_pipe_get_free_queue_number(ar, + ep->pipe.pipeid_ul); + } else { + tx_resources = 0; + } + + spin_lock_bh(&target->tx_lock); + if (!list_empty(&send_queue)) { + /* transfer packets to tail */ + list_splice_tail_init(&send_queue, &ep->txq); + if (!list_empty(&send_queue)) { + WARN_ON_ONCE(1); + spin_unlock_bh(&target->tx_lock); + return HTC_SEND_QUEUE_DROP; + } + INIT_LIST_HEAD(&send_queue); + } + + /* increment tx processing count on entry */ + ep->tx_proc_cnt++; + + if (ep->tx_proc_cnt > 1) { + /* + * Another thread or task is draining the TX queues on this + * endpoint that thread will reset the tx processing count + * when the queue is drained. 
+ */ + ep->tx_proc_cnt--; + spin_unlock_bh(&target->tx_lock); + return HTC_SEND_QUEUE_OK; + } + + /***** beyond this point only 1 thread may enter ******/ + + /* + * Now drain the endpoint TX queue for transmission as long as we have + * enough transmit resources. + */ + while (true) { + + if (get_queue_depth(&ep->txq) == 0) + break; + + if (ep->pipe.tx_credit_flow_enabled) { + /* + * Credit based mechanism provides flow control + * based on target transmit resource availability, + * we assume that the HIF layer will always have + * bus resources greater than target transmit + * resources. + */ + get_htc_packet_credit_based(target, ep, &send_queue); + } else { + /* + * Get all packets for this endpoint that we can + * for this pass. + */ + get_htc_packet(target, ep, &send_queue, tx_resources); + } + + if (get_queue_depth(&send_queue) == 0) { + /* + * Didn't get packets due to out of resources or TX + * queue was drained. + */ + break; + } + + spin_unlock_bh(&target->tx_lock); + + /* send what we can */ + htc_issue_packets(target, ep, &send_queue); + + if (!ep->pipe.tx_credit_flow_enabled) { + pipeid = ep->pipe.pipeid_ul; + tx_resources = + ath6kl_hif_pipe_get_free_queue_number(ar, pipeid); + } + + spin_lock_bh(&target->tx_lock); + + } + /* done with this endpoint, we can clear the count */ + ep->tx_proc_cnt = 0; + spin_unlock_bh(&target->tx_lock); + + return HTC_SEND_QUEUE_OK; +} + +/* htc control packet manipulation */ +static void destroy_htc_txctrl_packet(struct htc_packet *packet) +{ + struct sk_buff *skb; + skb = packet->skb; + if (skb != NULL) + dev_kfree_skb(skb); + + kfree(packet); +} + +static struct htc_packet *build_htc_txctrl_packet(void) +{ + struct htc_packet *packet = NULL; + struct sk_buff *skb; + + packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL); + if (packet == NULL) + return NULL; + + skb = __dev_alloc_skb(HTC_CONTROL_BUFFER_SIZE, GFP_KERNEL); + + if (skb == NULL) { + kfree(packet); + return NULL; + } + packet->skb = skb; + + return packet; +} + +static void htc_free_txctrl_packet(struct htc_target *target, + struct htc_packet *packet) +{ + destroy_htc_txctrl_packet(packet); +} + +static struct htc_packet *htc_alloc_txctrl_packet(struct htc_target *target) +{ + return build_htc_txctrl_packet(); +} + +static void htc_txctrl_complete(struct htc_target *target, + struct htc_packet *packet) +{ + htc_free_txctrl_packet(target, packet); +} + +#define MAX_MESSAGE_SIZE 1536 + +static int htc_setup_target_buffer_assignments(struct htc_target *target) +{ + int status, credits, credit_per_maxmsg, i; + struct htc_pipe_txcredit_alloc *entry; + unsigned int hif_usbaudioclass = 0; + + credit_per_maxmsg = MAX_MESSAGE_SIZE / target->tgt_cred_sz; + if (MAX_MESSAGE_SIZE % target->tgt_cred_sz) + credit_per_maxmsg++; + + /* TODO, this should be configured by the caller! 
*/ + + credits = target->tgt_creds; + entry = &target->pipe.txcredit_alloc[0]; + + status = -ENOMEM; + + /* FIXME: hif_usbaudioclass is always zero */ + if (hif_usbaudioclass) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: For USB Audio Class- Total:%d\n", + __func__, credits); + entry++; + entry++; + /* Setup VO Service To have Max Credits */ + entry->service_id = WMI_DATA_VO_SVC; + entry->credit_alloc = (credits - 6); + if (entry->credit_alloc == 0) + entry->credit_alloc++; + + credits -= (int) entry->credit_alloc; + if (credits <= 0) + return status; + + entry++; + entry->service_id = WMI_CONTROL_SVC; + entry->credit_alloc = credit_per_maxmsg; + credits -= (int) entry->credit_alloc; + if (credits <= 0) + return status; + + /* leftovers go to best effort */ + entry++; + entry++; + entry->service_id = WMI_DATA_BE_SVC; + entry->credit_alloc = (u8) credits; + status = 0; + } else { + entry++; + entry->service_id = WMI_DATA_VI_SVC; + entry->credit_alloc = credits / 4; + if (entry->credit_alloc == 0) + entry->credit_alloc++; + + credits -= (int) entry->credit_alloc; + if (credits <= 0) + return status; + + entry++; + entry->service_id = WMI_DATA_VO_SVC; + entry->credit_alloc = credits / 4; + if (entry->credit_alloc == 0) + entry->credit_alloc++; + + credits -= (int) entry->credit_alloc; + if (credits <= 0) + return status; + + entry++; + entry->service_id = WMI_CONTROL_SVC; + entry->credit_alloc = credit_per_maxmsg; + credits -= (int) entry->credit_alloc; + if (credits <= 0) + return status; + + entry++; + entry->service_id = WMI_DATA_BK_SVC; + entry->credit_alloc = credit_per_maxmsg; + credits -= (int) entry->credit_alloc; + if (credits <= 0) + return status; + + /* leftovers go to best effort */ + entry++; + entry->service_id = WMI_DATA_BE_SVC; + entry->credit_alloc = (u8) credits; + status = 0; + } + + if (status == 0) { + for (i = 0; i < ENDPOINT_MAX; i++) { + if (target->pipe.txcredit_alloc[i].service_id != 0) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n", + i, + target->pipe.txcredit_alloc[i]. + service_id, + target->pipe.txcredit_alloc[i]. 
+ credit_alloc); + } + } + } + return status; +} + +/* process credit reports and call distribution function */ +static void htc_process_credit_report(struct htc_target *target, + struct htc_credit_report *rpt, + int num_entries, + enum htc_endpoint_id from_ep) +{ + int total_credits = 0, i; + struct htc_endpoint *ep; + + /* lock out TX while we update credits */ + spin_lock_bh(&target->tx_lock); + + for (i = 0; i < num_entries; i++, rpt++) { + if (rpt->eid >= ENDPOINT_MAX) { + WARN_ON_ONCE(1); + spin_unlock_bh(&target->tx_lock); + return; + } + + ep = &target->endpoint[rpt->eid]; + ep->cred_dist.credits += rpt->credits; + + if (ep->cred_dist.credits && get_queue_depth(&ep->txq)) { + spin_unlock_bh(&target->tx_lock); + htc_try_send(target, ep, NULL); + spin_lock_bh(&target->tx_lock); + } + + total_credits += rpt->credits; + } + ath6kl_dbg(ATH6KL_DBG_HTC, + "Report indicated %d credits to distribute\n", + total_credits); + + spin_unlock_bh(&target->tx_lock); +} + +/* flush endpoint TX queue */ +static void htc_flush_tx_endpoint(struct htc_target *target, + struct htc_endpoint *ep, u16 tag) +{ + struct htc_packet *packet; + + spin_lock_bh(&target->tx_lock); + while (get_queue_depth(&ep->txq)) { + packet = list_first_entry(&ep->txq, struct htc_packet, list); + list_del(&packet->list); + packet->status = 0; + send_packet_completion(target, packet); + } + spin_unlock_bh(&target->tx_lock); +} + +/* + * In the adapted HIF layer, struct sk_buff * are passed between HIF and HTC, + * since upper layers expect struct htc_packet containers we use the completed + * skb and look up its corresponding HTC packet buffer from a lookup list. + * This is extra overhead that can be fixed by re-aligning HIF interfaces with + * HTC. + */ +static struct htc_packet *htc_lookup_tx_packet(struct htc_target *target, + struct htc_endpoint *ep, + struct sk_buff *skb) +{ + struct htc_packet *packet, *tmp_pkt, *found_packet = NULL; + + spin_lock_bh(&target->tx_lock); + + /* + * iterate from the front of the tx lookup queue + * this lookup should be fast since lower layers complete in-order and + * so the completed packet should be at the head of the list generally + */ + list_for_each_entry_safe(packet, tmp_pkt, &ep->pipe.tx_lookup_queue, + list) { + /* check for removal */ + if (skb == packet->skb) { + /* found it */ + list_del(&packet->list); + found_packet = packet; + break; + } + } + + spin_unlock_bh(&target->tx_lock); + + return found_packet; +} + +static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb) +{ + struct htc_target *target = ar->htc_target; + struct htc_frame_hdr *htc_hdr; + struct htc_endpoint *ep; + struct htc_packet *packet; + u8 ep_id, *netdata; + u32 netlen; + + netdata = skb->data; + netlen = skb->len; + + htc_hdr = (struct htc_frame_hdr *) netdata; + + ep_id = htc_hdr->eid; + ep = &target->endpoint[ep_id]; + + packet = htc_lookup_tx_packet(target, ep, skb); + if (packet == NULL) { + /* may have already been flushed and freed */ + ath6kl_err("HTC TX lookup failed!\n"); + } else { + /* will be giving this buffer back to upper layers */ + packet->status = 0; + send_packet_completion(target, packet); + } + skb = NULL; + + if (!ep->pipe.tx_credit_flow_enabled) { + /* + * note: when using TX credit flow, the re-checking of queues + * happens when credits flow back from the target.
in the + * non-TX credit case, we recheck after the packet completes + */ + htc_try_send(target, ep, NULL); + } + + return 0; +} + +static int htc_send_packets_multiple(struct htc_target *target, + struct list_head *pkt_queue) +{ + struct htc_endpoint *ep; + struct htc_packet *packet, *tmp_pkt; + + if (list_empty(pkt_queue)) + return -EINVAL; + + /* get first packet to find out which ep the packets will go into */ + packet = list_first_entry(pkt_queue, struct htc_packet, list); + if (packet == NULL) + return -EINVAL; + + if (packet->endpoint >= ENDPOINT_MAX) { + WARN_ON_ONCE(1); + return -EINVAL; + } + ep = &target->endpoint[packet->endpoint]; + + htc_try_send(target, ep, pkt_queue); + + /* do completion on any packets that couldn't get in */ + if (!list_empty(pkt_queue)) { + list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) { + packet->status = -ENOMEM; + } + + do_send_completion(ep, pkt_queue); + } + + return 0; +} + +/* htc pipe rx path */ +static struct htc_packet *alloc_htc_packet_container(struct htc_target *target) +{ + struct htc_packet *packet; + spin_lock_bh(&target->rx_lock); + + if (target->pipe.htc_packet_pool == NULL) { + spin_unlock_bh(&target->rx_lock); + return NULL; + } + + packet = target->pipe.htc_packet_pool; + target->pipe.htc_packet_pool = (struct htc_packet *) packet->list.next; + + spin_unlock_bh(&target->rx_lock); + + packet->list.next = NULL; + return packet; +} + +static void free_htc_packet_container(struct htc_target *target, + struct htc_packet *packet) +{ + struct list_head *lh; + + spin_lock_bh(&target->rx_lock); + + if (target->pipe.htc_packet_pool == NULL) { + target->pipe.htc_packet_pool = packet; + packet->list.next = NULL; + } else { + lh = (struct list_head *) target->pipe.htc_packet_pool; + packet->list.next = lh; + target->pipe.htc_packet_pool = packet; + } + + spin_unlock_bh(&target->rx_lock); +} + +static int htc_process_trailer(struct htc_target *target, u8 *buffer, + int len, enum htc_endpoint_id from_ep) +{ + struct htc_credit_report *report; + struct htc_record_hdr *record; + u8 *record_buf, *orig_buf; + int orig_len, status; + + orig_buf = buffer; + orig_len = len; + status = 0; + + while (len > 0) { + if (len < sizeof(struct htc_record_hdr)) { + status = -EINVAL; + break; + } + + /* these are byte aligned structs */ + record = (struct htc_record_hdr *) buffer; + len -= sizeof(struct htc_record_hdr); + buffer += sizeof(struct htc_record_hdr); + + if (record->len > len) { + /* no room left in buffer for record */ + ath6kl_dbg(ATH6KL_DBG_HTC, + "invalid length: %d (id:%d) buffer has: %d bytes left\n", + record->len, record->rec_id, len); + status = -EINVAL; + break; + } + + /* start of record follows the header */ + record_buf = buffer; + + switch (record->rec_id) { + case HTC_RECORD_CREDITS: + if (record->len < sizeof(struct htc_credit_report)) { + WARN_ON_ONCE(1); + return -EINVAL; + } + + report = (struct htc_credit_report *) record_buf; + htc_process_credit_report(target, report, + record->len / sizeof(*report), + from_ep); + break; + default: + ath6kl_dbg(ATH6KL_DBG_HTC, + "unhandled record: id:%d length:%d\n", + record->rec_id, record->len); + break; + } + + if (status != 0) + break; + + /* advance buffer past this record for next time around */ + buffer += record->len; + len -= record->len; + } + + return status; +} + +static void do_recv_completion(struct htc_endpoint *ep, + struct list_head *queue_to_indicate) +{ + struct htc_packet *packet; + + if (list_empty(queue_to_indicate)) { + /* nothing to indicate */ + return; + } + + 
/* using legacy EpRecv */ + while (!list_empty(queue_to_indicate)) { + packet = list_first_entry(queue_to_indicate, + struct htc_packet, list); + list_del(&packet->list); + ep->ep_cb.rx(ep->target, packet); + } + + return; +} + +static void recv_packet_completion(struct htc_target *target, + struct htc_endpoint *ep, + struct htc_packet *packet) +{ + struct list_head container; + INIT_LIST_HEAD(&container); + list_add_tail(&packet->list, &container); + + /* do completion */ + do_recv_completion(ep, &container); +} + +static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb, + u8 pipeid) +{ + struct htc_target *target = ar->htc_target; + u8 *netdata, *trailer, hdr_info; + struct htc_frame_hdr *htc_hdr; + u32 netlen, trailerlen = 0; + struct htc_packet *packet; + struct htc_endpoint *ep; + u16 payload_len; + int status = 0; + + netdata = skb->data; + netlen = skb->len; + + htc_hdr = (struct htc_frame_hdr *) netdata; + + ep = &target->endpoint[htc_hdr->eid]; + + if (htc_hdr->eid >= ENDPOINT_MAX) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "HTC Rx: invalid EndpointID=%d\n", + htc_hdr->eid); + status = -EINVAL; + goto free_skb; + } + + payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len)); + + if (netlen < (payload_len + HTC_HDR_LENGTH)) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "HTC Rx: insufficient length, got:%d expected =%u\n", + netlen, payload_len + HTC_HDR_LENGTH); + status = -EINVAL; + goto free_skb; + } + + /* get flags to check for trailer */ + hdr_info = htc_hdr->flags; + if (hdr_info & HTC_FLG_RX_TRAILER) { + /* extract the trailer length */ + hdr_info = htc_hdr->ctrl[0]; + if ((hdr_info < sizeof(struct htc_record_hdr)) || + (hdr_info > payload_len)) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "invalid header: payloadlen should be %d, CB[0]: %d\n", + payload_len, hdr_info); + status = -EINVAL; + goto free_skb; + } + + trailerlen = hdr_info; + /* process trailer after hdr/apps payload */ + trailer = (u8 *) htc_hdr + HTC_HDR_LENGTH + + payload_len - hdr_info; + status = htc_process_trailer(target, trailer, hdr_info, + htc_hdr->eid); + if (status != 0) + goto free_skb; + } + + if (((int) payload_len - (int) trailerlen) <= 0) { + /* zero length packet with trailer, just drop these */ + goto free_skb; + } + + if (htc_hdr->eid == ENDPOINT_0) { + /* handle HTC control message */ + if (target->htc_flags & HTC_OP_STATE_SETUP_COMPLETE) { + /* + * fatal: target should not send unsolicited + * messageson the endpoint 0 + */ + ath6kl_dbg(ATH6KL_DBG_HTC, + "HTC ignores Rx Ctrl after setup complete\n"); + status = -EINVAL; + goto free_skb; + } + + /* remove HTC header */ + skb_pull(skb, HTC_HDR_LENGTH); + + netdata = skb->data; + netlen = skb->len; + + spin_lock_bh(&target->rx_lock); + + target->pipe.ctrl_response_valid = true; + target->pipe.ctrl_response_len = min_t(int, netlen, + HTC_MAX_CTRL_MSG_LEN); + memcpy(target->pipe.ctrl_response_buf, netdata, + target->pipe.ctrl_response_len); + + spin_unlock_bh(&target->rx_lock); + + dev_kfree_skb(skb); + skb = NULL; + goto free_skb; + } + + /* + * TODO: the message based HIF architecture allocates net bufs + * for recv packets since it bridges that HIF to upper layers, + * which expects HTC packets, we form the packets here + */ + packet = alloc_htc_packet_container(target); + if (packet == NULL) { + status = -ENOMEM; + goto free_skb; + } + + packet->status = 0; + packet->endpoint = htc_hdr->eid; + packet->pkt_cntxt = skb; + + /* TODO: for backwards compatibility */ + packet->buf = skb_push(skb, 0) + HTC_HDR_LENGTH; + packet->act_len = netlen - 
HTC_HDR_LENGTH - trailerlen; + + /* + * TODO: this is a hack because the driver layer will set the + * actual len of the skb again which will just double the len + */ + skb_trim(skb, 0); + + recv_packet_completion(target, ep, packet); + + /* recover the packet container */ + free_htc_packet_container(target, packet); + skb = NULL; + +free_skb: + if (skb != NULL) + dev_kfree_skb(skb); + + return status; + +} + +static void htc_flush_rx_queue(struct htc_target *target, + struct htc_endpoint *ep) +{ + struct list_head container; + struct htc_packet *packet; + + spin_lock_bh(&target->rx_lock); + + while (1) { + if (list_empty(&ep->rx_bufq)) + break; + + packet = list_first_entry(&ep->rx_bufq, + struct htc_packet, list); + list_del(&packet->list); + + spin_unlock_bh(&target->rx_lock); + packet->status = -ECANCELED; + packet->act_len = 0; + + ath6kl_dbg(ATH6KL_DBG_HTC, + "Flushing RX packet:0x%p, length:%d, ep:%d\n", + packet, packet->buf_len, + packet->endpoint); + + INIT_LIST_HEAD(&container); + list_add_tail(&packet->list, &container); + + /* give the packet back */ + do_recv_completion(ep, &container); + spin_lock_bh(&target->rx_lock); + } + + spin_unlock_bh(&target->rx_lock); +} + +/* polling routine to wait for a control packet to be received */ +static int htc_wait_recv_ctrl_message(struct htc_target *target) +{ + int count = HTC_TARGET_RESPONSE_POLL_COUNT; + + while (count > 0) { + spin_lock_bh(&target->rx_lock); + + if (target->pipe.ctrl_response_valid) { + target->pipe.ctrl_response_valid = false; + spin_unlock_bh(&target->rx_lock); + break; + } + + spin_unlock_bh(&target->rx_lock); + + count--; + + msleep_interruptible(HTC_TARGET_RESPONSE_POLL_WAIT); + } + + if (count <= 0) { + ath6kl_dbg(ATH6KL_DBG_HTC, "%s: Timeout!\n", __func__); + return -ECOMM; + } + + return 0; +} + +static void htc_rxctrl_complete(struct htc_target *context, + struct htc_packet *packet) +{ + /* TODO, can't really receive HTC control messages yet.... 
*/ + ath6kl_dbg(ATH6KL_DBG_HTC, "%s: invalid call function\n", __func__); +} + +/* htc pipe initialization */ +static void reset_endpoint_states(struct htc_target *target) +{ + struct htc_endpoint *ep; + int i; + + for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) { + ep = &target->endpoint[i]; + ep->svc_id = 0; + ep->len_max = 0; + ep->max_txq_depth = 0; + ep->eid = i; + INIT_LIST_HEAD(&ep->txq); + INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue); + INIT_LIST_HEAD(&ep->rx_bufq); + ep->target = target; + ep->pipe.tx_credit_flow_enabled = (bool) 1; /* FIXME */ + } +} + +/* start HTC, this is called after all services are connected */ +static int htc_config_target_hif_pipe(struct htc_target *target) +{ + return 0; +} + +/* htc service functions */ +static u8 htc_get_credit_alloc(struct htc_target *target, u16 service_id) +{ + u8 allocation = 0; + int i; + + for (i = 0; i < ENDPOINT_MAX; i++) { + if (target->pipe.txcredit_alloc[i].service_id == service_id) + allocation = + target->pipe.txcredit_alloc[i].credit_alloc; + } + + if (allocation == 0) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "HTC Service TX : 0x%2.2X : allocation is zero!\n", + service_id); + } + + return allocation; +} + +static int ath6kl_htc_pipe_conn_service(struct htc_target *target, + struct htc_service_connect_req *conn_req, + struct htc_service_connect_resp *conn_resp) +{ + struct ath6kl *ar = target->dev->ar; + struct htc_packet *packet = NULL; + struct htc_conn_service_resp *resp_msg; + struct htc_conn_service_msg *conn_msg; + enum htc_endpoint_id assigned_epid = ENDPOINT_MAX; + bool disable_credit_flowctrl = false; + unsigned int max_msg_size = 0; + struct htc_endpoint *ep; + int length, status = 0; + struct sk_buff *skb; + u8 tx_alloc; + u16 flags; + + if (conn_req->svc_id == 0) { + WARN_ON_ONCE(1); + status = -EINVAL; + goto free_packet; + } + + if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) { + /* special case for pseudo control service */ + assigned_epid = ENDPOINT_0; + max_msg_size = HTC_MAX_CTRL_MSG_LEN; + tx_alloc = 0; + + } else { + + tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id); + if (tx_alloc == 0) { + status = -ENOMEM; + goto free_packet; + } + + /* allocate a packet to send to the target */ + packet = htc_alloc_txctrl_packet(target); + + if (packet == NULL) { + WARN_ON_ONCE(1); + status = -ENOMEM; + goto free_packet; + } + + skb = packet->skb; + length = sizeof(struct htc_conn_service_msg); + + /* assemble connect service message */ + conn_msg = (struct htc_conn_service_msg *) skb_put(skb, + length); + if (conn_msg == NULL) { + WARN_ON_ONCE(1); + status = -EINVAL; + goto free_packet; + } + + memset(conn_msg, 0, + sizeof(struct htc_conn_service_msg)); + conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID); + conn_msg->svc_id = cpu_to_le16(conn_req->svc_id); + conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags & + ~HTC_CONN_FLGS_SET_RECV_ALLOC_MASK); + + /* tell target desired recv alloc for this ep */ + flags = tx_alloc << HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT; + conn_msg->conn_flags |= cpu_to_le16(flags); + + if (conn_req->conn_flags & + HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL) { + disable_credit_flowctrl = true; + } + + set_htc_pkt_info(packet, NULL, (u8 *) conn_msg, + length, + ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG); + + status = ath6kl_htc_pipe_tx(target, packet); + + /* we don't own it anymore */ + packet = NULL; + if (status != 0) + goto free_packet; + + /* wait for response */ + status = htc_wait_recv_ctrl_message(target); + if (status != 0) + goto free_packet; + + /* we controlled the buffer creation so it has to be 
+ * properly aligned + */ + resp_msg = (struct htc_conn_service_resp *) + target->pipe.ctrl_response_buf; + + if (resp_msg->msg_id != cpu_to_le16(HTC_MSG_CONN_SVC_RESP_ID) || + (target->pipe.ctrl_response_len < sizeof(*resp_msg))) { + /* this message is not valid */ + WARN_ON_ONCE(1); + status = -EINVAL; + goto free_packet; + } + + ath6kl_dbg(ATH6KL_DBG_TRC, + "%s: service 0x%X conn resp: status: %d ep: %d\n", + __func__, resp_msg->svc_id, resp_msg->status, + resp_msg->eid); + + conn_resp->resp_code = resp_msg->status; + /* check response status */ + if (resp_msg->status != HTC_SERVICE_SUCCESS) { + ath6kl_dbg(ATH6KL_DBG_HTC, + "Target failed service 0x%X connect request (status:%d)\n", + resp_msg->svc_id, resp_msg->status); + status = -EINVAL; + goto free_packet; + } + + assigned_epid = (enum htc_endpoint_id) resp_msg->eid; + max_msg_size = le16_to_cpu(resp_msg->max_msg_sz); + } + + /* the rest are parameter checks so set the error status */ + status = -EINVAL; + + if (assigned_epid >= ENDPOINT_MAX) { + WARN_ON_ONCE(1); + goto free_packet; + } + + if (max_msg_size == 0) { + WARN_ON_ONCE(1); + goto free_packet; + } + + ep = &target->endpoint[assigned_epid]; + ep->eid = assigned_epid; + if (ep->svc_id != 0) { + /* endpoint already in use! */ + WARN_ON_ONCE(1); + goto free_packet; + } + + /* return assigned endpoint to caller */ + conn_resp->endpoint = assigned_epid; + conn_resp->len_max = max_msg_size; + + /* setup the endpoint */ + ep->svc_id = conn_req->svc_id; /* this marks ep in use */ + ep->max_txq_depth = conn_req->max_txq_depth; + ep->len_max = max_msg_size; + ep->cred_dist.credits = tx_alloc; + ep->cred_dist.cred_sz = target->tgt_cred_sz; + ep->cred_dist.cred_per_msg = max_msg_size / target->tgt_cred_sz; + if (max_msg_size % target->tgt_cred_sz) + ep->cred_dist.cred_per_msg++; + + /* copy all the callbacks */ + ep->ep_cb = conn_req->ep_cb; + + status = ath6kl_hif_pipe_map_service(ar, ep->svc_id, + &ep->pipe.pipeid_ul, + &ep->pipe.pipeid_dl); + if (status != 0) + goto free_packet; + + ath6kl_dbg(ATH6KL_DBG_HTC, + "SVC Ready: 0x%4.4X: ULpipe:%d DLpipe:%d id:%d\n", + ep->svc_id, ep->pipe.pipeid_ul, + ep->pipe.pipeid_dl, ep->eid); + + if (disable_credit_flowctrl && ep->pipe.tx_credit_flow_enabled) { + ep->pipe.tx_credit_flow_enabled = false; + ath6kl_dbg(ATH6KL_DBG_HTC, + "SVC: 0x%4.4X ep:%d TX flow control off\n", + ep->svc_id, assigned_epid); + } + +free_packet: + if (packet != NULL) + htc_free_txctrl_packet(target, packet); + return status; +} + +/* htc export functions */ +static void *ath6kl_htc_pipe_create(struct ath6kl *ar) +{ + int status = 0; + struct htc_endpoint *ep = NULL; + struct htc_target *target = NULL; + struct htc_packet *packet; + int i; + + target = kzalloc(sizeof(struct htc_target), GFP_KERNEL); + if (target == NULL) { + ath6kl_err("htc create unable to allocate memory\n"); + status = -ENOMEM; + goto fail_htc_create; + } + + spin_lock_init(&target->htc_lock); + spin_lock_init(&target->rx_lock); + spin_lock_init(&target->tx_lock); + + reset_endpoint_states(target); + + for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) { + packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL); + + if (packet != NULL) + free_htc_packet_container(target, packet); + } + + target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL); + if (!target->dev) { + ath6kl_err("unable to allocate memory\n"); + status = -ENOMEM; + goto fail_htc_create; + } + target->dev->ar = ar; + target->dev->htc_cnxt = target; + + /* Get HIF default pipe for HTC message exchange */ + ep = 
&target->endpoint[ENDPOINT_0]; + + ath6kl_hif_pipe_get_default(ar, &ep->pipe.pipeid_ul, + &ep->pipe.pipeid_dl); + + return target; + +fail_htc_create: + if (status != 0) { + if (target != NULL) + ath6kl_htc_pipe_cleanup(target); + + target = NULL; + } + return target; +} + +/* cleanup the HTC instance */ +static void ath6kl_htc_pipe_cleanup(struct htc_target *target) +{ + struct htc_packet *packet; + + while (true) { + packet = alloc_htc_packet_container(target); + if (packet == NULL) + break; + kfree(packet); + } + + kfree(target->dev); + + /* kfree our instance */ + kfree(target); +} + +static int ath6kl_htc_pipe_start(struct htc_target *target) +{ + struct sk_buff *skb; + struct htc_setup_comp_ext_msg *setup; + struct htc_packet *packet; + + htc_config_target_hif_pipe(target); + + /* allocate a buffer to send */ + packet = htc_alloc_txctrl_packet(target); + if (packet == NULL) { + WARN_ON_ONCE(1); + return -ENOMEM; + } + + skb = packet->skb; + + /* assemble setup complete message */ + setup = (struct htc_setup_comp_ext_msg *) skb_put(skb, + sizeof(*setup)); + memset(setup, 0, sizeof(struct htc_setup_comp_ext_msg)); + setup->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID); + + ath6kl_dbg(ATH6KL_DBG_HTC, "HTC using TX credit flow control\n"); + + set_htc_pkt_info(packet, NULL, (u8 *) setup, + sizeof(struct htc_setup_comp_ext_msg), + ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG); + + target->htc_flags |= HTC_OP_STATE_SETUP_COMPLETE; + + return ath6kl_htc_pipe_tx(target, packet); +} + +static void ath6kl_htc_pipe_stop(struct htc_target *target) +{ + int i; + struct htc_endpoint *ep; + + /* cleanup endpoints */ + for (i = 0; i < ENDPOINT_MAX; i++) { + ep = &target->endpoint[i]; + htc_flush_rx_queue(target, ep); + htc_flush_tx_endpoint(target, ep, HTC_TX_PACKET_TAG_ALL); + } + + reset_endpoint_states(target); + target->htc_flags &= ~HTC_OP_STATE_SETUP_COMPLETE; +} + +static int ath6kl_htc_pipe_get_rxbuf_num(struct htc_target *target, + enum htc_endpoint_id endpoint) +{ + int num; + + spin_lock_bh(&target->rx_lock); + num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq)); + spin_unlock_bh(&target->rx_lock); + + return num; +} + +static int ath6kl_htc_pipe_tx(struct htc_target *target, + struct htc_packet *packet) +{ + struct list_head queue; + + ath6kl_dbg(ATH6KL_DBG_HTC, + "%s: endPointId: %d, buffer: 0x%p, length: %d\n", + __func__, packet->endpoint, packet->buf, + packet->act_len); + + INIT_LIST_HEAD(&queue); + list_add_tail(&packet->list, &queue); + + return htc_send_packets_multiple(target, &queue); +} + +static int ath6kl_htc_pipe_wait_target(struct htc_target *target) +{ + struct htc_ready_ext_msg *ready_msg; + struct htc_service_connect_req connect; + struct htc_service_connect_resp resp; + int status = 0; + + status = htc_wait_recv_ctrl_message(target); + + if (status != 0) + return status; + + if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) { + ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg len:%d!\n", + target->pipe.ctrl_response_len); + return -ECOMM; + } + + ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf; + + if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) { + ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg : 0x%X !\n", + ready_msg->ver2_0_info.msg_id); + return -ECOMM; + } + + ath6kl_dbg(ATH6KL_DBG_HTC, + "Target Ready! 
: transmit resources : %d size:%d\n", + ready_msg->ver2_0_info.cred_cnt, + ready_msg->ver2_0_info.cred_sz); + + target->tgt_creds = le16_to_cpu(ready_msg->ver2_0_info.cred_cnt); + target->tgt_cred_sz = le16_to_cpu(ready_msg->ver2_0_info.cred_sz); + + if ((target->tgt_creds == 0) || (target->tgt_cred_sz == 0)) + return -ECOMM; + + htc_setup_target_buffer_assignments(target); + + /* setup our pseudo HTC control endpoint connection */ + memset(&connect, 0, sizeof(connect)); + memset(&resp, 0, sizeof(resp)); + connect.ep_cb.tx_complete = htc_txctrl_complete; + connect.ep_cb.rx = htc_rxctrl_complete; + connect.max_txq_depth = NUM_CONTROL_TX_BUFFERS; + connect.svc_id = HTC_CTRL_RSVD_SVC; + + /* connect fake service */ + status = ath6kl_htc_pipe_conn_service(target, &connect, &resp); + + return status; +} + +static void ath6kl_htc_pipe_flush_txep(struct htc_target *target, + enum htc_endpoint_id endpoint, u16 tag) +{ + struct htc_endpoint *ep = &target->endpoint[endpoint]; + + if (ep->svc_id == 0) { + WARN_ON_ONCE(1); + /* not in use.. */ + return; + } + + htc_flush_tx_endpoint(target, ep, tag); +} + +static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target *target, + struct list_head *pkt_queue) +{ + struct htc_packet *packet, *tmp_pkt, *first; + struct htc_endpoint *ep; + int status = 0; + + if (list_empty(pkt_queue)) + return -EINVAL; + + first = list_first_entry(pkt_queue, struct htc_packet, list); + if (first == NULL) { + WARN_ON_ONCE(1); + return -EINVAL; + } + + if (first->endpoint >= ENDPOINT_MAX) { + WARN_ON_ONCE(1); + return -EINVAL; + } + + ath6kl_dbg(ATH6KL_DBG_HTC, "%s: epid: %d, cnt:%d, len: %d\n", + __func__, first->endpoint, get_queue_depth(pkt_queue), + first->buf_len); + + ep = &target->endpoint[first->endpoint]; + + spin_lock_bh(&target->rx_lock); + + /* store receive packets */ + list_splice_tail_init(pkt_queue, &ep->rx_bufq); + + spin_unlock_bh(&target->rx_lock); + + if (status != 0) { + /* walk through queue and mark each one canceled */ + list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) { + packet->status = -ECANCELED; + } + + do_recv_completion(ep, pkt_queue); + } + + return status; +} + +static void ath6kl_htc_pipe_activity_changed(struct htc_target *target, + enum htc_endpoint_id ep, + bool active) +{ + /* TODO */ +} + +static void ath6kl_htc_pipe_flush_rx_buf(struct htc_target *target) +{ + /* TODO */ +} + +static int ath6kl_htc_pipe_credit_setup(struct htc_target *target, + struct ath6kl_htc_credit_info *info) +{ + return 0; +} + +static const struct ath6kl_htc_ops ath6kl_htc_pipe_ops = { + .create = ath6kl_htc_pipe_create, + .wait_target = ath6kl_htc_pipe_wait_target, + .start = ath6kl_htc_pipe_start, + .conn_service = ath6kl_htc_pipe_conn_service, + .tx = ath6kl_htc_pipe_tx, + .stop = ath6kl_htc_pipe_stop, + .cleanup = ath6kl_htc_pipe_cleanup, + .flush_txep = ath6kl_htc_pipe_flush_txep, + .flush_rx_buf = ath6kl_htc_pipe_flush_rx_buf, + .activity_changed = ath6kl_htc_pipe_activity_changed, + .get_rxbuf_num = ath6kl_htc_pipe_get_rxbuf_num, + .add_rxbuf_multiple = ath6kl_htc_pipe_add_rxbuf_multiple, + .credit_setup = ath6kl_htc_pipe_credit_setup, + .tx_complete = ath6kl_htc_pipe_tx_complete, + .rx_complete = ath6kl_htc_pipe_rx_complete, +}; + +void ath6kl_htc_pipe_attach(struct ath6kl *ar) +{ + ar->htc_ops = &ath6kl_htc_pipe_ops; +} diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c index 03cae142f17..29ef50ea07d 100644 --- a/drivers/net/wireless/ath/ath6kl/init.c +++ b/drivers/net/wireless/ath/ath6kl/init.c @@ 
-16,17 +16,21 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/moduleparam.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/of.h> #include <linux/mmc/sdio_func.h> +#include <linux/vmalloc.h> #include "core.h" #include "cfg80211.h" #include "target.h" #include "debug.h" #include "hif-ops.h" +#include "htc-ops.h" static const struct ath6kl_hw hw_list[] = { { @@ -256,6 +260,7 @@ static int ath6kl_init_service_ep(struct ath6kl *ar) memset(&connect, 0, sizeof(connect)); /* these fields are the same for all service endpoints */ + connect.ep_cb.tx_comp_multi = ath6kl_tx_complete; connect.ep_cb.rx = ath6kl_rx; connect.ep_cb.rx_refill = ath6kl_rx_refill; connect.ep_cb.tx_full = ath6kl_tx_queue_full; @@ -485,22 +490,31 @@ int ath6kl_configure_target(struct ath6kl *ar) fw_mode |= fw_iftype << (i * HI_OPTION_FW_MODE_BITS); /* - * By default, submodes : + * Submodes when fw does not support dynamic interface + * switching: * vif[0] - AP/STA/IBSS * vif[1] - "P2P dev"/"P2P GO"/"P2P Client" * vif[2] - "P2P dev"/"P2P GO"/"P2P Client" + * Otherwise, all the interfaces are initialized to P2P dev. */ - for (i = 0; i < ar->max_norm_iface; i++) - fw_submode |= HI_OPTION_FW_SUBMODE_NONE << - (i * HI_OPTION_FW_SUBMODE_BITS); + if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, + ar->fw_capabilities)) { + for (i = 0; i < ar->vif_max; i++) + fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV << + (i * HI_OPTION_FW_SUBMODE_BITS); + } else { + for (i = 0; i < ar->max_norm_iface; i++) + fw_submode |= HI_OPTION_FW_SUBMODE_NONE << + (i * HI_OPTION_FW_SUBMODE_BITS); - for (i = ar->max_norm_iface; i < ar->vif_max; i++) - fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV << - (i * HI_OPTION_FW_SUBMODE_BITS); + for (i = ar->max_norm_iface; i < ar->vif_max; i++) + fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV << + (i * HI_OPTION_FW_SUBMODE_BITS); - if (ar->p2p && ar->vif_max == 1) - fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV; + if (ar->p2p && ar->vif_max == 1) + fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV; + } if (ath6kl_bmi_write_hi32(ar, hi_app_host_interest, HTC_PROTOCOL_VERSION) != 0) { @@ -539,18 +553,20 @@ int ath6kl_configure_target(struct ath6kl *ar) * but possible in theory. 
*/ - param = ar->hw.board_ext_data_addr; - ram_reserved_size = ar->hw.reserved_ram_size; + if (ar->target_type == TARGET_TYPE_AR6003) { + param = ar->hw.board_ext_data_addr; + ram_reserved_size = ar->hw.reserved_ram_size; - if (ath6kl_bmi_write_hi32(ar, hi_board_ext_data, param) != 0) { - ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n"); - return -EIO; - } + if (ath6kl_bmi_write_hi32(ar, hi_board_ext_data, param) != 0) { + ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n"); + return -EIO; + } - if (ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz, - ram_reserved_size) != 0) { - ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n"); - return -EIO; + if (ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz, + ram_reserved_size) != 0) { + ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n"); + return -EIO; + } } /* set the block size for the target */ @@ -924,13 +940,14 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name) if (ar->fw != NULL) break; - ar->fw = kmemdup(data, ie_len, GFP_KERNEL); + ar->fw = vmalloc(ie_len); if (ar->fw == NULL) { ret = -ENOMEM; goto out; } + memcpy(ar->fw, data, ie_len); ar->fw_len = ie_len; break; case ATH6KL_FW_IE_PATCH_IMAGE: @@ -1507,7 +1524,7 @@ int ath6kl_init_hw_start(struct ath6kl *ar) } /* setup credit distribution */ - ath6kl_credit_setup(ar->htc_target, &ar->credit_state_info); + ath6kl_htc_credit_setup(ar->htc_target, &ar->credit_state_info); /* start HTC */ ret = ath6kl_htc_start(ar->htc_target); diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index 229e1922ebe..4d818f96c41 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -15,6 +15,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include "core.h" #include "hif-ops.h" #include "cfg80211.h" @@ -756,6 +758,10 @@ static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len) stats->wow_evt_discarded += le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded); + stats->arp_received = le32_to_cpu(tgt_stats->arp_stats.arp_received); + stats->arp_replied = le32_to_cpu(tgt_stats->arp_stats.arp_replied); + stats->arp_matched = le32_to_cpu(tgt_stats->arp_stats.arp_matched); + if (test_bit(STATS_UPDATE_PEND, &vif->flags)) { clear_bit(STATS_UPDATE_PEND, &vif->flags); wake_up(&ar->event_wq); diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c index 53528648b42..44ea7a74210 100644 --- a/drivers/net/wireless/ath/ath6kl/sdio.c +++ b/drivers/net/wireless/ath/ath6kl/sdio.c @@ -1362,7 +1362,7 @@ static int ath6kl_sdio_probe(struct sdio_func *func, goto err_core_alloc; } - ret = ath6kl_core_init(ar); + ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_MBOX); if (ret) { ath6kl_err("Failed to init ath6kl core\n"); goto err_core_alloc; diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c index 6675c92b542..acc9aa832f7 100644 --- a/drivers/net/wireless/ath/ath6kl/testmode.c +++ b/drivers/net/wireless/ath/ath6kl/testmode.c @@ -55,8 +55,9 @@ void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len) ath6kl_warn("failed to allocate testmode rx skb!\n"); return; } - NLA_PUT_U32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD); - NLA_PUT(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf); + if (nla_put_u32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD) || + nla_put(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf)) + goto nla_put_failure; cfg80211_testmode_event(skb, GFP_KERNEL); return; diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c index f85353fd179..82f2f5cb475 100644 --- a/drivers/net/wireless/ath/ath6kl/txrx.c +++ b/drivers/net/wireless/ath/ath6kl/txrx.c @@ -15,8 +15,11 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include "core.h" #include "debug.h" +#include "htc-ops.h" /* * tid - tid_mux0..tid_mux3 @@ -322,6 +325,7 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb, cookie->map_no = 0; set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len, eid, ATH6KL_CONTROL_PKT_TAG); + cookie->htc_pkt.skb = skb; /* * This interface is asynchronous, if there is an error, cleanup @@ -490,6 +494,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) cookie->map_no = map_no; set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len, eid, htc_tag); + cookie->htc_pkt.skb = skb; ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ", skb->data, skb->len); @@ -570,7 +575,7 @@ void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active) notify_htc: /* notify HTC, this may cause credit distribution changes */ - ath6kl_htc_indicate_activity_change(ar->htc_target, eid, active); + ath6kl_htc_activity_changed(ar->htc_target, eid, active); } enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target, @@ -666,9 +671,10 @@ static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif, } } -void ath6kl_tx_complete(void *context, struct list_head *packet_queue) +void ath6kl_tx_complete(struct htc_target *target, + struct list_head *packet_queue) { - struct ath6kl *ar = context; + struct ath6kl *ar = target->dev->ar; struct sk_buff_head skb_queue; struct htc_packet *packet; struct sk_buff *skb; @@ -887,6 +893,7 @@ void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint) skb->data = PTR_ALIGN(skb->data - 4, 4); set_htc_rxpkt_info(packet, skb, skb->data, ATH6KL_BUFFER_SIZE, endpoint); + packet->skb = skb; list_add_tail(&packet->list, &queue); } @@ -909,6 +916,8 @@ void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count) skb->data = PTR_ALIGN(skb->data - 4, 4); set_htc_rxpkt_info(packet, skb, skb->data, ATH6KL_AMSDU_BUFFER_SIZE, 0); + packet->skb = skb; + spin_lock_bh(&ar->lock); list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue); spin_unlock_bh(&ar->lock); @@ -1281,6 +1290,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) struct wmi_data_hdr *dhdr; int min_hdr_len; u8 meta_type, dot11_hdr = 0; + u8 pad_before_data_start; int status = packet->status; enum htc_endpoint_id ept = packet->endpoint; bool is_amsdu, prev_ps, ps_state = false; @@ -1492,6 +1502,10 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) seq_no = wmi_data_hdr_get_seqno(dhdr); meta_type = wmi_data_hdr_get_meta(dhdr); dot11_hdr = wmi_data_hdr_get_dot11(dhdr); + pad_before_data_start = + (le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT) + & WMI_DATA_HDR_PAD_BEFORE_DATA_MASK; + skb_pull(skb, sizeof(struct wmi_data_hdr)); switch (meta_type) { @@ -1510,6 +1524,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) break; } + skb_pull(skb, pad_before_data_start); + if (dot11_hdr) status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb); else if (!is_amsdu) @@ -1579,7 +1595,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) /* aggregation code will handle the skb */ return; } - } + } else if (!is_broadcast_ether_addr(datap->h_dest)) + vif->net_stats.multicast++; ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb); } diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c index 325b1224c2b..ec7f1f5fd1c 100644 --- a/drivers/net/wireless/ath/ath6kl/usb.c +++ b/drivers/net/wireless/ath/ath6kl/usb.c @@ -21,15 
+21,77 @@ #include "debug.h" #include "core.h" +/* constants */ +#define TX_URB_COUNT 32 +#define RX_URB_COUNT 32 +#define ATH6KL_USB_RX_BUFFER_SIZE 1700 + +/* tx/rx pipes for usb */ +enum ATH6KL_USB_PIPE_ID { + ATH6KL_USB_PIPE_TX_CTRL = 0, + ATH6KL_USB_PIPE_TX_DATA_LP, + ATH6KL_USB_PIPE_TX_DATA_MP, + ATH6KL_USB_PIPE_TX_DATA_HP, + ATH6KL_USB_PIPE_RX_CTRL, + ATH6KL_USB_PIPE_RX_DATA, + ATH6KL_USB_PIPE_RX_DATA2, + ATH6KL_USB_PIPE_RX_INT, + ATH6KL_USB_PIPE_MAX +}; + +#define ATH6KL_USB_PIPE_INVALID ATH6KL_USB_PIPE_MAX + +struct ath6kl_usb_pipe { + struct list_head urb_list_head; + struct usb_anchor urb_submitted; + u32 urb_alloc; + u32 urb_cnt; + u32 urb_cnt_thresh; + unsigned int usb_pipe_handle; + u32 flags; + u8 ep_address; + u8 logical_pipe_num; + struct ath6kl_usb *ar_usb; + u16 max_packet_size; + struct work_struct io_complete_work; + struct sk_buff_head io_comp_queue; + struct usb_endpoint_descriptor *ep_desc; +}; + +#define ATH6KL_USB_PIPE_FLAG_TX (1 << 0) + /* usb device object */ struct ath6kl_usb { + /* protects pipe->urb_list_head and pipe->urb_cnt */ + spinlock_t cs_lock; + struct usb_device *udev; struct usb_interface *interface; + struct ath6kl_usb_pipe pipes[ATH6KL_USB_PIPE_MAX]; u8 *diag_cmd_buffer; u8 *diag_resp_buffer; struct ath6kl *ar; }; +/* usb urb object */ +struct ath6kl_urb_context { + struct list_head link; + struct ath6kl_usb_pipe *pipe; + struct sk_buff *skb; + struct ath6kl *ar; +}; + +/* USB endpoint definitions */ +#define ATH6KL_USB_EP_ADDR_APP_CTRL_IN 0x81 +#define ATH6KL_USB_EP_ADDR_APP_DATA_IN 0x82 +#define ATH6KL_USB_EP_ADDR_APP_DATA2_IN 0x83 +#define ATH6KL_USB_EP_ADDR_APP_INT_IN 0x84 + +#define ATH6KL_USB_EP_ADDR_APP_CTRL_OUT 0x01 +#define ATH6KL_USB_EP_ADDR_APP_DATA_LP_OUT 0x02 +#define ATH6KL_USB_EP_ADDR_APP_DATA_MP_OUT 0x03 +#define ATH6KL_USB_EP_ADDR_APP_DATA_HP_OUT 0x04 + /* diagnostic command defnitions */ #define ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD 1 #define ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP 2 @@ -55,11 +117,493 @@ struct ath6kl_usb_ctrl_diag_resp_read { __le32 value; } __packed; +/* function declarations */ +static void ath6kl_usb_recv_complete(struct urb *urb); + +#define ATH6KL_USB_IS_BULK_EP(attr) (((attr) & 3) == 0x02) +#define ATH6KL_USB_IS_INT_EP(attr) (((attr) & 3) == 0x03) +#define ATH6KL_USB_IS_ISOC_EP(attr) (((attr) & 3) == 0x01) +#define ATH6KL_USB_IS_DIR_IN(addr) ((addr) & 0x80) + +/* pipe/urb operations */ +static struct ath6kl_urb_context * +ath6kl_usb_alloc_urb_from_pipe(struct ath6kl_usb_pipe *pipe) +{ + struct ath6kl_urb_context *urb_context = NULL; + unsigned long flags; + + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); + if (!list_empty(&pipe->urb_list_head)) { + urb_context = + list_first_entry(&pipe->urb_list_head, + struct ath6kl_urb_context, link); + list_del(&urb_context->link); + pipe->urb_cnt--; + } + spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags); + + return urb_context; +} + +static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe, + struct ath6kl_urb_context *urb_context) +{ + unsigned long flags; + + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); + pipe->urb_cnt++; + + list_add(&urb_context->link, &pipe->urb_list_head); + spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags); +} + +static void ath6kl_usb_cleanup_recv_urb(struct ath6kl_urb_context *urb_context) +{ + if (urb_context->skb != NULL) { + dev_kfree_skb(urb_context->skb); + urb_context->skb = NULL; + } + + ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context); +} + +static inline struct ath6kl_usb *ath6kl_usb_priv(struct 
ath6kl *ar) +{ + return ar->hif_priv; +} + +/* pipe resource allocation/cleanup */ +static int ath6kl_usb_alloc_pipe_resources(struct ath6kl_usb_pipe *pipe, + int urb_cnt) +{ + struct ath6kl_urb_context *urb_context; + int status = 0, i; + + INIT_LIST_HEAD(&pipe->urb_list_head); + init_usb_anchor(&pipe->urb_submitted); + + for (i = 0; i < urb_cnt; i++) { + urb_context = kzalloc(sizeof(struct ath6kl_urb_context), + GFP_KERNEL); + if (urb_context == NULL) + /* FIXME: set status to -ENOMEM */ + break; + + urb_context->pipe = pipe; + + /* + * we only allocate the urb contexts here; the actual URB + * is allocated from the kernel as needed to do a transaction + */ + pipe->urb_alloc++; + ath6kl_usb_free_urb_to_pipe(pipe, urb_context); + } + + ath6kl_dbg(ATH6KL_DBG_USB, + "ath6kl usb: alloc resources lpipe:%d hpipe:0x%X urbs:%d\n", + pipe->logical_pipe_num, pipe->usb_pipe_handle, + pipe->urb_alloc); + + return status; +} + +static void ath6kl_usb_free_pipe_resources(struct ath6kl_usb_pipe *pipe) +{ + struct ath6kl_urb_context *urb_context; + + if (pipe->ar_usb == NULL) { + /* nothing allocated for this pipe */ + return; + } + + ath6kl_dbg(ATH6KL_DBG_USB, + "ath6kl usb: free resources lpipe:%d" + "hpipe:0x%X urbs:%d avail:%d\n", + pipe->logical_pipe_num, pipe->usb_pipe_handle, + pipe->urb_alloc, pipe->urb_cnt); + + if (pipe->urb_alloc != pipe->urb_cnt) { + ath6kl_dbg(ATH6KL_DBG_USB, + "ath6kl usb: urb leak! lpipe:%d" + "hpipe:0x%X urbs:%d avail:%d\n", + pipe->logical_pipe_num, pipe->usb_pipe_handle, + pipe->urb_alloc, pipe->urb_cnt); + } + + while (true) { + urb_context = ath6kl_usb_alloc_urb_from_pipe(pipe); + if (urb_context == NULL) + break; + kfree(urb_context); + } + +} + +static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb) +{ + int i; + + for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++) + ath6kl_usb_free_pipe_resources(&ar_usb->pipes[i]); + +} + +static u8 ath6kl_usb_get_logical_pipe_num(struct ath6kl_usb *ar_usb, + u8 ep_address, int *urb_count) +{ + u8 pipe_num = ATH6KL_USB_PIPE_INVALID; + + switch (ep_address) { + case ATH6KL_USB_EP_ADDR_APP_CTRL_IN: + pipe_num = ATH6KL_USB_PIPE_RX_CTRL; + *urb_count = RX_URB_COUNT; + break; + case ATH6KL_USB_EP_ADDR_APP_DATA_IN: + pipe_num = ATH6KL_USB_PIPE_RX_DATA; + *urb_count = RX_URB_COUNT; + break; + case ATH6KL_USB_EP_ADDR_APP_INT_IN: + pipe_num = ATH6KL_USB_PIPE_RX_INT; + *urb_count = RX_URB_COUNT; + break; + case ATH6KL_USB_EP_ADDR_APP_DATA2_IN: + pipe_num = ATH6KL_USB_PIPE_RX_DATA2; + *urb_count = RX_URB_COUNT; + break; + case ATH6KL_USB_EP_ADDR_APP_CTRL_OUT: + pipe_num = ATH6KL_USB_PIPE_TX_CTRL; + *urb_count = TX_URB_COUNT; + break; + case ATH6KL_USB_EP_ADDR_APP_DATA_LP_OUT: + pipe_num = ATH6KL_USB_PIPE_TX_DATA_LP; + *urb_count = TX_URB_COUNT; + break; + case ATH6KL_USB_EP_ADDR_APP_DATA_MP_OUT: + pipe_num = ATH6KL_USB_PIPE_TX_DATA_MP; + *urb_count = TX_URB_COUNT; + break; + case ATH6KL_USB_EP_ADDR_APP_DATA_HP_OUT: + pipe_num = ATH6KL_USB_PIPE_TX_DATA_HP; + *urb_count = TX_URB_COUNT; + break; + default: + /* note: there may be endpoints not currently used */ + break; + } + + return pipe_num; +} + +static int ath6kl_usb_setup_pipe_resources(struct ath6kl_usb *ar_usb) +{ + struct usb_interface *interface = ar_usb->interface; + struct usb_host_interface *iface_desc = interface->cur_altsetting; + struct usb_endpoint_descriptor *endpoint; + struct ath6kl_usb_pipe *pipe; + int i, urbcount, status = 0; + u8 pipe_num; + + ath6kl_dbg(ATH6KL_DBG_USB, "setting up USB Pipes using interface\n"); + + /* walk descriptors and set up pipes */ + 
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + endpoint = &iface_desc->endpoint[i].desc; + + if (ATH6KL_USB_IS_BULK_EP(endpoint->bmAttributes)) { + ath6kl_dbg(ATH6KL_DBG_USB, + "%s Bulk Ep:0x%2.2X maxpktsz:%d\n", + ATH6KL_USB_IS_DIR_IN + (endpoint->bEndpointAddress) ? + "RX" : "TX", endpoint->bEndpointAddress, + le16_to_cpu(endpoint->wMaxPacketSize)); + } else if (ATH6KL_USB_IS_INT_EP(endpoint->bmAttributes)) { + ath6kl_dbg(ATH6KL_DBG_USB, + "%s Int Ep:0x%2.2X maxpktsz:%d interval:%d\n", + ATH6KL_USB_IS_DIR_IN + (endpoint->bEndpointAddress) ? + "RX" : "TX", endpoint->bEndpointAddress, + le16_to_cpu(endpoint->wMaxPacketSize), + endpoint->bInterval); + } else if (ATH6KL_USB_IS_ISOC_EP(endpoint->bmAttributes)) { + /* TODO for ISO */ + ath6kl_dbg(ATH6KL_DBG_USB, + "%s ISOC Ep:0x%2.2X maxpktsz:%d interval:%d\n", + ATH6KL_USB_IS_DIR_IN + (endpoint->bEndpointAddress) ? + "RX" : "TX", endpoint->bEndpointAddress, + le16_to_cpu(endpoint->wMaxPacketSize), + endpoint->bInterval); + } + urbcount = 0; + + pipe_num = + ath6kl_usb_get_logical_pipe_num(ar_usb, + endpoint->bEndpointAddress, + &urbcount); + if (pipe_num == ATH6KL_USB_PIPE_INVALID) + continue; + + pipe = &ar_usb->pipes[pipe_num]; + if (pipe->ar_usb != NULL) { + /* hmmm..pipe was already setup */ + continue; + } + + pipe->ar_usb = ar_usb; + pipe->logical_pipe_num = pipe_num; + pipe->ep_address = endpoint->bEndpointAddress; + pipe->max_packet_size = le16_to_cpu(endpoint->wMaxPacketSize); + + if (ATH6KL_USB_IS_BULK_EP(endpoint->bmAttributes)) { + if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvbulkpipe(ar_usb->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndbulkpipe(ar_usb->udev, + pipe->ep_address); + } + } else if (ATH6KL_USB_IS_INT_EP(endpoint->bmAttributes)) { + if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvintpipe(ar_usb->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndintpipe(ar_usb->udev, + pipe->ep_address); + } + } else if (ATH6KL_USB_IS_ISOC_EP(endpoint->bmAttributes)) { + /* TODO for ISO */ + if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvisocpipe(ar_usb->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndisocpipe(ar_usb->udev, + pipe->ep_address); + } + } + + pipe->ep_desc = endpoint; + + if (!ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) + pipe->flags |= ATH6KL_USB_PIPE_FLAG_TX; + + status = ath6kl_usb_alloc_pipe_resources(pipe, urbcount); + if (status != 0) + break; + } + + return status; +} + +/* pipe operations */ +static void ath6kl_usb_post_recv_transfers(struct ath6kl_usb_pipe *recv_pipe, + int buffer_length) +{ + struct ath6kl_urb_context *urb_context; + struct urb *urb; + int usb_status; + + while (true) { + urb_context = ath6kl_usb_alloc_urb_from_pipe(recv_pipe); + if (urb_context == NULL) + break; + + urb_context->skb = dev_alloc_skb(buffer_length); + if (urb_context->skb == NULL) + goto err_cleanup_urb; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (urb == NULL) + goto err_cleanup_urb; + + usb_fill_bulk_urb(urb, + recv_pipe->ar_usb->udev, + recv_pipe->usb_pipe_handle, + urb_context->skb->data, + buffer_length, + ath6kl_usb_recv_complete, urb_context); + + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "ath6kl usb: bulk recv submit:%d, 0x%X (ep:0x%2.2X), %d bytes buf:0x%p\n", + recv_pipe->logical_pipe_num, + recv_pipe->usb_pipe_handle, recv_pipe->ep_address, + buffer_length, urb_context->skb); + + usb_anchor_urb(urb, &recv_pipe->urb_submitted); + usb_status = 
usb_submit_urb(urb, GFP_ATOMIC); + + if (usb_status) { + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "ath6kl usb : usb bulk recv failed %d\n", + usb_status); + usb_unanchor_urb(urb); + usb_free_urb(urb); + goto err_cleanup_urb; + } + usb_free_urb(urb); + } + return; + +err_cleanup_urb: + ath6kl_usb_cleanup_recv_urb(urb_context); + return; +} + +static void ath6kl_usb_flush_all(struct ath6kl_usb *ar_usb) +{ + int i; + + for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++) { + if (ar_usb->pipes[i].ar_usb != NULL) + usb_kill_anchored_urbs(&ar_usb->pipes[i].urb_submitted); + } + + /* + * Flushing any pending I/O may schedule work this call will block + * until all scheduled work runs to completion. + */ + flush_scheduled_work(); +} + +static void ath6kl_usb_start_recv_pipes(struct ath6kl_usb *ar_usb) +{ + /* + * note: control pipe is no longer used + * ar_usb->pipes[ATH6KL_USB_PIPE_RX_CTRL].urb_cnt_thresh = + * ar_usb->pipes[ATH6KL_USB_PIPE_RX_CTRL].urb_alloc/2; + * ath6kl_usb_post_recv_transfers(&ar_usb-> + * pipes[ATH6KL_USB_PIPE_RX_CTRL], + * ATH6KL_USB_RX_BUFFER_SIZE); + */ + + ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_cnt_thresh = + ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_alloc / 2; + ath6kl_usb_post_recv_transfers(&ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA], + ATH6KL_USB_RX_BUFFER_SIZE); +} + +/* hif usb rx/tx completion functions */ +static void ath6kl_usb_recv_complete(struct urb *urb) +{ + struct ath6kl_urb_context *urb_context = urb->context; + struct ath6kl_usb_pipe *pipe = urb_context->pipe; + struct sk_buff *skb = NULL; + int status = 0; + + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "%s: recv pipe: %d, stat:%d, len:%d urb:0x%p\n", __func__, + pipe->logical_pipe_num, urb->status, urb->actual_length, + urb); + + if (urb->status != 0) { + status = -EIO; + switch (urb->status) { + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + /* + * no need to spew these errors when device + * removed or urb killed due to driver shutdown + */ + status = -ECANCELED; + break; + default: + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "%s recv pipe: %d (ep:0x%2.2X), failed:%d\n", + __func__, pipe->logical_pipe_num, + pipe->ep_address, urb->status); + break; + } + goto cleanup_recv_urb; + } + + if (urb->actual_length == 0) + goto cleanup_recv_urb; + + skb = urb_context->skb; + + /* we are going to pass it up */ + urb_context->skb = NULL; + skb_put(skb, urb->actual_length); + + /* note: queue implements a lock */ + skb_queue_tail(&pipe->io_comp_queue, skb); + schedule_work(&pipe->io_complete_work); + +cleanup_recv_urb: + ath6kl_usb_cleanup_recv_urb(urb_context); + + if (status == 0 && + pipe->urb_cnt >= pipe->urb_cnt_thresh) { + /* our free urbs are piling up, post more transfers */ + ath6kl_usb_post_recv_transfers(pipe, ATH6KL_USB_RX_BUFFER_SIZE); + } +} + +static void ath6kl_usb_usb_transmit_complete(struct urb *urb) +{ + struct ath6kl_urb_context *urb_context = urb->context; + struct ath6kl_usb_pipe *pipe = urb_context->pipe; + struct sk_buff *skb; + + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "%s: pipe: %d, stat:%d, len:%d\n", + __func__, pipe->logical_pipe_num, urb->status, + urb->actual_length); + + if (urb->status != 0) { + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "%s: pipe: %d, failed:%d\n", + __func__, pipe->logical_pipe_num, urb->status); + } + + skb = urb_context->skb; + urb_context->skb = NULL; + ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context); + + /* note: queue implements a lock */ + skb_queue_tail(&pipe->io_comp_queue, skb); + schedule_work(&pipe->io_complete_work); +} + +static void ath6kl_usb_io_comp_work(struct work_struct 
*work) +{ + struct ath6kl_usb_pipe *pipe = container_of(work, + struct ath6kl_usb_pipe, + io_complete_work); + struct ath6kl_usb *ar_usb; + struct sk_buff *skb; + + ar_usb = pipe->ar_usb; + + while ((skb = skb_dequeue(&pipe->io_comp_queue))) { + if (pipe->flags & ATH6KL_USB_PIPE_FLAG_TX) { + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "ath6kl usb xmit callback buf:0x%p\n", skb); + ath6kl_core_tx_complete(ar_usb->ar, skb); + } else { + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "ath6kl usb recv callback buf:0x%p\n", skb); + ath6kl_core_rx_complete(ar_usb->ar, skb, + pipe->logical_pipe_num); + } + } +} + #define ATH6KL_USB_MAX_DIAG_CMD (sizeof(struct ath6kl_usb_ctrl_diag_cmd_write)) #define ATH6KL_USB_MAX_DIAG_RESP (sizeof(struct ath6kl_usb_ctrl_diag_resp_read)) static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb) { + ath6kl_usb_flush_all(ar_usb); + + ath6kl_usb_cleanup_pipe_resources(ar_usb); + usb_set_intfdata(ar_usb->interface, NULL); kfree(ar_usb->diag_cmd_buffer); @@ -70,19 +614,28 @@ static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb) static struct ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface) { - struct ath6kl_usb *ar_usb = NULL; struct usb_device *dev = interface_to_usbdev(interface); + struct ath6kl_usb *ar_usb; + struct ath6kl_usb_pipe *pipe; int status = 0; + int i; ar_usb = kzalloc(sizeof(struct ath6kl_usb), GFP_KERNEL); if (ar_usb == NULL) goto fail_ath6kl_usb_create; - memset(ar_usb, 0, sizeof(struct ath6kl_usb)); usb_set_intfdata(interface, ar_usb); + spin_lock_init(&(ar_usb->cs_lock)); ar_usb->udev = dev; ar_usb->interface = interface; + for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++) { + pipe = &ar_usb->pipes[i]; + INIT_WORK(&pipe->io_complete_work, + ath6kl_usb_io_comp_work); + skb_queue_head_init(&pipe->io_comp_queue); + } + ar_usb->diag_cmd_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_CMD, GFP_KERNEL); if (ar_usb->diag_cmd_buffer == NULL) { status = -ENOMEM; @@ -96,6 +649,8 @@ static struct ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface) goto fail_ath6kl_usb_create; } + status = ath6kl_usb_setup_pipe_resources(ar_usb); + fail_ath6kl_usb_create: if (status != 0) { ath6kl_usb_destroy(ar_usb); @@ -114,11 +669,177 @@ static void ath6kl_usb_device_detached(struct usb_interface *interface) ath6kl_stop_txrx(ar_usb->ar); + /* Delay to wait for the target to reboot */ + mdelay(20); ath6kl_core_cleanup(ar_usb->ar); - ath6kl_usb_destroy(ar_usb); } +/* exported hif usb APIs for htc pipe */ +static void hif_start(struct ath6kl *ar) +{ + struct ath6kl_usb *device = ath6kl_usb_priv(ar); + int i; + + ath6kl_usb_start_recv_pipes(device); + + /* set the TX resource avail threshold for each TX pipe */ + for (i = ATH6KL_USB_PIPE_TX_CTRL; + i <= ATH6KL_USB_PIPE_TX_DATA_HP; i++) { + device->pipes[i].urb_cnt_thresh = + device->pipes[i].urb_alloc / 2; + } +} + +static int ath6kl_usb_send(struct ath6kl *ar, u8 PipeID, + struct sk_buff *hdr_skb, struct sk_buff *skb) +{ + struct ath6kl_usb *device = ath6kl_usb_priv(ar); + struct ath6kl_usb_pipe *pipe = &device->pipes[PipeID]; + struct ath6kl_urb_context *urb_context; + int usb_status, status = 0; + struct urb *urb; + u8 *data; + u32 len; + + ath6kl_dbg(ATH6KL_DBG_USB_BULK, "+%s pipe : %d, buf:0x%p\n", + __func__, PipeID, skb); + + urb_context = ath6kl_usb_alloc_urb_from_pipe(pipe); + + if (urb_context == NULL) { + /* + * TODO: it is possible to run out of urbs if + * 2 endpoints map to the same pipe ID + */ + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "%s pipe:%d no urbs left. 
URB Cnt : %d\n", + __func__, PipeID, pipe->urb_cnt); + status = -ENOMEM; + goto fail_hif_send; + } + + urb_context->skb = skb; + + data = skb->data; + len = skb->len; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (urb == NULL) { + status = -ENOMEM; + ath6kl_usb_free_urb_to_pipe(urb_context->pipe, + urb_context); + goto fail_hif_send; + } + + usb_fill_bulk_urb(urb, + device->udev, + pipe->usb_pipe_handle, + data, + len, + ath6kl_usb_usb_transmit_complete, urb_context); + + if ((len % pipe->max_packet_size) == 0) { + /* hit a max packet boundary on this pipe */ + urb->transfer_flags |= URB_ZERO_PACKET; + } + + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "athusb bulk send submit:%d, 0x%X (ep:0x%2.2X), %d bytes\n", + pipe->logical_pipe_num, pipe->usb_pipe_handle, + pipe->ep_address, len); + + usb_anchor_urb(urb, &pipe->urb_submitted); + usb_status = usb_submit_urb(urb, GFP_ATOMIC); + + if (usb_status) { + ath6kl_dbg(ATH6KL_DBG_USB_BULK, + "ath6kl usb : usb bulk transmit failed %d\n", + usb_status); + usb_unanchor_urb(urb); + ath6kl_usb_free_urb_to_pipe(urb_context->pipe, + urb_context); + status = -EINVAL; + } + usb_free_urb(urb); + +fail_hif_send: + return status; +} + +static void hif_stop(struct ath6kl *ar) +{ + struct ath6kl_usb *device = ath6kl_usb_priv(ar); + + ath6kl_usb_flush_all(device); +} + +static void ath6kl_usb_get_default_pipe(struct ath6kl *ar, + u8 *ul_pipe, u8 *dl_pipe) +{ + *ul_pipe = ATH6KL_USB_PIPE_TX_CTRL; + *dl_pipe = ATH6KL_USB_PIPE_RX_CTRL; +} + +static int ath6kl_usb_map_service_pipe(struct ath6kl *ar, u16 svc_id, + u8 *ul_pipe, u8 *dl_pipe) +{ + int status = 0; + + switch (svc_id) { + case HTC_CTRL_RSVD_SVC: + case WMI_CONTROL_SVC: + *ul_pipe = ATH6KL_USB_PIPE_TX_CTRL; + /* due to large control packets, shift to data pipe */ + *dl_pipe = ATH6KL_USB_PIPE_RX_DATA; + break; + case WMI_DATA_BE_SVC: + case WMI_DATA_BK_SVC: + *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP; + /* + * Disable rxdata2 directly, it will be enabled + * if FW enable rxdata2 + */ + *dl_pipe = ATH6KL_USB_PIPE_RX_DATA; + break; + case WMI_DATA_VI_SVC: + *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP; + /* + * Disable rxdata2 directly, it will be enabled + * if FW enable rxdata2 + */ + *dl_pipe = ATH6KL_USB_PIPE_RX_DATA; + break; + case WMI_DATA_VO_SVC: + *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_HP; + /* + * Disable rxdata2 directly, it will be enabled + * if FW enable rxdata2 + */ + *dl_pipe = ATH6KL_USB_PIPE_RX_DATA; + break; + default: + status = -EPERM; + break; + } + + return status; +} + +static u16 ath6kl_usb_get_free_queue_number(struct ath6kl *ar, u8 pipe_id) +{ + struct ath6kl_usb *device = ath6kl_usb_priv(ar); + + return device->pipes[pipe_id].urb_cnt; +} + +static void hif_detach_htc(struct ath6kl *ar) +{ + struct ath6kl_usb *device = ath6kl_usb_priv(ar); + + ath6kl_usb_flush_all(device); +} + static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb, u8 req, u16 value, u16 index, void *data, u32 size) @@ -301,14 +1022,21 @@ static int ath6kl_usb_bmi_write(struct ath6kl *ar, u8 *buf, u32 len) static int ath6kl_usb_power_on(struct ath6kl *ar) { + hif_start(ar); return 0; } static int ath6kl_usb_power_off(struct ath6kl *ar) { + hif_detach_htc(ar); return 0; } +static void ath6kl_usb_stop(struct ath6kl *ar) +{ + hif_stop(ar); +} + static const struct ath6kl_hif_ops ath6kl_usb_ops = { .diag_read32 = ath6kl_usb_diag_read32, .diag_write32 = ath6kl_usb_diag_write32, @@ -316,6 +1044,11 @@ static const struct ath6kl_hif_ops ath6kl_usb_ops = { .bmi_write = ath6kl_usb_bmi_write, .power_on = ath6kl_usb_power_on, .power_off = 
ath6kl_usb_power_off, + .stop = ath6kl_usb_stop, + .pipe_send = ath6kl_usb_send, + .pipe_get_default = ath6kl_usb_get_default_pipe, + .pipe_map_service = ath6kl_usb_map_service_pipe, + .pipe_get_free_queue_number = ath6kl_usb_get_free_queue_number, }; /* ath6kl usb driver registered functions */ @@ -368,7 +1101,7 @@ static int ath6kl_usb_probe(struct usb_interface *interface, ar_usb->ar = ar; - ret = ath6kl_core_init(ar); + ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_PIPE); if (ret) { ath6kl_err("Failed to init ath6kl core: %d\n", ret); goto err_core_free; @@ -392,6 +1125,46 @@ static void ath6kl_usb_remove(struct usb_interface *interface) ath6kl_usb_device_detached(interface); } +#ifdef CONFIG_PM + +static int ath6kl_usb_suspend(struct usb_interface *interface, + pm_message_t message) +{ + struct ath6kl_usb *device; + device = usb_get_intfdata(interface); + + ath6kl_usb_flush_all(device); + return 0; +} + +static int ath6kl_usb_resume(struct usb_interface *interface) +{ + struct ath6kl_usb *device; + device = usb_get_intfdata(interface); + + ath6kl_usb_post_recv_transfers(&device->pipes[ATH6KL_USB_PIPE_RX_DATA], + ATH6KL_USB_RX_BUFFER_SIZE); + ath6kl_usb_post_recv_transfers(&device->pipes[ATH6KL_USB_PIPE_RX_DATA2], + ATH6KL_USB_RX_BUFFER_SIZE); + + return 0; +} + +static int ath6kl_usb_reset_resume(struct usb_interface *intf) +{ + if (usb_get_intfdata(intf)) + ath6kl_usb_remove(intf); + return 0; +} + +#else + +#define ath6kl_usb_suspend NULL +#define ath6kl_usb_resume NULL +#define ath6kl_usb_reset_resume NULL + +#endif + /* table of devices that work with this driver */ static struct usb_device_id ath6kl_usb_ids[] = { {USB_DEVICE(0x0cf3, 0x9374)}, @@ -403,8 +1176,12 @@ MODULE_DEVICE_TABLE(usb, ath6kl_usb_ids); static struct usb_driver ath6kl_usb_driver = { .name = "ath6kl_usb", .probe = ath6kl_usb_probe, + .suspend = ath6kl_usb_suspend, + .resume = ath6kl_usb_resume, + .reset_resume = ath6kl_usb_reset_resume, .disconnect = ath6kl_usb_remove, .id_table = ath6kl_usb_ids, + .supports_autosuspend = true, }; static int ath6kl_usb_init(void) diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index 2b442332cd0..7c8a9977faf 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -2882,6 +2882,43 @@ int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx, return ret; } +int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx, + enum ieee80211_band band, + struct ath6kl_htcap *htcap) +{ + struct sk_buff *skb; + struct wmi_set_htcap_cmd *cmd; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_set_htcap_cmd *) skb->data; + + /* + * NOTE: Band in firmware matches enum ieee80211_band, it is unlikely + * this will be changed in firmware. If at all there is any change in + * band value, the host needs to be fixed. 
+ */ + cmd->band = band; + cmd->ht_enable = !!htcap->ht_enable; + cmd->ht20_sgi = !!(htcap->cap_info & IEEE80211_HT_CAP_SGI_20); + cmd->ht40_supported = + !!(htcap->cap_info & IEEE80211_HT_CAP_SUP_WIDTH_20_40); + cmd->ht40_sgi = !!(htcap->cap_info & IEEE80211_HT_CAP_SGI_40); + cmd->intolerant_40mhz = + !!(htcap->cap_info & IEEE80211_HT_CAP_40MHZ_INTOLERANT); + cmd->max_ampdu_len_exp = htcap->ampdu_factor; + + ath6kl_dbg(ATH6KL_DBG_WMI, + "Set htcap: band:%d ht_enable:%d 40mhz:%d sgi_20mhz:%d sgi_40mhz:%d 40mhz_intolerant:%d ampdu_len_exp:%d\n", + cmd->band, cmd->ht_enable, cmd->ht40_supported, + cmd->ht20_sgi, cmd->ht40_sgi, cmd->intolerant_40mhz, + cmd->max_ampdu_len_exp); + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_HT_CAP_CMDID, + NO_SYNC_WMIFLAG); +} + int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len) { struct sk_buff *skb; @@ -3032,6 +3069,9 @@ int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, const u8 *mac, cm->reason = cpu_to_le16(reason); cm->cmd = cmd; + ath6kl_dbg(ATH6KL_DBG_WMI, "ap_set_mlme: cmd=%d reason=%d\n", cm->cmd, + cm->reason); + return ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_SET_MLME_CMDID, NO_SYNC_WMIFLAG); } @@ -3181,6 +3221,29 @@ int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type, NO_SYNC_WMIFLAG); } +int ath6kl_wmi_set_ie_cmd(struct wmi *wmi, u8 if_idx, u8 ie_id, u8 ie_field, + const u8 *ie_info, u8 ie_len) +{ + struct sk_buff *skb; + struct wmi_set_ie_cmd *p; + + skb = ath6kl_wmi_get_new_buf(sizeof(*p) + ie_len); + if (!skb) + return -ENOMEM; + + ath6kl_dbg(ATH6KL_DBG_WMI, "set_ie_cmd: ie_id=%u ie_ie_field=%u ie_len=%u\n", + ie_id, ie_field, ie_len); + p = (struct wmi_set_ie_cmd *) skb->data; + p->ie_id = ie_id; + p->ie_field = ie_field; + p->ie_len = ie_len; + if (ie_info && ie_len > 0) + memcpy(p->ie_info, ie_info, ie_len); + + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_IE_CMDID, + NO_SYNC_WMIFLAG); +} + int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable) { struct sk_buff *skb; @@ -3392,6 +3455,23 @@ int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx) WMI_CANCEL_REMAIN_ON_CHNL_CMDID); } +int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout) +{ + struct sk_buff *skb; + struct wmi_set_inact_period_cmd *cmd; + + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_set_inact_period_cmd *) skb->data; + cmd->inact_period = cpu_to_le32(inact_timeout); + cmd->num_null_func = 0; + + return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_CONN_INACT_CMDID, + NO_SYNC_WMIFLAG); +} + static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb) { struct wmix_cmd_hdr *cmd; diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h index 4092e3e8079..d3d2ab5c168 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.h +++ b/drivers/net/wireless/ath/ath6kl/wmi.h @@ -182,6 +182,9 @@ enum wmi_data_hdr_flags { #define WMI_DATA_HDR_META_MASK 0x7 #define WMI_DATA_HDR_META_SHIFT 13 +#define WMI_DATA_HDR_PAD_BEFORE_DATA_MASK 0xFF +#define WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT 0x8 + /* Macros for operating on WMI_DATA_HDR (info3) field */ #define WMI_DATA_HDR_IF_IDX_MASK 0xF @@ -423,6 +426,7 @@ enum wmi_cmd_id { WMI_SET_FRAMERATES_CMDID, WMI_SET_AP_PS_CMDID, WMI_SET_QOS_SUPP_CMDID, + WMI_SET_IE_CMDID, /* WMI_THIN_RESERVED_... mark the start and end * values for WMI_THIN_RESERVED command IDs. 
These @@ -629,6 +633,11 @@ enum wmi_mgmt_frame_type { WMI_NUM_MGMT_FRAME }; +enum wmi_ie_field_type { + WMI_RSN_IE_CAPB = 0x1, + WMI_IE_FULL = 0xFF, /* indicats full IE */ +}; + /* WMI_CONNECT_CMDID */ enum network_type { INFRA_NETWORK = 0x01, @@ -1268,6 +1277,16 @@ struct wmi_mcast_filter_add_del_cmd { u8 mcast_mac[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE]; } __packed; +struct wmi_set_htcap_cmd { + u8 band; + u8 ht_enable; + u8 ht40_supported; + u8 ht20_sgi; + u8 ht40_sgi; + u8 intolerant_40mhz; + u8 max_ampdu_len_exp; +} __packed; + /* Command Replies */ /* WMI_GET_CHANNEL_LIST_CMDID reply */ @@ -1913,6 +1932,14 @@ struct wmi_set_appie_cmd { u8 ie_info[0]; } __packed; +struct wmi_set_ie_cmd { + u8 ie_id; + u8 ie_field; /* enum wmi_ie_field_type */ + u8 ie_len; + u8 reserved; + u8 ie_info[0]; +} __packed; + /* Notify the WSC registration status to the target */ #define WSC_REG_ACTIVE 1 #define WSC_REG_INACTIVE 0 @@ -2141,6 +2168,11 @@ struct wmi_ap_hidden_ssid_cmd { u8 hidden_ssid; } __packed; +struct wmi_set_inact_period_cmd { + __le32 inact_period; + u8 num_null_func; +} __packed; + /* AP mode events */ struct wmi_ap_set_apsd_cmd { u8 enable; @@ -2465,6 +2497,9 @@ int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi); int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg); int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx, u8 keep_alive_intvl); +int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx, + enum ieee80211_band band, + struct ath6kl_htcap *htcap); int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len); s32 ath6kl_wmi_get_rate(s8 rate_index); @@ -2515,6 +2550,9 @@ int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx, int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type, const u8 *ie, u8 ie_len); +int ath6kl_wmi_set_ie_cmd(struct wmi *wmi, u8 if_idx, u8 ie_id, u8 ie_field, + const u8 *ie_info, u8 ie_len); + /* P2P */ int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable); @@ -2538,6 +2576,8 @@ int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx); int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type, const u8 *ie, u8 ie_len); +int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout); + void ath6kl_wmi_sscan_timer(unsigned long ptr); struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx); diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile index 27d95fe5ade..3f0b8472378 100644 --- a/drivers/net/wireless/ath/ath9k/Makefile +++ b/drivers/net/wireless/ath/ath9k/Makefile @@ -11,7 +11,10 @@ ath9k-$(CONFIG_ATH9K_PCI) += pci.o ath9k-$(CONFIG_ATH9K_AHB) += ahb.o ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o -ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o +ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += \ + dfs.o \ + dfs_pattern_detector.o \ + dfs_pri_detector.o obj-$(CONFIG_ATH9K) += ath9k.o diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c index 7e0ea4e9833..b4c77f9d747 100644 --- a/drivers/net/wireless/ath/ath9k/ani.c +++ b/drivers/net/wireless/ath/ath9k/ani.c @@ -46,8 +46,8 @@ static const struct ani_ofdm_level_entry ofdm_level_table[] = { { 5, 4, 1 }, /* lvl 5 */ { 6, 5, 1 }, /* lvl 6 */ { 7, 6, 1 }, /* lvl 7 */ - { 7, 7, 1 }, /* lvl 8 */ - { 7, 8, 0 } /* lvl 9 */ + { 7, 6, 0 }, /* lvl 8 */ + { 7, 7, 0 } /* lvl 9 */ }; #define ATH9K_ANI_OFDM_NUM_LEVEL \ ARRAY_SIZE(ofdm_level_table) @@ -91,8 +91,8 @@ static const 
struct ani_cck_level_entry cck_level_table[] = { { 4, 0 }, /* lvl 4 */ { 5, 0 }, /* lvl 5 */ { 6, 0 }, /* lvl 6 */ - { 7, 0 }, /* lvl 7 (only for high rssi) */ - { 8, 0 } /* lvl 8 (only for high rssi) */ + { 6, 0 }, /* lvl 7 (only for high rssi) */ + { 7, 0 } /* lvl 8 (only for high rssi) */ }; #define ATH9K_ANI_CCK_NUM_LEVEL \ @@ -274,7 +274,9 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel) aniState->rssiThrLow, aniState->rssiThrHigh); if (aniState->update_ani) - aniState->ofdmNoiseImmunityLevel = immunityLevel; + aniState->ofdmNoiseImmunityLevel = + (immunityLevel > ATH9K_ANI_OFDM_DEF_LEVEL) ? + immunityLevel : ATH9K_ANI_OFDM_DEF_LEVEL; entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel]; entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel]; @@ -290,16 +292,9 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel) ATH9K_ANI_FIRSTEP_LEVEL, entry_ofdm->fir_step_level); - if ((ah->opmode != NL80211_IFTYPE_STATION && - ah->opmode != NL80211_IFTYPE_ADHOC) || - aniState->noiseFloor <= aniState->rssiThrHigh) { - if (aniState->ofdmWeakSigDetectOff) - /* force on ofdm weak sig detect */ - ath9k_hw_ani_control(ah, - ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, - true); - else if (aniState->ofdmWeakSigDetectOff == - entry_ofdm->ofdm_weak_signal_on) + if ((aniState->noiseFloor >= aniState->rssiThrHigh) && + (!aniState->ofdmWeakSigDetectOff != + entry_ofdm->ofdm_weak_signal_on)) { ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, entry_ofdm->ofdm_weak_signal_on); @@ -347,7 +342,9 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel) immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI; if (aniState->update_ani) - aniState->cckNoiseImmunityLevel = immunityLevel; + aniState->cckNoiseImmunityLevel = + (immunityLevel > ATH9K_ANI_CCK_DEF_LEVEL) ? 
+ immunityLevel : ATH9K_ANI_CCK_DEF_LEVEL; entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel]; entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel]; @@ -717,26 +714,30 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan) ofdmPhyErrRate, aniState->cckNoiseImmunityLevel, cckPhyErrRate, aniState->ofdmsTurn); - if (aniState->listenTime > 5 * ah->aniperiod) { - if (ofdmPhyErrRate <= ah->config.ofdm_trig_low && - cckPhyErrRate <= ah->config.cck_trig_low) { + if (aniState->listenTime > ah->aniperiod) { + if (cckPhyErrRate < ah->config.cck_trig_low && + ((ofdmPhyErrRate < ah->config.ofdm_trig_low && + aniState->ofdmNoiseImmunityLevel < + ATH9K_ANI_OFDM_DEF_LEVEL) || + (ofdmPhyErrRate < ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI && + aniState->ofdmNoiseImmunityLevel >= + ATH9K_ANI_OFDM_DEF_LEVEL))) { ath9k_hw_ani_lower_immunity(ah); aniState->ofdmsTurn = !aniState->ofdmsTurn; - } - ath9k_ani_restart(ah); - } else if (aniState->listenTime > ah->aniperiod) { - /* check to see if need to raise immunity */ - if (ofdmPhyErrRate > ah->config.ofdm_trig_high && - (cckPhyErrRate <= ah->config.cck_trig_high || - aniState->ofdmsTurn)) { + } else if ((ofdmPhyErrRate > ah->config.ofdm_trig_high && + aniState->ofdmNoiseImmunityLevel >= + ATH9K_ANI_OFDM_DEF_LEVEL) || + (ofdmPhyErrRate > + ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI && + aniState->ofdmNoiseImmunityLevel < + ATH9K_ANI_OFDM_DEF_LEVEL)) { ath9k_hw_ani_ofdm_err_trigger(ah); - ath9k_ani_restart(ah); aniState->ofdmsTurn = false; } else if (cckPhyErrRate > ah->config.cck_trig_high) { ath9k_hw_ani_cck_err_trigger(ah); - ath9k_ani_restart(ah); aniState->ofdmsTurn = true; } + ath9k_ani_restart(ah); } } EXPORT_SYMBOL(ath9k_hw_ani_monitor); diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h index 83029d6c7b2..72e2b874e17 100644 --- a/drivers/net/wireless/ath/ath9k/ani.h +++ b/drivers/net/wireless/ath/ath9k/ani.h @@ -25,11 +25,13 @@ /* units are errors per second */ #define ATH9K_ANI_OFDM_TRIG_HIGH_OLD 500 -#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW 1000 +#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW 3500 +#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000 /* units are errors per second */ #define ATH9K_ANI_OFDM_TRIG_LOW_OLD 200 #define ATH9K_ANI_OFDM_TRIG_LOW_NEW 400 +#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900 /* units are errors per second */ #define ATH9K_ANI_CCK_TRIG_HIGH_OLD 200 @@ -53,7 +55,7 @@ #define ATH9K_ANI_RSSI_THR_LOW 7 #define ATH9K_ANI_PERIOD_OLD 100 -#define ATH9K_ANI_PERIOD_NEW 1000 +#define ATH9K_ANI_PERIOD_NEW 300 /* in ms */ #define ATH9K_ANI_POLLINTERVAL_OLD 100 diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index aba088005b2..c7492c6a251 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -245,7 +245,6 @@ static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) REG_WRITE(ah, AR_PHY(0x37), reg32); ah->curchan = chan; - ah->curchan_rad_index = -1; return 0; } @@ -619,19 +618,10 @@ static void ar5008_hw_init_bb(struct ath_hw *ah, u32 synthDelay; synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; - if (IS_CHAN_B(chan)) - synthDelay = (4 * synthDelay) / 22; - else - synthDelay /= 10; - - if (IS_CHAN_HALF_RATE(chan)) - synthDelay *= 2; - else if (IS_CHAN_QUARTER_RATE(chan)) - synthDelay *= 4; REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); - udelay(synthDelay + BASE_ACTIVATE_DELAY); + ath9k_hw_synth_delay(ah, chan, synthDelay); } static void 
ar5008_hw_init_chain_masks(struct ath_hw *ah) @@ -949,12 +939,8 @@ static bool ar5008_hw_rfbus_req(struct ath_hw *ah) static void ar5008_hw_rfbus_done(struct ath_hw *ah) { u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; - if (IS_CHAN_B(ah->curchan)) - synthDelay = (4 * synthDelay) / 22; - else - synthDelay /= 10; - udelay(synthDelay + BASE_ACTIVATE_DELAY); + ath9k_hw_synth_delay(ah, ah->curchan, synthDelay); REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0); } @@ -1047,46 +1033,8 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah, break; } case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{ - static const int m1ThreshLow[] = { 127, 50 }; - static const int m2ThreshLow[] = { 127, 40 }; - static const int m1Thresh[] = { 127, 0x4d }; - static const int m2Thresh[] = { 127, 0x40 }; - static const int m2CountThr[] = { 31, 16 }; - static const int m2CountThrLow[] = { 63, 48 }; u32 on = param ? 1 : 0; - REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, - AR_PHY_SFCORR_LOW_M1_THRESH_LOW, - m1ThreshLow[on]); - REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, - AR_PHY_SFCORR_LOW_M2_THRESH_LOW, - m2ThreshLow[on]); - REG_RMW_FIELD(ah, AR_PHY_SFCORR, - AR_PHY_SFCORR_M1_THRESH, - m1Thresh[on]); - REG_RMW_FIELD(ah, AR_PHY_SFCORR, - AR_PHY_SFCORR_M2_THRESH, - m2Thresh[on]); - REG_RMW_FIELD(ah, AR_PHY_SFCORR, - AR_PHY_SFCORR_M2COUNT_THR, - m2CountThr[on]); - REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, - AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW, - m2CountThrLow[on]); - - REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, - AR_PHY_SFCORR_EXT_M1_THRESH_LOW, - m1ThreshLow[on]); - REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, - AR_PHY_SFCORR_EXT_M2_THRESH_LOW, - m2ThreshLow[on]); - REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, - AR_PHY_SFCORR_EXT_M1_THRESH, - m1Thresh[on]); - REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, - AR_PHY_SFCORR_EXT_M2_THRESH, - m2Thresh[on]); - if (on) REG_SET_BIT(ah, AR_PHY_SFCORR_LOW, AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c index aa2abaf31cb..8d78253c26c 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c @@ -136,6 +136,7 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) } if (sync_cause) { + ath9k_debug_sync_cause(common, sync_cause); fatal_int = (sync_cause & (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR)) diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c index 3cbbb033fce..846dd7974eb 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c @@ -152,7 +152,6 @@ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32); ah->curchan = chan; - ah->curchan_rad_index = -1; return 0; } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h index 46c79a3d473..952cb2b4656 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h @@ -777,11 +777,11 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = { {0x0000a074, 0x00000000}, {0x0000a078, 0x00000000}, {0x0000a07c, 0x00000000}, - {0x0000a080, 0x22222229}, - {0x0000a084, 0x1d1d1d1d}, - {0x0000a088, 0x1d1d1d1d}, - {0x0000a08c, 0x1d1d1d1d}, - {0x0000a090, 0x171d1d1d}, + {0x0000a080, 0x1a1a1a1a}, + {0x0000a084, 0x1a1a1a1a}, + {0x0000a088, 0x1a1a1a1a}, + {0x0000a08c, 0x1a1a1a1a}, + {0x0000a090, 0x171a1a1a}, {0x0000a094, 
0x11111717}, {0x0000a098, 0x00030311}, {0x0000a09c, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 63089cc1faf..a0387a027db 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -1000,10 +1000,12 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, if (mci && IS_CHAN_2GHZ(chan) && run_agc_cal) ar9003_mci_init_cal_req(ah, &is_reusable); - txiqcal_done = ar9003_hw_tx_iq_cal_run(ah); - REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS); - udelay(5); - REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); + if (!(IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))) { + txiqcal_done = ar9003_hw_tx_iq_cal_run(ah); + REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS); + udelay(5); + REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); + } skip_tx_iqcal: if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) { diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 6bb4db052bb..ac53d901801 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -30,11 +30,6 @@ #define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE) #define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE) #define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE) -#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */ -#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */ -#define PWRINCR_3_TO_1_CHAIN 9 /* 10*log(3)*2 */ -#define PWRINCR_3_TO_2_CHAIN 3 /* floor(10*log(3/2)*2) */ -#define PWRINCR_2_TO_1_CHAIN 6 /* 10*log(2)*2 */ #define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */ #define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */ @@ -2936,15 +2931,6 @@ static const struct ar9300_eeprom *ar9003_eeprom_struct_find_by_id(int id) #undef N_LOOP } - -static u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz) -{ - if (fbin == AR5416_BCHAN_UNUSED) - return fbin; - - return (u16) ((is2GHz) ? 
(2300 + fbin) : (4800 + 5 * fbin)); -} - static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah) { return 0; @@ -4070,7 +4056,7 @@ static u8 ar9003_hw_eeprom_get_tgt_pwr(struct ath_hw *ah, * targetpower piers stored on eeprom */ for (i = 0; i < numPiers; i++) { - freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz); + freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], is2GHz); targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex]; } @@ -4106,7 +4092,7 @@ static u8 ar9003_hw_eeprom_get_ht20_tgt_pwr(struct ath_hw *ah, * from targetpower piers stored on eeprom */ for (i = 0; i < numPiers; i++) { - freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz); + freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], is2GHz); targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex]; } @@ -4142,7 +4128,7 @@ static u8 ar9003_hw_eeprom_get_ht40_tgt_pwr(struct ath_hw *ah, * targetpower piers stored on eeprom */ for (i = 0; i < numPiers; i++) { - freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz); + freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], is2GHz); targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex]; } @@ -4167,7 +4153,7 @@ static u8 ar9003_hw_eeprom_get_cck_tgt_pwr(struct ath_hw *ah, * targetpower piers stored on eeprom */ for (i = 0; i < numPiers; i++) { - freqArray[i] = FBIN2FREQ(pFreqBin[i], 1); + freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], 1); targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex]; } @@ -4295,18 +4281,10 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) #undef POW_SM } -static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq, - u8 *targetPowerValT2) +static void ar9003_hw_get_legacy_target_powers(struct ath_hw *ah, u16 freq, + u8 *targetPowerValT2, + bool is2GHz) { - /* XXX: hard code for now, need to get from eeprom struct */ - u8 ht40PowerIncForPdadc = 0; - bool is2GHz = false; - unsigned int i = 0; - struct ath_common *common = ath9k_hw_common(ah); - - if (freq < 4000) - is2GHz = true; - targetPowerValT2[ALL_TARGET_LEGACY_6_24] = ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_6_24, freq, is2GHz); @@ -4319,6 +4297,11 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq, targetPowerValT2[ALL_TARGET_LEGACY_54] = ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_54, freq, is2GHz); +} + +static void ar9003_hw_get_cck_target_powers(struct ath_hw *ah, u16 freq, + u8 *targetPowerValT2) +{ targetPowerValT2[ALL_TARGET_LEGACY_1L_5L] = ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_1L_5L, freq); @@ -4328,6 +4311,11 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq, ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11L, freq); targetPowerValT2[ALL_TARGET_LEGACY_11S] = ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11S, freq); +} + +static void ar9003_hw_get_ht20_target_powers(struct ath_hw *ah, u16 freq, + u8 *targetPowerValT2, bool is2GHz) +{ targetPowerValT2[ALL_TARGET_HT20_0_8_16] = ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq, is2GHz); @@ -4370,6 +4358,16 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq, targetPowerValT2[ALL_TARGET_HT20_23] = ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_23, freq, is2GHz); +} + +static void ar9003_hw_get_ht40_target_powers(struct ath_hw *ah, + u16 freq, + u8 *targetPowerValT2, + bool is2GHz) +{ + /* XXX: hard code for now, need to get from eeprom struct */ + u8 ht40PowerIncForPdadc = 0; + targetPowerValT2[ALL_TARGET_HT40_0_8_16] = ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, 
HT_TARGET_RATE_0_8_16, freq, is2GHz) + ht40PowerIncForPdadc; @@ -4413,6 +4411,26 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq, targetPowerValT2[ALL_TARGET_HT40_23] = ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_23, freq, is2GHz) + ht40PowerIncForPdadc; +} + +static void ar9003_hw_get_target_power_eeprom(struct ath_hw *ah, + struct ath9k_channel *chan, + u8 *targetPowerValT2) +{ + bool is2GHz = IS_CHAN_2GHZ(chan); + unsigned int i = 0; + struct ath_common *common = ath9k_hw_common(ah); + u16 freq = chan->channel; + + if (is2GHz) + ar9003_hw_get_cck_target_powers(ah, freq, targetPowerValT2); + + ar9003_hw_get_legacy_target_powers(ah, freq, targetPowerValT2, is2GHz); + ar9003_hw_get_ht20_target_powers(ah, freq, targetPowerValT2, is2GHz); + + if (IS_CHAN_HT40(chan)) + ar9003_hw_get_ht40_target_powers(ah, freq, targetPowerValT2, + is2GHz); for (i = 0; i < ar9300RateSize; i++) { ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n", @@ -4464,7 +4482,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah, is2GHz = 1; } - *pfrequency = FBIN2FREQ(*pCalPier, is2GHz); + *pfrequency = ath9k_hw_fbin2freq(*pCalPier, is2GHz); *pcorrection = pCalPierStruct->refPower; *ptemperature = pCalPierStruct->tempMeas; *pvoltage = pCalPierStruct->voltMeas; @@ -4789,34 +4807,9 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah, bool is2ghz = IS_CHAN_2GHZ(chan); ath9k_hw_get_channel_centers(ah, chan, ¢ers); - scaledPower = powerLimit - antenna_reduction; - - /* - * Reduce scaled Power by number of chains active to get - * to per chain tx power level - */ - switch (ar5416_get_ntxchains(ah->txchainmask)) { - case 1: - break; - case 2: - if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN) - scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; - else - scaledPower = 0; - break; - case 3: - if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN) - scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; - else - scaledPower = 0; - break; - } + scaledPower = ath9k_hw_get_scaled_power(ah, powerLimit, + antenna_reduction); - scaledPower = max((u16)0, scaledPower); - - /* - * Get target powers from EEPROM - our baseline for TX Power - */ if (is2ghz) { /* Setup for CTL modes */ /* CTL_11B, CTL_11G, CTL_2GHT20 */ @@ -4988,7 +4981,12 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, unsigned int i = 0, paprd_scale_factor = 0; u8 pwr_idx, min_pwridx = 0; - ar9003_hw_set_target_power_eeprom(ah, chan->channel, targetPowerValT2); + memset(targetPowerValT2, 0 , sizeof(targetPowerValT2)); + + /* + * Get target powers from EEPROM - our baseline for TX Power + */ + ar9003_hw_get_target_power_eeprom(ah, chan, targetPowerValT2); if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) { if (IS_CHAN_2GHZ(chan)) @@ -5060,8 +5058,6 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, i, targetPowerValT2[i]); } - ah->txpower_limit = regulatory->max_power_level; - /* Write target power array to registers */ ar9003_hw_tx_power_regwrite(ah, targetPowerValT2); ar9003_hw_calibration_apply(ah, chan->channel); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index bb223fe8281..2505ac44f0c 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -42,7 +42,6 @@ #define AR9300_EEPMISC_WOW 0x02 #define AR9300_CUSTOMER_DATA_SIZE 20 -#define FBIN2FREQ(x, y) ((y) ? 
(2300 + x) : (4800 + 5 * x)) #define AR9300_MAX_CHAINS 3 #define AR9300_ANT_16S 25 #define AR9300_FUTURE_MODAL_SZ 6 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index 0f56e322dd3..a0e3394b10d 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c @@ -305,11 +305,6 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah) ar9462_common_rx_gain_table_2p0, ARRAY_SIZE(ar9462_common_rx_gain_table_2p0), 2); - INIT_INI_ARRAY(&ah->ini_BTCOEX_MAX_TXPWR, - ar9462_2p0_BTCOEX_MAX_TXPWR_table, - ARRAY_SIZE(ar9462_2p0_BTCOEX_MAX_TXPWR_table), - 2); - /* Awake -> Sleep Setting */ INIT_INI_ARRAY(&ah->iniPcieSerdes, PCIE_PLL_ON_CREQ_DIS_L1_2P0, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index a66a13b7684..d9e0824af09 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -306,6 +306,8 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) ar9003_mci_get_isr(ah, masked); if (sync_cause) { + ath9k_debug_sync_cause(common, sync_cause); + if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) { REG_WRITE(ah, AR_RC, AR_RC_HOSTIF); REG_WRITE(ah, AR_RC, 0); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 600aca9fe6b..11abb972be1 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -152,7 +152,6 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32); ah->curchan = chan; - ah->curchan_rad_index = -1; return 0; } @@ -209,11 +208,12 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah, continue; negative = 0; if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah)) - cur_bb_spur = FBIN2FREQ(spur_fbin_ptr[i], - IS_CHAN_2GHZ(chan)) - synth_freq; + cur_bb_spur = ath9k_hw_fbin2freq(spur_fbin_ptr[i], + IS_CHAN_2GHZ(chan)); else - cur_bb_spur = spur_freq[i] - synth_freq; + cur_bb_spur = spur_freq[i]; + cur_bb_spur -= synth_freq; if (cur_bb_spur < 0) { negative = 1; cur_bb_spur = -cur_bb_spur; @@ -443,7 +443,8 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah, ar9003_hw_spur_ofdm_clear(ah); for (i = 0; i < AR_EEPROM_MODAL_SPURS && spurChansPtr[i]; i++) { - freq_offset = FBIN2FREQ(spurChansPtr[i], mode) - synth_freq; + freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i], mode); + freq_offset -= synth_freq; if (abs(freq_offset) < range) { ar9003_hw_spur_ofdm_work(ah, chan, freq_offset); break; @@ -525,22 +526,10 @@ static void ar9003_hw_init_bb(struct ath_hw *ah, * Value is in 100ns increments. */ synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; - if (IS_CHAN_B(chan)) - synthDelay = (4 * synthDelay) / 22; - else - synthDelay /= 10; /* Activate the PHY (includes baseband activate + synthesizer on) */ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); - - /* - * There is an issue if the AP starts the calibration before - * the base band timeout completes. This could result in the - * rx_clear false triggering. As a workaround we add delay an - * extra BASE_ACTIVATE_DELAY usecs to ensure this condition - * does not happen. 
- */ - udelay(synthDelay + BASE_ACTIVATE_DELAY); + ath9k_hw_synth_delay(ah, chan, synthDelay); } static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx) @@ -684,9 +673,6 @@ static int ar9003_hw_process_ini(struct ath_hw *ah, REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites); - if (AR_SREV_9462(ah)) - ar9003_hw_prog_ini(ah, &ah->ini_BTCOEX_MAX_TXPWR, 1); - if (chan->channel == 2484) ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1); @@ -725,6 +711,14 @@ static void ar9003_hw_set_rfmode(struct ath_hw *ah, if (IS_CHAN_A_FAST_CLOCK(ah, chan)) rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE); + if (IS_CHAN_QUARTER_RATE(chan)) + rfMode |= AR_PHY_MODE_QUARTER; + if (IS_CHAN_HALF_RATE(chan)) + rfMode |= AR_PHY_MODE_HALF; + + if (rfMode & (AR_PHY_MODE_QUARTER | AR_PHY_MODE_HALF)) + REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL, + AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW, 3); REG_WRITE(ah, AR_PHY_MODE, rfMode); } @@ -795,12 +789,8 @@ static bool ar9003_hw_rfbus_req(struct ath_hw *ah) static void ar9003_hw_rfbus_done(struct ath_hw *ah) { u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; - if (IS_CHAN_B(ah->curchan)) - synthDelay = (4 * synthDelay) / 22; - else - synthDelay /= 10; - udelay(synthDelay + BASE_ACTIVATE_DELAY); + ath9k_hw_synth_delay(ah, ah->curchan, synthDelay); REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0); } @@ -823,55 +813,6 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, * on == 0 means more noise imm */ u32 on = param ? 1 : 0; - /* - * make register setting for default - * (weak sig detect ON) come from INI file - */ - int m1ThreshLow = on ? - aniState->iniDef.m1ThreshLow : m1ThreshLow_off; - int m2ThreshLow = on ? - aniState->iniDef.m2ThreshLow : m2ThreshLow_off; - int m1Thresh = on ? - aniState->iniDef.m1Thresh : m1Thresh_off; - int m2Thresh = on ? - aniState->iniDef.m2Thresh : m2Thresh_off; - int m2CountThr = on ? - aniState->iniDef.m2CountThr : m2CountThr_off; - int m2CountThrLow = on ? - aniState->iniDef.m2CountThrLow : m2CountThrLow_off; - int m1ThreshLowExt = on ? - aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off; - int m2ThreshLowExt = on ? - aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off; - int m1ThreshExt = on ? - aniState->iniDef.m1ThreshExt : m1ThreshExt_off; - int m2ThreshExt = on ? 
- aniState->iniDef.m2ThreshExt : m2ThreshExt_off; - - REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, - AR_PHY_SFCORR_LOW_M1_THRESH_LOW, - m1ThreshLow); - REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, - AR_PHY_SFCORR_LOW_M2_THRESH_LOW, - m2ThreshLow); - REG_RMW_FIELD(ah, AR_PHY_SFCORR, - AR_PHY_SFCORR_M1_THRESH, m1Thresh); - REG_RMW_FIELD(ah, AR_PHY_SFCORR, - AR_PHY_SFCORR_M2_THRESH, m2Thresh); - REG_RMW_FIELD(ah, AR_PHY_SFCORR, - AR_PHY_SFCORR_M2COUNT_THR, m2CountThr); - REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, - AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW, - m2CountThrLow); - - REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, - AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt); - REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, - AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt); - REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, - AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt); - REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, - AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt); if (on) REG_SET_BIT(ah, AR_PHY_SFCORR_LOW, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index d834d97fe72..7268a48a92a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -468,6 +468,9 @@ #define AR_PHY_ADDAC_PARA_CTL (AR_SM_BASE + 0x150) #define AR_PHY_XPA_CFG (AR_SM_BASE + 0x158) +#define AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW 3 +#define AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW_S 0 + #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A 0x0001FC00 #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A_S 10 #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h index b6ba1e8149b..1d6658e139b 100644 --- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h @@ -1115,9 +1115,9 @@ static const u32 ar9462_2p0_mac_core[][2] = { {0x000081f8, 0x00000000}, {0x000081fc, 0x00000000}, {0x00008240, 0x00100000}, - {0x00008244, 0x0010f400}, + {0x00008244, 0x0010f424}, {0x00008248, 0x00000800}, - {0x0000824c, 0x0001e800}, + {0x0000824c, 0x0001e848}, {0x00008250, 0x00000000}, {0x00008254, 0x00000000}, {0x00008258, 0x00000000}, @@ -1448,16 +1448,4 @@ static const u32 ar9462_common_mixed_rx_gain_table_2p0[][2] = { {0x0000b1fc, 0x00000196}, }; -static const u32 ar9462_2p0_BTCOEX_MAX_TXPWR_table[][2] = { - /* Addr allmodes */ - {0x000018c0, 0x10101010}, - {0x000018c4, 0x10101010}, - {0x000018c8, 0x10101010}, - {0x000018cc, 0x10101010}, - {0x000018d0, 0x10101010}, - {0x000018d4, 0x10101010}, - {0x000018d8, 0x10101010}, - {0x000018dc, 0x10101010}, -}; - #endif /* INITVALS_9462_2P0_H */ diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 8c84049682a..a277cf6f339 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -26,6 +26,7 @@ #include "debug.h" #include "common.h" #include "mci.h" +#include "dfs.h" /* * Header for the ath9k.ko driver core *only* -- hw code nor any other driver @@ -369,7 +370,7 @@ struct ath_vif { * number of beacon intervals, the game's up. 
*/ #define BSTUCK_THRESH 9 -#define ATH_BCBUF 4 +#define ATH_BCBUF 8 #define ATH_DEFAULT_BINTVAL 100 /* TU */ #define ATH_DEFAULT_BMISS_LIMIT 10 #define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024) @@ -430,6 +431,8 @@ void ath9k_set_beaconing_status(struct ath_softc *sc, bool status); void ath_reset_work(struct work_struct *work); void ath_hw_check(struct work_struct *work); void ath_hw_pll_work(struct work_struct *work); +void ath_rx_poll(unsigned long data); +void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon); void ath_paprd_calibrate(struct work_struct *work); void ath_ani_calibrate(unsigned long data); void ath_start_ani(struct ath_common *common); @@ -670,6 +673,7 @@ struct ath_softc { struct ath_beacon_config cur_beacon_conf; struct delayed_work tx_complete_work; struct delayed_work hw_pll_work; + struct timer_list rx_poll_timer; #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT struct ath_btcoex btcoex; @@ -680,6 +684,7 @@ struct ath_softc { struct ath_ant_comb ant_comb; u8 ant_tx, ant_rx; + struct dfs_pattern_detector *dfs_detector; }; void ath9k_tasklet(unsigned long data); diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index 626418222c8..11bc55e3d69 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -91,7 +91,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif, info.txpower = MAX_RATE_POWER; info.keyix = ATH9K_TXKEYIX_INVALID; info.keytype = ATH9K_KEY_TYPE_CLEAR; - info.flags = ATH9K_TXDESC_NOACK | ATH9K_TXDESC_INTREQ; + info.flags = ATH9K_TXDESC_NOACK | ATH9K_TXDESC_CLRDMASK; info.buf_addr[0] = bf->bf_buf_addr; info.buf_len[0] = roundup(skb->len, 4); @@ -359,6 +359,11 @@ void ath_beacon_tasklet(unsigned long data) int slot; u32 bfaddr, bc = 0; + if (work_pending(&sc->hw_reset_work)) { + ath_dbg(common, RESET, + "reset work is pending, skip beaconing now\n"); + return; + } /* * Check if the previous beacon has gone out. 
If * not don't try to post another, skip this period @@ -369,6 +374,9 @@ void ath_beacon_tasklet(unsigned long data) if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) { sc->beacon.bmisscnt++; + if (!ath9k_hw_check_alive(ah)) + ieee80211_queue_work(sc->hw, &sc->hw_check_work); + if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) { ath_dbg(common, BSTUCK, "missed %u consecutive beacons\n", @@ -378,6 +386,7 @@ void ath_beacon_tasklet(unsigned long data) ath9k_hw_bstuck_nfcal(ah); } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) { ath_dbg(common, BSTUCK, "beacon is officially stuck\n"); + sc->beacon.bmisscnt = 0; sc->sc_flags |= SC_OP_TSF_RESET; ieee80211_queue_work(sc->hw, &sc->hw_reset_work); } @@ -650,6 +659,8 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc, u32 tsf, intval, nexttbtt; ath9k_reset_beacon_status(sc); + if (!(sc->sc_flags & SC_OP_BEACONS)) + ath9k_hw_settsf64(ah, sc->beacon.bc_tstamp); intval = TU_TO_USEC(conf->beacon_interval); tsf = roundup(ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE), intval); @@ -806,8 +817,10 @@ void ath9k_set_beaconing_status(struct ath_softc *sc, bool status) { struct ath_hw *ah = sc->sc_ah; - if (!ath_has_valid_bslot(sc)) + if (!ath_has_valid_bslot(sc)) { + sc->sc_flags &= ~SC_OP_BEACONS; return; + } ath9k_ps_wakeup(sc); if (status) { diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c index ec327199341..1ca6da80d4a 100644 --- a/drivers/net/wireless/ath/ath9k/btcoex.c +++ b/drivers/net/wireless/ath/ath9k/btcoex.c @@ -108,9 +108,7 @@ void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah) return; } - if (AR_SREV_9462(ah)) { - btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI; - } else if (AR_SREV_9300_20_OR_LATER(ah)) { + if (AR_SREV_9300_20_OR_LATER(ah)) { btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE; btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300; btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300; @@ -284,11 +282,12 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah) ath9k_hw_btcoex_enable_2wire(ah); break; case ATH_BTCOEX_CFG_3WIRE: + if (AR_SREV_9462(ah)) { + ath9k_hw_btcoex_enable_mci(ah); + return; + } ath9k_hw_btcoex_enable_3wire(ah); break; - case ATH_BTCOEX_CFG_MCI: - ath9k_hw_btcoex_enable_mci(ah); - return; } REG_RMW(ah, AR_GPIO_PDPU, @@ -305,11 +304,12 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah) int i; btcoex_hw->enabled = false; - if (btcoex_hw->scheme == ATH_BTCOEX_CFG_MCI) { + if (AR_SREV_9462(ah)) { ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++) REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i), btcoex_hw->wlan_weight[i]); + return; } ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0); diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h index 8f93aef4414..3a1e1cfabd5 100644 --- a/drivers/net/wireless/ath/ath9k/btcoex.h +++ b/drivers/net/wireless/ath/ath9k/btcoex.h @@ -51,7 +51,6 @@ enum ath_btcoex_scheme { ATH_BTCOEX_CFG_NONE, ATH_BTCOEX_CFG_2WIRE, ATH_BTCOEX_CFG_3WIRE, - ATH_BTCOEX_CFG_MCI, }; struct ath9k_hw_mci { diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index ff47b32ecaf..fde700c4e49 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -380,63 +380,75 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; - char buf[512]; unsigned int len = 0; + int rv; + int mxlen = 4000; + char *buf = kmalloc(mxlen, 
GFP_KERNEL); + if (!buf) + return -ENOMEM; + +#define PR_IS(a, s) \ + do { \ + len += snprintf(buf + len, mxlen - len, \ + "%21s: %10u\n", a, \ + sc->debug.stats.istats.s); \ + } while (0) if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "RXHP", sc->debug.stats.istats.rxhp); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "WATCHDOG", - sc->debug.stats.istats.bb_watchdog); + PR_IS("RXLP", rxlp); + PR_IS("RXHP", rxhp); + PR_IS("WATHDOG", bb_watchdog); } else { - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok); + PR_IS("RX", rxok); } - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "RXEOL", sc->debug.stats.istats.rxeol); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "RXORN", sc->debug.stats.istats.rxorn); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "TX", sc->debug.stats.istats.txok); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "TXURN", sc->debug.stats.istats.txurn); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "MIB", sc->debug.stats.istats.mib); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "RXPHY", sc->debug.stats.istats.rxphyerr); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "RXKCM", sc->debug.stats.istats.rx_keycache_miss); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "SWBA", sc->debug.stats.istats.swba); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "BMISS", sc->debug.stats.istats.bmiss); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "BNR", sc->debug.stats.istats.bnr); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "CST", sc->debug.stats.istats.cst); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "GTT", sc->debug.stats.istats.gtt); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "TIM", sc->debug.stats.istats.tim); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "CABEND", sc->debug.stats.istats.cabend); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "DTIMSYNC", sc->debug.stats.istats.dtimsync); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "DTIM", sc->debug.stats.istats.dtim); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "TSFOOR", sc->debug.stats.istats.tsfoor); - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "TOTAL", sc->debug.stats.istats.total); - - - if (len > sizeof(buf)) - len = sizeof(buf); - - return simple_read_from_buffer(user_buf, count, ppos, buf, len); + PR_IS("RXEOL", rxeol); + PR_IS("RXORN", rxorn); + PR_IS("TX", txok); + PR_IS("TXURN", txurn); + PR_IS("MIB", mib); + PR_IS("RXPHY", rxphyerr); + PR_IS("RXKCM", rx_keycache_miss); + PR_IS("SWBA", swba); + PR_IS("BMISS", bmiss); + PR_IS("BNR", bnr); + PR_IS("CST", cst); + PR_IS("GTT", gtt); + PR_IS("TIM", tim); + PR_IS("CABEND", cabend); + PR_IS("DTIMSYNC", dtimsync); + PR_IS("DTIM", dtim); + PR_IS("TSFOOR", tsfoor); + PR_IS("TOTAL", total); + + len += snprintf(buf + len, mxlen - len, + "SYNC_CAUSE stats:\n"); + + PR_IS("Sync-All", sync_cause_all); + PR_IS("RTC-IRQ", sync_rtc_irq); + PR_IS("MAC-IRQ", sync_mac_irq); + PR_IS("EEPROM-Illegal-Access", eeprom_illegal_access); + PR_IS("APB-Timeout", apb_timeout); + PR_IS("PCI-Mode-Conflict", pci_mode_conflict); + 
PR_IS("HOST1-Fatal", host1_fatal); + PR_IS("HOST1-Perr", host1_perr); + PR_IS("TRCV-FIFO-Perr", trcv_fifo_perr); + PR_IS("RADM-CPL-EP", radm_cpl_ep); + PR_IS("RADM-CPL-DLLP-Abort", radm_cpl_dllp_abort); + PR_IS("RADM-CPL-TLP-Abort", radm_cpl_tlp_abort); + PR_IS("RADM-CPL-ECRC-Err", radm_cpl_ecrc_err); + PR_IS("RADM-CPL-Timeout", radm_cpl_timeout); + PR_IS("Local-Bus-Timeout", local_timeout); + PR_IS("PM-Access", pm_access); + PR_IS("MAC-Awake", mac_awake); + PR_IS("MAC-Asleep", mac_asleep); + PR_IS("MAC-Sleep-Access", mac_sleep_access); + + if (len > mxlen) + len = mxlen; + + rv = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + return rv; } static const struct file_operations fops_interrupt = { @@ -524,6 +536,7 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf, PR("hw-put-tx-buf: ", puttxbuf); PR("hw-tx-start: ", txstart); PR("hw-tx-proc-desc: ", txprocdesc); + PR("TX-Failed: ", txfailed); len += snprintf(buf + len, size - len, "%s%11p%11p%10p%10p\n", "txq-memory-address:", sc->tx.txq_map[WME_AC_BE], @@ -880,6 +893,13 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf, len += snprintf(buf + len, size - len, "%22s : %10u\n", s, \ sc->debug.stats.rxstats.phy_err_stats[p]); +#define RXS_ERR(s, e) \ + do { \ + len += snprintf(buf + len, size - len, \ + "%22s : %10u\n", s, \ + sc->debug.stats.rxstats.e); \ + } while (0) + struct ath_softc *sc = file->private_data; char *buf; unsigned int len = 0, size = 1600; @@ -889,27 +909,18 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf, if (buf == NULL) return -ENOMEM; - len += snprintf(buf + len, size - len, - "%22s : %10u\n", "CRC ERR", - sc->debug.stats.rxstats.crc_err); - len += snprintf(buf + len, size - len, - "%22s : %10u\n", "DECRYPT CRC ERR", - sc->debug.stats.rxstats.decrypt_crc_err); - len += snprintf(buf + len, size - len, - "%22s : %10u\n", "PHY ERR", - sc->debug.stats.rxstats.phy_err); - len += snprintf(buf + len, size - len, - "%22s : %10u\n", "MIC ERR", - sc->debug.stats.rxstats.mic_err); - len += snprintf(buf + len, size - len, - "%22s : %10u\n", "PRE-DELIM CRC ERR", - sc->debug.stats.rxstats.pre_delim_crc_err); - len += snprintf(buf + len, size - len, - "%22s : %10u\n", "POST-DELIM CRC ERR", - sc->debug.stats.rxstats.post_delim_crc_err); - len += snprintf(buf + len, size - len, - "%22s : %10u\n", "DECRYPT BUSY ERR", - sc->debug.stats.rxstats.decrypt_busy_err); + RXS_ERR("CRC ERR", crc_err); + RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err); + RXS_ERR("PHY ERR", phy_err); + RXS_ERR("MIC ERR", mic_err); + RXS_ERR("PRE-DELIM CRC ERR", pre_delim_crc_err); + RXS_ERR("POST-DELIM CRC ERR", post_delim_crc_err); + RXS_ERR("DECRYPT BUSY ERR", decrypt_busy_err); + RXS_ERR("RX-LENGTH-ERR", rx_len_err); + RXS_ERR("RX-OOM-ERR", rx_oom_err); + RXS_ERR("RX-RATE-ERR", rx_rate_err); + RXS_ERR("RX-DROP-RXFLUSH", rx_drop_rxflush); + RXS_ERR("RX-TOO-MANY-FRAGS", rx_too_many_frags_err); PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN); PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING); @@ -938,12 +949,10 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf, PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL); PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL); - len += snprintf(buf + len, size - len, - "%22s : %10u\n", "RX-Pkts-All", - sc->debug.stats.rxstats.rx_pkts_all); - len += snprintf(buf + len, size - len, - "%22s : %10u\n", "RX-Bytes-All", - sc->debug.stats.rxstats.rx_bytes_all); + RXS_ERR("RX-Pkts-All", rx_pkts_all); + 
RXS_ERR("RX-Bytes-All", rx_bytes_all); + RXS_ERR("RX-Beacons", rx_beacons); + RXS_ERR("RX-Frags", rx_frags); if (len > size) len = size; @@ -953,12 +962,12 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf, return retval; +#undef RXS_ERR #undef PHY_ERR } void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs) { -#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++ #define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++ #define RX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].rs\ [sc->debug.rsidx].c) @@ -1004,7 +1013,6 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs) #endif -#undef RX_STAT_INC #undef RX_PHY_ERR_INC #undef RX_SAMP_DBG } diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index 64fcfad467b..c34da09d910 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -60,6 +60,7 @@ struct ath_buf; * @tsfoor: TSF out of range, indicates that the corrected TSF received * from a beacon differs from the PCU's internal TSF by more than a * (programmable) threshold + * @local_timeout: Internal bus timeout. */ struct ath_interrupt_stats { u32 total; @@ -85,8 +86,30 @@ struct ath_interrupt_stats { u32 dtim; u32 bb_watchdog; u32 tsfoor; + + /* Sync-cause stats */ + u32 sync_cause_all; + u32 sync_rtc_irq; + u32 sync_mac_irq; + u32 eeprom_illegal_access; + u32 apb_timeout; + u32 pci_mode_conflict; + u32 host1_fatal; + u32 host1_perr; + u32 trcv_fifo_perr; + u32 radm_cpl_ep; + u32 radm_cpl_dllp_abort; + u32 radm_cpl_tlp_abort; + u32 radm_cpl_ecrc_err; + u32 radm_cpl_timeout; + u32 local_timeout; + u32 pm_access; + u32 mac_awake; + u32 mac_asleep; + u32 mac_sleep_access; }; + /** * struct ath_tx_stats - Statistics about TX * @tx_pkts_all: No. of total frames transmitted, including ones that @@ -113,6 +136,7 @@ struct ath_interrupt_stats { * @puttxbuf: Number of times hardware was given txbuf to write. * @txstart: Number of times hardware was told to start tx. * @txprocdesc: Number of times tx descriptor was processed + * @txfailed: Out-of-memory or other errors in xmit path. */ struct ath_tx_stats { u32 tx_pkts_all; @@ -135,8 +159,11 @@ struct ath_tx_stats { u32 puttxbuf; u32 txstart; u32 txprocdesc; + u32 txfailed; }; +#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++) + /** * struct ath_rx_stats - RX Statistics * @rx_pkts_all: No. of total frames received, including ones that @@ -153,6 +180,13 @@ struct ath_tx_stats { * @post_delim_crc_err: Post-Frame delimiter CRC error detections * @decrypt_busy_err: Decryption interruptions counter * @phy_err_stats: Individual PHY error statistics + * @rx_len_err: No. of frames discarded due to bad length. + * @rx_oom_err: No. of frames dropped due to OOM issues. + * @rx_rate_err: No. of frames dropped due to rate errors. + * @rx_too_many_frags_err: Frames dropped due to too-many-frags received. + * @rx_drop_rxflush: No. of frames dropped due to RX-FLUSH. + * @rx_beacons: No. of beacons received. + * @rx_frags: No. of rx-fragements received. 
*/ struct ath_rx_stats { u32 rx_pkts_all; @@ -165,6 +199,13 @@ struct ath_rx_stats { u32 post_delim_crc_err; u32 decrypt_busy_err; u32 phy_err_stats[ATH9K_PHYERR_MAX]; + u32 rx_len_err; + u32 rx_oom_err; + u32 rx_rate_err; + u32 rx_too_many_frags_err; + u32 rx_drop_rxflush; + u32 rx_beacons; + u32 rx_frags; }; enum ath_reset_type { @@ -174,6 +215,7 @@ enum ath_reset_type { RESET_TYPE_TX_ERROR, RESET_TYPE_TX_HANG, RESET_TYPE_PLL_HANG, + RESET_TYPE_MAC_HANG, __RESET_TYPE_MAX }; @@ -247,6 +289,8 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs); #else +#define RX_STAT_INC(c) /* NOP */ + static inline int ath9k_init_debug(struct ath_hw *ah) { return 0; diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c index f4f56aff1e9..ecc81792f2d 100644 --- a/drivers/net/wireless/ath/ath9k/dfs.c +++ b/drivers/net/wireless/ath/ath9k/dfs.c @@ -21,17 +21,6 @@ #include "dfs.h" #include "dfs_debug.h" -/* - * TODO: move into or synchronize this with generic header - * as soon as IF is defined - */ -struct dfs_radar_pulse { - u16 freq; - u64 ts; - u32 width; - u8 rssi; -}; - /* internal struct to pass radar data */ struct ath_radar_data { u8 pulse_bw_info; @@ -60,44 +49,44 @@ static u32 dur_to_usecs(struct ath_hw *ah, u32 dur) #define EXT_CH_RADAR_FOUND 0x02 static bool ath9k_postprocess_radar_event(struct ath_softc *sc, - struct ath_radar_data *are, - struct dfs_radar_pulse *drp) + struct ath_radar_data *ard, + struct pulse_event *pe) { u8 rssi; u16 dur; ath_dbg(ath9k_hw_common(sc->sc_ah), DFS, "pulse_bw_info=0x%x, pri,ext len/rssi=(%u/%u, %u/%u)\n", - are->pulse_bw_info, - are->pulse_length_pri, are->rssi, - are->pulse_length_ext, are->ext_rssi); + ard->pulse_bw_info, + ard->pulse_length_pri, ard->rssi, + ard->pulse_length_ext, ard->ext_rssi); /* * Only the last 2 bits of the BW info are relevant, they indicate * which channel the radar was detected in. */ - are->pulse_bw_info &= 0x03; + ard->pulse_bw_info &= 0x03; - switch (are->pulse_bw_info) { + switch (ard->pulse_bw_info) { case PRI_CH_RADAR_FOUND: /* radar in ctrl channel */ - dur = are->pulse_length_pri; + dur = ard->pulse_length_pri; DFS_STAT_INC(sc, pri_phy_errors); /* * cannot use ctrl channel RSSI * if extension channel is stronger */ - rssi = (are->ext_rssi >= (are->rssi + 3)) ? 0 : are->rssi; + rssi = (ard->ext_rssi >= (ard->rssi + 3)) ? 0 : ard->rssi; break; case EXT_CH_RADAR_FOUND: /* radar in extension channel */ - dur = are->pulse_length_ext; + dur = ard->pulse_length_ext; DFS_STAT_INC(sc, ext_phy_errors); /* * cannot use extension channel RSSI * if control channel is stronger */ - rssi = (are->rssi >= (are->ext_rssi + 12)) ? 0 : are->ext_rssi; + rssi = (ard->rssi >= (ard->ext_rssi + 12)) ? 0 : ard->ext_rssi; break; case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND): /* @@ -107,14 +96,14 @@ ath9k_postprocess_radar_event(struct ath_softc *sc, * Radiated testing, when pulse is on DC, different pri and * ext durations are reported, so take the larger of the two */ - if (are->pulse_length_ext >= are->pulse_length_pri) - dur = are->pulse_length_ext; + if (ard->pulse_length_ext >= ard->pulse_length_pri) + dur = ard->pulse_length_ext; else - dur = are->pulse_length_pri; + dur = ard->pulse_length_pri; DFS_STAT_INC(sc, dc_phy_errors); /* when both are present use stronger one */ - rssi = (are->rssi < are->ext_rssi) ? are->ext_rssi : are->rssi; + rssi = (ard->rssi < ard->ext_rssi) ? 
ard->ext_rssi : ard->rssi; break; default: /* @@ -137,8 +126,8 @@ ath9k_postprocess_radar_event(struct ath_softc *sc, */ /* convert duration to usecs */ - drp->width = dur_to_usecs(sc->sc_ah, dur); - drp->rssi = rssi; + pe->width = dur_to_usecs(sc->sc_ah, dur); + pe->rssi = rssi; DFS_STAT_INC(sc, pulses_detected); return true; @@ -155,15 +144,17 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, struct ath_radar_data ard; u16 datalen; char *vdata_end; - struct dfs_radar_pulse drp; + struct pulse_event pe; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); - if ((!(rs->rs_phyerr != ATH9K_PHYERR_RADAR)) && - (!(rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT))) { + DFS_STAT_INC(sc, pulses_total); + if ((rs->rs_phyerr != ATH9K_PHYERR_RADAR) && + (rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT)) { ath_dbg(common, DFS, "Error: rs_phyer=0x%x not a radar error\n", rs->rs_phyerr); + DFS_STAT_INC(sc, pulses_no_dfs); return; } @@ -189,27 +180,22 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, ard.pulse_bw_info = vdata_end[-1]; ard.pulse_length_ext = vdata_end[-2]; ard.pulse_length_pri = vdata_end[-3]; - - ath_dbg(common, DFS, - "bw_info=%d, length_pri=%d, length_ext=%d, " - "rssi_pri=%d, rssi_ext=%d\n", - ard.pulse_bw_info, ard.pulse_length_pri, ard.pulse_length_ext, - ard.rssi, ard.ext_rssi); - - drp.freq = ah->curchan->channel; - drp.ts = mactime; - if (ath9k_postprocess_radar_event(sc, &ard, &drp)) { + pe.freq = ah->curchan->channel; + pe.ts = mactime; + if (ath9k_postprocess_radar_event(sc, &ard, &pe)) { + struct dfs_pattern_detector *pd = sc->dfs_detector; static u64 last_ts; ath_dbg(common, DFS, "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, " "width=%d, rssi=%d, delta_ts=%llu\n", - drp.freq, drp.ts, drp.width, drp.rssi, drp.ts-last_ts); - last_ts = drp.ts; - /* - * TODO: forward pulse to pattern detector - * - * ieee80211_add_radar_pulse(drp.freq, drp.ts, - * drp.width, drp.rssi); - */ + pe.freq, pe.ts, pe.width, pe.rssi, pe.ts-last_ts); + last_ts = pe.ts; + DFS_STAT_INC(sc, pulses_processed); + if (pd != NULL && pd->add_pulse(pd, &pe)) { + DFS_STAT_INC(sc, radar_detected); + /* + * TODO: forward radar event to DFS management layer + */ + } } } diff --git a/drivers/net/wireless/ath/ath9k/dfs.h b/drivers/net/wireless/ath/ath9k/dfs.h index c2412857f12..3c839f06a06 100644 --- a/drivers/net/wireless/ath/ath9k/dfs.h +++ b/drivers/net/wireless/ath/ath9k/dfs.h @@ -17,6 +17,7 @@ #ifndef ATH9K_DFS_H #define ATH9K_DFS_H +#include "dfs_pattern_detector.h" #if defined(CONFIG_ATH9K_DFS_CERTIFIED) /** @@ -31,13 +32,14 @@ * * The radar information provided as raw payload data is validated and * filtered for false pulses. Events passing all tests are forwarded to - * the upper layer for pattern detection. + * the DFS detector for pattern detection. 
*/ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, struct ath_rx_status *rs, u64 mactime); #else -static inline void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, - struct ath_rx_status *rs, u64 mactime) { } +static inline void +ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, + struct ath_rx_status *rs, u64 mactime) { } #endif #endif /* ATH9K_DFS_H */ diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c index 4364c103ed3..55d28072ade 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_debug.c +++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c @@ -21,9 +21,15 @@ #include "ath9k.h" #include "dfs_debug.h" + +struct ath_dfs_pool_stats global_dfs_pool_stats = { 0 }; + #define ATH9K_DFS_STAT(s, p) \ len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \ sc->debug.stats.dfs_stats.p); +#define ATH9K_DFS_POOL_STAT(s, p) \ + len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \ + global_dfs_pool_stats.p); static ssize_t read_file_dfs(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -43,6 +49,9 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf, hw_ver->macVersion, hw_ver->macRev, (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ? "enabled" : "disabled"); + len += snprintf(buf + len, size - len, "Pulse detector statistics:\n"); + ATH9K_DFS_STAT("pulse events reported ", pulses_total); + ATH9K_DFS_STAT("invalid pulse events ", pulses_no_dfs); ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected); ATH9K_DFS_STAT("Datalen discards ", datalen_discards); ATH9K_DFS_STAT("RSSI discards ", rssi_discards); @@ -50,6 +59,18 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf, ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors); ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors); ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors); + len += snprintf(buf + len, size - len, "Radar detector statistics " + "(current DFS region: %d)\n", sc->dfs_detector->region); + ATH9K_DFS_STAT("Pulse events processed ", pulses_processed); + ATH9K_DFS_STAT("Radars detected ", radar_detected); + len += snprintf(buf + len, size - len, "Global Pool statistics:\n"); + ATH9K_DFS_POOL_STAT("Pool references ", pool_reference); + ATH9K_DFS_POOL_STAT("Pulses allocated ", pulse_allocated); + ATH9K_DFS_POOL_STAT("Pulses alloc error ", pulse_alloc_error); + ATH9K_DFS_POOL_STAT("Pulses in use ", pulse_used); + ATH9K_DFS_POOL_STAT("Seqs. allocated ", pseq_allocated); + ATH9K_DFS_POOL_STAT("Seqs. alloc error ", pseq_alloc_error); + ATH9K_DFS_POOL_STAT("Seqs. 
in use ", pseq_used); if (len > size) len = size; @@ -60,8 +81,33 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf, return retval; } +/* magic number to prevent accidental reset of DFS statistics */ +#define DFS_STATS_RESET_MAGIC 0x80000000 +static ssize_t write_file_dfs(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + unsigned long val; + char buf[32]; + ssize_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + if (strict_strtoul(buf, 0, &val)) + return -EINVAL; + + if (val == DFS_STATS_RESET_MAGIC) + memset(&sc->debug.stats.dfs_stats, 0, + sizeof(sc->debug.stats.dfs_stats)); + return count; +} + static const struct file_operations fops_dfs_stats = { .read = read_file_dfs, + .write = write_file_dfs, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h index 4911724cb44..e36810a4b58 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_debug.h +++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h @@ -22,17 +22,23 @@ #include "hw.h" /** - * struct ath_dfs_stats - DFS Statistics - * - * @pulses_detected: No. of pulses detected so far - * @datalen_discards: No. of pulses discarded due to invalid datalen - * @rssi_discards: No. of pulses discarded due to invalid RSSI - * @bwinfo_discards: No. of pulses discarded due to invalid BW info - * @pri_phy_errors: No. of pulses reported for primary channel - * @ext_phy_errors: No. of pulses reported for extension channel - * @dc_phy_errors: No. of pulses reported for primary + extension channel + * struct ath_dfs_stats - DFS Statistics per wiphy + * @pulses_total: pulses reported by HW + * @pulses_no_dfs: pulses wrongly reported as DFS + * @pulses_detected: pulses detected so far + * @datalen_discards: pulses discarded due to invalid datalen + * @rssi_discards: pulses discarded due to invalid RSSI + * @bwinfo_discards: pulses discarded due to invalid BW info + * @pri_phy_errors: pulses reported for primary channel + * @ext_phy_errors: pulses reported for extension channel + * @dc_phy_errors: pulses reported for primary + extension channel + * @pulses_processed: pulses forwarded to detector + * @radar_detected: radars detected */ struct ath_dfs_stats { + /* pulse stats */ + u32 pulses_total; + u32 pulses_no_dfs; u32 pulses_detected; u32 datalen_discards; u32 rssi_discards; @@ -40,18 +46,39 @@ struct ath_dfs_stats { u32 pri_phy_errors; u32 ext_phy_errors; u32 dc_phy_errors; + /* pattern detection stats */ + u32 pulses_processed; + u32 radar_detected; }; +/** + * struct ath_dfs_pool_stats - DFS Statistics for global pools + */ +struct ath_dfs_pool_stats { + u32 pool_reference; + u32 pulse_allocated; + u32 pulse_alloc_error; + u32 pulse_used; + u32 pseq_allocated; + u32 pseq_alloc_error; + u32 pseq_used; +}; #if defined(CONFIG_ATH9K_DFS_DEBUGFS) #define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++) void ath9k_dfs_init_debug(struct ath_softc *sc); +#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++) +#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--) +extern struct ath_dfs_pool_stats global_dfs_pool_stats; + #else #define DFS_STAT_INC(sc, c) do { } while (0) static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { } +#define DFS_POOL_STAT_INC(c) do { } while (0) +#define DFS_POOL_STAT_DEC(c) do { } while (0) #endif /* CONFIG_ATH9K_DFS_DEBUGFS */ #endif 
/* ATH9K_DFS_DEBUG_H */ diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c new file mode 100644 index 00000000000..ea2a6cf7ef2 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c @@ -0,0 +1,300 @@ +/* + * Copyright (c) 2012 Neratec Solutions AG + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/slab.h> +#include <linux/export.h> + +#include "dfs_pattern_detector.h" +#include "dfs_pri_detector.h" + +/* + * tolerated deviation of radar time stamp in usecs on both sides + * TODO: this might need to be HW-dependent + */ +#define PRI_TOLERANCE 16 + +/** + * struct radar_types - contains array of patterns defined for one DFS domain + * @domain: DFS regulatory domain + * @num_radar_types: number of radar types to follow + * @radar_types: radar types array + */ +struct radar_types { + enum nl80211_dfs_regions region; + u32 num_radar_types; + const struct radar_detector_specs *radar_types; +}; + +/* percentage on ppb threshold to trigger detection */ +#define MIN_PPB_THRESH 50 +#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100) +#define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF) + +#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \ +{ \ + ID, WMIN, WMAX, (PRF2PRI(PMAX) - PRI_TOLERANCE), \ + (PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF, \ + PPB_THRESH(PPB), PRI_TOLERANCE, \ +} + +/* radar types as defined by ETSI EN-301-893 v1.5.1 */ +static const struct radar_detector_specs etsi_radar_ref_types_v15[] = { + ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18), + ETSI_PATTERN(1, 0, 5, 200, 1000, 1, 10), + ETSI_PATTERN(2, 0, 15, 200, 1600, 1, 15), + ETSI_PATTERN(3, 0, 15, 2300, 4000, 1, 25), + ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20), + ETSI_PATTERN(5, 0, 2, 300, 400, 3, 10), + ETSI_PATTERN(6, 0, 2, 400, 1200, 3, 15), +}; + +static const struct radar_types etsi_radar_types_v15 = { + .region = NL80211_DFS_ETSI, + .num_radar_types = ARRAY_SIZE(etsi_radar_ref_types_v15), + .radar_types = etsi_radar_ref_types_v15, +}; + +/* for now, we support ETSI radar types, FCC and JP are TODO */ +static const struct radar_types *dfs_domains[] = { + &etsi_radar_types_v15, +}; + +/** + * get_dfs_domain_radar_types() - get radar types for a given DFS domain + * @param domain DFS domain + * @return radar_types ptr on success, NULL if DFS domain is not supported + */ +static const struct radar_types * +get_dfs_domain_radar_types(enum nl80211_dfs_regions region) +{ + u32 i; + for (i = 0; i < ARRAY_SIZE(dfs_domains); i++) { + if (dfs_domains[i]->region == region) + return dfs_domains[i]; + } + return NULL; +} + +/** + * struct channel_detector - detector elements for a DFS channel + * @head: list_head + * @freq: frequency for this channel detector in MHz + * @detectors: array of dynamically created detector 
elements for this freq + * + * Channel detectors are required to provide multi-channel DFS detection, e.g. + * to support off-channel scanning. A pattern detector has a list of channels + * radar pulses have been reported for in the past. + */ +struct channel_detector { + struct list_head head; + u16 freq; + struct pri_detector **detectors; +}; + +/* channel_detector_reset() - reset detector lines for a given channel */ +static void channel_detector_reset(struct dfs_pattern_detector *dpd, + struct channel_detector *cd) +{ + u32 i; + if (cd == NULL) + return; + for (i = 0; i < dpd->num_radar_types; i++) + cd->detectors[i]->reset(cd->detectors[i], dpd->last_pulse_ts); +} + +/* channel_detector_exit() - destructor */ +static void channel_detector_exit(struct dfs_pattern_detector *dpd, + struct channel_detector *cd) +{ + u32 i; + if (cd == NULL) + return; + list_del(&cd->head); + for (i = 0; i < dpd->num_radar_types; i++) { + struct pri_detector *de = cd->detectors[i]; + if (de != NULL) + de->exit(de); + } + kfree(cd->detectors); + kfree(cd); +} + +static struct channel_detector * +channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq) +{ + u32 sz, i; + struct channel_detector *cd; + + cd = kmalloc(sizeof(*cd), GFP_KERNEL); + if (cd == NULL) + goto fail; + + INIT_LIST_HEAD(&cd->head); + cd->freq = freq; + sz = sizeof(cd->detectors) * dpd->num_radar_types; + cd->detectors = kzalloc(sz, GFP_KERNEL); + if (cd->detectors == NULL) + goto fail; + + for (i = 0; i < dpd->num_radar_types; i++) { + const struct radar_detector_specs *rs = &dpd->radar_spec[i]; + struct pri_detector *de = pri_detector_init(rs); + if (de == NULL) + goto fail; + cd->detectors[i] = de; + } + list_add(&cd->head, &dpd->channel_detectors); + return cd; + +fail: + pr_err("failed to allocate channel_detector for freq=%d\n", freq); + channel_detector_exit(dpd, cd); + return NULL; +} + +/** + * channel_detector_get() - get channel detector for given frequency + * @param dpd instance pointer + * @param freq frequency in MHz + * @return pointer to channel detector on success, NULL otherwise + * + * Return existing channel detector for the given frequency or return a + * newly created one. 
+ */ +static struct channel_detector * +channel_detector_get(struct dfs_pattern_detector *dpd, u16 freq) +{ + struct channel_detector *cd; + list_for_each_entry(cd, &dpd->channel_detectors, head) { + if (cd->freq == freq) + return cd; + } + return channel_detector_create(dpd, freq); +} + +/* + * DFS Pattern Detector + */ + +/* dpd_reset(): reset all channel detectors */ +static void dpd_reset(struct dfs_pattern_detector *dpd) +{ + struct channel_detector *cd; + if (!list_empty(&dpd->channel_detectors)) + list_for_each_entry(cd, &dpd->channel_detectors, head) + channel_detector_reset(dpd, cd); + +} +static void dpd_exit(struct dfs_pattern_detector *dpd) +{ + struct channel_detector *cd, *cd0; + if (!list_empty(&dpd->channel_detectors)) + list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head) + channel_detector_exit(dpd, cd); + kfree(dpd); +} + +static bool +dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event) +{ + u32 i; + bool ts_wraparound; + struct channel_detector *cd; + + if (dpd->region == NL80211_DFS_UNSET) { + /* + * pulses received for a non-supported or un-initialized + * domain are treated as detected radars + */ + return true; + } + + cd = channel_detector_get(dpd, event->freq); + if (cd == NULL) + return false; + + ts_wraparound = (event->ts < dpd->last_pulse_ts); + dpd->last_pulse_ts = event->ts; + if (ts_wraparound) { + /* + * reset detector on time stamp wraparound + * with monotonic time stamps, this should never happen + */ + pr_warn("DFS: time stamp wraparound detected, resetting\n"); + dpd_reset(dpd); + } + /* do type individual pattern matching */ + for (i = 0; i < dpd->num_radar_types; i++) { + if (cd->detectors[i]->add_pulse(cd->detectors[i], event) != 0) { + channel_detector_reset(dpd, cd); + return true; + } + } + return false; +} + +static bool dpd_set_domain(struct dfs_pattern_detector *dpd, + enum nl80211_dfs_regions region) +{ + const struct radar_types *rt; + struct channel_detector *cd, *cd0; + + if (dpd->region == region) + return true; + + dpd->region = NL80211_DFS_UNSET; + + rt = get_dfs_domain_radar_types(region); + if (rt == NULL) + return false; + + /* delete all channel detectors for previous DFS domain */ + if (!list_empty(&dpd->channel_detectors)) + list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head) + channel_detector_exit(dpd, cd); + dpd->radar_spec = rt->radar_types; + dpd->num_radar_types = rt->num_radar_types; + + dpd->region = region; + return true; +} + +static struct dfs_pattern_detector default_dpd = { + .exit = dpd_exit, + .set_domain = dpd_set_domain, + .add_pulse = dpd_add_pulse, + .region = NL80211_DFS_UNSET, +}; + +struct dfs_pattern_detector * +dfs_pattern_detector_init(enum nl80211_dfs_regions region) +{ + struct dfs_pattern_detector *dpd; + dpd = kmalloc(sizeof(*dpd), GFP_KERNEL); + if (dpd == NULL) { + pr_err("allocation of dfs_pattern_detector failed\n"); + return NULL; + } + *dpd = default_dpd; + INIT_LIST_HEAD(&dpd->channel_detectors); + + if (dpd->set_domain(dpd, region)) + return dpd; + + pr_err("Could not set DFS domain to %d. 
", region); + return NULL; +} +EXPORT_SYMBOL(dfs_pattern_detector_init); diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h new file mode 100644 index 00000000000..fd0328a3099 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2012 Neratec Solutions AG + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef DFS_PATTERN_DETECTOR_H +#define DFS_PATTERN_DETECTOR_H + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/nl80211.h> + +/** + * struct pulse_event - describing pulses reported by PHY + * @ts: pulse time stamp in us + * @freq: channel frequency in MHz + * @width: pulse duration in us + * @rssi: rssi of radar event + */ +struct pulse_event { + u64 ts; + u16 freq; + u8 width; + u8 rssi; +}; + +/** + * struct radar_detector_specs - detector specs for a radar pattern type + * @type_id: pattern type, as defined by regulatory + * @width_min: minimum radar pulse width in [us] + * @width_max: maximum radar pulse width in [us] + * @pri_min: minimum pulse repetition interval in [us] (including tolerance) + * @pri_max: minimum pri in [us] (including tolerance) + * @num_pri: maximum number of different pri for this type + * @ppb: pulses per bursts for this type + * @ppb_thresh: number of pulses required to trigger detection + * @max_pri_tolerance: pulse time stamp tolerance on both sides [us] + */ +struct radar_detector_specs { + u8 type_id; + u8 width_min; + u8 width_max; + u16 pri_min; + u16 pri_max; + u8 num_pri; + u8 ppb; + u8 ppb_thresh; + u8 max_pri_tolerance; +}; + +/** + * struct dfs_pattern_detector - DFS pattern detector + * @exit(): destructor + * @set_domain(): set DFS domain, resets detector lines upon domain changes + * @add_pulse(): add radar pulse to detector, returns true on detection + * @region: active DFS region, NL80211_DFS_UNSET until set + * @num_radar_types: number of different radar types + * @last_pulse_ts: time stamp of last valid pulse in usecs + * @radar_detector_specs: array of radar detection specs + * @channel_detectors: list connecting channel_detector elements + */ +struct dfs_pattern_detector { + void (*exit)(struct dfs_pattern_detector *dpd); + bool (*set_domain)(struct dfs_pattern_detector *dpd, + enum nl80211_dfs_regions region); + bool (*add_pulse)(struct dfs_pattern_detector *dpd, + struct pulse_event *pe); + + enum nl80211_dfs_regions region; + u8 num_radar_types; + u64 last_pulse_ts; + + const struct radar_detector_specs *radar_spec; + struct list_head channel_detectors; +}; + +/** + * dfs_pattern_detector_init() - constructor for pattern detector class + * @param region: DFS domain to be used, can be NL80211_DFS_UNSET at creation + * @return instance pointer on success, NULL otherwise + */ +#if 
defined(CONFIG_ATH9K_DFS_CERTIFIED) +extern struct dfs_pattern_detector * +dfs_pattern_detector_init(enum nl80211_dfs_regions region); +#else +static inline struct dfs_pattern_detector * +dfs_pattern_detector_init(enum nl80211_dfs_regions region) +{ + return NULL; +} +#endif /* CONFIG_ATH9K_DFS_CERTIFIED */ + +#endif /* DFS_PATTERN_DETECTOR_H */ diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c new file mode 100644 index 00000000000..91b8dceeadb --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c @@ -0,0 +1,452 @@ +/* + * Copyright (c) 2012 Neratec Solutions AG + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include "ath9k.h" +#include "dfs_pattern_detector.h" +#include "dfs_pri_detector.h" +#include "dfs_debug.h" + +/** + * struct pri_sequence - sequence of pulses matching one PRI + * @head: list_head + * @pri: pulse repetition interval (PRI) in usecs + * @dur: duration of sequence in usecs + * @count: number of pulses in this sequence + * @count_falses: number of not matching pulses in this sequence + * @first_ts: time stamp of first pulse in usecs + * @last_ts: time stamp of last pulse in usecs + * @deadline_ts: deadline when this sequence becomes invalid (first_ts + dur) + */ +struct pri_sequence { + struct list_head head; + u32 pri; + u32 dur; + u32 count; + u32 count_falses; + u64 first_ts; + u64 last_ts; + u64 deadline_ts; +}; + +/** + * struct pulse_elem - elements in pulse queue + * @ts: time stamp in usecs + */ +struct pulse_elem { + struct list_head head; + u64 ts; +}; + +/** + * pde_get_multiple() - get number of multiples considering a given tolerance + * @return factor if abs(val - factor*fraction) <= tolerance, 0 otherwise + */ +static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance) +{ + u32 remainder; + u32 factor; + u32 delta; + + if (fraction == 0) + return 0; + + delta = (val < fraction) ? (fraction - val) : (val - fraction); + + if (delta <= tolerance) + /* val and fraction are within tolerance */ + return 1; + + factor = val / fraction; + remainder = val % fraction; + if (remainder > tolerance) { + /* no exact match */ + if ((fraction - remainder) <= tolerance) + /* remainder is within tolerance */ + factor++; + else + factor = 0; + } + return factor; +} + +/** + * DOC: Singleton Pulse and Sequence Pools + * + * Instances of pri_sequence and pulse_elem are kept in singleton pools to + * reduce the number of dynamic allocations. They are shared between all + * instances and grow up to the peak number of simultaneously used objects. + * + * Memory is freed after all references to the pools are released. 
+ */ +static u32 singleton_pool_references; +static LIST_HEAD(pulse_pool); +static LIST_HEAD(pseq_pool); +static DEFINE_SPINLOCK(pool_lock); + +static void pool_register_ref(void) +{ + spin_lock_bh(&pool_lock); + singleton_pool_references++; + DFS_POOL_STAT_INC(pool_reference); + spin_unlock_bh(&pool_lock); +} + +static void pool_deregister_ref(void) +{ + spin_lock_bh(&pool_lock); + singleton_pool_references--; + DFS_POOL_STAT_DEC(pool_reference); + if (singleton_pool_references == 0) { + /* free singleton pools with no references left */ + struct pri_sequence *ps, *ps0; + struct pulse_elem *p, *p0; + + list_for_each_entry_safe(p, p0, &pulse_pool, head) { + list_del(&p->head); + DFS_POOL_STAT_DEC(pulse_allocated); + kfree(p); + } + list_for_each_entry_safe(ps, ps0, &pseq_pool, head) { + list_del(&ps->head); + DFS_POOL_STAT_DEC(pseq_allocated); + kfree(ps); + } + } + spin_unlock_bh(&pool_lock); +} + +static void pool_put_pulse_elem(struct pulse_elem *pe) +{ + spin_lock_bh(&pool_lock); + list_add(&pe->head, &pulse_pool); + DFS_POOL_STAT_DEC(pulse_used); + spin_unlock_bh(&pool_lock); +} + +static void pool_put_pseq_elem(struct pri_sequence *pse) +{ + spin_lock_bh(&pool_lock); + list_add(&pse->head, &pseq_pool); + DFS_POOL_STAT_DEC(pseq_used); + spin_unlock_bh(&pool_lock); +} + +static struct pri_sequence *pool_get_pseq_elem(void) +{ + struct pri_sequence *pse = NULL; + spin_lock_bh(&pool_lock); + if (!list_empty(&pseq_pool)) { + pse = list_first_entry(&pseq_pool, struct pri_sequence, head); + list_del(&pse->head); + DFS_POOL_STAT_INC(pseq_used); + } + spin_unlock_bh(&pool_lock); + return pse; +} + +static struct pulse_elem *pool_get_pulse_elem(void) +{ + struct pulse_elem *pe = NULL; + spin_lock_bh(&pool_lock); + if (!list_empty(&pulse_pool)) { + pe = list_first_entry(&pulse_pool, struct pulse_elem, head); + list_del(&pe->head); + DFS_POOL_STAT_INC(pulse_used); + } + spin_unlock_bh(&pool_lock); + return pe; +} + +static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde) +{ + struct list_head *l = &pde->pulses; + if (list_empty(l)) + return NULL; + return list_entry(l->prev, struct pulse_elem, head); +} + +static bool pulse_queue_dequeue(struct pri_detector *pde) +{ + struct pulse_elem *p = pulse_queue_get_tail(pde); + if (p != NULL) { + list_del_init(&p->head); + pde->count--; + /* give it back to pool */ + pool_put_pulse_elem(p); + } + return (pde->count > 0); +} + +/* remove pulses older than window */ +static void pulse_queue_check_window(struct pri_detector *pde) +{ + u64 min_valid_ts; + struct pulse_elem *p; + + /* there is no delta time with less than 2 pulses */ + if (pde->count < 2) + return; + + if (pde->last_ts <= pde->window_size) + return; + + min_valid_ts = pde->last_ts - pde->window_size; + while ((p = pulse_queue_get_tail(pde)) != NULL) { + if (p->ts >= min_valid_ts) + return; + pulse_queue_dequeue(pde); + } +} + +static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts) +{ + struct pulse_elem *p = pool_get_pulse_elem(); + if (p == NULL) { + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (p == NULL) { + DFS_POOL_STAT_INC(pulse_alloc_error); + return false; + } + DFS_POOL_STAT_INC(pulse_allocated); + DFS_POOL_STAT_INC(pulse_used); + } + INIT_LIST_HEAD(&p->head); + p->ts = ts; + list_add(&p->head, &pde->pulses); + pde->count++; + pde->last_ts = ts; + pulse_queue_check_window(pde); + if (pde->count >= pde->max_count) + pulse_queue_dequeue(pde); + return true; +} + +static bool pseq_handler_create_sequences(struct pri_detector *pde, + u64 ts, u32 min_count) +{ + 
struct pulse_elem *p; + list_for_each_entry(p, &pde->pulses, head) { + struct pri_sequence ps, *new_ps; + struct pulse_elem *p2; + u32 tmp_false_count; + u64 min_valid_ts; + u32 delta_ts = ts - p->ts; + + if (delta_ts < pde->rs->pri_min) + /* ignore too small pri */ + continue; + + if (delta_ts > pde->rs->pri_max) + /* stop on too large pri (sorted list) */ + break; + + /* build a new sequence with new potential pri */ + ps.count = 2; + ps.count_falses = 0; + ps.first_ts = p->ts; + ps.last_ts = ts; + ps.pri = ts - p->ts; + ps.dur = ps.pri * (pde->rs->ppb - 1) + + 2 * pde->rs->max_pri_tolerance; + + p2 = p; + tmp_false_count = 0; + min_valid_ts = ts - ps.dur; + /* check which past pulses are candidates for new sequence */ + list_for_each_entry_continue(p2, &pde->pulses, head) { + u32 factor; + if (p2->ts < min_valid_ts) + /* stop on crossing window border */ + break; + /* check if pulse match (multi)PRI */ + factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri, + pde->rs->max_pri_tolerance); + if (factor > 0) { + ps.count++; + ps.first_ts = p2->ts; + /* + * on match, add the intermediate falses + * and reset counter + */ + ps.count_falses += tmp_false_count; + tmp_false_count = 0; + } else { + /* this is a potential false one */ + tmp_false_count++; + } + } + if (ps.count < min_count) + /* did not reach minimum count, drop sequence */ + continue; + + /* this is a valid one, add it */ + ps.deadline_ts = ps.first_ts + ps.dur; + new_ps = pool_get_pseq_elem(); + if (new_ps == NULL) { + new_ps = kmalloc(sizeof(*new_ps), GFP_KERNEL); + if (new_ps == NULL) { + DFS_POOL_STAT_INC(pseq_alloc_error); + return false; + } + DFS_POOL_STAT_INC(pseq_allocated); + DFS_POOL_STAT_INC(pseq_used); + } + memcpy(new_ps, &ps, sizeof(ps)); + INIT_LIST_HEAD(&new_ps->head); + list_add(&new_ps->head, &pde->sequences); + } + return true; +} + +/* check new ts and add to all matching existing sequences */ +static u32 +pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts) +{ + u32 max_count = 0; + struct pri_sequence *ps, *ps2; + list_for_each_entry_safe(ps, ps2, &pde->sequences, head) { + u32 delta_ts; + u32 factor; + + /* first ensure that sequence is within window */ + if (ts > ps->deadline_ts) { + list_del_init(&ps->head); + pool_put_pseq_elem(ps); + continue; + } + + delta_ts = ts - ps->last_ts; + factor = pde_get_multiple(delta_ts, ps->pri, + pde->rs->max_pri_tolerance); + if (factor > 0) { + ps->last_ts = ts; + ps->count++; + + if (max_count < ps->count) + max_count = ps->count; + } else { + ps->count_falses++; + } + } + return max_count; +} + +static struct pri_sequence * +pseq_handler_check_detection(struct pri_detector *pde) +{ + struct pri_sequence *ps; + + if (list_empty(&pde->sequences)) + return NULL; + + list_for_each_entry(ps, &pde->sequences, head) { + /* + * we assume to have enough matching confidence if we + * 1) have enough pulses + * 2) have more matching than false pulses + */ + if ((ps->count >= pde->rs->ppb_thresh) && + (ps->count * pde->rs->num_pri >= ps->count_falses)) + return ps; + } + return NULL; +} + + +/* free pulse queue and sequences list and give objects back to pools */ +static void pri_detector_reset(struct pri_detector *pde, u64 ts) +{ + struct pri_sequence *ps, *ps0; + struct pulse_elem *p, *p0; + list_for_each_entry_safe(ps, ps0, &pde->sequences, head) { + list_del_init(&ps->head); + pool_put_pseq_elem(ps); + } + list_for_each_entry_safe(p, p0, &pde->pulses, head) { + list_del_init(&p->head); + pool_put_pulse_elem(p); + } + pde->count = 0; + pde->last_ts = ts; +} + 
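A quick illustration (not part of the patch, values are hypothetical): the sequence matching above hinges on pde_get_multiple(), which accepts a pulse gap only if it lies within max_pri_tolerance of an integer multiple of the candidate PRI. A stand-alone user-space sketch of that rule, using the 16 us tolerance from the ETSI table:

#include <stdio.h>
#include <stdint.h>

/* stand-alone copy of the rule implemented by pde_get_multiple() above:
 * returns n if |val - n * fraction| <= tolerance, otherwise 0 */
static uint32_t get_multiple(uint32_t val, uint32_t fraction, uint32_t tolerance)
{
	uint32_t remainder, factor, delta;

	if (fraction == 0)
		return 0;

	delta = (val < fraction) ? (fraction - val) : (val - fraction);
	if (delta <= tolerance)
		return 1;

	factor = val / fraction;
	remainder = val % fraction;
	if (remainder > tolerance) {
		if ((fraction - remainder) <= tolerance)
			factor++;	/* just short of the next multiple */
		else
			factor = 0;	/* no multiple within tolerance */
	}
	return factor;
}

int main(void)
{
	printf("%u\n", get_multiple(1010, 1000, 16));	/* 1: 10 us off 1 x PRI */
	printf("%u\n", get_multiple(2988, 1000, 16));	/* 3: 12 us off 3 x PRI */
	printf("%u\n", get_multiple(2950, 1000, 16));	/* 0: 50 us off, rejected */
	return 0;
}

A pulse whose gap to a sequence's last_ts yields a non-zero factor extends that sequence; otherwise it only bumps count_falses, which pseq_handler_check_detection() later weighs against ppb_thresh before declaring a radar.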
+static void pri_detector_exit(struct pri_detector *de) +{ + pri_detector_reset(de, 0); + pool_deregister_ref(); + kfree(de); +} + +static bool pri_detector_add_pulse(struct pri_detector *de, + struct pulse_event *event) +{ + u32 max_updated_seq; + struct pri_sequence *ps; + u64 ts = event->ts; + const struct radar_detector_specs *rs = de->rs; + + /* ignore pulses not within width range */ + if ((rs->width_min > event->width) || (rs->width_max < event->width)) + return false; + + if ((ts - de->last_ts) < rs->max_pri_tolerance) + /* if delta to last pulse is too short, don't use this pulse */ + return false; + de->last_ts = ts; + + max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts); + + if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) { + pr_err("failed to create pulse sequences\n"); + pri_detector_reset(de, ts); + return false; + } + + ps = pseq_handler_check_detection(de); + + if (ps != NULL) { + pr_info("DFS: radar found: pri=%d, count=%d, count_false=%d\n", + ps->pri, ps->count, ps->count_falses); + pri_detector_reset(de, ts); + return true; + } + pulse_queue_enqueue(de, ts); + return false; +} + +struct pri_detector * +pri_detector_init(const struct radar_detector_specs *rs) +{ + struct pri_detector *de; + de = kzalloc(sizeof(*de), GFP_KERNEL); + if (de == NULL) + return NULL; + de->exit = pri_detector_exit; + de->add_pulse = pri_detector_add_pulse; + de->reset = pri_detector_reset; + + INIT_LIST_HEAD(&de->sequences); + INIT_LIST_HEAD(&de->pulses); + de->window_size = rs->pri_max * rs->ppb * rs->num_pri; + de->max_count = rs->ppb * 2; + de->rs = rs; + + pool_register_ref(); + return de; +} diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h new file mode 100644 index 00000000000..81cde9f28e4 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2012 Neratec Solutions AG + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef DFS_PRI_DETECTOR_H +#define DFS_PRI_DETECTOR_H + +#include <linux/list.h> + +/** + * struct pri_detector - PRI detector element for a dedicated radar type + * @exit(): destructor + * @add_pulse(): add pulse event, returns true if pattern was detected + * @reset(): clear states and reset to given time stamp + * @rs: detector specs for this detector element + * @last_ts: last pulse time stamp considered for this element in usecs + * @sequences: list_head holding potential pulse sequences + * @pulses: list connecting pulse_elem objects + * @count: number of pulses in queue + * @max_count: maximum number of pulses to be queued + * @window_size: window size back from newest pulse time stamp in usecs + */ +struct pri_detector { + void (*exit) (struct pri_detector *de); + bool (*add_pulse)(struct pri_detector *de, struct pulse_event *e); + void (*reset) (struct pri_detector *de, u64 ts); + +/* private: internal use only */ + const struct radar_detector_specs *rs; + u64 last_ts; + struct list_head sequences; + struct list_head pulses; + u32 count; + u32 max_count; + u32 window_size; +}; + +struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs); + +#endif /* DFS_PRI_DETECTOR_H */ diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index c4352323331..0512397a293 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -16,14 +16,6 @@ #include "hw.h" -static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz) -{ - if (fbin == AR5416_BCHAN_UNUSED) - return fbin; - - return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin)); -} - void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val) { REG_WRITE(ah, reg, val); @@ -290,6 +282,34 @@ u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower, return twiceMaxEdgePower; } +u16 ath9k_hw_get_scaled_power(struct ath_hw *ah, u16 power_limit, + u8 antenna_reduction) +{ + u16 reduction = antenna_reduction; + + /* + * Reduce scaled Power by number of chains active + * to get the per chain tx power level. 
+ */ + switch (ar5416_get_ntxchains(ah->txchainmask)) { + case 1: + break; + case 2: + reduction += POWER_CORRECTION_FOR_TWO_CHAIN; + break; + case 3: + reduction += POWER_CORRECTION_FOR_THREE_CHAIN; + break; + } + + if (power_limit > reduction) + power_limit -= reduction; + else + power_limit = 0; + + return power_limit; +} + void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); @@ -299,10 +319,10 @@ void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah) case 1: break; case 2: - regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN; + regulatory->max_power_level += POWER_CORRECTION_FOR_TWO_CHAIN; break; case 3: - regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN; + regulatory->max_power_level += POWER_CORRECTION_FOR_THREE_CHAIN; break; default: ath_dbg(common, EEPROM, "Invalid chainmask configuration\n"); diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index 5ff7ab96512..33acb920ed3 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h @@ -79,8 +79,8 @@ #define SUB_NUM_CTL_MODES_AT_5G_40 2 #define SUB_NUM_CTL_MODES_AT_2G_40 3 -#define INCREASE_MAXPOW_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */ -#define INCREASE_MAXPOW_BY_THREE_CHAIN 10 /* 10*log10(3)*2 */ +#define POWER_CORRECTION_FOR_TWO_CHAIN 6 /* 10*log10(2)*2 */ +#define POWER_CORRECTION_FOR_THREE_CHAIN 10 /* 10*log10(3)*2 */ /* * For AR9285 and later chipsets, the following bits are not being programmed @@ -686,6 +686,8 @@ void ath9k_hw_get_target_powers(struct ath_hw *ah, u16 numRates, bool isHt40Target); u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower, bool is2GHz, int num_band_edges); +u16 ath9k_hw_get_scaled_power(struct ath_hw *ah, u16 power_limit, + u8 antenna_reduction); void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah); int ath9k_hw_eeprom_init(struct ath_hw *ah); @@ -697,6 +699,14 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah, u16 *pPdGainBoundaries, u8 *pPDADCValues, u16 numXpdGains); +static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz) +{ + if (fbin == AR5416_BCHAN_UNUSED) + return fbin; + + return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin)); +} + #define ar5416_get_ntxchains(_txchainmask) \ (((_txchainmask >> 2) & 1) + \ ((_txchainmask >> 1) & 1) + (_txchainmask & 1)) diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index b34e8b2990b..aa614767adf 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -564,9 +564,6 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah, (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \ ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL)) -#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 -#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10 - u16 twiceMaxEdgePower; int i; struct cal_ctl_data_ar9287 *rep; @@ -591,29 +588,8 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah, tx_chainmask = ah->txchainmask; ath9k_hw_get_channel_centers(ah, chan, ¢ers); - scaledPower = powerLimit - antenna_reduction; - - /* - * Reduce scaled Power by number of chains active - * to get the per chain tx power level. 
- */ - switch (ar5416_get_ntxchains(tx_chainmask)) { - case 1: - break; - case 2: - if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN) - scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; - else - scaledPower = 0; - break; - case 3: - if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN) - scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; - else - scaledPower = 0; - break; - } - scaledPower = max((u16)0, scaledPower); + scaledPower = ath9k_hw_get_scaled_power(ah, powerLimit, + antenna_reduction); /* * Get TX power from EEPROM. @@ -786,8 +762,6 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah, #undef CMP_CTL #undef CMP_NO_CTL -#undef REDUCE_SCALED_POWER_BY_TWO_CHAIN -#undef REDUCE_SCALED_POWER_BY_THREE_CHAIN } static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah, diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index 619b95d764f..b5fba8b18b8 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -991,9 +991,6 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah, u16 antenna_reduction, u16 powerLimit) { -#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */ -#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */ - struct ar5416_eeprom_def *pEepData = &ah->eeprom.def; u16 twiceMaxEdgePower; int i; @@ -1027,24 +1024,8 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah, ath9k_hw_get_channel_centers(ah, chan, ¢ers); - scaledPower = powerLimit - antenna_reduction; - - switch (ar5416_get_ntxchains(tx_chainmask)) { - case 1: - break; - case 2: - if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN) - scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; - else - scaledPower = 0; - break; - case 3: - if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN) - scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; - else - scaledPower = 0; - break; - } + scaledPower = ath9k_hw_get_scaled_power(ah, powerLimit, + antenna_reduction); if (IS_CHAN_2GHZ(chan)) { numCtlModes = ARRAY_SIZE(ctlModesFor11g) - @@ -1263,20 +1244,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah, regulatory->max_power_level = ratesArray[i]; } - switch(ar5416_get_ntxchains(ah->txchainmask)) { - case 1: - break; - case 2: - regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN; - break; - case 3: - regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN; - break; - default: - ath_dbg(ath9k_hw_common(ah), EEPROM, - "Invalid chainmask configuration\n"); - break; - } + ath9k_hw_update_regulatory_maxpower(ah); if (test) return; diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c index fbe23de1297..281a9af0f1b 100644 --- a/drivers/net/wireless/ath/ath9k/gpio.c +++ b/drivers/net/wireless/ath/ath9k/gpio.c @@ -41,6 +41,9 @@ void ath_init_leds(struct ath_softc *sc) { int ret; + if (AR_SREV_9100(sc->sc_ah)) + return; + if (sc->sc_ah->led_pin < 0) { if (AR_SREV_9287(sc->sc_ah)) sc->sc_ah->led_pin = ATH_LED_PIN_9287; @@ -362,7 +365,7 @@ void ath9k_stop_btcoex(struct ath_softc *sc) ath9k_hw_btcoex_disable(ah); if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) ath9k_btcoex_timer_pause(sc); - if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_MCI) + if (AR_SREV_9462(ah)) ath_mci_flush_profile(&sc->btcoex.mci); } } @@ -373,7 +376,7 @@ void ath9k_deinit_btcoex(struct ath_softc *sc) ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE) ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer); 
- if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_MCI) + if (AR_SREV_9462(sc->sc_ah)) ath_mci_cleanup(sc); } @@ -399,17 +402,16 @@ int ath9k_init_btcoex(struct ath_softc *sc) txq = sc->tx.txq_map[WME_AC_BE]; ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum); sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; - break; - case ATH_BTCOEX_CFG_MCI: - sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; - sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE; - INIT_LIST_HEAD(&sc->btcoex.mci.info); + if (AR_SREV_9462(ah)) { + sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE; + INIT_LIST_HEAD(&sc->btcoex.mci.info); - r = ath_mci_setup(sc); - if (r) - return r; + r = ath_mci_setup(sc); + if (r) + return r; - ath9k_hw_btcoex_init_mci(ah); + ath9k_hw_btcoex_init_mci(ah); + } break; default: diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 424aabb2c73..f67cd952e74 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -53,6 +53,8 @@ static struct usb_device_id ath9k_hif_usb_ids[] = { .driver_info = AR9280_USB }, /* SMC Networks */ { USB_DEVICE(0x0411, 0x017f), .driver_info = AR9280_USB }, /* Sony UWA-BR100 */ + { USB_DEVICE(0x04da, 0x3904), + .driver_info = AR9280_USB }, { USB_DEVICE(0x0cf3, 0x20ff), .driver_info = STORAGE_DEVICE }, diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index de5ee15ee63..25213d521bc 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -14,6 +14,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include "htc.h" MODULE_AUTHOR("Atheros Communications"); @@ -711,7 +713,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv, hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; - hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; + hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN | + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; hw->queues = 4; hw->channel_change_time = 5000; @@ -966,9 +969,7 @@ int ath9k_htc_resume(struct htc_target *htc_handle) static int __init ath9k_htc_init(void) { if (ath9k_hif_usb_init() < 0) { - printk(KERN_ERR - "ath9k_htc: No USB devices found," - " driver not installed.\n"); + pr_err("No USB devices found, driver not installed\n"); return -ENODEV; } @@ -979,6 +980,6 @@ module_init(ath9k_htc_init); static void __exit ath9k_htc_exit(void) { ath9k_hif_usb_exit(); - printk(KERN_INFO "ath9k_htc: Driver unloaded\n"); + pr_info("Driver unloaded\n"); } module_exit(ath9k_htc_exit); diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index c25226a32dd..4a9570dfba7 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -14,6 +14,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include "htc.h" static int htc_issue_send(struct htc_target *target, struct sk_buff* skb, @@ -461,7 +463,7 @@ int ath9k_htc_hw_init(struct htc_target *target, char *product, u32 drv_info) { if (ath9k_htc_probe_device(target, dev, devid, product, drv_info)) { - printk(KERN_ERR "Failed to initialize the device\n"); + pr_err("Failed to initialize the device\n"); return -ENODEV; } diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index fa84e37bf09..f84477c5ebb 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -24,6 +24,8 @@ #include "rc.h" #include "ar9003_mac.h" #include "ar9003_mci.h" +#include "debug.h" +#include "ath9k.h" static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); @@ -83,6 +85,53 @@ static void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah) /* Helper Functions */ /********************/ +#ifdef CONFIG_ATH9K_DEBUGFS + +void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause) +{ + struct ath_softc *sc = common->priv; + if (sync_cause) + sc->debug.stats.istats.sync_cause_all++; + if (sync_cause & AR_INTR_SYNC_RTC_IRQ) + sc->debug.stats.istats.sync_rtc_irq++; + if (sync_cause & AR_INTR_SYNC_MAC_IRQ) + sc->debug.stats.istats.sync_mac_irq++; + if (sync_cause & AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS) + sc->debug.stats.istats.eeprom_illegal_access++; + if (sync_cause & AR_INTR_SYNC_APB_TIMEOUT) + sc->debug.stats.istats.apb_timeout++; + if (sync_cause & AR_INTR_SYNC_PCI_MODE_CONFLICT) + sc->debug.stats.istats.pci_mode_conflict++; + if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) + sc->debug.stats.istats.host1_fatal++; + if (sync_cause & AR_INTR_SYNC_HOST1_PERR) + sc->debug.stats.istats.host1_perr++; + if (sync_cause & AR_INTR_SYNC_TRCV_FIFO_PERR) + sc->debug.stats.istats.trcv_fifo_perr++; + if (sync_cause & AR_INTR_SYNC_RADM_CPL_EP) + sc->debug.stats.istats.radm_cpl_ep++; + if (sync_cause & AR_INTR_SYNC_RADM_CPL_DLLP_ABORT) + sc->debug.stats.istats.radm_cpl_dllp_abort++; + if (sync_cause & AR_INTR_SYNC_RADM_CPL_TLP_ABORT) + sc->debug.stats.istats.radm_cpl_tlp_abort++; + if (sync_cause & AR_INTR_SYNC_RADM_CPL_ECRC_ERR) + sc->debug.stats.istats.radm_cpl_ecrc_err++; + if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) + sc->debug.stats.istats.radm_cpl_timeout++; + if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) + sc->debug.stats.istats.local_timeout++; + if (sync_cause & AR_INTR_SYNC_PM_ACCESS) + sc->debug.stats.istats.pm_access++; + if (sync_cause & AR_INTR_SYNC_MAC_AWAKE) + sc->debug.stats.istats.mac_awake++; + if (sync_cause & AR_INTR_SYNC_MAC_ASLEEP) + sc->debug.stats.istats.mac_asleep++; + if (sync_cause & AR_INTR_SYNC_MAC_SLEEP_ACCESS) + sc->debug.stats.istats.mac_sleep_access++; +} +#endif + + static void ath9k_hw_set_clockrate(struct ath_hw *ah) { struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; @@ -142,6 +191,22 @@ bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout) } EXPORT_SYMBOL(ath9k_hw_wait); +void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan, + int hw_delay) +{ + if (IS_CHAN_B(chan)) + hw_delay = (4 * hw_delay) / 22; + else + hw_delay /= 10; + + if (IS_CHAN_HALF_RATE(chan)) + hw_delay *= 2; + else if (IS_CHAN_QUARTER_RATE(chan)) + hw_delay *= 4; + + udelay(hw_delay + BASE_ACTIVATE_DELAY); +} + void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array, int column, unsigned int *writecnt) { @@ -388,8 +453,8 @@ static void ath9k_hw_init_config(struct ath_hw *ah) { 
int i; - ah->config.dma_beacon_response_time = 2; - ah->config.sw_beacon_response_time = 10; + ah->config.dma_beacon_response_time = 1; + ah->config.sw_beacon_response_time = 6; ah->config.additional_swba_backoff = 0; ah->config.ack_6mb = 0x0; ah->config.cwm_ignore_extcca = 0; @@ -445,7 +510,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah) AR_STA_ID1_MCAST_KSRCH; if (AR_SREV_9100(ah)) ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX; - ah->enable_32kHz_clock = DONT_USE_32KHZ; ah->slottime = ATH9K_SLOT_TIME_9; ah->globaltxtimeout = (u32) -1; ah->power_mode = ATH9K_PM_UNDEFINED; @@ -972,7 +1036,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah) struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_conf *conf = &common->hw->conf; const struct ath9k_channel *chan = ah->curchan; - int acktimeout, ctstimeout; + int acktimeout, ctstimeout, ack_offset = 0; int slottime; int sifstime; int rx_lat = 0, tx_lat = 0, eifs = 0; @@ -993,6 +1057,11 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah) rx_lat = 37; tx_lat = 54; + if (IS_CHAN_5GHZ(chan)) + sifstime = 16; + else + sifstime = 10; + if (IS_CHAN_HALF_RATE(chan)) { eifs = 175; rx_lat *= 2; @@ -1000,8 +1069,9 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah) if (IS_CHAN_A_FAST_CLOCK(ah, chan)) tx_lat += 11; + sifstime *= 2; + ack_offset = 16; slottime = 13; - sifstime = 32; } else if (IS_CHAN_QUARTER_RATE(chan)) { eifs = 340; rx_lat = (rx_lat * 4) - 1; @@ -1009,8 +1079,9 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah) if (IS_CHAN_A_FAST_CLOCK(ah, chan)) tx_lat += 22; + sifstime *= 4; + ack_offset = 32; slottime = 21; - sifstime = 64; } else { if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) { eifs = AR_D_GBL_IFS_EIFS_ASYNC_FIFO; @@ -1024,14 +1095,10 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah) tx_lat = MS(reg, AR_USEC_TX_LAT); slottime = ah->slottime; - if (IS_CHAN_5GHZ(chan)) - sifstime = 16; - else - sifstime = 10; } /* As defined by IEEE 802.11-2007 17.3.8.6 */ - acktimeout = slottime + sifstime + 3 * ah->coverage_class; + acktimeout = slottime + sifstime + 3 * ah->coverage_class + ack_offset; ctstimeout = acktimeout; /* @@ -1041,7 +1108,8 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah) * BA frames in some implementations, but it has been found to fix ACK * timeout issues in other cases as well. 
*/ - if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ) { + if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ && + !IS_CHAN_HALF_RATE(chan) && !IS_CHAN_QUARTER_RATE(chan)) { acktimeout += 64 - sifstime - ah->slottime; ctstimeout += 48 - sifstime - ah->slottime; } @@ -1491,11 +1559,84 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah) } } +static bool ath9k_hw_check_dcs(u32 dma_dbg, u32 num_dcu_states, + int *hang_state, int *hang_pos) +{ + static u32 dcu_chain_state[] = {5, 6, 9}; /* DCU chain stuck states */ + u32 chain_state, dcs_pos, i; + + for (dcs_pos = 0; dcs_pos < num_dcu_states; dcs_pos++) { + chain_state = (dma_dbg >> (5 * dcs_pos)) & 0x1f; + for (i = 0; i < 3; i++) { + if (chain_state == dcu_chain_state[i]) { + *hang_state = chain_state; + *hang_pos = dcs_pos; + return true; + } + } + } + return false; +} + +#define DCU_COMPLETE_STATE 1 +#define DCU_COMPLETE_STATE_MASK 0x3 +#define NUM_STATUS_READS 50 +static bool ath9k_hw_detect_mac_hang(struct ath_hw *ah) +{ + u32 chain_state, comp_state, dcs_reg = AR_DMADBG_4; + u32 i, hang_pos, hang_state, num_state = 6; + + comp_state = REG_READ(ah, AR_DMADBG_6); + + if ((comp_state & DCU_COMPLETE_STATE_MASK) != DCU_COMPLETE_STATE) { + ath_dbg(ath9k_hw_common(ah), RESET, + "MAC Hang signature not found at DCU complete\n"); + return false; + } + + chain_state = REG_READ(ah, dcs_reg); + if (ath9k_hw_check_dcs(chain_state, num_state, &hang_state, &hang_pos)) + goto hang_check_iter; + + dcs_reg = AR_DMADBG_5; + num_state = 4; + chain_state = REG_READ(ah, dcs_reg); + if (ath9k_hw_check_dcs(chain_state, num_state, &hang_state, &hang_pos)) + goto hang_check_iter; + + ath_dbg(ath9k_hw_common(ah), RESET, + "MAC Hang signature 1 not found\n"); + return false; + +hang_check_iter: + ath_dbg(ath9k_hw_common(ah), RESET, + "DCU registers: chain %08x complete %08x Hang: state %d pos %d\n", + chain_state, comp_state, hang_state, hang_pos); + + for (i = 0; i < NUM_STATUS_READS; i++) { + chain_state = REG_READ(ah, dcs_reg); + chain_state = (chain_state >> (5 * hang_pos)) & 0x1f; + comp_state = REG_READ(ah, AR_DMADBG_6); + + if (((comp_state & DCU_COMPLETE_STATE_MASK) != + DCU_COMPLETE_STATE) || + (chain_state != hang_state)) + return false; + } + + ath_dbg(ath9k_hw_common(ah), RESET, "MAC Hang signature 1 found\n"); + + return true; +} + bool ath9k_hw_check_alive(struct ath_hw *ah) { int count = 50; u32 reg; + if (AR_SREV_9300(ah)) + return !ath9k_hw_detect_mac_hang(ah); + if (AR_SREV_9285_12_OR_LATER(ah)) return true; @@ -1546,6 +1687,10 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan) if (chan->channel == ah->curchan->channel) goto fail; + if ((ah->curchan->channelFlags | chan->channelFlags) & + (CHANNEL_HALF | CHANNEL_QUARTER)) + goto fail; + if ((chan->channelFlags & CHANNEL_ALL) != (ah->curchan->channelFlags & CHANNEL_ALL)) goto fail; diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index e88f182ff45..828b9bbc456 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -708,7 +708,6 @@ struct ath_hw { struct ar5416Stats stats; struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES]; - int16_t curchan_rad_index; enum ath9k_int imask; u32 imrs2_reg; u32 txok_interrupt_mask; @@ -762,11 +761,6 @@ struct ath_hw { u32 sta_id1_defaults; u32 misc_mode; - enum { - AUTO_32KHZ, - USE_32KHZ, - DONT_USE_32KHZ, - } enable_32kHz_clock; /* Private to hardware code */ struct ath_hw_private_ops private_ops; @@ -783,7 +777,6 @@ struct 
ath_hw { u32 *analogBank7Data; u32 *bank6Temp; - u8 txpower_limit; int coverage_class; u32 slottime; u32 globaltxtimeout; @@ -848,7 +841,6 @@ struct ath_hw { struct ath_gen_timer_table hw_gen_timers; struct ar9003_txs *ts_ring; - void *ts_start; u32 ts_paddr_start; u32 ts_paddr_end; u16 ts_tail; @@ -915,7 +907,6 @@ static inline u8 get_streams(int mask) } /* Initialization, Detach, Reset */ -const char *ath9k_hw_probe(u16 vendorid, u16 devid); void ath9k_hw_deinit(struct ath_hw *ah); int ath9k_hw_init(struct ath_hw *ah); int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, @@ -932,6 +923,8 @@ void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val); void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna); /* General Operation */ +void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan, + int hw_delay); bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout); void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array, int column, unsigned int *writecnt); @@ -965,6 +958,13 @@ bool ath9k_hw_check_alive(struct ath_hw *ah); bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode); +#ifdef CONFIG_ATH9K_DEBUGFS +void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause); +#else +static inline void ath9k_debug_sync_cause(struct ath_common *common, + u32 sync_cause) {} +#endif + /* Generic hw timer primitives */ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, void (*trigger)(void *), @@ -1012,7 +1012,6 @@ int ar9003_paprd_create_curve(struct ath_hw *ah, int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain); int ar9003_paprd_init_table(struct ath_hw *ah); bool ar9003_paprd_is_done(struct ath_hw *ah); -void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains); /* Hardware family op attach helpers */ void ar5008_hw_attach_phy_ops(struct ath_hw *ah); diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index cb006458fc4..dee9e092449 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -14,6 +14,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/ath9k_platform.h> @@ -519,6 +521,8 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, atomic_set(&ah->intr_ref_cnt, -1); sc->sc_ah = ah; + sc->dfs_detector = dfs_pattern_detector_init(NL80211_DFS_UNSET); + if (!pdata) { ah->ah_flags |= AH_USE_EEPROM; sc->sc_ah->led_pin = -1; @@ -642,6 +646,24 @@ void ath9k_reload_chainmask_settings(struct ath_softc *sc) setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap); } +static const struct ieee80211_iface_limit if_limits[] = { + { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_WDS) }, + { .max = 8, .types = +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_GO) }, +}; + +static const struct ieee80211_iface_combination if_comb = { + .limits = if_limits, + .n_limits = ARRAY_SIZE(if_limits), + .max_interfaces = 2048, + .num_different_channels = 1, +}; void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) { @@ -671,11 +693,15 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_MESH_POINT); + hw->wiphy->iface_combinations = &if_comb; + hw->wiphy->n_iface_combinations = 1; + if (AR_SREV_5416(sc->sc_ah)) hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; + hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; hw->queues = 4; hw->max_rates = 4; @@ -779,6 +805,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, goto error_world; } + setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc); sc->last_rssi = ATH_RSSI_DUMMY_MARKER; ath_init_leds(sc); @@ -821,6 +848,8 @@ static void ath9k_deinit_softc(struct ath_softc *sc) ath_tx_cleanupq(sc, &sc->tx.txq[i]); ath9k_hw_deinit(sc->sc_ah); + if (sc->dfs_detector != NULL) + sc->dfs_detector->exit(sc->dfs_detector); kfree(sc->sc_ah); sc->sc_ah = NULL; @@ -866,17 +895,14 @@ static int __init ath9k_init(void) /* Register rate control algorithm */ error = ath_rate_control_register(); if (error != 0) { - printk(KERN_ERR - "ath9k: Unable to register rate control " - "algorithm: %d\n", - error); + pr_err("Unable to register rate control algorithm: %d\n", + error); goto err_out; } error = ath_pci_init(); if (error < 0) { - printk(KERN_ERR - "ath9k: No PCI devices found, driver not installed.\n"); + pr_err("No PCI devices found, driver not installed\n"); error = -ENODEV; goto err_rate_unregister; } @@ -905,6 +931,6 @@ static void __exit ath9k_exit(void) ath_ahb_exit(); ath_pci_exit(); ath_rate_control_unregister(); - printk(KERN_INFO "%s: Driver unloaded\n", dev_info); + pr_info("%s: Driver unloaded\n", dev_info); } module_exit(ath9k_exit); diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index f7bd2532269..04ef775ccee 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -133,8 +133,16 @@ EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel); void ath9k_hw_abort_tx_dma(struct ath_hw *ah) { + int maxdelay = 1000; int i, q; + if (ah->curchan) { + if (IS_CHAN_HALF_RATE(ah->curchan)) + maxdelay *= 2; + else if (IS_CHAN_QUARTER_RATE(ah->curchan)) + maxdelay *= 4; + } + REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M); REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF); @@ -142,7 +150,7 @@ void 
ath9k_hw_abort_tx_dma(struct ath_hw *ah) REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF); for (q = 0; q < AR_NUM_QCU; q++) { - for (i = 0; i < 1000; i++) { + for (i = 0; i < maxdelay; i++) { if (i) udelay(5); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 798ea57252b..dfa78e8b647 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -113,21 +113,25 @@ void ath9k_ps_restore(struct ath_softc *sc) struct ath_common *common = ath9k_hw_common(sc->sc_ah); enum ath9k_power_mode mode; unsigned long flags; + bool reset; spin_lock_irqsave(&sc->sc_pm_lock, flags); if (--sc->ps_usecount != 0) goto unlock; - if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK)) + if (sc->ps_idle) { + ath9k_hw_setrxabort(sc->sc_ah, 1); + ath9k_hw_stopdmarecv(sc->sc_ah, &reset); mode = ATH9K_PM_FULL_SLEEP; - else if (sc->ps_enabled && - !(sc->ps_flags & (PS_WAIT_FOR_BEACON | - PS_WAIT_FOR_CAB | - PS_WAIT_FOR_PSPOLL_DATA | - PS_WAIT_FOR_TX_ACK))) + } else if (sc->ps_enabled && + !(sc->ps_flags & (PS_WAIT_FOR_BEACON | + PS_WAIT_FOR_CAB | + PS_WAIT_FOR_PSPOLL_DATA | + PS_WAIT_FOR_TX_ACK))) { mode = ATH9K_PM_NETWORK_SLEEP; - else + } else { goto unlock; + } spin_lock(&common->cc_lock); ath_hw_cycle_counters_update(common); @@ -241,6 +245,7 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush) sc->hw_busy_count = 0; del_timer_sync(&common->ani.timer); + del_timer_sync(&sc->rx_poll_timer); ath9k_debug_samp_bb_mac(sc); ath9k_hw_disable_interrupts(ah); @@ -282,6 +287,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start) ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2); + ath_start_rx_poll(sc, 3); if (!common->disable_ani) ath_start_ani(common); } @@ -690,17 +696,6 @@ void ath9k_tasklet(unsigned long data) goto out; } - /* - * Only run the baseband hang check if beacons stop working in AP or - * IBSS mode, because it has a high false positive rate. For station - * mode it should not be necessary, since the upper layers will detect - * this through a beacon miss automatically and the following channel - * change will trigger a hardware reset anyway - */ - if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0 && - !ath9k_hw_check_alive(ah)) - ieee80211_queue_work(sc->hw, &sc->hw_check_work); - if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) { /* * TSF sync does not look correct; remain awake to sync with @@ -912,10 +907,19 @@ void ath_hw_check(struct work_struct *work) struct ath_common *common = ath9k_hw_common(sc->sc_ah); unsigned long flags; int busy; + u8 is_alive, nbeacon = 1; ath9k_ps_wakeup(sc); - if (ath9k_hw_check_alive(sc->sc_ah)) + is_alive = ath9k_hw_check_alive(sc->sc_ah); + + if (is_alive && !AR_SREV_9300(sc->sc_ah)) goto out; + else if (!is_alive && AR_SREV_9300(sc->sc_ah)) { + ath_dbg(common, RESET, + "DCU stuck is detected. 
Schedule chip reset\n"); + RESET_STAT_INC(sc, RESET_TYPE_MAC_HANG); + goto sched_reset; + } spin_lock_irqsave(&common->cc_lock, flags); busy = ath_update_survey_stats(sc); @@ -926,12 +930,18 @@ void ath_hw_check(struct work_struct *work) if (busy >= 99) { if (++sc->hw_busy_count >= 3) { RESET_STAT_INC(sc, RESET_TYPE_BB_HANG); - ieee80211_queue_work(sc->hw, &sc->hw_reset_work); + goto sched_reset; } - - } else if (busy >= 0) + } else if (busy >= 0) { sc->hw_busy_count = 0; + nbeacon = 3; + } + ath_start_rx_poll(sc, nbeacon); + goto out; + +sched_reset: + ieee80211_queue_work(sc->hw, &sc->hw_reset_work); out: ath9k_ps_restore(sc); } @@ -1094,14 +1104,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) } } - /* - * Cannot tx while the hardware is in full sleep, it first needs a full - * chip reset to recover from that - */ - if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP)) - goto exit; - - if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) { + if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP)) { /* * We are using PS-Poll and mac80211 can request TX while in * power save mode. Need to wake up hardware for the TX to be @@ -1120,12 +1123,21 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) } /* * The actual restore operation will happen only after - * the sc_flags bit is cleared. We are just dropping + * the ps_flags bit is cleared. We are just dropping * the ps_usecount here. */ ath9k_ps_restore(sc); } + /* + * Cannot tx while the hardware is in full sleep, it first needs a full + * chip reset to recover from that + */ + if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP)) { + ath_err(common, "TX while HW is in FULL_SLEEP mode\n"); + goto exit; + } + memset(&txctl, 0, sizeof(struct ath_tx_control)); txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)]; @@ -1133,6 +1145,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) if (ath_tx_start(hw, skb, &txctl) != 0) { ath_dbg(common, XMIT, "TX failed\n"); + TX_STAT_INC(txctl.txq->axq_qnum, txfailed); goto exit; } @@ -1151,6 +1164,7 @@ static void ath9k_stop(struct ieee80211_hw *hw) mutex_lock(&sc->mutex); ath_cancel_work(sc); + del_timer_sync(&sc->rx_poll_timer); if (sc->sc_flags & SC_OP_INVALID) { ath_dbg(common, ANY, "Device not present\n"); @@ -1237,7 +1251,6 @@ static void ath9k_reclaim_beacon(struct ath_softc *sc, ath9k_set_beaconing_status(sc, false); ath_beacon_return(sc, avp); ath9k_set_beaconing_status(sc, true); - sc->sc_flags &= ~SC_OP_BEACONS; } static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) @@ -1368,21 +1381,31 @@ static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw, ath9k_calculate_summary_state(hw, vif); if (ath9k_uses_beacons(vif->type)) { - int error; - /* This may fail because upper levels do not have beacons - * properly configured yet. That's OK, we assume it - * will be properly configured and then we will be notified - * in the info_changed method and set up beacons properly - * there. 
- */ + /* Reserve a beacon slot for the vif */ ath9k_set_beaconing_status(sc, false); - error = ath_beacon_alloc(sc, vif); - if (!error) - ath_beacon_config(sc, vif); + ath_beacon_alloc(sc, vif); ath9k_set_beaconing_status(sc, true); } } +void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon) +{ + if (!AR_SREV_9300(sc->sc_ah)) + return; + + if (!(sc->sc_flags & SC_OP_PRIM_STA_VIF)) + return; + + mod_timer(&sc->rx_poll_timer, jiffies + msecs_to_jiffies + (nbeacon * sc->cur_beacon_conf.beacon_interval)); +} + +void ath_rx_poll(unsigned long data) +{ + struct ath_softc *sc = (struct ath_softc *)data; + + ieee80211_queue_work(sc->hw, &sc->hw_check_work); +} static int ath9k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) @@ -1511,6 +1534,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, static void ath9k_enable_ps(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); sc->ps_enabled = true; if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { @@ -1520,11 +1544,13 @@ static void ath9k_enable_ps(struct ath_softc *sc) } ath9k_hw_setrxabort(ah, 1); } + ath_dbg(common, PS, "PowerSave enabled\n"); } static void ath9k_disable_ps(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); sc->ps_enabled = false; ath9k_hw_setpower(ah, ATH9K_PM_AWAKE); @@ -1539,7 +1565,7 @@ static void ath9k_disable_ps(struct ath_softc *sc) ath9k_hw_set_interrupts(ah); } } - + ath_dbg(common, PS, "PowerSave disabled\n"); } static int ath9k_config(struct ieee80211_hw *hw, u32 changed) @@ -1911,6 +1937,8 @@ static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif) sc->last_rssi = ATH_RSSI_DUMMY_MARKER; sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; + ath_start_rx_poll(sc, 3); + if (!common->disable_ani) { sc->sc_flags |= SC_OP_ANI_RUN; ath_start_ani(common); @@ -1950,6 +1978,7 @@ static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif) /* Stop ANI */ sc->sc_flags &= ~SC_OP_ANI_RUN; del_timer_sync(&common->ani.timer); + del_timer_sync(&sc->rx_poll_timer); memset(&sc->caldata, 0, sizeof(sc->caldata)); } } @@ -1964,7 +1993,6 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, struct ath_common *common = ath9k_hw_common(ah); struct ath_vif *avp = (void *)vif->drv_priv; int slottime; - int error; ath9k_ps_wakeup(sc); mutex_lock(&sc->mutex); @@ -1993,16 +2021,29 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, } else { sc->sc_flags &= ~SC_OP_ANI_RUN; del_timer_sync(&common->ani.timer); + del_timer_sync(&sc->rx_poll_timer); } } - /* Enable transmission of beacons (AP, IBSS, MESH) */ - if ((changed & BSS_CHANGED_BEACON) || - ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) { + /* + * In case of AP mode, the HW TSF has to be reset + * when the beacon interval changes. 
+ */ + if ((changed & BSS_CHANGED_BEACON_INT) && + (vif->type == NL80211_IFTYPE_AP)) + sc->sc_flags |= SC_OP_TSF_RESET; + + /* Configure beaconing (AP, IBSS, MESH) */ + if (ath9k_uses_beacons(vif->type) && + ((changed & BSS_CHANGED_BEACON) || + (changed & BSS_CHANGED_BEACON_ENABLED) || + (changed & BSS_CHANGED_BEACON_INT))) { ath9k_set_beaconing_status(sc, false); - error = ath_beacon_alloc(sc, vif); - if (!error) - ath_beacon_config(sc, vif); + if (bss_conf->enable_beacon) + ath_beacon_alloc(sc, vif); + else + avp->is_bslot_active = false; + ath_beacon_config(sc, vif); ath9k_set_beaconing_status(sc, true); } @@ -2025,30 +2066,6 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, } } - /* Disable transmission of beacons */ - if ((changed & BSS_CHANGED_BEACON_ENABLED) && - !bss_conf->enable_beacon) { - ath9k_set_beaconing_status(sc, false); - avp->is_bslot_active = false; - ath9k_set_beaconing_status(sc, true); - } - - if (changed & BSS_CHANGED_BEACON_INT) { - /* - * In case of AP mode, the HW TSF has to be reset - * when the beacon interval changes. - */ - if (vif->type == NL80211_IFTYPE_AP) { - sc->sc_flags |= SC_OP_TSF_RESET; - ath9k_set_beaconing_status(sc, false); - error = ath_beacon_alloc(sc, vif); - if (!error) - ath_beacon_config(sc, vif); - ath9k_set_beaconing_status(sc, true); - } else - ath_beacon_config(sc, vif); - } - mutex_unlock(&sc->mutex); ath9k_ps_restore(sc); } diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 77dc327def8..a856b51255f 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -14,6 +14,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/nl80211.h> #include <linux/pci.h> #include <linux/pci-aspm.h> @@ -171,14 +173,13 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { - printk(KERN_ERR "ath9k: 32-bit DMA not available\n"); + pr_err("32-bit DMA not available\n"); goto err_dma; } ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { - printk(KERN_ERR "ath9k: 32-bit DMA consistent " - "DMA enable failed\n"); + pr_err("32-bit DMA consistent DMA enable failed\n"); goto err_dma; } @@ -224,7 +225,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mem = pci_iomap(pdev, 0, 0); if (!mem) { - printk(KERN_ERR "PCI memory map error\n") ; + pr_err("PCI memory map error\n") ; ret = -EIO; goto err_iomap; } diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index 08bb4553270..92a6c0a87f8 100644 --- a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c @@ -1436,7 +1436,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband, static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta, - u32 changed, enum nl80211_channel_type oper_chan_type) + u32 changed) { struct ath_softc *sc = priv; struct ath_rate_priv *ath_rc_priv = priv_sta; @@ -1447,12 +1447,11 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband, /* FIXME: Handle AP mode later when we support CWM */ - if (changed & IEEE80211_RC_HT_CHANGED) { + if (changed & IEEE80211_RC_BW_CHANGED) { if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION) return; - if (oper_chan_type == NL80211_CHAN_HT40MINUS || - oper_chan_type == NL80211_CHAN_HT40PLUS) 
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) oper_cw40 = true; if (oper_cw40) diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 1c4583c7ff7..e1fcc68124d 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -812,6 +812,7 @@ static bool ath9k_rx_accept(struct ath_common *common, is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && test_bit(rx_stats->rs_keyix, common->tkip_keymap); strip_mic = is_valid_tkip && ieee80211_is_data(fc) && + ieee80211_has_protected(fc) && !(rx_stats->rs_status & (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC | ATH9K_RXERR_KEYMISS)); @@ -824,15 +825,20 @@ static bool ath9k_rx_accept(struct ath_common *common, if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID) rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS; - if (!rx_stats->rs_datalen) + if (!rx_stats->rs_datalen) { + RX_STAT_INC(rx_len_err); return false; + } + /* * rs_status follows rs_datalen so if rs_datalen is too large * we can take a hint that hardware corrupted it, so ignore * those frames. */ - if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) + if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) { + RX_STAT_INC(rx_len_err); return false; + } /* Only use error bits from the last fragment */ if (rx_stats->rs_more) @@ -902,6 +908,7 @@ static int ath9k_process_rate(struct ath_common *common, struct ieee80211_supported_band *sband; enum ieee80211_band band; unsigned int i = 0; + struct ath_softc __maybe_unused *sc = common->priv; band = hw->conf.channel->band; sband = hw->wiphy->bands[band]; @@ -936,7 +943,7 @@ static int ath9k_process_rate(struct ath_common *common, ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n", rx_stats->rs_rate); - + RX_STAT_INC(rx_rate_err); return -EINVAL; } @@ -1823,10 +1830,14 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len); rxs = IEEE80211_SKB_RXCB(hdr_skb); - if (ieee80211_is_beacon(hdr->frame_control) && - !is_zero_ether_addr(common->curbssid) && - !compare_ether_addr(hdr->addr3, common->curbssid)) - rs.is_mybeacon = true; + if (ieee80211_is_beacon(hdr->frame_control)) { + RX_STAT_INC(rx_beacons); + if (!is_zero_ether_addr(common->curbssid) && + ether_addr_equal(hdr->addr3, common->curbssid)) + rs.is_mybeacon = true; + else + rs.is_mybeacon = false; + } else rs.is_mybeacon = false; @@ -1836,8 +1847,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) * If we're asked to flush receive queue, directly * chain it back at the queue without processing it. */ - if (sc->sc_flags & SC_OP_RXFLUSH) + if (sc->sc_flags & SC_OP_RXFLUSH) { + RX_STAT_INC(rx_drop_rxflush); goto requeue_drop_frag; + } memset(rxs, 0, sizeof(struct ieee80211_rx_status)); @@ -1855,6 +1868,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) if (retval) goto requeue_drop_frag; + if (rs.is_mybeacon) { + sc->hw_busy_count = 0; + ath_start_rx_poll(sc, 3); + } /* Ensure we always have an skb to requeue once we are done * processing the current buffer's skb */ requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); @@ -1863,8 +1880,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) * tell hardware it can give us a new frame using the old * skb and put it at the tail of the sc->rx.rxbuf list for * processing. 
*/ - if (!requeue_skb) + if (!requeue_skb) { + RX_STAT_INC(rx_oom_err); goto requeue_drop_frag; + } /* Unmap the frame */ dma_unmap_single(sc->dev, bf->bf_buf_addr, @@ -1895,6 +1914,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) } if (rs.rs_more) { + RX_STAT_INC(rx_frags); /* * rs_more indicates chained descriptors which can be * used to link buffers together for a sort of @@ -1904,6 +1924,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) /* too many fragments - cannot handle frame */ dev_kfree_skb_any(sc->rx.frag); dev_kfree_skb_any(skb); + RX_STAT_INC(rx_too_many_frags_err); skb = NULL; } sc->rx.frag = skb; @@ -1915,6 +1936,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) { dev_kfree_skb(skb); + RX_STAT_INC(rx_oom_err); goto requeue_drop_frag; } diff --git a/drivers/net/wireless/ath/carl9170/cmd.h b/drivers/net/wireless/ath/carl9170/cmd.h index 885c42778b8..65919c902f5 100644 --- a/drivers/net/wireless/ath/carl9170/cmd.h +++ b/drivers/net/wireless/ath/carl9170/cmd.h @@ -114,7 +114,7 @@ __regwrite_out : \ #define carl9170_regwrite_result() \ __err; \ -} while (0); +} while (0) #define carl9170_async_regwrite_get_buf() \ @@ -126,7 +126,7 @@ do { \ __err = -ENOMEM; \ goto __async_regwrite_out; \ } \ -} while (0); +} while (0) #define carl9170_async_regwrite_begin(carl) \ do { \ @@ -169,6 +169,6 @@ __async_regwrite_out: \ #define carl9170_async_regwrite_result() \ __err; \ -} while (0); +} while (0) #endif /* __CMD_H */ diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c index cffde8d9a52..5c73c03872f 100644 --- a/drivers/net/wireless/ath/carl9170/fw.c +++ b/drivers/net/wireless/ath/carl9170/fw.c @@ -355,6 +355,8 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) ar->hw->wiphy->interface_modes |= if_comb_types; + ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + #undef SUPPORTED return carl9170_fw_tx_sequence(ar); } diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c index dc99030ea8b..84b22eec7ab 100644 --- a/drivers/net/wireless/ath/carl9170/rx.c +++ b/drivers/net/wireless/ath/carl9170/rx.c @@ -538,7 +538,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len) return; /* and only beacons from the associated BSSID, please */ - if (compare_ether_addr(hdr->addr3, ar->common.curbssid) || + if (!ether_addr_equal(hdr->addr3, ar->common.curbssid) || !ar->common.curaid) return; diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c index ea2c737138d..8e99540cd90 100644 --- a/drivers/net/wireless/ath/main.c +++ b/drivers/net/wireless/ath/main.c @@ -14,6 +14,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/module.h> @@ -49,7 +51,7 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common, if (off != 0) skb_reserve(skb, common->cachelsz - off); } else { - printk(KERN_ERR "skbuff alloc of size %u failed\n", len); + pr_err("skbuff alloc of size %u failed\n", len); return NULL; } diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index 10dea37431b..d81698015bf 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -14,6 +14,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/export.h> #include <net/cfg80211.h> @@ -562,7 +564,7 @@ static int __ath_regd_init(struct ath_regulatory *reg) printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd); if (!ath_regd_is_eeprom_valid(reg)) { - printk(KERN_ERR "ath: Invalid EEPROM contents\n"); + pr_err("Invalid EEPROM contents\n"); return -EINVAL; } diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index 6c87a823f5a..d07c0301da6 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c @@ -3989,8 +3989,7 @@ static int reset_atmel_card(struct net_device *dev) atmel_copy_to_card(priv->dev, 0x8000, &fw[0x6000], len - 0x6000); } - if (fw_entry) - release_firmware(fw_entry); + release_firmware(fw_entry); } err = atmel_wakeup_firmware(priv); diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c index 9ab1192004c..51e33b53386 100644 --- a/drivers/net/wireless/atmel_pci.c +++ b/drivers/net/wireless/atmel_pci.c @@ -74,15 +74,4 @@ static void __devexit atmel_pci_remove(struct pci_dev *pdev) stop_atmel_card(pci_get_drvdata(pdev)); } -static int __init atmel_init_module(void) -{ - return pci_register_driver(&atmel_driver); -} - -static void __exit atmel_cleanup_module(void) -{ - pci_unregister_driver(&atmel_driver); -} - -module_init(atmel_init_module); -module_exit(atmel_cleanup_module); +module_pci_driver(atmel_driver); diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index e4d6dc2e37d..617afc8211b 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -4010,6 +4010,20 @@ static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, if (modparam_nohwcrypt) return -ENOSPC; /* User disabled HW-crypto */ + if ((vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_MESH_POINT) && + (key->cipher == WLAN_CIPHER_SUITE_TKIP || + key->cipher == WLAN_CIPHER_SUITE_CCMP) && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { + /* + * For now, disable hw crypto for the RSN IBSS group keys. This + * could be optimized in the future, but until that gets + * implemented, use of software crypto for group addressed + * frames is acceptable to allow RSN IBSS to be used. + */ + return -EOPNOTSUPP; + } + mutex_lock(&wl->mutex); dev = wl->current_dev; @@ -5281,6 +5295,8 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) BIT(NL80211_IFTYPE_WDS) | BIT(NL80211_IFTYPE_ADHOC); + hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; + hw->queues = modparam_qos ?
B43_QOS_QUEUE_NUM : 1; wl->mac80211_initially_registered_queues = hw->queues; hw->max_rates = 2; diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c index 80b0755ed3a..a54fb2d2908 100644 --- a/drivers/net/wireless/b43/sdio.c +++ b/drivers/net/wireless/b43/sdio.c @@ -193,7 +193,7 @@ static struct sdio_driver b43_sdio_driver = { .name = "b43-sdio", .id_table = b43_sdio_ids, .probe = b43_sdio_probe, - .remove = b43_sdio_remove, + .remove = __devexit_p(b43_sdio_remove), }; int b43_sdio_init(void) diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c index 2c5367884b3..b31ccc02fa2 100644 --- a/drivers/net/wireless/b43/xmit.c +++ b/drivers/net/wireless/b43/xmit.c @@ -290,7 +290,8 @@ int b43_generate_txhdr(struct b43_wldev *dev, txhdr->dur_fb = wlhdr->duration_id; } else { txhdr->dur_fb = ieee80211_generic_frame_duration( - dev->wl->hw, info->control.vif, fragment_len, fbrate); + dev->wl->hw, info->control.vif, info->band, + fragment_len, fbrate); } plcp_fragment_len = fragment_len + FCS_LEN; @@ -378,7 +379,7 @@ int b43_generate_txhdr(struct b43_wldev *dev, if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) phy_ctl |= B43_TXH_PHY_SHORTPRMBL; - switch (b43_ieee80211_antenna_sanitize(dev, info->antenna_sel_tx)) { + switch (b43_ieee80211_antenna_sanitize(dev, 0)) { case 0: /* Default */ phy_ctl |= B43_TXH_PHY_ANT01AUTO; break; diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index df7e16dfb36..1be214b815f 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c @@ -1056,6 +1056,7 @@ static void b43legacy_write_probe_resp_plcp(struct b43legacy_wldev *dev, b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->hw_value); dur = ieee80211_generic_frame_duration(dev->wl->hw, dev->wl->vif, + IEEE80211_BAND_2GHZ, size, rate); /* Write PLCP in two parts and timing for packet transfer */ @@ -1121,6 +1122,7 @@ static const u8 *b43legacy_generate_probe_resp(struct b43legacy_wldev *dev, IEEE80211_STYPE_PROBE_RESP); dur = ieee80211_generic_frame_duration(dev->wl->hw, dev->wl->vif, + IEEE80211_BAND_2GHZ, *dest_size, rate); hdr->duration_id = dur; diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c index 5188fab0b37..a8012f2749e 100644 --- a/drivers/net/wireless/b43legacy/xmit.c +++ b/drivers/net/wireless/b43legacy/xmit.c @@ -228,6 +228,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev, } else { txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw, info->control.vif, + info->band, fragment_len, rate_fb); } @@ -277,19 +278,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev, phy_ctl |= B43legacy_TX4_PHY_ENC_OFDM; if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL; - switch (info->antenna_sel_tx) { - case 0: - phy_ctl |= B43legacy_TX4_PHY_ANTLAST; - break; - case 1: - phy_ctl |= B43legacy_TX4_PHY_ANT0; - break; - case 2: - phy_ctl |= B43legacy_TX4_PHY_ANT1; - break; - default: - B43legacy_BUG_ON(1); - } + phy_ctl |= B43legacy_TX4_PHY_ANTLAST; /* MAC control */ rates = info->control.rates; diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig index c5104533e24..b480088b3db 100644 --- a/drivers/net/wireless/brcm80211/Kconfig +++ b/drivers/net/wireless/brcm80211/Kconfig @@ -36,6 +36,15 @@ config BRCMFMAC_SDIO IEEE802.11n embedded FullMAC WLAN driver. 
Say Y if you want to use the driver for a SDIO wireless card. +config BRCMFMAC_SDIO_OOB + bool "Out of band interrupt support for SDIO interface chipset" + depends on BRCMFMAC_SDIO + ---help--- + This option enables out-of-band interrupt support for Broadcom + SDIO Wifi chipset using fullmac in order to gain better + performance and deep sleep wake up capability on certain + platforms. Say N if you are unsure. + config BRCMFMAC_USB bool "USB bus interface support for FullMAC driver" depends on USB diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index e925290b432..4add7da2468 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c @@ -39,37 +39,113 @@ #define SDIOH_API_ACCESS_RETRY_LIMIT 2 -static void brcmf_sdioh_irqhandler(struct sdio_func *func) +#ifdef CONFIG_BRCMFMAC_SDIO_OOB +static irqreturn_t brcmf_sdio_irqhandler(int irq, void *dev_id) { - struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev); + struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(dev_id); - brcmf_dbg(TRACE, "***IRQHandler\n"); + brcmf_dbg(INTR, "oob intr triggered\n"); - sdio_release_host(func); + /* + * out-of-band interrupt is level-triggered which won't + * be cleared until dpc + */ + if (sdiodev->irq_en) { + disable_irq_nosync(irq); + sdiodev->irq_en = false; + } brcmf_sdbrcm_isr(sdiodev->bus); - sdio_claim_host(func); + return IRQ_HANDLED; +} + +int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev) +{ + int ret = 0; + u8 data; + unsigned long flags; + + brcmf_dbg(TRACE, "Entering\n"); + + brcmf_dbg(ERROR, "requesting irq %d\n", sdiodev->irq); + ret = request_irq(sdiodev->irq, brcmf_sdio_irqhandler, + sdiodev->irq_flags, "brcmf_oob_intr", + &sdiodev->func[1]->card->dev); + if (ret != 0) + return ret; + spin_lock_init(&sdiodev->irq_en_lock); + spin_lock_irqsave(&sdiodev->irq_en_lock, flags); + sdiodev->irq_en = true; + spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags); + + ret = enable_irq_wake(sdiodev->irq); + if (ret != 0) + return ret; + sdiodev->irq_wake = true; + + /* must configure SDIO_CCCR_IENx to enable irq */ + data = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_0, + SDIO_CCCR_IENx, &ret); + data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1; + brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_IENx, + data, &ret); + + /* redirect, configure and enable io for interrupt signal */ + data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE; + if (sdiodev->irq_flags | IRQF_TRIGGER_HIGH) + data |= SDIO_SEPINT_ACT_HI; + brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_BRCM_SEPINT, + data, &ret); + + return 0; +} + +int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev) +{ + brcmf_dbg(TRACE, "Entering\n"); + + brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_BRCM_SEPINT, + 0, NULL); + brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_IENx, 0, NULL); + + if (sdiodev->irq_wake) { + disable_irq_wake(sdiodev->irq); + sdiodev->irq_wake = false; + } + free_irq(sdiodev->irq, &sdiodev->func[1]->card->dev); + sdiodev->irq_en = false; + + return 0; +} +#else /* CONFIG_BRCMFMAC_SDIO_OOB */ +static void brcmf_sdio_irqhandler(struct sdio_func *func) +{ + struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev); + + brcmf_dbg(INTR, "ib intr triggered\n"); + + brcmf_sdbrcm_isr(sdiodev->bus); } /* dummy handler for SDIO function 2 interrupt */ -static void brcmf_sdioh_dummy_irq_handler(struct sdio_func *func) +static void brcmf_sdio_dummy_irqhandler(struct sdio_func
*func) { } -int brcmf_sdcard_intr_reg(struct brcmf_sdio_dev *sdiodev) +int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev) { brcmf_dbg(TRACE, "Entering\n"); sdio_claim_host(sdiodev->func[1]); - sdio_claim_irq(sdiodev->func[1], brcmf_sdioh_irqhandler); - sdio_claim_irq(sdiodev->func[2], brcmf_sdioh_dummy_irq_handler); + sdio_claim_irq(sdiodev->func[1], brcmf_sdio_irqhandler); + sdio_claim_irq(sdiodev->func[2], brcmf_sdio_dummy_irqhandler); sdio_release_host(sdiodev->func[1]); return 0; } -int brcmf_sdcard_intr_dereg(struct brcmf_sdio_dev *sdiodev) +int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev) { brcmf_dbg(TRACE, "Entering\n"); @@ -80,6 +156,7 @@ int brcmf_sdcard_intr_dereg(struct brcmf_sdio_dev *sdiodev) return 0; } +#endif /* CONFIG_BRCMFMAC_SDIO_OOB */ u8 brcmf_sdcard_cfg_read(struct brcmf_sdio_dev *sdiodev, uint fnc_num, u32 addr, int *err) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c index 758c115b556..dd07d33a927 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c @@ -27,6 +27,7 @@ #include <linux/errno.h> #include <linux/sched.h> /* request_irq() */ #include <linux/module.h> +#include <linux/platform_device.h> #include <net/cfg80211.h> #include <defs.h> @@ -55,6 +56,15 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = { }; MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); +#ifdef CONFIG_BRCMFMAC_SDIO_OOB +static struct list_head oobirq_lh; +struct brcmf_sdio_oobirq { + unsigned int irq; + unsigned long flags; + struct list_head list; +}; +#endif /* CONFIG_BRCMFMAC_SDIO_OOB */ + static bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev) { @@ -107,7 +117,8 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev, } sdio_release_host(sdfunc); } - } else if (regaddr == SDIO_CCCR_ABORT) { + } else if ((regaddr == SDIO_CCCR_ABORT) || + (regaddr == SDIO_CCCR_IENx)) { sdfunc = kmemdup(sdiodev->func[0], sizeof(struct sdio_func), GFP_KERNEL); if (!sdfunc) @@ -467,12 +478,40 @@ void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev) } +#ifdef CONFIG_BRCMFMAC_SDIO_OOB +static int brcmf_sdio_getintrcfg(struct brcmf_sdio_dev *sdiodev) +{ + struct brcmf_sdio_oobirq *oobirq_entry; + + if (list_empty(&oobirq_lh)) { + brcmf_dbg(ERROR, "no valid oob irq resource\n"); + return -ENXIO; + } + + oobirq_entry = list_first_entry(&oobirq_lh, struct brcmf_sdio_oobirq, + list); + + sdiodev->irq = oobirq_entry->irq; + sdiodev->irq_flags = oobirq_entry->flags; + list_del(&oobirq_entry->list); + kfree(oobirq_entry); + + return 0; +} +#else +static inline int brcmf_sdio_getintrcfg(struct brcmf_sdio_dev *sdiodev) +{ + return 0; +} +#endif /* CONFIG_BRCMFMAC_SDIO_OOB */ + static int brcmf_ops_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { int ret = 0; struct brcmf_sdio_dev *sdiodev; struct brcmf_bus *bus_if; + brcmf_dbg(TRACE, "Enter\n"); brcmf_dbg(TRACE, "func->class=%x\n", func->class); brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor); @@ -511,6 +550,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, sdiodev = dev_get_drvdata(&func->card->dev); if ((!sdiodev) || (sdiodev->func[1]->card != func->card)) return -ENODEV; + + ret = brcmf_sdio_getintrcfg(sdiodev); + if (ret) + return ret; sdiodev->func[2] = func; bus_if = sdiodev->bus_if; @@ -603,6 +646,65 @@ static struct sdio_driver brcmf_sdmmc_driver = { #endif /* CONFIG_PM_SLEEP */ }; +#ifdef CONFIG_BRCMFMAC_SDIO_OOB 
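The probe routine added just below collects the out-of-band interrupt configuration from a platform device: board support code is expected to register a device named "brcmf_sdio_pd" whose IORESOURCE_IRQ entries carry the host-wake interrupt number and its trigger flags, which brcmf_sdio_pd_probe() then walks with platform_get_resource(). A minimal board-file sketch of that arrangement follows; it is not part of the patch, and the IRQ number, variable names and trigger type are made-up example values.

	#include <linux/platform_device.h>
	#include <linux/interrupt.h>
	#include <linux/ioport.h>

	/* Out-of-band host-wake interrupt for the SDIO WiFi chip (example values). */
	static struct resource brcmf_oob_irq_res = {
		.start	= 123,				/* e.g. gpio_to_irq(WLAN_HOST_WAKE) */
		.end	= 123,
		.flags	= IORESOURCE_IRQ | IRQF_TRIGGER_HIGH,	/* trigger bits read via IRQF_TRIGGER_MASK */
	};

	static struct platform_device brcmf_sdio_pdev = {
		.name		= "brcmf_sdio_pd",	/* matches the platform driver name below */
		.id		= -1,
		.resource	= &brcmf_oob_irq_res,
		.num_resources	= 1,
	};

	/* Registered from the board's init code: platform_device_register(&brcmf_sdio_pdev); */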
+static int brcmf_sdio_pd_probe(struct platform_device *pdev) +{ + struct resource *res; + struct brcmf_sdio_oobirq *oobirq_entry; + int i, ret; + + INIT_LIST_HEAD(&oobirq_lh); + + for (i = 0; ; i++) { + res = platform_get_resource(pdev, IORESOURCE_IRQ, i); + if (!res) + break; + + oobirq_entry = kzalloc(sizeof(struct brcmf_sdio_oobirq), + GFP_KERNEL); + oobirq_entry->irq = res->start; + oobirq_entry->flags = res->flags & IRQF_TRIGGER_MASK; + list_add_tail(&oobirq_entry->list, &oobirq_lh); + } + if (i == 0) + return -ENXIO; + + ret = sdio_register_driver(&brcmf_sdmmc_driver); + + if (ret) + brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret); + + return ret; +} + +static struct platform_driver brcmf_sdio_pd = { + .probe = brcmf_sdio_pd_probe, + .driver = { + .name = "brcmf_sdio_pd" + } +}; + +void brcmf_sdio_exit(void) +{ + brcmf_dbg(TRACE, "Enter\n"); + + sdio_unregister_driver(&brcmf_sdmmc_driver); + + platform_driver_unregister(&brcmf_sdio_pd); +} + +void brcmf_sdio_init(void) +{ + int ret; + + brcmf_dbg(TRACE, "Enter\n"); + + ret = platform_driver_register(&brcmf_sdio_pd); + + if (ret) + brcmf_dbg(ERROR, "platform_driver_register failed: %d\n", ret); +} +#else void brcmf_sdio_exit(void) { brcmf_dbg(TRACE, "Enter\n"); @@ -621,3 +723,4 @@ void brcmf_sdio_init(void) if (ret) brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret); } +#endif /* CONFIG_BRCMFMAC_SDIO_OOB */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h index 07686a748d3..9f637014486 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h @@ -632,7 +632,6 @@ extern const struct bcmevent_name bcmevent_names[]; extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint len); -extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx); extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev); extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c index b3e3b7f25d8..a5c15cac5e7 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c @@ -421,6 +421,7 @@ int brcmf_proto_hdrpull(struct device *dev, int *ifidx, pktbuf->priority = h->priority & BDC_PRIORITY_MASK; skb_pull(pktbuf, BDC_HEADER_LEN); + skb_pull(pktbuf, h->data_offset << 2); return 0; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c index 4187435220f..236cb9fa460 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c @@ -799,7 +799,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr) { char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ - uint up = 0; char buf[128], *ptr; u32 dongle_align = drvr->bus_if->align; u32 glom = 0; @@ -853,9 +852,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr) brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); - /* Force STA UP */ - brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_UP, (char *)&up, sizeof(up)); - /* Setup event_msgs */ brcmf_c_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index 2a1e5ae0c40..8933f9b31a9 100644 --- 
a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c @@ -799,6 +799,7 @@ static int brcmf_netdev_open(struct net_device *ndev) struct brcmf_bus *bus_if = drvr->bus_if; u32 toe_ol; s32 ret = 0; + uint up = 0; brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx); @@ -822,6 +823,10 @@ static int brcmf_netdev_open(struct net_device *ndev) drvr->iflist[ifp->idx]->ndev->features &= ~NETIF_F_IP_CSUM; } + + /* make sure RF is ready for work */ + brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_UP, (char *)&up, sizeof(up)); + /* Allow transmit calls */ netif_start_queue(ndev); drvr->bus_if->drvr_up = true; @@ -843,6 +848,63 @@ static const struct net_device_ops brcmf_netdev_ops_pri = { .ndo_set_rx_mode = brcmf_netdev_set_multicast_list }; +static int brcmf_net_attach(struct brcmf_if *ifp) +{ + struct brcmf_pub *drvr = ifp->drvr; + struct net_device *ndev; + u8 temp_addr[ETH_ALEN]; + + brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx); + + ndev = drvr->iflist[ifp->idx]->ndev; + ndev->netdev_ops = &brcmf_netdev_ops_pri; + + /* + * determine mac address to use + */ + if (is_valid_ether_addr(ifp->mac_addr)) + memcpy(temp_addr, ifp->mac_addr, ETH_ALEN); + else + memcpy(temp_addr, drvr->mac, ETH_ALEN); + + if (ifp->idx == 1) { + brcmf_dbg(TRACE, "ACCESS POINT MAC:\n"); + /* ACCESSPOINT INTERFACE CASE */ + temp_addr[0] |= 0X02; /* set bit 2 , + - Locally Administered address */ + + } + ndev->hard_header_len = ETH_HLEN + drvr->hdrlen; + ndev->ethtool_ops = &brcmf_ethtool_ops; + + drvr->rxsz = ndev->mtu + ndev->hard_header_len + + drvr->hdrlen; + + memcpy(ndev->dev_addr, temp_addr, ETH_ALEN); + + /* attach to cfg80211 for primary interface */ + if (!ifp->idx) { + drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr); + if (drvr->config == NULL) { + brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n"); + goto fail; + } + } + + if (register_netdev(ndev) != 0) { + brcmf_dbg(ERROR, "couldn't register the net device\n"); + goto fail; + } + + brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name); + + return 0; + +fail: + ndev->netdev_ops = NULL; + return -EBADE; +} + int brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr) { @@ -882,7 +944,7 @@ brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr) if (mac_addr != NULL) memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN); - if (brcmf_net_attach(drvr, ifp->idx)) { + if (brcmf_net_attach(ifp)) { brcmf_dbg(ERROR, "brcmf_net_attach failed"); free_netdev(ifp->ndev); drvr->iflist[ifidx] = NULL; @@ -1016,69 +1078,16 @@ int brcmf_bus_start(struct device *dev) if (ret < 0) return ret; + /* add primary networking interface */ + ret = brcmf_add_if(dev, 0, "wlan%d", drvr->mac); + if (ret < 0) + return ret; + /* signal bus ready */ bus_if->state = BRCMF_BUS_DATA; return 0; } -int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx) -{ - struct net_device *ndev; - u8 temp_addr[ETH_ALEN] = { - 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33}; - - brcmf_dbg(TRACE, "ifidx %d\n", ifidx); - - ndev = drvr->iflist[ifidx]->ndev; - ndev->netdev_ops = &brcmf_netdev_ops_pri; - - /* - * We have to use the primary MAC for virtual interfaces - */ - if (ifidx != 0) { - /* for virtual interfaces use the primary MAC */ - memcpy(temp_addr, drvr->mac, ETH_ALEN); - - } - - if (ifidx == 1) { - brcmf_dbg(TRACE, "ACCESS POINT MAC:\n"); - /* ACCESSPOINT INTERFACE CASE */ - temp_addr[0] |= 0X02; /* set bit 2 , - - Locally Administered address */ - - } - ndev->hard_header_len = ETH_HLEN + drvr->hdrlen; - ndev->ethtool_ops = &brcmf_ethtool_ops; - - 
drvr->rxsz = ndev->mtu + ndev->hard_header_len + - drvr->hdrlen; - - memcpy(ndev->dev_addr, temp_addr, ETH_ALEN); - - /* attach to cfg80211 for primary interface */ - if (!ifidx) { - drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr); - if (drvr->config == NULL) { - brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n"); - goto fail; - } - } - - if (register_netdev(ndev) != 0) { - brcmf_dbg(ERROR, "couldn't register the net device\n"); - goto fail; - } - - brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name); - - return 0; - -fail: - ndev->netdev_ops = NULL; - return -EBADE; -} - static void brcmf_bus_detach(struct brcmf_pub *drvr) { brcmf_dbg(TRACE, "Enter\n"); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index e2b34e1563f..149ee67beb2 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -2352,6 +2352,24 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev) up(&bus->sdsem); } +#ifdef CONFIG_BRCMFMAC_SDIO_OOB +static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus) +{ + unsigned long flags; + + spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags); + if (!bus->sdiodev->irq_en && !bus->ipend) { + enable_irq(bus->sdiodev->irq); + bus->sdiodev->irq_en = true; + } + spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags); +} +#else +static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus) +{ +} +#endif /* CONFIG_BRCMFMAC_SDIO_OOB */ + static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) { u32 intstatus, newstatus = 0; @@ -2509,6 +2527,8 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) bus->intstatus = intstatus; clkwait: + brcmf_sdbrcm_clrintr(bus); + if (data_ok(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) { int ret, i; @@ -3508,8 +3528,14 @@ static int brcmf_sdbrcm_bus_init(struct device *dev) brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err); + if (ret == 0) { + ret = brcmf_sdio_intr_register(bus->sdiodev); + if (ret != 0) + brcmf_dbg(ERROR, "intr register failed:%d\n", ret); + } + /* If we didn't come up, turn off backplane clock */ - if (!ret) + if (bus_if->state != BRCMF_BUS_DATA) brcmf_sdbrcm_clkctl(bus, CLK_NONE, false); exit: @@ -3867,7 +3893,7 @@ static void brcmf_sdbrcm_release(struct brcmf_sdio *bus) if (bus) { /* De-register interrupt handler */ - brcmf_sdcard_intr_dereg(bus->sdiodev); + brcmf_sdio_intr_unregister(bus->sdiodev); if (bus->sdiodev->bus_if->drvr) { brcmf_detach(bus->sdiodev->dev); @@ -3968,15 +3994,6 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) goto fail; } - /* Register interrupt callback, but mask it (not operational yet). 
*/ - brcmf_dbg(INTR, "disable SDIO interrupts (not interested yet)\n"); - ret = brcmf_sdcard_intr_reg(bus->sdiodev); - if (ret != 0) { - brcmf_dbg(ERROR, "FAILED: sdcard_intr_reg returned %d\n", ret); - goto fail; - } - brcmf_dbg(INTR, "registered SDIO interrupt function ok\n"); - brcmf_dbg(INFO, "completed!!\n"); /* if firmware path present try to download and bring up bus */ @@ -3988,12 +4005,6 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) } } - /* add interface and open for business */ - if (brcmf_add_if(bus->sdiodev->dev, 0, "wlan%d", NULL)) { - brcmf_dbg(ERROR, "Add primary net device interface failed!!\n"); - goto fail; - } - return bus; fail: diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h index 0281d207d99..7010eaf71f9 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h @@ -43,6 +43,13 @@ /* as of sdiod rev 0, supports 3 functions */ #define SBSDIO_NUM_FUNCTION 3 +/* function 0 vendor specific CCCR registers */ +#define SDIO_CCCR_BRCM_SEPINT 0xf2 + +#define SDIO_SEPINT_MASK 0x01 +#define SDIO_SEPINT_OE 0x02 +#define SDIO_SEPINT_ACT_HI 0x04 + /* function 1 miscellaneous registers */ /* sprom command and status */ @@ -144,13 +151,18 @@ struct brcmf_sdio_dev { wait_queue_head_t request_buffer_wait; struct device *dev; struct brcmf_bus *bus_if; +#ifdef CONFIG_BRCMFMAC_SDIO_OOB + unsigned int irq; /* oob interrupt number */ + unsigned long irq_flags; /* board specific oob flags */ + bool irq_en; /* irq enable flags */ + spinlock_t irq_en_lock; + bool irq_wake; /* irq wake enable flags */ +#endif /* CONFIG_BRCMFMAC_SDIO_OOB */ }; -/* Register/deregister device interrupt handler. */ -extern int -brcmf_sdcard_intr_reg(struct brcmf_sdio_dev *sdiodev); - -extern int brcmf_sdcard_intr_dereg(struct brcmf_sdio_dev *sdiodev); +/* Register/deregister interrupt handler. */ +extern int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev); +extern int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev); /* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface). * fn: function number diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c index 82364223e81..1d67ecf681b 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c @@ -1383,14 +1383,6 @@ static int brcmf_usb_probe_cb(struct device *dev, const char *desc, goto fail; } - /* add interface and open for business */ - ret = brcmf_add_if(dev, 0, "wlan%d", NULL); - if (ret) { - brcmf_dbg(ERROR, "Add primary net device interface failed!!\n"); - brcmf_detach(dev); - goto fail; - } - return 0; fail: /* Release resources in reverse order */ diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c index 55e9f45fce2..0efe88e25a9 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c @@ -628,6 +628,40 @@ brcms_c_country_aggregate_map(struct brcms_cm_info *wlc_cm, const char *ccode, return false; } +/* + * Indicates whether the country provided is valid to pass + * to cfg80211 or not. + * + * returns true if valid; false if not. + */ +static bool brcms_c_country_valid(const char *ccode) +{ + /* + * only allow ascii alpha uppercase for the first 2 + * chars. 
+ */ + if (!((0x80 & ccode[0]) == 0 && ccode[0] >= 0x41 && ccode[0] <= 0x5A && + (0x80 & ccode[1]) == 0 && ccode[1] >= 0x41 && ccode[1] <= 0x5A && + ccode[2] == '\0')) + return false; + + /* + * do not match ISO 3166-1 user assigned country codes + * that may be in the driver table + */ + if (!strcmp("AA", ccode) || /* AA */ + !strcmp("ZZ", ccode) || /* ZZ */ + ccode[0] == 'X' || /* XA - XZ */ + (ccode[0] == 'Q' && /* QM - QZ */ + (ccode[1] >= 'M' && ccode[1] <= 'Z'))) + return false; + + if (!strcmp("NA", ccode)) + return false; + + return true; +} + /* Lookup a country info structure from a null terminated country * abbreviation and regrev directly with no translation. */ @@ -1089,7 +1123,7 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc) /* store the country code for passing up as a regulatory hint */ ccode = getvar(wlc->hw->sih, BRCMS_SROM_CCODE); - if (ccode) + if (ccode && brcms_c_country_valid(ccode)) strncpy(wlc->pub->srom_ccode, ccode, BRCM_CNTRY_BUF_SZ - 1); /* diff --git a/drivers/net/wireless/brcm80211/brcmsmac/d11.h b/drivers/net/wireless/brcm80211/brcmsmac/d11.h index 1948cb2771e..3f659e09f1c 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/d11.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/d11.h @@ -733,7 +733,7 @@ struct cck_phy_hdr { do { \ plcp[1] = len & 0xff; \ plcp[2] = ((len >> 8) & 0xff); \ - } while (0); + } while (0) #define BRCMS_SET_MIMO_PLCP_AMPDU(plcp) (plcp[3] |= MIMO_PLCP_AMPDU) #define BRCMS_CLR_MIMO_PLCP_AMPDU(plcp) (plcp[3] &= ~MIMO_PLCP_AMPDU) diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 569ab8abd2a..aa15558f75c 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -1069,11 +1069,7 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev) wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status" "%d\n", __func__, err); - if (wl->pub->srom_ccode[0]) - err = brcms_set_hint(wl, wl->pub->srom_ccode); - else - err = brcms_set_hint(wl, "US"); - if (err) + if (wl->pub->srom_ccode[0] && brcms_set_hint(wl, wl->pub->srom_ccode)) wiphy_err(wl->wiphy, "%s: regulatory_hint failed, status %d\n", __func__, err); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index ce8562aa5db..0fce56235f3 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c @@ -207,8 +207,7 @@ static const iqcal_gain_params_lcnphy *tbl_iqcal_gainparams_lcnphy[1] = { }; static const u16 iqcal_gainparams_numgains_lcnphy[1] = { - sizeof(tbl_iqcal_gainparams_lcnphy_2G) / - sizeof(*tbl_iqcal_gainparams_lcnphy_2G), + ARRAY_SIZE(tbl_iqcal_gainparams_lcnphy_2G), }; static const struct lcnphy_sfo_cfg lcnphy_sfo_cfg[] = { diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c index 39095741fd0..812b6e38526 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c @@ -16353,11 +16353,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi) wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX, rfseq_rx2tx_events_rev3_ipa, rfseq_rx2tx_dlys_rev3_ipa, - sizeof - (rfseq_rx2tx_events_rev3_ipa) / - sizeof - (rfseq_rx2tx_events_rev3_ipa - [0])); + ARRAY_SIZE(rfseq_rx2tx_events_rev3_ipa)); mod_phy_reg(pi, 0x299, (0x3 << 14), (0x1 << 14)); 
mod_phy_reg(pi, 0x29d, (0x3 << 14), (0x1 << 14)); @@ -16858,18 +16854,13 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi) wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_TX2RX, rfseq_tx2rx_events_rev3, rfseq_tx2rx_dlys_rev3, - sizeof(rfseq_tx2rx_events_rev3) / - sizeof(rfseq_tx2rx_events_rev3[0])); + ARRAY_SIZE(rfseq_tx2rx_events_rev3)); if (PHY_IPA(pi)) wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX, rfseq_rx2tx_events_rev3_ipa, rfseq_rx2tx_dlys_rev3_ipa, - sizeof - (rfseq_rx2tx_events_rev3_ipa) / - sizeof - (rfseq_rx2tx_events_rev3_ipa - [0])); + ARRAY_SIZE(rfseq_rx2tx_events_rev3_ipa)); if ((pi->sh->hw_phyrxchain != 0x3) && (pi->sh->hw_phyrxchain != pi->sh->hw_phytxchain)) { @@ -16885,8 +16876,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi) pi, NPHY_RFSEQ_RX2TX, rfseq_rx2tx_events_rev3, rfseq_rx2tx_dlys_rev3, - sizeof(rfseq_rx2tx_events_rev3) / - sizeof(rfseq_rx2tx_events_rev3[0])); + ARRAY_SIZE(rfseq_rx2tx_events_rev3)); } if (CHSPEC_IS2G(pi->radio_chanspec)) @@ -17209,13 +17199,11 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi) wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX, rfseq_rx2tx_events, rfseq_rx2tx_dlys, - sizeof(rfseq_rx2tx_events) / - sizeof(rfseq_rx2tx_events[0])); + ARRAY_SIZE(rfseq_rx2tx_events)); wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_TX2RX, rfseq_tx2rx_events, rfseq_tx2rx_dlys, - sizeof(rfseq_tx2rx_events) / - sizeof(rfseq_tx2rx_events[0])); + ARRAY_SIZE(rfseq_tx2rx_events)); wlc_phy_workarounds_nphy_gainctrl(pi); @@ -19357,8 +19345,7 @@ static void wlc_phy_spurwar_nphy(struct brcms_phy *pi) } if (isAdjustNoiseVar) { - numTonesAdjust = sizeof(nphy_adj_tone_id_buf) / - sizeof(nphy_adj_tone_id_buf[0]); + numTonesAdjust = ARRAY_SIZE(nphy_adj_tone_id_buf); wlc_phy_adjust_min_noisevar_nphy( pi, @@ -25204,32 +25191,26 @@ static u8 wlc_phy_a3_nphy(struct brcms_phy *pi, u8 start_gain, u8 core) phy_a15 = pad_gain_codes_used_2057rev5; phy_a13 = - sizeof(pad_gain_codes_used_2057rev5) / - sizeof(pad_gain_codes_used_2057rev5 - [0]) - 1; + ARRAY_SIZE(pad_gain_codes_used_2057rev5) - 1; } else if ((pi->pubpi.radiorev == 7) || (pi->pubpi.radiorev == 8)) { phy_a15 = pad_gain_codes_used_2057rev7; phy_a13 = - sizeof(pad_gain_codes_used_2057rev7) / - sizeof(pad_gain_codes_used_2057rev7 - [0]) - 1; + ARRAY_SIZE(pad_gain_codes_used_2057rev7) - 1; } else { phy_a15 = pad_all_gain_codes_2057; - phy_a13 = sizeof(pad_all_gain_codes_2057) / - sizeof(pad_all_gain_codes_2057[0]) - + phy_a13 = ARRAY_SIZE(pad_all_gain_codes_2057) - 1; } } else { phy_a15 = pga_all_gain_codes_2057; - phy_a13 = sizeof(pga_all_gain_codes_2057) / - sizeof(pga_all_gain_codes_2057[0]) - 1; + phy_a13 = ARRAY_SIZE(pga_all_gain_codes_2057) - 1; } phy_a14 = 0; diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h index 5fb17d53c9b..333193f20e1 100644 --- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h +++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h @@ -17,17 +17,7 @@ #ifndef _BRCM_HW_IDS_H_ #define _BRCM_HW_IDS_H_ -#define BCM4325_D11DUAL_ID 0x431b -#define BCM4325_D11G_ID 0x431c -#define BCM4325_D11A_ID 0x431d - -#define BCM4329_D11N2G_ID 0x432f /* 4329 802.11n 2.4G device */ -#define BCM4329_D11N5G_ID 0x4330 /* 4329 802.11n 5G device */ -#define BCM4329_D11NDUAL_ID 0x432e - -#define BCM4319_D11N_ID 0x4337 /* 4319 802.11n dualband device */ -#define BCM4319_D11N2G_ID 0x4338 /* 4319 802.11n 2.4G device */ -#define BCM4319_D11N5G_ID 0x4339 /* 4319 802.11n 5G device */ +#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G 
device */ #define BCM43224_D11N_ID 0x4353 /* 43224 802.11n dualband device */ #define BCM43224_D11N_ID_VEN1 0x0576 /* Vendor specific 43224 802.11n db */ @@ -37,23 +27,15 @@ #define BCM43236_D11N_ID 0x4346 /* 43236 802.11n dualband device */ #define BCM43236_D11N2G_ID 0x4347 /* 43236 802.11n 2.4GHz device */ -#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */ - -/* Chip IDs */ -#define BCM4313_CHIP_ID 0x4313 /* 4313 chip id */ -#define BCM4319_CHIP_ID 0x4319 /* 4319 chip id */ - -#define BCM43224_CHIP_ID 43224 /* 43224 chipcommon chipid */ -#define BCM43225_CHIP_ID 43225 /* 43225 chipcommon chipid */ -#define BCM43421_CHIP_ID 43421 /* 43421 chipcommon chipid */ -#define BCM43235_CHIP_ID 43235 /* 43235 chipcommon chipid */ -#define BCM43236_CHIP_ID 43236 /* 43236 chipcommon chipid */ -#define BCM43238_CHIP_ID 43238 /* 43238 chipcommon chipid */ -#define BCM4329_CHIP_ID 0x4329 /* 4329 chipcommon chipid */ -#define BCM4325_CHIP_ID 0x4325 /* 4325 chipcommon chipid */ -#define BCM4331_CHIP_ID 0x4331 /* 4331 chipcommon chipid */ -#define BCM4336_CHIP_ID 0x4336 /* 4336 chipcommon chipid */ -#define BCM4330_CHIP_ID 0x4330 /* 4330 chipcommon chipid */ -#define BCM6362_CHIP_ID 0x6362 /* 6362 chipcommon chipid */ +/* Chipcommon Core Chip IDs */ +#define BCM4313_CHIP_ID 0x4313 +#define BCM43224_CHIP_ID 43224 +#define BCM43225_CHIP_ID 43225 +#define BCM43235_CHIP_ID 43235 +#define BCM43236_CHIP_ID 43236 +#define BCM43238_CHIP_ID 43238 +#define BCM4329_CHIP_ID 0x4329 +#define BCM4330_CHIP_ID 0x4330 +#define BCM4331_CHIP_ID 0x4331 #endif /* _BRCM_HW_IDS_H_ */ diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index bfa0d54221e..627bc12074c 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c @@ -244,8 +244,7 @@ u16 hostap_tx_callback_register(local_info_t *local, unsigned long flags; struct hostap_tx_callback_info *entry; - entry = kmalloc(sizeof(*entry), - GFP_ATOMIC); + entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (entry == NULL) return 0; diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c index 972a9c3af39..05ca3402dca 100644 --- a/drivers/net/wireless/hostap/hostap_pci.c +++ b/drivers/net/wireless/hostap/hostap_pci.c @@ -457,18 +457,4 @@ static struct pci_driver prism2_pci_driver = { #endif /* CONFIG_PM */ }; - -static int __init init_prism2_pci(void) -{ - return pci_register_driver(&prism2_pci_driver); -} - - -static void __exit exit_prism2_pci(void) -{ - pci_unregister_driver(&prism2_pci_driver); -} - - -module_init(init_prism2_pci); -module_exit(exit_prism2_pci); +module_pci_driver(prism2_pci_driver); diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c index 33e79037770..c3d067ee4db 100644 --- a/drivers/net/wireless/hostap/hostap_plx.c +++ b/drivers/net/wireless/hostap/hostap_plx.c @@ -616,18 +616,4 @@ static struct pci_driver prism2_plx_driver = { .remove = prism2_plx_remove, }; - -static int __init init_prism2_plx(void) -{ - return pci_register_driver(&prism2_plx_driver); -} - - -static void __exit exit_prism2_plx(void) -{ - pci_unregister_driver(&prism2_plx_driver); -} - - -module_init(init_prism2_plx); -module_exit(exit_prism2_plx); +module_pci_driver(prism2_plx_driver); diff --git a/drivers/net/wireless/ipw2x00/ipw.h b/drivers/net/wireless/ipw2x00/ipw.h new file mode 100644 index 00000000000..4007bf5ed6f --- /dev/null +++ b/drivers/net/wireless/ipw2x00/ipw.h @@ -0,0 +1,23 @@ +/* + * Intel 
Pro/Wireless 2100, 2200BG, 2915ABG network connection driver + * + * Copyright 2012 Stanislav Yakovlev <stas.yakovlev@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __IPW_H__ +#define __IPW_H__ + +#include <linux/ieee80211.h> + +static const u32 ipw_cipher_suites[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, +}; + +#endif diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index f0551f807f6..9cfae0c0870 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -166,6 +166,7 @@ that only one external action is invoked at a time. #include <net/lib80211.h> #include "ipw2100.h" +#include "ipw.h" #define IPW2100_VERSION "git-1.2.2" @@ -343,38 +344,50 @@ static struct iw_handler_def ipw2100_wx_handler_def; static inline void read_register(struct net_device *dev, u32 reg, u32 * val) { - *val = readl((void __iomem *)(dev->base_addr + reg)); + struct ipw2100_priv *priv = libipw_priv(dev); + + *val = ioread32(priv->ioaddr + reg); IPW_DEBUG_IO("r: 0x%08X => 0x%08X\n", reg, *val); } static inline void write_register(struct net_device *dev, u32 reg, u32 val) { - writel(val, (void __iomem *)(dev->base_addr + reg)); + struct ipw2100_priv *priv = libipw_priv(dev); + + iowrite32(val, priv->ioaddr + reg); IPW_DEBUG_IO("w: 0x%08X <= 0x%08X\n", reg, val); } static inline void read_register_word(struct net_device *dev, u32 reg, u16 * val) { - *val = readw((void __iomem *)(dev->base_addr + reg)); + struct ipw2100_priv *priv = libipw_priv(dev); + + *val = ioread16(priv->ioaddr + reg); IPW_DEBUG_IO("r: 0x%08X => %04X\n", reg, *val); } static inline void read_register_byte(struct net_device *dev, u32 reg, u8 * val) { - *val = readb((void __iomem *)(dev->base_addr + reg)); + struct ipw2100_priv *priv = libipw_priv(dev); + + *val = ioread8(priv->ioaddr + reg); IPW_DEBUG_IO("r: 0x%08X => %02X\n", reg, *val); } static inline void write_register_word(struct net_device *dev, u32 reg, u16 val) { - writew(val, (void __iomem *)(dev->base_addr + reg)); + struct ipw2100_priv *priv = libipw_priv(dev); + + iowrite16(val, priv->ioaddr + reg); IPW_DEBUG_IO("w: 0x%08X <= %04X\n", reg, val); } static inline void write_register_byte(struct net_device *dev, u32 reg, u8 val) { - writeb(val, (void __iomem *)(dev->base_addr + reg)); + struct ipw2100_priv *priv = libipw_priv(dev); + + iowrite8(val, priv->ioaddr + reg); IPW_DEBUG_IO("w: 0x%08X =< %02X\n", reg, val); } @@ -506,13 +519,13 @@ static void read_nic_memory(struct net_device *dev, u32 addr, u32 len, read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA + i, buf); } -static inline int ipw2100_hw_is_adapter_in_system(struct net_device *dev) +static bool ipw2100_hw_is_adapter_in_system(struct net_device *dev) { - return (dev->base_addr && - (readl - ((void __iomem *)(dev->base_addr + - IPW_REG_DOA_DEBUG_AREA_START)) - == IPW_DATA_DOA_DEBUG_VALUE)); + u32 dbg; + + read_register(dev, IPW_REG_DOA_DEBUG_AREA_START, &dbg); + + return dbg == IPW_DATA_DOA_DEBUG_VALUE; } static int ipw2100_get_ordinal(struct ipw2100_priv *priv, u32 ord, @@ -1946,11 +1959,12 @@ static int ipw2100_wdev_init(struct net_device *dev) wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band; } + wdev->wiphy->cipher_suites = ipw_cipher_suites; + wdev->wiphy->n_cipher_suites = 
ARRAY_SIZE(ipw_cipher_suites); + set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); - if (wiphy_register(wdev->wiphy)) { - ipw2100_down(priv); + if (wiphy_register(wdev->wiphy)) return -EIO; - } return 0; } @@ -3773,7 +3787,7 @@ IPW2100_ORD(STAT_TX_HOST_REQUESTS, "requested Host Tx's (MSDU)"), IPW2100_ORD(COUNTRY_CODE, "IEEE country code as recv'd from beacon"), IPW2100_ORD(COUNTRY_CHANNELS, - "channels suported by country"), + "channels supported by country"), IPW2100_ORD(RESET_CNT, "adapter resets (warm)"), IPW2100_ORD(BEACON_INTERVAL, "Beacon interval"), IPW2100_ORD(ANTENNA_DIVERSITY, @@ -4062,7 +4076,7 @@ static int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode) ipw2100_firmware.version = 0; #endif - printk(KERN_INFO "%s: Reseting on mode change.\n", priv->net_dev->name); + printk(KERN_INFO "%s: Resetting on mode change.\n", priv->net_dev->name); priv->reset_backoff = 0; schedule_reset(priv); @@ -6082,9 +6096,7 @@ static const struct net_device_ops ipw2100_netdev_ops = { /* Look into using netdev destructor to shutdown libipw? */ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, - void __iomem * base_addr, - unsigned long mem_start, - unsigned long mem_len) + void __iomem * ioaddr) { struct ipw2100_priv *priv; struct net_device *dev; @@ -6096,6 +6108,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, priv->ieee = netdev_priv(dev); priv->pci_dev = pci_dev; priv->net_dev = dev; + priv->ioaddr = ioaddr; priv->ieee->hard_start_xmit = ipw2100_tx; priv->ieee->set_security = shim__set_security; @@ -6111,10 +6124,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, dev->watchdog_timeo = 3 * HZ; dev->irq = 0; - dev->base_addr = (unsigned long)base_addr; - dev->mem_start = mem_start; - dev->mem_end = dev->mem_start + mem_len - 1; - /* NOTE: We don't use the wireless_handlers hook * in dev as the system will start throwing WX requests * to us before we're actually initialized and it just @@ -6215,8 +6224,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, static int ipw2100_pci_init_one(struct pci_dev *pci_dev, const struct pci_device_id *ent) { - unsigned long mem_start, mem_len, mem_flags; - void __iomem *base_addr = NULL; + void __iomem *ioaddr; struct net_device *dev = NULL; struct ipw2100_priv *priv = NULL; int err = 0; @@ -6225,18 +6233,14 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, IPW_DEBUG_INFO("enter\n"); - mem_start = pci_resource_start(pci_dev, 0); - mem_len = pci_resource_len(pci_dev, 0); - mem_flags = pci_resource_flags(pci_dev, 0); - - if ((mem_flags & IORESOURCE_MEM) != IORESOURCE_MEM) { + if (!(pci_resource_flags(pci_dev, 0) & IORESOURCE_MEM)) { IPW_DEBUG_INFO("weird - resource type is not memory\n"); err = -ENODEV; - goto fail; + goto out; } - base_addr = ioremap_nocache(mem_start, mem_len); - if (!base_addr) { + ioaddr = pci_iomap(pci_dev, 0, 0); + if (!ioaddr) { printk(KERN_WARNING DRV_NAME "Error calling ioremap_nocache.\n"); err = -EIO; @@ -6244,7 +6248,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, } /* allocate and initialize our net_device */ - dev = ipw2100_alloc_device(pci_dev, base_addr, mem_start, mem_len); + dev = ipw2100_alloc_device(pci_dev, ioaddr); if (!dev) { printk(KERN_WARNING DRV_NAME "Error calling ipw2100_alloc_device.\n"); @@ -6325,6 +6329,11 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, printk(KERN_INFO DRV_NAME ": Detected Intel PRO/Wireless 2100 Network Connection\n"); + err = ipw2100_wdev_init(dev); + if 
(err) + goto fail; + registered = 1; + /* Bring up the interface. Pre 0.46, after we registered the * network device we would call ipw2100_up. This introduced a race * condition with newer hotplug configurations (network was coming @@ -6341,11 +6350,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, "Error calling register_netdev.\n"); goto fail; } - registered = 1; - - err = ipw2100_wdev_init(dev); - if (err) - goto fail; + registered = 2; mutex_lock(&priv->action_mutex); @@ -6379,18 +6384,21 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, priv->status |= STATUS_INITIALIZED; mutex_unlock(&priv->action_mutex); - - return 0; +out: + return err; fail_unlock: mutex_unlock(&priv->action_mutex); - wiphy_unregister(priv->ieee->wdev.wiphy); - kfree(priv->ieee->bg_band.channels); fail: if (dev) { - if (registered) + if (registered >= 2) unregister_netdev(dev); + if (registered) { + wiphy_unregister(priv->ieee->wdev.wiphy); + kfree(priv->ieee->bg_band.channels); + } + ipw2100_hw_stop_adapter(priv); ipw2100_disable_interrupts(priv); @@ -6409,63 +6417,56 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, pci_set_drvdata(pci_dev, NULL); } - if (base_addr) - iounmap(base_addr); + pci_iounmap(pci_dev, ioaddr); pci_release_regions(pci_dev); pci_disable_device(pci_dev); - - return err; + goto out; } static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev) { struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); - struct net_device *dev; + struct net_device *dev = priv->net_dev; - if (priv) { - mutex_lock(&priv->action_mutex); + mutex_lock(&priv->action_mutex); - priv->status &= ~STATUS_INITIALIZED; + priv->status &= ~STATUS_INITIALIZED; - dev = priv->net_dev; - sysfs_remove_group(&pci_dev->dev.kobj, - &ipw2100_attribute_group); + sysfs_remove_group(&pci_dev->dev.kobj, &ipw2100_attribute_group); #ifdef CONFIG_PM - if (ipw2100_firmware.version) - ipw2100_release_firmware(priv, &ipw2100_firmware); + if (ipw2100_firmware.version) + ipw2100_release_firmware(priv, &ipw2100_firmware); #endif - /* Take down the hardware */ - ipw2100_down(priv); + /* Take down the hardware */ + ipw2100_down(priv); - /* Release the mutex so that the network subsystem can - * complete any needed calls into the driver... */ - mutex_unlock(&priv->action_mutex); + /* Release the mutex so that the network subsystem can + * complete any needed calls into the driver... */ + mutex_unlock(&priv->action_mutex); - /* Unregister the device first - this results in close() - * being called if the device is open. If we free storage - * first, then close() will crash. */ - unregister_netdev(dev); + /* Unregister the device first - this results in close() + * being called if the device is open. If we free storage + * first, then close() will crash. + * FIXME: remove the comment above. 
*/ + unregister_netdev(dev); - ipw2100_kill_works(priv); + ipw2100_kill_works(priv); - ipw2100_queues_free(priv); + ipw2100_queues_free(priv); - /* Free potential debugging firmware snapshot */ - ipw2100_snapshot_free(priv); + /* Free potential debugging firmware snapshot */ + ipw2100_snapshot_free(priv); - if (dev->irq) - free_irq(dev->irq, priv); + free_irq(dev->irq, priv); - if (dev->base_addr) - iounmap((void __iomem *)dev->base_addr); + pci_iounmap(pci_dev, priv->ioaddr); - /* wiphy_unregister needs to be here, before free_libipw */ - wiphy_unregister(priv->ieee->wdev.wiphy); - kfree(priv->ieee->bg_band.channels); - free_libipw(dev, 0); - } + /* wiphy_unregister needs to be here, before free_libipw */ + wiphy_unregister(priv->ieee->wdev.wiphy); + kfree(priv->ieee->bg_band.channels); + free_libipw(dev, 0); pci_release_regions(pci_dev); pci_disable_device(pci_dev); @@ -8508,8 +8509,7 @@ static void ipw2100_release_firmware(struct ipw2100_priv *priv, struct ipw2100_fw *fw) { fw->version = 0; - if (fw->fw_entry) - release_firmware(fw->fw_entry); + release_firmware(fw->fw_entry); fw->fw_entry = NULL; } @@ -8609,7 +8609,7 @@ static int ipw2100_ucode_download(struct ipw2100_priv *priv, struct net_device *dev = priv->net_dev; const unsigned char *microcode_data = fw->uc.data; unsigned int microcode_data_left = fw->uc.size; - void __iomem *reg = (void __iomem *)dev->base_addr; + void __iomem *reg = priv->ioaddr; struct symbol_alive_response response; int i, j; diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h index 99cba968aa5..97312524249 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.h +++ b/drivers/net/wireless/ipw2x00/ipw2100.h @@ -135,15 +135,6 @@ enum { IPW_HW_STATE_ENABLED = 0 }; -struct ssid_context { - char ssid[IW_ESSID_MAX_SIZE + 1]; - int ssid_len; - unsigned char bssid[ETH_ALEN]; - int port_type; - int channel; - -}; - extern const char *port_type_str[]; extern const char *band_str[]; @@ -488,6 +479,7 @@ enum { #define CAP_PRIVACY_ON (1<<1) /* Off = No privacy */ struct ipw2100_priv { + void __iomem *ioaddr; int stop_hang_check; /* Set 1 when shutting down to kill hang_check */ int stop_rf_kill; /* Set 1 when shutting down to kill rf_kill */ diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 1779db3aa2b..0036737fe8e 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -34,6 +34,7 @@ #include <linux/slab.h> #include <net/cfg80211-wext.h> #include "ipw2200.h" +#include "ipw.h" #ifndef KBUILD_EXTMOD @@ -3668,8 +3669,7 @@ static int ipw_load(struct ipw_priv *priv) priv->rxq = NULL; } ipw_tx_queue_free(priv); - if (raw) - release_firmware(raw); + release_firmware(raw); #ifdef CONFIG_PM fw_loaded = 0; raw = NULL; @@ -7035,7 +7035,7 @@ static int ipw_qos_activate(struct ipw_priv *priv, cpu_to_le16(burst_duration); } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) { if (type == IEEE_B) { - IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n", + IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n", type); if (priv->qos_data.qos_enable == 0) active_one = &def_parameters_CCK; @@ -11443,20 +11443,6 @@ static void ipw_bg_down(struct work_struct *work) mutex_unlock(&priv->mutex); } -/* Called by register_netdev() */ -static int ipw_net_init(struct net_device *dev) -{ - int rc = 0; - struct ipw_priv *priv = libipw_priv(dev); - - mutex_lock(&priv->mutex); - if (ipw_up(priv)) - rc = -EIO; - mutex_unlock(&priv->mutex); - - return rc; -} - static int 
ipw_wdev_init(struct net_device *dev) { int i, rc = 0; @@ -11544,6 +11530,9 @@ static int ipw_wdev_init(struct net_device *dev) wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band; } + wdev->wiphy->cipher_suites = ipw_cipher_suites; + wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites); + set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); /* With that information in place, we can now register the wiphy... */ @@ -11722,7 +11711,6 @@ static void ipw_prom_free(struct ipw_priv *priv) #endif static const struct net_device_ops ipw_netdev_ops = { - .ndo_init = ipw_net_init, .ndo_open = ipw_net_open, .ndo_stop = ipw_net_stop, .ndo_set_rx_mode = ipw_net_set_multicast_list, @@ -11837,10 +11825,6 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev, net_dev->wireless_data = &priv->wireless_data; net_dev->wireless_handlers = &ipw_wx_handler_def; net_dev->ethtool_ops = &ipw_ethtool_ops; - net_dev->irq = pdev->irq; - net_dev->base_addr = (unsigned long)priv->hw_base; - net_dev->mem_start = pci_resource_start(pdev, 0); - net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1; err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group); if (err) { @@ -11849,17 +11833,24 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev, goto out_release_irq; } - mutex_unlock(&priv->mutex); - err = register_netdev(net_dev); - if (err) { - IPW_ERROR("failed to register network device\n"); + if (ipw_up(priv)) { + mutex_unlock(&priv->mutex); + err = -EIO; goto out_remove_sysfs; } + mutex_unlock(&priv->mutex); + err = ipw_wdev_init(net_dev); if (err) { IPW_ERROR("failed to register wireless device\n"); - goto out_unregister_netdev; + goto out_remove_sysfs; + } + + err = register_netdev(net_dev); + if (err) { + IPW_ERROR("failed to register network device\n"); + goto out_unregister_wiphy; } #ifdef CONFIG_IPW2200_PROMISCUOUS @@ -11868,10 +11859,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev, if (err) { IPW_ERROR("Failed to register promiscuous network " "device (error %d).\n", err); - wiphy_unregister(priv->ieee->wdev.wiphy); - kfree(priv->ieee->a_band.channels); - kfree(priv->ieee->bg_band.channels); - goto out_unregister_netdev; + unregister_netdev(priv->net_dev); + goto out_unregister_wiphy; } } #endif @@ -11883,8 +11872,10 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev, return 0; - out_unregister_netdev: - unregister_netdev(priv->net_dev); + out_unregister_wiphy: + wiphy_unregister(priv->ieee->wdev.wiphy); + kfree(priv->ieee->a_band.channels); + kfree(priv->ieee->bg_band.channels); out_remove_sysfs: sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); out_release_irq: diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h index 8874588fb92..0b22fb42173 100644 --- a/drivers/net/wireless/ipw2x00/libipw.h +++ b/drivers/net/wireless/ipw2x00/libipw.h @@ -584,61 +584,6 @@ struct libipw_tim_parameters { /*******************************************************/ -enum { /* libipw_basic_report.map */ - LIBIPW_BASIC_MAP_BSS = (1 << 0), - LIBIPW_BASIC_MAP_OFDM = (1 << 1), - LIBIPW_BASIC_MAP_UNIDENTIFIED = (1 << 2), - LIBIPW_BASIC_MAP_RADAR = (1 << 3), - LIBIPW_BASIC_MAP_UNMEASURED = (1 << 4), - /* Bits 5-7 are reserved */ - -}; -struct libipw_basic_report { - u8 channel; - __le64 start_time; - __le16 duration; - u8 map; -} __packed; - -enum { /* libipw_measurement_request.mode */ - /* Bit 0 is reserved */ - LIBIPW_MEASUREMENT_ENABLE = (1 << 1), - LIBIPW_MEASUREMENT_REQUEST = (1 << 2), - LIBIPW_MEASUREMENT_REPORT = (1 << 3), - /* 
Bits 4-7 are reserved */ -}; - -enum { - LIBIPW_REPORT_BASIC = 0, /* required */ - LIBIPW_REPORT_CCA = 1, /* optional */ - LIBIPW_REPORT_RPI = 2, /* optional */ - /* 3-255 reserved */ -}; - -struct libipw_measurement_params { - u8 channel; - __le64 start_time; - __le16 duration; -} __packed; - -struct libipw_measurement_request { - struct libipw_info_element ie; - u8 token; - u8 mode; - u8 type; - struct libipw_measurement_params params[0]; -} __packed; - -struct libipw_measurement_report { - struct libipw_info_element ie; - u8 token; - u8 mode; - u8 type; - union { - struct libipw_basic_report basic[0]; - } u; -} __packed; - struct libipw_tpc_report { u8 transmit_power; u8 link_margin; diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c index c4955d25a19..02e05792323 100644 --- a/drivers/net/wireless/ipw2x00/libipw_rx.c +++ b/drivers/net/wireless/ipw2x00/libipw_rx.c @@ -77,8 +77,8 @@ static struct libipw_frag_entry *libipw_frag_cache_find(struct if (entry->skb != NULL && entry->seq == seq && (entry->last_frag + 1 == frag || frag == -1) && - !compare_ether_addr(entry->src_addr, src) && - !compare_ether_addr(entry->dst_addr, dst)) + ether_addr_equal(entry->src_addr, src) && + ether_addr_equal(entry->dst_addr, dst)) return entry; } @@ -245,12 +245,12 @@ static int libipw_is_eapol_frame(struct libipw_device *ieee, /* check that the frame is unicast frame to us */ if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_TODS && - !compare_ether_addr(hdr->addr1, dev->dev_addr) && - !compare_ether_addr(hdr->addr3, dev->dev_addr)) { + ether_addr_equal(hdr->addr1, dev->dev_addr) && + ether_addr_equal(hdr->addr3, dev->dev_addr)) { /* ToDS frame with own addr BSSID and DA */ } else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS && - !compare_ether_addr(hdr->addr1, dev->dev_addr)) { + ether_addr_equal(hdr->addr1, dev->dev_addr)) { /* FromDS frame with own addr as DA */ } else return 0; @@ -523,8 +523,8 @@ int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb, if (ieee->iw_mode == IW_MODE_MASTER && !wds && (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == - IEEE80211_FCTL_FROMDS && ieee->stadev - && !compare_ether_addr(hdr->addr2, ieee->assoc_ap_addr)) { + IEEE80211_FCTL_FROMDS && ieee->stadev && + ether_addr_equal(hdr->addr2, ieee->assoc_ap_addr)) { /* Frame from BSSID of the AP for which we are a client */ skb->dev = dev = ieee->stadev; stats = hostap_get_stats(dev); @@ -1468,7 +1468,7 @@ static inline int is_same_network(struct libipw_network *src, * as one network */ return ((src->ssid_len == dst->ssid_len) && (src->channel == dst->channel) && - !compare_ether_addr(src->bssid, dst->bssid) && + ether_addr_equal(src->bssid, dst->bssid) && !memcmp(src->ssid, dst->ssid, src->ssid_len)); } diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c index b25c01be0d9..87e53989433 100644 --- a/drivers/net/wireless/iwlegacy/3945.c +++ b/drivers/net/wireless/iwlegacy/3945.c @@ -453,10 +453,10 @@ il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header) switch (il->iw_mode) { case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */ /* packets to our IBSS update information */ - return !compare_ether_addr(header->addr3, il->bssid); + return ether_addr_equal(header->addr3, il->bssid); case NL80211_IFTYPE_STATION: /* Header: Dest. 
| AP{BSSID} | Source */ /* packets to our IBSS update information */ - return !compare_ether_addr(header->addr2, il->bssid); + return ether_addr_equal(header->addr2, il->bssid); default: return 1; } diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index c46275a9256..509301a5e7e 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c @@ -2565,7 +2565,7 @@ il4965_find_station(struct il_priv *il, const u8 *addr) spin_lock_irqsave(&il->sta_lock, flags); for (i = start; i < il->hw_params.max_stations; i++) if (il->stations[i].used && - (!compare_ether_addr(il->stations[i].sta.sta.addr, addr))) { + ether_addr_equal(il->stations[i].sta.sta.addr, addr)) { ret = i; goto out; } @@ -2850,9 +2850,9 @@ void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags, struct ieee80211_tx_info *info) { - struct ieee80211_tx_rate *r = &info->control.rates[0]; + struct ieee80211_tx_rate *r = &info->status.rates[0]; - info->antenna_sel_tx = + info->status.antenna = ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); if (rate_n_flags & RATE_MCS_HT_MSK) r->flags |= IEEE80211_TX_RC_MCS; diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c index 11ab1247fae..f3b8e91aa3d 100644 --- a/drivers/net/wireless/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/iwlegacy/4965-rs.c @@ -873,7 +873,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband, tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) || tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) || tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) || - tbl_type.ant_type != info->antenna_sel_tx || + tbl_type.ant_type != info->status.antenna || !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS) || !!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) { diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c index eaf249452e5..cbf2dc18341 100644 --- a/drivers/net/wireless/iwlegacy/common.c +++ b/drivers/net/wireless/iwlegacy/common.c @@ -1896,8 +1896,8 @@ il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap, sta_id = il->hw_params.bcast_id; else for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) { - if (!compare_ether_addr - (il->stations[i].sta.sta.addr, addr)) { + if (ether_addr_equal(il->stations[i].sta.sta.addr, + addr)) { sta_id = i; break; } @@ -1926,7 +1926,7 @@ il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap, if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) && - !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) { + ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) { D_ASSOC("STA %d (%pM) already added, not adding again.\n", sta_id, addr); return sta_id; @@ -3744,10 +3744,10 @@ il_full_rxon_required(struct il_priv *il) /* These items are only settable from the full RXON command */ CHK(!il_is_associated(il)); - CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr)); - CHK(compare_ether_addr(staging->node_addr, active->node_addr)); - CHK(compare_ether_addr - (staging->wlap_bssid_addr, active->wlap_bssid_addr)); + CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr)); + CHK(!ether_addr_equal(staging->node_addr, active->node_addr)); + CHK(!ether_addr_equal(staging->wlap_bssid_addr, + active->wlap_bssid_addr)); CHK_NEQ(staging->dev_type, 
active->dev_type); CHK_NEQ(staging->channel, active->channel); CHK_NEQ(staging->air_propagation, active->air_propagation); diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index 2fe62730ddd..db6c6e52802 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -113,20 +113,21 @@ config IWLWIFI_DEVICE_TESTMODE generic netlink message via NL80211_TESTMODE channel. config IWLWIFI_P2P - bool "iwlwifi experimental P2P support" - depends on IWLWIFI - help - This option enables experimental P2P support for some devices - based on microcode support. Since P2P support is still under - development, this option may even enable it for some devices - now that turn out to not support it in the future due to - microcode restrictions. + def_bool y + bool "iwlwifi experimental P2P support" + depends on IWLWIFI + help + This option enables experimental P2P support for some devices + based on microcode support. Since P2P support is still under + development, this option may even enable it for some devices + now that turn out to not support it in the future due to + microcode restrictions. - To determine if your microcode supports the experimental P2P - offered by this option, check if the driver advertises AP - support when it is loaded. + To determine if your microcode supports the experimental P2P + offered by this option, check if the driver advertises AP + support when it is loaded. - Say Y only if you want to experiment with P2P. + Say Y only if you want to experiment with P2P. config IWLWIFI_EXPERIMENTAL_MFP bool "support MFP (802.11w) even if uCode doesn't advertise" @@ -136,3 +137,11 @@ config IWLWIFI_EXPERIMENTAL_MFP even if the microcode doesn't advertise it. Say Y only if you want to experiment with MFP. + +config IWLWIFI_UCODE16 + bool "support uCode 16.0" + depends on IWLWIFI + help + This option enables support for uCode version 16.0. + + Say Y if you want to use 16.0 microcode. 
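Several hunks above (libipw_rx.c, iwlegacy 3945.c, 4965-mac.c and common.c) replace open-coded compare_ether_addr() tests with ether_addr_equal(). The polarity flips at every call site because compare_ether_addr() is memcmp-like and returns 0 on a match, whereas ether_addr_equal() returns true on a match. A minimal sketch of the conversion, using a hypothetical helper name rather than any function from the patch:

	#include <linux/types.h>
	#include <linux/etherdevice.h>

	/* Sketch only: is_our_bssid() is illustrative, not taken from the patch. */
	static bool is_our_bssid(const u8 *addr3, const u8 *bssid)
	{
		/* before: return !compare_ether_addr(addr3, bssid); */
		return ether_addr_equal(addr3, bssid);
	}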
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile index 85d163ed3db..406f297a9a5 100644 --- a/drivers/net/wireless/iwlwifi/Makefile +++ b/drivers/net/wireless/iwlwifi/Makefile @@ -5,9 +5,9 @@ iwlwifi-objs += iwl-ucode.o iwl-agn-tx.o iwl-debug.o iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o -iwlwifi-objs += iwl-core.o iwl-eeprom.o iwl-power.o +iwlwifi-objs += iwl-eeprom.o iwl-power.o iwlwifi-objs += iwl-scan.o iwl-led.o -iwlwifi-objs += iwl-agn-rxon.o +iwlwifi-objs += iwl-agn-rxon.o iwl-agn-devices.o iwlwifi-objs += iwl-5000.o iwlwifi-objs += iwl-6000.o iwlwifi-objs += iwl-1000.o @@ -17,6 +17,8 @@ iwlwifi-objs += iwl-drv.o iwlwifi-objs += iwl-notif-wait.o iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o + +iwlwifi-$(CONFIG_IWLWIFI_UCODE16) += iwl-phy-db.o iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c index 8d80e233bc7..2629a6602df 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -24,26 +24,12 @@ * *****************************************************************************/ -#include <linux/kernel.h> #include <linux/module.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <net/mac80211.h> -#include <linux/etherdevice.h> -#include <asm/unaligned.h> #include <linux/stringify.h> - -#include "iwl-eeprom.h" -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-io.h" -#include "iwl-agn.h" -#include "iwl-agn-hw.h" -#include "iwl-shared.h" +#include "iwl-config.h" #include "iwl-cfg.h" -#include "iwl-prph.h" +#include "iwl-csr.h" +#include "iwl-agn-hw.h" /* Highest firmware API version supported */ #define IWL1000_UCODE_API_MAX 5 @@ -57,6 +43,10 @@ #define IWL1000_UCODE_API_MIN 1 #define IWL100_UCODE_API_MIN 5 +/* EEPROM version */ +#define EEPROM_1000_TX_POWER_VERSION (4) +#define EEPROM_1000_EEPROM_VERSION (0x15C) + #define IWL1000_FW_PRE "iwlwifi-1000-" #define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode" @@ -64,100 +54,8 @@ #define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode" -/* - * For 1000, use advance thermal throttling critical temperature threshold, - * but legacy thermal management implementation for now. - * This is for the reason of 1000 uCode using advance thermal throttling API - * but not implement ct_kill_exit based on ct_kill exit temperature - * so the thermal throttling will still based on legacy thermal throttling - * management. 
- * The code here need to be modified once 1000 uCode has the advanced thermal - * throttling algorithm in place - */ -static void iwl1000_set_ct_threshold(struct iwl_priv *priv) -{ - /* want Celsius */ - hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY; - hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; -} - -/* NIC configuration for 1000 series */ -static void iwl1000_nic_config(struct iwl_priv *priv) -{ - /* set CSR_HW_CONFIG_REG for uCode use */ - iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | - CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); - - /* Setting digital SVR for 1000 card to 1.32V */ - /* locking is acquired in iwl_set_bits_mask_prph() function */ - iwl_set_bits_mask_prph(trans(priv), APMG_DIGITAL_SVR_REG, - APMG_SVR_DIGITAL_VOLTAGE_1_32, - ~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK); -} - -static const struct iwl_sensitivity_ranges iwl1000_sensitivity = { - .min_nrg_cck = 95, - .auto_corr_min_ofdm = 90, - .auto_corr_min_ofdm_mrc = 170, - .auto_corr_min_ofdm_x1 = 120, - .auto_corr_min_ofdm_mrc_x1 = 240, - - .auto_corr_max_ofdm = 120, - .auto_corr_max_ofdm_mrc = 210, - .auto_corr_max_ofdm_x1 = 155, - .auto_corr_max_ofdm_mrc_x1 = 290, - - .auto_corr_min_cck = 125, - .auto_corr_max_cck = 200, - .auto_corr_min_cck_mrc = 170, - .auto_corr_max_cck_mrc = 400, - .nrg_th_cck = 95, - .nrg_th_ofdm = 95, - - .barker_corr_th_min = 190, - .barker_corr_th_min_mrc = 390, - .nrg_th_cca = 62, -}; - -static void iwl1000_hw_set_hw_params(struct iwl_priv *priv) -{ - hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ); - - hw_params(priv).tx_chains_num = - num_of_ant(hw_params(priv).valid_tx_ant); - if (cfg(priv)->rx_with_siso_diversity) - hw_params(priv).rx_chains_num = 1; - else - hw_params(priv).rx_chains_num = - num_of_ant(hw_params(priv).valid_rx_ant); - - iwl1000_set_ct_threshold(priv); - - /* Set initial sensitivity parameters */ - hw_params(priv).sens = &iwl1000_sensitivity; -} - -static struct iwl_lib_ops iwl1000_lib = { - .set_hw_params = iwl1000_hw_set_hw_params, - .nic_config = iwl1000_nic_config, - .eeprom_ops = { - .regulatory_bands = { - EEPROM_REG_BAND_1_CHANNELS, - EEPROM_REG_BAND_2_CHANNELS, - EEPROM_REG_BAND_3_CHANNELS, - EEPROM_REG_BAND_4_CHANNELS, - EEPROM_REG_BAND_5_CHANNELS, - EEPROM_REG_BAND_24_HT40_CHANNELS, - EEPROM_REGULATORY_BAND_NO_HT40, - }, - }, - .temperature = iwlagn_temperature, -}; - static const struct iwl_base_params iwl1000_base_params = { .num_of_queues = IWLAGN_NUM_QUEUES, - .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, .eeprom_size = OTP_LOW_IMAGE_SIZE, .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, .max_ll_items = OTP_MAX_LL_ITEMS_1000, @@ -166,15 +64,13 @@ static const struct iwl_base_params iwl1000_base_params = { .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, .chain_noise_scale = 1000, - .wd_timeout = IWL_DEF_WD_TIMEOUT, + .wd_timeout = IWL_WATCHHDOG_DISABLED, .max_event_log_size = 128, - .wd_disable = true, }; static const struct iwl_ht_params iwl1000_ht_params = { .ht_greenfield_support = true, .use_rts_for_aggregation = true, /* use rts/cts protection */ - .smps_mode = IEEE80211_SMPS_DYNAMIC, }; #define IWL_DEVICE_1000 \ @@ -182,11 +78,11 @@ static const struct iwl_ht_params iwl1000_ht_params = { .ucode_api_max = IWL1000_UCODE_API_MAX, \ .ucode_api_ok = IWL1000_UCODE_API_OK, \ .ucode_api_min = IWL1000_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_1000, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ .max_data_size = IWLAGN_RTC_DATA_SIZE, \ .eeprom_ver = 
EEPROM_1000_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ - .lib = &iwl1000_lib, \ .base_params = &iwl1000_base_params, \ .led_mode = IWL_LED_BLINK @@ -206,11 +102,11 @@ const struct iwl_cfg iwl1000_bg_cfg = { .ucode_api_max = IWL100_UCODE_API_MAX, \ .ucode_api_ok = IWL100_UCODE_API_OK, \ .ucode_api_min = IWL100_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_100, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ .max_data_size = IWLAGN_RTC_DATA_SIZE, \ .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ - .lib = &iwl1000_lib, \ .base_params = &iwl1000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .rx_with_siso_diversity = true diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c index ea108622e0b..7f793417c78 100644 --- a/drivers/net/wireless/iwlwifi/iwl-2000.c +++ b/drivers/net/wireless/iwlwifi/iwl-2000.c @@ -24,25 +24,12 @@ * *****************************************************************************/ -#include <linux/kernel.h> #include <linux/module.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <net/mac80211.h> -#include <linux/etherdevice.h> -#include <asm/unaligned.h> #include <linux/stringify.h> - -#include "iwl-eeprom.h" -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-io.h" -#include "iwl-agn.h" -#include "iwl-agn-hw.h" -#include "iwl-shared.h" +#include "iwl-config.h" #include "iwl-cfg.h" +#include "iwl-agn-hw.h" +#include "iwl-commands.h" /* needed for BT for now */ /* Highest firmware API version supported */ #define IWL2030_UCODE_API_MAX 6 @@ -62,6 +49,11 @@ #define IWL105_UCODE_API_MIN 5 #define IWL135_UCODE_API_MIN 5 +/* EEPROM version */ +#define EEPROM_2000_TX_POWER_VERSION (6) +#define EEPROM_2000_EEPROM_VERSION (0x805) + + #define IWL2030_FW_PRE "iwlwifi-2030-" #define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode" @@ -74,105 +66,9 @@ #define IWL135_FW_PRE "iwlwifi-135-" #define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode" -static void iwl2000_set_ct_threshold(struct iwl_priv *priv) -{ - /* want Celsius */ - hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD; - hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; -} - -/* NIC configuration for 2000 series */ -static void iwl2000_nic_config(struct iwl_priv *priv) -{ - iwl_rf_config(priv); - - if (cfg(priv)->iq_invert) - iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG, - CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER); -} - -static const struct iwl_sensitivity_ranges iwl2000_sensitivity = { - .min_nrg_cck = 97, - .auto_corr_min_ofdm = 80, - .auto_corr_min_ofdm_mrc = 128, - .auto_corr_min_ofdm_x1 = 105, - .auto_corr_min_ofdm_mrc_x1 = 192, - - .auto_corr_max_ofdm = 145, - .auto_corr_max_ofdm_mrc = 232, - .auto_corr_max_ofdm_x1 = 110, - .auto_corr_max_ofdm_mrc_x1 = 232, - - .auto_corr_min_cck = 125, - .auto_corr_max_cck = 175, - .auto_corr_min_cck_mrc = 160, - .auto_corr_max_cck_mrc = 310, - .nrg_th_cck = 97, - .nrg_th_ofdm = 100, - - .barker_corr_th_min = 190, - .barker_corr_th_min_mrc = 390, - .nrg_th_cca = 62, -}; - -static void iwl2000_hw_set_hw_params(struct iwl_priv *priv) -{ - hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ); - - hw_params(priv).tx_chains_num = - num_of_ant(hw_params(priv).valid_tx_ant); - if (cfg(priv)->rx_with_siso_diversity) - hw_params(priv).rx_chains_num = 1; - else - hw_params(priv).rx_chains_num = - num_of_ant(hw_params(priv).valid_rx_ant); - - 
iwl2000_set_ct_threshold(priv); - - /* Set initial sensitivity parameters */ - hw_params(priv).sens = &iwl2000_sensitivity; -} - -static struct iwl_lib_ops iwl2000_lib = { - .set_hw_params = iwl2000_hw_set_hw_params, - .nic_config = iwl2000_nic_config, - .eeprom_ops = { - .regulatory_bands = { - EEPROM_REG_BAND_1_CHANNELS, - EEPROM_REG_BAND_2_CHANNELS, - EEPROM_REG_BAND_3_CHANNELS, - EEPROM_REG_BAND_4_CHANNELS, - EEPROM_REG_BAND_5_CHANNELS, - EEPROM_6000_REG_BAND_24_HT40_CHANNELS, - EEPROM_REGULATORY_BAND_NO_HT40, - }, - .enhanced_txpower = true, - }, - .temperature = iwlagn_temperature, -}; - -static struct iwl_lib_ops iwl2030_lib = { - .set_hw_params = iwl2000_hw_set_hw_params, - .nic_config = iwl2000_nic_config, - .eeprom_ops = { - .regulatory_bands = { - EEPROM_REG_BAND_1_CHANNELS, - EEPROM_REG_BAND_2_CHANNELS, - EEPROM_REG_BAND_3_CHANNELS, - EEPROM_REG_BAND_4_CHANNELS, - EEPROM_REG_BAND_5_CHANNELS, - EEPROM_6000_REG_BAND_24_HT40_CHANNELS, - EEPROM_REGULATORY_BAND_NO_HT40, - }, - .enhanced_txpower = true, - }, - .temperature = iwlagn_temperature, -}; - static const struct iwl_base_params iwl2000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, - .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, .pll_cfg_val = 0, .max_ll_items = OTP_MAX_LL_ITEMS_2x00, .shadow_ram_support = true, @@ -191,7 +87,6 @@ static const struct iwl_base_params iwl2000_base_params = { static const struct iwl_base_params iwl2030_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, - .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, .pll_cfg_val = 0, .max_ll_items = OTP_MAX_LL_ITEMS_2x00, .shadow_ram_support = true, @@ -226,16 +121,15 @@ static const struct iwl_bt_params iwl2030_bt_params = { .ucode_api_max = IWL2000_UCODE_API_MAX, \ .ucode_api_ok = IWL2000_UCODE_API_OK, \ .ucode_api_min = IWL2000_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_2000, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .lib = &iwl2000_lib, \ .base_params = &iwl2000_base_params, \ .need_temp_offset_calib = true, \ .temp_offset_v2 = true, \ - .led_mode = IWL_LED_RF_STATE, \ - .iq_invert = true \ + .led_mode = IWL_LED_RF_STATE const struct iwl_cfg iwl2000_2bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN", @@ -254,18 +148,17 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = { .ucode_api_max = IWL2030_UCODE_API_MAX, \ .ucode_api_ok = IWL2030_UCODE_API_OK, \ .ucode_api_min = IWL2030_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_2030, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .lib = &iwl2030_lib, \ .base_params = &iwl2030_base_params, \ .bt_params = &iwl2030_bt_params, \ .need_temp_offset_calib = true, \ .temp_offset_v2 = true, \ .led_mode = IWL_LED_RF_STATE, \ - .adv_pm = true, \ - .iq_invert = true \ + .adv_pm = true const struct iwl_cfg iwl2030_2bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", @@ -278,18 +171,17 @@ const struct iwl_cfg iwl2030_2bgn_cfg = { .ucode_api_max = IWL105_UCODE_API_MAX, \ .ucode_api_ok = IWL105_UCODE_API_OK, \ .ucode_api_min = IWL105_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_105, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ .eeprom_calib_ver = 
EEPROM_2000_TX_POWER_VERSION, \ - .lib = &iwl2000_lib, \ .base_params = &iwl2000_base_params, \ .need_temp_offset_calib = true, \ .temp_offset_v2 = true, \ .led_mode = IWL_LED_RF_STATE, \ .adv_pm = true, \ - .rx_with_siso_diversity = true, \ - .iq_invert = true \ + .rx_with_siso_diversity = true const struct iwl_cfg iwl105_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", @@ -308,19 +200,18 @@ const struct iwl_cfg iwl105_bgn_d_cfg = { .ucode_api_max = IWL135_UCODE_API_MAX, \ .ucode_api_ok = IWL135_UCODE_API_OK, \ .ucode_api_min = IWL135_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_135, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .lib = &iwl2030_lib, \ .base_params = &iwl2030_base_params, \ .bt_params = &iwl2030_bt_params, \ .need_temp_offset_calib = true, \ .temp_offset_v2 = true, \ .led_mode = IWL_LED_RF_STATE, \ .adv_pm = true, \ - .rx_with_siso_diversity = true, \ - .iq_invert = true \ + .rx_with_siso_diversity = true const struct iwl_cfg iwl135_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index de0920c74cd..8e26bc825f2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -24,28 +24,12 @@ * *****************************************************************************/ -#include <linux/kernel.h> #include <linux/module.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/sched.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <net/mac80211.h> -#include <linux/etherdevice.h> -#include <asm/unaligned.h> #include <linux/stringify.h> - -#include "iwl-eeprom.h" -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-io.h" -#include "iwl-agn.h" -#include "iwl-agn-hw.h" -#include "iwl-trans.h" -#include "iwl-shared.h" +#include "iwl-config.h" #include "iwl-cfg.h" -#include "iwl-prph.h" +#include "iwl-agn-hw.h" +#include "iwl-csr.h" /* Highest firmware API version supported */ #define IWL5000_UCODE_API_MAX 5 @@ -59,268 +43,28 @@ #define IWL5000_UCODE_API_MIN 1 #define IWL5150_UCODE_API_MIN 1 +/* EEPROM versions */ +#define EEPROM_5000_TX_POWER_VERSION (4) +#define EEPROM_5000_EEPROM_VERSION (0x11A) +#define EEPROM_5050_TX_POWER_VERSION (4) +#define EEPROM_5050_EEPROM_VERSION (0x21E) + #define IWL5000_FW_PRE "iwlwifi-5000-" #define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode" #define IWL5150_FW_PRE "iwlwifi-5150-" #define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode" -/* NIC configuration for 5000 series */ -static void iwl5000_nic_config(struct iwl_priv *priv) -{ - iwl_rf_config(priv); - - /* W/A : NIC is stuck in a reset state after Early PCIe power off - * (PCIe power is lost before PERST# is asserted), - * causing ME FW to lose ownership and not being able to obtain it back. 
- */ - iwl_set_bits_mask_prph(trans(priv), APMG_PS_CTRL_REG, - APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, - ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); -} - -static const struct iwl_sensitivity_ranges iwl5000_sensitivity = { - .min_nrg_cck = 100, - .auto_corr_min_ofdm = 90, - .auto_corr_min_ofdm_mrc = 170, - .auto_corr_min_ofdm_x1 = 105, - .auto_corr_min_ofdm_mrc_x1 = 220, - - .auto_corr_max_ofdm = 120, - .auto_corr_max_ofdm_mrc = 210, - .auto_corr_max_ofdm_x1 = 120, - .auto_corr_max_ofdm_mrc_x1 = 240, - - .auto_corr_min_cck = 125, - .auto_corr_max_cck = 200, - .auto_corr_min_cck_mrc = 200, - .auto_corr_max_cck_mrc = 400, - .nrg_th_cck = 100, - .nrg_th_ofdm = 100, - - .barker_corr_th_min = 190, - .barker_corr_th_min_mrc = 390, - .nrg_th_cca = 62, -}; - -static struct iwl_sensitivity_ranges iwl5150_sensitivity = { - .min_nrg_cck = 95, - .auto_corr_min_ofdm = 90, - .auto_corr_min_ofdm_mrc = 170, - .auto_corr_min_ofdm_x1 = 105, - .auto_corr_min_ofdm_mrc_x1 = 220, - - .auto_corr_max_ofdm = 120, - .auto_corr_max_ofdm_mrc = 210, - /* max = min for performance bug in 5150 DSP */ - .auto_corr_max_ofdm_x1 = 105, - .auto_corr_max_ofdm_mrc_x1 = 220, - - .auto_corr_min_cck = 125, - .auto_corr_max_cck = 200, - .auto_corr_min_cck_mrc = 170, - .auto_corr_max_cck_mrc = 400, - .nrg_th_cck = 95, - .nrg_th_ofdm = 95, - - .barker_corr_th_min = 190, - .barker_corr_th_min_mrc = 390, - .nrg_th_cca = 62, -}; - -#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5) - -static s32 iwl_temp_calib_to_offset(struct iwl_shared *shrd) -{ - u16 temperature, voltage; - __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(shrd, - EEPROM_KELVIN_TEMPERATURE); - - temperature = le16_to_cpu(temp_calib[0]); - voltage = le16_to_cpu(temp_calib[1]); - - /* offset = temp - volt / coeff */ - return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF); -} - -static void iwl5150_set_ct_threshold(struct iwl_priv *priv) -{ - const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF; - s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) - - iwl_temp_calib_to_offset(priv->shrd); - - hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef; -} - -static void iwl5000_set_ct_threshold(struct iwl_priv *priv) -{ - /* want Celsius */ - hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY; -} - -static void iwl5000_hw_set_hw_params(struct iwl_priv *priv) -{ - hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) | - BIT(IEEE80211_BAND_5GHZ); - - hw_params(priv).tx_chains_num = - num_of_ant(hw_params(priv).valid_tx_ant); - hw_params(priv).rx_chains_num = - num_of_ant(hw_params(priv).valid_rx_ant); - - iwl5000_set_ct_threshold(priv); - - /* Set initial sensitivity parameters */ - hw_params(priv).sens = &iwl5000_sensitivity; -} - -static void iwl5150_hw_set_hw_params(struct iwl_priv *priv) -{ - hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) | - BIT(IEEE80211_BAND_5GHZ); - - hw_params(priv).tx_chains_num = - num_of_ant(hw_params(priv).valid_tx_ant); - hw_params(priv).rx_chains_num = - num_of_ant(hw_params(priv).valid_rx_ant); - - iwl5150_set_ct_threshold(priv); - - /* Set initial sensitivity parameters */ - hw_params(priv).sens = &iwl5150_sensitivity; -} - -static void iwl5150_temperature(struct iwl_priv *priv) -{ - u32 vt = 0; - s32 offset = iwl_temp_calib_to_offset(priv->shrd); - - vt = le32_to_cpu(priv->statistics.common.temperature); - vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset; - /* now vt hold the temperature in Kelvin */ - priv->temperature = KELVIN_TO_CELSIUS(vt); - 
iwl_tt_handler(priv); -} - -static int iwl5000_hw_channel_switch(struct iwl_priv *priv, - struct ieee80211_channel_switch *ch_switch) -{ - /* - * MULTI-FIXME - * See iwlagn_mac_channel_switch. - */ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwl5000_channel_switch_cmd cmd; - const struct iwl_channel_info *ch_info; - u32 switch_time_in_usec, ucode_switch_time; - u16 ch; - u32 tsf_low; - u8 switch_count; - u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval); - struct ieee80211_vif *vif = ctx->vif; - struct iwl_host_cmd hcmd = { - .id = REPLY_CHANNEL_SWITCH, - .len = { sizeof(cmd), }, - .flags = CMD_SYNC, - .data = { &cmd, }, - }; - - cmd.band = priv->band == IEEE80211_BAND_2GHZ; - ch = ch_switch->channel->hw_value; - IWL_DEBUG_11H(priv, "channel switch from %d to %d\n", - ctx->active.channel, ch); - cmd.channel = cpu_to_le16(ch); - cmd.rxon_flags = ctx->staging.flags; - cmd.rxon_filter_flags = ctx->staging.filter_flags; - switch_count = ch_switch->count; - tsf_low = ch_switch->timestamp & 0x0ffffffff; - /* - * calculate the ucode channel switch time - * adding TSF as one of the factor for when to switch - */ - if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) { - if (switch_count > ((priv->ucode_beacon_time - tsf_low) / - beacon_interval)) { - switch_count -= (priv->ucode_beacon_time - - tsf_low) / beacon_interval; - } else - switch_count = 0; - } - if (switch_count <= 1) - cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); - else { - switch_time_in_usec = - vif->bss_conf.beacon_int * switch_count * TIME_UNIT; - ucode_switch_time = iwl_usecs_to_beacons(priv, - switch_time_in_usec, - beacon_interval); - cmd.switch_time = iwl_add_beacon_time(priv, - priv->ucode_beacon_time, - ucode_switch_time, - beacon_interval); - } - IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", - cmd.switch_time); - ch_info = iwl_get_channel_info(priv, priv->band, ch); - if (ch_info) - cmd.expect_beacon = is_channel_radar(ch_info); - else { - IWL_ERR(priv, "invalid channel switch from %u to %u\n", - ctx->active.channel, ch); - return -EFAULT; - } - - return iwl_dvm_send_cmd(priv, &hcmd); -} - -static struct iwl_lib_ops iwl5000_lib = { - .set_hw_params = iwl5000_hw_set_hw_params, - .set_channel_switch = iwl5000_hw_channel_switch, - .nic_config = iwl5000_nic_config, - .eeprom_ops = { - .regulatory_bands = { - EEPROM_REG_BAND_1_CHANNELS, - EEPROM_REG_BAND_2_CHANNELS, - EEPROM_REG_BAND_3_CHANNELS, - EEPROM_REG_BAND_4_CHANNELS, - EEPROM_REG_BAND_5_CHANNELS, - EEPROM_REG_BAND_24_HT40_CHANNELS, - EEPROM_REG_BAND_52_HT40_CHANNELS - }, - }, - .temperature = iwlagn_temperature, -}; - -static struct iwl_lib_ops iwl5150_lib = { - .set_hw_params = iwl5150_hw_set_hw_params, - .set_channel_switch = iwl5000_hw_channel_switch, - .nic_config = iwl5000_nic_config, - .eeprom_ops = { - .regulatory_bands = { - EEPROM_REG_BAND_1_CHANNELS, - EEPROM_REG_BAND_2_CHANNELS, - EEPROM_REG_BAND_3_CHANNELS, - EEPROM_REG_BAND_4_CHANNELS, - EEPROM_REG_BAND_5_CHANNELS, - EEPROM_REG_BAND_24_HT40_CHANNELS, - EEPROM_REG_BAND_52_HT40_CHANNELS - }, - }, - .temperature = iwl5150_temperature, -}; - static const struct iwl_base_params iwl5000_base_params = { .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, - .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, .led_compensation = 51, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, .chain_noise_scale = 1000, - .wd_timeout = IWL_LONG_WD_TIMEOUT, + .wd_timeout = 
IWL_WATCHHDOG_DISABLED, .max_event_log_size = 512, .no_idle_support = true, - .wd_disable = true, }; static const struct iwl_ht_params iwl5000_ht_params = { @@ -332,11 +76,11 @@ static const struct iwl_ht_params iwl5000_ht_params = { .ucode_api_max = IWL5000_UCODE_API_MAX, \ .ucode_api_ok = IWL5000_UCODE_API_OK, \ .ucode_api_min = IWL5000_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_5000, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ .max_data_size = IWLAGN_RTC_DATA_SIZE, \ .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ - .lib = &iwl5000_lib, \ .base_params = &iwl5000_base_params, \ .led_mode = IWL_LED_BLINK @@ -378,11 +122,11 @@ const struct iwl_cfg iwl5350_agn_cfg = { .ucode_api_max = IWL5000_UCODE_API_MAX, .ucode_api_ok = IWL5000_UCODE_API_OK, .ucode_api_min = IWL5000_UCODE_API_MIN, + .device_family = IWL_DEVICE_FAMILY_5000, .max_inst_size = IWLAGN_RTC_INST_SIZE, .max_data_size = IWLAGN_RTC_DATA_SIZE, .eeprom_ver = EEPROM_5050_EEPROM_VERSION, .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, - .lib = &iwl5000_lib, .base_params = &iwl5000_base_params, .ht_params = &iwl5000_ht_params, .led_mode = IWL_LED_BLINK, @@ -394,11 +138,11 @@ const struct iwl_cfg iwl5350_agn_cfg = { .ucode_api_max = IWL5150_UCODE_API_MAX, \ .ucode_api_ok = IWL5150_UCODE_API_OK, \ .ucode_api_min = IWL5150_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_5150, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ .max_data_size = IWLAGN_RTC_DATA_SIZE, \ .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ - .lib = &iwl5150_lib, \ .base_params = &iwl5000_base_params, \ .no_xtal_calib = true, \ .led_mode = IWL_LED_BLINK, \ diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index f0c91505a7f..381b02cf339 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -24,26 +24,12 @@ * *****************************************************************************/ -#include <linux/kernel.h> #include <linux/module.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <net/mac80211.h> -#include <linux/etherdevice.h> -#include <asm/unaligned.h> #include <linux/stringify.h> - -#include "iwl-eeprom.h" -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-io.h" -#include "iwl-agn.h" -#include "iwl-agn-hw.h" -#include "iwl-trans.h" -#include "iwl-shared.h" +#include "iwl-config.h" #include "iwl-cfg.h" +#include "iwl-agn-hw.h" +#include "iwl-commands.h" /* needed for BT for now */ /* Highest firmware API version supported */ #define IWL6000_UCODE_API_MAX 6 @@ -61,6 +47,20 @@ #define IWL6050_UCODE_API_MIN 4 #define IWL6000G2_UCODE_API_MIN 4 +/* EEPROM versions */ +#define EEPROM_6000_TX_POWER_VERSION (4) +#define EEPROM_6000_EEPROM_VERSION (0x423) +#define EEPROM_6050_TX_POWER_VERSION (4) +#define EEPROM_6050_EEPROM_VERSION (0x532) +#define EEPROM_6150_TX_POWER_VERSION (6) +#define EEPROM_6150_EEPROM_VERSION (0x553) +#define EEPROM_6005_TX_POWER_VERSION (6) +#define EEPROM_6005_EEPROM_VERSION (0x709) +#define EEPROM_6030_TX_POWER_VERSION (6) +#define EEPROM_6030_EEPROM_VERSION (0x709) +#define EEPROM_6035_TX_POWER_VERSION (6) +#define EEPROM_6035_EEPROM_VERSION (0x753) + #define IWL6000_FW_PRE "iwlwifi-6000-" #define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode" @@ -73,205 +73,9 @@ #define IWL6030_FW_PRE "iwlwifi-6000g2b-" #define IWL6030_MODULE_FIRMWARE(api) 
IWL6030_FW_PRE __stringify(api) ".ucode" -static void iwl6000_set_ct_threshold(struct iwl_priv *priv) -{ - /* want Celsius */ - hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD; - hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; -} - -static void iwl6050_additional_nic_config(struct iwl_priv *priv) -{ - /* Indicate calibration version to uCode. */ - if (iwl_eeprom_calib_version(priv->shrd) >= 6) - iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG, - CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); -} - -static void iwl6150_additional_nic_config(struct iwl_priv *priv) -{ - /* Indicate calibration version to uCode. */ - if (iwl_eeprom_calib_version(priv->shrd) >= 6) - iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG, - CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); - iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG, - CSR_GP_DRIVER_REG_BIT_6050_1x2); -} - -static void iwl6000i_additional_nic_config(struct iwl_priv *priv) -{ - /* 2x2 IPA phy type */ - iwl_write32(trans(priv), CSR_GP_DRIVER_REG, - CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA); -} - -/* NIC configuration for 6000 series */ -static void iwl6000_nic_config(struct iwl_priv *priv) -{ - iwl_rf_config(priv); - - /* do additional nic configuration if needed */ - if (cfg(priv)->additional_nic_config) - cfg(priv)->additional_nic_config(priv); -} - -static const struct iwl_sensitivity_ranges iwl6000_sensitivity = { - .min_nrg_cck = 110, - .auto_corr_min_ofdm = 80, - .auto_corr_min_ofdm_mrc = 128, - .auto_corr_min_ofdm_x1 = 105, - .auto_corr_min_ofdm_mrc_x1 = 192, - - .auto_corr_max_ofdm = 145, - .auto_corr_max_ofdm_mrc = 232, - .auto_corr_max_ofdm_x1 = 110, - .auto_corr_max_ofdm_mrc_x1 = 232, - - .auto_corr_min_cck = 125, - .auto_corr_max_cck = 175, - .auto_corr_min_cck_mrc = 160, - .auto_corr_max_cck_mrc = 310, - .nrg_th_cck = 110, - .nrg_th_ofdm = 110, - - .barker_corr_th_min = 190, - .barker_corr_th_min_mrc = 336, - .nrg_th_cca = 62, -}; - -static void iwl6000_hw_set_hw_params(struct iwl_priv *priv) -{ - hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) | - BIT(IEEE80211_BAND_5GHZ); - - hw_params(priv).tx_chains_num = - num_of_ant(hw_params(priv).valid_tx_ant); - if (cfg(priv)->rx_with_siso_diversity) - hw_params(priv).rx_chains_num = 1; - else - hw_params(priv).rx_chains_num = - num_of_ant(hw_params(priv).valid_rx_ant); - - iwl6000_set_ct_threshold(priv); - - /* Set initial sensitivity parameters */ - hw_params(priv).sens = &iwl6000_sensitivity; - -} - -static int iwl6000_hw_channel_switch(struct iwl_priv *priv, - struct ieee80211_channel_switch *ch_switch) -{ - /* - * MULTI-FIXME - * See iwlagn_mac_channel_switch. 
- */ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwl6000_channel_switch_cmd cmd; - const struct iwl_channel_info *ch_info; - u32 switch_time_in_usec, ucode_switch_time; - u16 ch; - u32 tsf_low; - u8 switch_count; - u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval); - struct ieee80211_vif *vif = ctx->vif; - struct iwl_host_cmd hcmd = { - .id = REPLY_CHANNEL_SWITCH, - .len = { sizeof(cmd), }, - .flags = CMD_SYNC, - .data = { &cmd, }, - }; - - cmd.band = priv->band == IEEE80211_BAND_2GHZ; - ch = ch_switch->channel->hw_value; - IWL_DEBUG_11H(priv, "channel switch from %u to %u\n", - ctx->active.channel, ch); - cmd.channel = cpu_to_le16(ch); - cmd.rxon_flags = ctx->staging.flags; - cmd.rxon_filter_flags = ctx->staging.filter_flags; - switch_count = ch_switch->count; - tsf_low = ch_switch->timestamp & 0x0ffffffff; - /* - * calculate the ucode channel switch time - * adding TSF as one of the factor for when to switch - */ - if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) { - if (switch_count > ((priv->ucode_beacon_time - tsf_low) / - beacon_interval)) { - switch_count -= (priv->ucode_beacon_time - - tsf_low) / beacon_interval; - } else - switch_count = 0; - } - if (switch_count <= 1) - cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); - else { - switch_time_in_usec = - vif->bss_conf.beacon_int * switch_count * TIME_UNIT; - ucode_switch_time = iwl_usecs_to_beacons(priv, - switch_time_in_usec, - beacon_interval); - cmd.switch_time = iwl_add_beacon_time(priv, - priv->ucode_beacon_time, - ucode_switch_time, - beacon_interval); - } - IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", - cmd.switch_time); - ch_info = iwl_get_channel_info(priv, priv->band, ch); - if (ch_info) - cmd.expect_beacon = is_channel_radar(ch_info); - else { - IWL_ERR(priv, "invalid channel switch from %u to %u\n", - ctx->active.channel, ch); - return -EFAULT; - } - - return iwl_dvm_send_cmd(priv, &hcmd); -} - -static struct iwl_lib_ops iwl6000_lib = { - .set_hw_params = iwl6000_hw_set_hw_params, - .set_channel_switch = iwl6000_hw_channel_switch, - .nic_config = iwl6000_nic_config, - .eeprom_ops = { - .regulatory_bands = { - EEPROM_REG_BAND_1_CHANNELS, - EEPROM_REG_BAND_2_CHANNELS, - EEPROM_REG_BAND_3_CHANNELS, - EEPROM_REG_BAND_4_CHANNELS, - EEPROM_REG_BAND_5_CHANNELS, - EEPROM_6000_REG_BAND_24_HT40_CHANNELS, - EEPROM_REG_BAND_52_HT40_CHANNELS - }, - .enhanced_txpower = true, - }, - .temperature = iwlagn_temperature, -}; - -static struct iwl_lib_ops iwl6030_lib = { - .set_hw_params = iwl6000_hw_set_hw_params, - .set_channel_switch = iwl6000_hw_channel_switch, - .nic_config = iwl6000_nic_config, - .eeprom_ops = { - .regulatory_bands = { - EEPROM_REG_BAND_1_CHANNELS, - EEPROM_REG_BAND_2_CHANNELS, - EEPROM_REG_BAND_3_CHANNELS, - EEPROM_REG_BAND_4_CHANNELS, - EEPROM_REG_BAND_5_CHANNELS, - EEPROM_6000_REG_BAND_24_HT40_CHANNELS, - EEPROM_REG_BAND_52_HT40_CHANNELS - }, - .enhanced_txpower = true, - }, - .temperature = iwlagn_temperature, -}; - static const struct iwl_base_params iwl6000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, - .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, .pll_cfg_val = 0, .max_ll_items = OTP_MAX_LL_ITEMS_6x00, .shadow_ram_support = true, @@ -288,7 +92,6 @@ static const struct iwl_base_params iwl6000_base_params = { static const struct iwl_base_params iwl6050_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, - .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, 
.pll_cfg_val = 0, .max_ll_items = OTP_MAX_LL_ITEMS_6x50, .shadow_ram_support = true, @@ -305,7 +108,6 @@ static const struct iwl_base_params iwl6050_base_params = { static const struct iwl_base_params iwl6000_g2_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, - .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, .pll_cfg_val = 0, .max_ll_items = OTP_MAX_LL_ITEMS_6x00, .shadow_ram_support = true, @@ -338,11 +140,11 @@ static const struct iwl_bt_params iwl6000_bt_params = { .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ .ucode_api_ok = IWL6000G2_UCODE_API_OK, \ .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_6005, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ - .lib = &iwl6000_lib, \ .base_params = &iwl6000_g2_base_params, \ .need_temp_offset_calib = true, \ .led_mode = IWL_LED_RF_STATE @@ -392,11 +194,11 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = { .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \ .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_6030, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ - .lib = &iwl6030_lib, \ .base_params = &iwl6000_g2_base_params, \ .bt_params = &iwl6000_bt_params, \ .need_temp_offset_calib = true, \ @@ -463,14 +265,13 @@ const struct iwl_cfg iwl130_bg_cfg = { .ucode_api_max = IWL6000_UCODE_API_MAX, \ .ucode_api_ok = IWL6000_UCODE_API_OK, \ .ucode_api_min = IWL6000_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_6000i, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \ .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \ .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ - .lib = &iwl6000_lib, \ - .additional_nic_config = iwl6000i_additional_nic_config,\ .base_params = &iwl6000_base_params, \ .led_mode = IWL_LED_BLINK @@ -494,12 +295,11 @@ const struct iwl_cfg iwl6000i_2bg_cfg = { .fw_name_pre = IWL6050_FW_PRE, \ .ucode_api_max = IWL6050_UCODE_API_MAX, \ .ucode_api_min = IWL6050_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_6050, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \ .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \ - .lib = &iwl6000_lib, \ - .additional_nic_config = iwl6050_additional_nic_config, \ .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ .base_params = &iwl6050_base_params, \ @@ -521,10 +321,9 @@ const struct iwl_cfg iwl6050_2abg_cfg = { .fw_name_pre = IWL6050_FW_PRE, \ .ucode_api_max = IWL6050_UCODE_API_MAX, \ .ucode_api_min = IWL6050_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_6150, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ - .lib = &iwl6000_lib, \ - .additional_nic_config = iwl6150_additional_nic_config, \ .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \ .base_params = &iwl6050_base_params, \ @@ -548,11 +347,11 @@ const struct iwl_cfg iwl6000_3agn_cfg = { .ucode_api_max = IWL6000_UCODE_API_MAX, .ucode_api_ok = IWL6000_UCODE_API_OK, .ucode_api_min = IWL6000_UCODE_API_MIN, + .device_family = 
IWL_DEVICE_FAMILY_6000, .max_inst_size = IWL60_RTC_INST_SIZE, .max_data_size = IWL60_RTC_DATA_SIZE, .eeprom_ver = EEPROM_6000_EEPROM_VERSION, .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, - .lib = &iwl6000_lib, .base_params = &iwl6000_base_params, .ht_params = &iwl6000_ht_params, .led_mode = IWL_LED_BLINK, diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c index 84cbe7bb504..95f27f1a423 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c @@ -64,7 +64,6 @@ #include <net/mac80211.h> #include "iwl-dev.h" -#include "iwl-core.h" #include "iwl-agn-calib.h" #include "iwl-trans.h" #include "iwl-agn.h" @@ -190,7 +189,7 @@ static int iwl_sens_energy_cck(struct iwl_priv *priv, u32 max_false_alarms = MAX_FA_CCK * rx_enable_time; u32 min_false_alarms = MIN_FA_CCK * rx_enable_time; struct iwl_sensitivity_data *data = NULL; - const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens; + const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; data = &(priv->sensitivity_data); @@ -373,7 +372,7 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv, u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time; u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time; struct iwl_sensitivity_data *data = NULL; - const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens; + const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; data = &(priv->sensitivity_data); @@ -521,7 +520,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv) iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]); - if (cfg(priv)->base_params->hd_v2) { + if (priv->cfg->base_params->hd_v2) { cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] = HD_INA_NON_SQUARE_DET_OFDM_DATA_V2; cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] = @@ -597,9 +596,9 @@ void iwl_init_sensitivity(struct iwl_priv *priv) int ret = 0; int i; struct iwl_sensitivity_data *data = NULL; - const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens; + const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; - if (priv->disable_sens_cal) + if (priv->calib_disabled & IWL_SENSITIVITY_CALIB_DISABLED) return; IWL_DEBUG_CALIB(priv, "Start iwl_init_sensitivity\n"); @@ -663,7 +662,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv) struct statistics_rx_phy *ofdm, *cck; struct statistics_general_data statis; - if (priv->disable_sens_cal) + if (priv->calib_disabled & IWL_SENSITIVITY_CALIB_DISABLED) return; data = &(priv->sensitivity_data); @@ -833,28 +832,28 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig, * To be safe, simply mask out any chains that we know * are not on the device. 
*/ - active_chains &= hw_params(priv).valid_rx_ant; + active_chains &= priv->hw_params.valid_rx_ant; num_tx_chains = 0; for (i = 0; i < NUM_RX_CHAINS; i++) { /* loops on all the bits of * priv->hw_setting.valid_tx_ant */ u8 ant_msk = (1 << i); - if (!(hw_params(priv).valid_tx_ant & ant_msk)) + if (!(priv->hw_params.valid_tx_ant & ant_msk)) continue; num_tx_chains++; if (data->disconn_array[i] == 0) /* there is a Tx antenna connected */ break; - if (num_tx_chains == hw_params(priv).tx_chains_num && + if (num_tx_chains == priv->hw_params.tx_chains_num && data->disconn_array[i]) { /* * If all chains are disconnected * connect the first valid tx chain */ first_chain = - find_first_chain(hw_params(priv).valid_tx_ant); + find_first_chain(priv->hw_params.valid_tx_ant); data->disconn_array[first_chain] = 0; active_chains |= BIT(first_chain); IWL_DEBUG_CALIB(priv, @@ -864,13 +863,13 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig, } } - if (active_chains != hw_params(priv).valid_rx_ant && + if (active_chains != priv->hw_params.valid_rx_ant && active_chains != priv->chain_noise_data.active_chains) IWL_DEBUG_CALIB(priv, "Detected that not all antennas are connected! " "Connected: %#x, valid: %#x.\n", active_chains, - hw_params(priv).valid_rx_ant); + priv->hw_params.valid_rx_ant); /* Save for use within RXON, TX, SCAN commands, etc. */ data->active_chains = active_chains; @@ -895,7 +894,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv, continue; } - delta_g = (cfg(priv)->base_params->chain_noise_scale * + delta_g = (priv->cfg->base_params->chain_noise_scale * ((s32)average_noise[default_chain] - (s32)average_noise[i])) / 1500; @@ -970,7 +969,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv) */ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - if (priv->disable_chain_noise_cal) + if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED) return; data = &(priv->chain_noise_data); @@ -1051,11 +1050,11 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv) return; /* Analyze signal for disconnected antenna */ - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist) { + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist) { /* Disable disconnected antenna algorithm for advanced bt coex, assuming valid antennas are connected */ - data->active_chains = hw_params(priv).valid_rx_ant; + data->active_chains = priv->hw_params.valid_rx_ant; for (i = 0; i < NUM_RX_CHAINS; i++) if (!(data->active_chains & (1<<i))) data->disconn_array[i] = 1; @@ -1085,7 +1084,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv) min_average_noise, min_average_noise_antenna_i); iwlagn_gain_computation(priv, average_noise, - find_first_chain(hw_params(priv).valid_rx_ant)); + find_first_chain(priv->hw_params.valid_rx_ant)); /* Some power changes may have been made during the calibration. 
* Update and commit the RXON diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h index 9ed6683314a..dbe13787f27 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h @@ -63,7 +63,6 @@ #define __iwl_calib_h__ #include "iwl-dev.h" -#include "iwl-core.h" #include "iwl-commands.h" void iwl_chain_noise_calibration(struct iwl_priv *priv); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-devices.c b/drivers/net/wireless/iwlwifi/iwl-agn-devices.c new file mode 100644 index 00000000000..48533b3a0f9 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-agn-devices.c @@ -0,0 +1,755 @@ +/****************************************************************************** + * + * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +/* + * DVM device-specific data & functions + */ +#include "iwl-agn.h" +#include "iwl-dev.h" +#include "iwl-commands.h" +#include "iwl-io.h" +#include "iwl-prph.h" + +/* + * 1000 series + * =========== + */ + +/* + * For 1000, use advance thermal throttling critical temperature threshold, + * but legacy thermal management implementation for now. + * This is for the reason of 1000 uCode using advance thermal throttling API + * but not implement ct_kill_exit based on ct_kill exit temperature + * so the thermal throttling will still based on legacy thermal throttling + * management. 
+ * The code here need to be modified once 1000 uCode has the advanced thermal + * throttling algorithm in place + */ +static void iwl1000_set_ct_threshold(struct iwl_priv *priv) +{ + /* want Celsius */ + priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY; + priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; +} + +/* NIC configuration for 1000 series */ +static void iwl1000_nic_config(struct iwl_priv *priv) +{ + /* set CSR_HW_CONFIG_REG for uCode use */ + iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | + CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); + + /* Setting digital SVR for 1000 card to 1.32V */ + /* locking is acquired in iwl_set_bits_mask_prph() function */ + iwl_set_bits_mask_prph(priv->trans, APMG_DIGITAL_SVR_REG, + APMG_SVR_DIGITAL_VOLTAGE_1_32, + ~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK); +} + +/** + * iwl_beacon_time_mask_low - mask of lower 32 bit of beacon time + * @priv -- pointer to iwl_priv data structure + * @tsf_bits -- number of bits need to shift for masking) + */ +static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv, + u16 tsf_bits) +{ + return (1 << tsf_bits) - 1; +} + +/** + * iwl_beacon_time_mask_high - mask of higher 32 bit of beacon time + * @priv -- pointer to iwl_priv data structure + * @tsf_bits -- number of bits need to shift for masking) + */ +static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv, + u16 tsf_bits) +{ + return ((1 << (32 - tsf_bits)) - 1) << tsf_bits; +} + +/* + * extended beacon time format + * time in usec will be changed into a 32-bit value in extended:internal format + * the extended part is the beacon counts + * the internal part is the time in usec within one beacon interval + */ +static u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, + u32 beacon_interval) +{ + u32 quot; + u32 rem; + u32 interval = beacon_interval * TIME_UNIT; + + if (!interval || !usec) + return 0; + + quot = (usec / interval) & + (iwl_beacon_time_mask_high(priv, IWLAGN_EXT_BEACON_TIME_POS) >> + IWLAGN_EXT_BEACON_TIME_POS); + rem = (usec % interval) & iwl_beacon_time_mask_low(priv, + IWLAGN_EXT_BEACON_TIME_POS); + + return (quot << IWLAGN_EXT_BEACON_TIME_POS) + rem; +} + +/* base is usually what we get from ucode with each received frame, + * the same as HW timer counter counting down + */ +static __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base, + u32 addon, u32 beacon_interval) +{ + u32 base_low = base & iwl_beacon_time_mask_low(priv, + IWLAGN_EXT_BEACON_TIME_POS); + u32 addon_low = addon & iwl_beacon_time_mask_low(priv, + IWLAGN_EXT_BEACON_TIME_POS); + u32 interval = beacon_interval * TIME_UNIT; + u32 res = (base & iwl_beacon_time_mask_high(priv, + IWLAGN_EXT_BEACON_TIME_POS)) + + (addon & iwl_beacon_time_mask_high(priv, + IWLAGN_EXT_BEACON_TIME_POS)); + + if (base_low > addon_low) + res += base_low - addon_low; + else if (base_low < addon_low) { + res += interval + base_low - addon_low; + res += (1 << IWLAGN_EXT_BEACON_TIME_POS); + } else + res += (1 << IWLAGN_EXT_BEACON_TIME_POS); + + return cpu_to_le32(res); +} + +static const struct iwl_sensitivity_ranges iwl1000_sensitivity = { + .min_nrg_cck = 95, + .auto_corr_min_ofdm = 90, + .auto_corr_min_ofdm_mrc = 170, + .auto_corr_min_ofdm_x1 = 120, + .auto_corr_min_ofdm_mrc_x1 = 240, + + .auto_corr_max_ofdm = 120, + .auto_corr_max_ofdm_mrc = 210, + .auto_corr_max_ofdm_x1 = 155, + .auto_corr_max_ofdm_mrc_x1 = 290, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 200, + .auto_corr_min_cck_mrc = 170, + .auto_corr_max_cck_mrc = 400, + 
.nrg_th_cck = 95, + .nrg_th_ofdm = 95, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 390, + .nrg_th_cca = 62, +}; + +static void iwl1000_hw_set_hw_params(struct iwl_priv *priv) +{ + priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ); + + priv->hw_params.tx_chains_num = + num_of_ant(priv->hw_params.valid_tx_ant); + if (priv->cfg->rx_with_siso_diversity) + priv->hw_params.rx_chains_num = 1; + else + priv->hw_params.rx_chains_num = + num_of_ant(priv->hw_params.valid_rx_ant); + + iwl1000_set_ct_threshold(priv); + + /* Set initial sensitivity parameters */ + priv->hw_params.sens = &iwl1000_sensitivity; +} + +struct iwl_lib_ops iwl1000_lib = { + .set_hw_params = iwl1000_hw_set_hw_params, + .nic_config = iwl1000_nic_config, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_REG_BAND_1_CHANNELS, + EEPROM_REG_BAND_2_CHANNELS, + EEPROM_REG_BAND_3_CHANNELS, + EEPROM_REG_BAND_4_CHANNELS, + EEPROM_REG_BAND_5_CHANNELS, + EEPROM_REG_BAND_24_HT40_CHANNELS, + EEPROM_REGULATORY_BAND_NO_HT40, + }, + }, + .temperature = iwlagn_temperature, +}; + + +/* + * 2000 series + * =========== + */ + +static void iwl2000_set_ct_threshold(struct iwl_priv *priv) +{ + /* want Celsius */ + priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD; + priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; +} + +/* NIC configuration for 2000 series */ +static void iwl2000_nic_config(struct iwl_priv *priv) +{ + iwl_rf_config(priv); + + iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, + CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER); +} + +static const struct iwl_sensitivity_ranges iwl2000_sensitivity = { + .min_nrg_cck = 97, + .auto_corr_min_ofdm = 80, + .auto_corr_min_ofdm_mrc = 128, + .auto_corr_min_ofdm_x1 = 105, + .auto_corr_min_ofdm_mrc_x1 = 192, + + .auto_corr_max_ofdm = 145, + .auto_corr_max_ofdm_mrc = 232, + .auto_corr_max_ofdm_x1 = 110, + .auto_corr_max_ofdm_mrc_x1 = 232, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 175, + .auto_corr_min_cck_mrc = 160, + .auto_corr_max_cck_mrc = 310, + .nrg_th_cck = 97, + .nrg_th_ofdm = 100, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 390, + .nrg_th_cca = 62, +}; + +static void iwl2000_hw_set_hw_params(struct iwl_priv *priv) +{ + priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ); + + priv->hw_params.tx_chains_num = + num_of_ant(priv->hw_params.valid_tx_ant); + if (priv->cfg->rx_with_siso_diversity) + priv->hw_params.rx_chains_num = 1; + else + priv->hw_params.rx_chains_num = + num_of_ant(priv->hw_params.valid_rx_ant); + + iwl2000_set_ct_threshold(priv); + + /* Set initial sensitivity parameters */ + priv->hw_params.sens = &iwl2000_sensitivity; +} + +struct iwl_lib_ops iwl2000_lib = { + .set_hw_params = iwl2000_hw_set_hw_params, + .nic_config = iwl2000_nic_config, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_REG_BAND_1_CHANNELS, + EEPROM_REG_BAND_2_CHANNELS, + EEPROM_REG_BAND_3_CHANNELS, + EEPROM_REG_BAND_4_CHANNELS, + EEPROM_REG_BAND_5_CHANNELS, + EEPROM_6000_REG_BAND_24_HT40_CHANNELS, + EEPROM_REGULATORY_BAND_NO_HT40, + }, + .enhanced_txpower = true, + }, + .temperature = iwlagn_temperature, +}; + +struct iwl_lib_ops iwl2030_lib = { + .set_hw_params = iwl2000_hw_set_hw_params, + .nic_config = iwl2000_nic_config, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_REG_BAND_1_CHANNELS, + EEPROM_REG_BAND_2_CHANNELS, + EEPROM_REG_BAND_3_CHANNELS, + EEPROM_REG_BAND_4_CHANNELS, + EEPROM_REG_BAND_5_CHANNELS, + EEPROM_6000_REG_BAND_24_HT40_CHANNELS, + EEPROM_REGULATORY_BAND_NO_HT40, + }, + .enhanced_txpower = true, + }, + .temperature = 
iwlagn_temperature, +}; + +/* + * 5000 series + * =========== + */ + +/* NIC configuration for 5000 series */ +static void iwl5000_nic_config(struct iwl_priv *priv) +{ + iwl_rf_config(priv); + + /* W/A : NIC is stuck in a reset state after Early PCIe power off + * (PCIe power is lost before PERST# is asserted), + * causing ME FW to lose ownership and not being able to obtain it back. + */ + iwl_set_bits_mask_prph(priv->trans, APMG_PS_CTRL_REG, + APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, + ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); +} + +static const struct iwl_sensitivity_ranges iwl5000_sensitivity = { + .min_nrg_cck = 100, + .auto_corr_min_ofdm = 90, + .auto_corr_min_ofdm_mrc = 170, + .auto_corr_min_ofdm_x1 = 105, + .auto_corr_min_ofdm_mrc_x1 = 220, + + .auto_corr_max_ofdm = 120, + .auto_corr_max_ofdm_mrc = 210, + .auto_corr_max_ofdm_x1 = 120, + .auto_corr_max_ofdm_mrc_x1 = 240, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 200, + .auto_corr_min_cck_mrc = 200, + .auto_corr_max_cck_mrc = 400, + .nrg_th_cck = 100, + .nrg_th_ofdm = 100, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 390, + .nrg_th_cca = 62, +}; + +static struct iwl_sensitivity_ranges iwl5150_sensitivity = { + .min_nrg_cck = 95, + .auto_corr_min_ofdm = 90, + .auto_corr_min_ofdm_mrc = 170, + .auto_corr_min_ofdm_x1 = 105, + .auto_corr_min_ofdm_mrc_x1 = 220, + + .auto_corr_max_ofdm = 120, + .auto_corr_max_ofdm_mrc = 210, + /* max = min for performance bug in 5150 DSP */ + .auto_corr_max_ofdm_x1 = 105, + .auto_corr_max_ofdm_mrc_x1 = 220, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 200, + .auto_corr_min_cck_mrc = 170, + .auto_corr_max_cck_mrc = 400, + .nrg_th_cck = 95, + .nrg_th_ofdm = 95, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 390, + .nrg_th_cca = 62, +}; + +#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5) + +static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv) +{ + u16 temperature, voltage; + __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv, + EEPROM_KELVIN_TEMPERATURE); + + temperature = le16_to_cpu(temp_calib[0]); + voltage = le16_to_cpu(temp_calib[1]); + + /* offset = temp - volt / coeff */ + return (s32)(temperature - + voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF); +} + +static void iwl5150_set_ct_threshold(struct iwl_priv *priv) +{ + const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF; + s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) - + iwl_temp_calib_to_offset(priv); + + priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef; +} + +static void iwl5000_set_ct_threshold(struct iwl_priv *priv) +{ + /* want Celsius */ + priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY; +} + +static void iwl5000_hw_set_hw_params(struct iwl_priv *priv) +{ + priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | + BIT(IEEE80211_BAND_5GHZ); + + priv->hw_params.tx_chains_num = + num_of_ant(priv->hw_params.valid_tx_ant); + priv->hw_params.rx_chains_num = + num_of_ant(priv->hw_params.valid_rx_ant); + + iwl5000_set_ct_threshold(priv); + + /* Set initial sensitivity parameters */ + priv->hw_params.sens = &iwl5000_sensitivity; +} + +static void iwl5150_hw_set_hw_params(struct iwl_priv *priv) +{ + priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | + BIT(IEEE80211_BAND_5GHZ); + + priv->hw_params.tx_chains_num = + num_of_ant(priv->hw_params.valid_tx_ant); + priv->hw_params.rx_chains_num = + num_of_ant(priv->hw_params.valid_rx_ant); + + iwl5150_set_ct_threshold(priv); + + /* Set initial sensitivity parameters */ + priv->hw_params.sens 
= &iwl5150_sensitivity; +} + +static void iwl5150_temperature(struct iwl_priv *priv) +{ + u32 vt = 0; + s32 offset = iwl_temp_calib_to_offset(priv); + + vt = le32_to_cpu(priv->statistics.common.temperature); + vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset; + /* now vt hold the temperature in Kelvin */ + priv->temperature = KELVIN_TO_CELSIUS(vt); + iwl_tt_handler(priv); +} + +static int iwl5000_hw_channel_switch(struct iwl_priv *priv, + struct ieee80211_channel_switch *ch_switch) +{ + /* + * MULTI-FIXME + * See iwlagn_mac_channel_switch. + */ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct iwl5000_channel_switch_cmd cmd; + const struct iwl_channel_info *ch_info; + u32 switch_time_in_usec, ucode_switch_time; + u16 ch; + u32 tsf_low; + u8 switch_count; + u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval); + struct ieee80211_vif *vif = ctx->vif; + struct iwl_host_cmd hcmd = { + .id = REPLY_CHANNEL_SWITCH, + .len = { sizeof(cmd), }, + .flags = CMD_SYNC, + .data = { &cmd, }, + }; + + cmd.band = priv->band == IEEE80211_BAND_2GHZ; + ch = ch_switch->channel->hw_value; + IWL_DEBUG_11H(priv, "channel switch from %d to %d\n", + ctx->active.channel, ch); + cmd.channel = cpu_to_le16(ch); + cmd.rxon_flags = ctx->staging.flags; + cmd.rxon_filter_flags = ctx->staging.filter_flags; + switch_count = ch_switch->count; + tsf_low = ch_switch->timestamp & 0x0ffffffff; + /* + * calculate the ucode channel switch time + * adding TSF as one of the factor for when to switch + */ + if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) { + if (switch_count > ((priv->ucode_beacon_time - tsf_low) / + beacon_interval)) { + switch_count -= (priv->ucode_beacon_time - + tsf_low) / beacon_interval; + } else + switch_count = 0; + } + if (switch_count <= 1) + cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); + else { + switch_time_in_usec = + vif->bss_conf.beacon_int * switch_count * TIME_UNIT; + ucode_switch_time = iwl_usecs_to_beacons(priv, + switch_time_in_usec, + beacon_interval); + cmd.switch_time = iwl_add_beacon_time(priv, + priv->ucode_beacon_time, + ucode_switch_time, + beacon_interval); + } + IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", + cmd.switch_time); + ch_info = iwl_get_channel_info(priv, priv->band, ch); + if (ch_info) + cmd.expect_beacon = is_channel_radar(ch_info); + else { + IWL_ERR(priv, "invalid channel switch from %u to %u\n", + ctx->active.channel, ch); + return -EFAULT; + } + + return iwl_dvm_send_cmd(priv, &hcmd); +} + +struct iwl_lib_ops iwl5000_lib = { + .set_hw_params = iwl5000_hw_set_hw_params, + .set_channel_switch = iwl5000_hw_channel_switch, + .nic_config = iwl5000_nic_config, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_REG_BAND_1_CHANNELS, + EEPROM_REG_BAND_2_CHANNELS, + EEPROM_REG_BAND_3_CHANNELS, + EEPROM_REG_BAND_4_CHANNELS, + EEPROM_REG_BAND_5_CHANNELS, + EEPROM_REG_BAND_24_HT40_CHANNELS, + EEPROM_REG_BAND_52_HT40_CHANNELS + }, + }, + .temperature = iwlagn_temperature, +}; + +struct iwl_lib_ops iwl5150_lib = { + .set_hw_params = iwl5150_hw_set_hw_params, + .set_channel_switch = iwl5000_hw_channel_switch, + .nic_config = iwl5000_nic_config, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_REG_BAND_1_CHANNELS, + EEPROM_REG_BAND_2_CHANNELS, + EEPROM_REG_BAND_3_CHANNELS, + EEPROM_REG_BAND_4_CHANNELS, + EEPROM_REG_BAND_5_CHANNELS, + EEPROM_REG_BAND_24_HT40_CHANNELS, + EEPROM_REG_BAND_52_HT40_CHANNELS + }, + }, + .temperature = iwl5150_temperature, +}; + + + +/* + * 6000 series + * =========== + */ + +static 
void iwl6000_set_ct_threshold(struct iwl_priv *priv) +{ + /* want Celsius */ + priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD; + priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; +} + +/* NIC configuration for 6000 series */ +static void iwl6000_nic_config(struct iwl_priv *priv) +{ + iwl_rf_config(priv); + + switch (priv->cfg->device_family) { + case IWL_DEVICE_FAMILY_6005: + case IWL_DEVICE_FAMILY_6030: + case IWL_DEVICE_FAMILY_6000: + break; + case IWL_DEVICE_FAMILY_6000i: + /* 2x2 IPA phy type */ + iwl_write32(priv->trans, CSR_GP_DRIVER_REG, + CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA); + break; + case IWL_DEVICE_FAMILY_6050: + /* Indicate calibration version to uCode. */ + if (iwl_eeprom_calib_version(priv) >= 6) + iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, + CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); + break; + case IWL_DEVICE_FAMILY_6150: + /* Indicate calibration version to uCode. */ + if (iwl_eeprom_calib_version(priv) >= 6) + iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, + CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); + iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, + CSR_GP_DRIVER_REG_BIT_6050_1x2); + break; + default: + WARN_ON(1); + } +} + +static const struct iwl_sensitivity_ranges iwl6000_sensitivity = { + .min_nrg_cck = 110, + .auto_corr_min_ofdm = 80, + .auto_corr_min_ofdm_mrc = 128, + .auto_corr_min_ofdm_x1 = 105, + .auto_corr_min_ofdm_mrc_x1 = 192, + + .auto_corr_max_ofdm = 145, + .auto_corr_max_ofdm_mrc = 232, + .auto_corr_max_ofdm_x1 = 110, + .auto_corr_max_ofdm_mrc_x1 = 232, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 175, + .auto_corr_min_cck_mrc = 160, + .auto_corr_max_cck_mrc = 310, + .nrg_th_cck = 110, + .nrg_th_ofdm = 110, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 336, + .nrg_th_cca = 62, +}; + +static void iwl6000_hw_set_hw_params(struct iwl_priv *priv) +{ + priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | + BIT(IEEE80211_BAND_5GHZ); + + priv->hw_params.tx_chains_num = + num_of_ant(priv->hw_params.valid_tx_ant); + if (priv->cfg->rx_with_siso_diversity) + priv->hw_params.rx_chains_num = 1; + else + priv->hw_params.rx_chains_num = + num_of_ant(priv->hw_params.valid_rx_ant); + + iwl6000_set_ct_threshold(priv); + + /* Set initial sensitivity parameters */ + priv->hw_params.sens = &iwl6000_sensitivity; + +} + +static int iwl6000_hw_channel_switch(struct iwl_priv *priv, + struct ieee80211_channel_switch *ch_switch) +{ + /* + * MULTI-FIXME + * See iwlagn_mac_channel_switch. 
+ */ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct iwl6000_channel_switch_cmd cmd; + const struct iwl_channel_info *ch_info; + u32 switch_time_in_usec, ucode_switch_time; + u16 ch; + u32 tsf_low; + u8 switch_count; + u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval); + struct ieee80211_vif *vif = ctx->vif; + struct iwl_host_cmd hcmd = { + .id = REPLY_CHANNEL_SWITCH, + .len = { sizeof(cmd), }, + .flags = CMD_SYNC, + .data = { &cmd, }, + }; + + cmd.band = priv->band == IEEE80211_BAND_2GHZ; + ch = ch_switch->channel->hw_value; + IWL_DEBUG_11H(priv, "channel switch from %u to %u\n", + ctx->active.channel, ch); + cmd.channel = cpu_to_le16(ch); + cmd.rxon_flags = ctx->staging.flags; + cmd.rxon_filter_flags = ctx->staging.filter_flags; + switch_count = ch_switch->count; + tsf_low = ch_switch->timestamp & 0x0ffffffff; + /* + * calculate the ucode channel switch time + * adding TSF as one of the factor for when to switch + */ + if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) { + if (switch_count > ((priv->ucode_beacon_time - tsf_low) / + beacon_interval)) { + switch_count -= (priv->ucode_beacon_time - + tsf_low) / beacon_interval; + } else + switch_count = 0; + } + if (switch_count <= 1) + cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); + else { + switch_time_in_usec = + vif->bss_conf.beacon_int * switch_count * TIME_UNIT; + ucode_switch_time = iwl_usecs_to_beacons(priv, + switch_time_in_usec, + beacon_interval); + cmd.switch_time = iwl_add_beacon_time(priv, + priv->ucode_beacon_time, + ucode_switch_time, + beacon_interval); + } + IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", + cmd.switch_time); + ch_info = iwl_get_channel_info(priv, priv->band, ch); + if (ch_info) + cmd.expect_beacon = is_channel_radar(ch_info); + else { + IWL_ERR(priv, "invalid channel switch from %u to %u\n", + ctx->active.channel, ch); + return -EFAULT; + } + + return iwl_dvm_send_cmd(priv, &hcmd); +} + +struct iwl_lib_ops iwl6000_lib = { + .set_hw_params = iwl6000_hw_set_hw_params, + .set_channel_switch = iwl6000_hw_channel_switch, + .nic_config = iwl6000_nic_config, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_REG_BAND_1_CHANNELS, + EEPROM_REG_BAND_2_CHANNELS, + EEPROM_REG_BAND_3_CHANNELS, + EEPROM_REG_BAND_4_CHANNELS, + EEPROM_REG_BAND_5_CHANNELS, + EEPROM_6000_REG_BAND_24_HT40_CHANNELS, + EEPROM_REG_BAND_52_HT40_CHANNELS + }, + .enhanced_txpower = true, + }, + .temperature = iwlagn_temperature, +}; + +struct iwl_lib_ops iwl6030_lib = { + .set_hw_params = iwl6000_hw_set_hw_params, + .set_channel_switch = iwl6000_hw_channel_switch, + .nic_config = iwl6000_nic_config, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_REG_BAND_1_CHANNELS, + EEPROM_REG_BAND_2_CHANNELS, + EEPROM_REG_BAND_3_CHANNELS, + EEPROM_REG_BAND_4_CHANNELS, + EEPROM_REG_BAND_5_CHANNELS, + EEPROM_6000_REG_BAND_24_HT40_CHANNELS, + EEPROM_REG_BAND_52_HT40_CHANNELS + }, + .enhanced_txpower = true, + }, + .temperature = iwlagn_temperature, +}; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h index d0ec0abd3c8..7960a52f6ad 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h @@ -102,10 +102,18 @@ /* EEPROM */ #define IWLAGN_EEPROM_IMG_SIZE 2048 +/* OTP */ +/* lower blocks contain EEPROM image and calibration data */ +#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */ +/* high blocks contain PAPD data */ +#define OTP_HIGH_IMAGE_SIZE_6x00 (6 * 512 * sizeof(u16)) /* 6 KB */ 
+#define OTP_HIGH_IMAGE_SIZE_1000 (0x200 * sizeof(u16)) /* 1024 bytes */ +#define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */ +#define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */ +#define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */ +#define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */ + -#define IWLAGN_CMD_FIFO_NUM 7 #define IWLAGN_NUM_QUEUES 20 -#define IWLAGN_NUM_AMPDU_QUEUES 9 -#define IWLAGN_FIRST_AMPDU_QUEUE 11 #endif /* __iwl_agn_hw_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c index 56f41c9409d..01dc4426731 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c @@ -33,12 +33,11 @@ #include <linux/sched.h> #include "iwl-dev.h" -#include "iwl-core.h" #include "iwl-io.h" #include "iwl-agn-hw.h" #include "iwl-agn.h" #include "iwl-trans.h" -#include "iwl-shared.h" +#include "iwl-modparams.h" int iwlagn_hw_valid_rtc_data_addr(u32 addr) { @@ -94,81 +93,6 @@ void iwlagn_temperature(struct iwl_priv *priv) iwl_tt_handler(priv); } -u16 iwl_eeprom_calib_version(struct iwl_shared *shrd) -{ - struct iwl_eeprom_calib_hdr *hdr; - - hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(shrd, - EEPROM_CALIB_ALL); - return hdr->version; - -} - -/* - * EEPROM - */ -static u32 eeprom_indirect_address(const struct iwl_shared *shrd, u32 address) -{ - u16 offset = 0; - - if ((address & INDIRECT_ADDRESS) == 0) - return address; - - switch (address & INDIRECT_TYPE_MSK) { - case INDIRECT_HOST: - offset = iwl_eeprom_query16(shrd, EEPROM_LINK_HOST); - break; - case INDIRECT_GENERAL: - offset = iwl_eeprom_query16(shrd, EEPROM_LINK_GENERAL); - break; - case INDIRECT_REGULATORY: - offset = iwl_eeprom_query16(shrd, EEPROM_LINK_REGULATORY); - break; - case INDIRECT_TXP_LIMIT: - offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT); - break; - case INDIRECT_TXP_LIMIT_SIZE: - offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT_SIZE); - break; - case INDIRECT_CALIBRATION: - offset = iwl_eeprom_query16(shrd, EEPROM_LINK_CALIBRATION); - break; - case INDIRECT_PROCESS_ADJST: - offset = iwl_eeprom_query16(shrd, EEPROM_LINK_PROCESS_ADJST); - break; - case INDIRECT_OTHERS: - offset = iwl_eeprom_query16(shrd, EEPROM_LINK_OTHERS); - break; - default: - IWL_ERR(shrd->trans, "illegal indirect type: 0x%X\n", - address & INDIRECT_TYPE_MSK); - break; - } - - /* translate the offset from words to byte */ - return (address & ADDRESS_MSK) + (offset << 1); -} - -const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset) -{ - u32 address = eeprom_indirect_address(shrd, offset); - BUG_ON(address >= shrd->cfg->base_params->eeprom_size); - return &shrd->eeprom[address]; -} - -struct iwl_mod_params iwlagn_mod_params = { - .amsdu_size_8K = 1, - .restart_fw = 1, - .plcp_check = true, - .bt_coex_active = true, - .no_sleep_autoadjust = true, - .power_level = IWL_POWER_INDEX_1, - .bt_ch_announce = true, - .wanted_ucode_alternative = 1, - .auto_agg = true, - /* the rest are 0 by default */ -}; - int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) { int idx = 0; @@ -228,13 +152,13 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control) IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | IWL_SCD_MGMT_MSK; if ((flush_control & BIT(IWL_RXON_CTX_PAN)) && - (priv->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS))) + (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))) flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK | IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK | 
IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK | IWL_PAN_SCD_MULTICAST_MSK; - if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE) + if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE) flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK; IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n", @@ -253,7 +177,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control) goto done; } IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n"); - iwl_trans_wait_tx_queue_empty(trans(priv)); + iwl_trans_wait_tx_queue_empty(priv->trans); done: ieee80211_wake_queues(priv->hw); mutex_unlock(&priv->mutex); @@ -262,76 +186,8 @@ done: /* * BT coex */ -/* - * Macros to access the lookup table. - * - * The lookup table has 7 inputs: bt3_prio, bt3_txrx, bt_rf_act, wifi_req, -* wifi_prio, wifi_txrx and wifi_sh_ant_req. - * - * It has three outputs: WLAN_ACTIVE, WLAN_KILL and ANT_SWITCH - * - * The format is that "registers" 8 through 11 contain the WLAN_ACTIVE bits - * one after another in 32-bit registers, and "registers" 0 through 7 contain - * the WLAN_KILL and ANT_SWITCH bits interleaved (in that order). - * - * These macros encode that format. - */ -#define LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, wifi_req, wifi_prio, \ - wifi_txrx, wifi_sh_ant_req) \ - (bt3_prio | (bt3_txrx << 1) | (bt_rf_act << 2) | (wifi_req << 3) | \ - (wifi_prio << 4) | (wifi_txrx << 5) | (wifi_sh_ant_req << 6)) - -#define LUT_PTA_WLAN_ACTIVE_OP(lut, op, val) \ - lut[8 + ((val) >> 5)] op (cpu_to_le32(BIT((val) & 0x1f))) -#define LUT_TEST_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \ - wifi_prio, wifi_txrx, wifi_sh_ant_req) \ - (!!(LUT_PTA_WLAN_ACTIVE_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, \ - bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \ - wifi_sh_ant_req)))) -#define LUT_SET_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \ - wifi_prio, wifi_txrx, wifi_sh_ant_req) \ - LUT_PTA_WLAN_ACTIVE_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, \ - bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \ - wifi_sh_ant_req)) -#define LUT_CLEAR_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, \ - wifi_req, wifi_prio, wifi_txrx, \ - wifi_sh_ant_req) \ - LUT_PTA_WLAN_ACTIVE_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, \ - bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \ - wifi_sh_ant_req)) - -#define LUT_WLAN_KILL_OP(lut, op, val) \ - lut[(val) >> 4] op (cpu_to_le32(BIT(((val) << 1) & 0x1e))) -#define LUT_TEST_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \ - wifi_prio, wifi_txrx, wifi_sh_ant_req) \ - (!!(LUT_WLAN_KILL_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \ - wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req)))) -#define LUT_SET_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \ - wifi_prio, wifi_txrx, wifi_sh_ant_req) \ - LUT_WLAN_KILL_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \ - wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req)) -#define LUT_CLEAR_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \ - wifi_prio, wifi_txrx, wifi_sh_ant_req) \ - LUT_WLAN_KILL_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \ - wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req)) - -#define LUT_ANT_SWITCH_OP(lut, op, val) \ - lut[(val) >> 4] op (cpu_to_le32(BIT((((val) << 1) & 0x1e) + 1))) -#define LUT_TEST_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \ - wifi_prio, wifi_txrx, wifi_sh_ant_req) \ - (!!(LUT_ANT_SWITCH_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \ - wifi_req, wifi_prio, wifi_txrx, \ - wifi_sh_ant_req)))) -#define LUT_SET_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, 
wifi_req, \ - wifi_prio, wifi_txrx, wifi_sh_ant_req) \ - LUT_ANT_SWITCH_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \ - wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req)) -#define LUT_CLEAR_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \ - wifi_prio, wifi_txrx, wifi_sh_ant_req) \ - LUT_ANT_SWITCH_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \ - wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req)) - -static const __le32 iwlagn_def_3w_lookup[12] = { +/* Notmal TDM */ +static const __le32 iwlagn_def_3w_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = { cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xaeaaaaaa), @@ -346,7 +202,25 @@ static const __le32 iwlagn_def_3w_lookup[12] = { cpu_to_le32(0xf0005000), }; -static const __le32 iwlagn_concurrent_lookup[12] = { + +/* Loose Coex */ +static const __le32 iwlagn_loose_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = { + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xaeaaaaaa), + cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xcc00ff28), + cpu_to_le32(0x0000aaaa), + cpu_to_le32(0xcc00aaaa), + cpu_to_le32(0x0000aaaa), + cpu_to_le32(0x00000000), + cpu_to_le32(0x00000000), + cpu_to_le32(0xf0005000), + cpu_to_le32(0xf0005000), +}; + +/* Full concurrency */ +static const __le32 iwlagn_concurrent_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = { cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xaaaaaaaa), @@ -369,24 +243,30 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv) .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT, .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT, }; - struct iwl6000_bt_cmd bt_cmd_6000; - struct iwl2000_bt_cmd bt_cmd_2000; + struct iwl_bt_cmd_v1 bt_cmd_v1; + struct iwl_bt_cmd_v2 bt_cmd_v2; int ret; BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) != sizeof(basic.bt3_lookup_table)); - if (cfg(priv)->bt_params) { - if (cfg(priv)->bt_params->bt_session_2) { - bt_cmd_2000.prio_boost = cpu_to_le32( - cfg(priv)->bt_params->bt_prio_boost); - bt_cmd_2000.tx_prio_boost = 0; - bt_cmd_2000.rx_prio_boost = 0; + if (priv->cfg->bt_params) { + /* + * newer generation of devices (2000 series and newer) + * use the version 2 of the bt command + * we need to make sure sending the host command + * with correct data structure to avoid uCode assert + */ + if (priv->cfg->bt_params->bt_session_2) { + bt_cmd_v2.prio_boost = cpu_to_le32( + priv->cfg->bt_params->bt_prio_boost); + bt_cmd_v2.tx_prio_boost = 0; + bt_cmd_v2.rx_prio_boost = 0; } else { - bt_cmd_6000.prio_boost = - cfg(priv)->bt_params->bt_prio_boost; - bt_cmd_6000.tx_prio_boost = 0; - bt_cmd_6000.rx_prio_boost = 0; + bt_cmd_v1.prio_boost = + priv->cfg->bt_params->bt_prio_boost; + bt_cmd_v1.tx_prio_boost = 0; + bt_cmd_v1.rx_prio_boost = 0; } } else { IWL_ERR(priv, "failed to construct BT Coex Config\n"); @@ -395,6 +275,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv) basic.kill_ack_mask = priv->kill_ack_mask; basic.kill_cts_mask = priv->kill_cts_mask; + basic.reduce_txpower = priv->reduced_txpower; basic.valid = priv->bt_valid; /* @@ -403,7 +284,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv) * (might be in monitor mode), or the interface is in * IBSS mode (no proper uCode support for coex then). */ - if (!iwlagn_mod_params.bt_coex_active || + if (!iwlwifi_mod_params.bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) { basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED; } else { @@ -432,16 +313,16 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv) priv->bt_full_concurrent ? 
"full concurrency" : "3-wire"); - if (cfg(priv)->bt_params->bt_session_2) { - memcpy(&bt_cmd_2000.basic, &basic, + if (priv->cfg->bt_params->bt_session_2) { + memcpy(&bt_cmd_v2.basic, &basic, sizeof(basic)); ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, - CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000); + CMD_SYNC, sizeof(bt_cmd_v2), &bt_cmd_v2); } else { - memcpy(&bt_cmd_6000.basic, &basic, + memcpy(&bt_cmd_v1.basic, &basic, sizeof(basic)); ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, - CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000); + CMD_SYNC, sizeof(bt_cmd_v1), &bt_cmd_v1); } if (ret) IWL_ERR(priv, "failed to send BT Coex Config\n"); @@ -615,7 +496,7 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv, struct iwl_bt_uart_msg *uart_msg) { IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, " - "Update Req = 0x%X", + "Update Req = 0x%X\n", (BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >> BT_UART_MSG_FRAME1MSGTYPE_POS, (BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >> @@ -624,7 +505,7 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv, BT_UART_MSG_FRAME1UPDATEREQ_POS); IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, " - "Chl_SeqN = 0x%X, In band = 0x%X", + "Chl_SeqN = 0x%X, In band = 0x%X\n", (BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >> BT_UART_MSG_FRAME2OPENCONNECTIONS_POS, (BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >> @@ -635,7 +516,7 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv, BT_UART_MSG_FRAME2INBAND_POS); IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, " - "ACL = 0x%X, Master = 0x%X, OBEX = 0x%X", + "ACL = 0x%X, Master = 0x%X, OBEX = 0x%X\n", (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >> BT_UART_MSG_FRAME3SCOESCO_POS, (BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >> @@ -649,12 +530,12 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv, (BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >> BT_UART_MSG_FRAME3OBEX_POS); - IWL_DEBUG_COEX(priv, "Idle duration = 0x%X", + IWL_DEBUG_COEX(priv, "Idle duration = 0x%X\n", (BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >> BT_UART_MSG_FRAME4IDLEDURATION_POS); IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, " - "eSCO Retransmissions = 0x%X", + "eSCO Retransmissions = 0x%X\n", (BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >> BT_UART_MSG_FRAME5TXACTIVITY_POS, (BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >> @@ -662,14 +543,14 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv, (BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >> BT_UART_MSG_FRAME5ESCORETRANSMIT_POS); - IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X", + IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X\n", (BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >> BT_UART_MSG_FRAME6SNIFFINTERVAL_POS, (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >> BT_UART_MSG_FRAME6DISCOVERABLE_POS); IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = " - "0x%X, Inquiry = 0x%X, Connectable = 0x%X", + "0x%X, Inquiry = 0x%X, Connectable = 0x%X\n", (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >> BT_UART_MSG_FRAME7SNIFFACTIVITY_POS, (BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >> @@ -680,29 +561,62 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv, BT_UART_MSG_FRAME7CONNECTABLE_POS); } -static void iwlagn_set_kill_msk(struct iwl_priv *priv, +static bool iwlagn_set_kill_msk(struct iwl_priv *priv, struct iwl_bt_uart_msg *uart_msg) { - u8 kill_msk; - static const 
__le32 bt_kill_ack_msg[2] = { + bool need_update = false; + u8 kill_msk = IWL_BT_KILL_REDUCE; + static const __le32 bt_kill_ack_msg[3] = { IWLAGN_BT_KILL_ACK_MASK_DEFAULT, - IWLAGN_BT_KILL_ACK_CTS_MASK_SCO }; - static const __le32 bt_kill_cts_msg[2] = { + IWLAGN_BT_KILL_ACK_CTS_MASK_SCO, + IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE}; + static const __le32 bt_kill_cts_msg[3] = { IWLAGN_BT_KILL_CTS_MASK_DEFAULT, - IWLAGN_BT_KILL_ACK_CTS_MASK_SCO }; + IWLAGN_BT_KILL_ACK_CTS_MASK_SCO, + IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE}; - kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) - ? 1 : 0; + if (!priv->reduced_txpower) + kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) + ? IWL_BT_KILL_OVERRIDE : IWL_BT_KILL_DEFAULT; if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] || priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) { priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK; priv->kill_ack_mask = bt_kill_ack_msg[kill_msk]; priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK; priv->kill_cts_mask = bt_kill_cts_msg[kill_msk]; + need_update = true; + } + return need_update; +} - /* schedule to send runtime bt_config */ - queue_work(priv->workqueue, &priv->bt_runtime_config); +static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv, + struct iwl_bt_uart_msg *uart_msg) +{ + bool need_update = false; + + if (!priv->reduced_txpower && + !iwl_is_associated(priv, IWL_RXON_CTX_PAN) && + (uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK | + BT_UART_MSG_FRAME3OBEX_MSK)) && + !(uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK | + BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK))) { + /* enabling reduced tx power */ + priv->reduced_txpower = true; + priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR; + need_update = true; + } else if (priv->reduced_txpower && + (iwl_is_associated(priv, IWL_RXON_CTX_PAN) || + (uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK | + BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK)) || + !(uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK | + BT_UART_MSG_FRAME3OBEX_MSK)))) { + /* disable reduced tx power */ + priv->reduced_txpower = false; + priv->bt_valid &= ~IWLAGN_BT_VALID_REDUCED_TX_PWR; + need_update = true; } + + return need_update; } int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, @@ -750,7 +664,12 @@ int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, } } - iwlagn_set_kill_msk(priv, uart_msg); + /* schedule to send runtime bt_config */ + /* check reduce power before change ack/cts kill mask */ + if (iwlagn_fill_txpower_mode(priv, uart_msg) || + iwlagn_set_kill_msk(priv, uart_msg)) + queue_work(priv->workqueue, &priv->bt_runtime_config); + /* FIXME: based on notification, adjust the prio_boost */ @@ -798,8 +717,8 @@ static bool is_single_rx_stream(struct iwl_priv *priv) */ static int iwl_get_active_rx_chain_count(struct iwl_priv *priv) { - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist && + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist && (priv->bt_full_concurrent || priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { /* @@ -856,7 +775,7 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap) void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { bool is_single = is_single_rx_stream(priv); - bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->shrd->status); + bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status); u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt; u32 active_chains; u16 rx_chain; @@ -868,10 +787,10 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, 
struct iwl_rxon_context *ctx) if (priv->chain_noise_data.active_chains) active_chains = priv->chain_noise_data.active_chains; else - active_chains = hw_params(priv).valid_rx_ant; + active_chains = priv->hw_params.valid_rx_ant; - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist && + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist && (priv->bt_full_concurrent || priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { /* @@ -1190,7 +1109,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan) memcpy(&rxon, &ctx->active, sizeof(rxon)); priv->ucode_loaded = false; - iwl_trans_stop_device(trans(priv)); + iwl_trans_stop_device(priv->trans); priv->wowlan = true; @@ -1212,7 +1131,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan) if (ret) goto out; - if (!iwlagn_mod_params.sw_crypto) { + if (!iwlwifi_mod_params.sw_crypto) { /* mark all keys clear */ priv->ucode_key_table = 0; ctx->key_mapping_keys = 0; @@ -1298,6 +1217,12 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) return -EIO; } + if (test_bit(STATUS_FW_ERROR, &priv->status)) { + IWL_ERR(priv, "Command %s failed: FW Error\n", + iwl_dvm_get_cmd_string(cmd->id)); + return -EIO; + } + /* * Synchronous commands from this op-mode must hold * the mutex, this ensures we don't try to send two @@ -1312,7 +1237,7 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) return -EIO; } - return iwl_trans_send_cmd(trans(priv), cmd); + return iwl_trans_send_cmd(priv->trans, cmd); } int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id, diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index 7e590b349dd..51e1a69ffdd 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c @@ -36,9 +36,9 @@ #include <linux/workqueue.h> #include "iwl-dev.h" -#include "iwl-core.h" #include "iwl-agn.h" #include "iwl-op-mode.h" +#include "iwl-modparams.h" #define RS_NAME "iwl-agn-rs" @@ -420,7 +420,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, load = rs_tl_get_load(lq_data, tid); - if ((iwlagn_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) { + if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) { IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid); ret = ieee80211_start_tx_ba_session(sta, tid, 5000); @@ -819,7 +819,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta, if (num_of_ant(tbl->ant_type) > 1) tbl->ant_type = - first_antenna(hw_params(priv).valid_tx_ant); + first_antenna(priv->hw_params.valid_tx_ant); tbl->is_ht40 = 0; tbl->is_SGI = 0; @@ -969,7 +969,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) || (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) || (tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) || - (tbl_type.ant_type != info->antenna_sel_tx) || + (tbl_type.ant_type != info->status.antenna) || (!!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)) || (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) || (rs_index != mac_index)) { @@ -1085,7 +1085,7 @@ done: (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate)) rs_program_fix_rate(priv, lq_sta); #endif - if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist) + if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist) 
rs_bt_update_lq(priv, ctx, lq_sta); } @@ -1291,7 +1291,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv, return -1; /* Need both Tx chains/antennas to support MIMO */ - if (hw_params(priv).tx_chains_num < 2) + if (priv->hw_params.tx_chains_num < 2) return -1; IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n"); @@ -1347,7 +1347,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv, return -1; /* Need both Tx chains/antennas to support MIMO */ - if (hw_params(priv).tx_chains_num < 3) + if (priv->hw_params.tx_chains_num < 3) return -1; IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n"); @@ -1446,8 +1446,8 @@ static int rs_move_legacy_other(struct iwl_priv *priv, u32 sz = (sizeof(struct iwl_scale_tbl_info) - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); u8 start_action; - u8 valid_tx_ant = hw_params(priv).valid_tx_ant; - u8 tx_chains_num = hw_params(priv).tx_chains_num; + u8 valid_tx_ant = priv->hw_params.valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; int ret = 0; u8 update_search_tbl_counter = 0; @@ -1464,7 +1464,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv, case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: /* avoid antenna B and MIMO */ valid_tx_ant = - first_antenna(hw_params(priv).valid_tx_ant); + first_antenna(priv->hw_params.valid_tx_ant); if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 && tbl->action != IWL_LEGACY_SWITCH_SISO) tbl->action = IWL_LEGACY_SWITCH_SISO; @@ -1488,7 +1488,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv, else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2) tbl->action = IWL_LEGACY_SWITCH_SISO; valid_tx_ant = - first_antenna(hw_params(priv).valid_tx_ant); + first_antenna(priv->hw_params.valid_tx_ant); } start_action = tbl->action; @@ -1622,8 +1622,8 @@ static int rs_move_siso_to_other(struct iwl_priv *priv, u32 sz = (sizeof(struct iwl_scale_tbl_info) - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); u8 start_action; - u8 valid_tx_ant = hw_params(priv).valid_tx_ant; - u8 tx_chains_num = hw_params(priv).tx_chains_num; + u8 valid_tx_ant = priv->hw_params.valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; u8 update_search_tbl_counter = 0; int ret; @@ -1640,7 +1640,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv, case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: /* avoid antenna B and MIMO */ valid_tx_ant = - first_antenna(hw_params(priv).valid_tx_ant); + first_antenna(priv->hw_params.valid_tx_ant); if (tbl->action != IWL_SISO_SWITCH_ANTENNA1) tbl->action = IWL_SISO_SWITCH_ANTENNA1; break; @@ -1658,7 +1658,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv, /* configure as 1x1 if bt full concurrency */ if (priv->bt_full_concurrent) { valid_tx_ant = - first_antenna(hw_params(priv).valid_tx_ant); + first_antenna(priv->hw_params.valid_tx_ant); if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2) tbl->action = IWL_SISO_SWITCH_ANTENNA1; } @@ -1794,8 +1794,8 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv, u32 sz = (sizeof(struct iwl_scale_tbl_info) - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); u8 start_action; - u8 valid_tx_ant = hw_params(priv).valid_tx_ant; - u8 tx_chains_num = hw_params(priv).tx_chains_num; + u8 valid_tx_ant = priv->hw_params.valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; u8 update_search_tbl_counter = 0; int ret; @@ -1964,8 +1964,8 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv, u32 sz = (sizeof(struct iwl_scale_tbl_info) - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); u8 start_action; - u8 valid_tx_ant 
= hw_params(priv).valid_tx_ant; - u8 tx_chains_num = hw_params(priv).tx_chains_num; + u8 valid_tx_ant = priv->hw_params.valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; int ret; u8 update_search_tbl_counter = 0; @@ -2166,7 +2166,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search) (lq_sta->total_success > lq_sta->max_success_limit) || ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer) && (flush_interval_passed))) { - IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:", + IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n", lq_sta->total_failed, lq_sta->total_success, flush_interval_passed); @@ -2698,7 +2698,7 @@ static void rs_initialize_lq(struct iwl_priv *priv, i = lq_sta->last_txrate_idx; - valid_tx_ant = hw_params(priv).valid_tx_ant; + valid_tx_ant = priv->hw_params.valid_tx_ant; if (!lq_sta->search_better_tbl) active_tbl = lq_sta->active_tbl; @@ -2826,6 +2826,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i struct iwl_station_priv *sta_priv; struct iwl_lq_sta *lq_sta; struct ieee80211_supported_band *sband; + unsigned long supp; /* must be unsigned long for for_each_set_bit */ sta_priv = (struct iwl_station_priv *) sta->drv_priv; lq_sta = &sta_priv->lq_sta; @@ -2855,8 +2856,15 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i lq_sta->max_rate_idx = -1; lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX; lq_sta->is_green = rs_use_green(sta); - lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000); - lq_sta->band = priv->band; + lq_sta->band = sband->band; + /* + * active legacy rates as per supported rates bitmap + */ + supp = sta->supp_rates[sband->band]; + lq_sta->active_legacy_rate = 0; + for_each_set_bit(i, &supp, BITS_PER_LONG) + lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value); + /* * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), * supp_rates[] does not; shift to convert format, force 9 MBits off. @@ -2884,15 +2892,15 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i /* These values will be overridden later */ lq_sta->lq.general_params.single_stream_ant_msk = - first_antenna(hw_params(priv).valid_tx_ant); + first_antenna(priv->hw_params.valid_tx_ant); lq_sta->lq.general_params.dual_stream_ant_msk = - hw_params(priv).valid_tx_ant & - ~first_antenna(hw_params(priv).valid_tx_ant); + priv->hw_params.valid_tx_ant & + ~first_antenna(priv->hw_params.valid_tx_ant); if (!lq_sta->lq.general_params.dual_stream_ant_msk) { lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; - } else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) { + } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) { lq_sta->lq.general_params.dual_stream_ant_msk = - hw_params(priv).valid_tx_ant; + priv->hw_params.valid_tx_ant; } /* as default allow aggregation for all tids */ @@ -2938,7 +2946,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv, if (priv && priv->bt_full_concurrent) { /* 1x1 only */ tbl_type.ant_type = - first_antenna(hw_params(priv).valid_tx_ant); + first_antenna(priv->hw_params.valid_tx_ant); } /* How many times should we repeat the initial rate? 
*/ @@ -2970,7 +2978,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv, if (priv->bt_full_concurrent) valid_tx_ant = ANT_A; else - valid_tx_ant = hw_params(priv).valid_tx_ant; + valid_tx_ant = priv->hw_params.valid_tx_ant; } /* Fill rest of rate table */ @@ -3004,7 +3012,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv, if (priv && priv->bt_full_concurrent) { /* 1x1 only */ tbl_type.ant_type = - first_antenna(hw_params(priv).valid_tx_ant); + first_antenna(priv->hw_params.valid_tx_ant); } /* Indicate to uCode which entries might be MIMO. @@ -3055,11 +3063,11 @@ static void rs_fill_link_cmd(struct iwl_priv *priv, * overwrite if needed, pass aggregation time limit * to uCode in uSec */ - if (priv && cfg(priv)->bt_params && - cfg(priv)->bt_params->agg_time_limit && + if (priv && priv->cfg->bt_params && + priv->cfg->bt_params->agg_time_limit && priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) lq_cmd->agg_params.agg_time_limit = - cpu_to_le16(cfg(priv)->bt_params->agg_time_limit); + cpu_to_le16(priv->cfg->bt_params->agg_time_limit); } static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) @@ -3091,7 +3099,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, u8 ant_sel_tx; priv = lq_sta->drv; - valid_tx_ant = hw_params(priv).valid_tx_ant; + valid_tx_ant = priv->hw_params.valid_tx_ant; if (lq_sta->dbg_fixed_rate) { ant_sel_tx = ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) @@ -3162,9 +3170,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, desc += sprintf(buff+desc, "fixed rate 0x%X\n", lq_sta->dbg_fixed_rate); desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", - (hw_params(priv).valid_tx_ant & ANT_A) ? "ANT_A," : "", - (hw_params(priv).valid_tx_ant & ANT_B) ? "ANT_B," : "", - (hw_params(priv).valid_tx_ant & ANT_C) ? "ANT_C" : ""); + (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "", + (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "", + (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : ""); desc += sprintf(buff+desc, "lq type %s\n", (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); if (is_Ht(tbl->lq_type)) { diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h index 203b1c13c49..82d02e1ae89 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h @@ -30,6 +30,7 @@ #include <net/mac80211.h> #include "iwl-commands.h" +#include "iwl-config.h" struct iwl_rate_info { u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. 
*/ @@ -174,32 +175,6 @@ enum { IWL_RATE_11M_IEEE = 22, }; -#define IWL_CCK_BASIC_RATES_MASK \ - (IWL_RATE_1M_MASK | \ - IWL_RATE_2M_MASK) - -#define IWL_CCK_RATES_MASK \ - (IWL_CCK_BASIC_RATES_MASK | \ - IWL_RATE_5M_MASK | \ - IWL_RATE_11M_MASK) - -#define IWL_OFDM_BASIC_RATES_MASK \ - (IWL_RATE_6M_MASK | \ - IWL_RATE_12M_MASK | \ - IWL_RATE_24M_MASK) - -#define IWL_OFDM_RATES_MASK \ - (IWL_OFDM_BASIC_RATES_MASK | \ - IWL_RATE_9M_MASK | \ - IWL_RATE_18M_MASK | \ - IWL_RATE_36M_MASK | \ - IWL_RATE_48M_MASK | \ - IWL_RATE_54M_MASK) - -#define IWL_BASIC_RATES_MASK \ - (IWL_OFDM_BASIC_RATES_MASK | \ - IWL_CCK_BASIC_RATES_MASK) - #define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) #define IWL_INVALID_VALUE -1 @@ -306,15 +281,6 @@ enum iwl_table_type { #define is_a_band(tbl) ((tbl) == LQ_A) #define is_g_and(tbl) ((tbl) == LQ_G) -#define ANT_NONE 0x0 -#define ANT_A BIT(0) -#define ANT_B BIT(1) -#define ANT_AB (ANT_A | ANT_B) -#define ANT_C BIT(2) -#define ANT_AC (ANT_A | ANT_C) -#define ANT_BC (ANT_B | ANT_C) -#define ANT_ABC (ANT_AB | ANT_C) - #define IWL_MAX_MCS_DISPLAY_SIZE 12 struct iwl_rate_mcs_info { diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c index 22474608a70..403de96f974 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c @@ -34,95 +34,91 @@ #include <asm/unaligned.h> #include "iwl-eeprom.h" #include "iwl-dev.h" -#include "iwl-core.h" #include "iwl-io.h" #include "iwl-agn-calib.h" #include "iwl-agn.h" -#include "iwl-shared.h" - -const char *get_cmd_string(u8 cmd) -{ - switch (cmd) { - IWL_CMD(REPLY_ALIVE); - IWL_CMD(REPLY_ERROR); - IWL_CMD(REPLY_ECHO); - IWL_CMD(REPLY_RXON); - IWL_CMD(REPLY_RXON_ASSOC); - IWL_CMD(REPLY_QOS_PARAM); - IWL_CMD(REPLY_RXON_TIMING); - IWL_CMD(REPLY_ADD_STA); - IWL_CMD(REPLY_REMOVE_STA); - IWL_CMD(REPLY_REMOVE_ALL_STA); - IWL_CMD(REPLY_TXFIFO_FLUSH); - IWL_CMD(REPLY_WEPKEY); - IWL_CMD(REPLY_TX); - IWL_CMD(REPLY_LEDS_CMD); - IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); - IWL_CMD(COEX_PRIORITY_TABLE_CMD); - IWL_CMD(COEX_MEDIUM_NOTIFICATION); - IWL_CMD(COEX_EVENT_CMD); - IWL_CMD(REPLY_QUIET_CMD); - IWL_CMD(REPLY_CHANNEL_SWITCH); - IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); - IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD); - IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION); - IWL_CMD(POWER_TABLE_CMD); - IWL_CMD(PM_SLEEP_NOTIFICATION); - IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC); - IWL_CMD(REPLY_SCAN_CMD); - IWL_CMD(REPLY_SCAN_ABORT_CMD); - IWL_CMD(SCAN_START_NOTIFICATION); - IWL_CMD(SCAN_RESULTS_NOTIFICATION); - IWL_CMD(SCAN_COMPLETE_NOTIFICATION); - IWL_CMD(BEACON_NOTIFICATION); - IWL_CMD(REPLY_TX_BEACON); - IWL_CMD(WHO_IS_AWAKE_NOTIFICATION); - IWL_CMD(QUIET_NOTIFICATION); - IWL_CMD(REPLY_TX_PWR_TABLE_CMD); - IWL_CMD(MEASURE_ABORT_NOTIFICATION); - IWL_CMD(REPLY_BT_CONFIG); - IWL_CMD(REPLY_STATISTICS_CMD); - IWL_CMD(STATISTICS_NOTIFICATION); - IWL_CMD(REPLY_CARD_STATE_CMD); - IWL_CMD(CARD_STATE_NOTIFICATION); - IWL_CMD(MISSED_BEACONS_NOTIFICATION); - IWL_CMD(REPLY_CT_KILL_CONFIG_CMD); - IWL_CMD(SENSITIVITY_CMD); - IWL_CMD(REPLY_PHY_CALIBRATION_CMD); - IWL_CMD(REPLY_RX_PHY_CMD); - IWL_CMD(REPLY_RX_MPDU_CMD); - IWL_CMD(REPLY_RX); - IWL_CMD(REPLY_COMPRESSED_BA); - IWL_CMD(CALIBRATION_CFG_CMD); - IWL_CMD(CALIBRATION_RES_NOTIFICATION); - IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION); - IWL_CMD(REPLY_TX_POWER_DBM_CMD); - IWL_CMD(TEMPERATURE_NOTIFICATION); - IWL_CMD(TX_ANT_CONFIGURATION_CMD); - IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF); - IWL_CMD(REPLY_BT_COEX_PRIO_TABLE); - IWL_CMD(REPLY_BT_COEX_PROT_ENV); - 
IWL_CMD(REPLY_WIPAN_PARAMS); - IWL_CMD(REPLY_WIPAN_RXON); - IWL_CMD(REPLY_WIPAN_RXON_TIMING); - IWL_CMD(REPLY_WIPAN_RXON_ASSOC); - IWL_CMD(REPLY_WIPAN_QOS_PARAM); - IWL_CMD(REPLY_WIPAN_WEPKEY); - IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH); - IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION); - IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE); - IWL_CMD(REPLY_WOWLAN_PATTERNS); - IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER); - IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS); - IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS); - IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL); - IWL_CMD(REPLY_WOWLAN_GET_STATUS); - IWL_CMD(REPLY_D3_CONFIG); - default: - return "UNKNOWN"; - - } -} +#include "iwl-modparams.h" + +#define IWL_CMD_ENTRY(x) [x] = #x + +const char *iwl_dvm_cmd_strings[REPLY_MAX] = { + IWL_CMD_ENTRY(REPLY_ALIVE), + IWL_CMD_ENTRY(REPLY_ERROR), + IWL_CMD_ENTRY(REPLY_ECHO), + IWL_CMD_ENTRY(REPLY_RXON), + IWL_CMD_ENTRY(REPLY_RXON_ASSOC), + IWL_CMD_ENTRY(REPLY_QOS_PARAM), + IWL_CMD_ENTRY(REPLY_RXON_TIMING), + IWL_CMD_ENTRY(REPLY_ADD_STA), + IWL_CMD_ENTRY(REPLY_REMOVE_STA), + IWL_CMD_ENTRY(REPLY_REMOVE_ALL_STA), + IWL_CMD_ENTRY(REPLY_TXFIFO_FLUSH), + IWL_CMD_ENTRY(REPLY_WEPKEY), + IWL_CMD_ENTRY(REPLY_TX), + IWL_CMD_ENTRY(REPLY_LEDS_CMD), + IWL_CMD_ENTRY(REPLY_TX_LINK_QUALITY_CMD), + IWL_CMD_ENTRY(COEX_PRIORITY_TABLE_CMD), + IWL_CMD_ENTRY(COEX_MEDIUM_NOTIFICATION), + IWL_CMD_ENTRY(COEX_EVENT_CMD), + IWL_CMD_ENTRY(REPLY_QUIET_CMD), + IWL_CMD_ENTRY(REPLY_CHANNEL_SWITCH), + IWL_CMD_ENTRY(CHANNEL_SWITCH_NOTIFICATION), + IWL_CMD_ENTRY(REPLY_SPECTRUM_MEASUREMENT_CMD), + IWL_CMD_ENTRY(SPECTRUM_MEASURE_NOTIFICATION), + IWL_CMD_ENTRY(POWER_TABLE_CMD), + IWL_CMD_ENTRY(PM_SLEEP_NOTIFICATION), + IWL_CMD_ENTRY(PM_DEBUG_STATISTIC_NOTIFIC), + IWL_CMD_ENTRY(REPLY_SCAN_CMD), + IWL_CMD_ENTRY(REPLY_SCAN_ABORT_CMD), + IWL_CMD_ENTRY(SCAN_START_NOTIFICATION), + IWL_CMD_ENTRY(SCAN_RESULTS_NOTIFICATION), + IWL_CMD_ENTRY(SCAN_COMPLETE_NOTIFICATION), + IWL_CMD_ENTRY(BEACON_NOTIFICATION), + IWL_CMD_ENTRY(REPLY_TX_BEACON), + IWL_CMD_ENTRY(WHO_IS_AWAKE_NOTIFICATION), + IWL_CMD_ENTRY(QUIET_NOTIFICATION), + IWL_CMD_ENTRY(REPLY_TX_PWR_TABLE_CMD), + IWL_CMD_ENTRY(MEASURE_ABORT_NOTIFICATION), + IWL_CMD_ENTRY(REPLY_BT_CONFIG), + IWL_CMD_ENTRY(REPLY_STATISTICS_CMD), + IWL_CMD_ENTRY(STATISTICS_NOTIFICATION), + IWL_CMD_ENTRY(REPLY_CARD_STATE_CMD), + IWL_CMD_ENTRY(CARD_STATE_NOTIFICATION), + IWL_CMD_ENTRY(MISSED_BEACONS_NOTIFICATION), + IWL_CMD_ENTRY(REPLY_CT_KILL_CONFIG_CMD), + IWL_CMD_ENTRY(SENSITIVITY_CMD), + IWL_CMD_ENTRY(REPLY_PHY_CALIBRATION_CMD), + IWL_CMD_ENTRY(REPLY_RX_PHY_CMD), + IWL_CMD_ENTRY(REPLY_RX_MPDU_CMD), + IWL_CMD_ENTRY(REPLY_RX), + IWL_CMD_ENTRY(REPLY_COMPRESSED_BA), + IWL_CMD_ENTRY(CALIBRATION_CFG_CMD), + IWL_CMD_ENTRY(CALIBRATION_RES_NOTIFICATION), + IWL_CMD_ENTRY(CALIBRATION_COMPLETE_NOTIFICATION), + IWL_CMD_ENTRY(REPLY_TX_POWER_DBM_CMD), + IWL_CMD_ENTRY(TEMPERATURE_NOTIFICATION), + IWL_CMD_ENTRY(TX_ANT_CONFIGURATION_CMD), + IWL_CMD_ENTRY(REPLY_BT_COEX_PROFILE_NOTIF), + IWL_CMD_ENTRY(REPLY_BT_COEX_PRIO_TABLE), + IWL_CMD_ENTRY(REPLY_BT_COEX_PROT_ENV), + IWL_CMD_ENTRY(REPLY_WIPAN_PARAMS), + IWL_CMD_ENTRY(REPLY_WIPAN_RXON), + IWL_CMD_ENTRY(REPLY_WIPAN_RXON_TIMING), + IWL_CMD_ENTRY(REPLY_WIPAN_RXON_ASSOC), + IWL_CMD_ENTRY(REPLY_WIPAN_QOS_PARAM), + IWL_CMD_ENTRY(REPLY_WIPAN_WEPKEY), + IWL_CMD_ENTRY(REPLY_WIPAN_P2P_CHANNEL_SWITCH), + IWL_CMD_ENTRY(REPLY_WIPAN_NOA_NOTIFICATION), + IWL_CMD_ENTRY(REPLY_WIPAN_DEACTIVATION_COMPLETE), + IWL_CMD_ENTRY(REPLY_WOWLAN_PATTERNS), + IWL_CMD_ENTRY(REPLY_WOWLAN_WAKEUP_FILTER), + IWL_CMD_ENTRY(REPLY_WOWLAN_TSC_RSC_PARAMS), + 
IWL_CMD_ENTRY(REPLY_WOWLAN_TKIP_PARAMS), + IWL_CMD_ENTRY(REPLY_WOWLAN_KEK_KCK_MATERIAL), + IWL_CMD_ENTRY(REPLY_WOWLAN_GET_STATUS), + IWL_CMD_ENTRY(REPLY_D3_CONFIG), +}; +#undef IWL_CMD_ENTRY /****************************************************************************** * @@ -137,10 +133,9 @@ static int iwlagn_rx_reply_error(struct iwl_priv *priv, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_error_resp *err_resp = (void *)pkt->data; - IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) " + IWL_ERR(priv, "Error Reply type 0x%08X cmd REPLY_ERROR (0x%02X) " "seq 0x%04X ser 0x%08X\n", le32_to_cpu(err_resp->error_type), - get_cmd_string(err_resp->cmd_id), err_resp->cmd_id, le16_to_cpu(err_resp->bad_cmd_seq_num), le32_to_cpu(err_resp->error_info)); @@ -216,8 +211,7 @@ static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv, u32 __maybe_unused len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " - "notification for %s:\n", len, - get_cmd_string(pkt->hdr.cmd)); + "notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len); iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len); return 0; } @@ -246,69 +240,6 @@ static int iwlagn_rx_beacon_notif(struct iwl_priv *priv, return 0; } -/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ -#define ACK_CNT_RATIO (50) -#define BA_TIMEOUT_CNT (5) -#define BA_TIMEOUT_MAX (16) - -/** - * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries. - * - * When the ACK count ratio is low and aggregated BA timeout retries exceeding - * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal - * operation state. - */ -static bool iwlagn_good_ack_health(struct iwl_priv *priv, - struct statistics_tx *cur) -{ - int actual_delta, expected_delta, ba_timeout_delta; - struct statistics_tx *old; - - if (priv->agg_tids_count) - return true; - - lockdep_assert_held(&priv->statistics.lock); - - old = &priv->statistics.tx; - - actual_delta = le32_to_cpu(cur->actual_ack_cnt) - - le32_to_cpu(old->actual_ack_cnt); - expected_delta = le32_to_cpu(cur->expected_ack_cnt) - - le32_to_cpu(old->expected_ack_cnt); - - /* Values should not be negative, but we do not trust the firmware */ - if (actual_delta <= 0 || expected_delta <= 0) - return true; - - ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) - - le32_to_cpu(old->agg.ba_timeout); - - if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO && - ba_timeout_delta > BA_TIMEOUT_CNT) { - IWL_DEBUG_RADIO(priv, - "deltas: actual %d expected %d ba_timeout %d\n", - actual_delta, expected_delta, ba_timeout_delta); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - /* - * This is ifdef'ed on DEBUGFS because otherwise the - * statistics aren't available. If DEBUGFS is set but - * DEBUG is not, these will just compile out. - */ - IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n", - priv->delta_stats.tx.rx_detected_cnt); - IWL_DEBUG_RADIO(priv, - "ack_or_ba_timeout_collision delta %d\n", - priv->delta_stats.tx.ack_or_ba_timeout_collision); -#endif - - if (ba_timeout_delta >= BA_TIMEOUT_MAX) - return false; - } - - return true; -} - /** * iwl_good_plcp_health - checks for plcp error. 
* @@ -347,6 +278,45 @@ static bool iwlagn_good_plcp_health(struct iwl_priv *priv, return true; } +int iwl_force_rf_reset(struct iwl_priv *priv, bool external) +{ + struct iwl_rf_reset *rf_reset; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EAGAIN; + + if (!iwl_is_any_associated(priv)) { + IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n"); + return -ENOLINK; + } + + rf_reset = &priv->rf_reset; + rf_reset->reset_request_count++; + if (!external && rf_reset->last_reset_jiffies && + time_after(rf_reset->last_reset_jiffies + + IWL_DELAY_NEXT_FORCE_RF_RESET, jiffies)) { + IWL_DEBUG_INFO(priv, "RF reset rejected\n"); + rf_reset->reset_reject_count++; + return -EAGAIN; + } + rf_reset->reset_success_count++; + rf_reset->last_reset_jiffies = jiffies; + + /* + * There is no easy and better way to force reset the radio, + * the only known method is switching channel which will force to + * reset and tune the radio. + * Use internal short scan (single channel) operation to should + * achieve this objective. + * Driver should reset the radio when number of consecutive missed + * beacon, or any other uCode error condition detected. + */ + IWL_DEBUG_INFO(priv, "perform radio reset.\n"); + iwl_internal_short_hw_scan(priv); + return 0; +} + + static void iwlagn_recover_from_statistics(struct iwl_priv *priv, struct statistics_rx_phy *cur_ofdm, struct statistics_rx_ht_phy *cur_ofdm_ht, @@ -368,15 +338,9 @@ static void iwlagn_recover_from_statistics(struct iwl_priv *priv, if (msecs < 99) return; - if (iwlagn_mod_params.ack_check && !iwlagn_good_ack_health(priv, tx)) { - IWL_ERR(priv, "low ack count detected, restart firmware\n"); - if (!iwl_force_reset(priv, IWL_FW_RESET, false)) - return; - } - - if (iwlagn_mod_params.plcp_check && + if (iwlwifi_mod_params.plcp_check && !iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs)) - iwl_force_reset(priv, IWL_RF_RESET, false); + iwl_force_rf_reset(priv, false); } /* Calculate noise level, based on measurements during network silence just @@ -589,8 +553,8 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv, iwlagn_rx_calc_noise(priv); queue_work(priv->workqueue, &priv->run_time_calib_work); } - if (cfg(priv)->lib->temperature && change) - cfg(priv)->lib->temperature(priv); + if (priv->lib->temperature && change) + priv->lib->temperature(priv); spin_unlock(&priv->statistics.lock); @@ -639,16 +603,16 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv, if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) { - iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_SET, + iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); - iwl_write_direct32(trans(priv), HBUS_TARG_MBX_C, + iwl_write_direct32(priv->trans, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); if (!(flags & RXON_CARD_DISABLED)) { - iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR, + iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); - iwl_write_direct32(trans(priv), HBUS_TARG_MBX_C, + iwl_write_direct32(priv->trans, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); } if (flags & CT_CARD_DISABLED) @@ -671,7 +635,7 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv, wiphy_rfkill_set_hw_state(priv->hw->wiphy, test_bit(STATUS_RF_KILL_HW, &priv->status)); else - wake_up(&trans(priv)->wait_command_queue); + wake_up(&priv->trans->wait_command_queue); return 0; } @@ -783,7 +747,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, } /* In case of HW 
accelerated crypto and bad decryption, drop */ - if (!iwlagn_mod_params.sw_crypto && + if (!iwlwifi_mod_params.sw_crypto && iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats)) return; @@ -795,17 +759,22 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, IWL_ERR(priv, "alloc_skb failed\n"); return; } - hdrlen = min_t(unsigned int, len, skb_tailroom(skb)); + /* If frame is small enough to fit in skb->head, pull it completely. + * If not, only pull ieee80211_hdr so that splice() or TCP coalesce + * are more efficient. + */ + hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr); + memcpy(skb_put(skb, hdrlen), hdr, hdrlen); fraglen = len - hdrlen; if (fraglen) { - int offset = (void *)hdr + hdrlen - rxb_addr(rxb); + int offset = (void *)hdr + hdrlen - + rxb_addr(rxb) + rxb_offset(rxb); skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, fraglen, rxb->truesize); } - iwl_update_stats(priv, false, fc, len); /* * Wake any queues that were stopped due to a passive channel tx @@ -816,8 +785,8 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, */ if (unlikely(ieee80211_is_beacon(fc) && priv->passive_no_rx)) { for_each_context(priv, ctx) { - if (compare_ether_addr(hdr->addr3, - ctx->active.bssid_addr)) + if (!ether_addr_equal(hdr->addr3, + ctx->active.bssid_addr)) continue; iwlagn_lift_passive_no_rx(priv); } @@ -977,7 +946,7 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv, } if ((unlikely(phy_res->cfg_phy_cnt > 20))) { - IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n", + IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n", phy_res->cfg_phy_cnt); return 0; } @@ -1012,7 +981,6 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv, /* Find max signal strength (dBm) among 3 antenna/receiver chains */ rx_status.signal = iwlagn_calc_rssi(priv, phy_res); - iwl_dbg_log_rx_data_frame(priv, len, header); IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", rx_status.signal, (unsigned long long)rx_status.mactime); @@ -1141,16 +1109,13 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv) handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba; - /* init calibration handlers */ - priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] = - iwlagn_rx_calib_result; priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; /* set up notification wait support */ iwl_notification_wait_init(&priv->notif_wait); /* Set up BT Rx handlers */ - if (cfg(priv)->bt_params) + if (priv->cfg->bt_params) iwlagn_bt_rx_handler_setup(priv); } @@ -1192,9 +1157,9 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd); } else { /* No handling needed */ - IWL_DEBUG_RX(priv, - "No handler needed for %s, 0x%02x\n", - get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); + IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n", + iwl_dvm_get_cmd_string(pkt->hdr.cmd), + pkt->hdr.cmd); } } return err; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c index 2e1a31797a9..74fbee62730 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c @@ -24,12 +24,79 @@ * *****************************************************************************/ +#include <linux/etherdevice.h> #include "iwl-dev.h" #include "iwl-agn.h" -#include "iwl-core.h" #include "iwl-agn-calib.h" #include "iwl-trans.h" -#include "iwl-shared.h" +#include "iwl-modparams.h" + +/* + * initialize rxon structure with default values from eeprom + */ +void 
iwl_connection_init_rx_config(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + const struct iwl_channel_info *ch_info; + + memset(&ctx->staging, 0, sizeof(ctx->staging)); + + if (!ctx->vif) { + ctx->staging.dev_type = ctx->unused_devtype; + } else + switch (ctx->vif->type) { + case NL80211_IFTYPE_AP: + ctx->staging.dev_type = ctx->ap_devtype; + break; + + case NL80211_IFTYPE_STATION: + ctx->staging.dev_type = ctx->station_devtype; + ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; + break; + + case NL80211_IFTYPE_ADHOC: + ctx->staging.dev_type = ctx->ibss_devtype; + ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; + ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK | + RXON_FILTER_ACCEPT_GRP_MSK; + break; + + default: + IWL_ERR(priv, "Unsupported interface type %d\n", + ctx->vif->type); + break; + } + +#if 0 + /* TODO: Figure out when short_preamble would be set and cache from + * that */ + if (!hw_to_local(priv->hw)->short_preamble) + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; +#endif + + ch_info = iwl_get_channel_info(priv, priv->band, + le16_to_cpu(ctx->active.channel)); + + if (!ch_info) + ch_info = &priv->channel_info[0]; + + ctx->staging.channel = cpu_to_le16(ch_info->channel); + priv->band = ch_info->band; + + iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif); + + /* clear both MIX and PURE40 mode flag */ + ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED | + RXON_FLG_CHANNEL_MODE_PURE_40); + if (ctx->vif) + memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN); + + ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff; + ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff; + ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff; +} static int iwlagn_disable_bss(struct iwl_priv *priv, struct iwl_rxon_context *ctx, @@ -59,9 +126,12 @@ static int iwlagn_disable_pan(struct iwl_priv *priv, __le32 old_filter = send->filter_flags; u8 old_dev_type = send->dev_type; int ret; + static const u8 deactivate_cmd[] = { + REPLY_WIPAN_DEACTIVATION_COMPLETE + }; iwl_init_notification_wait(&priv->notif_wait, &disable_wait, - REPLY_WIPAN_DEACTIVATION_COMPLETE, + deactivate_cmd, ARRAY_SIZE(deactivate_cmd), NULL, NULL); send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; @@ -101,8 +171,7 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv, return ret; } -static void iwlagn_update_qos(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) +void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { int ret; @@ -129,8 +198,8 @@ static void iwlagn_update_qos(struct iwl_priv *priv, IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n"); } -static int iwlagn_update_beacon(struct iwl_priv *priv, - struct ieee80211_vif *vif) +int iwlagn_update_beacon(struct iwl_priv *priv, + struct ieee80211_vif *vif) { lockdep_assert_held(&priv->mutex); @@ -186,6 +255,109 @@ static int iwlagn_send_rxon_assoc(struct iwl_priv *priv, return ret; } +static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) +{ + u16 new_val; + u16 beacon_factor; + + /* + * If mac80211 hasn't given us a beacon interval, program + * the default into the device (not checking this here + * would cause the adjustment below to return the maximum + * value, which may break PAN.) 
+ */ + if (!beacon_val) + return DEFAULT_BEACON_INTERVAL; + + /* + * If the beacon interval we obtained from the peer + * is too large, we'll have to wake up more often + * (and in IBSS case, we'll beacon too much) + * + * For example, if max_beacon_val is 4096, and the + * requested beacon interval is 7000, we'll have to + * use 3500 to be able to wake up on the beacons. + * + * This could badly influence beacon detection stats. + */ + + beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; + new_val = beacon_val / beacon_factor; + + if (!new_val) + new_val = max_beacon_val; + + return new_val; +} + +static int iwl_send_rxon_timing(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + u64 tsf; + s32 interval_tm, rem; + struct ieee80211_conf *conf = NULL; + u16 beacon_int; + struct ieee80211_vif *vif = ctx->vif; + + conf = &priv->hw->conf; + + lockdep_assert_held(&priv->mutex); + + memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd)); + + ctx->timing.timestamp = cpu_to_le64(priv->timestamp); + ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval); + + beacon_int = vif ? vif->bss_conf.beacon_int : 0; + + /* + * TODO: For IBSS we need to get atim_window from mac80211, + * for now just always use 0 + */ + ctx->timing.atim_window = 0; + + if (ctx->ctxid == IWL_RXON_CTX_PAN && + (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) && + iwl_is_associated(priv, IWL_RXON_CTX_BSS) && + priv->contexts[IWL_RXON_CTX_BSS].vif && + priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) { + ctx->timing.beacon_interval = + priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval; + beacon_int = le16_to_cpu(ctx->timing.beacon_interval); + } else if (ctx->ctxid == IWL_RXON_CTX_BSS && + iwl_is_associated(priv, IWL_RXON_CTX_PAN) && + priv->contexts[IWL_RXON_CTX_PAN].vif && + priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int && + (!iwl_is_associated_ctx(ctx) || !ctx->vif || + !ctx->vif->bss_conf.beacon_int)) { + ctx->timing.beacon_interval = + priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval; + beacon_int = le16_to_cpu(ctx->timing.beacon_interval); + } else { + beacon_int = iwl_adjust_beacon_interval(beacon_int, + IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT); + ctx->timing.beacon_interval = cpu_to_le16(beacon_int); + } + + ctx->beacon_int = beacon_int; + + tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */ + interval_tm = beacon_int * TIME_UNIT; + rem = do_div(tsf, interval_tm); + ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem); + + ctx->timing.dtim_period = vif ? 
(vif->bss_conf.dtim_period ?: 1) : 1; + + IWL_DEBUG_ASSOC(priv, + "beacon interval %d beacon timer %d beacon tim %d\n", + le16_to_cpu(ctx->timing.beacon_interval), + le32_to_cpu(ctx->timing.beacon_init_val), + le16_to_cpu(ctx->timing.atim_window)); + + return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd, + CMD_SYNC, sizeof(ctx->timing), &ctx->timing); +} + static int iwlagn_rxon_disconn(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { @@ -228,6 +400,64 @@ static int iwlagn_rxon_disconn(struct iwl_priv *priv, return 0; } +static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) +{ + int ret; + s8 prev_tx_power; + bool defer; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (priv->calib_disabled & IWL_TX_POWER_CALIB_DISABLED) + return 0; + + lockdep_assert_held(&priv->mutex); + + if (priv->tx_power_user_lmt == tx_power && !force) + return 0; + + if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) { + IWL_WARN(priv, + "Requested user TXPOWER %d below lower limit %d.\n", + tx_power, + IWLAGN_TX_POWER_TARGET_POWER_MIN); + return -EINVAL; + } + + if (tx_power > priv->tx_power_device_lmt) { + IWL_WARN(priv, + "Requested user TXPOWER %d above upper limit %d.\n", + tx_power, priv->tx_power_device_lmt); + return -EINVAL; + } + + if (!iwl_is_ready_rf(priv)) + return -EIO; + + /* scan complete and commit_rxon use tx_power_next value, + * it always need to be updated for newest request */ + priv->tx_power_next = tx_power; + + /* do not set tx power when scanning or channel changing */ + defer = test_bit(STATUS_SCANNING, &priv->status) || + memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)); + if (defer && !force) { + IWL_DEBUG_INFO(priv, "Deferring tx power set\n"); + return 0; + } + + prev_tx_power = priv->tx_power_user_lmt; + priv->tx_power_user_lmt = tx_power; + + ret = iwlagn_send_tx_power(priv); + + /* if fail to set tx_power, restore the orig. 
tx power */ + if (ret) { + priv->tx_power_user_lmt = prev_tx_power; + priv->tx_power_next = prev_tx_power; + } + return ret; +} + static int iwlagn_rxon_connect(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { @@ -295,9 +525,9 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv, } if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION && - cfg(priv)->ht_params && cfg(priv)->ht_params->smps_mode) + priv->cfg->ht_params && priv->cfg->ht_params->smps_mode) ieee80211_request_smps(ctx->vif, - cfg(priv)->ht_params->smps_mode); + priv->cfg->ht_params->smps_mode); return 0; } @@ -309,7 +539,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv) int slot0 = 300, slot1 = 0; int ret; - if (priv->shrd->valid_contexts == BIT(IWL_RXON_CTX_BSS)) + if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS)) return 0; BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); @@ -394,6 +624,414 @@ int iwlagn_set_pan_params(struct iwl_priv *priv) return ret; } +static void _iwl_set_rxon_ht(struct iwl_priv *priv, + struct iwl_ht_config *ht_conf, + struct iwl_rxon_context *ctx) +{ + struct iwl_rxon_cmd *rxon = &ctx->staging; + + if (!ctx->ht.enabled) { + rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | + RXON_FLG_HT40_PROT_MSK | + RXON_FLG_HT_PROT_MSK); + return; + } + + /* FIXME: if the definition of ht.protection changed, the "translation" + * will be needed for rxon->flags + */ + rxon->flags |= cpu_to_le32(ctx->ht.protection << + RXON_FLG_HT_OPERATING_MODE_POS); + + /* Set up channel bandwidth: + * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ + /* clear the HT channel mode before set the mode */ + rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); + if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) { + /* pure ht40 */ + if (ctx->ht.protection == + IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { + rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; + /* + * Note: control channel is opposite of extension + * channel + */ + switch (ctx->ht.extension_chan_offset) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + rxon->flags &= + ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + rxon->flags |= + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + break; + } + } else { + /* + * Note: control channel is opposite of extension + * channel + */ + switch (ctx->ht.extension_chan_offset) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + rxon->flags &= + ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); + rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; + break; + case IEEE80211_HT_PARAM_CHA_SEC_NONE: + default: + /* + * channel location only valid if in Mixed + * mode + */ + IWL_ERR(priv, + "invalid extension channel offset\n"); + break; + } + } + } else { + rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; + } + + iwlagn_set_rxon_chain(priv, ctx); + + IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X " + "extension channel offset 0x%x\n", + le32_to_cpu(rxon->flags), ctx->ht.protection, + ctx->ht.extension_chan_offset); +} + +void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf) +{ + struct iwl_rxon_context *ctx; + + for_each_context(priv, ctx) + _iwl_set_rxon_ht(priv, ht_conf, ctx); +} + +/** + * iwl_set_rxon_channel - Set the band and channel values in staging RXON + * @ch: requested channel as a pointer to struct ieee80211_channel + + * NOTE: Does not commit to the hardware; it sets appropriate bit fields + * in the 
staging RXON flag structure based on the ch->band + */ +void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, + struct iwl_rxon_context *ctx) +{ + enum ieee80211_band band = ch->band; + u16 channel = ch->hw_value; + + if ((le16_to_cpu(ctx->staging.channel) == channel) && + (priv->band == band)) + return; + + ctx->staging.channel = cpu_to_le16(channel); + if (band == IEEE80211_BAND_5GHZ) + ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK; + else + ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; + + priv->band = band; + + IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band); + +} + +void iwl_set_flags_for_band(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + enum ieee80211_band band, + struct ieee80211_vif *vif) +{ + if (band == IEEE80211_BAND_5GHZ) { + ctx->staging.flags &= + ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK + | RXON_FLG_CCK_MSK); + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + } else { + /* Copied from iwl_post_associate() */ + if (vif && vif->bss_conf.use_short_slot) + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; + ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK; + ctx->staging.flags &= ~RXON_FLG_CCK_MSK; + } +} + +static void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, int hw_decrypt) +{ + struct iwl_rxon_cmd *rxon = &ctx->staging; + + if (hw_decrypt) + rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; + else + rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; + +} + +/* validate RXON structure is valid */ +static int iwl_check_rxon_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + struct iwl_rxon_cmd *rxon = &ctx->staging; + u32 errors = 0; + + if (rxon->flags & RXON_FLG_BAND_24G_MSK) { + if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) { + IWL_WARN(priv, "check 2.4G: wrong narrow\n"); + errors |= BIT(0); + } + if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) { + IWL_WARN(priv, "check 2.4G: wrong radar\n"); + errors |= BIT(1); + } + } else { + if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) { + IWL_WARN(priv, "check 5.2G: not short slot!\n"); + errors |= BIT(2); + } + if (rxon->flags & RXON_FLG_CCK_MSK) { + IWL_WARN(priv, "check 5.2G: CCK!\n"); + errors |= BIT(3); + } + } + if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) { + IWL_WARN(priv, "mac/bssid mcast!\n"); + errors |= BIT(4); + } + + /* make sure basic rates 6Mbps and 1Mbps are supported */ + if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 && + (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) { + IWL_WARN(priv, "neither 1 nor 6 are basic\n"); + errors |= BIT(5); + } + + if (le16_to_cpu(rxon->assoc_id) > 2007) { + IWL_WARN(priv, "aid > 2007\n"); + errors |= BIT(6); + } + + if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) { + IWL_WARN(priv, "CCK and short slot\n"); + errors |= BIT(7); + } + + if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { + IWL_WARN(priv, "CCK and auto detect"); + errors |= BIT(8); + } + + if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK | + RXON_FLG_TGG_PROTECT_MSK)) == + RXON_FLG_TGG_PROTECT_MSK) { + IWL_WARN(priv, "TGg but no auto-detect\n"); + errors |= BIT(9); + } + + if (rxon->channel == 0) { + IWL_WARN(priv, "zero channel is invalid\n"); + errors |= BIT(10); + } + + WARN(errors, "Invalid RXON (%#x), channel %d", + errors, le16_to_cpu(rxon->channel)); + 
+ return errors ? -EINVAL : 0; +} + +/** + * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed + * @priv: staging_rxon is compared to active_rxon + * + * If the RXON structure is changing enough to require a new tune, + * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that + * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. + */ +int iwl_full_rxon_required(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + const struct iwl_rxon_cmd *staging = &ctx->staging; + const struct iwl_rxon_cmd *active = &ctx->active; + +#define CHK(cond) \ + if ((cond)) { \ + IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \ + return 1; \ + } + +#define CHK_NEQ(c1, c2) \ + if ((c1) != (c2)) { \ + IWL_DEBUG_INFO(priv, "need full RXON - " \ + #c1 " != " #c2 " - %d != %d\n", \ + (c1), (c2)); \ + return 1; \ + } + + /* These items are only settable from the full RXON command */ + CHK(!iwl_is_associated_ctx(ctx)); + CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr)); + CHK(!ether_addr_equal(staging->node_addr, active->node_addr)); + CHK(!ether_addr_equal(staging->wlap_bssid_addr, + active->wlap_bssid_addr)); + CHK_NEQ(staging->dev_type, active->dev_type); + CHK_NEQ(staging->channel, active->channel); + CHK_NEQ(staging->air_propagation, active->air_propagation); + CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates, + active->ofdm_ht_single_stream_basic_rates); + CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates, + active->ofdm_ht_dual_stream_basic_rates); + CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates, + active->ofdm_ht_triple_stream_basic_rates); + CHK_NEQ(staging->assoc_id, active->assoc_id); + + /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can + * be updated with the RXON_ASSOC command -- however only some + * flag transitions are allowed using RXON_ASSOC */ + + /* Check if we are not switching bands */ + CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK, + active->flags & RXON_FLG_BAND_24G_MSK); + + /* Check if we are switching association toggle */ + CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK, + active->filter_flags & RXON_FILTER_ASSOC_MSK); + +#undef CHK +#undef CHK_NEQ + + return 0; +} + +#ifdef CONFIG_IWLWIFI_DEBUG +void iwl_print_rx_config_cmd(struct iwl_priv *priv, + enum iwl_rxon_context_id ctxid) +{ + struct iwl_rxon_context *ctx = &priv->contexts[ctxid]; + struct iwl_rxon_cmd *rxon = &ctx->staging; + + IWL_DEBUG_RADIO(priv, "RX CONFIG:\n"); + iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); + IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", + le16_to_cpu(rxon->channel)); + IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", + le32_to_cpu(rxon->flags)); + IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n", + le32_to_cpu(rxon->filter_flags)); + IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type); + IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n", + rxon->ofdm_basic_rates); + IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", + rxon->cck_basic_rates); + IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr); + IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr); + IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", + le16_to_cpu(rxon->assoc_id)); +} +#endif + +static void iwl_calc_basic_rates(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + int lowest_present_ofdm = 100; + int lowest_present_cck = 100; + u8 cck = 0; + u8 ofdm = 0; + + if (ctx->vif) { + struct ieee80211_supported_band *sband; + unsigned long basic = 
ctx->vif->bss_conf.basic_rates; + int i; + + sband = priv->hw->wiphy->bands[priv->hw->conf.channel->band]; + + for_each_set_bit(i, &basic, BITS_PER_LONG) { + int hw = sband->bitrates[i].hw_value; + if (hw >= IWL_FIRST_OFDM_RATE) { + ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE); + if (lowest_present_ofdm > hw) + lowest_present_ofdm = hw; + } else { + BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); + + cck |= BIT(hw); + if (lowest_present_cck > hw) + lowest_present_cck = hw; + } + } + } + + /* + * Now we've got the basic rates as bitmaps in the ofdm and cck + * variables. This isn't sufficient though, as there might not + * be all the right rates in the bitmap. E.g. if the only basic + * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps + * and 6 Mbps because the 802.11-2007 standard says in 9.6: + * + * [...] a STA responding to a received frame shall transmit + * its Control Response frame [...] at the highest rate in the + * BSSBasicRateSet parameter that is less than or equal to the + * rate of the immediately previous frame in the frame exchange + * sequence ([...]) and that is of the same modulation class + * ([...]) as the received frame. If no rate contained in the + * BSSBasicRateSet parameter meets these conditions, then the + * control frame sent in response to a received frame shall be + * transmitted at the highest mandatory rate of the PHY that is + * less than or equal to the rate of the received frame, and + * that is of the same modulation class as the received frame. + * + * As a consequence, we need to add all mandatory rates that are + * lower than all of the basic rates to these bitmaps. + */ + + if (IWL_RATE_24M_INDEX < lowest_present_ofdm) + ofdm |= IWL_RATE_24M_MASK >> IWL_FIRST_OFDM_RATE; + if (IWL_RATE_12M_INDEX < lowest_present_ofdm) + ofdm |= IWL_RATE_12M_MASK >> IWL_FIRST_OFDM_RATE; + /* 6M already there or needed so always add */ + ofdm |= IWL_RATE_6M_MASK >> IWL_FIRST_OFDM_RATE; + + /* + * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP. + * Note, however: + * - if no CCK rates are basic, it must be ERP since there must + * be some basic rates at all, so they're OFDM => ERP PHY + * (or we're in 5 GHz, and the cck bitmap will never be used) + * - if 11M is a basic rate, it must be ERP as well, so add 5.5M + * - if 5.5M is basic, 1M and 2M are mandatory + * - if 2M is basic, 1M is mandatory + * - if 1M is basic, that's the only valid ACK rate. + * As a consequence, it's not as complicated as it sounds, just add + * any lower rates to the ACK rate bitmap. 
+ */ + if (IWL_RATE_11M_INDEX < lowest_present_ofdm) + ofdm |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE; + if (IWL_RATE_5M_INDEX < lowest_present_ofdm) + ofdm |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE; + if (IWL_RATE_2M_INDEX < lowest_present_ofdm) + ofdm |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE; + /* 1M already there or needed so always add */ + cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE; + + IWL_DEBUG_RATE(priv, "Set basic rates cck:0x%.2x ofdm:0x%.2x\n", + cck, ofdm); + + /* "basic_rates" is a misnomer here -- should be called ACK rates */ + ctx->staging.cck_basic_rates = cck; + ctx->staging.ofdm_basic_rates = ofdm; +} + /** * iwlagn_commit_rxon - commit staging_rxon to hardware * @@ -433,11 +1071,14 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) /* always get timestamp with Rx frame */ ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; + /* recalculate basic rates */ + iwl_calc_basic_rates(priv, ctx); + /* * force CTS-to-self frames protection if RTS-CTS is not preferred * one aggregation protection method */ - if (!hw_params(priv).use_rts_for_aggregation) + if (!priv->hw_params.use_rts_for_aggregation) ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || @@ -489,7 +1130,7 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) return 0; } - iwl_set_rxon_hwcrypto(priv, ctx, !iwlagn_mod_params.sw_crypto); + iwl_set_rxon_hwcrypto(priv, ctx, !iwlwifi_mod_params.sw_crypto); IWL_DEBUG_INFO(priv, "Going to commit RXON\n" @@ -547,7 +1188,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) const struct iwl_channel_info *ch_info; int ret = 0; - IWL_DEBUG_MAC80211(priv, "enter: changed %#x", changed); + IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed); mutex_lock(&priv->mutex); @@ -621,13 +1262,6 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) } iwl_update_bcast_stations(priv); - - /* - * The list of supported rates and rate mask can be different - * for each band; since the band may have changed, reset - * the rate mask to what mac80211 lists. 
- */ - iwl_set_rate(priv); } if (changed & (IEEE80211_CONF_CHANGE_PS | @@ -656,9 +1290,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) return ret; } -static void iwlagn_check_needed_chains(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_bss_conf *bss_conf) +void iwlagn_check_needed_chains(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_bss_conf *bss_conf) { struct ieee80211_vif *vif = ctx->vif; struct iwl_rxon_context *tmp; @@ -750,11 +1384,14 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv, ht_conf->single_chain_sufficient = !need_multiple; } -static void iwlagn_chain_noise_reset(struct iwl_priv *priv) +void iwlagn_chain_noise_reset(struct iwl_priv *priv) { struct iwl_chain_noise_data *data = &priv->chain_noise_data; int ret; + if (!(priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)) + return; + if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_any_associated(priv)) { struct iwl_calib_chain_noise_reset_cmd cmd; @@ -907,8 +1544,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, iwl_power_update_mode(priv, false); /* Enable RX differential gain and sensitivity calibrations */ - if (!priv->disable_chain_noise_cal) - iwlagn_chain_noise_reset(priv); + iwlagn_chain_noise_reset(priv); priv->start_calib = 1; } diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c index c4175603864..b31584e87bc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c @@ -30,10 +30,11 @@ #include <net/mac80211.h> #include "iwl-dev.h" -#include "iwl-core.h" #include "iwl-agn.h" #include "iwl-trans.h" +const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) { lockdep_assert_held(&priv->sta_lock); @@ -170,6 +171,50 @@ int iwl_send_add_sta(struct iwl_priv *priv, return cmd.handler_status; } +static bool iwl_is_channel_extension(struct iwl_priv *priv, + enum ieee80211_band band, + u16 channel, u8 extension_chan_offset) +{ + const struct iwl_channel_info *ch_info; + + ch_info = iwl_get_channel_info(priv, band, channel); + if (!is_channel_valid(ch_info)) + return false; + + if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE) + return !(ch_info->ht40_extension_channel & + IEEE80211_CHAN_NO_HT40PLUS); + else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW) + return !(ch_info->ht40_extension_channel & + IEEE80211_CHAN_NO_HT40MINUS); + + return false; +} + +bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_sta_ht_cap *ht_cap) +{ + if (!ctx->ht.enabled || !ctx->ht.is_40mhz) + return false; + + /* + * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 + * the bit will not set if it is pure 40MHz case + */ + if (ht_cap && !ht_cap->ht_supported) + return false; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (priv->disable_ht40) + return false; +#endif + + return iwl_is_channel_extension(priv, priv->band, + le16_to_cpu(ctx->staging.channel), + ctx->ht.extension_chan_offset); +} + static void iwl_sta_calc_ht_flags(struct iwl_priv *priv, struct ieee80211_sta *sta, struct iwl_rxon_context *ctx, @@ -277,8 +322,8 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, sta_id = ctx->bcast_sta_id; else for (i = IWL_STA_ID; i < IWLAGN_STATION_COUNT; i++) { - if (!compare_ether_addr(priv->stations[i].sta.sta.addr, - addr)) { + if 
(ether_addr_equal(priv->stations[i].sta.sta.addr, + addr)) { sta_id = i; break; } @@ -308,7 +353,7 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) && - !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) { + ether_addr_equal(priv->stations[sta_id].sta.sta.addr, addr)) { IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not " "adding again.\n", sta_id, addr); return sta_id; @@ -581,6 +626,56 @@ void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id, spin_unlock_bh(&priv->sta_lock); } +static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + u8 sta_id, struct iwl_link_quality_cmd *link_cmd) +{ + int i, r; + u32 rate_flags = 0; + __le32 rate_n_flags; + + lockdep_assert_held(&priv->mutex); + + memset(link_cmd, 0, sizeof(*link_cmd)); + + /* Set up the rate scaling to start at selected rate, fall back + * all the way down to 1M in IEEE order, and then spin on 1M */ + if (priv->band == IEEE80211_BAND_5GHZ) + r = IWL_RATE_6M_INDEX; + else if (ctx && ctx->vif && ctx->vif->p2p) + r = IWL_RATE_6M_INDEX; + else + r = IWL_RATE_1M_INDEX; + + if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) + rate_flags |= RATE_MCS_CCK_MSK; + + rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) << + RATE_MCS_ANT_POS; + rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags); + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) + link_cmd->rs_table[i].rate_n_flags = rate_n_flags; + + link_cmd->general_params.single_stream_ant_msk = + first_antenna(priv->hw_params.valid_tx_ant); + + link_cmd->general_params.dual_stream_ant_msk = + priv->hw_params.valid_tx_ant & + ~first_antenna(priv->hw_params.valid_tx_ant); + if (!link_cmd->general_params.dual_stream_ant_msk) { + link_cmd->general_params.dual_stream_ant_msk = ANT_AB; + } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) { + link_cmd->general_params.dual_stream_ant_msk = + priv->hw_params.valid_tx_ant; + } + + link_cmd->agg_params.agg_dis_start_th = + LINK_QUAL_AGG_DISABLE_START_DEF; + link_cmd->agg_params.agg_time_limit = + cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); + + link_cmd->sta_id = sta_id; +} + /** * iwl_clear_ucode_stations - clear ucode station table bits * @@ -841,56 +936,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, } -void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - u8 sta_id, struct iwl_link_quality_cmd *link_cmd) -{ - int i, r; - u32 rate_flags = 0; - __le32 rate_n_flags; - - lockdep_assert_held(&priv->mutex); - - memset(link_cmd, 0, sizeof(*link_cmd)); - - /* Set up the rate scaling to start at selected rate, fall back - * all the way down to 1M in IEEE order, and then spin on 1M */ - if (priv->band == IEEE80211_BAND_5GHZ) - r = IWL_RATE_6M_INDEX; - else if (ctx && ctx->vif && ctx->vif->p2p) - r = IWL_RATE_6M_INDEX; - else - r = IWL_RATE_1M_INDEX; - - if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) - rate_flags |= RATE_MCS_CCK_MSK; - - rate_flags |= first_antenna(hw_params(priv).valid_tx_ant) << - RATE_MCS_ANT_POS; - rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags); - for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) - link_cmd->rs_table[i].rate_n_flags = rate_n_flags; - - link_cmd->general_params.single_stream_ant_msk = - first_antenna(hw_params(priv).valid_tx_ant); - - link_cmd->general_params.dual_stream_ant_msk = - hw_params(priv).valid_tx_ant & - 
~first_antenna(hw_params(priv).valid_tx_ant); - if (!link_cmd->general_params.dual_stream_ant_msk) { - link_cmd->general_params.dual_stream_ant_msk = ANT_AB; - } else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) { - link_cmd->general_params.dual_stream_ant_msk = - hw_params(priv).valid_tx_ant; - } - - link_cmd->agg_params.agg_dis_start_th = - LINK_QUAL_AGG_DISABLE_START_DEF; - link_cmd->agg_params.agg_time_limit = - cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); - - link_cmd->sta_id = sta_id; -} - static struct iwl_link_quality_cmd * iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, u8 sta_id) diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c index baaf5ba2fc3..a5cfe0aceed 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c @@ -37,11 +37,11 @@ #include "iwl-agn.h" #include "iwl-eeprom.h" #include "iwl-dev.h" -#include "iwl-core.h" #include "iwl-io.h" #include "iwl-commands.h" #include "iwl-debug.h" #include "iwl-agn-tt.h" +#include "iwl-modparams.h" /* default Thermal Throttling transaction table * Current state | Throttling Down | Throttling Up @@ -179,19 +179,19 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data) if (tt->state == IWL_TI_CT_KILL) { if (priv->thermal_throttle.ct_kill_toggle) { - iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR, + iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); priv->thermal_throttle.ct_kill_toggle = false; } else { - iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_SET, + iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); priv->thermal_throttle.ct_kill_toggle = true; } - iwl_read32(trans(priv), CSR_UCODE_DRV_GP1); - spin_lock_irqsave(&trans(priv)->reg_lock, flags); - if (likely(iwl_grab_nic_access(trans(priv)))) - iwl_release_nic_access(trans(priv)); - spin_unlock_irqrestore(&trans(priv)->reg_lock, flags); + iwl_read32(priv->trans, CSR_UCODE_DRV_GP1); + spin_lock_irqsave(&priv->trans->reg_lock, flags); + if (likely(iwl_grab_nic_access(priv->trans))) + iwl_release_nic_access(priv->trans); + spin_unlock_irqrestore(&priv->trans->reg_lock, flags); /* Reschedule the ct_kill timer to occur in * CT_KILL_EXIT_DURATION seconds to ensure we get a @@ -632,7 +632,7 @@ void iwl_tt_initialize(struct iwl_priv *priv) INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter); INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit); - if (cfg(priv)->base_params->adv_thermal_throttle) { + if (priv->cfg->base_params->adv_thermal_throttle) { IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n"); tt->restriction = kcalloc(IWL_TI_STATE_MAX, sizeof(struct iwl_tt_restriction), diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c index 34adedc74d3..f2e9f298a94 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c @@ -34,12 +34,22 @@ #include <linux/ieee80211.h> #include "iwl-dev.h" -#include "iwl-core.h" #include "iwl-io.h" #include "iwl-agn-hw.h" #include "iwl-agn.h" #include "iwl-trans.h" +static const u8 tid_to_ac[] = { + IEEE80211_AC_BE, + IEEE80211_AC_BK, + IEEE80211_AC_BK, + IEEE80211_AC_BE, + IEEE80211_AC_VI, + IEEE80211_AC_VI, + IEEE80211_AC_VO, + IEEE80211_AC_VO, +}; + static void iwlagn_tx_cmd_protection(struct iwl_priv *priv, struct ieee80211_tx_info *info, __le16 fc, __le32 *tx_flags) @@ -74,8 +84,8 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, else if (ieee80211_is_back_req(fc)) tx_flags 
|= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; else if (info->band == IEEE80211_BAND_2GHZ && - cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist && + priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist && (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc) || skb->protocol == cpu_to_be16(ETH_P_PAE))) @@ -192,15 +202,15 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, rate_flags |= RATE_MCS_CCK_MSK; /* Set up antennas */ - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist && + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist && priv->bt_full_concurrent) { /* operated as 1x1 in full concurrency mode */ priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, - first_antenna(hw_params(priv).valid_tx_ant)); + first_antenna(priv->hw_params.valid_tx_ant)); } else priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, - hw_params(priv).valid_tx_ant); + priv->hw_params.valid_tx_ant); rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); /* Set the rate in the TX cmd */ @@ -293,6 +303,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) u16 len, seq_number = 0; u8 sta_id, tid = IWL_MAX_TID_COUNT; bool is_agg = false; + int txq_id; if (info->control.vif) ctx = iwl_rxon_ctx_from_vif(info->control.vif); @@ -384,12 +395,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) /* TODO need this for burst mode later on */ iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id); - iwl_dbg_log_tx_data_frame(priv, len, hdr); iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc); - iwl_update_stats(priv, true, fc, len); - memset(&info->status, 0, sizeof(info->status)); info->driver_data[0] = ctx; @@ -435,7 +443,31 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) /* Copy MAC header from skb into command buffer */ memcpy(tx_cmd->hdr, hdr, hdr_len); - if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id, tid)) + if (is_agg) + txq_id = priv->tid_data[sta_id][tid].agg.txq_id; + else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { + /* + * Send this frame after DTIM -- there's a special queue + * reserved for this for contexts that support AP mode. + */ + txq_id = ctx->mcast_queue; + + /* + * The microcode will clear the more data + * bit in the last frame it transmits. 
+ */ + hdr->frame_control |= + cpu_to_le16(IEEE80211_FCTL_MOREDATA); + } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) + txq_id = IWL_AUX_QUEUE; + else + txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)]; + + WARN_ON_ONCE(!is_agg && txq_id != info->hw_queue); + WARN_ON_ONCE(is_agg && + priv->queue_to_mac80211[txq_id] != info->hw_queue); + + if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id)) goto drop_unlock_sta; if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) && @@ -464,11 +496,33 @@ drop_unlock_priv: return -1; } +static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq) +{ + int q; + + for (q = IWLAGN_FIRST_AMPDU_QUEUE; + q < priv->cfg->base_params->num_of_queues; q++) { + if (!test_and_set_bit(q, priv->agg_q_alloc)) { + priv->queue_to_mac80211[q] = mq; + return q; + } + } + + return -ENOSPC; +} + +static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q) +{ + clear_bit(q, priv->agg_q_alloc); + priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE; +} + int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid) { struct iwl_tid_data *tid_data; - int sta_id; + int sta_id, txq_id; + enum iwl_agg_state agg_state; sta_id = iwl_sta_id(sta); @@ -480,6 +534,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, spin_lock_bh(&priv->sta_lock); tid_data = &priv->tid_data[sta_id][tid]; + txq_id = priv->tid_data[sta_id][tid].agg.txq_id; switch (priv->tid_data[sta_id][tid].agg.state) { case IWL_EMPTYING_HW_QUEUE_ADDBA: @@ -491,6 +546,13 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, */ IWL_DEBUG_HT(priv, "AGG stop before setup done\n"); goto turn_off; + case IWL_AGG_STARTING: + /* + * This can happen when the session is stopped before + * we receive ADDBA response + */ + IWL_DEBUG_HT(priv, "AGG stop before AGG became operational\n"); + goto turn_off; case IWL_AGG_ON: break; default: @@ -504,9 +566,13 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number); /* There are still packets for this RA / TID in the HW */ - if (tid_data->agg.ssn != tid_data->next_reclaimed) { + if (!test_bit(txq_id, priv->agg_q_alloc)) { + IWL_DEBUG_TX_QUEUES(priv, + "stopping AGG on STA/TID %d/%d but hwq %d not used\n", + sta_id, tid, txq_id); + } else if (tid_data->agg.ssn != tid_data->next_reclaimed) { IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, " - "next_recl = %d", + "next_recl = %d\n", tid_data->agg.ssn, tid_data->next_reclaimed); priv->tid_data[sta_id][tid].agg.state = @@ -515,14 +581,22 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, return 0; } - IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d", + IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n", tid_data->agg.ssn); turn_off: + agg_state = priv->tid_data[sta_id][tid].agg.state; priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF; spin_unlock_bh(&priv->sta_lock); - iwl_trans_tx_agg_disable(trans(priv), sta_id, tid); + if (test_bit(txq_id, priv->agg_q_alloc)) { + /* If the transport didn't know that we wanted to start + * agreggation, don't tell it that we want to stop them + */ + if (agg_state != IWL_AGG_STARTING) + iwl_trans_tx_agg_disable(priv->trans, txq_id); + iwlagn_dealloc_agg_txq(priv, txq_id); + } ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); @@ -532,9 +606,9 @@ turn_off: int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, 
u16 *ssn) { + struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); struct iwl_tid_data *tid_data; - int sta_id; - int ret; + int sta_id, txq_id, ret; IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n", sta->addr, tid); @@ -552,36 +626,37 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, return -ENXIO; } + txq_id = iwlagn_alloc_agg_txq(priv, ctx->ac_to_queue[tid_to_ac[tid]]); + if (txq_id < 0) { + IWL_DEBUG_TX_QUEUES(priv, + "No free aggregation queue for %pM/%d\n", + sta->addr, tid); + return txq_id; + } + ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); if (ret) return ret; spin_lock_bh(&priv->sta_lock); - tid_data = &priv->tid_data[sta_id][tid]; tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number); + tid_data->agg.txq_id = txq_id; *ssn = tid_data->agg.ssn; - ret = iwl_trans_tx_agg_alloc(trans(priv), sta_id, tid); - if (ret) { - spin_unlock_bh(&priv->sta_lock); - return ret; - } - if (*ssn == tid_data->next_reclaimed) { - IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d", + IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n", tid_data->agg.ssn); - tid_data->agg.state = IWL_AGG_ON; + tid_data->agg.state = IWL_AGG_STARTING; ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); } else { IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, " - "next_reclaimed = %d", + "next_reclaimed = %d\n", tid_data->agg.ssn, tid_data->next_reclaimed); tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; } - spin_unlock_bh(&priv->sta_lock); return ret; @@ -592,15 +667,21 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif, { struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); + int q, fifo; u16 ssn; buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF); spin_lock_bh(&priv->sta_lock); ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn; + q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id; + priv->tid_data[sta_priv->sta_id][tid].agg.state = IWL_AGG_ON; spin_unlock_bh(&priv->sta_lock); - iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, sta_priv->sta_id, tid, + fifo = ctx->ac_to_fifo[tid_to_ac[tid]]; + + iwl_trans_tx_agg_setup(priv->trans, q, fifo, + sta_priv->sta_id, tid, buf_size, ssn); /* @@ -623,7 +704,7 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif, sta_priv->max_agg_bufsize = min(sta_priv->max_agg_bufsize, buf_size); - if (hw_params(priv).use_rts_for_aggregation) { + if (priv->hw_params.use_rts_for_aggregation) { /* * switch to RTS/CTS if it is the prefer protection * method for HT traffic @@ -666,7 +747,9 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid) IWL_DEBUG_TX_QUEUES(priv, "Can continue DELBA flow ssn = next_recl =" " %d", tid_data->next_reclaimed); - iwl_trans_tx_agg_disable(trans(priv), sta_id, tid); + iwl_trans_tx_agg_disable(priv->trans, + tid_data->agg.txq_id); + iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id); tid_data->agg.state = IWL_AGG_OFF; ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid); } @@ -677,7 +760,7 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid) IWL_DEBUG_TX_QUEUES(priv, "Can continue ADDBA flow ssn = next_recl =" " %d", tid_data->next_reclaimed); - tid_data->agg.state = IWL_AGG_ON; + tid_data->agg.state = IWL_AGG_STARTING; ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid); } break; @@ -711,9 +794,9 @@ static void iwlagn_non_agg_tx_status(struct iwl_priv *priv, static void iwlagn_hwrate_to_tx_control(struct iwl_priv 
*priv, u32 rate_n_flags, struct ieee80211_tx_info *info) { - struct ieee80211_tx_rate *r = &info->control.rates[0]; + struct ieee80211_tx_rate *r = &info->status.rates[0]; - info->antenna_sel_tx = + info->status.antenna = ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); if (rate_n_flags & RATE_MCS_HT_MSK) r->flags |= IEEE80211_TX_RC_MCS; @@ -841,8 +924,8 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv, * notification again. */ if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 && - cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist) { + priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist) { IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n"); } @@ -1005,6 +1088,29 @@ static void iwl_check_abort_status(struct iwl_priv *priv, } } +static int iwl_reclaim(struct iwl_priv *priv, int sta_id, int tid, + int txq_id, int ssn, struct sk_buff_head *skbs) +{ + if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE && + tid != IWL_TID_NON_QOS && + txq_id != priv->tid_data[sta_id][tid].agg.txq_id)) { + /* + * FIXME: this is a uCode bug which need to be addressed, + * log the information and return for now. + * Since it is can possibly happen very often and in order + * not to fill the syslog, don't use IWL_ERR or IWL_WARN + */ + IWL_DEBUG_TX_QUEUES(priv, + "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n", + txq_id, sta_id, tid, + priv->tid_data[sta_id][tid].agg.txq_id); + return 1; + } + + iwl_trans_reclaim(priv->trans, txq_id, ssn, skbs); + return 0; +} + int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd) { @@ -1059,13 +1165,12 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, if (tid != IWL_TID_NON_QOS) { priv->tid_data[sta_id][tid].next_reclaimed = next_reclaimed; - IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d", + IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", next_reclaimed); } /*we can free until ssn % q.n_bd not inclusive */ - WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, - txq_id, ssn, &skbs)); + WARN_ON(iwl_reclaim(priv, sta_id, tid, txq_id, ssn, &skbs)); iwlagn_check_ratid_empty(priv, sta_id, tid); freed = 0; @@ -1159,7 +1264,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, * (in Tx queue's circular buffer) of first TFD/frame in window */ u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); - if (scd_flow >= cfg(priv)->base_params->num_of_queues) { + if (scd_flow >= priv->cfg->base_params->num_of_queues) { IWL_ERR(priv, "BUG_ON scd_flow is bigger than number of queues\n"); return 0; @@ -1183,8 +1288,8 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, /* Release all TFDs before the SSN, i.e. all TFDs in front of * block-ack window (we assume that they've been successfully * transmitted ... if not, it's too late anyway). */ - if (iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow, - ba_resp_scd_ssn, &reclaimed_skbs)) { + if (iwl_reclaim(priv, sta_id, tid, scd_flow, + ba_resp_scd_ssn, &reclaimed_skbs)) { spin_unlock(&priv->sta_lock); return 0; } diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 2a9a16f901c..8d7637083fc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -26,6 +26,9 @@ * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> @@ -43,13 +46,13 @@ #include "iwl-eeprom.h" #include "iwl-dev.h" -#include "iwl-core.h" #include "iwl-io.h" #include "iwl-agn-calib.h" #include "iwl-agn.h" -#include "iwl-shared.h" #include "iwl-trans.h" #include "iwl-op-mode.h" +#include "iwl-drv.h" +#include "iwl-modparams.h" /****************************************************************************** * @@ -177,7 +180,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv) rate = info->control.rates[0].idx; priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, - hw_params(priv).valid_tx_ant); + priv->hw_params.valid_tx_ant); rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant); /* In mac80211, rates for 5 GHz start at 0 */ @@ -286,6 +289,25 @@ out: mutex_unlock(&priv->mutex); } +int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear) +{ + struct iwl_statistics_cmd statistics_cmd = { + .configuration_flags = + clear ? IWL_STATS_CONF_CLEAR_STATS : 0, + }; + + if (flags & CMD_ASYNC) + return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, + CMD_ASYNC, + sizeof(struct iwl_statistics_cmd), + &statistics_cmd); + else + return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, + CMD_SYNC, + sizeof(struct iwl_statistics_cmd), + &statistics_cmd); +} + /** * iwl_bg_statistics_periodic - Timer callback to queue statistics * @@ -326,14 +348,14 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base, ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32)); /* Make sure device is powered up for SRAM reads */ - spin_lock_irqsave(&trans(priv)->reg_lock, reg_flags); - if (unlikely(!iwl_grab_nic_access(trans(priv)))) { - spin_unlock_irqrestore(&trans(priv)->reg_lock, reg_flags); + spin_lock_irqsave(&priv->trans->reg_lock, reg_flags); + if (unlikely(!iwl_grab_nic_access(priv->trans))) { + spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags); return; } /* Set starting address; reads will auto-increment */ - iwl_write32(trans(priv), HBUS_TARG_MEM_RADDR, ptr); + iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, ptr); /* * Refuse to read more than would have fit into the log from @@ -349,20 +371,20 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base, * place event id # at far right for easier visual parsing. 
*/ for (i = 0; i < num_events; i++) { - ev = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); - time = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); + ev = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT); + time = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT); if (mode == 0) { trace_iwlwifi_dev_ucode_cont_event( - trans(priv)->dev, 0, time, ev); + priv->trans->dev, 0, time, ev); } else { - data = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); + data = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT); trace_iwlwifi_dev_ucode_cont_event( - trans(priv)->dev, time, data, ev); + priv->trans->dev, time, data, ev); } } /* Allow device to power down */ - iwl_release_nic_access(trans(priv)); - spin_unlock_irqrestore(&trans(priv)->reg_lock, reg_flags); + iwl_release_nic_access(priv->trans); + spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags); } static void iwl_continuous_event_trace(struct iwl_priv *priv) @@ -379,10 +401,9 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv) u32 num_wraps; /* # times uCode wrapped to top of log */ u32 next_entry; /* index of next entry to be written by uCode */ - base = priv->shrd->device_pointers.log_event_table; + base = priv->device_pointers.log_event_table; if (iwlagn_hw_valid_rtc_data_addr(base)) { - iwl_read_targ_mem_words(trans(priv), base, &read, sizeof(read)); - + iwl_read_targ_mem_words(priv->trans, base, &read, sizeof(read)); capacity = read.capacity; mode = read.mode; num_wraps = read.wrap_counter; @@ -422,7 +443,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv) else priv->event_log.wraps_once_count++; - trace_iwlwifi_dev_ucode_wrap_event(trans(priv)->dev, + trace_iwlwifi_dev_ucode_wrap_event(priv->trans->dev, num_wraps - priv->event_log.num_wraps, next_entry, priv->event_log.next_entry); @@ -488,7 +509,76 @@ static void iwl_bg_tx_flush(struct work_struct *work) iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL); } -static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags) +/* + * queue/FIFO/AC mapping definitions + */ + +#define IWL_TX_FIFO_BK 0 /* shared */ +#define IWL_TX_FIFO_BE 1 +#define IWL_TX_FIFO_VI 2 /* shared */ +#define IWL_TX_FIFO_VO 3 +#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK +#define IWL_TX_FIFO_BE_IPAN 4 +#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI +#define IWL_TX_FIFO_VO_IPAN 5 +/* re-uses the VO FIFO, uCode will properly flush/schedule */ +#define IWL_TX_FIFO_AUX 5 +#define IWL_TX_FIFO_UNUSED -1 + +#define IWLAGN_CMD_FIFO_NUM 7 + +/* + * This queue number is required for proper operation + * because the ucode will stop/start the scheduler as + * required. 
+ */ +#define IWL_IPAN_MCAST_QUEUE 8 + +static const u8 iwlagn_default_queue_to_tx_fifo[] = { + IWL_TX_FIFO_VO, + IWL_TX_FIFO_VI, + IWL_TX_FIFO_BE, + IWL_TX_FIFO_BK, + IWLAGN_CMD_FIFO_NUM, +}; + +static const u8 iwlagn_ipan_queue_to_tx_fifo[] = { + IWL_TX_FIFO_VO, + IWL_TX_FIFO_VI, + IWL_TX_FIFO_BE, + IWL_TX_FIFO_BK, + IWL_TX_FIFO_BK_IPAN, + IWL_TX_FIFO_BE_IPAN, + IWL_TX_FIFO_VI_IPAN, + IWL_TX_FIFO_VO_IPAN, + IWL_TX_FIFO_BE_IPAN, + IWLAGN_CMD_FIFO_NUM, + IWL_TX_FIFO_AUX, +}; + +static const u8 iwlagn_bss_ac_to_fifo[] = { + IWL_TX_FIFO_VO, + IWL_TX_FIFO_VI, + IWL_TX_FIFO_BE, + IWL_TX_FIFO_BK, +}; + +static const u8 iwlagn_bss_ac_to_queue[] = { + 0, 1, 2, 3, +}; + +static const u8 iwlagn_pan_ac_to_fifo[] = { + IWL_TX_FIFO_VO_IPAN, + IWL_TX_FIFO_VI_IPAN, + IWL_TX_FIFO_BE_IPAN, + IWL_TX_FIFO_BK_IPAN, +}; + +static const u8 iwlagn_pan_ac_to_queue[] = { + 7, 6, 5, 4, +}; + +void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags) { int i; @@ -496,9 +586,9 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags) * The default context is always valid, * the PAN context depends on uCode. */ - priv->shrd->valid_contexts = BIT(IWL_RXON_CTX_BSS); + priv->valid_contexts = BIT(IWL_RXON_CTX_BSS); if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) - priv->shrd->valid_contexts |= BIT(IWL_RXON_CTX_PAN); + priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN); for (i = 0; i < NUM_IWL_RXON_CTX; i++) priv->contexts[i].ctxid = i; @@ -520,6 +610,10 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags) priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS; priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS; priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS; + memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue, + iwlagn_bss_ac_to_queue, sizeof(iwlagn_bss_ac_to_queue)); + memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo, + iwlagn_bss_ac_to_fifo, sizeof(iwlagn_bss_ac_to_fifo)); priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON; priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd = @@ -542,26 +636,31 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags) priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP; priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA; priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P; + memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue, + iwlagn_pan_ac_to_queue, sizeof(iwlagn_pan_ac_to_queue)); + memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo, + iwlagn_pan_ac_to_fifo, sizeof(iwlagn_pan_ac_to_fifo)); + priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE; BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); } -static void iwl_rf_kill_ct_config(struct iwl_priv *priv) +void iwl_rf_kill_ct_config(struct iwl_priv *priv) { struct iwl_ct_kill_config cmd; struct iwl_ct_kill_throttling_config adv_cmd; int ret = 0; - iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR, + iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); priv->thermal_throttle.ct_kill_toggle = false; - if (cfg(priv)->base_params->support_ct_kill_exit) { + if (priv->cfg->base_params->support_ct_kill_exit) { adv_cmd.critical_temperature_enter = - cpu_to_le32(hw_params(priv).ct_kill_threshold); + cpu_to_le32(priv->hw_params.ct_kill_threshold); adv_cmd.critical_temperature_exit = - cpu_to_le32(hw_params(priv).ct_kill_exit_threshold); + cpu_to_le32(priv->hw_params.ct_kill_exit_threshold); ret = iwl_dvm_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD, @@ -572,11 
+671,11 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv) IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " "succeeded, critical temperature enter is %d," "exit is %d\n", - hw_params(priv).ct_kill_threshold, - hw_params(priv).ct_kill_exit_threshold); + priv->hw_params.ct_kill_threshold, + priv->hw_params.ct_kill_exit_threshold); } else { cmd.critical_temperature_R = - cpu_to_le32(hw_params(priv).ct_kill_threshold); + cpu_to_le32(priv->hw_params.ct_kill_threshold); ret = iwl_dvm_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD, @@ -587,7 +686,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv) IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " "succeeded, " "critical temperature is %d\n", - hw_params(priv).ct_kill_threshold); + priv->hw_params.ct_kill_threshold); } } @@ -627,6 +726,29 @@ static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant) } } +void iwl_send_bt_config(struct iwl_priv *priv) +{ + struct iwl_bt_cmd bt_cmd = { + .lead_time = BT_LEAD_TIME_DEF, + .max_kill = BT_MAX_KILL_DEF, + .kill_ack_mask = 0, + .kill_cts_mask = 0, + }; + + if (!iwlwifi_mod_params.bt_coex_active) + bt_cmd.flags = BT_COEX_DISABLE; + else + bt_cmd.flags = BT_COEX_ENABLE; + + priv->bt_enable_flag = bt_cmd.flags; + IWL_DEBUG_INFO(priv, "BT coex %s\n", + (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active"); + + if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, + CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd)) + IWL_ERR(priv, "failed to send BT Coex Config\n"); +} + /** * iwl_alive_start - called after REPLY_ALIVE notification received * from protocol/runtime uCode (initialization uCode's @@ -642,9 +764,6 @@ int iwl_alive_start(struct iwl_priv *priv) /* After the ALIVE response, we can send host commands to the uCode */ set_bit(STATUS_ALIVE, &priv->status); - /* Enable watchdog to monitor the driver tx queues */ - iwl_setup_watchdog(priv); - if (iwl_is_rfkill(priv)) return -ERFKILL; @@ -654,10 +773,10 @@ int iwl_alive_start(struct iwl_priv *priv) } /* download priority table before any calibration request */ - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist) { + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist) { /* Configure Bluetooth device coexistence support */ - if (cfg(priv)->bt_params->bt_sco_disable) + if (priv->cfg->bt_params->bt_sco_disable) priv->bt_enable_pspoll = false; else priv->bt_enable_pspoll = true; @@ -694,10 +813,8 @@ int iwl_alive_start(struct iwl_priv *priv) ieee80211_wake_queues(priv->hw); - priv->active_rate = IWL_RATES_MASK; - /* Configure Tx antenna selection based on H/W config */ - iwlagn_send_tx_ant_config(priv, hw_params(priv).valid_tx_ant); + iwlagn_send_tx_ant_config(priv, priv->hw_params.valid_tx_ant); if (iwl_is_associated_ctx(ctx) && !priv->wowlan) { struct iwl_rxon_cmd *active_rxon = @@ -788,10 +905,6 @@ void iwl_down(struct iwl_priv *priv) exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); - /* Stop TX queues watchdog. 
We need to have STATUS_EXIT_PENDING bit set - * to prevent rearm timer */ - del_timer_sync(&priv->watchdog); - iwl_clear_ucode_stations(priv, NULL); iwl_dealloc_bcast_stations(priv); iwl_clear_driver_stations(priv); @@ -800,9 +913,9 @@ void iwl_down(struct iwl_priv *priv) priv->bt_status = 0; priv->cur_rssi_ctx = NULL; priv->bt_is_sco = 0; - if (cfg(priv)->bt_params) + if (priv->cfg->bt_params) priv->bt_traffic_load = - cfg(priv)->bt_params->bt_init_traffic_load; + priv->cfg->bt_params->bt_init_traffic_load; else priv->bt_traffic_load = 0; priv->bt_full_concurrent = false; @@ -817,18 +930,17 @@ void iwl_down(struct iwl_priv *priv) ieee80211_stop_queues(priv->hw); priv->ucode_loaded = false; - iwl_trans_stop_device(trans(priv)); + iwl_trans_stop_device(priv->trans); /* Clear out all status bits but a few that are stable across reset */ priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << STATUS_RF_KILL_HW | test_bit(STATUS_GEO_CONFIGURED, &priv->status) << STATUS_GEO_CONFIGURED | + test_bit(STATUS_FW_ERROR, &priv->status) << + STATUS_FW_ERROR | test_bit(STATUS_EXIT_PENDING, &priv->status) << STATUS_EXIT_PENDING; - priv->shrd->status &= - test_bit(STATUS_FW_ERROR, &priv->shrd->status) << - STATUS_FW_ERROR; dev_kfree_skb(priv->beacon_skb); priv->beacon_skb = NULL; @@ -868,6 +980,7 @@ void iwlagn_prepare_restart(struct iwl_priv *priv) u8 bt_load; u8 bt_status; bool bt_is_sco; + int i; lockdep_assert_held(&priv->mutex); @@ -895,6 +1008,15 @@ void iwlagn_prepare_restart(struct iwl_priv *priv) priv->bt_traffic_load = bt_load; priv->bt_status = bt_status; priv->bt_is_sco = bt_is_sco; + + /* reset aggregation queues */ + for (i = IWLAGN_FIRST_AMPDU_QUEUE; i < IWL_MAX_HW_QUEUES; i++) + priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE; + /* and stop counts */ + for (i = 0; i < IWL_MAX_HW_QUEUES; i++) + atomic_set(&priv->queue_stop_count[i], 0); + + memset(priv->agg_q_alloc, 0, sizeof(priv->agg_q_alloc)); } static void iwl_bg_restart(struct work_struct *data) @@ -904,7 +1026,7 @@ static void iwl_bg_restart(struct work_struct *data) if (test_bit(STATUS_EXIT_PENDING, &priv->status)) return; - if (test_and_clear_bit(STATUS_FW_ERROR, &priv->shrd->status)) { + if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { mutex_lock(&priv->mutex); iwlagn_prepare_restart(priv); mutex_unlock(&priv->mutex); @@ -956,7 +1078,7 @@ static void iwlagn_disable_roc_work(struct work_struct *work) * *****************************************************************************/ -static void iwl_setup_deferred_work(struct iwl_priv *priv) +void iwl_setup_deferred_work(struct iwl_priv *priv) { priv->workqueue = create_singlethread_workqueue(DRV_NAME); @@ -971,7 +1093,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv) iwl_setup_scan_deferred_work(priv); - if (cfg(priv)->bt_params) + if (priv->cfg->bt_params) iwlagn_bt_setup_deferred_work(priv); init_timer(&priv->statistics_periodic); @@ -981,15 +1103,11 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv) init_timer(&priv->ucode_trace); priv->ucode_trace.data = (unsigned long)priv; priv->ucode_trace.function = iwl_bg_ucode_trace; - - init_timer(&priv->watchdog); - priv->watchdog.data = (unsigned long)priv; - priv->watchdog.function = iwl_bg_watchdog; } void iwl_cancel_deferred_work(struct iwl_priv *priv) { - if (cfg(priv)->bt_params) + if (priv->cfg->bt_params) iwlagn_bt_cancel_deferred_work(priv); cancel_work_sync(&priv->run_time_calib_work); @@ -1025,7 +1143,193 @@ static void iwl_init_hw_rates(struct ieee80211_rate *rates) } } -static 
int iwl_init_drv(struct iwl_priv *priv) +#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ +#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ +static void iwl_init_ht_hw_capab(const struct iwl_priv *priv, + struct ieee80211_sta_ht_cap *ht_info, + enum ieee80211_band band) +{ + u16 max_bit_rate = 0; + u8 rx_chains_num = priv->hw_params.rx_chains_num; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + + ht_info->cap = 0; + memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); + + ht_info->ht_supported = true; + + if (priv->cfg->ht_params && + priv->cfg->ht_params->ht_greenfield_support) + ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; + ht_info->cap |= IEEE80211_HT_CAP_SGI_20; + max_bit_rate = MAX_BIT_RATE_20_MHZ; + if (priv->hw_params.ht40_channel & BIT(band)) { + ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + ht_info->cap |= IEEE80211_HT_CAP_SGI_40; + ht_info->mcs.rx_mask[4] = 0x01; + max_bit_rate = MAX_BIT_RATE_40_MHZ; + } + + if (iwlwifi_mod_params.amsdu_size_8K) + ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; + + ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; + ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; + + ht_info->mcs.rx_mask[0] = 0xFF; + if (rx_chains_num >= 2) + ht_info->mcs.rx_mask[1] = 0xFF; + if (rx_chains_num >= 3) + ht_info->mcs.rx_mask[2] = 0xFF; + + /* Highest supported Rx data rate */ + max_bit_rate *= rx_chains_num; + WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); + ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); + + /* Tx MCS capabilities */ + ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + if (tx_chains_num != rx_chains_num) { + ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; + ht_info->mcs.tx_params |= ((tx_chains_num - 1) << + IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); + } +} + +/** + * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom + */ +static int iwl_init_geos(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch; + struct ieee80211_supported_band *sband; + struct ieee80211_channel *channels; + struct ieee80211_channel *geo_ch; + struct ieee80211_rate *rates; + int i = 0; + s8 max_tx_power = IWLAGN_TX_POWER_TARGET_POWER_MIN; + + if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates || + priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) { + IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n"); + set_bit(STATUS_GEO_CONFIGURED, &priv->status); + return 0; + } + + channels = kcalloc(priv->channel_count, + sizeof(struct ieee80211_channel), GFP_KERNEL); + if (!channels) + return -ENOMEM; + + rates = kcalloc(IWL_RATE_COUNT_LEGACY, sizeof(struct ieee80211_rate), + GFP_KERNEL); + if (!rates) { + kfree(channels); + return -ENOMEM; + } + + /* 5.2GHz channels start after the 2.4GHz channels */ + sband = &priv->bands[IEEE80211_BAND_5GHZ]; + sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)]; + /* just OFDM */ + sband->bitrates = &rates[IWL_FIRST_OFDM_RATE]; + sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE; + + if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE) + iwl_init_ht_hw_capab(priv, &sband->ht_cap, + IEEE80211_BAND_5GHZ); + + sband = &priv->bands[IEEE80211_BAND_2GHZ]; + sband->channels = channels; + /* OFDM & CCK */ + sband->bitrates = rates; + sband->n_bitrates = IWL_RATE_COUNT_LEGACY; + + if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE) + iwl_init_ht_hw_capab(priv, &sband->ht_cap, + IEEE80211_BAND_2GHZ); + + priv->ieee_channels = channels; + priv->ieee_rates = rates; + + for (i = 0; i < priv->channel_count; i++) { + ch = &priv->channel_info[i]; + + /* FIXME: might be removed if scan is OK */ + 
if (!is_channel_valid(ch)) + continue; + + sband = &priv->bands[ch->band]; + + geo_ch = &sband->channels[sband->n_channels++]; + + geo_ch->center_freq = + ieee80211_channel_to_frequency(ch->channel, ch->band); + geo_ch->max_power = ch->max_power_avg; + geo_ch->max_antenna_gain = 0xff; + geo_ch->hw_value = ch->channel; + + if (is_channel_valid(ch)) { + if (!(ch->flags & EEPROM_CHANNEL_IBSS)) + geo_ch->flags |= IEEE80211_CHAN_NO_IBSS; + + if (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) + geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN; + + if (ch->flags & EEPROM_CHANNEL_RADAR) + geo_ch->flags |= IEEE80211_CHAN_RADAR; + + geo_ch->flags |= ch->ht40_extension_channel; + + if (ch->max_power_avg > max_tx_power) + max_tx_power = ch->max_power_avg; + } else { + geo_ch->flags |= IEEE80211_CHAN_DISABLED; + } + + IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", + ch->channel, geo_ch->center_freq, + is_channel_a_band(ch) ? "5.2" : "2.4", + geo_ch->flags & IEEE80211_CHAN_DISABLED ? + "restricted" : "valid", + geo_ch->flags); + } + + priv->tx_power_device_lmt = max_tx_power; + priv->tx_power_user_lmt = max_tx_power; + priv->tx_power_next = max_tx_power; + + if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) && + priv->hw_params.sku & EEPROM_SKU_CAP_BAND_52GHZ) { + IWL_INFO(priv, "Incorrectly detected BG card as ABG. " + "Please send your %s to maintainer.\n", + priv->trans->hw_id_str); + priv->hw_params.sku &= ~EEPROM_SKU_CAP_BAND_52GHZ; + } + + if (iwlwifi_mod_params.disable_5ghz) + priv->bands[IEEE80211_BAND_5GHZ].n_channels = 0; + + IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n", + priv->bands[IEEE80211_BAND_2GHZ].n_channels, + priv->bands[IEEE80211_BAND_5GHZ].n_channels); + + set_bit(STATUS_GEO_CONFIGURED, &priv->status); + + return 0; +} + +/* + * iwl_free_geos - undo allocations in iwl_init_geos + */ +static void iwl_free_geos(struct iwl_priv *priv) +{ + kfree(priv->ieee_channels); + kfree(priv->ieee_rates); + clear_bit(STATUS_GEO_CONFIGURED, &priv->status); +} + +int iwl_init_drv(struct iwl_priv *priv) { int ret; @@ -1040,7 +1344,7 @@ static int iwl_init_drv(struct iwl_priv *priv) priv->band = IEEE80211_BAND_2GHZ; priv->plcp_delta_threshold = - cfg(priv)->base_params->plcp_delta_threshold; + priv->cfg->base_params->plcp_delta_threshold; priv->iw_mode = NL80211_IFTYPE_STATION; priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; @@ -1049,12 +1353,6 @@ static int iwl_init_drv(struct iwl_priv *priv) priv->ucode_owner = IWL_OWNERSHIP_DRIVER; - /* initialize force reset */ - priv->force_reset[IWL_RF_RESET].reset_duration = - IWL_DELAY_NEXT_FORCE_RF_RESET; - priv->force_reset[IWL_FW_RESET].reset_duration = - IWL_DELAY_NEXT_FORCE_FW_RELOAD; - priv->rx_statistics_jiffies = jiffies; /* Choose which receivers/antennas to use */ @@ -1063,8 +1361,8 @@ static int iwl_init_drv(struct iwl_priv *priv) iwl_init_scan_params(priv); /* init bt coex */ - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist) { + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist) { priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT; priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT; priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK; @@ -1094,7 +1392,7 @@ err: return ret; } -static void iwl_uninit_drv(struct iwl_priv *priv) +void iwl_uninit_drv(struct iwl_priv *priv) { iwl_free_geos(priv); iwl_free_channel_map(priv); @@ -1107,75 +1405,59 @@ static void iwl_uninit_drv(struct iwl_priv *priv) #endif } -/* Size of one Rx buffer in host DRAM */ -#define 
IWL_RX_BUF_SIZE_4K (4 * 1024) -#define IWL_RX_BUF_SIZE_8K (8 * 1024) - -static void iwl_set_hw_params(struct iwl_priv *priv) +void iwl_set_hw_params(struct iwl_priv *priv) { - if (cfg(priv)->ht_params) - hw_params(priv).use_rts_for_aggregation = - cfg(priv)->ht_params->use_rts_for_aggregation; - - if (iwlagn_mod_params.amsdu_size_8K) - hw_params(priv).rx_page_order = - get_order(IWL_RX_BUF_SIZE_8K); - else - hw_params(priv).rx_page_order = - get_order(IWL_RX_BUF_SIZE_4K); - - if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_ALL) - hw_params(priv).sku &= ~EEPROM_SKU_CAP_11N_ENABLE; + if (priv->cfg->ht_params) + priv->hw_params.use_rts_for_aggregation = + priv->cfg->ht_params->use_rts_for_aggregation; - hw_params(priv).num_ampdu_queues = - cfg(priv)->base_params->num_of_ampdu_queues; - hw_params(priv).wd_timeout = cfg(priv)->base_params->wd_timeout; + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) + priv->hw_params.sku &= ~EEPROM_SKU_CAP_11N_ENABLE; /* Device-specific setup */ - cfg(priv)->lib->set_hw_params(priv); + priv->lib->set_hw_params(priv); } -static void iwl_debug_config(struct iwl_priv *priv) +/* show what optional capabilities we have */ +void iwl_option_config(struct iwl_priv *priv) { - dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEBUG " #ifdef CONFIG_IWLWIFI_DEBUG - "enabled\n"); + IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG enabled\n"); #else - "disabled\n"); + IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG disabled\n"); #endif - dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS " + #ifdef CONFIG_IWLWIFI_DEBUGFS - "enabled\n"); + IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUGFS enabled\n"); #else - "disabled\n"); + IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUGFS disabled\n"); #endif - dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TRACING " + #ifdef CONFIG_IWLWIFI_DEVICE_TRACING - "enabled\n"); + IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING enabled\n"); #else - "disabled\n"); + IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n"); #endif - dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TESTMODE " #ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE - "enabled\n"); + IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TESTMODE enabled\n"); #else - "disabled\n"); + IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TESTMODE disabled\n"); #endif - dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_P2P " + #ifdef CONFIG_IWLWIFI_P2P - "enabled\n"); + IWL_INFO(priv, "CONFIG_IWLWIFI_P2P enabled\n"); #else - "disabled\n"); + IWL_INFO(priv, "CONFIG_IWLWIFI_P2P disabled\n"); #endif } static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, + const struct iwl_cfg *cfg, const struct iwl_fw *fw) { - int err = 0; struct iwl_priv *priv; struct ieee80211_hw *hw; struct iwl_op_mode *op_mode; @@ -1190,25 +1472,60 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, STATISTICS_NOTIFICATION, REPLY_TX, }; + int i; /************************ * 1. 
Allocating HW data ************************/ hw = iwl_alloc_all(); if (!hw) { - pr_err("%s: Cannot allocate network device\n", - cfg(trans)->name); - err = -ENOMEM; + pr_err("%s: Cannot allocate network device\n", cfg->name); goto out; } op_mode = hw->priv; op_mode->ops = &iwl_dvm_ops; priv = IWL_OP_MODE_GET_DVM(op_mode); - priv->shrd = trans->shrd; + priv->trans = trans; + priv->dev = trans->dev; + priv->cfg = cfg; priv->fw = fw; - /* TODO: remove fw from shared data later */ - priv->shrd->fw = fw; + + switch (priv->cfg->device_family) { + case IWL_DEVICE_FAMILY_1000: + case IWL_DEVICE_FAMILY_100: + priv->lib = &iwl1000_lib; + break; + case IWL_DEVICE_FAMILY_2000: + case IWL_DEVICE_FAMILY_105: + priv->lib = &iwl2000_lib; + break; + case IWL_DEVICE_FAMILY_2030: + case IWL_DEVICE_FAMILY_135: + priv->lib = &iwl2030_lib; + break; + case IWL_DEVICE_FAMILY_5000: + priv->lib = &iwl5000_lib; + break; + case IWL_DEVICE_FAMILY_5150: + priv->lib = &iwl5150_lib; + break; + case IWL_DEVICE_FAMILY_6000: + case IWL_DEVICE_FAMILY_6005: + case IWL_DEVICE_FAMILY_6000i: + case IWL_DEVICE_FAMILY_6050: + case IWL_DEVICE_FAMILY_6150: + priv->lib = &iwl6000_lib; + break; + case IWL_DEVICE_FAMILY_6030: + priv->lib = &iwl6030_lib; + break; + default: + break; + } + + if (WARN_ON(!priv->lib)) + goto out_free_hw; /* * Populate the state variables that the transport layer needs @@ -1217,87 +1534,90 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, trans_cfg.op_mode = op_mode; trans_cfg.no_reclaim_cmds = no_reclaim_cmds; trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); + trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K; + if (!iwlwifi_mod_params.wd_disable) + trans_cfg.queue_watchdog_timeout = + priv->cfg->base_params->wd_timeout; + else + trans_cfg.queue_watchdog_timeout = IWL_WATCHHDOG_DISABLED; + trans_cfg.command_names = iwl_dvm_cmd_strings; ucode_flags = fw->ucode_capa.flags; #ifndef CONFIG_IWLWIFI_P2P - ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN; + ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P; #endif if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) { priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN; trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM; + trans_cfg.queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo; + trans_cfg.n_queue_to_fifo = + ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo); } else { priv->sta_key_max_num = STA_KEY_MAX_NUM; trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; + trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo; + trans_cfg.n_queue_to_fifo = + ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); } /* Configure transport layer */ - iwl_trans_configure(trans(priv), &trans_cfg); + iwl_trans_configure(priv->trans, &trans_cfg); /* At this point both hw and priv are allocated. */ - SET_IEEE80211_DEV(priv->hw, trans(priv)->dev); + SET_IEEE80211_DEV(priv->hw, priv->trans->dev); - /* show what debugging capabilities we have */ - iwl_debug_config(priv); + iwl_option_config(priv); IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); /* is antenna coupling more than 35dB ? */ priv->bt_ant_couple_ok = - (iwlagn_mod_params.ant_coupling > + (iwlwifi_mod_params.ant_coupling > IWL_BT_ANTENNA_COUPLING_THRESHOLD) ? true : false; /* enable/disable bt channel inhibition */ - priv->bt_ch_announce = iwlagn_mod_params.bt_ch_announce; + priv->bt_ch_announce = iwlwifi_mod_params.bt_ch_announce; IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n", (priv->bt_ch_announce) ? 
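A notable change in iwl_op_mode_dvm_start() is that the per-device library ops are now picked from cfg->device_family with a switch, with WARN_ON(!priv->lib) catching an unhandled family, instead of being carried inside the config struct. A compact userspace sketch of that dispatch pattern; the enum values and ops tables below are illustrative stand-ins, not the real iwl_lib_ops:

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins for the real per-family iwl_lib_ops tables. */
struct lib_ops {
        const char *name;
};

static const struct lib_ops lib_5000 = { "5000-series ops" };
static const struct lib_ops lib_6000 = { "6000-series ops" };

enum device_family {
        FAMILY_UNDEFINED,
        FAMILY_5000,
        FAMILY_6000,
};

static const struct lib_ops *pick_lib(enum device_family family)
{
        switch (family) {
        case FAMILY_5000:
                return &lib_5000;
        case FAMILY_6000:
                return &lib_6000;
        default:
                return NULL;    /* unknown family: caller must bail out */
        }
}

int main(void)
{
        const struct lib_ops *lib = pick_lib(FAMILY_6000);

        if (!lib) {             /* mirrors the WARN_ON(!priv->lib) path above */
                fprintf(stderr, "unhandled device family\n");
                return 1;
        }
        printf("using %s\n", lib->name);
        return 0;
}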
"On" : "Off"); - if (iwl_alloc_traffic_mem(priv)) - IWL_ERR(priv, "Not enough memory to generate traffic log\n"); - /* these spin locks will be used in apm_ops.init and EEPROM access * we should init now */ - spin_lock_init(&trans(priv)->reg_lock); spin_lock_init(&priv->statistics.lock); /*********************** * 2. Read REV register ***********************/ IWL_INFO(priv, "Detected %s, REV=0x%X\n", - cfg(priv)->name, trans(priv)->hw_rev); + priv->cfg->name, priv->trans->hw_rev); - err = iwl_trans_start_hw(trans(priv)); - if (err) - goto out_free_traffic_mem; + if (iwl_trans_start_hw(priv->trans)) + goto out_free_hw; - /***************** - * 3. Read EEPROM - *****************/ - err = iwl_eeprom_init(trans(priv), trans(priv)->hw_rev); - /* Reset chip to save power until we load uCode during "up". */ - iwl_trans_stop_hw(trans(priv)); - if (err) { + /* Read the EEPROM */ + if (iwl_eeprom_init(priv, priv->trans->hw_rev)) { IWL_ERR(priv, "Unable to init EEPROM\n"); - goto out_free_traffic_mem; + goto out_free_hw; } - err = iwl_eeprom_check_version(priv); - if (err) + /* Reset chip to save power until we load uCode during "up". */ + iwl_trans_stop_hw(priv->trans, false); + + if (iwl_eeprom_check_version(priv)) goto out_free_eeprom; - err = iwl_eeprom_init_hw_params(priv); - if (err) + if (iwl_eeprom_init_hw_params(priv)) goto out_free_eeprom; /* extract MAC Address */ - iwl_eeprom_get_mac(priv->shrd, priv->addresses[0].addr); + iwl_eeprom_get_mac(priv, priv->addresses[0].addr); IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); priv->hw->wiphy->addresses = priv->addresses; priv->hw->wiphy->n_addresses = 1; - num_mac = iwl_eeprom_query16(priv->shrd, EEPROM_NUM_MAC_ADDRESS); + num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS); if (num_mac > 1) { memcpy(priv->addresses[1].addr, priv->addresses[0].addr, ETH_ALEN); @@ -1310,7 +1630,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, ************************/ iwl_set_hw_params(priv); - if (!(hw_params(priv).sku & EEPROM_SKU_CAP_IPAN_ENABLE)) { + if (!(priv->hw_params.sku & EEPROM_SKU_CAP_IPAN_ENABLE)) { IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN"); ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN; /* @@ -1320,18 +1640,32 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P; priv->sta_key_max_num = STA_KEY_MAX_NUM; trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; + trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo; + trans_cfg.n_queue_to_fifo = + ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); /* Configure transport layer again*/ - iwl_trans_configure(trans(priv), &trans_cfg); + iwl_trans_configure(priv->trans, &trans_cfg); } /******************* * 5. Setup priv *******************/ + for (i = 0; i < IWL_MAX_HW_QUEUES; i++) { + priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE; + if (i < IWLAGN_FIRST_AMPDU_QUEUE && + i != IWL_DEFAULT_CMD_QUEUE_NUM && + i != IWL_IPAN_CMD_QUEUE_NUM) + priv->queue_to_mac80211[i] = i; + atomic_set(&priv->queue_stop_count[i], 0); + } + + WARN_ON(trans_cfg.queue_to_fifo[trans_cfg.cmd_queue] != + IWLAGN_CMD_FIFO_NUM); - err = iwl_init_drv(priv); - if (err) + if (iwl_init_drv(priv)) goto out_free_eeprom; + /* At this point both hw and priv are initialized. */ /******************** @@ -1364,15 +1698,12 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, * * 7. 
Setup and register with mac80211 and debugfs **************************************************/ - err = iwlagn_mac_setup_register(priv, &fw->ucode_capa); - if (err) + if (iwlagn_mac_setup_register(priv, &fw->ucode_capa)) goto out_destroy_workqueue; - err = iwl_dbgfs_register(priv, DRV_NAME); - if (err) + if (iwl_dbgfs_register(priv, DRV_NAME)) IWL_ERR(priv, - "failed to create debugfs files. Ignoring error: %d\n", - err); + "failed to create debugfs files. Ignoring error\n"); return op_mode; @@ -1381,16 +1712,15 @@ out_destroy_workqueue: priv->workqueue = NULL; iwl_uninit_drv(priv); out_free_eeprom: - iwl_eeprom_free(priv->shrd); -out_free_traffic_mem: - iwl_free_traffic_mem(priv); + iwl_eeprom_free(priv); +out_free_hw: ieee80211_free_hw(priv->hw); out: op_mode = NULL; return op_mode; } -static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode) +void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode) { struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); @@ -1405,9 +1735,9 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode) /*This will stop the queues, move the device to low power state */ priv->ucode_loaded = false; - iwl_trans_stop_device(trans(priv)); + iwl_trans_stop_device(priv->trans); - iwl_eeprom_free(priv->shrd); + iwl_eeprom_free(priv); /*netif_stop_queue(dev); */ flush_workqueue(priv->workqueue); @@ -1417,69 +1747,562 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode) * until now... */ destroy_workqueue(priv->workqueue); priv->workqueue = NULL; - iwl_free_traffic_mem(priv); iwl_uninit_drv(priv); dev_kfree_skb(priv->beacon_skb); + iwl_trans_stop_hw(priv->trans, true); ieee80211_free_hw(priv->hw); } -static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode) +static const char * const desc_lookup_text[] = { + "OK", + "FAIL", + "BAD_PARAM", + "BAD_CHECKSUM", + "NMI_INTERRUPT_WDG", + "SYSASSERT", + "FATAL_ERROR", + "BAD_COMMAND", + "HW_ERROR_TUNE_LOCK", + "HW_ERROR_TEMPERATURE", + "ILLEGAL_CHAN_FREQ", + "VCC_NOT_STABLE", + "FH_ERROR", + "NMI_INTERRUPT_HOST", + "NMI_INTERRUPT_ACTION_PT", + "NMI_INTERRUPT_UNKNOWN", + "UCODE_VERSION_MISMATCH", + "HW_ERROR_ABS_LOCK", + "HW_ERROR_CAL_LOCK_FAIL", + "NMI_INTERRUPT_INST_ACTION_PT", + "NMI_INTERRUPT_DATA_ACTION_PT", + "NMI_TRM_HW_ER", + "NMI_INTERRUPT_TRM", + "NMI_INTERRUPT_BREAK_POINT", + "DEBUG_0", + "DEBUG_1", + "DEBUG_2", + "DEBUG_3", +}; + +static struct { char *name; u8 num; } advanced_lookup[] = { + { "NMI_INTERRUPT_WDG", 0x34 }, + { "SYSASSERT", 0x35 }, + { "UCODE_VERSION_MISMATCH", 0x37 }, + { "BAD_COMMAND", 0x38 }, + { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, + { "FATAL_ERROR", 0x3D }, + { "NMI_TRM_HW_ERR", 0x46 }, + { "NMI_INTERRUPT_TRM", 0x4C }, + { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, + { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, + { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, + { "NMI_INTERRUPT_HOST", 0x66 }, + { "NMI_INTERRUPT_ACTION_PT", 0x7C }, + { "NMI_INTERRUPT_UNKNOWN", 0x84 }, + { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, + { "ADVANCED_SYSASSERT", 0 }, +}; + +static const char *desc_lookup(u32 num) +{ + int i; + int max = ARRAY_SIZE(desc_lookup_text); + + if (num < max) + return desc_lookup_text[num]; + + max = ARRAY_SIZE(advanced_lookup) - 1; + for (i = 0; i < max; i++) { + if (advanced_lookup[i].num == num) + break; + } + return advanced_lookup[i].name; +} + +#define ERROR_START_OFFSET (1 * sizeof(u32)) +#define ERROR_ELEM_SIZE (7 * sizeof(u32)) + +static void iwl_dump_nic_error_log(struct iwl_priv *priv) +{ + struct iwl_trans *trans = priv->trans; + u32 base; + struct iwl_error_event_table 
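desc_lookup() above resolves a firmware error code in two steps: small codes index desc_lookup_text[] directly, anything else is searched linearly in advanced_lookup[], whose last entry ("ADVANCED_SYSASSERT", 0) doubles as the fallback because the loop stops one short of it. A trimmed-down, compilable version of the same idea (only a few table entries are reproduced here):

#include <stdio.h>
#include <stdint.h>

/* Abbreviated copies of the two tables above; only a few entries are kept. */
static const char * const basic[] = { "OK", "FAIL", "BAD_PARAM" };

static const struct {
        const char *name;
        uint8_t num;
} advanced[] = {
        { "SYSASSERT",          0x35 },
        { "BAD_COMMAND",        0x38 },
        { "ADVANCED_SYSASSERT", 0    },  /* sentinel returned when nothing matches */
};

static const char *desc_lookup(uint32_t num)
{
        size_t i, max = sizeof(basic) / sizeof(basic[0]);

        if (num < max)
                return basic[num];      /* small codes index the table directly */

        max = sizeof(advanced) / sizeof(advanced[0]) - 1;
        for (i = 0; i < max; i++)
                if (advanced[i].num == num)
                        break;
        return advanced[i].name;        /* falls through to the sentinel */
}

int main(void)
{
        printf("%s / %s / %s\n",
               desc_lookup(1), desc_lookup(0x38), desc_lookup(0x99));
        return 0;
}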
table; + + base = priv->device_pointers.error_event_table; + if (priv->cur_ucode == IWL_UCODE_INIT) { + if (!base) + base = priv->fw->init_errlog_ptr; + } else { + if (!base) + base = priv->fw->inst_errlog_ptr; + } + + if (!iwlagn_hw_valid_rtc_data_addr(base)) { + IWL_ERR(priv, + "Not valid error log pointer 0x%08X for %s uCode\n", + base, + (priv->cur_ucode == IWL_UCODE_INIT) + ? "Init" : "RT"); + return; + } + + /*TODO: Update dbgfs with ISR error stats obtained below */ + iwl_read_targ_mem_words(trans, base, &table, sizeof(table)); + + if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { + IWL_ERR(trans, "Start IWL Error Log Dump:\n"); + IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", + priv->status, table.valid); + } + + trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, + table.data1, table.data2, table.line, + table.blink1, table.blink2, table.ilink1, + table.ilink2, table.bcon_time, table.gp1, + table.gp2, table.gp3, table.ucode_ver, + table.hw_ver, table.brd_ver); + IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id, + desc_lookup(table.error_id)); + IWL_ERR(priv, "0x%08X | uPc\n", table.pc); + IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1); + IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2); + IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1); + IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2); + IWL_ERR(priv, "0x%08X | data1\n", table.data1); + IWL_ERR(priv, "0x%08X | data2\n", table.data2); + IWL_ERR(priv, "0x%08X | line\n", table.line); + IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time); + IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low); + IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi); + IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1); + IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2); + IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3); + IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver); + IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver); + IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver); + IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd); + IWL_ERR(priv, "0x%08X | isr0\n", table.isr0); + IWL_ERR(priv, "0x%08X | isr1\n", table.isr1); + IWL_ERR(priv, "0x%08X | isr2\n", table.isr2); + IWL_ERR(priv, "0x%08X | isr3\n", table.isr3); + IWL_ERR(priv, "0x%08X | isr4\n", table.isr4); + IWL_ERR(priv, "0x%08X | isr_pref\n", table.isr_pref); + IWL_ERR(priv, "0x%08X | wait_event\n", table.wait_event); + IWL_ERR(priv, "0x%08X | l2p_control\n", table.l2p_control); + IWL_ERR(priv, "0x%08X | l2p_duration\n", table.l2p_duration); + IWL_ERR(priv, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); + IWL_ERR(priv, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); + IWL_ERR(priv, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); + IWL_ERR(priv, "0x%08X | timestamp\n", table.u_timestamp); + IWL_ERR(priv, "0x%08X | flow_handler\n", table.flow_handler); +} + +#define EVENT_START_OFFSET (4 * sizeof(u32)) + +/** + * iwl_print_event_log - Dump error event log to syslog + * + */ +static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, + u32 num_events, u32 mode, + int pos, char **buf, size_t bufsz) +{ + u32 i; + u32 base; /* SRAM byte address of event log header */ + u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ + u32 ptr; /* SRAM byte address of log data */ + u32 ev, time, data; /* event log data */ + unsigned long reg_flags; + + struct iwl_trans *trans = priv->trans; + + if (num_events == 0) + return pos; + + base = priv->device_pointers.log_event_table; + if (priv->cur_ucode == IWL_UCODE_INIT) { + 
if (!base) + base = priv->fw->init_evtlog_ptr; + } else { + if (!base) + base = priv->fw->inst_evtlog_ptr; + } + + if (mode == 0) + event_size = 2 * sizeof(u32); + else + event_size = 3 * sizeof(u32); + + ptr = base + EVENT_START_OFFSET + (start_idx * event_size); + + /* Make sure device is powered up for SRAM reads */ + spin_lock_irqsave(&trans->reg_lock, reg_flags); + if (unlikely(!iwl_grab_nic_access(trans))) + goto out_unlock; + + /* Set starting address; reads will auto-increment */ + iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr); + + /* "time" is actually "data" for mode 0 (no timestamp). + * place event id # at far right for easier visual parsing. */ + for (i = 0; i < num_events; i++) { + ev = iwl_read32(trans, HBUS_TARG_MEM_RDAT); + time = iwl_read32(trans, HBUS_TARG_MEM_RDAT); + if (mode == 0) { + /* data, ev */ + if (bufsz) { + pos += scnprintf(*buf + pos, bufsz - pos, + "EVT_LOG:0x%08x:%04u\n", + time, ev); + } else { + trace_iwlwifi_dev_ucode_event(trans->dev, 0, + time, ev); + IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", + time, ev); + } + } else { + data = iwl_read32(trans, HBUS_TARG_MEM_RDAT); + if (bufsz) { + pos += scnprintf(*buf + pos, bufsz - pos, + "EVT_LOGT:%010u:0x%08x:%04u\n", + time, data, ev); + } else { + IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n", + time, data, ev); + trace_iwlwifi_dev_ucode_event(trans->dev, time, + data, ev); + } + } + } + + /* Allow device to power down */ + iwl_release_nic_access(trans); +out_unlock: + spin_unlock_irqrestore(&trans->reg_lock, reg_flags); + return pos; +} + +/** + * iwl_print_last_event_logs - Dump the newest # of event log to syslog + */ +static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity, + u32 num_wraps, u32 next_entry, + u32 size, u32 mode, + int pos, char **buf, size_t bufsz) +{ + /* + * display the newest DEFAULT_LOG_ENTRIES entries + * i.e the entries just before the next ont that uCode would fill. + */ + if (num_wraps) { + if (next_entry < size) { + pos = iwl_print_event_log(priv, + capacity - (size - next_entry), + size - next_entry, mode, + pos, buf, bufsz); + pos = iwl_print_event_log(priv, 0, + next_entry, mode, + pos, buf, bufsz); + } else + pos = iwl_print_event_log(priv, next_entry - size, + size, mode, pos, buf, bufsz); + } else { + if (next_entry < size) { + pos = iwl_print_event_log(priv, 0, next_entry, + mode, pos, buf, bufsz); + } else { + pos = iwl_print_event_log(priv, next_entry - size, + size, mode, pos, buf, bufsz); + } + } + return pos; +} + +#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20) + +int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, + char **buf, bool display) +{ + u32 base; /* SRAM byte address of event log header */ + u32 capacity; /* event log capacity in # entries */ + u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ + u32 num_wraps; /* # times uCode wrapped to top of log */ + u32 next_entry; /* index of next entry to be written by uCode */ + u32 size; /* # entries that we'll print */ + u32 logsize; + int pos = 0; + size_t bufsz = 0; + struct iwl_trans *trans = priv->trans; + + base = priv->device_pointers.log_event_table; + if (priv->cur_ucode == IWL_UCODE_INIT) { + logsize = priv->fw->init_evtlog_size; + if (!base) + base = priv->fw->init_evtlog_ptr; + } else { + logsize = priv->fw->inst_evtlog_size; + if (!base) + base = priv->fw->inst_evtlog_ptr; + } + + if (!iwlagn_hw_valid_rtc_data_addr(base)) { + IWL_ERR(priv, + "Invalid event log pointer 0x%08X for %s uCode\n", + base, + (priv->cur_ucode == IWL_UCODE_INIT) + ? 
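iwl_print_last_event_logs() above has to print the newest N entries of a ring that lives in device SRAM, so when the log has wrapped and fewer than N entries sit before the write pointer it dumps two segments: the tail of the buffer first, then the rewritten head. A self-contained sketch of that index arithmetic on a plain in-memory ring:

#include <stdio.h>

/* Print the newest 'want' entries of a ring with 'capacity' slots, given how
 * many times it wrapped and where the next write would land. Mirrors the
 * two-segment logic of iwl_print_last_event_logs(). */
static void print_last(const int *ring, int capacity, int num_wraps,
                       int next_entry, int want)
{
        int i, start;

        if (num_wraps && next_entry < want) {
                /* Newest entries straddle the end of the buffer: print the
                 * tail segment first, then the part already rewritten at 0. */
                for (i = capacity - (want - next_entry); i < capacity; i++)
                        printf("%d ", ring[i]);
                for (i = 0; i < next_entry; i++)
                        printf("%d ", ring[i]);
        } else {
                /* Everything we want sits in one contiguous run. */
                start = next_entry < want ? 0 : next_entry - want;
                for (i = start; i < next_entry; i++)
                        printf("%d ", ring[i]);
        }
        printf("\n");
}

int main(void)
{
        int ring[8] = { 8, 9, 10, 11, 4, 5, 6, 7 };  /* wrapped once, next_entry = 4 */

        print_last(ring, 8, 1, 4, 5);   /* prints the newest five: 7 8 9 10 11 */
        return 0;
}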
"Init" : "RT"); + return -EINVAL; + } + + /* event log header */ + capacity = iwl_read_targ_mem(trans, base); + mode = iwl_read_targ_mem(trans, base + (1 * sizeof(u32))); + num_wraps = iwl_read_targ_mem(trans, base + (2 * sizeof(u32))); + next_entry = iwl_read_targ_mem(trans, base + (3 * sizeof(u32))); + + if (capacity > logsize) { + IWL_ERR(priv, "Log capacity %d is bogus, limit to %d " + "entries\n", capacity, logsize); + capacity = logsize; + } + + if (next_entry > logsize) { + IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", + next_entry, logsize); + next_entry = logsize; + } + + size = num_wraps ? capacity : next_entry; + + /* bail out if nothing in log */ + if (size == 0) { + IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n"); + return pos; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log) + size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) + ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; +#else + size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) + ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; +#endif + IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n", + size); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (display) { + if (full_log) + bufsz = capacity * 48; + else + bufsz = size * 48; + *buf = kmalloc(bufsz, GFP_KERNEL); + if (!*buf) + return -ENOMEM; + } + if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) { + /* + * if uCode has wrapped back to top of log, + * start at the oldest entry, + * i.e the next one that uCode would fill. + */ + if (num_wraps) + pos = iwl_print_event_log(priv, next_entry, + capacity - next_entry, mode, + pos, buf, bufsz); + /* (then/else) start at top of log */ + pos = iwl_print_event_log(priv, 0, + next_entry, mode, pos, buf, bufsz); + } else + pos = iwl_print_last_event_logs(priv, capacity, num_wraps, + next_entry, size, mode, + pos, buf, bufsz); +#else + pos = iwl_print_last_event_logs(priv, capacity, num_wraps, + next_entry, size, mode, + pos, buf, bufsz); +#endif + return pos; +} + +static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) +{ + unsigned int reload_msec; + unsigned long reload_jiffies; + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_have_debug_level(IWL_DL_FW_ERRORS)) + iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS); +#endif + + /* uCode is no longer loaded. */ + priv->ucode_loaded = false; + + /* Set the FW error flag -- cleared on iwl_down */ + set_bit(STATUS_FW_ERROR, &priv->status); + + iwl_abort_notification_waits(&priv->notif_wait); + + /* Keep the restart process from trying to send host + * commands by clearing the ready bit */ + clear_bit(STATUS_READY, &priv->status); + + wake_up(&priv->trans->wait_command_queue); + + if (!ondemand) { + /* + * If firmware keep reloading, then it indicate something + * serious wrong and firmware having problem to recover + * from it. 
Instead of keep trying which will fill the syslog + * and hang the system, let's just stop it + */ + reload_jiffies = jiffies; + reload_msec = jiffies_to_msecs((long) reload_jiffies - + (long) priv->reload_jiffies); + priv->reload_jiffies = reload_jiffies; + if (reload_msec <= IWL_MIN_RELOAD_DURATION) { + priv->reload_count++; + if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) { + IWL_ERR(priv, "BUG_ON, Stop restarting\n"); + return; + } + } else + priv->reload_count = 0; + } + + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { + if (iwlwifi_mod_params.restart_fw) { + IWL_DEBUG_FW_ERRORS(priv, + "Restarting adapter due to uCode error.\n"); + queue_work(priv->workqueue, &priv->restart); + } else + IWL_DEBUG_FW_ERRORS(priv, + "Detected FW error, but not restarting\n"); + } +} + +void iwl_nic_error(struct iwl_op_mode *op_mode) +{ + struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); + + IWL_ERR(priv, "Loaded firmware version: %s\n", + priv->fw->fw_version); + + iwl_dump_nic_error_log(priv); + iwl_dump_nic_event_log(priv, false, NULL, false); + + iwlagn_fw_error(priv, false); +} + +void iwl_cmd_queue_full(struct iwl_op_mode *op_mode) { struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); if (!iwl_check_for_ct_kill(priv)) { IWL_ERR(priv, "Restarting adapter queue is full\n"); - iwl_nic_error(op_mode); + iwlagn_fw_error(priv, false); } } -static void iwl_nic_config(struct iwl_op_mode *op_mode) +void iwl_nic_config(struct iwl_op_mode *op_mode) { struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - cfg(priv)->lib->nic_config(priv); + priv->lib->nic_config(priv); } -static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, u8 ac) +static void iwl_wimax_active(struct iwl_op_mode *op_mode) { struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - set_bit(ac, &priv->transport_queue_stop); - ieee80211_stop_queue(priv->hw, ac); + clear_bit(STATUS_READY, &priv->status); + IWL_ERR(priv, "RF is used by WiMAX\n"); } -static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, u8 ac) +void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue) { struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); + int mq = priv->queue_to_mac80211[queue]; + + if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE)) + return; - clear_bit(ac, &priv->transport_queue_stop); + if (atomic_inc_return(&priv->queue_stop_count[mq]) > 1) { + IWL_DEBUG_TX_QUEUES(priv, + "queue %d (mac80211 %d) already stopped\n", + queue, mq); + return; + } + + set_bit(mq, &priv->transport_queue_stop); + ieee80211_stop_queue(priv->hw, mq); +} + +void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) +{ + struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); + int mq = priv->queue_to_mac80211[queue]; + + if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE)) + return; + + if (atomic_dec_return(&priv->queue_stop_count[mq]) > 0) { + IWL_DEBUG_TX_QUEUES(priv, + "queue %d (mac80211 %d) already awake\n", + queue, mq); + return; + } + + clear_bit(mq, &priv->transport_queue_stop); if (!priv->passive_no_rx) - ieee80211_wake_queue(priv->hw, ac); + ieee80211_wake_queue(priv->hw, mq); } void iwlagn_lift_passive_no_rx(struct iwl_priv *priv) { - int ac; + int mq; if (!priv->passive_no_rx) return; - for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++) { - if (!test_bit(ac, &priv->transport_queue_stop)) { - IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d"); - ieee80211_wake_queue(priv->hw, ac); + for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) { + if (!test_bit(mq, &priv->transport_queue_stop)) { + IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d", mq); + 
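iwlagn_fw_error() above throttles firmware restarts: if the previous reload happened within IWL_MIN_RELOAD_DURATION it bumps reload_count, and after IWL_MAX_CONTINUE_RELOAD_CNT back-to-back crashes it refuses to queue another restart so a dying firmware cannot spin the system forever. A userspace model of that policy; the millisecond constants are illustrative, the real values live in the driver headers:

#include <stdio.h>
#include <stdbool.h>

#define MIN_RELOAD_DURATION_MS  1000    /* illustrative stand-in */
#define MAX_CONTINUE_RELOAD_CNT    4

struct reload_state {
        unsigned long last_ms;
        int count;
};

/* Return false once the firmware has crashed too often in a row and the
 * restart loop should be abandoned, mirroring iwlagn_fw_error(). */
static bool may_restart(struct reload_state *s, unsigned long now_ms)
{
        unsigned long delta = now_ms - s->last_ms;

        s->last_ms = now_ms;
        if (delta <= MIN_RELOAD_DURATION_MS) {
                if (++s->count >= MAX_CONTINUE_RELOAD_CNT)
                        return false;
        } else {
                s->count = 0;   /* quiet period: forget earlier crashes */
        }
        return true;
}

int main(void)
{
        struct reload_state s = { 0, 0 };
        unsigned long t;

        for (t = 2000; t <= 2750; t += 150)
                printf("crash at t=%lums, restart allowed: %d\n",
                       t, may_restart(&s, t));
        return 0;
}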
ieee80211_wake_queue(priv->hw, mq); } else { - IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d"); + IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d", mq); } } priv->passive_no_rx = false; } +void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info; + + info = IEEE80211_SKB_CB(skb); + kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1])); + dev_kfree_skb_any(skb); +} + +void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) +{ + struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); + + if (state) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + wiphy_rfkill_set_hw_state(priv->hw->wiphy, state); +} + const struct iwl_op_mode_ops iwl_dvm_ops = { .start = iwl_op_mode_dvm_start, .stop = iwl_op_mode_dvm_stop, @@ -1491,6 +2314,7 @@ const struct iwl_op_mode_ops iwl_dvm_ops = { .nic_error = iwl_nic_error, .cmd_queue_full = iwl_cmd_queue_full, .nic_config = iwl_nic_config, + .wimax_active = iwl_wimax_active, }; /***************************************************************************** @@ -1541,96 +2365,3 @@ static void __exit iwl_exit(void) module_exit(iwl_exit); module_init(iwl_init); - -#ifdef CONFIG_IWLWIFI_DEBUG -module_param_named(debug, iwlagn_mod_params.debug_level, uint, - S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(debug, "debug output mask"); -#endif - -module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO); -MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); -module_param_named(11n_disable, iwlagn_mod_params.disable_11n, uint, S_IRUGO); -MODULE_PARM_DESC(11n_disable, - "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX"); -module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K, - int, S_IRUGO); -MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); -module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO); -MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); - -module_param_named(ucode_alternative, - iwlagn_mod_params.wanted_ucode_alternative, - int, S_IRUGO); -MODULE_PARM_DESC(ucode_alternative, - "specify ucode alternative to use from ucode file"); - -module_param_named(antenna_coupling, iwlagn_mod_params.ant_coupling, - int, S_IRUGO); -MODULE_PARM_DESC(antenna_coupling, - "specify antenna coupling in dB (defualt: 0 dB)"); - -module_param_named(bt_ch_inhibition, iwlagn_mod_params.bt_ch_announce, - bool, S_IRUGO); -MODULE_PARM_DESC(bt_ch_inhibition, - "Enable BT channel inhibition (default: enable)"); - -module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO); -MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])"); - -module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO); -MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])"); - -module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO); -MODULE_PARM_DESC(wd_disable, - "Disable stuck queue watchdog timer 0=system default, " - "1=disable, 2=enable (default: 0)"); - -/* - * set bt_coex_active to true, uCode will do kill/defer - * every time the priority line is asserted (BT is sending signals on the - * priority line in the PCIx). - * set bt_coex_active to false, uCode will ignore the BT activity and - * perform the normal operation - * - * User might experience transmit issue on some platform due to WiFi/BT - * co-exist problem. 
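The reworked iwl_stop_sw_queue()/iwl_wake_sw_queue() above translate a hardware queue to its mac80211 queue and then reference-count stops with atomic_inc_return()/atomic_dec_return(), so only the first stop and the matching last wake actually reach mac80211. A single-threaded sketch of that counting, with plain ints standing in for the atomics:

#include <stdio.h>

struct queue_state {
        int stop_count;
        int stopped;    /* stands in for ieee80211_stop/wake_queue() */
};

static void stop_queue(struct queue_state *q)
{
        if (++q->stop_count > 1)
                return;         /* already stopped by an earlier caller */
        q->stopped = 1;
}

static void wake_queue(struct queue_state *q)
{
        if (--q->stop_count > 0)
                return;         /* someone else still wants it stopped */
        q->stopped = 0;
}

int main(void)
{
        struct queue_state q = { 0, 0 };

        stop_queue(&q);
        stop_queue(&q);         /* nested stop */
        wake_queue(&q);
        printf("after one wake: stopped=%d\n", q.stopped);      /* still 1 */
        wake_queue(&q);
        printf("after both wakes: stopped=%d\n", q.stopped);    /* now 0 */
        return 0;
}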
The possible behaviors are: - * Able to scan and finding all the available AP - * Not able to associate with any AP - * On those platforms, WiFi communication can be restored by set - * "bt_coex_active" module parameter to "false" - * - * default: bt_coex_active = true (BT_COEX_ENABLE) - */ -module_param_named(bt_coex_active, iwlagn_mod_params.bt_coex_active, - bool, S_IRUGO); -MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)"); - -module_param_named(led_mode, iwlagn_mod_params.led_mode, int, S_IRUGO); -MODULE_PARM_DESC(led_mode, "0=system default, " - "1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)"); - -module_param_named(power_save, iwlagn_mod_params.power_save, - bool, S_IRUGO); -MODULE_PARM_DESC(power_save, - "enable WiFi power management (default: disable)"); - -module_param_named(power_level, iwlagn_mod_params.power_level, - int, S_IRUGO); -MODULE_PARM_DESC(power_level, - "default power save level (range from 1 - 5, default: 1)"); - -module_param_named(auto_agg, iwlagn_mod_params.auto_agg, - bool, S_IRUGO); -MODULE_PARM_DESC(auto_agg, - "enable agg w/o check traffic load (default: enable)"); - -/* - * For now, keep using power level 1 instead of automatically - * adjusting ... - */ -module_param_named(no_sleep_autoadjust, iwlagn_mod_params.no_sleep_autoadjust, - bool, S_IRUGO); -MODULE_PARM_DESC(no_sleep_autoadjust, - "don't automatically adjust sleep level " - "according to maximum network latency (default: true)"); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h index 3780a03f271..79c0fe06f4d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn.h @@ -64,6 +64,43 @@ #define __iwl_agn_h__ #include "iwl-dev.h" +#include "iwl-config.h" + +/* The first 11 queues (0-10) are used otherwise */ +#define IWLAGN_FIRST_AMPDU_QUEUE 11 + +/* AUX (TX during scan dwell) queue */ +#define IWL_AUX_QUEUE 10 + +/* device operations */ +extern struct iwl_lib_ops iwl1000_lib; +extern struct iwl_lib_ops iwl2000_lib; +extern struct iwl_lib_ops iwl2030_lib; +extern struct iwl_lib_ops iwl5000_lib; +extern struct iwl_lib_ops iwl5150_lib; +extern struct iwl_lib_ops iwl6000_lib; +extern struct iwl_lib_ops iwl6030_lib; + + +#define TIME_UNIT 1024 + +/***************************************************** +* DRIVER STATUS FUNCTIONS +******************************************************/ +#define STATUS_RF_KILL_HW 0 +#define STATUS_CT_KILL 1 +#define STATUS_ALIVE 2 +#define STATUS_READY 3 +#define STATUS_GEO_CONFIGURED 4 +#define STATUS_EXIT_PENDING 5 +#define STATUS_STATISTICS 6 +#define STATUS_SCANNING 7 +#define STATUS_SCAN_ABORTING 8 +#define STATUS_SCAN_HW 9 +#define STATUS_FW_ERROR 10 +#define STATUS_CHANNEL_SWITCH_PENDING 11 +#define STATUS_SCAN_COMPLETE 12 +#define STATUS_POWER_PMI 13 struct iwl_ucode_capabilities; @@ -80,12 +117,9 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd) void iwl_down(struct iwl_priv *priv); void iwl_cancel_deferred_work(struct iwl_priv *priv); void iwlagn_prepare_restart(struct iwl_priv *priv); -void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb); int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd); -void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state); -void iwl_nic_error(struct iwl_op_mode *op_mode); bool iwl_check_for_ct_kill(struct iwl_priv *priv); @@ -103,6 +137,8 @@ int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 
flags, u16 len, const void *data); /* RXON */ +void iwl_connection_init_rx_config(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); int iwlagn_set_pan_params(struct iwl_priv *priv); int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx); void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx); @@ -113,11 +149,15 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, u32 changes); void iwlagn_config_ht40(struct ieee80211_conf *conf, struct iwl_rxon_context *ctx); +void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf); +void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, + struct iwl_rxon_context *ctx); +void iwl_set_flags_for_band(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + enum ieee80211_band band, + struct ieee80211_vif *vif); /* uCode */ -int iwlagn_rx_calib_result(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type); void iwl_send_prio_tbl(struct iwl_priv *priv); int iwl_init_alive_start(struct iwl_priv *priv); @@ -128,14 +168,25 @@ int iwl_send_calib_results(struct iwl_priv *priv); int iwl_calib_set(struct iwl_priv *priv, const struct iwl_calib_hdr *cmd, int len); void iwl_calib_free_results(struct iwl_priv *priv); +int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, + char **buf, bool display); +int iwlagn_hw_valid_rtc_data_addr(u32 addr); /* lib */ int iwlagn_send_tx_power(struct iwl_priv *priv); void iwlagn_temperature(struct iwl_priv *priv); -u16 iwl_eeprom_calib_version(struct iwl_shared *shrd); int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control); void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control); int iwlagn_send_beacon_cmd(struct iwl_priv *priv); +int iwl_send_statistics_request(struct iwl_priv *priv, + u8 flags, bool clear); + +static inline const struct ieee80211_supported_band *iwl_get_hw_mode( + struct iwl_priv *priv, enum ieee80211_band band) +{ + return priv->hw->wiphy->bands[band]; +} + #ifdef CONFIG_PM_SLEEP int iwlagn_send_patterns(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan); @@ -145,6 +196,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan); /* rx */ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); void iwl_setup_rx_handlers(struct iwl_priv *priv); +void iwl_chswitch_done(struct iwl_priv *priv, bool is_success); /* tx */ @@ -189,6 +241,31 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid); /* scan */ void iwlagn_post_scan(struct iwl_priv *priv); void iwlagn_disable_roc(struct iwl_priv *priv); +int iwl_force_rf_reset(struct iwl_priv *priv, bool external); +void iwl_init_scan_params(struct iwl_priv *priv); +int iwl_scan_cancel(struct iwl_priv *priv); +void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); +void iwl_force_scan_end(struct iwl_priv *priv); +void iwl_internal_short_hw_scan(struct iwl_priv *priv); +void iwl_setup_rx_scan_handlers(struct iwl_priv *priv); +void iwl_setup_scan_deferred_work(struct iwl_priv *priv); +void iwl_cancel_scan_deferred_work(struct iwl_priv *priv); +int __must_check iwl_scan_initiate(struct iwl_priv *priv, + struct ieee80211_vif *vif, + enum iwl_scan_type scan_type, + enum ieee80211_band band); + +/* For faster active scanning, scan will move to the next channel if fewer than + * PLCP_QUIET_THRESH packets are heard on this channel within + * ACTIVE_QUIET_TIME after sending probe 
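The comment block above describes the "quiet channel" optimisation: during an active scan the firmware may leave a channel after IWL_ACTIVE_QUIET_TIME if fewer than IWL_PLCP_QUIET_THRESH frames were heard, since nothing answered the probe request. The decision itself is made by the uCode; the sketch below only illustrates the threshold test that the two constants encode:

#include <stdio.h>
#include <stdbool.h>

#define ACTIVE_QUIET_TIME_MS 10   /* IWL_ACTIVE_QUIET_TIME above */
#define PLCP_QUIET_THRESH     1   /* IWL_PLCP_QUIET_THRESH above */

/* A channel counts as "quiet" (and the dwell can be cut short) when fewer
 * than PLCP_QUIET_THRESH frames were heard within the quiet-time window. */
static bool channel_is_quiet(unsigned frames_heard, unsigned elapsed_ms)
{
        return elapsed_ms >= ACTIVE_QUIET_TIME_MS &&
               frames_heard < PLCP_QUIET_THRESH;
}

int main(void)
{
        printf("0 frames after 10 ms -> quiet: %d\n", channel_is_quiet(0, 10));
        printf("3 frames after 10 ms -> quiet: %d\n", channel_is_quiet(3, 10));
        return 0;
}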
request. This shortens the dwell + * time if it's a quiet channel (nothing responded to our probe, and there's + * no other traffic). + * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ +#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */ +#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */ + +#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7) + /* bt coex */ void iwlagn_send_advance_bt_config(struct iwl_priv *priv); @@ -201,6 +278,12 @@ void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv); void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv); void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena); +static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv) +{ + return priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist; +} + #ifdef CONFIG_IWLWIFI_DEBUG const char *iwl_get_tx_fail_reason(u32 status); const char *iwl_get_agg_tx_fail_reason(u16 status); @@ -239,8 +322,6 @@ void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id, u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, const u8 *addr, bool is_ap, struct ieee80211_sta *sta); -void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - u8 sta_id, struct iwl_link_quality_cmd *link_cmd); int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct iwl_link_quality_cmd *lq, u8 flags, bool init); int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, @@ -248,6 +329,9 @@ int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct ieee80211_sta *sta); +bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_sta_ht_cap *ht_cap); static inline int iwl_sta_id(struct ieee80211_sta *sta) { @@ -305,9 +389,6 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags) return cpu_to_le32(flags|(u32)rate); } -/* eeprom */ -void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac); - extern int iwl_alive_start(struct iwl_priv *priv); /* svtool */ #ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE @@ -386,13 +467,35 @@ static inline int iwl_is_ready_rf(struct iwl_priv *priv) return iwl_is_ready(priv); } +static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state) +{ + if (state) + set_bit(STATUS_POWER_PMI, &priv->status); + else + clear_bit(STATUS_POWER_PMI, &priv->status); + iwl_trans_set_pmi(priv->trans, state); +} + +#ifdef CONFIG_IWLWIFI_DEBUGFS +int iwl_dbgfs_register(struct iwl_priv *priv, const char *name); +void iwl_dbgfs_unregister(struct iwl_priv *priv); +#else +static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) +{ + return 0; +} +static inline void iwl_dbgfs_unregister(struct iwl_priv *priv) +{ +} +#endif /* CONFIG_IWLWIFI_DEBUGFS */ + #ifdef CONFIG_IWLWIFI_DEBUG #define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) 
\ do { \ if (!iwl_is_rfkill((m))) \ IWL_ERR(m, fmt, ##args); \ else \ - __iwl_err(trans(m)->dev, true, \ + __iwl_err((m)->dev, true, \ !iwl_have_debug_level(IWL_DL_RADIO), \ fmt, ##args); \ } while (0) @@ -402,8 +505,98 @@ do { \ if (!iwl_is_rfkill((m))) \ IWL_ERR(m, fmt, ##args); \ else \ - __iwl_err(trans(m)->dev, true, true, fmt, ##args); \ + __iwl_err((m)->dev, true, true, fmt, ##args); \ } while (0) #endif /* CONFIG_IWLWIFI_DEBUG */ +extern const char *iwl_dvm_cmd_strings[REPLY_MAX]; + +static inline const char *iwl_dvm_get_cmd_string(u8 cmd) +{ + const char *s = iwl_dvm_cmd_strings[cmd]; + if (s) + return s; + return "UNKNOWN"; +} + +/* API method exported for mvm hybrid state */ +void iwl_setup_deferred_work(struct iwl_priv *priv); +int iwl_send_wimax_coex(struct iwl_priv *priv); +int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type); +void iwl_option_config(struct iwl_priv *priv); +void iwl_set_hw_params(struct iwl_priv *priv); +void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags); +int iwl_init_drv(struct iwl_priv *priv); +void iwl_uninit_drv(struct iwl_priv *priv); +void iwl_send_bt_config(struct iwl_priv *priv); +void iwl_rf_kill_ct_config(struct iwl_priv *priv); +int iwl_setup_interface(struct iwl_priv *priv, struct iwl_rxon_context *ctx); +void iwl_teardown_interface(struct iwl_priv *priv, + struct ieee80211_vif *vif, + bool mode_change); +int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx); +void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx); +void iwlagn_check_needed_chains(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_bss_conf *bss_conf); +void iwlagn_chain_noise_reset(struct iwl_priv *priv); +int iwlagn_update_beacon(struct iwl_priv *priv, + struct ieee80211_vif *vif); +void iwl_tt_handler(struct iwl_priv *priv); +void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode); +void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue); +void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state); +void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb); +void iwl_nic_error(struct iwl_op_mode *op_mode); +void iwl_cmd_queue_full(struct iwl_op_mode *op_mode); +void iwl_nic_config(struct iwl_op_mode *op_mode); +int iwlagn_mac_set_tim(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, bool set); +void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw, + enum ieee80211_rssi_event rssi_event); +int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw); +int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw); +void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop); +void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue); +void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_channel_switch *ch_switch); +int iwlagn_mac_sta_state(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state); +int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size); +int iwlagn_mac_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req); +void iwlagn_mac_sta_notify(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta); +void iwlagn_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int 
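iwl_dvm_get_cmd_string() above is a thin guard around the iwl_dvm_cmd_strings[] table: the array is indexed by command id and is sparse, so missing entries come back as "UNKNOWN" rather than a NULL pointer reaching a printk. A compilable sketch of the same pattern; the two command ids below are placeholders, not the real opcode values:

#include <stdio.h>

#define REPLY_MAX 0xff

/* Sparse id -> name table; unknown or unnamed ids fall back to "UNKNOWN". */
static const char * const cmd_strings[REPLY_MAX] = {
        [0x01] = "REPLY_ALIVE",         /* illustrative entries only */
        [0x1c] = "REPLY_TX",
};

static const char *get_cmd_string(unsigned char cmd)
{
        const char *s;

        if (cmd >= REPLY_MAX)
                return "UNKNOWN";
        s = cmd_strings[cmd];
        return s ? s : "UNKNOWN";
}

int main(void)
{
        printf("%s / %s\n", get_cmd_string(0x1c), get_cmd_string(0x77));
        return 0;
}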
*total_flags, + u64 multicast); +int iwlagn_mac_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 queue, + const struct ieee80211_tx_queue_params *params); +void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data); +void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key); +int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); +void iwlagn_mac_stop(struct ieee80211_hw *hw); +void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); +int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); #endif /* __iwl_agn_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h index 9ed73e5154b..83a6930f365 100644 --- a/drivers/net/wireless/iwlwifi/iwl-commands.h +++ b/drivers/net/wireless/iwlwifi/iwl-commands.h @@ -1877,9 +1877,16 @@ struct iwl_bt_cmd { #define IWLAGN_BT3_T7_DEFAULT 1 +enum iwl_bt_kill_idx { + IWL_BT_KILL_DEFAULT = 0, + IWL_BT_KILL_OVERRIDE = 1, + IWL_BT_KILL_REDUCE = 2, +}; + #define IWLAGN_BT_KILL_ACK_MASK_DEFAULT cpu_to_le32(0xffff0000) #define IWLAGN_BT_KILL_CTS_MASK_DEFAULT cpu_to_le32(0xffff0000) #define IWLAGN_BT_KILL_ACK_CTS_MASK_SCO cpu_to_le32(0xffffffff) +#define IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE cpu_to_le32(0) #define IWLAGN_BT3_PRIO_SAMPLE_DEFAULT 2 @@ -1891,7 +1898,7 @@ struct iwl_bt_cmd { #define IWLAGN_BT_VALID_3W_TIMERS cpu_to_le16(BIT(3)) #define IWLAGN_BT_VALID_KILL_ACK_MASK cpu_to_le16(BIT(4)) #define IWLAGN_BT_VALID_KILL_CTS_MASK cpu_to_le16(BIT(5)) -#define IWLAGN_BT_VALID_BT4_TIMES cpu_to_le16(BIT(6)) +#define IWLAGN_BT_VALID_REDUCED_TX_PWR cpu_to_le16(BIT(6)) #define IWLAGN_BT_VALID_3W_LUT cpu_to_le16(BIT(7)) #define IWLAGN_BT_ALL_VALID_MSK (IWLAGN_BT_VALID_ENABLE_FLAGS | \ @@ -1900,9 +1907,11 @@ struct iwl_bt_cmd { IWLAGN_BT_VALID_3W_TIMERS | \ IWLAGN_BT_VALID_KILL_ACK_MASK | \ IWLAGN_BT_VALID_KILL_CTS_MASK | \ - IWLAGN_BT_VALID_BT4_TIMES | \ + IWLAGN_BT_VALID_REDUCED_TX_PWR | \ IWLAGN_BT_VALID_3W_LUT) +#define IWLAGN_BT_DECISION_LUT_SIZE 12 + struct iwl_basic_bt_cmd { u8 flags; u8 ledtime; /* unused */ @@ -1913,12 +1922,13 @@ struct iwl_basic_bt_cmd { u8 bt3_prio_sample_time; u8 bt3_timer_t2_value; __le16 bt4_reaction_time; /* unused */ - __le32 bt3_lookup_table[12]; - __le16 bt4_decision_time; /* unused */ + __le32 bt3_lookup_table[IWLAGN_BT_DECISION_LUT_SIZE]; + u8 reduce_txpower; + u8 reserved; __le16 valid; }; -struct iwl6000_bt_cmd { +struct iwl_bt_cmd_v1 { struct iwl_basic_bt_cmd basic; u8 prio_boost; /* @@ -1929,7 +1939,7 @@ struct iwl6000_bt_cmd { __le16 rx_prio_boost; /* SW boost of WiFi rx priority */ }; -struct iwl2000_bt_cmd { +struct iwl_bt_cmd_v2 { struct iwl_basic_bt_cmd basic; __le32 prio_boost; /* @@ -3634,6 +3644,9 @@ enum iwl_bt_coex_profile_traffic_load { (0x3<<BT_UART_MSG_2_FRAME7RESERVED_POS) +#define BT_ENABLE_REDUCED_TXPOWER_THRESHOLD (-62) +#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD (-65) + struct iwl_bt_uart_msg { u8 header; u8 frame1; diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-config.h index b515d657a0a..67b28aa7f9b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-shared.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h @@ -60,136 +60,29 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ -#ifndef __iwl_shared_h__ -#define __iwl_shared_h__ +#ifndef __IWL_CONFIG_H__ +#define __IWL_CONFIG_H__ #include <linux/types.h> -#include <linux/spinlock.h> -#include <linux/gfp.h> #include <net/mac80211.h> -#include "iwl-commands.h" -#include "iwl-fw.h" -/** - * DOC: shared area - role and goal - * - * The shared area contains all the data exported by the upper layer to the - * other layers. Since the bus and transport layer shouldn't dereference - * iwl_priv, all the data needed by the upper layer and the transport / bus - * layer must be here. - * The shared area also holds pointer to all the other layers. This allows a - * layer to call a function from another layer. - * - * NOTE: All the layers hold a pointer to the shared area which must be shrd. - * A few macros assume that (_m)->shrd points to the shared area no matter - * what _m is. - * - * gets notifications about enumeration, suspend, resume. - * For the moment, the bus layer is not a linux kernel module as itself, and - * the module_init function of the driver must call the bus specific - * registration functions. These functions are listed at the end of this file. - * For the moment, there is only one implementation of this interface: PCI-e. - * This implementation is iwl-pci.c - */ - -struct iwl_priv; -struct iwl_trans; -struct iwl_sensitivity_ranges; -struct iwl_trans_ops; - -#define DRV_NAME "iwlwifi" -#define IWLWIFI_VERSION "in-tree:" -#define DRV_COPYRIGHT "Copyright(c) 2003-2012 Intel Corporation" -#define DRV_AUTHOR "<ilw@linux.intel.com>" - -extern struct iwl_mod_params iwlagn_mod_params; - -#define IWL_DISABLE_HT_ALL BIT(0) -#define IWL_DISABLE_HT_TXAGG BIT(1) -#define IWL_DISABLE_HT_RXAGG BIT(2) - -/** - * struct iwl_mod_params - * - * Holds the module parameters - * - * @sw_crypto: using hardware encryption, default = 0 - * @disable_11n: disable 11n capabilities, default = 0, - * use IWL_DISABLE_HT_* constants - * @amsdu_size_8K: enable 8K amsdu size, default = 1 - * @antenna: both antennas (use diversity), default = 0 - * @restart_fw: restart firmware, default = 1 - * @plcp_check: enable plcp health check, default = true - * @ack_check: disable ack health check, default = false - * @wd_disable: enable stuck queue check, default = 0 - * @bt_coex_active: enable bt coex, default = true - * @led_mode: system default, default = 0 - * @no_sleep_autoadjust: disable autoadjust, default = true - * @power_save: disable power save, default = false - * @power_level: power level, default = 1 - * @debug_level: levels are IWL_DL_* - * @ant_coupling: antenna coupling in dB, default = 0 - * @bt_ch_announce: BT channel inhibition, default = enable - * @wanted_ucode_alternative: ucode alternative to use, default = 1 - * @auto_agg: enable agg. 
without check, default = true - */ -struct iwl_mod_params { - int sw_crypto; - unsigned int disable_11n; - int amsdu_size_8K; - int antenna; - int restart_fw; - bool plcp_check; - bool ack_check; - int wd_disable; - bool bt_coex_active; - int led_mode; - bool no_sleep_autoadjust; - bool power_save; - int power_level; - u32 debug_level; - int ant_coupling; - bool bt_ch_announce; - int wanted_ucode_alternative; - bool auto_agg; -}; - -/** - * struct iwl_hw_params - * - * Holds the module parameters - * - * @num_ampdu_queues: num of ampdu queues - * @tx_chains_num: Number of TX chains - * @rx_chains_num: Number of RX chains - * @valid_tx_ant: usable antennas for TX - * @valid_rx_ant: usable antennas for RX - * @ht40_channel: is 40MHz width possible: BIT(IEEE80211_BAND_XXX) - * @sku: sku read from EEPROM - * @rx_page_order: Rx buffer page order - * @ct_kill_threshold: temperature threshold - in hw dependent unit - * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit - * relevant for 1000, 6000 and up - * @wd_timeout: TX queues watchdog timeout - * @struct iwl_sensitivity_ranges: range of sensitivity values - * @use_rts_for_aggregation: use rts/cts protection for HT traffic - */ -struct iwl_hw_params { - u8 num_ampdu_queues; - u8 tx_chains_num; - u8 rx_chains_num; - u8 valid_tx_ant; - u8 valid_rx_ant; - u8 ht40_channel; - bool use_rts_for_aggregation; - u16 sku; - u32 rx_page_order; - u32 ct_kill_threshold; - u32 ct_kill_exit_threshold; - unsigned int wd_timeout; - - const struct iwl_sensitivity_ranges *sens; +enum iwl_device_family { + IWL_DEVICE_FAMILY_UNDEFINED, + IWL_DEVICE_FAMILY_1000, + IWL_DEVICE_FAMILY_100, + IWL_DEVICE_FAMILY_2000, + IWL_DEVICE_FAMILY_2030, + IWL_DEVICE_FAMILY_105, + IWL_DEVICE_FAMILY_135, + IWL_DEVICE_FAMILY_5000, + IWL_DEVICE_FAMILY_5150, + IWL_DEVICE_FAMILY_6000, + IWL_DEVICE_FAMILY_6000i, + IWL_DEVICE_FAMILY_6005, + IWL_DEVICE_FAMILY_6030, + IWL_DEVICE_FAMILY_6050, + IWL_DEVICE_FAMILY_6150, }; /* @@ -209,6 +102,34 @@ enum iwl_led_mode { }; /* + * This is the threshold value of plcp error rate per 100mSecs. It is + * used to set and check for the validity of plcp_delta. 
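The IWL_MAX_PLCP_ERR_* defines moved here express a PLCP error budget per 100 ms; a value of 0 disables the check entirely. The actual health check lives in the Rx statistics path, but roughly it scales the observed error delta to a per-100 ms rate and compares it with the per-device threshold, along the lines of this hedged sketch (not the driver's exact code):

#include <stdio.h>
#include <stdbool.h>

#define PLCP_ERR_THRESHOLD_DEF      50   /* errors per 100 ms, as above */
#define PLCP_ERR_THRESHOLD_DISABLE   0

/* Rough shape of the PLCP health check: scale the error delta to a
 * per-100ms rate and compare it with the per-device threshold. */
static bool plcp_unhealthy(unsigned plcp_delta, unsigned elapsed_ms,
                           unsigned threshold)
{
        if (threshold == PLCP_ERR_THRESHOLD_DISABLE || !elapsed_ms)
                return false;
        return plcp_delta * 100 / elapsed_ms > threshold;
}

int main(void)
{
        printf("120 errors in 200 ms -> unhealthy: %d\n",
               plcp_unhealthy(120, 200, PLCP_ERR_THRESHOLD_DEF));
        printf("40 errors in 200 ms -> unhealthy: %d\n",
               plcp_unhealthy(40, 200, PLCP_ERR_THRESHOLD_DEF));
        return 0;
}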
+ */ +#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN 1 +#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF 50 +#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF 100 +#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF 200 +#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX 255 +#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE 0 + +/* TX queue watchdog timeouts in mSecs */ +#define IWL_WATCHHDOG_DISABLED 0 +#define IWL_DEF_WD_TIMEOUT 2000 +#define IWL_LONG_WD_TIMEOUT 10000 +#define IWL_MAX_WD_TIMEOUT 120000 + +/* Antenna presence definitions */ +#define ANT_NONE 0x0 +#define ANT_A BIT(0) +#define ANT_B BIT(1) +#define ANT_C BIT(2) +#define ANT_AB (ANT_A | ANT_B) +#define ANT_AC (ANT_A | ANT_C) +#define ANT_BC (ANT_B | ANT_C) +#define ANT_ABC (ANT_A | ANT_B | ANT_C) + + +/* * @max_ll_items: max number of OTP blocks * @shadow_ram_support: shadow support for OTP memory * @led_compensation: compensate on the led on/off time per HW according @@ -217,7 +138,6 @@ enum iwl_led_mode { * @chain_noise_num_beacons: number of beacons used to compute chain noise * @adv_thermal_throttle: support advance thermal throttle * @support_ct_kill_exit: support ct kill exit condition - * @support_wimax_coexist: support wimax/wifi co-exist * @plcp_delta_threshold: plcp error rate threshold used to trigger * radio tuning when there is a high receiving plcp error rate * @chain_noise_scale: default chain noise scale used for gain computation @@ -226,12 +146,10 @@ enum iwl_led_mode { * @shadow_reg_enable: HW shadhow register bit * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up * @no_idle_support: do not support idle mode - * wd_disable: disable watchdog timer */ struct iwl_base_params { int eeprom_size; int num_of_queues; /* def: HW dependent */ - int num_of_ampdu_queues;/* def: HW dependent */ /* for iwl_apm_init() */ u32 pll_cfg_val; @@ -240,7 +158,6 @@ struct iwl_base_params { u16 led_compensation; bool adv_thermal_throttle; bool support_ct_kill_exit; - const bool support_wimax_coexist; u8 plcp_delta_threshold; s32 chain_noise_scale; unsigned int wd_timeout; @@ -248,7 +165,6 @@ struct iwl_base_params { const bool shadow_reg_enable; const bool hd_v2; const bool no_idle_support; - const bool wd_disable; }; /* @@ -292,28 +208,21 @@ struct iwl_ht_params { * @eeprom_ver: EEPROM version * @eeprom_calib_ver: EEPROM calibration version * @lib: pointer to the lib ops - * @additional_nic_config: additional nic configuration * @base_params: pointer to basic parameters * @ht_params: point to ht patameters * @bt_params: pointer to bt parameters * @need_temp_offset_calib: need to perform temperature offset calibration * @no_xtal_calib: some devices do not need crystal calibration data, * don't send it to those - * @scan_rx_antennas: available antenna for scan operation * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) * @adv_pm: advance power management * @rx_with_siso_diversity: 1x1 device with rx antenna diversity * @internal_wimax_coex: internal wifi/wimax combo device - * @iq_invert: I/Q inversion * @temp_offset_v2: support v2 of temperature offset calibration * - * We enable the driver to be backward compatible wrt API version. The - * driver specifies which APIs it supports (with @ucode_api_max being the - * highest and @ucode_api_min the lowest). Firmware will only be loaded if - * it has a supported API version. - * - * The ideal usage of this infrastructure is to treat a new ucode API - * release as a new hardware revision. + * We enable the driver to be backward compatible wrt. hardware features. 
+ * API differences in uCode shouldn't be handled here but through TLVs + * and/or the uCode API version instead. */ struct iwl_cfg { /* params specific to an individual device within a device family */ @@ -322,14 +231,13 @@ struct iwl_cfg { const unsigned int ucode_api_max; const unsigned int ucode_api_ok; const unsigned int ucode_api_min; + const enum iwl_device_family device_family; const u32 max_data_size; const u32 max_inst_size; u8 valid_tx_ant; u8 valid_rx_ant; u16 eeprom_ver; u16 eeprom_calib_ver; - const struct iwl_lib_ops *lib; - void (*additional_nic_config)(struct iwl_priv *priv); /* params not likely to change within a device family */ const struct iwl_base_params *base_params; /* params likely to change within a device family */ @@ -337,99 +245,11 @@ struct iwl_cfg { const struct iwl_bt_params *bt_params; const bool need_temp_offset_calib; /* if used set to true */ const bool no_xtal_calib; - u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; enum iwl_led_mode led_mode; const bool adv_pm; const bool rx_with_siso_diversity; const bool internal_wimax_coex; - const bool iq_invert; const bool temp_offset_v2; }; -/** - * struct iwl_shared - shared fields for all the layers of the driver - * - * @status: STATUS_* - * @wowlan: are we running wowlan uCode - * @valid_contexts: microcode/device supports multiple contexts - * @bus: pointer to the bus layer data - * @cfg: see struct iwl_cfg - * @priv: pointer to the upper layer data - * @trans: pointer to the transport layer data - * @nic: pointer to the nic data - * @hw_params: see struct iwl_hw_params - * @lock: protect general shared data - * @eeprom: pointer to the eeprom/OTP image - * @ucode_type: indicator of loaded ucode image - * @device_pointers: pointers to ucode event tables - */ -struct iwl_shared { - unsigned long status; - u8 valid_contexts; - - const struct iwl_cfg *cfg; - struct iwl_trans *trans; - void *drv; - struct iwl_hw_params hw_params; - const struct iwl_fw *fw; - - /* eeprom -- this is in the card's little endian byte order */ - u8 *eeprom; - - /* ucode related variables */ - enum iwl_ucode_type ucode_type; - - struct { - u32 error_event_table; - u32 log_event_table; - } device_pointers; - -}; - -/*Whatever _m is (iwl_trans, iwl_priv, these macros will work */ -#define cfg(_m) ((_m)->shrd->cfg) -#define trans(_m) ((_m)->shrd->trans) -#define hw_params(_m) ((_m)->shrd->hw_params) - -static inline bool iwl_have_debug_level(u32 level) -{ - return iwlagn_mod_params.debug_level & level; -} - -enum iwl_rxon_context_id { - IWL_RXON_CTX_BSS, - IWL_RXON_CTX_PAN, - - NUM_IWL_RXON_CTX -}; - -int iwlagn_hw_valid_rtc_data_addr(u32 addr); -const char *get_cmd_string(u8 cmd); - -#define IWL_CMD(x) case x: return #x - -/***************************************************** -* DRIVER STATUS FUNCTIONS -******************************************************/ -#define STATUS_HCMD_ACTIVE 0 /* host command in progress */ -/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */ -#define STATUS_INT_ENABLED 2 -#define STATUS_RF_KILL_HW 3 -#define STATUS_CT_KILL 4 -#define STATUS_INIT 5 -#define STATUS_ALIVE 6 -#define STATUS_READY 7 -#define STATUS_TEMPERATURE 8 -#define STATUS_GEO_CONFIGURED 9 -#define STATUS_EXIT_PENDING 10 -#define STATUS_STATISTICS 12 -#define STATUS_SCANNING 13 -#define STATUS_SCAN_ABORTING 14 -#define STATUS_SCAN_HW 15 -#define STATUS_POWER_PMI 16 -#define STATUS_FW_ERROR 17 -#define STATUS_DEVICE_ENABLED 18 -#define STATUS_CHANNEL_SWITCH_PENDING 19 -#define STATUS_SCAN_COMPLETE 20 - -#endif /* #__iwl_shared_h__ */ +#endif /* 
__IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c deleted file mode 100644 index 46490d3b95b..00000000000 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ /dev/null @@ -1,1480 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/etherdevice.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include <net/mac80211.h> - -#include "iwl-eeprom.h" -#include "iwl-debug.h" -#include "iwl-core.h" -#include "iwl-io.h" -#include "iwl-power.h" -#include "iwl-shared.h" -#include "iwl-agn.h" -#include "iwl-trans.h" - -const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; - -#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ -#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ -static void iwl_init_ht_hw_capab(const struct iwl_priv *priv, - struct ieee80211_sta_ht_cap *ht_info, - enum ieee80211_band band) -{ - u16 max_bit_rate = 0; - u8 rx_chains_num = hw_params(priv).rx_chains_num; - u8 tx_chains_num = hw_params(priv).tx_chains_num; - - ht_info->cap = 0; - memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); - - ht_info->ht_supported = true; - - if (cfg(priv)->ht_params && - cfg(priv)->ht_params->ht_greenfield_support) - ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; - ht_info->cap |= IEEE80211_HT_CAP_SGI_20; - max_bit_rate = MAX_BIT_RATE_20_MHZ; - if (hw_params(priv).ht40_channel & BIT(band)) { - ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; - ht_info->cap |= IEEE80211_HT_CAP_SGI_40; - ht_info->mcs.rx_mask[4] = 0x01; - max_bit_rate = MAX_BIT_RATE_40_MHZ; - } - - if (iwlagn_mod_params.amsdu_size_8K) - ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; - - ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; - ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; - - ht_info->mcs.rx_mask[0] = 0xFF; - if (rx_chains_num >= 2) - ht_info->mcs.rx_mask[1] = 0xFF; - if (rx_chains_num >= 3) - ht_info->mcs.rx_mask[2] = 0xFF; - - /* Highest supported Rx data rate */ - max_bit_rate *= rx_chains_num; - WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); - ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); - - /* Tx MCS capabilities */ - ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; - if (tx_chains_num != rx_chains_num) { - ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; - ht_info->mcs.tx_params |= ((tx_chains_num - 1) << - 
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); - } -} - -/** - * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom - */ -int iwl_init_geos(struct iwl_priv *priv) -{ - struct iwl_channel_info *ch; - struct ieee80211_supported_band *sband; - struct ieee80211_channel *channels; - struct ieee80211_channel *geo_ch; - struct ieee80211_rate *rates; - int i = 0; - s8 max_tx_power = IWLAGN_TX_POWER_TARGET_POWER_MIN; - - if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates || - priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) { - IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n"); - set_bit(STATUS_GEO_CONFIGURED, &priv->status); - return 0; - } - - channels = kcalloc(priv->channel_count, - sizeof(struct ieee80211_channel), GFP_KERNEL); - if (!channels) - return -ENOMEM; - - rates = kcalloc(IWL_RATE_COUNT_LEGACY, sizeof(struct ieee80211_rate), - GFP_KERNEL); - if (!rates) { - kfree(channels); - return -ENOMEM; - } - - /* 5.2GHz channels start after the 2.4GHz channels */ - sband = &priv->bands[IEEE80211_BAND_5GHZ]; - sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)]; - /* just OFDM */ - sband->bitrates = &rates[IWL_FIRST_OFDM_RATE]; - sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE; - - if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE) - iwl_init_ht_hw_capab(priv, &sband->ht_cap, - IEEE80211_BAND_5GHZ); - - sband = &priv->bands[IEEE80211_BAND_2GHZ]; - sband->channels = channels; - /* OFDM & CCK */ - sband->bitrates = rates; - sband->n_bitrates = IWL_RATE_COUNT_LEGACY; - - if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE) - iwl_init_ht_hw_capab(priv, &sband->ht_cap, - IEEE80211_BAND_2GHZ); - - priv->ieee_channels = channels; - priv->ieee_rates = rates; - - for (i = 0; i < priv->channel_count; i++) { - ch = &priv->channel_info[i]; - - /* FIXME: might be removed if scan is OK */ - if (!is_channel_valid(ch)) - continue; - - sband = &priv->bands[ch->band]; - - geo_ch = &sband->channels[sband->n_channels++]; - - geo_ch->center_freq = - ieee80211_channel_to_frequency(ch->channel, ch->band); - geo_ch->max_power = ch->max_power_avg; - geo_ch->max_antenna_gain = 0xff; - geo_ch->hw_value = ch->channel; - - if (is_channel_valid(ch)) { - if (!(ch->flags & EEPROM_CHANNEL_IBSS)) - geo_ch->flags |= IEEE80211_CHAN_NO_IBSS; - - if (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) - geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN; - - if (ch->flags & EEPROM_CHANNEL_RADAR) - geo_ch->flags |= IEEE80211_CHAN_RADAR; - - geo_ch->flags |= ch->ht40_extension_channel; - - if (ch->max_power_avg > max_tx_power) - max_tx_power = ch->max_power_avg; - } else { - geo_ch->flags |= IEEE80211_CHAN_DISABLED; - } - - IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", - ch->channel, geo_ch->center_freq, - is_channel_a_band(ch) ? "5.2" : "2.4", - geo_ch->flags & IEEE80211_CHAN_DISABLED ? - "restricted" : "valid", - geo_ch->flags); - } - - priv->tx_power_device_lmt = max_tx_power; - priv->tx_power_user_lmt = max_tx_power; - priv->tx_power_next = max_tx_power; - - if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) && - hw_params(priv).sku & EEPROM_SKU_CAP_BAND_52GHZ) { - IWL_INFO(priv, "Incorrectly detected BG card as ABG. 
" - "Please send your %s to maintainer.\n", - trans(priv)->hw_id_str); - hw_params(priv).sku &= ~EEPROM_SKU_CAP_BAND_52GHZ; - } - - IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n", - priv->bands[IEEE80211_BAND_2GHZ].n_channels, - priv->bands[IEEE80211_BAND_5GHZ].n_channels); - - set_bit(STATUS_GEO_CONFIGURED, &priv->status); - - return 0; -} - -/* - * iwl_free_geos - undo allocations in iwl_init_geos - */ -void iwl_free_geos(struct iwl_priv *priv) -{ - kfree(priv->ieee_channels); - kfree(priv->ieee_rates); - clear_bit(STATUS_GEO_CONFIGURED, &priv->status); -} - -static bool iwl_is_channel_extension(struct iwl_priv *priv, - enum ieee80211_band band, - u16 channel, u8 extension_chan_offset) -{ - const struct iwl_channel_info *ch_info; - - ch_info = iwl_get_channel_info(priv, band, channel); - if (!is_channel_valid(ch_info)) - return false; - - if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE) - return !(ch_info->ht40_extension_channel & - IEEE80211_CHAN_NO_HT40PLUS); - else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW) - return !(ch_info->ht40_extension_channel & - IEEE80211_CHAN_NO_HT40MINUS); - - return false; -} - -bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_sta_ht_cap *ht_cap) -{ - if (!ctx->ht.enabled || !ctx->ht.is_40mhz) - return false; - - /* - * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 - * the bit will not set if it is pure 40MHz case - */ - if (ht_cap && !ht_cap->ht_supported) - return false; - -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (priv->disable_ht40) - return false; -#endif - - return iwl_is_channel_extension(priv, priv->band, - le16_to_cpu(ctx->staging.channel), - ctx->ht.extension_chan_offset); -} - -static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) -{ - u16 new_val; - u16 beacon_factor; - - /* - * If mac80211 hasn't given us a beacon interval, program - * the default into the device (not checking this here - * would cause the adjustment below to return the maximum - * value, which may break PAN.) - */ - if (!beacon_val) - return DEFAULT_BEACON_INTERVAL; - - /* - * If the beacon interval we obtained from the peer - * is too large, we'll have to wake up more often - * (and in IBSS case, we'll beacon too much) - * - * For example, if max_beacon_val is 4096, and the - * requested beacon interval is 7000, we'll have to - * use 3500 to be able to wake up on the beacons. - * - * This could badly influence beacon detection stats. - */ - - beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; - new_val = beacon_val / beacon_factor; - - if (!new_val) - new_val = max_beacon_val; - - return new_val; -} - -int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx) -{ - u64 tsf; - s32 interval_tm, rem; - struct ieee80211_conf *conf = NULL; - u16 beacon_int; - struct ieee80211_vif *vif = ctx->vif; - - conf = &priv->hw->conf; - - lockdep_assert_held(&priv->mutex); - - memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd)); - - ctx->timing.timestamp = cpu_to_le64(priv->timestamp); - ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval); - - beacon_int = vif ? 
vif->bss_conf.beacon_int : 0; - - /* - * TODO: For IBSS we need to get atim_window from mac80211, - * for now just always use 0 - */ - ctx->timing.atim_window = 0; - - if (ctx->ctxid == IWL_RXON_CTX_PAN && - (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) && - iwl_is_associated(priv, IWL_RXON_CTX_BSS) && - priv->contexts[IWL_RXON_CTX_BSS].vif && - priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) { - ctx->timing.beacon_interval = - priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval; - beacon_int = le16_to_cpu(ctx->timing.beacon_interval); - } else if (ctx->ctxid == IWL_RXON_CTX_BSS && - iwl_is_associated(priv, IWL_RXON_CTX_PAN) && - priv->contexts[IWL_RXON_CTX_PAN].vif && - priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int && - (!iwl_is_associated_ctx(ctx) || !ctx->vif || - !ctx->vif->bss_conf.beacon_int)) { - ctx->timing.beacon_interval = - priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval; - beacon_int = le16_to_cpu(ctx->timing.beacon_interval); - } else { - beacon_int = iwl_adjust_beacon_interval(beacon_int, - IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT); - ctx->timing.beacon_interval = cpu_to_le16(beacon_int); - } - - ctx->beacon_int = beacon_int; - - tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */ - interval_tm = beacon_int * TIME_UNIT; - rem = do_div(tsf, interval_tm); - ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem); - - ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1; - - IWL_DEBUG_ASSOC(priv, - "beacon interval %d beacon timer %d beacon tim %d\n", - le16_to_cpu(ctx->timing.beacon_interval), - le32_to_cpu(ctx->timing.beacon_init_val), - le16_to_cpu(ctx->timing.atim_window)); - - return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd, - CMD_SYNC, sizeof(ctx->timing), &ctx->timing); -} - -void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - int hw_decrypt) -{ - struct iwl_rxon_cmd *rxon = &ctx->staging; - - if (hw_decrypt) - rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; - else - rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; - -} - -/* validate RXON structure is valid */ -int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx) -{ - struct iwl_rxon_cmd *rxon = &ctx->staging; - u32 errors = 0; - - if (rxon->flags & RXON_FLG_BAND_24G_MSK) { - if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) { - IWL_WARN(priv, "check 2.4G: wrong narrow\n"); - errors |= BIT(0); - } - if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) { - IWL_WARN(priv, "check 2.4G: wrong radar\n"); - errors |= BIT(1); - } - } else { - if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) { - IWL_WARN(priv, "check 5.2G: not short slot!\n"); - errors |= BIT(2); - } - if (rxon->flags & RXON_FLG_CCK_MSK) { - IWL_WARN(priv, "check 5.2G: CCK!\n"); - errors |= BIT(3); - } - } - if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) { - IWL_WARN(priv, "mac/bssid mcast!\n"); - errors |= BIT(4); - } - - /* make sure basic rates 6Mbps and 1Mbps are supported */ - if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 && - (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) { - IWL_WARN(priv, "neither 1 nor 6 are basic\n"); - errors |= BIT(5); - } - - if (le16_to_cpu(rxon->assoc_id) > 2007) { - IWL_WARN(priv, "aid > 2007\n"); - errors |= BIT(6); - } - - if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) - == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) { - IWL_WARN(priv, "CCK and short slot\n"); - errors |= BIT(7); - } - - if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) - == 
(RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { - IWL_WARN(priv, "CCK and auto detect"); - errors |= BIT(8); - } - - if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK | - RXON_FLG_TGG_PROTECT_MSK)) == - RXON_FLG_TGG_PROTECT_MSK) { - IWL_WARN(priv, "TGg but no auto-detect\n"); - errors |= BIT(9); - } - - if (rxon->channel == 0) { - IWL_WARN(priv, "zero channel is invalid\n"); - errors |= BIT(10); - } - - WARN(errors, "Invalid RXON (%#x), channel %d", - errors, le16_to_cpu(rxon->channel)); - - return errors ? -EINVAL : 0; -} - -/** - * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed - * @priv: staging_rxon is compared to active_rxon - * - * If the RXON structure is changing enough to require a new tune, - * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that - * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. - */ -int iwl_full_rxon_required(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - const struct iwl_rxon_cmd *staging = &ctx->staging; - const struct iwl_rxon_cmd *active = &ctx->active; - -#define CHK(cond) \ - if ((cond)) { \ - IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \ - return 1; \ - } - -#define CHK_NEQ(c1, c2) \ - if ((c1) != (c2)) { \ - IWL_DEBUG_INFO(priv, "need full RXON - " \ - #c1 " != " #c2 " - %d != %d\n", \ - (c1), (c2)); \ - return 1; \ - } - - /* These items are only settable from the full RXON command */ - CHK(!iwl_is_associated_ctx(ctx)); - CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr)); - CHK(compare_ether_addr(staging->node_addr, active->node_addr)); - CHK(compare_ether_addr(staging->wlap_bssid_addr, - active->wlap_bssid_addr)); - CHK_NEQ(staging->dev_type, active->dev_type); - CHK_NEQ(staging->channel, active->channel); - CHK_NEQ(staging->air_propagation, active->air_propagation); - CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates, - active->ofdm_ht_single_stream_basic_rates); - CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates, - active->ofdm_ht_dual_stream_basic_rates); - CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates, - active->ofdm_ht_triple_stream_basic_rates); - CHK_NEQ(staging->assoc_id, active->assoc_id); - - /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can - * be updated with the RXON_ASSOC command -- however only some - * flag transitions are allowed using RXON_ASSOC */ - - /* Check if we are not switching bands */ - CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK, - active->flags & RXON_FLG_BAND_24G_MSK); - - /* Check if we are switching association toggle */ - CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK, - active->filter_flags & RXON_FILTER_ASSOC_MSK); - -#undef CHK -#undef CHK_NEQ - - return 0; -} - -static void _iwl_set_rxon_ht(struct iwl_priv *priv, - struct iwl_ht_config *ht_conf, - struct iwl_rxon_context *ctx) -{ - struct iwl_rxon_cmd *rxon = &ctx->staging; - - if (!ctx->ht.enabled) { - rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | - RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | - RXON_FLG_HT40_PROT_MSK | - RXON_FLG_HT_PROT_MSK); - return; - } - - /* FIXME: if the definition of ht.protection changed, the "translation" - * will be needed for rxon->flags - */ - rxon->flags |= cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS); - - /* Set up channel bandwidth: - * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ - /* clear the HT channel mode before set the mode */ - rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | - RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); - if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) { - /* pure ht40 
*/ - if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { - rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; - /* Note: control channel is opposite of extension channel */ - switch (ctx->ht.extension_chan_offset) { - case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: - rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; - break; - case IEEE80211_HT_PARAM_CHA_SEC_BELOW: - rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; - break; - } - } else { - /* Note: control channel is opposite of extension channel */ - switch (ctx->ht.extension_chan_offset) { - case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: - rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); - rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; - break; - case IEEE80211_HT_PARAM_CHA_SEC_BELOW: - rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; - rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; - break; - case IEEE80211_HT_PARAM_CHA_SEC_NONE: - default: - /* channel location only valid if in Mixed mode */ - IWL_ERR(priv, "invalid extension channel offset\n"); - break; - } - } - } else { - rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; - } - - iwlagn_set_rxon_chain(priv, ctx); - - IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X " - "extension channel offset 0x%x\n", - le32_to_cpu(rxon->flags), ctx->ht.protection, - ctx->ht.extension_chan_offset); -} - -void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf) -{ - struct iwl_rxon_context *ctx; - - for_each_context(priv, ctx) - _iwl_set_rxon_ht(priv, ht_conf, ctx); -} - -/* Return valid, unused, channel for a passive scan to reset the RF */ -u8 iwl_get_single_channel_number(struct iwl_priv *priv, - enum ieee80211_band band) -{ - const struct iwl_channel_info *ch_info; - int i; - u8 channel = 0; - u8 min, max; - struct iwl_rxon_context *ctx; - - if (band == IEEE80211_BAND_5GHZ) { - min = 14; - max = priv->channel_count; - } else { - min = 0; - max = 14; - } - - for (i = min; i < max; i++) { - bool busy = false; - - for_each_context(priv, ctx) { - busy = priv->channel_info[i].channel == - le16_to_cpu(ctx->staging.channel); - if (busy) - break; - } - - if (busy) - continue; - - channel = priv->channel_info[i].channel; - ch_info = iwl_get_channel_info(priv, band, channel); - if (is_channel_valid(ch_info)) - break; - } - - return channel; -} - -/** - * iwl_set_rxon_channel - Set the band and channel values in staging RXON - * @ch: requested channel as a pointer to struct ieee80211_channel - - * NOTE: Does not commit to the hardware; it sets appropriate bit fields - * in the staging RXON flag structure based on the ch->band - */ -void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, - struct iwl_rxon_context *ctx) -{ - enum ieee80211_band band = ch->band; - u16 channel = ch->hw_value; - - if ((le16_to_cpu(ctx->staging.channel) == channel) && - (priv->band == band)) - return; - - ctx->staging.channel = cpu_to_le16(channel); - if (band == IEEE80211_BAND_5GHZ) - ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK; - else - ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; - - priv->band = band; - - IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band); - -} - -void iwl_set_flags_for_band(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - enum ieee80211_band band, - struct ieee80211_vif *vif) -{ - if (band == IEEE80211_BAND_5GHZ) { - ctx->staging.flags &= - ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK - | RXON_FLG_CCK_MSK); - ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; - } else { - /* Copied from iwl_post_associate() */ - if (vif && 
vif->bss_conf.use_short_slot) - ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; - - ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; - ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK; - ctx->staging.flags &= ~RXON_FLG_CCK_MSK; - } -} - -/* - * initialize rxon structure with default values from eeprom - */ -void iwl_connection_init_rx_config(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - const struct iwl_channel_info *ch_info; - - memset(&ctx->staging, 0, sizeof(ctx->staging)); - - if (!ctx->vif) { - ctx->staging.dev_type = ctx->unused_devtype; - } else switch (ctx->vif->type) { - case NL80211_IFTYPE_AP: - ctx->staging.dev_type = ctx->ap_devtype; - break; - - case NL80211_IFTYPE_STATION: - ctx->staging.dev_type = ctx->station_devtype; - ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; - break; - - case NL80211_IFTYPE_ADHOC: - ctx->staging.dev_type = ctx->ibss_devtype; - ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; - ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK | - RXON_FILTER_ACCEPT_GRP_MSK; - break; - - default: - IWL_ERR(priv, "Unsupported interface type %d\n", - ctx->vif->type); - break; - } - -#if 0 - /* TODO: Figure out when short_preamble would be set and cache from - * that */ - if (!hw_to_local(priv->hw)->short_preamble) - ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; - else - ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; -#endif - - ch_info = iwl_get_channel_info(priv, priv->band, - le16_to_cpu(ctx->active.channel)); - - if (!ch_info) - ch_info = &priv->channel_info[0]; - - ctx->staging.channel = cpu_to_le16(ch_info->channel); - priv->band = ch_info->band; - - iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif); - - ctx->staging.ofdm_basic_rates = - (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; - ctx->staging.cck_basic_rates = - (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; - - /* clear both MIX and PURE40 mode flag */ - ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED | - RXON_FLG_CHANNEL_MODE_PURE_40); - if (ctx->vif) - memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN); - - ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff; - ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff; - ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff; -} - -void iwl_set_rate(struct iwl_priv *priv) -{ - const struct ieee80211_supported_band *hw = NULL; - struct ieee80211_rate *rate; - struct iwl_rxon_context *ctx; - int i; - - hw = iwl_get_hw_mode(priv, priv->band); - if (!hw) { - IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n"); - return; - } - - priv->active_rate = 0; - - for (i = 0; i < hw->n_bitrates; i++) { - rate = &(hw->bitrates[i]); - if (rate->hw_value < IWL_RATE_COUNT_LEGACY) - priv->active_rate |= (1 << rate->hw_value); - } - - IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate); - - for_each_context(priv, ctx) { - ctx->staging.cck_basic_rates = - (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; - - ctx->staging.ofdm_basic_rates = - (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; - } -} - -void iwl_chswitch_done(struct iwl_priv *priv, bool is_success) -{ - /* - * MULTI-FIXME - * See iwlagn_mac_channel_switch. 
- */ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) - ieee80211_chswitch_done(ctx->vif, is_success); -} - -#ifdef CONFIG_IWLWIFI_DEBUG -void iwl_print_rx_config_cmd(struct iwl_priv *priv, - enum iwl_rxon_context_id ctxid) -{ - struct iwl_rxon_context *ctx = &priv->contexts[ctxid]; - struct iwl_rxon_cmd *rxon = &ctx->staging; - - IWL_DEBUG_RADIO(priv, "RX CONFIG:\n"); - iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); - IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); - IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); - IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n", - le32_to_cpu(rxon->filter_flags)); - IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type); - IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n", - rxon->ofdm_basic_rates); - IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates); - IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr); - IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr); - IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); -} -#endif - -static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) -{ - unsigned int reload_msec; - unsigned long reload_jiffies; - -#ifdef CONFIG_IWLWIFI_DEBUG - if (iwl_have_debug_level(IWL_DL_FW_ERRORS)) - iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS); -#endif - - /* uCode is no longer loaded. */ - priv->ucode_loaded = false; - - /* Set the FW error flag -- cleared on iwl_down */ - set_bit(STATUS_FW_ERROR, &priv->shrd->status); - - /* Cancel currently queued command. */ - clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status); - - iwl_abort_notification_waits(&priv->notif_wait); - - /* Keep the restart process from trying to send host - * commands by clearing the ready bit */ - clear_bit(STATUS_READY, &priv->status); - - wake_up(&trans(priv)->wait_command_queue); - - if (!ondemand) { - /* - * If firmware keep reloading, then it indicate something - * serious wrong and firmware having problem to recover - * from it. 
Instead of keep trying which will fill the syslog - * and hang the system, let's just stop it - */ - reload_jiffies = jiffies; - reload_msec = jiffies_to_msecs((long) reload_jiffies - - (long) priv->reload_jiffies); - priv->reload_jiffies = reload_jiffies; - if (reload_msec <= IWL_MIN_RELOAD_DURATION) { - priv->reload_count++; - if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) { - IWL_ERR(priv, "BUG_ON, Stop restarting\n"); - return; - } - } else - priv->reload_count = 0; - } - - if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { - if (iwlagn_mod_params.restart_fw) { - IWL_DEBUG_FW_ERRORS(priv, - "Restarting adapter due to uCode error.\n"); - queue_work(priv->workqueue, &priv->restart); - } else - IWL_DEBUG_FW_ERRORS(priv, - "Detected FW error, but not restarting\n"); - } -} - -int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) -{ - int ret; - s8 prev_tx_power; - bool defer; - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - - lockdep_assert_held(&priv->mutex); - - if (priv->tx_power_user_lmt == tx_power && !force) - return 0; - - if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) { - IWL_WARN(priv, - "Requested user TXPOWER %d below lower limit %d.\n", - tx_power, - IWLAGN_TX_POWER_TARGET_POWER_MIN); - return -EINVAL; - } - - if (tx_power > priv->tx_power_device_lmt) { - IWL_WARN(priv, - "Requested user TXPOWER %d above upper limit %d.\n", - tx_power, priv->tx_power_device_lmt); - return -EINVAL; - } - - if (!iwl_is_ready_rf(priv)) - return -EIO; - - /* scan complete and commit_rxon use tx_power_next value, - * it always need to be updated for newest request */ - priv->tx_power_next = tx_power; - - /* do not set tx power when scanning or channel changing */ - defer = test_bit(STATUS_SCANNING, &priv->status) || - memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)); - if (defer && !force) { - IWL_DEBUG_INFO(priv, "Deferring tx power set\n"); - return 0; - } - - prev_tx_power = priv->tx_power_user_lmt; - priv->tx_power_user_lmt = tx_power; - - ret = iwlagn_send_tx_power(priv); - - /* if fail to set tx_power, restore the orig. tx power */ - if (ret) { - priv->tx_power_user_lmt = prev_tx_power; - priv->tx_power_next = prev_tx_power; - } - return ret; -} - -void iwl_send_bt_config(struct iwl_priv *priv) -{ - struct iwl_bt_cmd bt_cmd = { - .lead_time = BT_LEAD_TIME_DEF, - .max_kill = BT_MAX_KILL_DEF, - .kill_ack_mask = 0, - .kill_cts_mask = 0, - }; - - if (!iwlagn_mod_params.bt_coex_active) - bt_cmd.flags = BT_COEX_DISABLE; - else - bt_cmd.flags = BT_COEX_ENABLE; - - priv->bt_enable_flag = bt_cmd.flags; - IWL_DEBUG_INFO(priv, "BT coex %s\n", - (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active"); - - if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, - CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd)) - IWL_ERR(priv, "failed to send BT Coex Config\n"); -} - -int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear) -{ - struct iwl_statistics_cmd statistics_cmd = { - .configuration_flags = - clear ? 
IWL_STATS_CONF_CLEAR_STATS : 0, - }; - - if (flags & CMD_ASYNC) - return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, - CMD_ASYNC, - sizeof(struct iwl_statistics_cmd), - &statistics_cmd); - else - return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, - CMD_SYNC, - sizeof(struct iwl_statistics_cmd), - &statistics_cmd); -} - - - - -#ifdef CONFIG_IWLWIFI_DEBUGFS - -#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES) - -void iwl_reset_traffic_log(struct iwl_priv *priv) -{ - priv->tx_traffic_idx = 0; - priv->rx_traffic_idx = 0; - if (priv->tx_traffic) - memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE); - if (priv->rx_traffic) - memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE); -} - -int iwl_alloc_traffic_mem(struct iwl_priv *priv) -{ - u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE; - - if (iwl_have_debug_level(IWL_DL_TX)) { - if (!priv->tx_traffic) { - priv->tx_traffic = - kzalloc(traffic_size, GFP_KERNEL); - if (!priv->tx_traffic) - return -ENOMEM; - } - } - if (iwl_have_debug_level(IWL_DL_RX)) { - if (!priv->rx_traffic) { - priv->rx_traffic = - kzalloc(traffic_size, GFP_KERNEL); - if (!priv->rx_traffic) - return -ENOMEM; - } - } - iwl_reset_traffic_log(priv); - return 0; -} - -void iwl_free_traffic_mem(struct iwl_priv *priv) -{ - kfree(priv->tx_traffic); - priv->tx_traffic = NULL; - - kfree(priv->rx_traffic); - priv->rx_traffic = NULL; -} - -void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv, - u16 length, struct ieee80211_hdr *header) -{ - __le16 fc; - u16 len; - - if (likely(!iwl_have_debug_level(IWL_DL_TX))) - return; - - if (!priv->tx_traffic) - return; - - fc = header->frame_control; - if (ieee80211_is_data(fc)) { - len = (length > IWL_TRAFFIC_ENTRY_SIZE) - ? IWL_TRAFFIC_ENTRY_SIZE : length; - memcpy((priv->tx_traffic + - (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)), - header, len); - priv->tx_traffic_idx = - (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; - } -} - -void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv, - u16 length, struct ieee80211_hdr *header) -{ - __le16 fc; - u16 len; - - if (likely(!iwl_have_debug_level(IWL_DL_RX))) - return; - - if (!priv->rx_traffic) - return; - - fc = header->frame_control; - if (ieee80211_is_data(fc)) { - len = (length > IWL_TRAFFIC_ENTRY_SIZE) - ? 
IWL_TRAFFIC_ENTRY_SIZE : length; - memcpy((priv->rx_traffic + - (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)), - header, len); - priv->rx_traffic_idx = - (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; - } -} - -const char *get_mgmt_string(int cmd) -{ - switch (cmd) { - IWL_CMD(MANAGEMENT_ASSOC_REQ); - IWL_CMD(MANAGEMENT_ASSOC_RESP); - IWL_CMD(MANAGEMENT_REASSOC_REQ); - IWL_CMD(MANAGEMENT_REASSOC_RESP); - IWL_CMD(MANAGEMENT_PROBE_REQ); - IWL_CMD(MANAGEMENT_PROBE_RESP); - IWL_CMD(MANAGEMENT_BEACON); - IWL_CMD(MANAGEMENT_ATIM); - IWL_CMD(MANAGEMENT_DISASSOC); - IWL_CMD(MANAGEMENT_AUTH); - IWL_CMD(MANAGEMENT_DEAUTH); - IWL_CMD(MANAGEMENT_ACTION); - default: - return "UNKNOWN"; - - } -} - -const char *get_ctrl_string(int cmd) -{ - switch (cmd) { - IWL_CMD(CONTROL_BACK_REQ); - IWL_CMD(CONTROL_BACK); - IWL_CMD(CONTROL_PSPOLL); - IWL_CMD(CONTROL_RTS); - IWL_CMD(CONTROL_CTS); - IWL_CMD(CONTROL_ACK); - IWL_CMD(CONTROL_CFEND); - IWL_CMD(CONTROL_CFENDACK); - default: - return "UNKNOWN"; - - } -} - -void iwl_clear_traffic_stats(struct iwl_priv *priv) -{ - memset(&priv->tx_stats, 0, sizeof(struct traffic_stats)); - memset(&priv->rx_stats, 0, sizeof(struct traffic_stats)); -} - -/* - * if CONFIG_IWLWIFI_DEBUGFS defined, iwl_update_stats function will - * record all the MGMT, CTRL and DATA pkt for both TX and Rx pass. - * Use debugFs to display the rx/rx_statistics - * if CONFIG_IWLWIFI_DEBUGFS not being defined, then no MGMT and CTRL - * information will be recorded, but DATA pkt still will be recorded - * for the reason of iwl_led.c need to control the led blinking based on - * number of tx and rx data. - * - */ -void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len) -{ - struct traffic_stats *stats; - - if (is_tx) - stats = &priv->tx_stats; - else - stats = &priv->rx_stats; - - if (ieee80211_is_mgmt(fc)) { - switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { - case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): - stats->mgmt[MANAGEMENT_ASSOC_REQ]++; - break; - case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): - stats->mgmt[MANAGEMENT_ASSOC_RESP]++; - break; - case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): - stats->mgmt[MANAGEMENT_REASSOC_REQ]++; - break; - case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): - stats->mgmt[MANAGEMENT_REASSOC_RESP]++; - break; - case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): - stats->mgmt[MANAGEMENT_PROBE_REQ]++; - break; - case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): - stats->mgmt[MANAGEMENT_PROBE_RESP]++; - break; - case cpu_to_le16(IEEE80211_STYPE_BEACON): - stats->mgmt[MANAGEMENT_BEACON]++; - break; - case cpu_to_le16(IEEE80211_STYPE_ATIM): - stats->mgmt[MANAGEMENT_ATIM]++; - break; - case cpu_to_le16(IEEE80211_STYPE_DISASSOC): - stats->mgmt[MANAGEMENT_DISASSOC]++; - break; - case cpu_to_le16(IEEE80211_STYPE_AUTH): - stats->mgmt[MANAGEMENT_AUTH]++; - break; - case cpu_to_le16(IEEE80211_STYPE_DEAUTH): - stats->mgmt[MANAGEMENT_DEAUTH]++; - break; - case cpu_to_le16(IEEE80211_STYPE_ACTION): - stats->mgmt[MANAGEMENT_ACTION]++; - break; - } - } else if (ieee80211_is_ctl(fc)) { - switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { - case cpu_to_le16(IEEE80211_STYPE_BACK_REQ): - stats->ctrl[CONTROL_BACK_REQ]++; - break; - case cpu_to_le16(IEEE80211_STYPE_BACK): - stats->ctrl[CONTROL_BACK]++; - break; - case cpu_to_le16(IEEE80211_STYPE_PSPOLL): - stats->ctrl[CONTROL_PSPOLL]++; - break; - case cpu_to_le16(IEEE80211_STYPE_RTS): - stats->ctrl[CONTROL_RTS]++; - break; - case cpu_to_le16(IEEE80211_STYPE_CTS): - stats->ctrl[CONTROL_CTS]++; - break; - case 
cpu_to_le16(IEEE80211_STYPE_ACK): - stats->ctrl[CONTROL_ACK]++; - break; - case cpu_to_le16(IEEE80211_STYPE_CFEND): - stats->ctrl[CONTROL_CFEND]++; - break; - case cpu_to_le16(IEEE80211_STYPE_CFENDACK): - stats->ctrl[CONTROL_CFENDACK]++; - break; - } - } else { - /* data */ - stats->data_cnt++; - stats->data_bytes += len; - } -} -#endif - -static void iwl_force_rf_reset(struct iwl_priv *priv) -{ - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - if (!iwl_is_any_associated(priv)) { - IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n"); - return; - } - /* - * There is no easy and better way to force reset the radio, - * the only known method is switching channel which will force to - * reset and tune the radio. - * Use internal short scan (single channel) operation to should - * achieve this objective. - * Driver should reset the radio when number of consecutive missed - * beacon, or any other uCode error condition detected. - */ - IWL_DEBUG_INFO(priv, "perform radio reset.\n"); - iwl_internal_short_hw_scan(priv); -} - - -int iwl_force_reset(struct iwl_priv *priv, int mode, bool external) -{ - struct iwl_force_reset *force_reset; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return -EINVAL; - - if (mode >= IWL_MAX_FORCE_RESET) { - IWL_DEBUG_INFO(priv, "invalid reset request.\n"); - return -EINVAL; - } - force_reset = &priv->force_reset[mode]; - force_reset->reset_request_count++; - if (!external) { - if (force_reset->last_force_reset_jiffies && - time_after(force_reset->last_force_reset_jiffies + - force_reset->reset_duration, jiffies)) { - IWL_DEBUG_INFO(priv, "force reset rejected\n"); - force_reset->reset_reject_count++; - return -EAGAIN; - } - } - force_reset->reset_success_count++; - force_reset->last_force_reset_jiffies = jiffies; - IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode); - switch (mode) { - case IWL_RF_RESET: - iwl_force_rf_reset(priv); - break; - case IWL_FW_RESET: - /* - * if the request is from external(ex: debugfs), - * then always perform the request in regardless the module - * parameter setting - * if the request is from internal (uCode error or driver - * detect failure), then fw_restart module parameter - * need to be check before performing firmware reload - */ - if (!external && !iwlagn_mod_params.restart_fw) { - IWL_DEBUG_INFO(priv, "Cancel firmware reload based on " - "module parameter setting\n"); - break; - } - IWL_ERR(priv, "On demand firmware reload\n"); - iwlagn_fw_error(priv, true); - break; - } - return 0; -} - - -int iwl_cmd_echo_test(struct iwl_priv *priv) -{ - int ret; - struct iwl_host_cmd cmd = { - .id = REPLY_ECHO, - .len = { 0 }, - .flags = CMD_SYNC, - }; - - ret = iwl_dvm_send_cmd(priv, &cmd); - if (ret) - IWL_ERR(priv, "echo testing fail: 0X%x\n", ret); - else - IWL_DEBUG_INFO(priv, "echo testing pass\n"); - return ret; -} - -static inline int iwl_check_stuck_queue(struct iwl_priv *priv, int txq) -{ - if (iwl_trans_check_stuck_queue(trans(priv), txq)) { - int ret; - ret = iwl_force_reset(priv, IWL_FW_RESET, false); - return (ret == -EAGAIN) ? 0 : 1; - } - return 0; -} - -/* - * Making watchdog tick be a quarter of timeout assure we will - * discover the queue hung between timeout and 1.25*timeout - */ -#define IWL_WD_TICK(timeout) ((timeout) / 4) - -/* - * Watchdog timer callback, we check each tx queue for stuck, if if hung - * we reset the firmware. If everything is fine just rearm the timer. 
- */ -void iwl_bg_watchdog(unsigned long data) -{ - struct iwl_priv *priv = (struct iwl_priv *)data; - int cnt; - unsigned long timeout; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - if (iwl_is_rfkill(priv)) - return; - - timeout = hw_params(priv).wd_timeout; - if (timeout == 0) - return; - - /* monitor and check for stuck queues */ - for (cnt = 0; cnt < cfg(priv)->base_params->num_of_queues; cnt++) - if (iwl_check_stuck_queue(priv, cnt)) - return; - - mod_timer(&priv->watchdog, jiffies + - msecs_to_jiffies(IWL_WD_TICK(timeout))); -} - -void iwl_setup_watchdog(struct iwl_priv *priv) -{ - unsigned int timeout = hw_params(priv).wd_timeout; - - if (!iwlagn_mod_params.wd_disable) { - /* use system default */ - if (timeout && !cfg(priv)->base_params->wd_disable) - mod_timer(&priv->watchdog, - jiffies + - msecs_to_jiffies(IWL_WD_TICK(timeout))); - else - del_timer(&priv->watchdog); - } else { - /* module parameter overwrite default configuration */ - if (timeout && iwlagn_mod_params.wd_disable == 2) - mod_timer(&priv->watchdog, - jiffies + - msecs_to_jiffies(IWL_WD_TICK(timeout))); - else - del_timer(&priv->watchdog); - } -} - -/** - * iwl_beacon_time_mask_low - mask of lower 32 bit of beacon time - * @priv -- pointer to iwl_priv data structure - * @tsf_bits -- number of bits need to shift for masking) - */ -static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv, - u16 tsf_bits) -{ - return (1 << tsf_bits) - 1; -} - -/** - * iwl_beacon_time_mask_high - mask of higher 32 bit of beacon time - * @priv -- pointer to iwl_priv data structure - * @tsf_bits -- number of bits need to shift for masking) - */ -static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv, - u16 tsf_bits) -{ - return ((1 << (32 - tsf_bits)) - 1) << tsf_bits; -} - -/* - * extended beacon time format - * time in usec will be changed into a 32-bit value in extended:internal format - * the extended part is the beacon counts - * the internal part is the time in usec within one beacon interval - */ -u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval) -{ - u32 quot; - u32 rem; - u32 interval = beacon_interval * TIME_UNIT; - - if (!interval || !usec) - return 0; - - quot = (usec / interval) & - (iwl_beacon_time_mask_high(priv, IWLAGN_EXT_BEACON_TIME_POS) >> - IWLAGN_EXT_BEACON_TIME_POS); - rem = (usec % interval) & iwl_beacon_time_mask_low(priv, - IWLAGN_EXT_BEACON_TIME_POS); - - return (quot << IWLAGN_EXT_BEACON_TIME_POS) + rem; -} - -/* base is usually what we get from ucode with each received frame, - * the same as HW timer counter counting down - */ -__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base, - u32 addon, u32 beacon_interval) -{ - u32 base_low = base & iwl_beacon_time_mask_low(priv, - IWLAGN_EXT_BEACON_TIME_POS); - u32 addon_low = addon & iwl_beacon_time_mask_low(priv, - IWLAGN_EXT_BEACON_TIME_POS); - u32 interval = beacon_interval * TIME_UNIT; - u32 res = (base & iwl_beacon_time_mask_high(priv, - IWLAGN_EXT_BEACON_TIME_POS)) + - (addon & iwl_beacon_time_mask_high(priv, - IWLAGN_EXT_BEACON_TIME_POS)); - - if (base_low > addon_low) - res += base_low - addon_low; - else if (base_low < addon_low) { - res += interval + base_low - addon_low; - res += (1 << IWLAGN_EXT_BEACON_TIME_POS); - } else - res += (1 << IWLAGN_EXT_BEACON_TIME_POS); - - return cpu_to_le32(res); -} - -void iwl_nic_error(struct iwl_op_mode *op_mode) -{ - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - - iwlagn_fw_error(priv, false); -} - -void iwl_set_hw_rfkill_state(struct 
iwl_op_mode *op_mode, bool state) -{ - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - - if (state) - set_bit(STATUS_RF_KILL_HW, &priv->status); - else - clear_bit(STATUS_RF_KILL_HW, &priv->status); - - wiphy_rfkill_set_hw_state(priv->hw->wiphy, state); -} - -void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) -{ - struct ieee80211_tx_info *info; - - info = IEEE80211_SKB_CB(skb); - kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1])); - dev_kfree_skb_any(skb); -} diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h deleted file mode 100644 index 635eb685ede..00000000000 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ /dev/null @@ -1,234 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - -#ifndef __iwl_core_h__ -#define __iwl_core_h__ - -#include "iwl-dev.h" -#include "iwl-io.h" - -/************************ - * forward declarations * - ************************/ -struct iwl_host_cmd; -struct iwl_cmd; - -#define TIME_UNIT 1024 - -struct iwl_lib_ops { - /* set hw dependent parameters */ - void (*set_hw_params)(struct iwl_priv *priv); - int (*set_channel_switch)(struct iwl_priv *priv, - struct ieee80211_channel_switch *ch_switch); - /* device specific configuration */ - void (*nic_config)(struct iwl_priv *priv); - - /* eeprom operations (as defined in iwl-eeprom.h) */ - struct iwl_eeprom_ops eeprom_ops; - - /* temperature */ - void (*temperature)(struct iwl_priv *priv); -}; - -/*************************** - * L i b * - ***************************/ - -void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - int hw_decrypt); -int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx); -int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx); -void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, - struct iwl_rxon_context *ctx); -void iwl_set_flags_for_band(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - enum ieee80211_band band, - struct ieee80211_vif *vif); -u8 iwl_get_single_channel_number(struct iwl_priv *priv, - enum ieee80211_band band); -void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf); -bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_sta_ht_cap *ht_cap); -void iwl_connection_init_rx_config(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); -void iwl_set_rate(struct iwl_priv *priv); -int iwl_cmd_echo_test(struct iwl_priv *priv); -#ifdef CONFIG_IWLWIFI_DEBUGFS -int iwl_alloc_traffic_mem(struct iwl_priv *priv); -void iwl_free_traffic_mem(struct iwl_priv *priv); -void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv, - u16 length, struct ieee80211_hdr *header); -void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv, - u16 length, struct ieee80211_hdr *header); -const char *get_mgmt_string(int cmd); -const char *get_ctrl_string(int cmd); -void iwl_clear_traffic_stats(struct iwl_priv *priv); -void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, - u16 len); -void iwl_reset_traffic_log(struct iwl_priv *priv); - -#else -static inline int iwl_alloc_traffic_mem(struct iwl_priv *priv) -{ - return 0; -} -static inline void iwl_free_traffic_mem(struct iwl_priv *priv) -{ -} -static inline void iwl_reset_traffic_log(struct iwl_priv *priv) -{ -} -static inline void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv, - u16 length, struct ieee80211_hdr *header) -{ -} -static inline void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv, - u16 length, struct ieee80211_hdr *header) -{ -} -static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx, - __le16 fc, u16 len) -{ -} 
-#endif - -/***************************************************** -* RX -******************************************************/ -void iwl_chswitch_done(struct iwl_priv *priv, bool is_success); - -void iwl_setup_watchdog(struct iwl_priv *priv); -/***************************************************** - * TX power - ****************************************************/ -int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force); - -/******************************************************************************* - * Scanning - ******************************************************************************/ -void iwl_init_scan_params(struct iwl_priv *priv); -int iwl_scan_cancel(struct iwl_priv *priv); -void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); -void iwl_force_scan_end(struct iwl_priv *priv); -void iwl_internal_short_hw_scan(struct iwl_priv *priv); -int iwl_force_reset(struct iwl_priv *priv, int mode, bool external); -void iwl_setup_rx_scan_handlers(struct iwl_priv *priv); -void iwl_setup_scan_deferred_work(struct iwl_priv *priv); -void iwl_cancel_scan_deferred_work(struct iwl_priv *priv); -int __must_check iwl_scan_initiate(struct iwl_priv *priv, - struct ieee80211_vif *vif, - enum iwl_scan_type scan_type, - enum ieee80211_band band); - -/* For faster active scanning, scan will move to the next channel if fewer than - * PLCP_QUIET_THRESH packets are heard on this channel within - * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell - * time if it's a quiet channel (nothing responded to our probe, and there's - * no other traffic). - * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ -#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */ -#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */ - -#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7) - -/* traffic log definitions */ -#define IWL_TRAFFIC_ENTRIES (256) -#define IWL_TRAFFIC_ENTRY_SIZE (64) - -/***************************************************** - * S e n d i n g H o s t C o m m a n d s * - *****************************************************/ - -void iwl_bg_watchdog(unsigned long data); -u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval); -__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base, - u32 addon, u32 beacon_interval); - - -/***************************************************** -* GEOS -******************************************************/ -int iwl_init_geos(struct iwl_priv *priv); -void iwl_free_geos(struct iwl_priv *priv); - -extern void iwl_send_bt_config(struct iwl_priv *priv); -extern int iwl_send_statistics_request(struct iwl_priv *priv, - u8 flags, bool clear); - -int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx); - -static inline const struct ieee80211_supported_band *iwl_get_hw_mode( - struct iwl_priv *priv, enum ieee80211_band band) -{ - return priv->hw->wiphy->bands[band]; -} - -static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv) -{ - return cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist; -} - -extern bool bt_siso_mode; - -#endif /* __iwl_core_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h index 5f96ce105f0..59750543fce 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -430,6 +430,9 @@ #define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c) #define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) +/* Used to enable DBGM */ +#define HBUS_TARG_TEST_REG (HBUS_BASE+0x05c) + 
/* * Per-Tx-queue write pointer (index, really!) * Indicates index to next TFD that driver will fill (1 past latest filled). diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/iwlwifi/iwl-debug.c index 059efabda18..2d1b42847b9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debug.c +++ b/drivers/net/wireless/iwlwifi/iwl-debug.c @@ -63,6 +63,7 @@ #include <linux/interrupt.h> #include "iwl-debug.h" +#include "iwl-devtrace.h" #define __iwl_fn(fn) \ void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \ diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h index a6b32a11e10..8376b842bdb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h @@ -29,10 +29,13 @@ #ifndef __iwl_debug_h__ #define __iwl_debug_h__ -#include "iwl-shared.h" -#include "iwl-devtrace.h" +#include "iwl-modparams.h" -struct iwl_priv; + +static inline bool iwl_have_debug_level(u32 level) +{ + return iwlwifi_mod_params.debug_level & level; +} void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace, const char *fmt, ...); @@ -41,10 +44,10 @@ void __iwl_info(struct device *dev, const char *fmt, ...); void __iwl_crit(struct device *dev, const char *fmt, ...); /* No matter what is m (priv, bus, trans), this will work */ -#define IWL_ERR(m, f, a...) __iwl_err(trans(m)->dev, false, false, f, ## a) -#define IWL_WARN(m, f, a...) __iwl_warn(trans(m)->dev, f, ## a) -#define IWL_INFO(m, f, a...) __iwl_info(trans(m)->dev, f, ## a) -#define IWL_CRIT(m, f, a...) __iwl_crit(trans(m)->dev, f, ## a) +#define IWL_ERR(m, f, a...) __iwl_err((m)->dev, false, false, f, ## a) +#define IWL_WARN(m, f, a...) __iwl_warn((m)->dev, f, ## a) +#define IWL_INFO(m, f, a...) __iwl_info((m)->dev, f, ## a) +#define IWL_CRIT(m, f, a...) __iwl_crit((m)->dev, f, ## a) #if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING) void __iwl_dbg(struct device *dev, @@ -65,9 +68,9 @@ do { \ } while (0) #define IWL_DEBUG(m, level, fmt, args...) \ - __iwl_dbg(trans(m)->dev, level, false, __func__, fmt, ##args) + __iwl_dbg((m)->dev, level, false, __func__, fmt, ##args) #define IWL_DEBUG_LIMIT(m, level, fmt, args...) 
\ - __iwl_dbg(trans(m)->dev, level, true, __func__, fmt, ##args) + __iwl_dbg((m)->dev, level, true, __func__, fmt, ##args) #ifdef CONFIG_IWLWIFI_DEBUG #define iwl_print_hex_dump(m, level, p, len) \ @@ -80,19 +83,6 @@ do { \ #define iwl_print_hex_dump(m, level, p, len) #endif /* CONFIG_IWLWIFI_DEBUG */ -#ifdef CONFIG_IWLWIFI_DEBUGFS -int iwl_dbgfs_register(struct iwl_priv *priv, const char *name); -void iwl_dbgfs_unregister(struct iwl_priv *priv); -#else -static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) -{ - return 0; -} -static inline void iwl_dbgfs_unregister(struct iwl_priv *priv) -{ -} -#endif /* CONFIG_IWLWIFI_DEBUGFS */ - /* * To use the debug system: * diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c index 2bbaebd99ad..e7c157e5ebe 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c @@ -37,9 +37,9 @@ #include "iwl-dev.h" #include "iwl-debug.h" -#include "iwl-core.h" #include "iwl-io.h" #include "iwl-agn.h" +#include "iwl-modparams.h" /* create and remove of files */ #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ @@ -111,105 +111,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .llseek = generic_file_llseek, \ }; -static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - char *buf; - int pos = 0; - - int cnt; - ssize_t ret; - const size_t bufsz = 100 + - sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX); - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - pos += scnprintf(buf + pos, bufsz - pos, "Management:\n"); - for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) { - pos += scnprintf(buf + pos, bufsz - pos, - "\t%25s\t\t: %u\n", - get_mgmt_string(cnt), - priv->tx_stats.mgmt[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "Control\n"); - for (cnt = 0; cnt < CONTROL_MAX; cnt++) { - pos += scnprintf(buf + pos, bufsz - pos, - "\t%25s\t\t: %u\n", - get_ctrl_string(cnt), - priv->tx_stats.ctrl[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "Data:\n"); - pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n", - priv->tx_stats.data_cnt); - pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n", - priv->tx_stats.data_bytes); - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_clear_traffic_statistics_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - u32 clear_flag; - char buf[8]; - int buf_size; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%x", &clear_flag) != 1) - return -EFAULT; - iwl_clear_traffic_stats(priv); - - return count; -} - -static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) { - - struct iwl_priv *priv = file->private_data; - char *buf; - int pos = 0; - int cnt; - ssize_t ret; - const size_t bufsz = 100 + - sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX); - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - pos += scnprintf(buf + pos, bufsz - pos, "Management:\n"); - for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) { - pos += scnprintf(buf + pos, bufsz - pos, - "\t%25s\t\t: %u\n", - get_mgmt_string(cnt), - priv->rx_stats.mgmt[cnt]); - } - 
pos += scnprintf(buf + pos, bufsz - pos, "Control:\n"); - for (cnt = 0; cnt < CONTROL_MAX; cnt++) { - pos += scnprintf(buf + pos, bufsz - pos, - "\t%25s\t\t: %u\n", - get_ctrl_string(cnt), - priv->rx_stats.ctrl[cnt]); - } - pos += scnprintf(buf + pos, bufsz - pos, "Data:\n"); - pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n", - priv->rx_stats.data_cnt); - pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n", - priv->rx_stats.data_bytes); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -230,11 +131,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, /* default is to dump the entire data segment */ if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { priv->dbgfs_sram_offset = 0x800000; - if (!priv->ucode_loaded) { - IWL_ERR(priv, "No uCode has been loadded.\n"); + if (!priv->ucode_loaded) return -EINVAL; - } - img = &priv->fw->img[priv->shrd->ucode_type]; + img = &priv->fw->img[priv->cur_ucode]; priv->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; } len = priv->dbgfs_sram_len; @@ -259,7 +158,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, sram = priv->dbgfs_sram_offset & ~0x3; /* read the first u32 from sram */ - val = iwl_read_targ_mem(trans(priv), sram); + val = iwl_read_targ_mem(priv->trans, sram); for (; len; len--) { /* put the address at the start of every line */ @@ -278,7 +177,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, if (++offset == 4) { sram += 4; offset = 0; - val = iwl_read_targ_mem(trans(priv), sram); + val = iwl_read_targ_mem(priv->trans, sram); } /* put in extra spaces and split lines for human readability */ @@ -369,14 +268,19 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, i, station->sta.sta.addr, station->sta.station_flags_msk); pos += scnprintf(buf + pos, bufsz - pos, - "TID\tseq_num\trate_n_flags\n"); + "TID seqno next_rclmd " + "rate_n_flags state txq\n"); for (j = 0; j < IWL_MAX_TID_COUNT; j++) { tid_data = &priv->tid_data[i][j]; pos += scnprintf(buf + pos, bufsz - pos, - "%d:\t%#x\t%#x", + "%d: 0x%.4x 0x%.4x 0x%.8x " + "%d %.2d", j, tid_data->seq_number, - tid_data->agg.rate_n_flags); + tid_data->next_reclaimed, + tid_data->agg.rate_n_flags, + tid_data->agg.state, + tid_data->agg.txq_id); if (tid_data->agg.wait_for_ba) pos += scnprintf(buf + pos, bufsz - pos, @@ -403,30 +307,25 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file, const u8 *ptr; char *buf; u16 eeprom_ver; - size_t eeprom_len = cfg(priv)->base_params->eeprom_size; + size_t eeprom_len = priv->cfg->base_params->eeprom_size; buf_size = 4 * eeprom_len + 256; - if (eeprom_len % 16) { - IWL_ERR(priv, "NVM size is not multiple of 16.\n"); + if (eeprom_len % 16) return -ENODATA; - } - ptr = priv->shrd->eeprom; - if (!ptr) { - IWL_ERR(priv, "Invalid EEPROM/OTP memory\n"); + ptr = priv->eeprom; + if (!ptr) return -ENOMEM; - } /* 4 characters for byte 0xYY */ buf = kzalloc(buf_size, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); + if (!buf) return -ENOMEM; - } - eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION); + + eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION); pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, " "version: 0x%x\n", - (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP) + (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) ? 
"OTP" : "EEPROM", eeprom_ver); for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); @@ -456,10 +355,8 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf, return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); + if (!buf) return -ENOMEM; - } supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ); if (supp_band) { @@ -521,8 +418,6 @@ static ssize_t iwl_dbgfs_status_read(struct file *file, int pos = 0; const size_t bufsz = sizeof(buf); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n", - test_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status)); pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n", test_bit(STATUS_RF_KILL_HW, &priv->status)); pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n", @@ -544,9 +439,9 @@ static ssize_t iwl_dbgfs_status_read(struct file *file, pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n", test_bit(STATUS_SCAN_HW, &priv->status)); pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n", - test_bit(STATUS_POWER_PMI, &priv->shrd->status)); + test_bit(STATUS_POWER_PMI, &priv->status)); pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n", - test_bit(STATUS_FW_ERROR, &priv->shrd->status)); + test_bit(STATUS_FW_ERROR, &priv->status)); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } @@ -563,16 +458,14 @@ static ssize_t iwl_dbgfs_rx_handlers_read(struct file *file, ssize_t ret; buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); + if (!buf) return -ENOMEM; - } for (cnt = 0; cnt < REPLY_MAX; cnt++) { if (priv->rx_handlers_stats[cnt] > 0) pos += scnprintf(buf + pos, bufsz - pos, "\tRx handler[%36s]:\t\t %u\n", - get_cmd_string(cnt), + iwl_dvm_get_cmd_string(cnt), priv->rx_handlers_stats[cnt]); } @@ -680,11 +573,8 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file, return -EFAULT; if (!iwl_is_any_associated(priv)) priv->disable_ht40 = ht40 ? 
true : false; - else { - IWL_ERR(priv, "Sta associated with AP - " - "Change to 40MHz channel support is not allowed\n"); + else return -EINVAL; - } return count; } @@ -816,87 +706,6 @@ DEBUGFS_READ_FILE_OPS(temperature); DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override); DEBUGFS_READ_FILE_OPS(current_sleep_command); -static ssize_t iwl_dbgfs_traffic_log_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - int pos = 0, ofs = 0; - int cnt = 0, entry; - - char *buf; - int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) + - (cfg(priv)->base_params->num_of_queues * 32 * 8) + 400; - const u8 *ptr; - ssize_t ret; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate buffer\n"); - return -ENOMEM; - } - if (priv->tx_traffic && iwl_have_debug_level(IWL_DL_TX)) { - ptr = priv->tx_traffic; - pos += scnprintf(buf + pos, bufsz - pos, - "Tx Traffic idx: %u\n", priv->tx_traffic_idx); - for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) { - for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16; - entry++, ofs += 16) { - pos += scnprintf(buf + pos, bufsz - pos, - "0x%.4x ", ofs); - hex_dump_to_buffer(ptr + ofs, 16, 16, 2, - buf + pos, bufsz - pos, 0); - pos += strlen(buf + pos); - if (bufsz - pos > 0) - buf[pos++] = '\n'; - } - } - } - - if (priv->rx_traffic && iwl_have_debug_level(IWL_DL_RX)) { - ptr = priv->rx_traffic; - pos += scnprintf(buf + pos, bufsz - pos, - "Rx Traffic idx: %u\n", priv->rx_traffic_idx); - for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) { - for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16; - entry++, ofs += 16) { - pos += scnprintf(buf + pos, bufsz - pos, - "0x%.4x ", ofs); - hex_dump_to_buffer(ptr + ofs, 16, 16, 2, - buf + pos, bufsz - pos, 0); - pos += strlen(buf + pos); - if (bufsz - pos > 0) - buf[pos++] = '\n'; - } - } - } - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_traffic_log_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[8]; - int buf_size; - int traffic_log; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &traffic_log) != 1) - return -EFAULT; - if (traffic_log == 0) - iwl_reset_traffic_log(priv); - - return count; -} - static const char *fmt_value = " %-30s %10u\n"; static const char *fmt_hex = " %-30s 0x%02X\n"; static const char *fmt_table = " %-30s %10u %10u %10u %10u\n"; @@ -947,10 +756,8 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file, return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); + if (!buf) return -ENOMEM; - } /* * the statistic information display here is based on @@ -1376,10 +1183,8 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file, return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); + if (!buf) return -ENOMEM; - } /* the statistic information display here is based on * the last statistics notification from uCode @@ -1536,17 +1341,17 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file, if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) { pos += scnprintf(buf + pos, bufsz - pos, "tx power: (1/2 dB step)\n"); - if ((hw_params(priv).valid_tx_ant & 
ANT_A) && + if ((priv->hw_params.valid_tx_ant & ANT_A) && tx->tx_power.ant_a) pos += scnprintf(buf + pos, bufsz - pos, fmt_hex, "antenna A:", tx->tx_power.ant_a); - if ((hw_params(priv).valid_tx_ant & ANT_B) && + if ((priv->hw_params.valid_tx_ant & ANT_B) && tx->tx_power.ant_b) pos += scnprintf(buf + pos, bufsz - pos, fmt_hex, "antenna B:", tx->tx_power.ant_b); - if ((hw_params(priv).valid_tx_ant & ANT_C) && + if ((priv->hw_params.valid_tx_ant & ANT_C) && tx->tx_power.ant_c) pos += scnprintf(buf + pos, bufsz - pos, fmt_hex, "antenna C:", @@ -1578,10 +1383,8 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file, return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); + if (!buf) return -ENOMEM; - } /* the statistic information display here is based on * the last statistics notification from uCode @@ -1704,16 +1507,11 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file, ret = iwl_send_statistics_request(priv, CMD_SYNC, false); mutex_unlock(&priv->mutex); - if (ret) { - IWL_ERR(priv, - "Error sending statistics request: %zd\n", ret); + if (ret) return -EAGAIN; - } buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); + if (!buf) return -ENOMEM; - } /* * the statistic information display here is based on @@ -1790,10 +1588,8 @@ static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file, return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); + if (!buf) return -ENOMEM; - } pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n"); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n", @@ -1933,10 +1729,8 @@ static ssize_t iwl_dbgfs_sensitivity_read(struct file *file, data = &priv->sensitivity_data; buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); + if (!buf) return -ENOMEM; - } pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n", data->auto_corr_ofdm); @@ -2014,10 +1808,8 @@ static ssize_t iwl_dbgfs_chain_noise_read(struct file *file, data = &priv->chain_noise_data; buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(priv, "Can not allocate Buffer\n"); + if (!buf) return -ENOMEM; - } pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n", data->active_chains); @@ -2068,7 +1860,7 @@ static ssize_t iwl_dbgfs_power_save_status_read(struct file *file, const size_t bufsz = sizeof(buf); u32 pwrsave_status; - pwrsave_status = iwl_read32(trans(priv), CSR_GP_CNTRL) & + pwrsave_status = iwl_read32(priv->trans, CSR_GP_CNTRL) & CSR_GP_REG_POWER_SAVE_STATUS_MSK; pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: "); @@ -2262,59 +2054,39 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file, return count; } -static ssize_t iwl_dbgfs_force_reset_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) +static ssize_t iwl_dbgfs_rf_reset_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { struct iwl_priv *priv = file->private_data; - int i, pos = 0; + int pos = 0; char buf[300]; const size_t bufsz = sizeof(buf); - struct iwl_force_reset *force_reset; + struct iwl_rf_reset *rf_reset = &priv->rf_reset; + + pos += scnprintf(buf + pos, bufsz - pos, + "RF reset statistics\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request: %d\n", + rf_reset->reset_request_count); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request success: 
%d\n", + rf_reset->reset_success_count); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request reject: %d\n", + rf_reset->reset_reject_count); - for (i = 0; i < IWL_MAX_FORCE_RESET; i++) { - force_reset = &priv->force_reset[i]; - pos += scnprintf(buf + pos, bufsz - pos, - "Force reset method %d\n", i); - pos += scnprintf(buf + pos, bufsz - pos, - "\tnumber of reset request: %d\n", - force_reset->reset_request_count); - pos += scnprintf(buf + pos, bufsz - pos, - "\tnumber of reset request success: %d\n", - force_reset->reset_success_count); - pos += scnprintf(buf + pos, bufsz - pos, - "\tnumber of reset request reject: %d\n", - force_reset->reset_reject_count); - pos += scnprintf(buf + pos, bufsz - pos, - "\treset duration: %lu\n", - force_reset->reset_duration); - } return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } -static ssize_t iwl_dbgfs_force_reset_write(struct file *file, +static ssize_t iwl_dbgfs_rf_reset_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_priv *priv = file->private_data; - char buf[8]; - int buf_size; - int reset, ret; + int ret; - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &reset) != 1) - return -EINVAL; - switch (reset) { - case IWL_RF_RESET: - case IWL_FW_RESET: - ret = iwl_force_reset(priv, reset, true); - break; - default: - return -EINVAL; - } + ret = iwl_force_rf_reset(priv, true); return ret ? ret : count; } @@ -2342,29 +2114,6 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file, return count; } -static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - char buf[8]; - int buf_size; - int timeout; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &timeout) != 1) - return -EINVAL; - if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT) - timeout = IWL_DEF_WD_TIMEOUT; - - hw_params(priv).wd_timeout = timeout; - iwl_setup_watchdog(priv); - return count; -} - static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -2420,10 +2169,10 @@ static ssize_t iwl_dbgfs_protection_mode_read(struct file *file, char buf[40]; const size_t bufsz = sizeof(buf); - if (cfg(priv)->ht_params) + if (priv->cfg->ht_params) pos += scnprintf(buf + pos, bufsz - pos, "use %s for aggregation\n", - (hw_params(priv).use_rts_for_aggregation) ? + (priv->hw_params.use_rts_for_aggregation) ? 
"rts/cts" : "cts-to-self"); else pos += scnprintf(buf + pos, bufsz - pos, "N/A"); @@ -2440,7 +2189,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file, int buf_size; int rts; - if (!cfg(priv)->ht_params) + if (!priv->cfg->ht_params) return -EINVAL; memset(buf, 0, sizeof(buf)); @@ -2450,12 +2199,29 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file, if (sscanf(buf, "%d", &rts) != 1) return -EINVAL; if (rts) - hw_params(priv).use_rts_for_aggregation = true; + priv->hw_params.use_rts_for_aggregation = true; else - hw_params(priv).use_rts_for_aggregation = false; + priv->hw_params.use_rts_for_aggregation = false; return count; } +static int iwl_cmd_echo_test(struct iwl_priv *priv) +{ + int ret; + struct iwl_host_cmd cmd = { + .id = REPLY_ECHO, + .len = { 0 }, + .flags = CMD_SYNC, + }; + + ret = iwl_dvm_send_cmd(priv, &cmd); + if (ret) + IWL_ERR(priv, "echo testing fail: 0X%x\n", ret); + else + IWL_DEBUG_INFO(priv, "echo testing pass\n"); + return ret; +} + static ssize_t iwl_dbgfs_echo_test_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) @@ -2473,9 +2239,93 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file, return count; } -DEBUGFS_READ_FILE_OPS(rx_statistics); -DEBUGFS_READ_FILE_OPS(tx_statistics); -DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); +static ssize_t iwl_dbgfs_log_event_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char *buf; + int pos = 0; + ssize_t ret = -ENOMEM; + + ret = pos = iwl_dump_nic_event_log(priv, true, &buf, true); + if (buf) { + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + } + return ret; +} + +static ssize_t iwl_dbgfs_log_event_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + u32 event_log_flag; + char buf[8]; + int buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &event_log_flag) != 1) + return -EFAULT; + if (event_log_flag == 1) + iwl_dump_nic_event_log(priv, true, NULL, false); + + return count; +} + +static ssize_t iwl_dbgfs_calib_disabled_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[120]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, + "Sensitivity calibrations %s\n", + (priv->calib_disabled & + IWL_SENSITIVITY_CALIB_DISABLED) ? + "DISABLED" : "ENABLED"); + pos += scnprintf(buf + pos, bufsz - pos, + "Chain noise calibrations %s\n", + (priv->calib_disabled & + IWL_CHAIN_NOISE_CALIB_DISABLED) ? + "DISABLED" : "ENABLED"); + pos += scnprintf(buf + pos, bufsz - pos, + "Tx power calibrations %s\n", + (priv->calib_disabled & + IWL_TX_POWER_CALIB_DISABLED) ? 
+ "DISABLED" : "ENABLED"); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + u32 calib_disabled; + int buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%x", &calib_disabled) != 1) + return -EFAULT; + + priv->calib_disabled = calib_disabled; + + return count; +} + DEBUGFS_READ_FILE_OPS(ucode_rx_stats); DEBUGFS_READ_FILE_OPS(ucode_tx_stats); DEBUGFS_READ_FILE_OPS(ucode_general_stats); @@ -2483,20 +2333,20 @@ DEBUGFS_READ_FILE_OPS(sensitivity); DEBUGFS_READ_FILE_OPS(chain_noise); DEBUGFS_READ_FILE_OPS(power_save_status); DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics); -DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics); DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing); DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon); DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta); -DEBUGFS_READ_WRITE_FILE_OPS(force_reset); +DEBUGFS_READ_WRITE_FILE_OPS(rf_reset); DEBUGFS_READ_FILE_OPS(rxon_flags); DEBUGFS_READ_FILE_OPS(rxon_filter_flags); DEBUGFS_WRITE_FILE_OPS(txfifo_flush); DEBUGFS_READ_FILE_OPS(ucode_bt_stats); -DEBUGFS_WRITE_FILE_OPS(wd_timeout); DEBUGFS_READ_FILE_OPS(bt_traffic); DEBUGFS_READ_WRITE_FILE_OPS(protection_mode); DEBUGFS_READ_FILE_OPS(reply_tx_error); DEBUGFS_WRITE_FILE_OPS(echo_test); +DEBUGFS_READ_WRITE_FILE_OPS(log_event); +DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled); /* * Create the debugfs files and directories @@ -2537,15 +2387,11 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); DEBUGFS_ADD_FILE(temperature, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR); DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR); DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(rf_reset, dir_debug, S_IWUSR | S_IRUSR); DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); @@ -2558,17 +2404,16 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR); DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR); + if (iwl_advanced_bt_coexist(priv)) DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR); - DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, - &priv->disable_sens_cal); - DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf, - &priv->disable_chain_noise_cal); + /* Calibrations disabled/enabled status*/ + DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR); - if (iwl_trans_dbgfs_register(trans(priv), dir_debug)) + if (iwl_trans_dbgfs_register(priv->trans, dir_debug)) goto err; return 0; diff --git 
a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index 16956b777f9..70062379d0e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h @@ -38,6 +38,7 @@ #include <linux/slab.h> #include <linux/mutex.h> +#include "iwl-fw.h" #include "iwl-eeprom.h" #include "iwl-csr.h" #include "iwl-debug.h" @@ -47,12 +48,9 @@ #include "iwl-agn-rs.h" #include "iwl-agn-tt.h" #include "iwl-trans.h" -#include "iwl-shared.h" #include "iwl-op-mode.h" #include "iwl-notif-wait.h" -struct iwl_tx_queue; - /* CT-KILL constants */ #define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ #define CT_KILL_THRESHOLD 114 /* in Celsius */ @@ -196,6 +194,7 @@ struct iwl_qos_info { * These states relate to a specific RA / TID. * * @IWL_AGG_OFF: aggregation is not used + * @IWL_AGG_STARTING: aggregation are starting (between start and oper) * @IWL_AGG_ON: aggregation session is up * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the * HW queue to be empty from packets for this RA /TID. @@ -204,6 +203,7 @@ struct iwl_qos_info { */ enum iwl_agg_state { IWL_AGG_OFF = 0, + IWL_AGG_STARTING, IWL_AGG_ON, IWL_EMPTYING_HW_QUEUE_ADDBA, IWL_EMPTYING_HW_QUEUE_DELBA, @@ -220,8 +220,7 @@ enum iwl_agg_state { * Tx response (REPLY_TX), and the block ack notification * (REPLY_COMPRESSED_BA). * @state: state of the BA agreement establishment / tear down. - * @txq_id: Tx queue used by the BA session - used by the transport layer. - * Needed by the upper layer for debugfs only. + * @txq_id: Tx queue used by the BA session * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or * the first packet to be sent in legacy HW queue in Tx AGG stop flow. * Basically when next_reclaimed reaches ssn, we can tell mac80211 that @@ -507,44 +506,6 @@ struct reply_agg_tx_error_statistics { u32 unknown; }; -/* management statistics */ -enum iwl_mgmt_stats { - MANAGEMENT_ASSOC_REQ = 0, - MANAGEMENT_ASSOC_RESP, - MANAGEMENT_REASSOC_REQ, - MANAGEMENT_REASSOC_RESP, - MANAGEMENT_PROBE_REQ, - MANAGEMENT_PROBE_RESP, - MANAGEMENT_BEACON, - MANAGEMENT_ATIM, - MANAGEMENT_DISASSOC, - MANAGEMENT_AUTH, - MANAGEMENT_DEAUTH, - MANAGEMENT_ACTION, - MANAGEMENT_MAX, -}; -/* control statistics */ -enum iwl_ctrl_stats { - CONTROL_BACK_REQ = 0, - CONTROL_BACK, - CONTROL_PSPOLL, - CONTROL_RTS, - CONTROL_CTS, - CONTROL_ACK, - CONTROL_CFEND, - CONTROL_CFENDACK, - CONTROL_MAX, -}; - -struct traffic_stats { -#ifdef CONFIG_IWLWIFI_DEBUGFS - u32 mgmt[MANAGEMENT_MAX]; - u32 ctrl[CONTROL_MAX]; - u32 data_cnt; - u64 data_bytes; -#endif -}; - /* * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds * to perform continuous uCode event logging operation if enabled @@ -571,24 +532,7 @@ struct iwl_event_log { int wraps_more_count; }; -/* - * This is the threshold value of plcp error rate per 100mSecs. It is - * used to set and check for the validity of plcp_delta. 
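Note: the new IWL_AGG_STARTING state in the enum below covers the window between the driver agreeing to start a TX BA session and mac80211 switching it to operational; only IWL_AGG_ON means frames may actually go to the aggregation queue. A hedged sketch of a check that branches on it (hypothetical helper; the real transitions live in the agg start/oper/stop handlers, which are not part of this hunk):

	/* Hypothetical guard: use the aggregation queue only once the
	 * session is fully operational. */
	static bool example_use_agg_queue(const struct iwl_tid_data *tid_data)
	{
		return tid_data->agg.state == IWL_AGG_ON;
	}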
- */ -#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (1) -#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50) -#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100) -#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200) -#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255) -#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE (0) - #define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3) -#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) - -/* TX queue watchdog timeouts in mSecs */ -#define IWL_DEF_WD_TIMEOUT (2000) -#define IWL_LONG_WD_TIMEOUT (10000) -#define IWL_MAX_WD_TIMEOUT (120000) /* BT Antenna Coupling Threshold (dB) */ #define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35) @@ -598,18 +542,18 @@ struct iwl_event_log { #define IWL_MAX_CONTINUE_RELOAD_CNT 4 -enum iwl_reset { - IWL_RF_RESET = 0, - IWL_FW_RESET, - IWL_MAX_FORCE_RESET, -}; - -struct iwl_force_reset { +struct iwl_rf_reset { int reset_request_count; int reset_success_count; int reset_reject_count; - unsigned long reset_duration; - unsigned long last_force_reset_jiffies; + unsigned long last_reset_jiffies; +}; + +enum iwl_rxon_context_id { + IWL_RXON_CTX_BSS, + IWL_RXON_CTX_PAN, + + NUM_IWL_RXON_CTX }; /* extend beacon time format bit shifting */ @@ -623,6 +567,10 @@ struct iwl_force_reset { struct iwl_rxon_context { struct ieee80211_vif *vif; + u8 mcast_queue; + u8 ac_to_queue[IEEE80211_NUM_ACS]; + u8 ac_to_fifo[IEEE80211_NUM_ACS]; + /* * We could use the vif to indicate active, but we * also need it to be active during disabling when @@ -677,6 +625,52 @@ enum iwl_scan_type { IWL_SCAN_ROC, }; +/** + * struct iwl_hw_params + * + * Holds the module parameters + * + * @tx_chains_num: Number of TX chains + * @rx_chains_num: Number of RX chains + * @valid_tx_ant: usable antennas for TX + * @valid_rx_ant: usable antennas for RX + * @ht40_channel: is 40MHz width possible: BIT(IEEE80211_BAND_XXX) + * @sku: sku read from EEPROM + * @ct_kill_threshold: temperature threshold - in hw dependent unit + * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit + * relevant for 1000, 6000 and up + * @struct iwl_sensitivity_ranges: range of sensitivity values + * @use_rts_for_aggregation: use rts/cts protection for HT traffic + */ +struct iwl_hw_params { + u8 tx_chains_num; + u8 rx_chains_num; + u8 valid_tx_ant; + u8 valid_rx_ant; + u8 ht40_channel; + bool use_rts_for_aggregation; + u16 sku; + u32 ct_kill_threshold; + u32 ct_kill_exit_threshold; + + const struct iwl_sensitivity_ranges *sens; +}; + +struct iwl_lib_ops { + /* set hw dependent parameters */ + void (*set_hw_params)(struct iwl_priv *priv); + int (*set_channel_switch)(struct iwl_priv *priv, + struct ieee80211_channel_switch *ch_switch); + /* device specific configuration */ + void (*nic_config)(struct iwl_priv *priv); + + /* eeprom operations (as defined in iwl-eeprom.h) */ + struct iwl_eeprom_ops eeprom_ops; + + /* temperature */ + void (*temperature)(struct iwl_priv *priv); +}; + #ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE struct iwl_testmode_trace { u32 buff_size; @@ -701,6 +695,17 @@ struct iwl_wipan_noa_data { u8 data[]; }; +/* Calibration disabling bit mask */ +enum { + IWL_CALIB_ENABLE_ALL = 0, + + IWL_SENSITIVITY_CALIB_DISABLED = BIT(0), + IWL_CHAIN_NOISE_CALIB_DISABLED = BIT(1), + IWL_TX_POWER_CALIB_DISABLED = BIT(2), + + IWL_CALIB_DISABLE_ALL = 0xFFFFFFFF, +}; + #define IWL_OP_MODE_GET_DVM(_iwl_op_mode) \ ((struct iwl_priv *) ((_iwl_op_mode)->op_mode_specific)) @@ -710,9 +715,11 @@ struct iwl_wipan_noa_data { struct iwl_priv { - /*data shared among all the driver's layers */ - struct iwl_shared *shrd; + struct 
iwl_trans *trans; + struct device *dev; /* for debug prints only */ + const struct iwl_cfg *cfg; const struct iwl_fw *fw; + const struct iwl_lib_ops *lib; unsigned long status; spinlock_t sta_lock; @@ -720,6 +727,11 @@ struct iwl_priv { unsigned long transport_queue_stop; bool passive_no_rx; +#define IWL_INVALID_MAC80211_QUEUE 0xff + u8 queue_to_mac80211[IWL_MAX_HW_QUEUES]; + atomic_t queue_stop_count[IWL_MAX_HW_QUEUES]; + + unsigned long agg_q_alloc[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; /* ieee device used by generic ieee processing code */ struct ieee80211_hw *hw; @@ -730,7 +742,10 @@ struct iwl_priv { struct workqueue_struct *workqueue; + struct iwl_hw_params hw_params; + enum ieee80211_band band; + u8 valid_contexts; void (*pre_rx_handler)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb); @@ -763,8 +778,8 @@ struct iwl_priv { /*counters */ u32 rx_handlers_stats[REPLY_MAX]; - /* force reset */ - struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET]; + /* rf reset */ + struct iwl_rf_reset rf_reset; /* firmware reload counter and timestamp */ unsigned long reload_jiffies; @@ -810,8 +825,6 @@ struct iwl_priv { __le16 switch_channel; - u16 active_rate; - u8 start_calib; struct iwl_sensitivity_data sensitivity_data; struct iwl_chain_noise_data chain_noise_data; @@ -825,10 +838,6 @@ struct iwl_priv { int activity_timer_active; - /* counts mgmt, ctl, and data packets */ - struct traffic_stats tx_stats; - struct traffic_stats rx_stats; - struct iwl_power_mgr power_data; struct iwl_tt_mgmt thermal_throttle; @@ -912,6 +921,7 @@ struct iwl_priv { __le32 kill_ack_mask; __le32 kill_cts_mask; __le16 bt_valid; + bool reduced_txpower; u16 bt_on_thresh; u16 bt_duration; u16 dynamic_frag_thresh; @@ -948,23 +958,21 @@ struct iwl_priv { #ifdef CONFIG_IWLWIFI_DEBUGFS /* debugfs */ - u16 tx_traffic_idx; - u16 rx_traffic_idx; - u8 *tx_traffic; - u8 *rx_traffic; struct dentry *debugfs_dir; u32 dbgfs_sram_offset, dbgfs_sram_len; bool disable_ht40; void *wowlan_sram; #endif /* CONFIG_IWLWIFI_DEBUGFS */ + /* eeprom -- this is in the card's little endian byte order */ + u8 *eeprom; + enum iwl_nvm_type nvm_device_type; + struct work_struct txpower_work; - u32 disable_sens_cal; - u32 disable_chain_noise_cal; + u32 calib_disabled; struct work_struct run_time_calib_work; struct timer_list statistics_periodic; struct timer_list ucode_trace; - struct timer_list watchdog; struct iwl_event_log event_log; @@ -982,10 +990,18 @@ struct iwl_priv { __le64 replay_ctr; __le16 last_seq_ctl; bool have_rekey_data; + + /* device_pointers: pointers to ucode event tables */ + struct { + u32 error_event_table; + u32 log_event_table; + } device_pointers; + + /* indicator of loaded ucode image */ + enum iwl_ucode_type cur_ucode; }; /*iwl_priv */ extern struct kmem_cache *iwl_tx_cmd_pool; -extern struct iwl_mod_params iwlagn_mod_params; static inline struct iwl_rxon_context * iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif) @@ -998,7 +1014,7 @@ iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif) #define for_each_context(priv, ctx) \ for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \ ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \ - if (priv->shrd->valid_contexts & BIT(ctx->ctxid)) + if (priv->valid_contexts & BIT(ctx->ctxid)) static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx) { diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 6f312c77af5..3c72bad0ae5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -66,10 +66,13 @@ 
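Note: with valid_contexts moved onto struct iwl_priv, the for_each_context() iterator above filters the contexts array against that bitmap directly. A minimal usage sketch (hypothetical function, assuming only the declarations visible in this hunk):

	/* Hypothetical walk over the active RXON contexts (BSS and, when
	 * enabled, PAN). */
	static void example_for_all_contexts(struct iwl_priv *priv)
	{
		struct iwl_rxon_context *ctx;

		for_each_context(priv, ctx) {
			if (iwl_is_associated_ctx(ctx))
				IWL_DEBUG_INFO(priv, "context %d is associated\n",
					       ctx->ctxid);
		}
	}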
#include <linux/module.h> #include "iwl-drv.h" +#include "iwl-debug.h" #include "iwl-trans.h" -#include "iwl-shared.h" #include "iwl-op-mode.h" #include "iwl-agn-hw.h" +#include "iwl-fw.h" +#include "iwl-config.h" +#include "iwl-modparams.h" /* private includes */ #include "iwl-fw-file.h" @@ -77,8 +80,10 @@ /** * struct iwl_drv - drv common data * @fw: the iwl_fw structure - * @shrd: pointer to common shared structure * @op_mode: the running op_mode + * @trans: transport layer + * @dev: for debug prints only + * @cfg: configuration struct * @fw_index: firmware revision to try loading * @firmware_name: composite filename of ucode file to load * @request_firmware_complete: the firmware has been obtained from user space @@ -86,8 +91,10 @@ struct iwl_drv { struct iwl_fw fw; - struct iwl_shared *shrd; struct iwl_op_mode *op_mode; + struct iwl_trans *trans; + struct device *dev; + const struct iwl_cfg *cfg; int fw_index; /* firmware we're trying to load */ char firmware_name[25]; /* name of firmware file to load */ @@ -110,7 +117,7 @@ struct fw_sec { static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc) { if (desc->v_addr) - dma_free_coherent(trans(drv)->dev, desc->len, + dma_free_coherent(drv->trans->dev, desc->len, desc->v_addr, desc->p_addr); desc->v_addr = NULL; desc->len = 0; @@ -138,7 +145,7 @@ static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc, return -EINVAL; } - desc->v_addr = dma_alloc_coherent(trans(drv)->dev, sec->size, + desc->v_addr = dma_alloc_coherent(drv->trans->dev, sec->size, &desc->p_addr, GFP_KERNEL); if (!desc->v_addr) return -ENOMEM; @@ -156,8 +163,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context); static int iwl_request_firmware(struct iwl_drv *drv, bool first) { - const struct iwl_cfg *cfg = cfg(drv); - const char *name_pre = cfg->fw_name_pre; + const char *name_pre = drv->cfg->fw_name_pre; char tag[8]; if (first) { @@ -166,14 +172,14 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) strcpy(tag, UCODE_EXPERIMENTAL_TAG); } else if (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) { #endif - drv->fw_index = cfg->ucode_api_max; + drv->fw_index = drv->cfg->ucode_api_max; sprintf(tag, "%d", drv->fw_index); } else { drv->fw_index--; sprintf(tag, "%d", drv->fw_index); } - if (drv->fw_index < cfg->ucode_api_min) { + if (drv->fw_index < drv->cfg->ucode_api_min) { IWL_ERR(drv, "no suitable firmware found!\n"); return -ENOENT; } @@ -186,7 +192,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) drv->firmware_name); return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name, - trans(drv)->dev, + drv->trans->dev, GFP_KERNEL, drv, iwl_ucode_callback); } @@ -284,6 +290,7 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces, sec->offset = le32_to_cpu(sec_parse->offset); sec->data = sec_parse->data; + sec->size = size - sizeof(sec_parse->offset); ++img->sec_counter; @@ -414,9 +421,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, struct iwl_ucode_tlv *tlv; size_t len = ucode_raw->size; const u8 *data; - int wanted_alternative = iwlagn_mod_params.wanted_ucode_alternative; - int tmp; - u64 alternatives; u32 tlv_len; enum iwl_ucode_tlv_type tlv_type; const u8 *tlv_data; @@ -434,23 +438,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, return -EINVAL; } - /* - * Check which alternatives are present, and "downgrade" - * when the chosen alternative is not present, warning - * the user when that happens. 
Some files may not have - * any alternatives, so don't warn in that case. - */ - alternatives = le64_to_cpu(ucode->alternatives); - tmp = wanted_alternative; - if (wanted_alternative > 63) - wanted_alternative = 63; - while (wanted_alternative && !(alternatives & BIT(wanted_alternative))) - wanted_alternative--; - if (wanted_alternative && wanted_alternative != tmp) - IWL_WARN(drv, - "uCode alternative %d not available, choosing %d\n", - tmp, wanted_alternative); - drv->fw.ucode_ver = le32_to_cpu(ucode->ver); build = le32_to_cpu(ucode->build); @@ -475,14 +462,11 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, len -= sizeof(*ucode); while (len >= sizeof(*tlv)) { - u16 tlv_alt; - len -= sizeof(*tlv); tlv = (void *)data; tlv_len = le32_to_cpu(tlv->length); - tlv_type = le16_to_cpu(tlv->type); - tlv_alt = le16_to_cpu(tlv->alternative); + tlv_type = le32_to_cpu(tlv->type); tlv_data = tlv->data; if (len < tlv_len) { @@ -493,14 +477,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, len -= ALIGN(tlv_len, 4); data += sizeof(*tlv) + ALIGN(tlv_len, 4); - /* - * Alternative 0 is always valid. - * - * Skip alternative TLVs that are not selected. - */ - if (tlv_alt != 0 && tlv_alt != wanted_alternative) - continue; - switch (tlv_type) { case IWL_UCODE_TLV_INST: set_sec_data(pieces, IWL_UCODE_REGULAR, @@ -755,14 +731,13 @@ static int validate_sec_sizes(struct iwl_drv *drv, static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) { struct iwl_drv *drv = context; - const struct iwl_cfg *cfg = cfg(drv); struct iwl_fw *fw = &drv->fw; struct iwl_ucode_header *ucode; int err; struct iwl_firmware_pieces pieces; - const unsigned int api_max = cfg->ucode_api_max; - unsigned int api_ok = cfg->ucode_api_ok; - const unsigned int api_min = cfg->ucode_api_min; + const unsigned int api_max = drv->cfg->ucode_api_max; + unsigned int api_ok = drv->cfg->ucode_api_ok; + const unsigned int api_min = drv->cfg->ucode_api_min; u32 api_ver; int i; @@ -838,46 +813,10 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) IWL_INFO(drv, "loaded firmware version %s", drv->fw.fw_version); /* - * For any of the failures below (before allocating pci memory) - * we will try to load a version with a smaller API -- maybe the - * user just got a corrupted version of the latest API. 
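Note: with the alternatives mechanism dropped, the TLV loop in the hunk above reduces to a plain walk over { type, length, data } records, each payload padded to a 4-byte boundary. A simplified sketch of that walk (hypothetical record and function names; the real struct iwl_ucode_tlv carries the le32 fields read above):

	/* Simplified TLV walk mirroring the parser's structure; the bounds
	 * check is folded into one aligned-length comparison. */
	struct example_tlv {
		__le32 type;
		__le32 length;
		u8 data[0];
	};

	static int example_parse_tlvs(const u8 *data, size_t len)
	{
		while (len >= sizeof(struct example_tlv)) {
			const struct example_tlv *tlv = (const void *)data;
			u32 tlv_len = ALIGN(le32_to_cpu(tlv->length), 4);

			len -= sizeof(*tlv);
			if (len < tlv_len)
				return -EINVAL;	/* truncated record */

			/* dispatch on le32_to_cpu(tlv->type); payload at tlv->data */

			len -= tlv_len;
			data += sizeof(*tlv) + tlv_len;
		}
		return 0;
	}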
- */ - - IWL_DEBUG_INFO(drv, "f/w package hdr ucode version raw = 0x%x\n", - drv->fw.ucode_ver); - IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %Zd\n", - get_sec_size(&pieces, IWL_UCODE_REGULAR, - IWL_UCODE_SECTION_INST)); - IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %Zd\n", - get_sec_size(&pieces, IWL_UCODE_REGULAR, - IWL_UCODE_SECTION_DATA)); - IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %Zd\n", - get_sec_size(&pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST)); - IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %Zd\n", - get_sec_size(&pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)); - - /* Verify that uCode images will fit in card's SRAM */ - if (get_sec_size(&pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) > - cfg->max_inst_size) { - IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n", - get_sec_size(&pieces, IWL_UCODE_REGULAR, - IWL_UCODE_SECTION_INST)); - goto try_again; - } - - if (get_sec_size(&pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) > - cfg->max_data_size) { - IWL_ERR(drv, "uCode data len %Zd too large to fit in\n", - get_sec_size(&pieces, IWL_UCODE_REGULAR, - IWL_UCODE_SECTION_DATA)); - goto try_again; - } - - /* * In mvm uCode there is no difference between data and instructions * sections. */ - if (!fw->mvm_fw && validate_sec_sizes(drv, &pieces, cfg)) + if (!fw->mvm_fw && validate_sec_sizes(drv, &pieces, drv->cfg)) goto try_again; /* Allocate ucode buffers for card's bus-master loading ... */ @@ -901,14 +840,14 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) fw->init_evtlog_size = (pieces.init_evtlog_size - 16)/12; else fw->init_evtlog_size = - cfg->base_params->max_event_log_size; + drv->cfg->base_params->max_event_log_size; fw->init_errlog_ptr = pieces.init_errlog_ptr; fw->inst_evtlog_ptr = pieces.inst_evtlog_ptr; if (pieces.inst_evtlog_size) fw->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12; else fw->inst_evtlog_size = - cfg->base_params->max_event_log_size; + drv->cfg->base_params->max_event_log_size; fw->inst_errlog_ptr = pieces.inst_errlog_ptr; /* @@ -924,7 +863,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) release_firmware(ucode_raw); complete(&drv->request_firmware_complete); - drv->op_mode = iwl_dvm_ops.start(drv->shrd->trans, &drv->fw); + drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw); if (!drv->op_mode) goto out_unbind; @@ -944,42 +883,38 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) release_firmware(ucode_raw); out_unbind: complete(&drv->request_firmware_complete); - device_release_driver(trans(drv)->dev); + device_release_driver(drv->trans->dev); } -int iwl_drv_start(struct iwl_shared *shrd, - struct iwl_trans *trans, const struct iwl_cfg *cfg) +struct iwl_drv *iwl_drv_start(struct iwl_trans *trans, + const struct iwl_cfg *cfg) { struct iwl_drv *drv; int ret; - shrd->cfg = cfg; - drv = kzalloc(sizeof(*drv), GFP_KERNEL); - if (!drv) { - dev_printk(KERN_ERR, trans->dev, "Couldn't allocate iwl_drv"); - return -ENOMEM; - } - drv->shrd = shrd; - shrd->drv = drv; + if (!drv) + return NULL; + + drv->trans = trans; + drv->dev = trans->dev; + drv->cfg = cfg; init_completion(&drv->request_firmware_complete); ret = iwl_request_firmware(drv, true); if (ret) { - dev_printk(KERN_ERR, trans->dev, "Couldn't request the fw"); + IWL_ERR(trans, "Couldn't request the fw\n"); kfree(drv); - shrd->drv = NULL; + drv = NULL; } - return ret; + return drv; } -void iwl_drv_stop(struct iwl_shared 
*shrd) +void iwl_drv_stop(struct iwl_drv *drv) { - struct iwl_drv *drv = shrd->drv; - wait_for_completion(&drv->request_firmware_complete); /* op_mode can be NULL if its start failed */ @@ -989,5 +924,95 @@ void iwl_drv_stop(struct iwl_shared *shrd) iwl_dealloc_ucode(drv); kfree(drv); - shrd->drv = NULL; } + + +/* shared module parameters */ +struct iwl_mod_params iwlwifi_mod_params = { + .amsdu_size_8K = 1, + .restart_fw = 1, + .plcp_check = true, + .bt_coex_active = true, + .power_level = IWL_POWER_INDEX_1, + .bt_ch_announce = true, + .auto_agg = true, + /* the rest are 0 by default */ +}; + +#ifdef CONFIG_IWLWIFI_DEBUG +module_param_named(debug, iwlwifi_mod_params.debug_level, uint, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "debug output mask"); +#endif + +module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO); +MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); +module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO); +MODULE_PARM_DESC(11n_disable, + "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX"); +module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, + int, S_IRUGO); +MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); +module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO); +MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); + +module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling, + int, S_IRUGO); +MODULE_PARM_DESC(antenna_coupling, + "specify antenna coupling in dB (defualt: 0 dB)"); + +module_param_named(bt_ch_inhibition, iwlwifi_mod_params.bt_ch_announce, + bool, S_IRUGO); +MODULE_PARM_DESC(bt_ch_inhibition, + "Enable BT channel inhibition (default: enable)"); + +module_param_named(plcp_check, iwlwifi_mod_params.plcp_check, bool, S_IRUGO); +MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])"); + +module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO); +MODULE_PARM_DESC(wd_disable, + "Disable stuck queue watchdog timer 0=system default, " + "1=disable, 2=enable (default: 0)"); + +/* + * set bt_coex_active to true, uCode will do kill/defer + * every time the priority line is asserted (BT is sending signals on the + * priority line in the PCIx). + * set bt_coex_active to false, uCode will ignore the BT activity and + * perform the normal operation + * + * User might experience transmit issue on some platform due to WiFi/BT + * co-exist problem. 
The possible behaviors are: + * Able to scan and finding all the available AP + * Not able to associate with any AP + * On those platforms, WiFi communication can be restored by set + * "bt_coex_active" module parameter to "false" + * + * default: bt_coex_active = true (BT_COEX_ENABLE) + */ +module_param_named(bt_coex_active, iwlwifi_mod_params.bt_coex_active, + bool, S_IRUGO); +MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)"); + +module_param_named(led_mode, iwlwifi_mod_params.led_mode, int, S_IRUGO); +MODULE_PARM_DESC(led_mode, "0=system default, " + "1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)"); + +module_param_named(power_save, iwlwifi_mod_params.power_save, + bool, S_IRUGO); +MODULE_PARM_DESC(power_save, + "enable WiFi power management (default: disable)"); + +module_param_named(power_level, iwlwifi_mod_params.power_level, + int, S_IRUGO); +MODULE_PARM_DESC(power_level, + "default power save level (range from 1 - 5, default: 1)"); + +module_param_named(auto_agg, iwlwifi_mod_params.auto_agg, + bool, S_IRUGO); +MODULE_PARM_DESC(auto_agg, + "enable agg w/o check traffic load (default: enable)"); + +module_param_named(5ghz_disable, iwlwifi_mod_params.disable_5ghz, + bool, S_IRUGO); +MODULE_PARM_DESC(5ghz_disable, "disable 5GHz band (default: 0 [enabled])"); diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h index 3b771c1d909..2cbf137b25b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/iwlwifi/iwl-drv.h @@ -63,7 +63,12 @@ #ifndef __iwl_drv_h__ #define __iwl_drv_h__ -#include "iwl-shared.h" +/* for all modules */ +#define DRV_NAME "iwlwifi" +#define IWLWIFI_VERSION "in-tree:" +#define DRV_COPYRIGHT "Copyright(c) 2003-2012 Intel Corporation" +#define DRV_AUTHOR "<ilw@linux.intel.com>" + /** * DOC: Driver system flows - drv component @@ -90,34 +95,32 @@ * 8) iwl_ucode_callback starts the wifi implementation to matches the fw */ +struct iwl_drv; +struct iwl_trans; +struct iwl_cfg; /** * iwl_drv_start - start the drv * - * @shrd: the shrd area * @trans_ops: the ops of the transport * @cfg: device specific constants / virtual functions * - * TODO: review the parameters given to this function - * * starts the driver: fetches the firmware. This should be called by bus * specific system flows implementations. For example, the bus specific probe * function should do bus related operations only, and then call to this - * function. + * function. It returns the driver object or %NULL if an error occured. */ -int iwl_drv_start(struct iwl_shared *shrd, - struct iwl_trans *trans, const struct iwl_cfg *cfg); +struct iwl_drv *iwl_drv_start(struct iwl_trans *trans, + const struct iwl_cfg *cfg); /** * iwl_drv_stop - stop the drv * - * @shrd: the shrd area - * - * TODO: review the parameters given to this function + * @drv: * * Stop the driver. This should be called by bus specific system flows * implementations. For example, the bus specific remove function should first * call this function and then do the bus related operations only. 
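Note: per the iwl-drv.h documentation below, iwl_drv_start()/iwl_drv_stop() are the bus-glue entry points: the bus probe path builds the transport, hands it in together with the device config, and keeps the returned iwl_drv handle; the remove path passes that handle back. A hedged sketch of the pairing (hypothetical bus code, not the actual PCIe glue):

	/* Hypothetical bus glue illustrating the contract documented below. */
	static struct iwl_drv *example_bus_probe(struct iwl_trans *trans,
						 const struct iwl_cfg *cfg)
	{
		/* bus-specific setup of 'trans' happens before this point */
		return iwl_drv_start(trans, cfg);	/* NULL on failure */
	}

	static void example_bus_remove(struct iwl_drv *drv)
	{
		iwl_drv_stop(drv);
		/* bus-specific teardown of the transport follows */
	}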
*/ -void iwl_drv_stop(struct iwl_shared *shrd); +void iwl_drv_stop(struct iwl_drv *drv); #endif /* __iwl_drv_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c index 23cea42b949..50c58911e71 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c @@ -68,9 +68,7 @@ #include <net/mac80211.h> -#include "iwl-commands.h" #include "iwl-dev.h" -#include "iwl-core.h" #include "iwl-debug.h" #include "iwl-agn.h" #include "iwl-eeprom.h" @@ -187,33 +185,33 @@ static void iwl_eeprom_release_semaphore(struct iwl_trans *trans) } -static int iwl_eeprom_verify_signature(struct iwl_trans *trans) +static int iwl_eeprom_verify_signature(struct iwl_priv *priv) { - u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & + u32 gp = iwl_read32(priv->trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; int ret = 0; - IWL_DEBUG_EEPROM(trans, "EEPROM signature=0x%08x\n", gp); + IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp); switch (gp) { case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP: - if (trans->nvm_device_type != NVM_DEVICE_TYPE_OTP) { - IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n", + if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) { + IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n", gp); ret = -ENOENT; } break; case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: - if (trans->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) { - IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp); + if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) { + IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp); ret = -ENOENT; } break; case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP: default: - IWL_ERR(trans, "bad EEPROM/OTP signature, type=%s, " + IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, " "EEPROM_GP=0x%08x\n", - (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP) + (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) ? 
"OTP" : "EEPROM", gp); ret = -ENOENT; break; @@ -221,11 +219,11 @@ static int iwl_eeprom_verify_signature(struct iwl_trans *trans) return ret; } -u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset) +u16 iwl_eeprom_query16(struct iwl_priv *priv, size_t offset) { - if (!shrd->eeprom) + if (!priv->eeprom) return 0; - return (u16)shrd->eeprom[offset] | ((u16)shrd->eeprom[offset + 1] << 8); + return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8); } int iwl_eeprom_check_version(struct iwl_priv *priv) @@ -233,11 +231,11 @@ int iwl_eeprom_check_version(struct iwl_priv *priv) u16 eeprom_ver; u16 calib_ver; - eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION); - calib_ver = iwl_eeprom_calib_version(priv->shrd); + eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION); + calib_ver = iwl_eeprom_calib_version(priv); - if (eeprom_ver < cfg(priv)->eeprom_ver || - calib_ver < cfg(priv)->eeprom_calib_ver) + if (eeprom_ver < priv->cfg->eeprom_ver || + calib_ver < priv->cfg->eeprom_calib_ver) goto err; IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n", @@ -247,58 +245,115 @@ int iwl_eeprom_check_version(struct iwl_priv *priv) err: IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x " "CALIB=0x%x < 0x%x\n", - eeprom_ver, cfg(priv)->eeprom_ver, - calib_ver, cfg(priv)->eeprom_calib_ver); + eeprom_ver, priv->cfg->eeprom_ver, + calib_ver, priv->cfg->eeprom_calib_ver); return -EINVAL; } int iwl_eeprom_init_hw_params(struct iwl_priv *priv) { - struct iwl_shared *shrd = priv->shrd; u16 radio_cfg; - hw_params(priv).sku = iwl_eeprom_query16(shrd, EEPROM_SKU_CAP); - if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE && - !cfg(priv)->ht_params) { + priv->hw_params.sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP); + if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE && + !priv->cfg->ht_params) { IWL_ERR(priv, "Invalid 11n configuration\n"); return -EINVAL; } - if (!hw_params(priv).sku) { + if (!priv->hw_params.sku) { IWL_ERR(priv, "Invalid device sku\n"); return -EINVAL; } - IWL_INFO(priv, "Device SKU: 0x%X\n", hw_params(priv).sku); + IWL_INFO(priv, "Device SKU: 0x%X\n", priv->hw_params.sku); - radio_cfg = iwl_eeprom_query16(shrd, EEPROM_RADIO_CONFIG); + radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); - hw_params(priv).valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); - hw_params(priv).valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg); + priv->hw_params.valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); + priv->hw_params.valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg); /* check overrides (some devices have wrong EEPROM) */ - if (cfg(priv)->valid_tx_ant) - hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant; - if (cfg(priv)->valid_rx_ant) - hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant; + if (priv->cfg->valid_tx_ant) + priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; + if (priv->cfg->valid_rx_ant) + priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; - if (!hw_params(priv).valid_tx_ant || !hw_params(priv).valid_rx_ant) { + if (!priv->hw_params.valid_tx_ant || !priv->hw_params.valid_rx_ant) { IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n", - hw_params(priv).valid_tx_ant, - hw_params(priv).valid_rx_ant); + priv->hw_params.valid_tx_ant, + priv->hw_params.valid_rx_ant); return -EINVAL; } IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n", - hw_params(priv).valid_tx_ant, hw_params(priv).valid_rx_ant); + priv->hw_params.valid_tx_ant, priv->hw_params.valid_rx_ant); return 0; } -void iwl_eeprom_get_mac(const struct iwl_shared 
*shrd, u8 *mac) +u16 iwl_eeprom_calib_version(struct iwl_priv *priv) { - const u8 *addr = iwl_eeprom_query_addr(shrd, + struct iwl_eeprom_calib_hdr *hdr; + + hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv, + EEPROM_CALIB_ALL); + return hdr->version; +} + +static u32 eeprom_indirect_address(struct iwl_priv *priv, u32 address) +{ + u16 offset = 0; + + if ((address & INDIRECT_ADDRESS) == 0) + return address; + + switch (address & INDIRECT_TYPE_MSK) { + case INDIRECT_HOST: + offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST); + break; + case INDIRECT_GENERAL: + offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL); + break; + case INDIRECT_REGULATORY: + offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY); + break; + case INDIRECT_TXP_LIMIT: + offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT); + break; + case INDIRECT_TXP_LIMIT_SIZE: + offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE); + break; + case INDIRECT_CALIBRATION: + offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION); + break; + case INDIRECT_PROCESS_ADJST: + offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST); + break; + case INDIRECT_OTHERS: + offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS); + break; + default: + IWL_ERR(priv, "illegal indirect type: 0x%X\n", + address & INDIRECT_TYPE_MSK); + break; + } + + /* translate the offset from words to byte */ + return (address & ADDRESS_MSK) + (offset << 1); +} + +const u8 *iwl_eeprom_query_addr(struct iwl_priv *priv, size_t offset) +{ + u32 address = eeprom_indirect_address(priv, offset); + BUG_ON(address >= priv->cfg->base_params->eeprom_size); + return &priv->eeprom[address]; +} + +void iwl_eeprom_get_mac(struct iwl_priv *priv, u8 *mac) +{ + const u8 *addr = iwl_eeprom_query_addr(priv, EEPROM_MAC_ADDRESS); memcpy(mac, addr, ETH_ALEN); } @@ -376,7 +431,7 @@ static int iwl_init_otp_access(struct iwl_trans *trans) * CSR auto clock gate disable bit - * this is only applicable for HW with OTP shadow RAM */ - if (cfg(trans)->base_params->shadow_ram_support) + if (trans->cfg->base_params->shadow_ram_support) iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); } @@ -497,7 +552,7 @@ static int iwl_find_otp_image(struct iwl_trans *trans, } /* more in the link list, continue */ usedblocks++; - } while (usedblocks <= cfg(trans)->base_params->max_ll_items); + } while (usedblocks <= trans->cfg->base_params->max_ll_items); /* OTP has no valid blocks */ IWL_DEBUG_EEPROM(trans, "OTP has no valid blocks\n"); @@ -591,7 +646,6 @@ iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv, static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv) { - struct iwl_shared *shrd = priv->shrd; struct iwl_eeprom_enhanced_txpwr *txp_array, *txp; int idx, entries; __le16 *txp_len; @@ -600,10 +654,10 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv) BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8); /* the length is in 16-bit words, but we want entries */ - txp_len = (__le16 *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_SZ_OFFS); + txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS); entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN; - txp_array = (void *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_OFFS); + txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS); for (idx = 0; idx < entries; idx++) { txp = &txp_array[idx]; @@ -637,7 +691,7 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv) ((txp->delta_20_in_40 & 0xf0) >> 4), (txp->delta_20_in_40 & 0x0f)); 
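Note: eeprom_indirect_address() above resolves a link word from the EEPROM image and converts it from 16-bit words to a byte offset (offset << 1) before adding the masked address; iwl_eeprom_query_addr() then returns a pointer into priv->eeprom. A short sketch layering a 16-bit read on top of those helpers (hypothetical wrapper; the image is little endian, as in iwl_eeprom_query16()):

	/* Hypothetical helper: read a 16-bit field through the indirect
	 * address translation shown above. */
	static u16 example_eeprom_read16_indirect(struct iwl_priv *priv,
						  size_t addr)
	{
		const u8 *p = iwl_eeprom_query_addr(priv, addr);

		return (u16)p[0] | ((u16)p[1] << 8);
	}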
- max_txp_avg = iwl_get_max_txpower_avg(cfg(priv), txp_array, idx, + max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, idx, &max_txp_avg_halfdbm); /* @@ -656,66 +710,66 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv) /** * iwl_eeprom_init - read EEPROM contents * - * Load the EEPROM contents from adapter into shrd->eeprom + * Load the EEPROM contents from adapter into priv->eeprom * * NOTE: This routine uses the non-debug IO access functions. */ -int iwl_eeprom_init(struct iwl_trans *trans, u32 hw_rev) +int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev) { __le16 *e; - u32 gp = iwl_read32(trans, CSR_EEPROM_GP); + u32 gp = iwl_read32(priv->trans, CSR_EEPROM_GP); int sz; int ret; u16 addr; u16 validblockaddr = 0; u16 cache_addr = 0; - trans->nvm_device_type = iwl_get_nvm_type(trans, hw_rev); - if (trans->nvm_device_type == -ENOENT) + priv->nvm_device_type = iwl_get_nvm_type(priv->trans, hw_rev); + if (priv->nvm_device_type == -ENOENT) return -ENOENT; /* allocate eeprom */ - sz = cfg(trans)->base_params->eeprom_size; - IWL_DEBUG_EEPROM(trans, "NVM size = %d\n", sz); - trans->shrd->eeprom = kzalloc(sz, GFP_KERNEL); - if (!trans->shrd->eeprom) { + sz = priv->cfg->base_params->eeprom_size; + IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz); + priv->eeprom = kzalloc(sz, GFP_KERNEL); + if (!priv->eeprom) { ret = -ENOMEM; goto alloc_err; } - e = (__le16 *)trans->shrd->eeprom; + e = (__le16 *)priv->eeprom; - ret = iwl_eeprom_verify_signature(trans); + ret = iwl_eeprom_verify_signature(priv); if (ret < 0) { - IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); + IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); ret = -ENOENT; goto err; } /* Make sure driver (instead of uCode) is allowed to read EEPROM */ - ret = iwl_eeprom_acquire_semaphore(trans); + ret = iwl_eeprom_acquire_semaphore(priv->trans); if (ret < 0) { - IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n"); + IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n"); ret = -ENOENT; goto err; } - if (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP) { + if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) { - ret = iwl_init_otp_access(trans); + ret = iwl_init_otp_access(priv->trans); if (ret) { - IWL_ERR(trans, "Failed to initialize OTP access.\n"); + IWL_ERR(priv, "Failed to initialize OTP access.\n"); ret = -ENOENT; goto done; } - iwl_write32(trans, CSR_EEPROM_GP, - iwl_read32(trans, CSR_EEPROM_GP) & + iwl_write32(priv->trans, CSR_EEPROM_GP, + iwl_read32(priv->trans, CSR_EEPROM_GP) & ~CSR_EEPROM_GP_IF_OWNER_MSK); - iwl_set_bit(trans, CSR_OTP_GP_REG, + iwl_set_bit(priv->trans, CSR_OTP_GP_REG, CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); /* traversing the linked list if no shadow ram supported */ - if (!cfg(trans)->base_params->shadow_ram_support) { - if (iwl_find_otp_image(trans, &validblockaddr)) { + if (!priv->cfg->base_params->shadow_ram_support) { + if (iwl_find_otp_image(priv->trans, &validblockaddr)) { ret = -ENOENT; goto done; } @@ -724,7 +778,8 @@ int iwl_eeprom_init(struct iwl_trans *trans, u32 hw_rev) addr += sizeof(u16)) { __le16 eeprom_data; - ret = iwl_read_otp_word(trans, addr, &eeprom_data); + ret = iwl_read_otp_word(priv->trans, addr, + &eeprom_data); if (ret) goto done; e[cache_addr / 2] = eeprom_data; @@ -735,94 +790,93 @@ int iwl_eeprom_init(struct iwl_trans *trans, u32 hw_rev) for (addr = 0; addr < sz; addr += sizeof(u16)) { u32 r; - iwl_write32(trans, CSR_EEPROM_REG, + iwl_write32(priv->trans, CSR_EEPROM_REG, CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); - 
ret = iwl_poll_bit(trans, CSR_EEPROM_REG, + ret = iwl_poll_bit(priv->trans, CSR_EEPROM_REG, CSR_EEPROM_REG_READ_VALID_MSK, CSR_EEPROM_REG_READ_VALID_MSK, IWL_EEPROM_ACCESS_TIMEOUT); if (ret < 0) { - IWL_ERR(trans, + IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr); goto done; } - r = iwl_read32(trans, CSR_EEPROM_REG); + r = iwl_read32(priv->trans, CSR_EEPROM_REG); e[addr / 2] = cpu_to_le16(r >> 16); } } - IWL_DEBUG_EEPROM(trans, "NVM Type: %s, version: 0x%x\n", - (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP) + IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n", + (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) ? "OTP" : "EEPROM", - iwl_eeprom_query16(trans->shrd, EEPROM_VERSION)); + iwl_eeprom_query16(priv, EEPROM_VERSION)); ret = 0; done: - iwl_eeprom_release_semaphore(trans); + iwl_eeprom_release_semaphore(priv->trans); err: if (ret) - iwl_eeprom_free(trans->shrd); + iwl_eeprom_free(priv); alloc_err: return ret; } -void iwl_eeprom_free(struct iwl_shared *shrd) +void iwl_eeprom_free(struct iwl_priv *priv) { - kfree(shrd->eeprom); - shrd->eeprom = NULL; + kfree(priv->eeprom); + priv->eeprom = NULL; } -static void iwl_init_band_reference(const struct iwl_priv *priv, +static void iwl_init_band_reference(struct iwl_priv *priv, int eep_band, int *eeprom_ch_count, const struct iwl_eeprom_channel **eeprom_ch_info, const u8 **eeprom_ch_index) { - struct iwl_shared *shrd = priv->shrd; - u32 offset = cfg(priv)->lib-> + u32 offset = priv->lib-> eeprom_ops.regulatory_bands[eep_band - 1]; switch (eep_band) { case 1: /* 2.4GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(shrd, offset); + iwl_eeprom_query_addr(priv, offset); *eeprom_ch_index = iwl_eeprom_band_1; break; case 2: /* 4.9GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(shrd, offset); + iwl_eeprom_query_addr(priv, offset); *eeprom_ch_index = iwl_eeprom_band_2; break; case 3: /* 5.2GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(shrd, offset); + iwl_eeprom_query_addr(priv, offset); *eeprom_ch_index = iwl_eeprom_band_3; break; case 4: /* 5.5GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(shrd, offset); + iwl_eeprom_query_addr(priv, offset); *eeprom_ch_index = iwl_eeprom_band_4; break; case 5: /* 5.7GHz band */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(shrd, offset); + iwl_eeprom_query_addr(priv, offset); *eeprom_ch_index = iwl_eeprom_band_5; break; case 6: /* 2.4GHz ht40 channels */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(shrd, offset); + iwl_eeprom_query_addr(priv, offset); *eeprom_ch_index = iwl_eeprom_band_6; break; case 7: /* 5 GHz ht40 channels */ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7); *eeprom_ch_info = (struct iwl_eeprom_channel *) - iwl_eeprom_query_addr(shrd, offset); + iwl_eeprom_query_addr(priv, offset); *eeprom_ch_index = iwl_eeprom_band_7; break; default: @@ -987,9 +1041,9 @@ int iwl_init_channel_map(struct iwl_priv *priv) } /* Check if we do have HT40 channels */ - if (cfg(priv)->lib->eeprom_ops.regulatory_bands[5] == + if (priv->lib->eeprom_ops.regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 && - 
cfg(priv)->lib->eeprom_ops.regulatory_bands[6] == + priv->lib->eeprom_ops.regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40) return 0; @@ -1025,7 +1079,7 @@ int iwl_init_channel_map(struct iwl_priv *priv) * driver need to process addition information * to determine the max channel tx power limits */ - if (cfg(priv)->lib->eeprom_ops.enhanced_txpower) + if (priv->lib->eeprom_ops.enhanced_txpower) iwl_eeprom_enhanced_txpower(priv); return 0; @@ -1072,11 +1126,11 @@ void iwl_rf_config(struct iwl_priv *priv) { u16 radio_cfg; - radio_cfg = iwl_eeprom_query16(priv->shrd, EEPROM_RADIO_CONFIG); + radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); /* write radio config values to register */ if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) { - iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG, + iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG, EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | EEPROM_RF_CFG_STEP_MSK(radio_cfg) | EEPROM_RF_CFG_DASH_MSK(radio_cfg)); @@ -1088,7 +1142,7 @@ void iwl_rf_config(struct iwl_priv *priv) WARN_ON(1); /* set CSR_HW_CONFIG_REG for uCode use */ - iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG, + iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); } diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h index e4a75834099..64bfd947cae 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h @@ -66,8 +66,6 @@ #include <net/mac80211.h> struct iwl_priv; -struct iwl_shared; -struct iwl_trans; /* * EEPROM access time values: @@ -208,59 +206,6 @@ struct iwl_eeprom_calib_hdr { /* 6000 regulatory - indirect access */ #define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */ - -/* 5000 Specific */ -#define EEPROM_5000_TX_POWER_VERSION (4) -#define EEPROM_5000_EEPROM_VERSION (0x11A) - -/* 5050 Specific */ -#define EEPROM_5050_TX_POWER_VERSION (4) -#define EEPROM_5050_EEPROM_VERSION (0x21E) - -/* 1000 Specific */ -#define EEPROM_1000_TX_POWER_VERSION (4) -#define EEPROM_1000_EEPROM_VERSION (0x15C) - -/* 6x00 Specific */ -#define EEPROM_6000_TX_POWER_VERSION (4) -#define EEPROM_6000_EEPROM_VERSION (0x423) - -/* 6x50 Specific */ -#define EEPROM_6050_TX_POWER_VERSION (4) -#define EEPROM_6050_EEPROM_VERSION (0x532) - -/* 6150 Specific */ -#define EEPROM_6150_TX_POWER_VERSION (6) -#define EEPROM_6150_EEPROM_VERSION (0x553) - -/* 6x05 Specific */ -#define EEPROM_6005_TX_POWER_VERSION (6) -#define EEPROM_6005_EEPROM_VERSION (0x709) - -/* 6x30 Specific */ -#define EEPROM_6030_TX_POWER_VERSION (6) -#define EEPROM_6030_EEPROM_VERSION (0x709) - -/* 2x00 Specific */ -#define EEPROM_2000_TX_POWER_VERSION (6) -#define EEPROM_2000_EEPROM_VERSION (0x805) - -/* 6x35 Specific */ -#define EEPROM_6035_TX_POWER_VERSION (6) -#define EEPROM_6035_EEPROM_VERSION (0x753) - - -/* OTP */ -/* lower blocks contain EEPROM image and calibration data */ -#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */ -/* high blocks contain PAPD data */ -#define OTP_HIGH_IMAGE_SIZE_6x00 (6 * 512 * sizeof(u16)) /* 6 KB */ -#define OTP_HIGH_IMAGE_SIZE_1000 (0x200 * sizeof(u16)) /* 1024 bytes */ -#define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */ -#define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */ -#define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */ -#define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */ - /* 2.4 GHz */ extern const u8 iwl_eeprom_band_1[14]; @@ -306,12 
+251,14 @@ struct iwl_eeprom_ops { }; -int iwl_eeprom_init(struct iwl_trans *trans, u32 hw_rev); -void iwl_eeprom_free(struct iwl_shared *shrd); -int iwl_eeprom_check_version(struct iwl_priv *priv); +int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev); +void iwl_eeprom_free(struct iwl_priv *priv); +int iwl_eeprom_check_version(struct iwl_priv *priv); int iwl_eeprom_init_hw_params(struct iwl_priv *priv); -const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset); -u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset); +u16 iwl_eeprom_calib_version(struct iwl_priv *priv); +const u8 *iwl_eeprom_query_addr(struct iwl_priv *priv, size_t offset); +u16 iwl_eeprom_query16(struct iwl_priv *priv, size_t offset); +void iwl_eeprom_get_mac(struct iwl_priv *priv, u8 *mac); int iwl_init_channel_map(struct iwl_priv *priv); void iwl_free_channel_map(struct iwl_priv *priv); const struct iwl_channel_info *iwl_get_channel_info( diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index c924ccb93c8..e71564053e7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -93,15 +93,7 @@ struct iwl_ucode_header { * new TLV uCode file layout * * The new TLV file format contains TLVs, that each specify - * some piece of data. To facilitate "groups", for example - * different instruction image with different capabilities, - * bundled with the same init image, an alternative mechanism - * is provided: - * When the alternative field is 0, that means that the item - * is always valid. When it is non-zero, then it is only - * valid in conjunction with items of the same alternative, - * in which case the driver (user) selects one alternative - * to use. + * some piece of data. */ enum iwl_ucode_tlv_type { @@ -132,8 +124,7 @@ enum iwl_ucode_tlv_type { }; struct iwl_ucode_tlv { - __le16 type; /* see above */ - __le16 alternative; /* see comment */ + __le32 type; /* see above */ __le32 length; /* not including type/length fields */ u8 data[0]; }; @@ -152,7 +143,7 @@ struct iwl_tlv_ucode_header { u8 human_readable[64]; __le32 ver; /* major/minor/API/serial */ __le32 build; - __le64 alternatives; /* bitmask of valid alternatives */ + __le64 ignore; /* * The data contained herein has a TLV layout, * see above for the TLV header and types. 
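A stand-alone sketch (not driver code) of walking the new-style TLV stream described in the iwl-fw-file.h hunk above: each entry is now a 32-bit type, a 32-bit length that excludes the type/length fields themselves, and `length` bytes of payload. Whether the real loader pads entries between TLVs is not shown in this hunk, so the sketch assumes they are packed back to back; all names below are illustrative.

#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Walk a TLV buffer, printing type and length of each entry. */
static int walk_tlvs(const uint8_t *buf, size_t len)
{
	size_t pos = 0;

	while (pos + 8 <= len) {
		uint32_t type = get_le32(buf + pos);
		uint32_t tlv_len = get_le32(buf + pos + 4);

		if (pos + 8 + tlv_len > len)
			return -1;	/* truncated TLV */

		printf("TLV type=%u len=%u\n", (unsigned)type,
		       (unsigned)tlv_len);
		pos += 8 + tlv_len;
	}
	return pos == len ? 0 : -1;
}

int main(void)
{
	/* Two fake TLVs: type 1 with 4 bytes of data, type 2 with 2 bytes. */
	const uint8_t buf[] = {
		1, 0, 0, 0,  4, 0, 0, 0,  0xde, 0xad, 0xbe, 0xef,
		2, 0, 0, 0,  2, 0, 0, 0,  0x12, 0x34,
	};
	return walk_tlvs(buf, sizeof(buf)) ? 1 : 0;
}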
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index 8e36bdc1e52..2153e4cc557 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -63,6 +63,7 @@ #ifndef __iwl_fw_h__ #define __iwl_fw_h__ #include <linux/types.h> +#include <net/mac80211.h> /** * enum iwl_ucode_tlv_flag - ucode API flags diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h index 09b856768f6..abb3250164b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-io.h +++ b/drivers/net/wireless/iwlwifi/iwl-io.h @@ -30,7 +30,6 @@ #define __iwl_io_h__ #include "iwl-devtrace.h" -#include "iwl-shared.h" #include "iwl-trans.h" static inline void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val) diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c index 1993a2b7ae6..47000419f91 100644 --- a/drivers/net/wireless/iwlwifi/iwl-led.c +++ b/drivers/net/wireless/iwlwifi/iwl-led.c @@ -36,11 +36,10 @@ #include <asm/unaligned.h> #include "iwl-dev.h" -#include "iwl-core.h" #include "iwl-agn.h" #include "iwl-io.h" #include "iwl-trans.h" -#include "iwl-shared.h" +#include "iwl-modparams.h" /* Throughput OFF time(ms) ON time (ms) * >300 25 25 @@ -71,7 +70,7 @@ static const struct ieee80211_tpt_blink iwl_blink[] = { /* Set led register off */ void iwlagn_led_enable(struct iwl_priv *priv) { - iwl_write32(trans(priv), CSR_LED_REG, CSR_LED_REG_TRUN_ON); + iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TRUN_ON); } /* @@ -107,9 +106,9 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd) }; u32 reg; - reg = iwl_read32(trans(priv), CSR_LED_REG); + reg = iwl_read32(priv->trans, CSR_LED_REG); if (reg != (reg & CSR_LED_BSM_CTRL_MSK)) - iwl_write32(trans(priv), CSR_LED_REG, + iwl_write32(priv->trans, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK); return iwl_dvm_send_cmd(priv, &cmd); @@ -138,11 +137,11 @@ static int iwl_led_cmd(struct iwl_priv *priv, } IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n", - cfg(priv)->base_params->led_compensation); + priv->cfg->base_params->led_compensation); led_cmd.on = iwl_blink_compensation(priv, on, - cfg(priv)->base_params->led_compensation); + priv->cfg->base_params->led_compensation); led_cmd.off = iwl_blink_compensation(priv, off, - cfg(priv)->base_params->led_compensation); + priv->cfg->base_params->led_compensation); ret = iwl_send_led_cmd(priv, &led_cmd); if (!ret) { @@ -175,7 +174,7 @@ static int iwl_led_blink_set(struct led_classdev *led_cdev, void iwl_leds_init(struct iwl_priv *priv) { - int mode = iwlagn_mod_params.led_mode; + int mode = iwlwifi_mod_params.led_mode; int ret; if (mode == IWL_LED_DISABLE) { @@ -183,7 +182,7 @@ void iwl_leds_init(struct iwl_priv *priv) return; } if (mode == IWL_LED_DEFAULT) - mode = cfg(priv)->led_mode; + mode = priv->cfg->led_mode; priv->led.name = kasprintf(GFP_KERNEL, "%s-led", wiphy_name(priv->hw->wiphy)); @@ -207,7 +206,7 @@ void iwl_leds_init(struct iwl_priv *priv) break; } - ret = led_classdev_register(trans(priv)->dev, &priv->led); + ret = led_classdev_register(priv->trans->dev, &priv->led); if (ret) { kfree(priv->led.name); return; diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c index c24a7134a6f..d33cc9cc7d3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c +++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c @@ -44,13 +44,12 @@ #include "iwl-eeprom.h" #include "iwl-dev.h" -#include "iwl-core.h" #include "iwl-io.h" #include 
"iwl-agn-calib.h" #include "iwl-agn.h" -#include "iwl-shared.h" #include "iwl-trans.h" #include "iwl-op-mode.h" +#include "iwl-modparams.h" /***************************************************************************** * @@ -147,7 +146,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, IEEE80211_HW_AMPDU_AGGREGATION | IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT | - IEEE80211_HW_REPORTS_TX_ACK_STATUS; + IEEE80211_HW_REPORTS_TX_ACK_STATUS | + IEEE80211_HW_QUEUE_CONTROL | + IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_SUPPORTS_DYNAMIC_PS | + IEEE80211_HW_SCAN_WHILE_IDLE; + + hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE; /* * Including the following line will crash some AP's. This @@ -156,10 +161,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; */ - hw->flags |= IEEE80211_HW_SUPPORTS_PS | - IEEE80211_HW_SUPPORTS_DYNAMIC_PS; - - if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE) + if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE) hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | IEEE80211_HW_SUPPORTS_STATIC_SMPS; @@ -197,13 +199,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, WIPHY_FLAG_IBSS_RSN; if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && - trans(priv)->ops->wowlan_suspend && - device_can_wakeup(trans(priv)->dev)) { + priv->trans->ops->wowlan_suspend && + device_can_wakeup(priv->trans->dev)) { hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | WIPHY_WOWLAN_EAP_IDENTITY_REQ | WIPHY_WOWLAN_RFKILL_RELEASE; - if (!iwlagn_mod_params.sw_crypto) + if (!iwlwifi_mod_params.sw_crypto) hw->wiphy->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | WIPHY_WOWLAN_GTK_REKEY_FAILURE; @@ -215,7 +217,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, IWLAGN_WOWLAN_MAX_PATTERN_LEN; } - if (iwlagn_mod_params.power_save) + if (iwlwifi_mod_params.power_save) hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; else hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; @@ -224,8 +226,11 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, /* we create the 802.11 header and a zero-length SSID element */ hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2; - /* Default value; 4 EDCA QOS priorities */ - hw->queues = 4; + /* + * We don't use all queues: 4 and 9 are unused and any + * aggregation queue gets mapped down to the AC queue. 
+ */ + hw->queues = IWLAGN_FIRST_AMPDU_QUEUE; hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; @@ -236,7 +241,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->bands[IEEE80211_BAND_5GHZ]; - hw->wiphy->hw_version = trans(priv)->hw_id; + hw->wiphy->hw_version = priv->trans->hw_id; iwl_leds_init(priv); @@ -332,7 +337,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw) return 0; } -static void iwlagn_mac_stop(struct ieee80211_hw *hw) +void iwlagn_mac_stop(struct ieee80211_hw *hw) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); @@ -355,18 +360,18 @@ static void iwlagn_mac_stop(struct ieee80211_hw *hw) * even if interface is down, trans->down will leave the RF * kill interrupt enabled */ - iwl_trans_stop_hw(trans(priv)); + iwl_trans_stop_hw(priv->trans, false); IWL_DEBUG_MAC80211(priv, "leave\n"); } -static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_gtk_rekey_data *data) +void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - if (iwlagn_mod_params.sw_crypto) + if (iwlwifi_mod_params.sw_crypto) return; IWL_DEBUG_MAC80211(priv, "enter\n"); @@ -388,8 +393,7 @@ static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw, #ifdef CONFIG_PM_SLEEP -static int iwlagn_mac_suspend(struct ieee80211_hw *hw, - struct cfg80211_wowlan *wowlan) +int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; @@ -412,9 +416,9 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw, if (ret) goto error; - device_set_wakeup_enable(trans(priv)->dev, true); + device_set_wakeup_enable(priv->trans->dev, true); - iwl_trans_wowlan_suspend(trans(priv)); + iwl_trans_wowlan_suspend(priv->trans); goto out; @@ -437,27 +441,28 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw) unsigned long flags; u32 base, status = 0xffffffff; int ret = -EIO; - const struct fw_img *img; IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->mutex); - iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR, + iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); - base = priv->shrd->device_pointers.error_event_table; + base = priv->device_pointers.error_event_table; if (iwlagn_hw_valid_rtc_data_addr(base)) { - spin_lock_irqsave(&trans(priv)->reg_lock, flags); - ret = iwl_grab_nic_access_silent(trans(priv)); + spin_lock_irqsave(&priv->trans->reg_lock, flags); + ret = iwl_grab_nic_access_silent(priv->trans); if (likely(ret == 0)) { - iwl_write32(trans(priv), HBUS_TARG_MEM_RADDR, base); - status = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); - iwl_release_nic_access(trans(priv)); + iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, base); + status = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT); + iwl_release_nic_access(priv->trans); } - spin_unlock_irqrestore(&trans(priv)->reg_lock, flags); + spin_unlock_irqrestore(&priv->trans->reg_lock, flags); #ifdef CONFIG_IWLWIFI_DEBUGFS if (ret == 0) { + const struct fw_img *img; + img = &(priv->fw->img[IWL_UCODE_WOWLAN]); if (!priv->wowlan_sram) { priv->wowlan_sram = @@ -467,7 +472,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw) if (priv->wowlan_sram) _iwl_read_targ_mem_words( - trans(priv), 0x800000, + priv->trans, 0x800000, priv->wowlan_sram, 
img->sec[IWL_UCODE_SECTION_DATA].len / 4); } @@ -479,7 +484,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw) priv->wowlan = false; - device_set_wakeup_enable(trans(priv)->dev, false); + device_set_wakeup_enable(priv->trans->dev, false); iwlagn_prepare_restart(priv); @@ -497,7 +502,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw) #endif -static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); @@ -508,21 +513,21 @@ static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) dev_kfree_skb_any(skb); } -static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, - u32 iv32, u16 *phase1key) +void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key); } -static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) +int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; @@ -532,7 +537,7 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, IWL_DEBUG_MAC80211(priv, "enter\n"); - if (iwlagn_mod_params.sw_crypto) { + if (iwlwifi_mod_params.sw_crypto) { IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); return -EOPNOTSUPP; } @@ -622,11 +627,11 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return ret; } -static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size) +int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); int ret = -EINVAL; @@ -635,7 +640,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", sta->addr, tid); - if (!(hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)) + if (!(priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)) return -EACCES; IWL_DEBUG_MAC80211(priv, "enter\n"); @@ -643,7 +648,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, switch (action) { case IEEE80211_AMPDU_RX_START: - if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) break; IWL_DEBUG_HT(priv, "start Rx\n"); ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); @@ -653,7 +658,9 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, ret = iwl_sta_rx_agg_stop(priv, sta, tid); break; case IEEE80211_AMPDU_TX_START: - if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) + if (!priv->trans->ops->tx_agg_setup) + break; + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) break; IWL_DEBUG_HT(priv, "start Tx\n"); ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); @@ -667,7 
+674,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, priv->agg_tids_count); } if (!priv->agg_tids_count && - hw_params(priv).use_rts_for_aggregation) { + priv->hw_params.use_rts_for_aggregation) { /* * switch off RTS/CTS if it was previously enabled */ @@ -746,11 +753,11 @@ static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw, return ret; } -static int iwlagn_mac_sta_state(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - enum ieee80211_sta_state old_state, - enum ieee80211_sta_state new_state) +int iwlagn_mac_sta_state(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; @@ -829,8 +836,8 @@ static int iwlagn_mac_sta_state(struct ieee80211_hw *hw, return ret; } -static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_channel_switch *ch_switch) +void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_channel_switch *ch_switch) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); const struct iwl_channel_info *ch_info; @@ -863,7 +870,7 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, if (!iwl_is_associated_ctx(ctx)) goto out; - if (!cfg(priv)->lib->set_channel_switch) + if (!priv->lib->set_channel_switch) goto out; ch = channel->hw_value; @@ -892,14 +899,13 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, iwl_set_rxon_ht(priv, ht_conf); iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif); - iwl_set_rate(priv); /* * at this point, staging_rxon has the * configuration for channel switch */ set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); priv->switch_channel = cpu_to_le16(ch); - if (cfg(priv)->lib->set_channel_switch(priv, ch_switch)) { + if (priv->lib->set_channel_switch(priv, ch_switch)) { clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); priv->switch_channel = 0; ieee80211_chswitch_done(ctx->vif, false); @@ -910,10 +916,25 @@ out: IWL_DEBUG_MAC80211(priv, "leave\n"); } -static void iwlagn_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, - u64 multicast) +void iwl_chswitch_done(struct iwl_priv *priv, bool is_success) +{ + /* + * MULTI-FIXME + * See iwlagn_mac_channel_switch. 
+ */ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) + ieee80211_chswitch_done(ctx->vif, is_success); +} + +void iwlagn_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); __le32 filter_or = 0, filter_nand = 0; @@ -960,7 +981,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw, FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; } -static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop) +void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); @@ -988,7 +1009,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop) } } IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n"); - iwl_trans_wait_tx_queue_empty(trans(priv)); + iwl_trans_wait_tx_queue_empty(priv->trans); done: mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); @@ -1003,7 +1024,7 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw, struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN]; int err = 0; - if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN))) + if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) return -EOPNOTSUPP; if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT))) @@ -1087,11 +1108,11 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw, return err; } -static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw) +int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN))) + if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) return -EOPNOTSUPP; IWL_DEBUG_MAC80211(priv, "enter\n"); @@ -1104,16 +1125,16 @@ static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw) return 0; } -static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw, - enum ieee80211_rssi_event rssi_event) +void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw, + enum ieee80211_rssi_event rssi_event) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->mutex); - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist) { + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist) { if (rssi_event == RSSI_EVENT_LOW) priv->bt_enable_pspoll = true; else if (rssi_event == RSSI_EVENT_HIGH) @@ -1129,8 +1150,8 @@ static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw, IWL_DEBUG_MAC80211(priv, "leave\n"); } -static int iwlagn_mac_set_tim(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, bool set) +int iwlagn_mac_set_tim(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, bool set) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); @@ -1139,9 +1160,9 @@ static int iwlagn_mac_set_tim(struct ieee80211_hw *hw, return 0; } -static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u16 queue, - const struct ieee80211_tx_queue_params *params) +int iwlagn_mac_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 queue, + const struct ieee80211_tx_queue_params *params) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; @@ -1183,7 +1204,7 @@ static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw, return 0; } -static int 
iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw) +int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); @@ -1199,11 +1220,10 @@ static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx) return iwlagn_commit_rxon(priv, ctx); } -static int iwl_setup_interface(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) +int iwl_setup_interface(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { struct ieee80211_vif *vif = ctx->vif; - int err; + int err, ac; lockdep_assert_held(&priv->mutex); @@ -1223,7 +1243,7 @@ static int iwl_setup_interface(struct iwl_priv *priv, return err; } - if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist && + if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist && vif->type == NL80211_IFTYPE_ADHOC) { /* * pretend to have high BT traffic as long as we @@ -1233,11 +1253,20 @@ static int iwl_setup_interface(struct iwl_priv *priv, priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH; } + /* set up queue mappings */ + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + vif->hw_queue[ac] = ctx->ac_to_queue[ac]; + + if (vif->type == NL80211_IFTYPE_AP) + vif->cab_queue = ctx->mcast_queue; + else + vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; + return 0; } static int iwlagn_mac_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; @@ -1311,9 +1340,9 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw, return err; } -static void iwl_teardown_interface(struct iwl_priv *priv, - struct ieee80211_vif *vif, - bool mode_change) +void iwl_teardown_interface(struct iwl_priv *priv, + struct ieee80211_vif *vif, + bool mode_change) { struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); @@ -1454,9 +1483,9 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw, return err; } -static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_scan_request *req) +int iwlagn_mac_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); int ret; @@ -1511,7 +1540,7 @@ static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) iwl_send_add_sta(priv, &cmd, CMD_ASYNC); } -static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw, +void iwlagn_mac_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum sta_notify_cmd cmd, struct ieee80211_sta *sta) diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h new file mode 100644 index 00000000000..d9a86d6b2bd --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h @@ -0,0 +1,126 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#ifndef __iwl_modparams_h__ +#define __iwl_modparams_h__ + +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/gfp.h> +#include <net/mac80211.h> + +extern struct iwl_mod_params iwlwifi_mod_params; + +enum iwl_power_level { + IWL_POWER_INDEX_1, + IWL_POWER_INDEX_2, + IWL_POWER_INDEX_3, + IWL_POWER_INDEX_4, + IWL_POWER_INDEX_5, + IWL_POWER_NUM +}; + +#define IWL_DISABLE_HT_ALL BIT(0) +#define IWL_DISABLE_HT_TXAGG BIT(1) +#define IWL_DISABLE_HT_RXAGG BIT(2) + +/** + * struct iwl_mod_params + * + * Holds the module parameters + * + * @sw_crypto: using hardware encryption, default = 0 + * @disable_11n: disable 11n capabilities, default = 0, + * use IWL_DISABLE_HT_* constants + * @amsdu_size_8K: enable 8K amsdu size, default = 1 + * @restart_fw: restart firmware, default = 1 + * @plcp_check: enable plcp health check, default = true + * @wd_disable: enable stuck queue check, default = 0 + * @bt_coex_active: enable bt coex, default = true + * @led_mode: system default, default = 0 + * @power_save: disable power save, default = false + * @power_level: power level, default = 1 + * @debug_level: levels are IWL_DL_* + * @ant_coupling: antenna coupling in dB, default = 0 + * @bt_ch_announce: BT channel inhibition, default = enable + * @auto_agg: enable agg. without check, default = true + * @disable_5ghz: disable 5GHz capability, default = false + */ +struct iwl_mod_params { + int sw_crypto; + unsigned int disable_11n; + int amsdu_size_8K; + int restart_fw; + bool plcp_check; + int wd_disable; + bool bt_coex_active; + int led_mode; + bool power_save; + int power_level; + u32 debug_level; + int ant_coupling; + bool bt_ch_announce; + bool auto_agg; + bool disable_5ghz; +}; + +#endif /* #__iwl_modparams_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c index 88dc4a0f96b..0066b899fe5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c +++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c @@ -75,21 +75,45 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait) void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt) { + bool triggered = false; + if (!list_empty(¬if_wait->notif_waits)) { struct iwl_notification_wait *w; spin_lock(¬if_wait->notif_wait_lock); list_for_each_entry(w, ¬if_wait->notif_waits, list) { - if (w->cmd != pkt->hdr.cmd) + int i; + bool found = false; + + /* + * If it already finished (triggered) or has been + * aborted then don't evaluate it again to avoid races, + * Otherwise the function could be called again even + * though it returned true before + */ + if (w->triggered || w->aborted) + continue; + + for (i = 0; i < w->n_cmds; i++) { + if (w->cmds[i] == pkt->hdr.cmd) { + found = true; + break; + } + } + if (!found) continue; - w->triggered = true; - if (w->fn) - w->fn(notif_wait, pkt, w->fn_data); + + if (!w->fn || w->fn(notif_wait, pkt, w->fn_data)) { + w->triggered = true; + triggered = true; + } } spin_unlock(¬if_wait->notif_wait_lock); - wake_up_all(¬if_wait->notif_waitq); } + + if (triggered) + wake_up_all(¬if_wait->notif_waitq); } void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait) @@ -109,14 +133,18 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait) void iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait, struct iwl_notification_wait *wait_entry, - u8 cmd, - void (*fn)(struct 
iwl_notif_wait_data *notif_wait, + const u8 *cmds, int n_cmds, + bool (*fn)(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data), void *fn_data) { + if (WARN_ON(n_cmds > MAX_NOTIF_CMDS)) + n_cmds = MAX_NOTIF_CMDS; + wait_entry->fn = fn; wait_entry->fn_data = fn_data; - wait_entry->cmd = cmd; + wait_entry->n_cmds = n_cmds; + memcpy(wait_entry->cmds, cmds, n_cmds); wait_entry->triggered = false; wait_entry->aborted = false; diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h index 5e8af957aa7..821523100cf 100644 --- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h +++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h @@ -72,11 +72,19 @@ struct iwl_notif_wait_data { wait_queue_head_t notif_waitq; }; +#define MAX_NOTIF_CMDS 5 + /** * struct iwl_notification_wait - notification wait entry * @list: list head for global list - * @fn: function called with the notification - * @cmd: command ID + * @fn: Function called with the notification. If the function + * returns true, the wait is over, if it returns false then + * the waiter stays blocked. If no function is given, any + * of the listed commands will unblock the waiter. + * @cmds: command IDs + * @n_cmds: number of command IDs + * @triggered: waiter should be woken up + * @aborted: wait was aborted * * This structure is not used directly, to wait for a * notification declare it on the stack, and call @@ -93,11 +101,12 @@ struct iwl_notif_wait_data { struct iwl_notification_wait { struct list_head list; - void (*fn)(struct iwl_notif_wait_data *notif_data, + bool (*fn)(struct iwl_notif_wait_data *notif_data, struct iwl_rx_packet *pkt, void *data); void *fn_data; - u8 cmd; + u8 cmds[MAX_NOTIF_CMDS]; + u8 n_cmds; bool triggered, aborted; }; @@ -112,8 +121,8 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data); void __acquires(wait_entry) iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data, struct iwl_notification_wait *wait_entry, - u8 cmd, - void (*fn)(struct iwl_notif_wait_data *notif_data, + const u8 *cmds, int n_cmds, + bool (*fn)(struct iwl_notif_wait_data *notif_data, struct iwl_rx_packet *pkt, void *data), void *fn_data); diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h index 6ea4163ff56..4ef742b28e0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h @@ -69,6 +69,7 @@ struct sk_buff; struct iwl_device_cmd; struct iwl_rx_cmd_buffer; struct iwl_fw; +struct iwl_cfg; /** * DOC: Operational mode - what is it ? @@ -111,10 +112,10 @@ struct iwl_fw; * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the * HCMD the this Rx responds to. * Must be atomic. - * @queue_full: notifies that a HW queue is full. Ac is the ac of the queue + * @queue_full: notifies that a HW queue is full. * Must be atomic * @queue_not_full: notifies that a HW queue is not full any more. - * Ac is the ac of the queue. Must be atomic + * Must be atomic * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that * the radio is killed. Must be atomic. * @free_skb: allows the transport layer to free skbs that haven't been @@ -125,20 +126,23 @@ struct iwl_fw; * @cmd_queue_full: Called when the command queue gets full. Must be atomic. * @nic_config: configure NIC, called before firmware is started. * May sleep + * @wimax_active: invoked when WiMax becomes active. Must be atomic. 
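A minimal user-space sketch of the multi-command notification-wait matching introduced in the iwl-notif-wait changes above: a waiter now carries up to MAX_NOTIF_CMDS command IDs and an optional callback that can return false to keep waiting. The structs and the test harness below are simplified stand-ins, not the driver's types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_NOTIF_CMDS 5

struct notif_wait {
	uint8_t cmds[MAX_NOTIF_CMDS];
	int n_cmds;
	bool (*fn)(uint8_t cmd, void *data);	/* may be NULL */
	void *fn_data;
	bool triggered;
	bool aborted;
};

/* Returns true if this notification wakes the waiter. */
static bool notif_matches(struct notif_wait *w, uint8_t cmd)
{
	bool found = false;
	int i;

	/* Already finished or aborted: don't evaluate again. */
	if (w->triggered || w->aborted)
		return false;

	for (i = 0; i < w->n_cmds; i++) {
		if (w->cmds[i] == cmd) {
			found = true;
			break;
		}
	}
	if (!found)
		return false;

	/* No callback, or callback says we are done: trigger the waiter. */
	if (!w->fn || w->fn(cmd, w->fn_data)) {
		w->triggered = true;
		return true;
	}
	return false;	/* callback wants to keep waiting */
}

static bool want_second(uint8_t cmd, void *data)
{
	int *seen = data;
	return ++(*seen) >= 2;	/* only complete on the second matching cmd */
}

int main(void)
{
	int seen = 0;
	struct notif_wait w = {
		.cmds = { 0x10, 0x11 }, .n_cmds = 2,
		.fn = want_second, .fn_data = &seen,
	};

	printf("cmd 0x20 wakes: %d\n", notif_matches(&w, 0x20));	/* 0 */
	printf("cmd 0x10 wakes: %d\n", notif_matches(&w, 0x10));	/* 0 */
	printf("cmd 0x11 wakes: %d\n", notif_matches(&w, 0x11));	/* 1 */
	return 0;
}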
*/ struct iwl_op_mode_ops { struct iwl_op_mode *(*start)(struct iwl_trans *trans, + const struct iwl_cfg *cfg, const struct iwl_fw *fw); void (*stop)(struct iwl_op_mode *op_mode); int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd); - void (*queue_full)(struct iwl_op_mode *op_mode, u8 ac); - void (*queue_not_full)(struct iwl_op_mode *op_mode, u8 ac); + void (*queue_full)(struct iwl_op_mode *op_mode, int queue); + void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue); void (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state); void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb); void (*nic_error)(struct iwl_op_mode *op_mode); void (*cmd_queue_full)(struct iwl_op_mode *op_mode); void (*nic_config)(struct iwl_op_mode *op_mode); + void (*wimax_active)(struct iwl_op_mode *op_mode); }; /** @@ -169,15 +173,16 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode, return op_mode->ops->rx(op_mode, rxb, cmd); } -static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode, u8 ac) +static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode, + int queue) { - op_mode->ops->queue_full(op_mode, ac); + op_mode->ops->queue_full(op_mode, queue); } static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode, - u8 ac) + int queue) { - op_mode->ops->queue_not_full(op_mode, ac); + op_mode->ops->queue_not_full(op_mode, queue); } static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, @@ -208,6 +213,11 @@ static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode) op_mode->ops->nic_config(op_mode); } +static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode) +{ + op_mode->ops->wimax_active(op_mode); +} + /***************************************************** * Op mode layers implementations ******************************************************/ diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c index c5e339ee918..0c8a1c2d884 100644 --- a/drivers/net/wireless/iwlwifi/iwl-pci.c +++ b/drivers/net/wireless/iwlwifi/iwl-pci.c @@ -60,17 +60,18 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/pci.h> #include <linux/pci-aspm.h> -#include "iwl-io.h" -#include "iwl-shared.h" #include "iwl-trans.h" -#include "iwl-csr.h" #include "iwl-cfg.h" #include "iwl-drv.h" #include "iwl-trans.h" +#include "iwl-trans-pcie-int.h" #define IWL_PCI_DEVICE(dev, subdev, cfg) \ .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ @@ -261,61 +262,46 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); /* PCI registers */ #define PCI_CFG_RETRY_TIMEOUT 0x041 +#ifndef CONFIG_IWLWIFI_IDI + static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); - struct iwl_shared *shrd; struct iwl_trans *iwl_trans; - int err; - - shrd = kzalloc(sizeof(*iwl_trans->shrd), GFP_KERNEL); - if (!shrd) { - dev_printk(KERN_ERR, &pdev->dev, - "Couldn't allocate iwl_shared"); - err = -ENOMEM; - goto out_free_bus; - } + struct iwl_trans_pcie *trans_pcie; -#ifdef CONFIG_IWLWIFI_IDI - iwl_trans = iwl_trans_idi_alloc(shrd, pdev, ent); -#else - iwl_trans = iwl_trans_pcie_alloc(shrd, pdev, ent); -#endif - if (iwl_trans == NULL) { - err = -ENOMEM; - goto out_free_bus; - } + iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg); + if (iwl_trans == NULL) + return -ENOMEM; - shrd->trans = iwl_trans; pci_set_drvdata(pdev, iwl_trans); - err = iwl_drv_start(shrd, iwl_trans, cfg); - if (err) + trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans); + trans_pcie->drv = iwl_drv_start(iwl_trans, cfg); + if (!trans_pcie->drv) goto out_free_trans; return 0; out_free_trans: - iwl_trans_free(iwl_trans); + iwl_trans_pcie_free(iwl_trans); pci_set_drvdata(pdev, NULL); -out_free_bus: - kfree(shrd); - return err; + return -EFAULT; } static void __devexit iwl_pci_remove(struct pci_dev *pdev) { - struct iwl_trans *iwl_trans = pci_get_drvdata(pdev); - struct iwl_shared *shrd = iwl_trans->shrd; + struct iwl_trans *trans = pci_get_drvdata(pdev); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - iwl_drv_stop(shrd); - iwl_trans_free(shrd->trans); + iwl_drv_stop(trans_pcie->drv); + iwl_trans_pcie_free(trans); pci_set_drvdata(pdev, NULL); - - kfree(shrd); } +#endif /* CONFIG_IWLWIFI_IDI */ + #ifdef CONFIG_PM_SLEEP static int iwl_pci_suspend(struct device *device) @@ -360,6 +346,15 @@ static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume); #endif +#ifdef CONFIG_IWLWIFI_IDI +/* + * Defined externally in iwl-idi.c + */ +int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +void __devexit iwl_pci_remove(struct pci_dev *pdev); + +#endif /* CONFIG_IWLWIFI_IDI */ + static struct pci_driver iwl_pci_driver = { .name = DRV_NAME, .id_table = iwl_hw_card_ids, diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c new file mode 100644 index 00000000000..f166955340f --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c @@ -0,0 +1,288 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include <linux/slab.h> +#include <linux/string.h> + +#include "iwl-debug.h" +#include "iwl-dev.h" + +#include "iwl-phy-db.h" + +#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */ + +struct iwl_phy_db *iwl_phy_db_init(struct device *dev) +{ + struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db), + GFP_KERNEL); + + if (!phy_db) + return phy_db; + + phy_db->dev = dev; + + /* TODO: add default values of the phy db. */ + return phy_db; +} + +/* + * get phy db section: returns a pointer to a phy db section specified by + * type and channel group id. 
+ */ +static struct iwl_phy_db_entry * +iwl_phy_db_get_section(struct iwl_phy_db *phy_db, + enum iwl_phy_db_section_type type, + u16 chg_id) +{ + if (!phy_db || type < 0 || type >= IWL_PHY_DB_MAX) + return NULL; + + switch (type) { + case IWL_PHY_DB_CFG: + return &phy_db->cfg; + case IWL_PHY_DB_CALIB_NCH: + return &phy_db->calib_nch; + case IWL_PHY_DB_CALIB_CH: + return &phy_db->calib_ch; + case IWL_PHY_DB_CALIB_CHG_PAPD: + if (chg_id < 0 || chg_id >= IWL_NUM_PAPD_CH_GROUPS) + return NULL; + return &phy_db->calib_ch_group_papd[chg_id]; + case IWL_PHY_DB_CALIB_CHG_TXP: + if (chg_id < 0 || chg_id >= IWL_NUM_TXP_CH_GROUPS) + return NULL; + return &phy_db->calib_ch_group_txp[chg_id]; + default: + return NULL; + } + return NULL; +} + +static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db, + enum iwl_phy_db_section_type type, + u16 chg_id) +{ + struct iwl_phy_db_entry *entry = + iwl_phy_db_get_section(phy_db, type, chg_id); + if (!entry) + return; + + kfree(entry->data); + entry->data = NULL; + entry->size = 0; +} + +void iwl_phy_db_free(struct iwl_phy_db *phy_db) +{ + int i; + + if (!phy_db) + return; + + iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0); + iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0); + iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0); + for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++) + iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i); + for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) + iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i); + + kfree(phy_db); +} + +int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, + enum iwl_phy_db_section_type type, u8 *data, + u16 size, gfp_t alloc_ctx) +{ + struct iwl_phy_db_entry *entry; + u16 chg_id = 0; + + if (!phy_db) + return -EINVAL; + + if (type == IWL_PHY_DB_CALIB_CHG_PAPD || + type == IWL_PHY_DB_CALIB_CHG_TXP) + chg_id = le16_to_cpup((__le16 *)data); + + entry = iwl_phy_db_get_section(phy_db, type, chg_id); + if (!entry) + return -EINVAL; + + kfree(entry->data); + entry->data = kmemdup(data, size, alloc_ctx); + if (!entry->data) { + entry->size = 0; + return -ENOMEM; + } + + entry->size = size; + + if (type == IWL_PHY_DB_CALIB_CH) { + phy_db->channel_num = le32_to_cpup((__le32 *)data); + phy_db->channel_size = + (size - CHANNEL_NUM_SIZE) / phy_db->channel_num; + } + + return 0; +} + +static int is_valid_channel(u16 ch_id) +{ + if (ch_id <= 14 || + (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) || + (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) || + (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1)) + return 1; + return 0; +} + +static u8 ch_id_to_ch_index(u16 ch_id) +{ + if (WARN_ON(!is_valid_channel(ch_id))) + return 0xff; + + if (ch_id <= 14) + return ch_id - 1; + if (ch_id <= 64) + return (ch_id + 20) / 4; + if (ch_id <= 140) + return (ch_id - 12) / 4; + return (ch_id - 13) / 4; +} + + +static u16 channel_id_to_papd(u16 ch_id) +{ + if (WARN_ON(!is_valid_channel(ch_id))) + return 0xff; + + if (1 <= ch_id && ch_id <= 14) + return 0; + if (36 <= ch_id && ch_id <= 64) + return 1; + if (100 <= ch_id && ch_id <= 140) + return 2; + return 3; +} + +static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id) +{ + struct iwl_phy_db_chg_txp *txp_chg; + int i; + u8 ch_index = ch_id_to_ch_index(ch_id); + if (ch_index == 0xff) + return 0xff; + + for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) { + txp_chg = (void *)phy_db->calib_ch_group_txp[i].data; + if (!txp_chg) + return 0xff; + /* + * Looking for the first channel group that its max channel is + * higher then wanted channel. 
+ */ + if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index) + return i; + } + return 0xff; +} + +int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db, + enum iwl_phy_db_section_type type, u8 **data, + u16 *size, u16 ch_id) +{ + struct iwl_phy_db_entry *entry; + u32 channel_num; + u32 channel_size; + u16 ch_group_id = 0; + u16 index; + + if (!phy_db) + return -EINVAL; + + /* find wanted channel group */ + if (type == IWL_PHY_DB_CALIB_CHG_PAPD) + ch_group_id = channel_id_to_papd(ch_id); + else if (type == IWL_PHY_DB_CALIB_CHG_TXP) + ch_group_id = channel_id_to_txp(phy_db, ch_id); + + entry = iwl_phy_db_get_section(phy_db, type, ch_group_id); + if (!entry) + return -EINVAL; + + if (type == IWL_PHY_DB_CALIB_CH) { + index = ch_id_to_ch_index(ch_id); + channel_num = phy_db->channel_num; + channel_size = phy_db->channel_size; + if (index >= channel_num) { + IWL_ERR(phy_db, "Wrong channel number %d", ch_id); + return -EINVAL; + } + *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size; + *size = channel_size; + } else { + *data = entry->data; + *size = entry->size; + } + return 0; +} diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h new file mode 100644 index 00000000000..c34c6a9303a --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.h @@ -0,0 +1,129 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __IWL_PHYDB_H__ +#define __IWL_PHYDB_H__ + +#include <linux/types.h> + +#define IWL_NUM_PAPD_CH_GROUPS 4 +#define IWL_NUM_TXP_CH_GROUPS 8 + +struct iwl_phy_db_entry { + u16 size; + u8 *data; +}; + +struct iwl_shared; + +/** + * struct iwl_phy_db - stores phy configuration and calibration data. + * + * @cfg: phy configuration. + * @calib_nch: non channel specific calibration data. + * @calib_ch: channel specific calibration data. + * @calib_ch_group_papd: calibration data related to papd channel group. + * @calib_ch_group_txp: calibration data related to tx power chanel group. + */ +struct iwl_phy_db { + struct iwl_phy_db_entry cfg; + struct iwl_phy_db_entry calib_nch; + struct iwl_phy_db_entry calib_ch; + struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS]; + struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS]; + + u32 channel_num; + u32 channel_size; + + /* for an access to the logger */ + struct device *dev; +}; + +enum iwl_phy_db_section_type { + IWL_PHY_DB_CFG = 1, + IWL_PHY_DB_CALIB_NCH, + IWL_PHY_DB_CALIB_CH, + IWL_PHY_DB_CALIB_CHG_PAPD, + IWL_PHY_DB_CALIB_CHG_TXP, + IWL_PHY_DB_MAX +}; + +/* for parsing of tx power channel group data that comes from the firmware*/ +struct iwl_phy_db_chg_txp { + __le32 space; + __le16 max_channel_idx; +} __packed; + +struct iwl_phy_db *iwl_phy_db_init(struct device *dev); + +void iwl_phy_db_free(struct iwl_phy_db *phy_db); + +int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, + enum iwl_phy_db_section_type type, u8 *data, + u16 size, gfp_t alloc_ctx); + +int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db, + enum iwl_phy_db_section_type type, u8 **data, + u16 *size, u16 ch_id); + +#endif /* __IWL_PHYDB_H__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c index 958d9d09aee..8352265dbc4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-power.c +++ b/drivers/net/wireless/iwlwifi/iwl-power.c @@ -37,13 +37,12 @@ #include "iwl-eeprom.h" #include "iwl-dev.h" #include "iwl-agn.h" -#include "iwl-core.h" #include "iwl-io.h" #include "iwl-commands.h" #include "iwl-debug.h" #include "iwl-power.h" #include "iwl-trans.h" -#include "iwl-shared.h" +#include "iwl-modparams.h" /* * Setting power level allows the card to go to sleep when not busy. 
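[Illustrative note, not part of the patch] The iwl-power.c hunks that follow show a pattern repeated across the rest of this patch: the cfg(priv), hw_params(priv) and trans(priv) accessor macros that came with iwl-shared.h are dropped in favour of plain members (priv->cfg, priv->hw_params, priv->trans). A schematic fragment, assuming the usual driver headers; it only restates lines already visible in the hunk below:

static void sleep_flags_sketch(struct iwl_priv *priv,
			       struct iwl_powertable_cmd *cmd)
{
	/* was: cfg(priv)->base_params->shadow_reg_enable */
	if (priv->cfg->base_params->shadow_reg_enable)
		cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
	else
		cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
}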
@@ -167,7 +166,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv, u8 skip; u32 slp_itrvl; - if (cfg(priv)->adv_pm) { + if (priv->cfg->adv_pm) { table = apm_range_2; if (period <= IWL_DTIM_RANGE_1_MAX) table = apm_range_1; @@ -215,13 +214,13 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv, else cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; - if (cfg(priv)->base_params->shadow_reg_enable) + if (priv->cfg->base_params->shadow_reg_enable) cmd->flags |= IWL_POWER_SHADOW_REG_ENA; else cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; if (iwl_advanced_bt_coexist(priv)) { - if (!cfg(priv)->bt_params->bt_sco_disable) + if (!priv->cfg->bt_params->bt_sco_disable) cmd->flags |= IWL_POWER_BT_SCO_ENA; else cmd->flags &= ~IWL_POWER_BT_SCO_ENA; @@ -268,61 +267,6 @@ static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv, IWL_DEBUG_POWER(priv, "Sleep command for CAM\n"); } -static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv, - struct iwl_powertable_cmd *cmd, - int dynps_ms, int wakeup_period) -{ - /* - * These are the original power level 3 sleep successions. The - * device may behave better with such succession and was also - * only tested with that. Just like the original sleep commands, - * also adjust the succession here to the wakeup_period below. - * The ranges are the same as for the sleep commands, 0-2, 3-9 - * and >10, which is selected based on the DTIM interval for - * the sleep index but here we use the wakeup period since that - * is what we need to do for the latency requirements. - */ - static const u8 slp_succ_r0[IWL_POWER_VEC_SIZE] = { 2, 2, 2, 2, 2 }; - static const u8 slp_succ_r1[IWL_POWER_VEC_SIZE] = { 2, 4, 6, 7, 9 }; - static const u8 slp_succ_r2[IWL_POWER_VEC_SIZE] = { 2, 7, 9, 9, 0xFF }; - const u8 *slp_succ = slp_succ_r0; - int i; - - if (wakeup_period > IWL_DTIM_RANGE_0_MAX) - slp_succ = slp_succ_r1; - if (wakeup_period > IWL_DTIM_RANGE_1_MAX) - slp_succ = slp_succ_r2; - - memset(cmd, 0, sizeof(*cmd)); - - cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK | - IWL_POWER_FAST_PD; /* no use seeing frames for others */ - - if (priv->power_data.bus_pm) - cmd->flags |= IWL_POWER_PCI_PM_MSK; - - if (cfg(priv)->base_params->shadow_reg_enable) - cmd->flags |= IWL_POWER_SHADOW_REG_ENA; - else - cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; - - if (iwl_advanced_bt_coexist(priv)) { - if (!cfg(priv)->bt_params->bt_sco_disable) - cmd->flags |= IWL_POWER_BT_SCO_ENA; - else - cmd->flags &= ~IWL_POWER_BT_SCO_ENA; - } - - cmd->rx_data_timeout = cpu_to_le32(1000 * dynps_ms); - cmd->tx_data_timeout = cpu_to_le32(1000 * dynps_ms); - - for (i = 0; i < IWL_POWER_VEC_SIZE; i++) - cmd->sleep_interval[i] = - cpu_to_le32(min_t(int, slp_succ[i], wakeup_period)); - - IWL_DEBUG_POWER(priv, "Automatic sleep command\n"); -} - static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd) { IWL_DEBUG_POWER(priv, "Sending power/sleep command\n"); @@ -350,7 +294,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv, if (priv->wowlan) iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper); - else if (!cfg(priv)->base_params->no_idle_support && + else if (!priv->cfg->base_params->no_idle_support && priv->hw->conf.flags & IEEE80211_CONF_IDLE) iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); else if (iwl_tt_is_low_power_state(priv)) { @@ -363,18 +307,15 @@ static void iwl_power_build_cmd(struct iwl_priv *priv, iwl_static_sleep_cmd(priv, cmd, priv->power_data.debug_sleep_level_override, dtimper); - else if (iwlagn_mod_params.no_sleep_autoadjust) { - if 
(iwlagn_mod_params.power_level > IWL_POWER_INDEX_1 && - iwlagn_mod_params.power_level <= IWL_POWER_INDEX_5) + else { + if (iwlwifi_mod_params.power_level > IWL_POWER_INDEX_1 && + iwlwifi_mod_params.power_level <= IWL_POWER_INDEX_5) iwl_static_sleep_cmd(priv, cmd, - iwlagn_mod_params.power_level, dtimper); + iwlwifi_mod_params.power_level, dtimper); else iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_1, dtimper); - } else - iwl_power_fill_sleep_cmd(priv, cmd, - priv->hw->conf.dynamic_ps_timeout, - priv->hw->conf.max_sleep_period); + } } int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, @@ -403,12 +344,12 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, } if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK) - set_bit(STATUS_POWER_PMI, &priv->shrd->status); + iwl_dvm_set_pmi(priv, true); ret = iwl_set_power(priv, cmd); if (!ret) { if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)) - clear_bit(STATUS_POWER_PMI, &priv->shrd->status); + iwl_dvm_set_pmi(priv, false); if (update_chains) iwl_update_chain_flags(priv); @@ -436,7 +377,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force) /* initialize to default */ void iwl_power_initialize(struct iwl_priv *priv) { - priv->power_data.bus_pm = trans(priv)->pm_support; + priv->power_data.bus_pm = priv->trans->pm_support; priv->power_data.debug_sleep_level_override = -1; diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h index 07a19fce5fd..21afc92efac 100644 --- a/drivers/net/wireless/iwlwifi/iwl-power.h +++ b/drivers/net/wireless/iwlwifi/iwl-power.h @@ -30,15 +30,6 @@ #include "iwl-commands.h" -enum iwl_power_level { - IWL_POWER_INDEX_1, - IWL_POWER_INDEX_2, - IWL_POWER_INDEX_3, - IWL_POWER_INDEX_4, - IWL_POWER_INDEX_5, - IWL_POWER_NUM -}; - struct iwl_power_mgr { struct iwl_powertable_cmd sleep_cmd; struct iwl_powertable_cmd sleep_cmd_next; diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index 902efe4bc89..a8437a6bc18 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c @@ -32,7 +32,6 @@ #include "iwl-eeprom.h" #include "iwl-dev.h" -#include "iwl-core.h" #include "iwl-io.h" #include "iwl-agn.h" #include "iwl-trans.h" @@ -69,7 +68,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv) if (!test_bit(STATUS_READY, &priv->status) || !test_bit(STATUS_GEO_CONFIGURED, &priv->status) || !test_bit(STATUS_SCAN_HW, &priv->status) || - test_bit(STATUS_FW_ERROR, &priv->shrd->status)) + test_bit(STATUS_FW_ERROR, &priv->status)) return -EIO; ret = iwl_dvm_send_cmd(priv, &cmd); @@ -451,6 +450,46 @@ static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, return iwl_limit_dwell(priv, passive); } +/* Return valid, unused, channel for a passive scan to reset the RF */ +static u8 iwl_get_single_channel_number(struct iwl_priv *priv, + enum ieee80211_band band) +{ + const struct iwl_channel_info *ch_info; + int i; + u8 channel = 0; + u8 min, max; + struct iwl_rxon_context *ctx; + + if (band == IEEE80211_BAND_5GHZ) { + min = 14; + max = priv->channel_count; + } else { + min = 0; + max = 14; + } + + for (i = min; i < max; i++) { + bool busy = false; + + for_each_context(priv, ctx) { + busy = priv->channel_info[i].channel == + le16_to_cpu(ctx->staging.channel); + if (busy) + break; + } + + if (busy) + continue; + + channel = priv->channel_info[i].channel; + ch_info = iwl_get_channel_info(priv, band, channel); + if (is_channel_valid(ch_info)) + break; + } + + 
return channel; +} + static int iwl_get_single_channel_for_scan(struct iwl_priv *priv, struct ieee80211_vif *vif, enum ieee80211_band band, @@ -633,12 +672,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) u16 rx_chain = 0; enum ieee80211_band band; u8 n_probes = 0; - u8 rx_ant = hw_params(priv).valid_rx_ant; + u8 rx_ant = priv->hw_params.valid_rx_ant; u8 rate; bool is_active = false; int chan_mod; u8 active_chains; - u8 scan_tx_antennas = hw_params(priv).valid_tx_ant; + u8 scan_tx_antennas = priv->hw_params.valid_tx_ant; int ret; lockdep_assert_held(&priv->mutex); @@ -751,8 +790,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) * Internal scans are passive, so we can indiscriminately set * the BT ignore flag on 2.4 GHz since it applies to TX only. */ - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist) + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist) scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT; break; case IEEE80211_BAND_5GHZ: @@ -793,12 +832,9 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) band = priv->scan_band; - if (cfg(priv)->scan_rx_antennas[band]) - rx_ant = cfg(priv)->scan_rx_antennas[band]; - if (band == IEEE80211_BAND_2GHZ && - cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist) { + priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist) { /* transmit 2.4 GHz probes only on first antenna */ scan_tx_antennas = first_antenna(scan_tx_antennas); } @@ -809,8 +845,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]); scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags); - /* In power save mode use one chain, otherwise use all chains */ - if (test_bit(STATUS_POWER_PMI, &priv->shrd->status)) { + /* + * In power save mode while associated use one chain, + * otherwise use all chains + */ + if (test_bit(STATUS_POWER_PMI, &priv->status) && + !(priv->hw->conf.flags & IEEE80211_CONF_IDLE)) { /* rx_ant has been set to all valid chains previously */ active_chains = rx_ant & ((u8)(priv->chain_noise_data.active_chains)); @@ -822,8 +862,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) rx_ant = first_antenna(active_chains); } - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist && + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist && priv->bt_full_concurrent) { /* operated as 1x1 in full concurrency mode */ rx_ant = first_antenna(rx_ant); @@ -831,7 +871,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) /* MIMO is not used here, but value is required */ rx_chain |= - hw_params(priv).valid_rx_ant << RXON_RX_CHAIN_VALID_POS; + priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS; rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; @@ -944,7 +984,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) void iwl_init_scan_params(struct iwl_priv *priv) { - u8 ant_idx = fls(hw_params(priv).valid_tx_ant) - 1; + u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1; if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ]) priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.c 
b/drivers/net/wireless/iwlwifi/iwl-testmode.c index 76f7f925143..060aac3e22f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-testmode.c +++ b/drivers/net/wireless/iwlwifi/iwl-testmode.c @@ -71,7 +71,6 @@ #include <net/netlink.h> #include "iwl-dev.h" -#include "iwl-core.h" #include "iwl-debug.h" #include "iwl-io.h" #include "iwl-agn.h" @@ -184,9 +183,10 @@ static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv, "Run out of memory for messages to user space ?\n"); return; } - NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT); - /* the length doesn't include len_n_flags field, so add it manually */ - NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data); + if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) || + /* the length doesn't include len_n_flags field, so add it manually */ + nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data)) + goto nla_put_failure; cfg80211_testmode_event(skb, GFP_ATOMIC); return; @@ -218,7 +218,7 @@ static void iwl_trace_cleanup(struct iwl_priv *priv) if (priv->testmode_trace.trace_enabled) { if (priv->testmode_trace.cpu_addr && priv->testmode_trace.dma_addr) - dma_free_coherent(trans(priv)->dev, + dma_free_coherent(priv->trans->dev, priv->testmode_trace.total_size, priv->testmode_trace.cpu_addr, priv->testmode_trace.dma_addr); @@ -314,8 +314,9 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb) memcpy(reply_buf, &(pkt->hdr), reply_len); iwl_free_resp(&cmd); - NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT); - NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf); + if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) || + nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf)) + goto nla_put_failure; return cfg80211_testmode_reply(skb); nla_put_failure: @@ -371,7 +372,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb) switch (cmd) { case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: - val32 = iwl_read_direct32(trans(priv), ofs); + val32 = iwl_read_direct32(priv->trans, ofs); IWL_INFO(priv, "32bit value to read 0x%x\n", val32); skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); @@ -379,7 +380,8 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb) IWL_ERR(priv, "Memory allocation fail\n"); return -ENOMEM; } - NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32); + if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32)) + goto nla_put_failure; status = cfg80211_testmode_reply(skb); if (status < 0) IWL_ERR(priv, "Error sending msg : %d\n", status); @@ -391,7 +393,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb) } else { val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]); IWL_INFO(priv, "32bit value to write 0x%x\n", val32); - iwl_write_direct32(trans(priv), ofs, val32); + iwl_write_direct32(priv->trans, ofs, val32); } break; case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: @@ -401,7 +403,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb) } else { val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]); IWL_INFO(priv, "8bit value to write 0x%x\n", val8); - iwl_write8(trans(priv), ofs, val8); + iwl_write8(priv->trans, ofs, val8); } break; default: @@ -420,10 +422,13 @@ nla_put_failure: static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv) { struct iwl_notification_wait calib_wait; + static const u8 calib_complete[] = { + CALIBRATION_COMPLETE_NOTIFICATION + }; int ret; iwl_init_notification_wait(&priv->notif_wait, 
&calib_wait, - CALIBRATION_COMPLETE_NOTIFICATION, + calib_complete, ARRAY_SIZE(calib_complete), NULL, NULL); ret = iwl_init_alive_start(priv); if (ret) { @@ -461,7 +466,7 @@ cfg_init_calib_error: static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - struct iwl_trans *trans = trans(priv); + struct iwl_trans *trans = priv->trans; struct sk_buff *skb; unsigned char *rsp_data_ptr = NULL; int status = 0, rsp_data_len = 0; @@ -470,18 +475,19 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { case IWL_TM_CMD_APP2DEV_GET_DEVICENAME: - rsp_data_ptr = (unsigned char *)cfg(priv)->name; - rsp_data_len = strlen(cfg(priv)->name); + rsp_data_ptr = (unsigned char *)priv->cfg->name; + rsp_data_len = strlen(priv->cfg->name); skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, rsp_data_len + 20); if (!skb) { IWL_ERR(priv, "Memory allocation fail\n"); return -ENOMEM; } - NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, - IWL_TM_CMD_DEV2APP_SYNC_RSP); - NLA_PUT(skb, IWL_TM_ATTR_SYNC_RSP, - rsp_data_len, rsp_data_ptr); + if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, + IWL_TM_CMD_DEV2APP_SYNC_RSP) || + nla_put(skb, IWL_TM_ATTR_SYNC_RSP, + rsp_data_len, rsp_data_ptr)) + goto nla_put_failure; status = cfg80211_testmode_reply(skb); if (status < 0) IWL_ERR(priv, "Error sending msg : %d\n", status); @@ -529,18 +535,19 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) break; case IWL_TM_CMD_APP2DEV_GET_EEPROM: - if (priv->shrd->eeprom) { + if (priv->eeprom) { skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, - cfg(priv)->base_params->eeprom_size + 20); + priv->cfg->base_params->eeprom_size + 20); if (!skb) { IWL_ERR(priv, "Memory allocation fail\n"); return -ENOMEM; } - NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, - IWL_TM_CMD_DEV2APP_EEPROM_RSP); - NLA_PUT(skb, IWL_TM_ATTR_EEPROM, - cfg(priv)->base_params->eeprom_size, - priv->shrd->eeprom); + if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, + IWL_TM_CMD_DEV2APP_EEPROM_RSP) || + nla_put(skb, IWL_TM_ATTR_EEPROM, + priv->cfg->base_params->eeprom_size, + priv->eeprom)) + goto nla_put_failure; status = cfg80211_testmode_reply(skb); if (status < 0) IWL_ERR(priv, "Error sending msg : %d\n", @@ -566,15 +573,16 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) IWL_ERR(priv, "Memory allocation fail\n"); return -ENOMEM; } - NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION, - priv->fw->ucode_ver); + if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, + priv->fw->ucode_ver)) + goto nla_put_failure; status = cfg80211_testmode_reply(skb); if (status < 0) IWL_ERR(priv, "Error sending msg : %d\n", status); break; case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: - devid = trans(priv)->hw_id; + devid = priv->trans->hw_id; IWL_INFO(priv, "hw version: 0x%x\n", devid); skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); @@ -582,7 +590,8 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) IWL_ERR(priv, "Memory allocation fail\n"); return -ENOMEM; } - NLA_PUT_U32(skb, IWL_TM_ATTR_DEVICE_ID, devid); + if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid)) + goto nla_put_failure; status = cfg80211_testmode_reply(skb); if (status < 0) IWL_ERR(priv, "Error sending msg : %d\n", status); @@ -598,13 +607,14 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) IWL_ERR(priv, "No uCode has not been loaded\n"); return -EINVAL; } else { - img = &priv->fw->img[priv->shrd->ucode_type]; + img = 
&priv->fw->img[priv->cur_ucode]; inst_size = img->sec[IWL_UCODE_SECTION_INST].len; data_size = img->sec[IWL_UCODE_SECTION_DATA].len; } - NLA_PUT_U32(skb, IWL_TM_ATTR_FW_TYPE, priv->shrd->ucode_type); - NLA_PUT_U32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size); - NLA_PUT_U32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size); + if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) || + nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) || + nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size)) + goto nla_put_failure; status = cfg80211_testmode_reply(skb); if (status < 0) IWL_ERR(priv, "Error sending msg : %d\n", status); @@ -639,7 +649,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb) struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct sk_buff *skb; int status = 0; - struct device *dev = trans(priv)->dev; + struct device *dev = priv->trans->dev; switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { case IWL_TM_CMD_APP2DEV_BEGIN_TRACE: @@ -678,9 +688,10 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb) iwl_trace_cleanup(priv); return -ENOMEM; } - NLA_PUT(skb, IWL_TM_ATTR_TRACE_ADDR, - sizeof(priv->testmode_trace.dma_addr), - (u64 *)&priv->testmode_trace.dma_addr); + if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR, + sizeof(priv->testmode_trace.dma_addr), + (u64 *)&priv->testmode_trace.dma_addr)) + goto nla_put_failure; status = cfg80211_testmode_reply(skb); if (status < 0) { IWL_ERR(priv, "Error sending msg : %d\n", status); @@ -725,9 +736,10 @@ static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, length = priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE; - NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length, - priv->testmode_trace.trace_addr + - (DUMP_CHUNK_SIZE * idx)); + if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length, + priv->testmode_trace.trace_addr + + (DUMP_CHUNK_SIZE * idx))) + goto nla_put_failure; idx++; cb->args[4] = idx; return 0; @@ -779,7 +791,7 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb) static int iwl_testmode_indirect_read(struct iwl_priv *priv, u32 addr, u32 size) { - struct iwl_trans *trans = trans(priv); + struct iwl_trans *trans = priv->trans; unsigned long flags; int i; @@ -819,7 +831,7 @@ static int iwl_testmode_indirect_read(struct iwl_priv *priv, u32 addr, u32 size) static int iwl_testmode_indirect_write(struct iwl_priv *priv, u32 addr, u32 size, unsigned char *buf) { - struct iwl_trans *trans = trans(priv); + struct iwl_trans *trans = priv->trans; u32 val, i; unsigned long flags; @@ -922,9 +934,10 @@ static int iwl_testmode_buffer_dump(struct ieee80211_hw *hw, length = priv->testmode_mem.buff_size % DUMP_CHUNK_SIZE; - NLA_PUT(skb, IWL_TM_ATTR_BUFFER_DUMP, length, - priv->testmode_mem.buff_addr + - (DUMP_CHUNK_SIZE * idx)); + if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length, + priv->testmode_mem.buff_addr + + (DUMP_CHUNK_SIZE * idx))) + goto nla_put_failure; idx++; cb->args[4] = idx; return 0; diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h index 1c2fe87bd7e..6213c05a4b5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h @@ -34,17 +34,15 @@ #include <linux/skbuff.h> #include <linux/wait.h> #include <linux/pci.h> +#include <linux/timer.h> #include "iwl-fh.h" #include "iwl-csr.h" -#include "iwl-shared.h" #include "iwl-trans.h" #include "iwl-debug.h" #include "iwl-io.h" #include "iwl-op-mode.h" -struct iwl_tx_queue; -struct iwl_queue; struct iwl_host_cmd; 
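[Illustrative note, not part of the patch] The iwl-testmode.c conversion above replaces the old NLA_PUT* macros, which jumped to the nla_put_failure label internally, with nla_put*() calls whose return value is checked explicitly. A minimal sketch of the resulting pattern; the function name is invented and the error codes are an assumption:

static int testmode_reply_sketch(struct ieee80211_hw *hw, u32 devid)
{
	struct sk_buff *skb;

	/* room for one u32 attribute plus netlink/testmode overhead */
	skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
	if (!skb)
		return -ENOMEM;

	/* nla_put_u32() returns non-zero when the skb runs out of space */
	if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid)) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	return cfg80211_testmode_reply(skb);
}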
/*This file includes the declaration that are internal to the @@ -136,21 +134,14 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd) return --index & (n_bd - 1); } -/* - * This queue number is required for proper operation - * because the ucode will stop/start the scheduler as - * required. - */ -#define IWL_IPAN_MCAST_QUEUE 8 - struct iwl_cmd_meta { /* only for SYNC commands, iff the reply skb is wanted */ struct iwl_host_cmd *source; - u32 flags; - DEFINE_DMA_UNMAP_ADDR(mapping); DEFINE_DMA_UNMAP_LEN(len); + + u32 flags; }; /* @@ -188,72 +179,66 @@ struct iwl_queue { * space less than this */ }; +#define TFD_TX_CMD_SLOTS 256 +#define TFD_CMD_SLOTS 32 + +struct iwl_pcie_tx_queue_entry { + struct iwl_device_cmd *cmd; + struct sk_buff *skb; + struct iwl_cmd_meta meta; +}; + /** * struct iwl_tx_queue - Tx Queue for DMA * @q: generic Rx/Tx queue descriptor - * @bd: base of circular buffer of TFDs - * @cmd: array of command/TX buffer pointers - * @meta: array of meta data for each command/tx buffer - * @dma_addr_cmd: physical address of cmd/tx buffer array - * @txb: array of per-TFD driver data - * lock: queue lock - * @time_stamp: time (in jiffies) of last read_ptr change + * @tfds: transmit frame descriptors (DMA memory) + * @entries: transmit entries (driver state) + * @lock: queue lock + * @stuck_timer: timer that fires if queue gets stuck + * @trans_pcie: pointer back to transport (for timer) * @need_update: indicates need to update read/write index - * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled - * @sta_id: valid if sched_retry is set - * @tid: valid if sched_retry is set + * @active: stores if queue is active * * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame * descriptors) and required locking structures. */ -#define TFD_TX_CMD_SLOTS 256 -#define TFD_CMD_SLOTS 32 - struct iwl_tx_queue { struct iwl_queue q; struct iwl_tfd *tfds; - struct iwl_device_cmd **cmd; - struct iwl_cmd_meta *meta; - struct sk_buff **skbs; + struct iwl_pcie_tx_queue_entry *entries; spinlock_t lock; - unsigned long time_stamp; + struct timer_list stuck_timer; + struct iwl_trans_pcie *trans_pcie; u8 need_update; - u8 sched_retry; u8 active; - u8 swq_id; - - u16 sta_id; - u16 tid; }; /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data * @rx_replenish: work that will be called when buffers need to be allocated + * @drv - pointer to iwl_drv * @trans: pointer to the generic transport area * @irq - the irq number for the device * @irq_requested: true when the irq has been requested * @scd_base_addr: scheduler sram base address in SRAM * @scd_bc_tbls: pointer to the byte count table of the scheduler * @kw: keep warm address - * @ac_to_fifo: to what fifo is a specifc AC mapped ? - * @ac_to_queue: to what tx queue is a specifc AC mapped ? - * @mcast_queue: - * @txq: Tx DMA processing queues - * @txq_ctx_active_msk: what queue is active - * queue_stopped: tracks what queue is stopped - * queue_stop_count: tracks what SW queue is stopped * @pci_dev: basic pci-network driver stuff * @hw_base: pci hardware address support * @ucode_write_complete: indicates that the ucode has been copied. 
* @ucode_write_waitq: wait queue for uCode load * @status - transport specific status flags * @cmd_queue - command queue number + * @rx_buf_size_8k: 8 kB RX buffer size + * @rx_page_order: page order for receive buffer size + * @wd_timeout: queue watchdog timeout (jiffies) */ struct iwl_trans_pcie { struct iwl_rx_queue rxq; struct work_struct rx_replenish; struct iwl_trans *trans; + struct iwl_drv *drv; /* INT ICT Table */ __le32 *ict_tbl; @@ -272,16 +257,9 @@ struct iwl_trans_pcie { struct iwl_dma_ptr scd_bc_tbls; struct iwl_dma_ptr kw; - const u8 *ac_to_fifo[NUM_IWL_RXON_CTX]; - const u8 *ac_to_queue[NUM_IWL_RXON_CTX]; - u8 mcast_queue[NUM_IWL_RXON_CTX]; - u8 agg_txq[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT]; - struct iwl_tx_queue *txq; - unsigned long txq_ctx_active_msk; -#define IWL_MAX_HW_QUEUES 32 + unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; - atomic_t queue_stop_count[4]; /* PCI bus related data */ struct pci_dev *pci_dev; @@ -293,11 +271,41 @@ struct iwl_trans_pcie { u8 cmd_queue; u8 n_no_reclaim_cmds; u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; + u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES]; + u8 n_q_to_fifo; + + bool rx_buf_size_8k; + u32 rx_page_order; + + const char **command_names; + + /* queue watchdog */ + unsigned long wd_timeout; }; +/***************************************************** +* DRIVER STATUS FUNCTIONS +******************************************************/ +#define STATUS_HCMD_ACTIVE 0 +#define STATUS_DEVICE_ENABLED 1 +#define STATUS_TPOWER_PMI 2 +#define STATUS_INT_ENABLED 3 + #define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \ ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific)) +static inline struct iwl_trans * +iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie) +{ + return container_of((void *)trans_pcie, struct iwl_trans, + trans_specific); +} + +struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, + const struct pci_device_id *ent, + const struct iwl_cfg *cfg); +void iwl_trans_pcie_free(struct iwl_trans *trans); + /***************************************************** * RX ******************************************************/ @@ -331,15 +339,12 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, struct iwl_tx_queue *txq, u16 byte_cnt); -int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, - int sta_id, int tid); +void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int queue); void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index); void iwl_trans_tx_queue_set_status(struct iwl_trans *trans, - struct iwl_tx_queue *txq, - int tx_fifo_id, int scd_retry); -int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, int sta_id, int tid); -void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx, + struct iwl_tx_queue *txq, + int tx_fifo_id, bool active); +void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo, int sta_id, int tid, int frame_limit, u16 ssn); void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, int index, enum dma_data_direction dma_dir); @@ -350,8 +355,6 @@ int iwl_queue_space(const struct iwl_queue *q); /***************************************************** * Error handling ******************************************************/ -int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log, - char **buf, bool display); int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool 
display); void iwl_dump_csr(struct iwl_trans *trans); @@ -388,91 +391,28 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL); } -/* - * we have 8 bits used like this: - * - * 7 6 5 4 3 2 1 0 - * | | | | | | | | - * | | | | | | +-+-------- AC queue (0-3) - * | | | | | | - * | +-+-+-+-+------------ HW queue ID - * | - * +---------------------- unused - */ -static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq) -{ - BUG_ON(ac > 3); /* only have 2 bits */ - BUG_ON(hwq > 31); /* only use 5 bits */ - - txq->swq_id = (hwq << 2) | ac; -} - -static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq) -{ - return txq->swq_id & 0x3; -} - static inline void iwl_wake_queue(struct iwl_trans *trans, struct iwl_tx_queue *txq) { - u8 queue = txq->swq_id; - u8 ac = queue & 3; - u8 hwq = (queue >> 2) & 0x1f; - struct iwl_trans_pcie *trans_pcie = - IWL_TRANS_GET_PCIE_TRANS(trans); - - if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) { - if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) { - iwl_op_mode_queue_not_full(trans->op_mode, ac); - IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d", - hwq, ac); - } else { - IWL_DEBUG_TX_QUEUES(trans, - "Don't wake hwq %d ac %d stop count %d", - hwq, ac, - atomic_read(&trans_pcie->queue_stop_count[ac])); - } + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) { + IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id); + iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id); } } static inline void iwl_stop_queue(struct iwl_trans *trans, struct iwl_tx_queue *txq) { - u8 queue = txq->swq_id; - u8 ac = queue & 3; - u8 hwq = (queue >> 2) & 0x1f; - struct iwl_trans_pcie *trans_pcie = - IWL_TRANS_GET_PCIE_TRANS(trans); - - if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) { - if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) { - iwl_op_mode_queue_full(trans->op_mode, ac); - IWL_DEBUG_TX_QUEUES(trans, - "Stop hwq %d ac %d stop count %d", - hwq, ac, - atomic_read(&trans_pcie->queue_stop_count[ac])); - } else { - IWL_DEBUG_TX_QUEUES(trans, - "Don't stop hwq %d ac %d stop count %d", - hwq, ac, - atomic_read(&trans_pcie->queue_stop_count[ac])); - } - } else { - IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped", - hwq); - } -} - -static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie, - int txq_id) -{ - set_bit(txq_id, &trans_pcie->txq_ctx_active_msk); -} + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -static inline void iwl_txq_ctx_deactivate(struct iwl_trans_pcie *trans_pcie, - int txq_id) -{ - clear_bit(txq_id, &trans_pcie->txq_ctx_active_msk); + if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) { + iwl_op_mode_queue_full(trans->op_mode, txq->q.id); + IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id); + } else + IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n", + txq->q.id); } static inline int iwl_queue_used(const struct iwl_queue *q, int i) @@ -487,19 +427,18 @@ static inline u8 get_cmd_index(struct iwl_queue *q, u32 index) return index & (q->n_window - 1); } -#define IWL_TX_FIFO_BK 0 /* shared */ -#define IWL_TX_FIFO_BE 1 -#define IWL_TX_FIFO_VI 2 /* shared */ -#define IWL_TX_FIFO_VO 3 -#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK -#define IWL_TX_FIFO_BE_IPAN 4 -#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI -#define IWL_TX_FIFO_VO_IPAN 5 -/* re-uses the VO FIFO, uCode will properly flush/schedule */ 
-#define IWL_TX_FIFO_AUX 5 -#define IWL_TX_FIFO_UNUSED -1 - -/* AUX (TX during scan dwell) queue */ -#define IWL_AUX_QUEUE 10 +static inline const char * +trans_pcie_get_cmd_string(struct iwl_trans_pcie *trans_pcie, u8 cmd) +{ + if (!trans_pcie->command_names || !trans_pcie->command_names[cmd]) + return "UNKNOWN"; + return trans_pcie->command_names[cmd]; +} + +static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) +{ + return !(iwl_read32(trans, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); +} #endif /* __iwl_trans_int_pcie_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c index aa7aea16813..08517d3c80b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c @@ -140,14 +140,17 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans, if (q->need_update == 0) goto exit_unlock; - if (cfg(trans)->base_params->shadow_reg_enable) { + if (trans->cfg->base_params->shadow_reg_enable) { /* shadow register enabled */ /* Device expects a multiple of 8 */ q->write_actual = (q->write & ~0x7); iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual); } else { + struct iwl_trans_pcie *trans_pcie = + IWL_TRANS_GET_PCIE_TRANS(trans); + /* If power-saving is in use, make sure device is awake */ - if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) { + if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) { reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { @@ -271,17 +274,17 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority) if (rxq->free_count > RX_LOW_WATERMARK) gfp_mask |= __GFP_NOWARN; - if (hw_params(trans).rx_page_order > 0) + if (trans_pcie->rx_page_order > 0) gfp_mask |= __GFP_COMP; /* Alloc a new receive buffer */ page = alloc_pages(gfp_mask, - hw_params(trans).rx_page_order); + trans_pcie->rx_page_order); if (!page) { if (net_ratelimit()) IWL_DEBUG_INFO(trans, "alloc_pages failed, " "order: %d\n", - hw_params(trans).rx_page_order); + trans_pcie->rx_page_order); if ((rxq->free_count <= RX_LOW_WATERMARK) && net_ratelimit()) @@ -300,7 +303,7 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority) if (list_empty(&rxq->rx_used)) { spin_unlock_irqrestore(&rxq->lock, flags); - __free_pages(page, hw_params(trans).rx_page_order); + __free_pages(page, trans_pcie->rx_page_order); return; } element = rxq->rx_used.next; @@ -313,7 +316,7 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority) rxb->page = page; /* Get physical address of the RB */ rxb->page_dma = dma_map_page(trans->dev, page, 0, - PAGE_SIZE << hw_params(trans).rx_page_order, + PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE); /* dma address must be no more than 36 bits */ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); @@ -362,84 +365,98 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans, struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rx_queue *rxq = &trans_pcie->rxq; struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; - struct iwl_device_cmd *cmd; unsigned long flags; - int len, err; - u16 sequence; - struct iwl_rx_cmd_buffer rxcb; - struct iwl_rx_packet *pkt; - bool reclaim; - int index, cmd_index; + bool page_stolen = false; + int max_len = PAGE_SIZE << trans_pcie->rx_page_order; + u32 offset = 0; if (WARN_ON(!rxb)) return; - rxcb.truesize = PAGE_SIZE << hw_params(trans).rx_page_order; - dma_unmap_page(trans->dev, rxb->page_dma, - 
rxcb.truesize, - DMA_FROM_DEVICE); - - rxcb._page = rxb->page; - pkt = rxb_addr(&rxcb); + dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); - IWL_DEBUG_RX(trans, "%s, 0x%02x\n", - get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); + while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { + struct iwl_rx_packet *pkt; + struct iwl_device_cmd *cmd; + u16 sequence; + bool reclaim; + int index, cmd_index, err, len; + struct iwl_rx_cmd_buffer rxcb = { + ._offset = offset, + ._page = rxb->page, + ._page_stolen = false, + .truesize = max_len, + }; + pkt = rxb_addr(&rxcb); - len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; - len += sizeof(u32); /* account for status word */ - trace_iwlwifi_dev_rx(trans->dev, pkt, len); - - /* Reclaim a command buffer only if this packet is a response - * to a (driver-originated) command. - * If the packet (e.g. Rx frame) originated from uCode, - * there is no command buffer to reclaim. - * Ucode should set SEQ_RX_FRAME bit if ucode-originated, - * but apparently a few don't get set; catch them here. */ - reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); - if (reclaim) { - int i; + if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) + break; - for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { - if (trans_pcie->no_reclaim_cmds[i] == pkt->hdr.cmd) { - reclaim = false; - break; + IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n", + rxcb._offset, + trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd), + pkt->hdr.cmd); + + len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + len += sizeof(u32); /* account for status word */ + trace_iwlwifi_dev_rx(trans->dev, pkt, len); + + /* Reclaim a command buffer only if this packet is a response + * to a (driver-originated) command. + * If the packet (e.g. Rx frame) originated from uCode, + * there is no command buffer to reclaim. + * Ucode should set SEQ_RX_FRAME bit if ucode-originated, + * but apparently a few don't get set; catch them here. */ + reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); + if (reclaim) { + int i; + + for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { + if (trans_pcie->no_reclaim_cmds[i] == + pkt->hdr.cmd) { + reclaim = false; + break; + } } } - } - sequence = le16_to_cpu(pkt->hdr.sequence); - index = SEQ_TO_INDEX(sequence); - cmd_index = get_cmd_index(&txq->q, index); + sequence = le16_to_cpu(pkt->hdr.sequence); + index = SEQ_TO_INDEX(sequence); + cmd_index = get_cmd_index(&txq->q, index); - if (reclaim) - cmd = txq->cmd[cmd_index]; - else - cmd = NULL; + if (reclaim) + cmd = txq->entries[cmd_index].cmd; + else + cmd = NULL; - err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); + err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); - /* - * XXX: After here, we should always check rxcb._page - * against NULL before touching it or its virtual - * memory (pkt). Because some rx_handler might have - * already taken or freed the pages. - */ + /* + * After here, we should always check rxcb._page_stolen, + * if it is true then one of the handlers took the page. 
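[Illustrative note, not part of the patch] The rewritten receive path above no longer assumes one packet per RX page; it walks the page, hands each packet to the op-mode, and tracks whether any handler stole the page. A condensed sketch of the walk (names are illustrative; the real loop, including command reclaim, is in iwl_rx_handle_rxbuf() above):

static void rx_page_walk_sketch(void *page_addr, int max_len)
{
	u32 offset = 0;

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt = page_addr + offset;
		int len;

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32);		/* status word */

		/* ... dispatch pkt to the op-mode here ... */

		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}
}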
+ */ - if (reclaim) { - /* Invoke any callbacks, transfer the buffer to caller, - * and fire off the (possibly) blocking - * iwl_trans_send_cmd() - * as we reclaim the driver command queue */ - if (rxcb._page) - iwl_tx_cmd_complete(trans, &rxcb, err); - else - IWL_WARN(trans, "Claim null rxb?\n"); + if (reclaim) { + /* Invoke any callbacks, transfer the buffer to caller, + * and fire off the (possibly) blocking + * iwl_trans_send_cmd() + * as we reclaim the driver command queue */ + if (!rxcb._page_stolen) + iwl_tx_cmd_complete(trans, &rxcb, err); + else + IWL_WARN(trans, "Claim null rxb?\n"); + } + + page_stolen |= rxcb._page_stolen; + offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); } - /* page was stolen from us */ - if (rxcb._page == NULL) + /* page was stolen from us -- free our reference */ + if (page_stolen) { + __free_pages(rxb->page, trans_pcie->rx_page_order); rxb->page = NULL; + } /* Reuse the page if possible. For notification packets and * SKBs that fail to Rx correctly, add them back into the @@ -448,7 +465,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans, if (rxb->page != NULL) { rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0, - PAGE_SIZE << hw_params(trans).rx_page_order, + PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE); list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; @@ -521,412 +538,32 @@ static void iwl_rx_handle(struct iwl_trans *trans) iwlagn_rx_queue_restock(trans); } -static const char * const desc_lookup_text[] = { - "OK", - "FAIL", - "BAD_PARAM", - "BAD_CHECKSUM", - "NMI_INTERRUPT_WDG", - "SYSASSERT", - "FATAL_ERROR", - "BAD_COMMAND", - "HW_ERROR_TUNE_LOCK", - "HW_ERROR_TEMPERATURE", - "ILLEGAL_CHAN_FREQ", - "VCC_NOT_STABLE", - "FH_ERROR", - "NMI_INTERRUPT_HOST", - "NMI_INTERRUPT_ACTION_PT", - "NMI_INTERRUPT_UNKNOWN", - "UCODE_VERSION_MISMATCH", - "HW_ERROR_ABS_LOCK", - "HW_ERROR_CAL_LOCK_FAIL", - "NMI_INTERRUPT_INST_ACTION_PT", - "NMI_INTERRUPT_DATA_ACTION_PT", - "NMI_TRM_HW_ER", - "NMI_INTERRUPT_TRM", - "NMI_INTERRUPT_BREAK_POINT", - "DEBUG_0", - "DEBUG_1", - "DEBUG_2", - "DEBUG_3", -}; - -static struct { char *name; u8 num; } advanced_lookup[] = { - { "NMI_INTERRUPT_WDG", 0x34 }, - { "SYSASSERT", 0x35 }, - { "UCODE_VERSION_MISMATCH", 0x37 }, - { "BAD_COMMAND", 0x38 }, - { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, - { "FATAL_ERROR", 0x3D }, - { "NMI_TRM_HW_ERR", 0x46 }, - { "NMI_INTERRUPT_TRM", 0x4C }, - { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, - { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, - { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, - { "NMI_INTERRUPT_HOST", 0x66 }, - { "NMI_INTERRUPT_ACTION_PT", 0x7C }, - { "NMI_INTERRUPT_UNKNOWN", 0x84 }, - { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, - { "ADVANCED_SYSASSERT", 0 }, -}; - -static const char *desc_lookup(u32 num) -{ - int i; - int max = ARRAY_SIZE(desc_lookup_text); - - if (num < max) - return desc_lookup_text[num]; - - max = ARRAY_SIZE(advanced_lookup) - 1; - for (i = 0; i < max; i++) { - if (advanced_lookup[i].num == num) - break; - } - return advanced_lookup[i].name; -} - -#define ERROR_START_OFFSET (1 * sizeof(u32)) -#define ERROR_ELEM_SIZE (7 * sizeof(u32)) - -static void iwl_dump_nic_error_log(struct iwl_trans *trans) -{ - u32 base; - struct iwl_error_event_table table; - struct iwl_trans_pcie *trans_pcie = - IWL_TRANS_GET_PCIE_TRANS(trans); - - base = trans->shrd->device_pointers.error_event_table; - if (trans->shrd->ucode_type == IWL_UCODE_INIT) { - if (!base) - base = trans->shrd->fw->init_errlog_ptr; - } else { - if (!base) - base = trans->shrd->fw->inst_errlog_ptr; - } - 
- if (!iwlagn_hw_valid_rtc_data_addr(base)) { - IWL_ERR(trans, - "Not valid error log pointer 0x%08X for %s uCode\n", - base, - (trans->shrd->ucode_type == IWL_UCODE_INIT) - ? "Init" : "RT"); - return; - } - - iwl_read_targ_mem_words(trans, base, &table, sizeof(table)); - - if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { - IWL_ERR(trans, "Start IWL Error Log Dump:\n"); - IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", - trans->shrd->status, table.valid); - } - - trans_pcie->isr_stats.err_code = table.error_id; - - trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, - table.data1, table.data2, table.line, - table.blink1, table.blink2, table.ilink1, - table.ilink2, table.bcon_time, table.gp1, - table.gp2, table.gp3, table.ucode_ver, - table.hw_ver, table.brd_ver); - IWL_ERR(trans, "0x%08X | %-28s\n", table.error_id, - desc_lookup(table.error_id)); - IWL_ERR(trans, "0x%08X | uPc\n", table.pc); - IWL_ERR(trans, "0x%08X | branchlink1\n", table.blink1); - IWL_ERR(trans, "0x%08X | branchlink2\n", table.blink2); - IWL_ERR(trans, "0x%08X | interruptlink1\n", table.ilink1); - IWL_ERR(trans, "0x%08X | interruptlink2\n", table.ilink2); - IWL_ERR(trans, "0x%08X | data1\n", table.data1); - IWL_ERR(trans, "0x%08X | data2\n", table.data2); - IWL_ERR(trans, "0x%08X | line\n", table.line); - IWL_ERR(trans, "0x%08X | beacon time\n", table.bcon_time); - IWL_ERR(trans, "0x%08X | tsf low\n", table.tsf_low); - IWL_ERR(trans, "0x%08X | tsf hi\n", table.tsf_hi); - IWL_ERR(trans, "0x%08X | time gp1\n", table.gp1); - IWL_ERR(trans, "0x%08X | time gp2\n", table.gp2); - IWL_ERR(trans, "0x%08X | time gp3\n", table.gp3); - IWL_ERR(trans, "0x%08X | uCode version\n", table.ucode_ver); - IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver); - IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver); - IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd); - - IWL_ERR(trans, "0x%08X | isr0\n", table.isr0); - IWL_ERR(trans, "0x%08X | isr1\n", table.isr1); - IWL_ERR(trans, "0x%08X | isr2\n", table.isr2); - IWL_ERR(trans, "0x%08X | isr3\n", table.isr3); - IWL_ERR(trans, "0x%08X | isr4\n", table.isr4); - IWL_ERR(trans, "0x%08X | isr_pref\n", table.isr_pref); - IWL_ERR(trans, "0x%08X | wait_event\n", table.wait_event); - IWL_ERR(trans, "0x%08X | l2p_control\n", table.l2p_control); - IWL_ERR(trans, "0x%08X | l2p_duration\n", table.l2p_duration); - IWL_ERR(trans, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); - IWL_ERR(trans, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); - IWL_ERR(trans, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); - IWL_ERR(trans, "0x%08X | timestamp\n", table.u_timestamp); - IWL_ERR(trans, "0x%08X | flow_handler\n", table.flow_handler); -} - /** * iwl_irq_handle_error - called for HW or SW error interrupt from card */ static void iwl_irq_handle_error(struct iwl_trans *trans) { /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ - if (cfg(trans)->internal_wimax_coex && + if (trans->cfg->internal_wimax_coex && (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & APMS_CLK_VAL_MRB_FUNC_MODE) || (iwl_read_prph(trans, APMG_PS_CTRL_REG) & APMG_PS_CTRL_VAL_RESET_REQ))) { - /* - * Keep the restart process from trying to send host - * commands by clearing the ready bit. 
- */ - clear_bit(STATUS_READY, &trans->shrd->status); - clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status); + struct iwl_trans_pcie *trans_pcie; + + trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); + iwl_op_mode_wimax_active(trans->op_mode); wake_up(&trans->wait_command_queue); - IWL_ERR(trans, "RF is used by WiMAX\n"); return; } - IWL_ERR(trans, "Loaded firmware version: %s\n", - trans->shrd->fw->fw_version); - - iwl_dump_nic_error_log(trans); iwl_dump_csr(trans); iwl_dump_fh(trans, NULL, false); - iwl_dump_nic_event_log(trans, false, NULL, false); iwl_op_mode_nic_error(trans->op_mode); } -#define EVENT_START_OFFSET (4 * sizeof(u32)) - -/** - * iwl_print_event_log - Dump error event log to syslog - * - */ -static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx, - u32 num_events, u32 mode, - int pos, char **buf, size_t bufsz) -{ - u32 i; - u32 base; /* SRAM byte address of event log header */ - u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ - u32 ptr; /* SRAM byte address of log data */ - u32 ev, time, data; /* event log data */ - unsigned long reg_flags; - - if (num_events == 0) - return pos; - - base = trans->shrd->device_pointers.log_event_table; - if (trans->shrd->ucode_type == IWL_UCODE_INIT) { - if (!base) - base = trans->shrd->fw->init_evtlog_ptr; - } else { - if (!base) - base = trans->shrd->fw->inst_evtlog_ptr; - } - - if (mode == 0) - event_size = 2 * sizeof(u32); - else - event_size = 3 * sizeof(u32); - - ptr = base + EVENT_START_OFFSET + (start_idx * event_size); - - /* Make sure device is powered up for SRAM reads */ - spin_lock_irqsave(&trans->reg_lock, reg_flags); - if (unlikely(!iwl_grab_nic_access(trans))) - goto out_unlock; - - /* Set starting address; reads will auto-increment */ - iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr); - - /* "time" is actually "data" for mode 0 (no timestamp). - * place event id # at far right for easier visual parsing. */ - for (i = 0; i < num_events; i++) { - ev = iwl_read32(trans, HBUS_TARG_MEM_RDAT); - time = iwl_read32(trans, HBUS_TARG_MEM_RDAT); - if (mode == 0) { - /* data, ev */ - if (bufsz) { - pos += scnprintf(*buf + pos, bufsz - pos, - "EVT_LOG:0x%08x:%04u\n", - time, ev); - } else { - trace_iwlwifi_dev_ucode_event(trans->dev, 0, - time, ev); - IWL_ERR(trans, "EVT_LOG:0x%08x:%04u\n", - time, ev); - } - } else { - data = iwl_read32(trans, HBUS_TARG_MEM_RDAT); - if (bufsz) { - pos += scnprintf(*buf + pos, bufsz - pos, - "EVT_LOGT:%010u:0x%08x:%04u\n", - time, data, ev); - } else { - IWL_ERR(trans, "EVT_LOGT:%010u:0x%08x:%04u\n", - time, data, ev); - trace_iwlwifi_dev_ucode_event(trans->dev, time, - data, ev); - } - } - } - - /* Allow device to power down */ - iwl_release_nic_access(trans); -out_unlock: - spin_unlock_irqrestore(&trans->reg_lock, reg_flags); - return pos; -} - -/** - * iwl_print_last_event_logs - Dump the newest # of event log to syslog - */ -static int iwl_print_last_event_logs(struct iwl_trans *trans, u32 capacity, - u32 num_wraps, u32 next_entry, - u32 size, u32 mode, - int pos, char **buf, size_t bufsz) -{ - /* - * display the newest DEFAULT_LOG_ENTRIES entries - * i.e the entries just before the next ont that uCode would fill. 
- */ - if (num_wraps) { - if (next_entry < size) { - pos = iwl_print_event_log(trans, - capacity - (size - next_entry), - size - next_entry, mode, - pos, buf, bufsz); - pos = iwl_print_event_log(trans, 0, - next_entry, mode, - pos, buf, bufsz); - } else - pos = iwl_print_event_log(trans, next_entry - size, - size, mode, pos, buf, bufsz); - } else { - if (next_entry < size) { - pos = iwl_print_event_log(trans, 0, next_entry, - mode, pos, buf, bufsz); - } else { - pos = iwl_print_event_log(trans, next_entry - size, - size, mode, pos, buf, bufsz); - } - } - return pos; -} - -#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20) - -int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log, - char **buf, bool display) -{ - u32 base; /* SRAM byte address of event log header */ - u32 capacity; /* event log capacity in # entries */ - u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ - u32 num_wraps; /* # times uCode wrapped to top of log */ - u32 next_entry; /* index of next entry to be written by uCode */ - u32 size; /* # entries that we'll print */ - u32 logsize; - int pos = 0; - size_t bufsz = 0; - - base = trans->shrd->device_pointers.log_event_table; - if (trans->shrd->ucode_type == IWL_UCODE_INIT) { - logsize = trans->shrd->fw->init_evtlog_size; - if (!base) - base = trans->shrd->fw->init_evtlog_ptr; - } else { - logsize = trans->shrd->fw->inst_evtlog_size; - if (!base) - base = trans->shrd->fw->inst_evtlog_ptr; - } - - if (!iwlagn_hw_valid_rtc_data_addr(base)) { - IWL_ERR(trans, - "Invalid event log pointer 0x%08X for %s uCode\n", - base, - (trans->shrd->ucode_type == IWL_UCODE_INIT) - ? "Init" : "RT"); - return -EINVAL; - } - - /* event log header */ - capacity = iwl_read_targ_mem(trans, base); - mode = iwl_read_targ_mem(trans, base + (1 * sizeof(u32))); - num_wraps = iwl_read_targ_mem(trans, base + (2 * sizeof(u32))); - next_entry = iwl_read_targ_mem(trans, base + (3 * sizeof(u32))); - - if (capacity > logsize) { - IWL_ERR(trans, "Log capacity %d is bogus, limit to %d " - "entries\n", capacity, logsize); - capacity = logsize; - } - - if (next_entry > logsize) { - IWL_ERR(trans, "Log write index %d is bogus, limit to %d\n", - next_entry, logsize); - next_entry = logsize; - } - - size = num_wraps ? capacity : next_entry; - - /* bail out if nothing in log */ - if (size == 0) { - IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n"); - return pos; - } - -#ifdef CONFIG_IWLWIFI_DEBUG - if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log) - size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) - ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; -#else - size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) - ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; -#endif - IWL_ERR(trans, "Start IWL Event Log Dump: display last %u entries\n", - size); - -#ifdef CONFIG_IWLWIFI_DEBUG - if (display) { - if (full_log) - bufsz = capacity * 48; - else - bufsz = size * 48; - *buf = kmalloc(bufsz, GFP_KERNEL); - if (!*buf) - return -ENOMEM; - } - if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) { - /* - * if uCode has wrapped back to top of log, - * start at the oldest entry, - * i.e the next one that uCode would fill. 
- */ - if (num_wraps) - pos = iwl_print_event_log(trans, next_entry, - capacity - next_entry, mode, - pos, buf, bufsz); - /* (then/else) start at top of log */ - pos = iwl_print_event_log(trans, 0, - next_entry, mode, pos, buf, bufsz); - } else - pos = iwl_print_last_event_logs(trans, capacity, num_wraps, - next_entry, size, mode, - pos, buf, bufsz); -#else - pos = iwl_print_last_event_logs(trans, capacity, num_wraps, - next_entry, size, mode, - pos, buf, bufsz); -#endif - return pos; -} - /* tasklet for iwlagn interrupt */ void iwl_irq_tasklet(struct iwl_trans *trans) { @@ -964,7 +601,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans) if (iwl_have_debug_level(IWL_DL_ISR)) { /* just for debug */ inta_mask = iwl_read32(trans, CSR_INT_MASK); - IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n ", + IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", inta, inta_mask); } #endif @@ -1012,8 +649,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans) if (inta & CSR_INT_BIT_RF_KILL) { bool hw_rfkill; - hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) & - CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); + hw_rfkill = iwl_is_rfkill_set(trans); IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", hw_rfkill ? "disable radio" : "enable radio"); @@ -1044,7 +680,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans) if (inta & CSR_INT_BIT_WAKEUP) { IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq); - for (i = 0; i < cfg(trans)->base_params->num_of_queues; i++) + for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) iwl_txq_update_write_ptr(trans, &trans_pcie->txq[i]); diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c index e92972fd6ec..21a8a672fbb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c @@ -37,47 +37,12 @@ #include "iwl-agn-hw.h" #include "iwl-op-mode.h" #include "iwl-trans-pcie-int.h" +/* FIXME: need to abstract out TX command (once we know what it looks like) */ +#include "iwl-commands.h" #define IWL_TX_CRC_SIZE 4 #define IWL_TX_DELIMITER_SIZE 4 -/* - * mac80211 queues, ACs, hardware queues, FIFOs. - * - * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues - * - * Mac80211 uses the following numbers, which we get as from it - * by way of skb_get_queue_mapping(skb): - * - * VO 0 - * VI 1 - * BE 2 - * BK 3 - * - * - * Regular (not A-MPDU) frames are put into hardware queues corresponding - * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their - * own queue per aggregation session (RA/TID combination), such queues are - * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In - * order to map frames to the right queue, we also need an AC->hw queue - * mapping. This is implemented here. - * - * Due to the way hw queues are set up (by the hw specific code), the AC->hw - * queue mapping is the identity mapping. 
- */ - -static const u8 tid_to_ac[] = { - IEEE80211_AC_BE, - IEEE80211_AC_BK, - IEEE80211_AC_BK, - IEEE80211_AC_BE, - IEEE80211_AC_VI, - IEEE80211_AC_VI, - IEEE80211_AC_VO, - IEEE80211_AC_VO -}; - - /** * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array */ @@ -95,7 +60,7 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; __le16 bc_ent; struct iwl_tx_cmd *tx_cmd = - (struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload; + (void *) txq->entries[txq->q.write_ptr].cmd->payload; scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; @@ -136,13 +101,15 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq) if (txq->need_update == 0) return; - if (cfg(trans)->base_params->shadow_reg_enable) { + if (trans->cfg->base_params->shadow_reg_enable) { /* shadow register enabled */ iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); } else { + struct iwl_trans_pcie *trans_pcie = + IWL_TRANS_GET_PCIE_TRANS(trans); /* if we're trying to save power */ - if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) { + if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) { /* wake up nic if it's powered down ... * uCode will wake up, and interrupt us again, so next * time we'll skip this part. */ @@ -256,13 +223,14 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, lockdep_assert_held(&txq->lock); - iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir); + iwlagn_unmap_tfd(trans, &txq->entries[index].meta, + &tfd_tmp[index], dma_dir); /* free SKB */ - if (txq->skbs) { + if (txq->entries) { struct sk_buff *skb; - skb = txq->skbs[index]; + skb = txq->entries[index].skb; /* Can be called from irqs-disabled context * If skb is not NULL, it means that the whole queue is being @@ -270,7 +238,7 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, */ if (skb) { iwl_op_mode_free_skb(trans->op_mode, skb); - txq->skbs[index] = NULL; + txq->entries[index].skb = NULL; } } } @@ -393,7 +361,7 @@ static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, u8 sta_id = 0; __le16 bc_ent; struct iwl_tx_cmd *tx_cmd = - (struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload; + (void *)txq->entries[txq->q.read_ptr].cmd->payload; WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); @@ -448,20 +416,17 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id) void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index) { - IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff); + IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d\n", txq_id, index & 0xff); iwl_write_direct32(trans, HBUS_TARG_WRPTR, (index & 0xff) | (txq_id << 8)); iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index); } void iwl_trans_tx_queue_set_status(struct iwl_trans *trans, - struct iwl_tx_queue *txq, - int tx_fifo_id, int scd_retry) + struct iwl_tx_queue *txq, + int tx_fifo_id, bool active) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int txq_id = txq->q.id; - int active = - test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0; iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) | @@ -469,77 +434,22 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans, (1 << SCD_QUEUE_STTS_REG_POS_WSL) | SCD_QUEUE_STTS_REG_MSK); - txq->sched_retry = scd_retry; - if (active) - IWL_DEBUG_TX_QUEUES(trans, "Activate %s Queue %d on FIFO %d\n", - scd_retry ? 
"BA" : "AC/CMD", txq_id, tx_fifo_id); + IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n", + txq_id, tx_fifo_id); else - IWL_DEBUG_TX_QUEUES(trans, "Deactivate %s Queue %d\n", - scd_retry ? "BA" : "AC/CMD", txq_id); -} - -static inline int get_ac_from_tid(u16 tid) -{ - if (likely(tid < ARRAY_SIZE(tid_to_ac))) - return tid_to_ac[tid]; - - /* no support for TIDs 8-15 yet */ - return -EINVAL; -} - -static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie, - u8 ctx, u16 tid) -{ - const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx]; - if (likely(tid < ARRAY_SIZE(tid_to_ac))) - return ac_to_fifo[tid_to_ac[tid]]; - - /* no support for TIDs 8-15 yet */ - return -EINVAL; + IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); } -static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id) +void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int txq_id, int fifo, + int sta_id, int tid, int frame_limit, u16 ssn) { - if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE) - return false; - return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE + - hw_params(trans).num_ampdu_queues); -} - -void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx, int sta_id, - int tid, int frame_limit, u16 ssn) -{ - int tx_fifo, txq_id; - u16 ra_tid; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); unsigned long flags; + u16 ra_tid = BUILD_RAxTID(sta_id, tid); - struct iwl_trans_pcie *trans_pcie = - IWL_TRANS_GET_PCIE_TRANS(trans); - - if (WARN_ON(sta_id == IWL_INVALID_STATION)) - return; - if (WARN_ON(tid >= IWL_MAX_TID_COUNT)) - return; - - tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid); - if (WARN_ON(tx_fifo < 0)) { - IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo); - return; - } - - txq_id = trans_pcie->agg_txq[sta_id][tid]; - if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) { - IWL_ERR(trans, - "queue number out of range: %d, must be %d to %d\n", - txq_id, IWLAGN_FIRST_AMPDU_QUEUE, - IWLAGN_FIRST_AMPDU_QUEUE + - hw_params(trans).num_ampdu_queues - 1); - return; - } - - ra_tid = BUILD_RAxTID(sta_id, tid); + if (test_and_set_bit(txq_id, trans_pcie->queue_used)) + WARN_ONCE(1, "queue %d already used - expect issues", txq_id); spin_lock_irqsave(&trans_pcie->irq_lock, flags); @@ -550,10 +460,10 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id); /* Set this queue as a chain-building queue */ - iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, (1<<txq_id)); + iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id)); /* enable aggregations for the queue */ - iwl_set_bits_prph(trans, SCD_AGGR_SEL, (1<<txq_id)); + iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); /* Place first TFD at index corresponding to start sequence number. 
* Assumes that ssn_idx is valid (!= 0xFFF) */ @@ -563,92 +473,42 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, /* Set up Tx window size and frame limit for this queue */ iwl_write_targ_mem(trans, trans_pcie->scd_base_addr + - SCD_CONTEXT_QUEUE_OFFSET(txq_id) + - sizeof(u32), - ((frame_limit << - SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & - SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | - ((frame_limit << - SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & - SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); + SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), + ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & + SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | + ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & + SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id)); /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], - tx_fifo, 1); - - trans_pcie->txq[txq_id].sta_id = sta_id; - trans_pcie->txq[txq_id].tid = tid; + fifo, true); spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); } -/* - * Find first available (lowest unused) Tx Queue, mark it "active". - * Called only when finding queue for aggregation. - * Should never return anything < 7, because they should already - * be in use as EDCA AC (0-3), Command (4), reserved (5, 6) - */ -static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int txq_id; - - for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues; - txq_id++) - if (!test_and_set_bit(txq_id, - &trans_pcie->txq_ctx_active_msk)) - return txq_id; - return -1; -} - -int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, - int sta_id, int tid) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int txq_id; - - txq_id = iwlagn_txq_ctx_activate_free(trans); - if (txq_id == -1) { - IWL_ERR(trans, "No free aggregation queue available\n"); - return -ENXIO; - } - - trans_pcie->agg_txq[sta_id][tid] = txq_id; - iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id); - - return 0; -} - -int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid) +void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u8 txq_id = trans_pcie->agg_txq[sta_id][tid]; - if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) { - IWL_ERR(trans, - "queue number out of range: %d, must be %d to %d\n", - txq_id, IWLAGN_FIRST_AMPDU_QUEUE, - IWLAGN_FIRST_AMPDU_QUEUE + - hw_params(trans).num_ampdu_queues - 1); - return -EINVAL; + if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { + WARN_ONCE(1, "queue %d not used", txq_id); + return; } iwlagn_tx_queue_stop_scheduler(trans, txq_id); - iwl_clear_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id)); + iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); - trans_pcie->agg_txq[sta_id][tid] = 0; trans_pcie->txq[txq_id].q.read_ptr = 0; trans_pcie->txq[txq_id].q.write_ptr = 0; - /* supposes that ssn_idx is valid (!= 0xFFF) */ iwl_trans_set_wr_ptrs(trans, txq_id, 0); - iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id)); - iwl_txq_ctx_deactivate(trans_pcie, txq_id); - iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0); - return 0; + iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, BIT(txq_id)); + + iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], + 0, false); } /*************** HOST COMMAND QUEUE FUNCTIONS 
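Queue ownership in the new aggregation setup/teardown is one atomic bitmap: test_and_set_bit() claims a hardware queue, test_and_clear_bit() releases it, and the WARN_ONCE fires exactly when two users collide on the same queue. This replaces the old per-station/TID agg_txq[] bookkeeping and txq_ctx_active_msk. The same pairing in a tiny self-contained form, with C11 atomics standing in for the kernel bitops and all names invented:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_uint queue_used;      /* one bit per hardware queue */

    static bool claim_queue(int txq_id)
    {
        unsigned int bit = 1u << txq_id;

        /* like test_and_set_bit(): true if the queue was already taken */
        return atomic_fetch_or(&queue_used, bit) & bit;
    }

    static bool release_queue(int txq_id)
    {
        unsigned int bit = 1u << txq_id;

        /* like test_and_clear_bit(): true if the queue was actually set */
        return atomic_fetch_and(&queue_used, ~bit) & bit;
    }

    int main(void)
    {
        if (claim_queue(10))
            printf("queue 10 already used - expect issues\n");
        if (!release_queue(11))
            printf("queue 11 not used\n");
        return 0;
    }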
*****/ @@ -681,11 +541,6 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) int trace_idx; #endif - if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) { - IWL_WARN(trans, "fw recovery, no hcmd send\n"); - return -EIO; - } - copy_size = sizeof(out_cmd->hdr); cmd_size = sizeof(out_cmd->hdr); @@ -726,8 +581,8 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) } idx = get_cmd_index(q, q->write_ptr); - out_cmd = txq->cmd[idx]; - out_meta = &txq->meta[idx]; + out_cmd = txq->entries[idx].cmd; + out_meta = &txq->entries[idx].meta; memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ if (cmd->flags & CMD_WANT_SKB) @@ -753,12 +608,11 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) cmd_dest += cmd->len[i]; } - IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, " - "%d bytes at %d[%d]:%d\n", - get_cmd_string(out_cmd->hdr.cmd), - out_cmd->hdr.cmd, - le16_to_cpu(out_cmd->hdr.sequence), cmd_size, - q->write_ptr, idx, trans_pcie->cmd_queue); + IWL_DEBUG_HC(trans, + "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", + trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd), + out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), cmd_size, + q->write_ptr, idx, trans_pcie->cmd_queue); phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size, DMA_BIDIRECTIONAL); @@ -816,6 +670,10 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) trace_bufs[2], trace_lens[2]); #endif + /* start timer if queue currently empty */ + if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) + mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); + /* Increment and update queue's write index */ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); iwl_txq_update_write_ptr(trans, txq); @@ -825,6 +683,22 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) return idx; } +static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie, + struct iwl_tx_queue *txq) +{ + if (!trans_pcie->wd_timeout) + return; + + /* + * if empty delete timer, otherwise move timer forward + * since we're making progress on this queue + */ + if (txq->q.read_ptr == txq->q.write_ptr) + del_timer(&txq->stuck_timer); + else + mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); +} + /** * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd * @@ -859,6 +733,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id, } } + + iwl_queue_progress(trans_pcie, txq); } /** @@ -899,10 +775,8 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, spin_lock(&txq->lock); cmd_index = get_cmd_index(&txq->q, index); - cmd = txq->cmd[cmd_index]; - meta = &txq->meta[cmd_index]; - - txq->time_stamp = jiffies; + cmd = txq->entries[cmd_index].cmd; + meta = &txq->entries[cmd_index].meta; iwlagn_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); @@ -913,21 +787,23 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, meta->source->resp_pkt = pkt; meta->source->_rx_page_addr = (unsigned long)page_address(p); - meta->source->_rx_page_order = hw_params(trans).rx_page_order; + meta->source->_rx_page_order = trans_pcie->rx_page_order; meta->source->handler_status = handler_status; } iwl_hcmd_queue_reclaim(trans, txq_id, index); if (!(meta->flags & CMD_ASYNC)) { - if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) { + if 
(!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { IWL_WARN(trans, "HCMD_ACTIVE already clear for command %s\n", - get_cmd_string(cmd->hdr.cmd)); + trans_pcie_get_cmd_string(trans_pcie, + cmd->hdr.cmd)); } - clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status); + clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", - get_cmd_string(cmd->hdr.cmd)); + trans_pcie_get_cmd_string(trans_pcie, + cmd->hdr.cmd)); wake_up(&trans->wait_command_queue); } @@ -940,6 +816,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; /* An asynchronous command can not expect an SKB to be set. */ @@ -951,7 +828,7 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd) if (ret < 0) { IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", - get_cmd_string(cmd->id), ret); + trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret); return ret; } return 0; @@ -964,55 +841,51 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) int ret; IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", - get_cmd_string(cmd->id)); - - if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) { - IWL_ERR(trans, "Command %s failed: FW Error\n", - get_cmd_string(cmd->id)); - return -EIO; - } + trans_pcie_get_cmd_string(trans_pcie, cmd->id)); if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE, - &trans->shrd->status))) { + &trans_pcie->status))) { IWL_ERR(trans, "Command %s: a command is already active!\n", - get_cmd_string(cmd->id)); + trans_pcie_get_cmd_string(trans_pcie, cmd->id)); return -EIO; } IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", - get_cmd_string(cmd->id)); + trans_pcie_get_cmd_string(trans_pcie, cmd->id)); cmd_idx = iwl_enqueue_hcmd(trans, cmd); if (cmd_idx < 0) { ret = cmd_idx; - clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status); + clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", - get_cmd_string(cmd->id), ret); + trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret); return ret; } ret = wait_event_timeout(trans->wait_command_queue, - !test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status), + !test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status), HOST_COMPLETE_TIMEOUT); if (!ret) { - if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) { + if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_queue *q = &txq->q; IWL_ERR(trans, "Error sending %s: time out after %dms.\n", - get_cmd_string(cmd->id), + trans_pcie_get_cmd_string(trans_pcie, cmd->id), jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", q->read_ptr, q->write_ptr); - clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status); - IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command" - "%s\n", get_cmd_string(cmd->id)); + clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); + IWL_DEBUG_INFO(trans, + "Clearing HCMD_ACTIVE for command %s\n", + trans_pcie_get_cmd_string(trans_pcie, + cmd->id)); ret = -ETIMEDOUT; goto cancel; } @@ -1020,7 +893,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { IWL_ERR(trans, "Error: Response NULL in '%s'\n", - 
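Stripped of logging and the CMD_WANT_SKB cleanup, the synchronous command path above is a handshake on STATUS_HCMD_ACTIVE: set the bit, enqueue, then sleep until iwl_tx_cmd_complete() clears the bit and wakes wait_command_queue. A condensed skeleton of that flow, not the literal driver code and with the cancel path trimmed:

    static int send_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
    {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int idx;

        if (test_and_set_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status))
            return -EIO;        /* only one sync command at a time */

        idx = iwl_enqueue_hcmd(trans, cmd);
        if (idx < 0) {
            clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
            return idx;
        }

        /* iwl_tx_cmd_complete() clears the bit and wakes us up */
        if (!wait_event_timeout(trans->wait_command_queue,
                    !test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status),
                    HOST_COMPLETE_TIMEOUT)) {
            clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
            return -ETIMEDOUT;
        }
        return 0;
    }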
get_cmd_string(cmd->id)); + trans_pcie_get_cmd_string(trans_pcie, cmd->id)); ret = -EIO; goto cancel; } @@ -1035,8 +908,8 @@ cancel: * in later, it will possibly set an invalid * address (cmd->meta.source). */ - trans_pcie->txq[trans_pcie->cmd_queue].meta[cmd_idx].flags &= - ~CMD_WANT_SKB; + trans_pcie->txq[trans_pcie->cmd_queue]. + entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; } if (cmd->resp_pkt) { @@ -1091,17 +964,20 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, q->read_ptr != index; q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { - if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL)) + if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) continue; - __skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]); + __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb); - txq->skbs[txq->q.read_ptr] = NULL; + txq->entries[txq->q.read_ptr].skb = NULL; iwlagn_txq_inval_byte_cnt_tbl(trans, txq); iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE); freed++; } + + iwl_queue_progress(trans_pcie, txq); + return freed; } diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c index 4d7b30d3e64..2e57161854b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c @@ -68,18 +68,20 @@ #include <linux/bitops.h> #include <linux/gfp.h> +#include "iwl-drv.h" #include "iwl-trans.h" #include "iwl-trans-pcie-int.h" #include "iwl-csr.h" #include "iwl-prph.h" -#include "iwl-shared.h" #include "iwl-eeprom.h" #include "iwl-agn-hw.h" +/* FIXME: need to abstract out TX command (once we know what it looks like) */ +#include "iwl-commands.h" #define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) #define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \ - (((1<<cfg(trans)->base_params->num_of_queues) - 1) &\ + (((1<<trans->cfg->base_params->num_of_queues) - 1) &\ (~(1<<(trans_pcie)->cmd_queue))) static int iwl_trans_rx_alloc(struct iwl_trans *trans) @@ -132,10 +134,10 @@ static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans) * to an SKB, so we need to unmap and free potential storage */ if (rxq->pool[i].page != NULL) { dma_unmap_page(trans->dev, rxq->pool[i].page_dma, - PAGE_SIZE << hw_params(trans).rx_page_order, + PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE); __free_pages(rxq->pool[i].page, - hw_params(trans).rx_page_order); + trans_pcie->rx_page_order); rxq->pool[i].page = NULL; } list_add_tail(&rxq->pool[i].list, &rxq->rx_used); @@ -145,11 +147,12 @@ static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans) static void iwl_trans_rx_hw_init(struct iwl_trans *trans, struct iwl_rx_queue *rxq) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 rb_size; const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? 
*/ - if (iwlagn_mod_params.amsdu_size_8K) + if (trans_pcie->rx_buf_size_8k) rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; else rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; @@ -180,7 +183,6 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans, FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | - FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK | rb_size| (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); @@ -299,6 +301,33 @@ static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans, memset(ptr, 0, sizeof(*ptr)); } +static void iwl_trans_pcie_queue_stuck_timer(unsigned long data) +{ + struct iwl_tx_queue *txq = (void *)data; + struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; + struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); + + spin_lock(&txq->lock); + /* check if triggered erroneously */ + if (txq->q.read_ptr == txq->q.write_ptr) { + spin_unlock(&txq->lock); + return; + } + spin_unlock(&txq->lock); + + + IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, + jiffies_to_msecs(trans_pcie->wd_timeout)); + IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", + txq->q.read_ptr, txq->q.write_ptr); + IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n", + iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id)) + & (TFD_QUEUE_SIZE_MAX - 1), + iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id))); + + iwl_op_mode_nic_error(trans->op_mode); +} + static int iwl_trans_txq_alloc(struct iwl_trans *trans, struct iwl_tx_queue *txq, int slots_num, u32 txq_id) @@ -307,40 +336,31 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans, int i; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds)) + if (WARN_ON(txq->entries || txq->tfds)) return -EINVAL; + setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer, + (unsigned long)txq); + txq->trans_pcie = trans_pcie; + txq->q.n_window = slots_num; - txq->meta = kcalloc(slots_num, sizeof(txq->meta[0]), GFP_KERNEL); - txq->cmd = kcalloc(slots_num, sizeof(txq->cmd[0]), GFP_KERNEL); + txq->entries = kcalloc(slots_num, + sizeof(struct iwl_pcie_tx_queue_entry), + GFP_KERNEL); - if (!txq->meta || !txq->cmd) + if (!txq->entries) goto error; if (txq_id == trans_pcie->cmd_queue) for (i = 0; i < slots_num; i++) { - txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd), - GFP_KERNEL); - if (!txq->cmd[i]) + txq->entries[i].cmd = + kmalloc(sizeof(struct iwl_device_cmd), + GFP_KERNEL); + if (!txq->entries[i].cmd) goto error; } - /* Alloc driver data array and TFD circular buffer */ - /* Driver private data, only for Tx (not command) queues, - * not shared with device. 
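The stuck-queue handling is now event driven instead of polled: a per-queue timer is armed when the queue goes non-empty, pushed forward on every reclaim, deleted when the queue drains, and fires only after wd_timeout of no progress, at which point iwl_trans_pcie_queue_stuck_timer() calls iwl_op_mode_nic_error(). A toy model of that life cycle, self-contained C with invented names and a plain 'deadline' standing in for the kernel timer:

    #include <stdbool.h>
    #include <stdio.h>

    #define WD_TIMEOUT 2000UL   /* plays the role of trans_pcie->wd_timeout */

    struct toy_txq {
        int read_ptr, write_ptr;
        unsigned long deadline;         /* 0 == timer not armed */
    };

    static void enqueue(struct toy_txq *q, unsigned long now)
    {
        if (q->read_ptr == q->write_ptr)    /* queue was empty: arm */
            q->deadline = now + WD_TIMEOUT;
        q->write_ptr++;
    }

    static void reclaim(struct toy_txq *q, unsigned long now)
    {
        q->read_ptr++;
        if (q->read_ptr == q->write_ptr)
            q->deadline = 0;                /* drained: disarm */
        else
            q->deadline = now + WD_TIMEOUT; /* progress: push forward */
    }

    static bool stuck(const struct toy_txq *q, unsigned long now)
    {
        return q->deadline && now >= q->deadline;   /* would call nic_error */
    }

    int main(void)
    {
        struct toy_txq q = { 0, 0, 0 };

        enqueue(&q, 100);
        enqueue(&q, 150);
        reclaim(&q, 900);       /* progress before the deadline */
        printf("stuck at t=3000: %d\n", stuck(&q, 3000));   /* prints 1 */
        return 0;
    }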
*/ - if (txq_id != trans_pcie->cmd_queue) { - txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]), - GFP_KERNEL); - if (!txq->skbs) { - IWL_ERR(trans, "kmalloc for auxiliary BD " - "structures failed\n"); - goto error; - } - } else { - txq->skbs = NULL; - } - /* Circular buffer of transmit frame descriptors (TFDs), * shared with device */ txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, @@ -353,37 +373,22 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans, return 0; error: - kfree(txq->skbs); - txq->skbs = NULL; - /* since txq->cmd has been zeroed, - * all non allocated cmd[i] will be NULL */ - if (txq->cmd && txq_id == trans_pcie->cmd_queue) + if (txq->entries && txq_id == trans_pcie->cmd_queue) for (i = 0; i < slots_num; i++) - kfree(txq->cmd[i]); - kfree(txq->meta); - kfree(txq->cmd); - txq->meta = NULL; - txq->cmd = NULL; + kfree(txq->entries[i].cmd); + kfree(txq->entries); + txq->entries = NULL; return -ENOMEM; } static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq, - int slots_num, u32 txq_id) + int slots_num, u32 txq_id) { int ret; txq->need_update = 0; - memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num); - - /* - * For the default queues 0-3, set up the swq_id - * already -- all others need to get one later - * (if they need one at all). - */ - if (txq_id < 4) - iwl_set_swq_id(txq, txq_id, txq_id); /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ @@ -461,7 +466,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id) if (txq_id == trans_pcie->cmd_queue) for (i = 0; i < txq->q.n_window; i++) - kfree(txq->cmd[i]); + kfree(txq->entries[i].cmd); /* De-alloc circular buffer of TFDs */ if (txq->q.n_bd) { @@ -470,15 +475,10 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id) memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr)); } - /* De-alloc array of per-TFD driver data */ - kfree(txq->skbs); - txq->skbs = NULL; + kfree(txq->entries); + txq->entries = NULL; - /* deallocate arrays */ - kfree(txq->cmd); - kfree(txq->meta); - txq->cmd = NULL; - txq->meta = NULL; + del_timer_sync(&txq->stuck_timer); /* 0-fill queue descriptor structure */ memset(txq, 0, sizeof(*txq)); @@ -497,7 +497,7 @@ static void iwl_trans_pcie_tx_free(struct iwl_trans *trans) /* Tx queues */ if (trans_pcie->txq) { for (txq_id = 0; - txq_id < cfg(trans)->base_params->num_of_queues; txq_id++) + txq_id < trans->cfg->base_params->num_of_queues; txq_id++) iwl_tx_queue_free(trans, txq_id); } @@ -522,7 +522,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans) int txq_id, slots_num; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u16 scd_bc_tbls_size = cfg(trans)->base_params->num_of_queues * + u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues * sizeof(struct iwlagn_scd_bc_tbl); /*It is not allowed to alloc twice, so warn when this happens. 
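The three parallel per-slot arrays (cmd[], meta[] and skbs[]) are folded into a single entries[] array indexed by ring position. The struct definition lives in iwl-trans-pcie-int.h, which is not part of this hunk, so the layout below is inferred from the accessors and should be read as a sketch rather than a quotation:

    struct iwl_pcie_tx_queue_entry {
        struct iwl_device_cmd *cmd;     /* was txq->cmd[i]   */
        struct iwl_cmd_meta meta;       /* was txq->meta[i]  */
        struct sk_buff *skb;            /* was txq->skbs[i]  */
    };

One kcalloc() of slots_num such entries then replaces the separate meta/cmd allocations, and the data path addresses txq->entries[q->write_ptr].skb, .cmd and .meta directly.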
@@ -546,7 +546,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans) goto error; } - trans_pcie->txq = kcalloc(cfg(trans)->base_params->num_of_queues, + trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues, sizeof(struct iwl_tx_queue), GFP_KERNEL); if (!trans_pcie->txq) { IWL_ERR(trans, "Not enough memory for txq\n"); @@ -555,7 +555,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans) } /* Alloc and init all Tx queues, including the command queue (#4/#9) */ - for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues; + for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; txq_id++) { slots_num = (txq_id == trans_pcie->cmd_queue) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; @@ -601,7 +601,7 @@ static int iwl_tx_init(struct iwl_trans *trans) spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); /* Alloc and init all Tx queues, including the command queue (#4/#9) */ - for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues; + for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; txq_id++) { slots_num = (txq_id == trans_pcie->cmd_queue) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; @@ -724,9 +724,9 @@ static int iwl_apm_init(struct iwl_trans *trans) iwl_apm_config(trans); /* Configure analog phase-lock-loop before activating to D0A */ - if (cfg(trans)->base_params->pll_cfg_val) + if (trans->cfg->base_params->pll_cfg_val) iwl_set_bit(trans, CSR_ANA_PLL_CFG, - cfg(trans)->base_params->pll_cfg_val); + trans->cfg->base_params->pll_cfg_val); /* * Set "initialization complete" bit to move adapter from @@ -836,7 +836,7 @@ static int iwl_nic_init(struct iwl_trans *trans) if (iwl_tx_init(trans)) return -ENOMEM; - if (cfg(trans)->base_params->shadow_reg_enable) { + if (trans->cfg->base_params->shadow_reg_enable) { /* enable shadow regs in HW */ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); @@ -895,59 +895,6 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans) return ret; } -#define IWL_AC_UNSET -1 - -struct queue_to_fifo_ac { - s8 fifo, ac; -}; - -static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = { - { IWL_TX_FIFO_VO, IEEE80211_AC_VO, }, - { IWL_TX_FIFO_VI, IEEE80211_AC_VI, }, - { IWL_TX_FIFO_BE, IEEE80211_AC_BE, }, - { IWL_TX_FIFO_BK, IEEE80211_AC_BK, }, - { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, }, - { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, - { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, - { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, - { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, - { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, - { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, -}; - -static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = { - { IWL_TX_FIFO_VO, IEEE80211_AC_VO, }, - { IWL_TX_FIFO_VI, IEEE80211_AC_VI, }, - { IWL_TX_FIFO_BE, IEEE80211_AC_BE, }, - { IWL_TX_FIFO_BK, IEEE80211_AC_BK, }, - { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, }, - { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, }, - { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, }, - { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, }, - { IWL_TX_FIFO_BE_IPAN, 2, }, - { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, }, - { IWL_TX_FIFO_AUX, IWL_AC_UNSET, }, -}; - -static const u8 iwlagn_bss_ac_to_fifo[] = { - IWL_TX_FIFO_VO, - IWL_TX_FIFO_VI, - IWL_TX_FIFO_BE, - IWL_TX_FIFO_BK, -}; -static const u8 iwlagn_bss_ac_to_queue[] = { - 0, 1, 2, 3, -}; -static const u8 iwlagn_pan_ac_to_fifo[] = { - IWL_TX_FIFO_VO_IPAN, - IWL_TX_FIFO_VI_IPAN, - IWL_TX_FIFO_BE_IPAN, - IWL_TX_FIFO_BK_IPAN, -}; -static const u8 iwlagn_pan_ac_to_queue[] = { - 7, 6, 5, 4, -}; - /* * ucode */ @@ -1028,34 +975,21 @@ static int 
iwl_trans_pcie_start_fw(struct iwl_trans *trans, const struct fw_img *fw) { int ret; - struct iwl_trans_pcie *trans_pcie = - IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; - trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue; - trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue; - - trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo; - trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo; - - trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0; - trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE; - /* This may fail if AMT took ownership of the device */ if (iwl_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); return -EIO; } + iwl_enable_rfkill_int(trans); + /* If platform's RF_KILL switch is NOT set to KILL */ - hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) & - CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); + hw_rfkill = iwl_is_rfkill_set(trans); iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); - - if (hw_rfkill) { - iwl_enable_rfkill_int(trans); + if (hw_rfkill) return -ERFKILL; - } iwl_write32(trans, CSR_INT, 0xFFFFFFFF); @@ -1098,9 +1032,7 @@ static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask) static void iwl_tx_start(struct iwl_trans *trans) { - const struct queue_to_fifo_ac *queue_to_fifo; - struct iwl_trans_pcie *trans_pcie = - IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 a; unsigned long flags; int i, chan; @@ -1121,7 +1053,7 @@ static void iwl_tx_start(struct iwl_trans *trans) iwl_write_targ_mem(trans, a, 0); for (; a < trans_pcie->scd_base_addr + SCD_TRANS_TBL_OFFSET_QUEUE( - cfg(trans)->base_params->num_of_queues); + trans->cfg->base_params->num_of_queues); a += 4) iwl_write_targ_mem(trans, a, 0); @@ -1144,7 +1076,7 @@ static void iwl_tx_start(struct iwl_trans *trans) iwl_write_prph(trans, SCD_AGGR_SEL, 0); /* initiate the queues */ - for (i = 0; i < cfg(trans)->base_params->num_of_queues; i++) { + for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0); iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8)); iwl_write_targ_mem(trans, trans_pcie->scd_base_addr + @@ -1161,46 +1093,24 @@ static void iwl_tx_start(struct iwl_trans *trans) } iwl_write_prph(trans, SCD_INTERRUPT_MASK, - IWL_MASK(0, cfg(trans)->base_params->num_of_queues)); + IWL_MASK(0, trans->cfg->base_params->num_of_queues)); /* Activate all Tx DMA/FIFO channels */ iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7)); - /* map queues to FIFOs */ - if (trans->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS)) - queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo; - else - queue_to_fifo = iwlagn_default_queue_to_tx_fifo; - iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0); - /* make sure all queue are not stopped */ - memset(&trans_pcie->queue_stopped[0], 0, - sizeof(trans_pcie->queue_stopped)); - for (i = 0; i < 4; i++) - atomic_set(&trans_pcie->queue_stop_count[i], 0); - - /* reset to 0 to enable all the queue first */ - trans_pcie->txq_ctx_active_msk = 0; + /* make sure all queue are not stopped/used */ + memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); + memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); - BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) < - IWLAGN_FIRST_AMPDU_QUEUE); - BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) < - IWLAGN_FIRST_AMPDU_QUEUE); + for (i = 0; i < trans_pcie->n_q_to_fifo; i++) { + int fifo = trans_pcie->setup_q_to_fifo[i]; - for 
(i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) { - int fifo = queue_to_fifo[i].fifo; - int ac = queue_to_fifo[i].ac; + set_bit(i, trans_pcie->queue_used); - iwl_txq_ctx_activate(trans_pcie, i); - - if (fifo == IWL_TX_FIFO_UNUSED) - continue; - - if (ac != IWL_AC_UNSET) - iwl_set_swq_id(&trans_pcie->txq[i], ac, i); iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i], - fifo, 0); + fifo, true); } spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); @@ -1251,7 +1161,7 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans) } /* Unmap DMA from host system and free skb's */ - for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues; + for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; txq_id++) iwl_tx_queue_unmap(trans, txq_id); @@ -1303,6 +1213,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) iwl_disable_interrupts(trans); spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); + iwl_enable_rfkill_int(trans); + /* wait to make sure we flush pending tasklet*/ synchronize_irq(trans_pcie->irq); tasklet_kill(&trans_pcie->irq_tasklet); @@ -1311,6 +1223,12 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) /* stop and reset the on-board processor */ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /* clear all status bits */ + clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); + clear_bit(STATUS_INT_ENABLED, &trans_pcie->status); + clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status); + clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status); } static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans) @@ -1325,81 +1243,43 @@ static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans) } static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx, - u8 sta_id, u8 tid) + struct iwl_device_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload; struct iwl_cmd_meta *out_meta; struct iwl_tx_queue *txq; struct iwl_queue *q; - dma_addr_t phys_addr = 0; dma_addr_t txcmd_phys; dma_addr_t scratch_phys; u16 len, firstlen, secondlen; u8 wait_write_ptr = 0; - u8 txq_id; - bool is_agg = false; __le16 fc = hdr->frame_control; u8 hdr_len = ieee80211_hdrlen(fc); u16 __maybe_unused wifi_seq; - /* - * Send this frame after DTIM -- there's a special queue - * reserved for this for contexts that support AP mode. - */ - if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { - txq_id = trans_pcie->mcast_queue[ctx]; - - /* - * The microcode will clear the more data - * bit in the last frame it transmits. - */ - hdr->frame_control |= - cpu_to_le16(IEEE80211_FCTL_MOREDATA); - } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) - txq_id = IWL_AUX_QUEUE; - else - txq_id = - trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)]; - - /* aggregation is on for this <sta,tid> */ - if (info->flags & IEEE80211_TX_CTL_AMPDU) { - WARN_ON(tid >= IWL_MAX_TID_COUNT); - txq_id = trans_pcie->agg_txq[sta_id][tid]; - is_agg = true; - } - txq = &trans_pcie->txq[txq_id]; q = &txq->q; - spin_lock(&txq->lock); + if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) { + WARN_ON_ONCE(1); + return -EINVAL; + } - /* In AGG mode, the index in the ring must correspond to the WiFi - * sequence number. 
This is a HW requirements to help the SCD to parse - * the BA. - * Check here that the packets are in the right place on the ring. - */ -#ifdef CONFIG_IWLWIFI_DEBUG - wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); - WARN_ONCE(is_agg && ((wifi_seq & 0xff) != q->write_ptr), - "Q: %d WiFi Seq %d tfdNum %d", - txq_id, wifi_seq, q->write_ptr); -#endif + spin_lock(&txq->lock); /* Set up driver data for this TFD */ - txq->skbs[q->write_ptr] = skb; - txq->cmd[q->write_ptr] = dev_cmd; + txq->entries[q->write_ptr].skb = skb; + txq->entries[q->write_ptr].cmd = dev_cmd; dev_cmd->hdr.cmd = REPLY_TX; dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(q->write_ptr))); /* Set up first empty entry in queue's array of Tx/cmd buffers */ - out_meta = &txq->meta[q->write_ptr]; + out_meta = &txq->entries[q->write_ptr].meta; /* * Use the first empty entry in this queue's command buffer array @@ -1481,6 +1361,10 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, &dev_cmd->hdr, firstlen, skb->data + hdr_len, secondlen); + /* start timer if queue currently empty */ + if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) + mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); + /* Tell device the write index *just past* this latest filled TFD */ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); iwl_txq_update_write_ptr(trans, txq); @@ -1541,8 +1425,10 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) iwl_apm_init(trans); - hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) & - CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); + /* From now on, the op_mode will be kept updated about RF kill state */ + iwl_enable_rfkill_int(trans); + + hw_rfkill = iwl_is_rfkill_set(trans); iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); return err; @@ -1555,18 +1441,41 @@ error: return err; } -static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans) +static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans, + bool op_mode_leaving) { + bool hw_rfkill; + unsigned long flags; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + iwl_apm_stop(trans); + spin_lock_irqsave(&trans_pcie->irq_lock, flags); + iwl_disable_interrupts(trans); + spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); + iwl_write32(trans, CSR_INT, 0xFFFFFFFF); - /* Even if we stop the HW, we still want the RF kill interrupt */ - iwl_enable_rfkill_int(trans); + if (!op_mode_leaving) { + /* + * Even if we stop the HW, we still want the RF kill + * interrupt + */ + iwl_enable_rfkill_int(trans); + + /* + * Check again since the RF kill state may have changed while + * all the interrupts were disabled, in this case we couldn't + * receive the RF kill interrupt and update the state in the + * op_mode. 
+ */ + hw_rfkill = iwl_is_rfkill_set(trans); + iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); + } } -static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid, - int txq_id, int ssn, struct sk_buff_head *skbs) +static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, + struct sk_buff_head *skbs) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; @@ -1576,35 +1485,15 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid, spin_lock(&txq->lock); - txq->time_stamp = jiffies; - - if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE && - tid != IWL_TID_NON_QOS && - txq_id != trans_pcie->agg_txq[sta_id][tid])) { - /* - * FIXME: this is a uCode bug which need to be addressed, - * log the information and return for now. - * Since it is can possibly happen very often and in order - * not to fill the syslog, don't use IWL_ERR or IWL_WARN - */ - IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, " - "agg_txq[sta_id[tid] %d", txq_id, - trans_pcie->agg_txq[sta_id][tid]); - spin_unlock(&txq->lock); - return 1; - } - if (txq->q.read_ptr != tfd_num) { - IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n", - txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr, - tfd_num, ssn); + IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", + txq_id, txq->q.read_ptr, tfd_num, ssn); freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs); if (iwl_queue_space(&txq->q) > txq->q.low_mark) iwl_wake_queue(trans, txq); } spin_unlock(&txq->lock); - return 0; } static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) @@ -1623,7 +1512,7 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) } static void iwl_trans_pcie_configure(struct iwl_trans *trans, - const struct iwl_trans_config *trans_cfg) + const struct iwl_trans_config *trans_cfg) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1635,9 +1524,31 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, if (trans_pcie->n_no_reclaim_cmds) memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, trans_pcie->n_no_reclaim_cmds * sizeof(u8)); + + trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo; + + if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES)) + trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES; + + /* at least the command queue must be mapped */ + WARN_ON(!trans_pcie->n_q_to_fifo); + + memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo, + trans_pcie->n_q_to_fifo * sizeof(u8)); + + trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k; + if (trans_pcie->rx_buf_size_8k) + trans_pcie->rx_page_order = get_order(8 * 1024); + else + trans_pcie->rx_page_order = get_order(4 * 1024); + + trans_pcie->wd_timeout = + msecs_to_jiffies(trans_cfg->queue_watchdog_timeout); + + trans_pcie->command_names = trans_cfg->command_names; } -static void iwl_trans_pcie_free(struct iwl_trans *trans) +void iwl_trans_pcie_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1656,10 +1567,19 @@ static void iwl_trans_pcie_free(struct iwl_trans *trans) pci_release_regions(trans_pcie->pci_dev); pci_disable_device(trans_pcie->pci_dev); - trans->shrd->trans = NULL; kfree(trans); } +static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (state) + set_bit(STATUS_TPOWER_PMI, &trans_pcie->status); + else + 
clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status); +} + #ifdef CONFIG_PM_SLEEP static int iwl_trans_pcie_suspend(struct iwl_trans *trans) { @@ -1670,16 +1590,14 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans) { bool hw_rfkill; - hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) & - CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); - - if (hw_rfkill) - iwl_enable_rfkill_int(trans); - else - iwl_enable_interrupts(trans); + iwl_enable_rfkill_int(trans); + hw_rfkill = iwl_is_rfkill_set(trans); iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); + if (!hw_rfkill) + iwl_enable_interrupts(trans); + return 0; } #endif /* CONFIG_PM_SLEEP */ @@ -1696,7 +1614,7 @@ static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans) int ret = 0; /* waiting for all the tx frames complete might take a while */ - for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) { + for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { if (cnt == trans_pcie->cmd_queue) continue; txq = &trans_pcie->txq[cnt]; @@ -1714,42 +1632,9 @@ static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans) return ret; } -/* - * On every watchdog tick we check (latest) time stamp. If it does not - * change during timeout period and queue is not empty we reset firmware. - */ -static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_tx_queue *txq = &trans_pcie->txq[cnt]; - struct iwl_queue *q = &txq->q; - unsigned long timeout; - - if (q->read_ptr == q->write_ptr) { - txq->time_stamp = jiffies; - return 0; - } - - timeout = txq->time_stamp + - msecs_to_jiffies(hw_params(trans).wd_timeout); - - if (time_after(jiffies, timeout)) { - IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id, - hw_params(trans).wd_timeout); - IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", - q->read_ptr, q->write_ptr); - IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n", - iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) - & (TFD_QUEUE_SIZE_MAX - 1), - iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt))); - return 1; - } - - return 0; -} - static const char *get_fh_string(int cmd) { +#define IWL_CMD(x) case x: return #x switch (cmd) { IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); @@ -1763,6 +1648,7 @@ static const char *get_fh_string(int cmd) default: return "UNKNOWN"; } +#undef IWL_CMD } int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) @@ -1811,6 +1697,7 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) static const char *get_csr_string(int cmd) { +#define IWL_CMD(x) case x: return #x switch (cmd) { IWL_CMD(CSR_HW_IF_CONFIG_REG); IWL_CMD(CSR_INT_COALESCING); @@ -1838,6 +1725,7 @@ static const char *get_csr_string(int cmd) default: return "UNKNOWN"; } +#undef IWL_CMD } void iwl_dump_csr(struct iwl_trans *trans) @@ -1938,32 +1826,23 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, int ret; size_t bufsz; - bufsz = sizeof(char) * 64 * cfg(trans)->base_params->num_of_queues; + bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues; - if (!trans_pcie->txq) { - IWL_ERR(trans, "txq not ready\n"); + if (!trans_pcie->txq) return -EAGAIN; - } + buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; - for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) { + for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { txq = &trans_pcie->txq[cnt]; q = &txq->q; pos += scnprintf(buf + pos, bufsz - pos, - "hwq %.2d: 
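The IWL_CMD(x) helper defined and undefined around each switch is plain preprocessor stringification: one macro expands a constant into both its case label and its name string. A self-contained illustration with invented register names:

    #include <stdio.h>

    static const char *reg_name(int reg)
    {
    #define NAME(x) case x: return #x
        enum { REG_FOO = 1, REG_BAR = 2 };  /* made-up registers */

        switch (reg) {
        NAME(REG_FOO);
        NAME(REG_BAR);
        default:
            return "UNKNOWN";
        }
    #undef NAME
    }

    int main(void)
    {
        printf("%s\n", reg_name(2));    /* prints REG_BAR */
        return 0;
    }

For host command opcodes the same job is now done by the 256-entry command_names table handed in through iwl_trans_config and looked up by trans_pcie_get_cmd_string().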
read=%u write=%u stop=%d" - " swq_id=%#.2x (ac %d/hwq %d)\n", + "hwq %.2d: read=%u write=%u use=%d stop=%d\n", cnt, q->read_ptr, q->write_ptr, - !!test_bit(cnt, trans_pcie->queue_stopped), - txq->swq_id, txq->swq_id & 3, - (txq->swq_id >> 2) & 0x1f); - if (cnt >= 4) - continue; - /* for the ACs, display the stop count too */ - pos += scnprintf(buf + pos, bufsz - pos, - " stop-count: %d\n", - atomic_read(&trans_pcie->queue_stop_count[cnt])); + !!test_bit(cnt, trans_pcie->queue_used), + !!test_bit(cnt, trans_pcie->queue_stopped)); } ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); @@ -1997,44 +1876,6 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } -static ssize_t iwl_dbgfs_log_event_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_trans *trans = file->private_data; - char *buf; - int pos = 0; - ssize_t ret = -ENOMEM; - - ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true); - if (buf) { - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - } - return ret; -} - -static ssize_t iwl_dbgfs_log_event_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_trans *trans = file->private_data; - u32 event_log_flag; - char buf[8]; - int buf_size; - - memset(buf, 0, sizeof(buf)); - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - if (sscanf(buf, "%d", &event_log_flag) != 1) - return -EFAULT; - if (event_log_flag == 1) - iwl_dump_nic_event_log(trans, true, NULL, false); - - return count; -} - static ssize_t iwl_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -2050,10 +1891,8 @@ static ssize_t iwl_dbgfs_interrupt_read(struct file *file, ssize_t ret; buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) { - IWL_ERR(trans, "Can not allocate Buffer\n"); + if (!buf) return -ENOMEM; - } pos += scnprintf(buf + pos, bufsz - pos, "Interrupt Statistics Report:\n"); @@ -2161,12 +2000,26 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, return ret; } -DEBUGFS_READ_WRITE_FILE_OPS(log_event); +static ssize_t iwl_dbgfs_fw_restart_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_trans *trans = file->private_data; + + if (!trans->op_mode) + return -EAGAIN; + + iwl_op_mode_nic_error(trans->op_mode); + + return count; +} + DEBUGFS_READ_WRITE_FILE_OPS(interrupt); DEBUGFS_READ_FILE_OPS(fh_reg); DEBUGFS_READ_FILE_OPS(rx_queue); DEBUGFS_READ_FILE_OPS(tx_queue); DEBUGFS_WRITE_FILE_OPS(csr); +DEBUGFS_WRITE_FILE_OPS(fw_restart); /* * Create the debugfs files and directories @@ -2177,10 +2030,10 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, { DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR); DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR); - DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR); DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR); DEBUGFS_ADD_FILE(csr, dir, S_IWUSR); DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); + DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR); return 0; } #else @@ -2190,7 +2043,7 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, #endif /*CONFIG_IWLWIFI_DEBUGFS */ -const struct iwl_trans_ops trans_ops_pcie = { +static const struct iwl_trans_ops trans_ops_pcie = { .start_hw = iwl_trans_pcie_start_hw, .stop_hw = iwl_trans_pcie_stop_hw, .fw_alive = iwl_trans_pcie_fw_alive, @@ -2205,15 +2058,11 @@ 
const struct iwl_trans_ops trans_ops_pcie = { .reclaim = iwl_trans_pcie_reclaim, .tx_agg_disable = iwl_trans_pcie_tx_agg_disable, - .tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc, .tx_agg_setup = iwl_trans_pcie_tx_agg_setup, - .free = iwl_trans_pcie_free, - .dbgfs_register = iwl_trans_pcie_dbgfs_register, .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty, - .check_stuck_queue = iwl_trans_pcie_check_stuck_queue, #ifdef CONFIG_PM_SLEEP .suspend = iwl_trans_pcie_suspend, @@ -2223,11 +2072,12 @@ const struct iwl_trans_ops trans_ops_pcie = { .write32 = iwl_trans_pcie_write32, .read32 = iwl_trans_pcie_read32, .configure = iwl_trans_pcie_configure, + .set_pmi = iwl_trans_pcie_set_pmi, }; -struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd, - struct pci_dev *pdev, - const struct pci_device_id *ent) +struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, + const struct pci_device_id *ent, + const struct iwl_cfg *cfg) { struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; @@ -2243,7 +2093,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd, trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); trans->ops = &trans_ops_pcie; - trans->shrd = shrd; + trans->cfg = cfg; trans_pcie->trans = trans; spin_lock_init(&trans_pcie->irq_lock); init_waitqueue_head(&trans_pcie->ucode_write_waitq); @@ -2325,6 +2175,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd, /* Initialize the wait queue for commands */ init_waitqueue_head(&trans->wait_command_queue); + spin_lock_init(&trans->reg_lock); return trans; diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index fdf97886a5e..79a1e7ae499 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -66,8 +66,9 @@ #include <linux/ieee80211.h> #include <linux/mm.h> /* for page_address */ -#include "iwl-shared.h" #include "iwl-debug.h" +#include "iwl-config.h" +#include "iwl-fw.h" /** * DOC: Transport layer - what is it ? @@ -104,13 +105,6 @@ * 6) Eventually, the free function will be called. */ -struct iwl_priv; -struct iwl_shared; -struct iwl_op_mode; -struct fw_img; -struct sk_buff; -struct dentry; - /** * DOC: Host command section * @@ -162,6 +156,8 @@ struct iwl_cmd_header { #define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */ +#define FH_RSCSR_FRAME_INVALID 0x55550000 +#define FH_RSCSR_FRAME_ALIGN 0x40 struct iwl_rx_packet { /* @@ -260,28 +256,43 @@ static inline void iwl_free_resp(struct iwl_host_cmd *cmd) struct iwl_rx_cmd_buffer { struct page *_page; + int _offset; + bool _page_stolen; unsigned int truesize; }; static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r) { - return page_address(r->_page); + return (void *)((unsigned long)page_address(r->_page) + r->_offset); +} + +static inline int rxb_offset(struct iwl_rx_cmd_buffer *r) +{ + return r->_offset; } static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r) { - struct page *p = r->_page; - r->_page = NULL; - return p; + r->_page_stolen = true; + get_page(r->_page); + return r->_page; } #define MAX_NO_RECLAIM_CMDS 6 +/* + * Maximum number of HW queues the transport layer + * currently supports + */ +#define IWL_MAX_HW_QUEUES 32 + /** * struct iwl_trans_config - transport configuration * * @op_mode: pointer to the upper layer. - * Must be set before any other call. + * @queue_to_fifo: queue to FIFO mapping to set up by + * default + * @n_queue_to_fifo: number of queues to set up * @cmd_queue: the index of the command queue. 
* Must be set before start_fw. * @no_reclaim_cmds: Some devices erroneously don't set the @@ -289,14 +300,29 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r) * list of such notifications to filter. Max length is * %MAX_NO_RECLAIM_CMDS. * @n_no_reclaim_cmds: # of commands in list + * @rx_buf_size_8k: 8 kB RX buffer size needed for A-MSDUs, + * if unset 4k will be the RX buffer size + * @queue_watchdog_timeout: time (in ms) after which queues + * are considered stuck and will trigger device restart + * @command_names: array of command names, must be 256 entries + * (one for each command); for debugging only */ struct iwl_trans_config { struct iwl_op_mode *op_mode; + const u8 *queue_to_fifo; + u8 n_queue_to_fifo; + u8 cmd_queue; const u8 *no_reclaim_cmds; int n_no_reclaim_cmds; + + bool rx_buf_size_8k; + unsigned int queue_watchdog_timeout; + const char **command_names; }; +struct iwl_trans; + /** * struct iwl_trans_ops - transport specific operations * @@ -305,7 +331,8 @@ struct iwl_trans_config { * @start_hw: starts the HW- from that point on, the HW can send interrupts * May sleep * @stop_hw: stops the HW- from that point on, the HW will be in low power but - * will still issue interrupt if the HW RF kill is triggered. + * will still issue interrupt if the HW RF kill is triggered unless + * op_mode_leaving is true. * May sleep * @start_fw: allocates and inits all the resources for the transport * layer. Also kick a fw image. @@ -323,18 +350,11 @@ struct iwl_trans_config { * Must be atomic * @reclaim: free packet until ssn. Returns a list of freed packets. * Must be atomic - * @tx_agg_alloc: allocate resources for a TX BA session - * Must be atomic * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is * ready and a successful ADDBA response has been received. * May sleep * @tx_agg_disable: de-configure a Tx queue to send AMPDUs * Must be atomic - * @free: release all the ressource for the transport layer itself such as - * irq, tasklet etc... From this point on, the device may not issue - * any interrupt (incl. RFKILL). - * May sleep - * @check_stuck_queue: check if a specific queue is stuck * @wait_tx_queue_empty: wait until all tx queues are empty * May sleep * @dbgfs_register: add the dbgfs files under this directory. Files will be @@ -347,11 +367,12 @@ struct iwl_trans_config { * @configure: configure parameters required by the transport layer from * the op_mode. May be called several times before start_fw, can't be * called after that. 
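For orientation, this is roughly what an op_mode-side configuration might look like once the new fields are filled in. Only the field names come from the struct above; the helper arrays, the concrete values and my_command_names are assumptions, and the queue-to-FIFO table is shortened:

    static const u8 my_queue_to_fifo[] = {
        IWL_TX_FIFO_VO, IWL_TX_FIFO_VI, IWL_TX_FIFO_BE, IWL_TX_FIFO_BK,
        IWLAGN_CMD_FIFO_NUM,
    };
    static const u8 my_no_reclaim_cmds[] = { REPLY_RX_PHY_CMD };

    struct iwl_trans_config trans_cfg = {
        .op_mode = op_mode,
        .queue_to_fifo = my_queue_to_fifo,
        .n_queue_to_fifo = ARRAY_SIZE(my_queue_to_fifo),
        .cmd_queue = 4,                 /* the "(#4/#9)" command queue */
        .no_reclaim_cmds = my_no_reclaim_cmds,
        .n_no_reclaim_cmds = ARRAY_SIZE(my_no_reclaim_cmds),
        .rx_buf_size_8k = false,        /* 4k RBs unless A-MSDU needs 8k */
        .queue_watchdog_timeout = 2000, /* ms of no progress before restart */
        .command_names = my_command_names,  /* 256 entries, one per opcode */
    };

The transport's configure() hook copies all of this into trans_pcie, and warns if no queue-to-FIFO entries were provided at all, since at least the command queue must be mapped.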
+ * @set_pmi: set the power pmi state */ struct iwl_trans_ops { int (*start_hw)(struct iwl_trans *iwl_trans); - void (*stop_hw)(struct iwl_trans *iwl_trans); + void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving); int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw); void (*fw_alive)(struct iwl_trans *trans); void (*stop_device)(struct iwl_trans *trans); @@ -361,23 +382,15 @@ struct iwl_trans_ops { int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx, - u8 sta_id, u8 tid); - int (*reclaim)(struct iwl_trans *trans, int sta_id, int tid, - int txq_id, int ssn, struct sk_buff_head *skbs); + struct iwl_device_cmd *dev_cmd, int queue); + void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, + struct sk_buff_head *skbs); - int (*tx_agg_disable)(struct iwl_trans *trans, - int sta_id, int tid); - int (*tx_agg_alloc)(struct iwl_trans *trans, - int sta_id, int tid); - void (*tx_agg_setup)(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx, int sta_id, int tid, - int frame_limit, u16 ssn); - - void (*free)(struct iwl_trans *trans); + void (*tx_agg_setup)(struct iwl_trans *trans, int queue, int fifo, + int sta_id, int tid, int frame_limit, u16 ssn); + void (*tx_agg_disable)(struct iwl_trans *trans, int queue); int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir); - int (*check_stuck_queue)(struct iwl_trans *trans, int q); int (*wait_tx_queue_empty)(struct iwl_trans *trans); #ifdef CONFIG_PM_SLEEP int (*suspend)(struct iwl_trans *trans); @@ -388,6 +401,7 @@ struct iwl_trans_ops { u32 (*read32)(struct iwl_trans *trans, u32 ofs); void (*configure)(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg); + void (*set_pmi)(struct iwl_trans *trans, bool state); }; /** @@ -406,20 +420,19 @@ enum iwl_trans_state { * * @ops - pointer to iwl_trans_ops * @op_mode - pointer to the op_mode - * @shrd - pointer to iwl_shared which holds shared data from the upper layer + * @cfg - pointer to the configuration * @reg_lock - protect hw register access * @dev - pointer to struct device * that represents the device * @hw_id: a u32 with the ID of the device / subdevice. * Set during transport allocation. * @hw_id_str: a string with info about HW ID. Set during transport allocation. 
- * @nvm_device_type: indicates OTP or eeprom * @pm_support: set to true in start_hw if link pm is supported * @wait_command_queue: the wait_queue for SYNC host commands */ struct iwl_trans { const struct iwl_trans_ops *ops; struct iwl_op_mode *op_mode; - struct iwl_shared *shrd; + const struct iwl_cfg *cfg; enum iwl_trans_state state; spinlock_t reg_lock; @@ -428,7 +441,6 @@ struct iwl_trans { u32 hw_id; char hw_id_str[52]; - int nvm_device_type; bool pm_support; wait_queue_head_t wait_command_queue; @@ -457,11 +469,12 @@ static inline int iwl_trans_start_hw(struct iwl_trans *trans) return trans->ops->start_hw(trans); } -static inline void iwl_trans_stop_hw(struct iwl_trans *trans) +static inline void iwl_trans_stop_hw(struct iwl_trans *trans, + bool op_mode_leaving) { might_sleep(); - trans->ops->stop_hw(trans); + trans->ops->stop_hw(trans, op_mode_leaving); trans->state = IWL_TRANS_NO_FW; } @@ -508,60 +521,42 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans, } static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx, - u8 sta_id, u8 tid) -{ - if (trans->state != IWL_TRANS_FW_ALIVE) - IWL_ERR(trans, "%s bad state = %d", __func__, trans->state); - - return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id, tid); -} - -static inline int iwl_trans_reclaim(struct iwl_trans *trans, int sta_id, - int tid, int txq_id, int ssn, - struct sk_buff_head *skbs) + struct iwl_device_cmd *dev_cmd, int queue) { WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, "%s bad state = %d", __func__, trans->state); - return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, skbs); + return trans->ops->tx(trans, skb, dev_cmd, queue); } -static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans, - int sta_id, int tid) +static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue, + int ssn, struct sk_buff_head *skbs) { WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, "%s bad state = %d", __func__, trans->state); - return trans->ops->tx_agg_disable(trans, sta_id, tid); + trans->ops->reclaim(trans, queue, ssn, skbs); } -static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans, - int sta_id, int tid) +static inline void iwl_trans_tx_agg_disable(struct iwl_trans *trans, int queue) { WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, "%s bad state = %d", __func__, trans->state); - return trans->ops->tx_agg_alloc(trans, sta_id, tid); + trans->ops->tx_agg_disable(trans, queue); } - -static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx, - int sta_id, int tid, - int frame_limit, u16 ssn) +static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, int queue, + int fifo, int sta_id, int tid, + int frame_limit, u16 ssn) { might_sleep(); WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, "%s bad state = %d", __func__, trans->state); - trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn); -} - -static inline void iwl_trans_free(struct iwl_trans *trans) -{ - trans->ops->free(trans); + trans->ops->tx_agg_setup(trans, queue, fifo, sta_id, tid, + frame_limit, ssn); } static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans) @@ -572,13 +567,6 @@ static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans) return trans->ops->wait_tx_queue_empty(trans); } -static inline int iwl_trans_check_stuck_queue(struct iwl_trans *trans, int q) -{ - WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, - "%s bad state = %d", __func__, trans->state); - - 
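With the (ctx, sta_id, tid) addressing gone, the data path now talks to the transport purely in terms of HW queue numbers. A condensed sketch of the new flow, using only the wrappers defined in this hunk; the op-mode bookkeeping around them is invented for illustration:

    /* TX: the op_mode now chooses the HW queue itself */
    static int example_tx(struct iwl_trans *trans, struct sk_buff *skb,
                          struct iwl_device_cmd *dev_cmd, int queue)
    {
            return iwl_trans_tx(trans, skb, dev_cmd, queue);
    }

    /* completion: free frames up to ssn on that queue and collect them */
    static void example_reclaim(struct iwl_trans *trans, int queue, int ssn)
    {
            struct sk_buff_head skbs;

            __skb_queue_head_init(&skbs);
            iwl_trans_reclaim(trans, queue, ssn, &skbs);
            /* hand the reclaimed frames back to mac80211 here */
    }

    /* AMPDU: bind a queue to a FIFO once the ADDBA response has arrived */
    static void example_agg_start(struct iwl_trans *trans, int queue, int fifo,
                                  int sta_id, int tid, int frame_limit, u16 ssn)
    {
            iwl_trans_tx_agg_setup(trans, queue, fifo, sta_id, tid,
                                   frame_limit, ssn);
    }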
return trans->ops->check_stuck_queue(trans, q); -} static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans, struct dentry *dir) { @@ -612,20 +600,15 @@ static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs) return trans->ops->read32(trans, ofs); } +static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state) +{ + trans->ops->set_pmi(trans, state); +} + /***************************************************** -* Transport layers implementations + their allocation function +* driver (transport) register/unregister functions ******************************************************/ -struct pci_dev; -struct pci_device_id; -extern const struct iwl_trans_ops trans_ops_pcie; -struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd, - struct pci_dev *pdev, - const struct pci_device_id *ent); int __must_check iwl_pci_register_driver(void); void iwl_pci_unregister_driver(void); -extern const struct iwl_trans_ops trans_ops_idi; -struct iwl_trans *iwl_trans_idi_alloc(struct iwl_shared *shrd, - void *pdev_void, - const void *ent_void); #endif /* __iwl_trans_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-ucode.c b/drivers/net/wireless/iwlwifi/iwl-ucode.c index 25282872883..bc40dc68b0f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-ucode.c +++ b/drivers/net/wireless/iwlwifi/iwl-ucode.c @@ -31,7 +31,6 @@ #include <linux/init.h> #include "iwl-dev.h" -#include "iwl-core.h" #include "iwl-io.h" #include "iwl-agn-hw.h" #include "iwl-agn.h" @@ -40,37 +39,6 @@ #include "iwl-fh.h" #include "iwl-op-mode.h" -static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = { - {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP, - 0, COEX_UNASSOC_IDLE_FLAGS}, - {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP, - 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS}, - {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP, - 0, COEX_UNASSOC_AUTO_SCAN_FLAGS}, - {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP, - 0, COEX_CALIBRATION_FLAGS}, - {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP, - 0, COEX_PERIODIC_CALIBRATION_FLAGS}, - {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP, - 0, COEX_CONNECTION_ESTAB_FLAGS}, - {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP, - 0, COEX_ASSOCIATED_IDLE_FLAGS}, - {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP, - 0, COEX_ASSOC_MANUAL_SCAN_FLAGS}, - {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP, - 0, COEX_ASSOC_AUTO_SCAN_FLAGS}, - {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP, - 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS}, - {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS}, - {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS}, - {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP, - 0, COEX_STAND_ALONE_DEBUG_FLAGS}, - {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP, - 0, COEX_IPAN_ASSOC_LEVEL_FLAGS}, - {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS}, - {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS} -}; - /****************************************************************************** * * uCode download functions @@ -93,7 +61,7 @@ static int iwl_set_Xtal_calib(struct iwl_priv *priv) { struct iwl_calib_xtal_freq_cmd cmd; __le16 *xtal_calib = - (__le16 *)iwl_eeprom_query_addr(priv->shrd, EEPROM_XTAL); + (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL); iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD); cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]); @@ -105,8 +73,7 @@ static int 
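Since the per-transport allocation helpers are gone from this header, a driver module now only deals with the PCI register/unregister pair. A sketch of the expected module glue; the function names are placeholders:

    static int __init example_drv_init(void)
    {
            /* iwl_pci_register_driver() is __must_check, so propagate it */
            return iwl_pci_register_driver();
    }
    module_init(example_drv_init);

    static void __exit example_drv_exit(void)
    {
            iwl_pci_unregister_driver();
    }
    module_exit(example_drv_exit);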
iwl_set_temperature_offset_calib(struct iwl_priv *priv) { struct iwl_calib_temperature_offset_cmd cmd; __le16 *offset_calib = - (__le16 *)iwl_eeprom_query_addr(priv->shrd, - EEPROM_RAW_TEMPERATURE); + (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE); memset(&cmd, 0, sizeof(cmd)); iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); @@ -122,16 +89,15 @@ static int iwl_set_temperature_offset_calib(struct iwl_priv *priv) static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv) { struct iwl_calib_temperature_offset_v2_cmd cmd; - __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv->shrd, + __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_KELVIN_TEMPERATURE); __le16 *offset_calib_low = - (__le16 *)iwl_eeprom_query_addr(priv->shrd, - EEPROM_RAW_TEMPERATURE); + (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE); struct iwl_eeprom_calib_hdr *hdr; memset(&cmd, 0, sizeof(cmd)); iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); - hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv->shrd, + hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv, EEPROM_CALIB_ALL); memcpy(&cmd.radio_sensor_offset_high, offset_calib_high, sizeof(*offset_calib_high)); @@ -174,30 +140,12 @@ static int iwl_send_calib_cfg(struct iwl_priv *priv) return iwl_dvm_send_cmd(priv, &cmd); } -int iwlagn_rx_calib_result(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->data; - int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; - - /* reduce the size of the length field itself */ - len -= 4; - - if (iwl_calib_set(priv, hdr, len)) - IWL_ERR(priv, "Failed to record calibration data %d\n", - hdr->op_code); - - return 0; -} - int iwl_init_alive_start(struct iwl_priv *priv) { int ret; - if (cfg(priv)->bt_params && - cfg(priv)->bt_params->advanced_bt_coexist) { + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist) { /* * Tell uCode we are ready to perform calibration * need to perform this before any calibration @@ -219,8 +167,8 @@ int iwl_init_alive_start(struct iwl_priv *priv) * temperature offset calibration is only needed for runtime ucode, * so prepare the value now. 
*/ - if (cfg(priv)->need_temp_offset_calib) { - if (cfg(priv)->temp_offset_v2) + if (priv->cfg->need_temp_offset_calib) { + if (priv->cfg->temp_offset_v2) return iwl_set_temperature_offset_calib_v2(priv); else return iwl_set_temperature_offset_calib(priv); @@ -229,29 +177,13 @@ int iwl_init_alive_start(struct iwl_priv *priv) return 0; } -static int iwl_send_wimax_coex(struct iwl_priv *priv) +int iwl_send_wimax_coex(struct iwl_priv *priv) { struct iwl_wimax_coex_cmd coex_cmd; - if (cfg(priv)->base_params->support_wimax_coexist) { - /* UnMask wake up src at associated sleep */ - coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK; + /* coexistence is disabled */ + memset(&coex_cmd, 0, sizeof(coex_cmd)); - /* UnMask wake up src at unassociated sleep */ - coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK; - memcpy(coex_cmd.sta_prio, cu_priorities, - sizeof(struct iwl_wimax_coex_event_entry) * - COEX_NUM_OF_EVENTS); - - /* enabling the coexistence feature */ - coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK; - - /* enabling the priorities tables */ - coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK; - } else { - /* coexistence is disabled */ - memset(&coex_cmd, 0, sizeof(coex_cmd)); - } return iwl_dvm_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD, CMD_SYNC, sizeof(coex_cmd), &coex_cmd); @@ -311,7 +243,7 @@ static int iwl_alive_notify(struct iwl_priv *priv) { int ret; - iwl_trans_fw_alive(trans(priv)); + iwl_trans_fw_alive(priv->trans); priv->passive_no_rx = false; priv->transport_queue_stop = 0; @@ -320,7 +252,7 @@ static int iwl_alive_notify(struct iwl_priv *priv) if (ret) return ret; - if (!cfg(priv)->no_xtal_calib) { + if (!priv->cfg->no_xtal_calib) { ret = iwl_set_Xtal_calib(priv); if (ret) return ret; @@ -349,9 +281,9 @@ static int iwl_verify_sec_sparse(struct iwl_priv *priv, /* read data comes through single port, auto-incr addr */ /* NOTE: Use the debugless read so we don't flood kernel log * if IWL_DL_IO is set */ - iwl_write_direct32(trans(priv), HBUS_TARG_MEM_RADDR, + iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR, i + fw_desc->offset); - val = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); + val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT); if (val != le32_to_cpu(*image)) return -EIO; } @@ -370,14 +302,14 @@ static void iwl_print_mismatch_sec(struct iwl_priv *priv, IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len); - iwl_write_direct32(trans(priv), HBUS_TARG_MEM_RADDR, + iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR, fw_desc->offset); for (offs = 0; offs < len && errors < 20; offs += sizeof(u32), image++) { /* read data comes through single port, auto-incr addr */ - val = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); + val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT); if (val != le32_to_cpu(*image)) { IWL_ERR(priv, "uCode INST section at " "offset 0x%x, is 0x%x, s/b 0x%x\n", @@ -417,9 +349,8 @@ struct iwl_alive_data { u8 subtype; }; -static void iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, - struct iwl_rx_packet *pkt, - void *data) +static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) { struct iwl_priv *priv = container_of(notif_wait, struct iwl_priv, notif_wait); @@ -433,13 +364,15 @@ static void iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, palive->is_valid, palive->ver_type, palive->ver_subtype); - priv->shrd->device_pointers.error_event_table = + priv->device_pointers.error_event_table = le32_to_cpu(palive->error_event_table_ptr); - priv->shrd->device_pointers.log_event_table = + 
priv->device_pointers.log_event_table = le32_to_cpu(palive->log_event_table_ptr); alive_data->subtype = palive->ver_subtype; alive_data->valid = palive->is_valid == UCODE_VALID_OK; + + return true; } #define UCODE_ALIVE_TIMEOUT HZ @@ -453,9 +386,10 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, const struct fw_img *fw; int ret; enum iwl_ucode_type old_type; + static const u8 alive_cmd[] = { REPLY_ALIVE }; - old_type = priv->shrd->ucode_type; - priv->shrd->ucode_type = ucode_type; + old_type = priv->cur_ucode; + priv->cur_ucode = ucode_type; fw = iwl_get_ucode_image(priv, ucode_type); priv->ucode_loaded = false; @@ -463,12 +397,13 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, if (!fw) return -EINVAL; - iwl_init_notification_wait(&priv->notif_wait, &alive_wait, REPLY_ALIVE, - iwl_alive_fn, &alive_data); + iwl_init_notification_wait(&priv->notif_wait, &alive_wait, + alive_cmd, ARRAY_SIZE(alive_cmd), + iwl_alive_fn, &alive_data); - ret = iwl_trans_start_fw(trans(priv), fw); + ret = iwl_trans_start_fw(priv->trans, fw); if (ret) { - priv->shrd->ucode_type = old_type; + priv->cur_ucode = old_type; iwl_remove_notification(&priv->notif_wait, &alive_wait); return ret; } @@ -480,13 +415,13 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, ret = iwl_wait_notification(&priv->notif_wait, &alive_wait, UCODE_ALIVE_TIMEOUT); if (ret) { - priv->shrd->ucode_type = old_type; + priv->cur_ucode = old_type; return ret; } if (!alive_data.valid) { IWL_ERR(priv, "Loaded ucode is not valid!\n"); - priv->shrd->ucode_type = old_type; + priv->cur_ucode = old_type; return -EIO; } @@ -498,7 +433,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, if (ucode_type != IWL_UCODE_WOWLAN) { ret = iwl_verify_ucode(priv, ucode_type); if (ret) { - priv->shrd->ucode_type = old_type; + priv->cur_ucode = old_type; return ret; } @@ -510,7 +445,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, if (ret) { IWL_WARN(priv, "Could not complete ALIVE transition: %d\n", ret); - priv->shrd->ucode_type = old_type; + priv->cur_ucode = old_type; return ret; } @@ -519,9 +454,38 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, return 0; } +static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_priv *priv = data; + struct iwl_calib_hdr *hdr; + int len; + + if (pkt->hdr.cmd != CALIBRATION_RES_NOTIFICATION) { + WARN_ON(pkt->hdr.cmd != CALIBRATION_COMPLETE_NOTIFICATION); + return true; + } + + hdr = (struct iwl_calib_hdr *)pkt->data; + len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + + /* reduce the size by the length field itself */ + len -= sizeof(__le32); + + if (iwl_calib_set(priv, hdr, len)) + IWL_ERR(priv, "Failed to record calibration data %d\n", + hdr->op_code); + + return false; +} + int iwl_run_init_ucode(struct iwl_priv *priv) { struct iwl_notification_wait calib_wait; + static const u8 calib_complete[] = { + CALIBRATION_RES_NOTIFICATION, + CALIBRATION_COMPLETE_NOTIFICATION + }; int ret; lockdep_assert_held(&priv->mutex); @@ -534,8 +498,8 @@ int iwl_run_init_ucode(struct iwl_priv *priv) return 0; iwl_init_notification_wait(&priv->notif_wait, &calib_wait, - CALIBRATION_COMPLETE_NOTIFICATION, - NULL, NULL); + calib_complete, ARRAY_SIZE(calib_complete), + iwlagn_wait_calib, priv); /* Will also start the device */ ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT); @@ -561,7 +525,7 @@ int iwl_run_init_ucode(struct iwl_priv *priv) iwl_remove_notification(&priv->notif_wait, &calib_wait); out: /* Whatever 
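The notification-wait helpers now take an array of command IDs plus a handler that returns bool: true ends the wait, false keeps it alive, as iwlagn_wait_calib does above while accumulating calibration results. A minimal sketch of the pattern, modeled on the REPLY_ALIVE and calibration users in this file; the trigger step is a placeholder:

    static bool example_calib_done(struct iwl_notif_wait_data *notif_wait,
                                   struct iwl_rx_packet *pkt, void *data)
    {
            /* keep waiting until the completion notification shows up */
            return pkt->hdr.cmd == CALIBRATION_COMPLETE_NOTIFICATION;
    }

    static int example_wait_for_calibration(struct iwl_priv *priv)
    {
            static const u8 cmds[] = {
                    CALIBRATION_RES_NOTIFICATION,
                    CALIBRATION_COMPLETE_NOTIFICATION,
            };
            struct iwl_notification_wait wait;
            int ret;

            iwl_init_notification_wait(&priv->notif_wait, &wait,
                                       cmds, ARRAY_SIZE(cmds),
                                       example_calib_done, priv);

            ret = example_kick_firmware(priv);      /* hypothetical trigger */
            if (ret) {
                    /* nothing will arrive, drop the waiter explicitly */
                    iwl_remove_notification(&priv->notif_wait, &wait);
                    return ret;
            }

            return iwl_wait_notification(&priv->notif_wait, &wait, 2 * HZ);
    }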
happened, stop the device */ - iwl_trans_stop_device(trans(priv)); + iwl_trans_stop_device(priv->trans); priv->ucode_loaded = false; return ret; diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig index 03f998d098c..7107ce53d4d 100644 --- a/drivers/net/wireless/iwmc3200wifi/Kconfig +++ b/drivers/net/wireless/iwmc3200wifi/Kconfig @@ -1,5 +1,5 @@ config IWM - tristate "Intel Wireless Multicomm 3200 WiFi driver" + tristate "Intel Wireless Multicomm 3200 WiFi driver (EXPERIMENTAL)" depends on MMC && EXPERIMENTAL depends on CFG80211 select FW_LOADER diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile index f7d01bfa2e4..eac72f7bd34 100644 --- a/drivers/net/wireless/libertas/Makefile +++ b/drivers/net/wireless/libertas/Makefile @@ -6,6 +6,7 @@ libertas-y += ethtool.o libertas-y += main.o libertas-y += rx.o libertas-y += tx.o +libertas-y += firmware.o libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o usb8xxx-objs += if_usb.o diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h index bc951ab4b68..84a3aa7ac57 100644 --- a/drivers/net/wireless/libertas/decl.h +++ b/drivers/net/wireless/libertas/decl.h @@ -19,6 +19,10 @@ struct lbs_fw_table { }; struct lbs_private; +typedef void (*lbs_fw_cb)(struct lbs_private *priv, int ret, + const struct firmware *helper, const struct firmware *mainfw); + +struct lbs_private; struct sk_buff; struct net_device; struct cmd_ds_command; @@ -66,10 +70,13 @@ int lbs_exit_auto_deep_sleep(struct lbs_private *priv); u32 lbs_fw_index_to_data_rate(u8 index); u8 lbs_data_rate_to_fw_index(u32 rate); -int lbs_get_firmware(struct device *dev, const char *user_helper, - const char *user_mainfw, u32 card_model, +int lbs_get_firmware(struct device *dev, u32 card_model, const struct lbs_fw_table *fw_table, const struct firmware **helper, const struct firmware **mainfw); +int lbs_get_firmware_async(struct lbs_private *priv, struct device *device, + u32 card_model, const struct lbs_fw_table *fw_table, + lbs_fw_cb callback); +void lbs_wait_for_firmware_load(struct lbs_private *priv); #endif diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h index f3fd447131c..672005430ac 100644 --- a/drivers/net/wireless/libertas/dev.h +++ b/drivers/net/wireless/libertas/dev.h @@ -7,6 +7,7 @@ #define _LBS_DEV_H_ #include "defs.h" +#include "decl.h" #include "host.h" #include <linux/kfifo.h> @@ -180,6 +181,15 @@ struct lbs_private { wait_queue_head_t scan_q; /* Whether the scan was initiated internally and not by cfg80211 */ bool internal_scan; + + /* Firmware load */ + u32 fw_model; + wait_queue_head_t fw_waitq; + struct device *fw_device; + const struct firmware *helper_fw; + const struct lbs_fw_table *fw_table; + const struct lbs_fw_table *fw_iter; + lbs_fw_cb fw_callback; }; extern struct cmd_confirm_sleep confirm_sleep; diff --git a/drivers/net/wireless/libertas/firmware.c b/drivers/net/wireless/libertas/firmware.c new file mode 100644 index 00000000000..601f2075355 --- /dev/null +++ b/drivers/net/wireless/libertas/firmware.c @@ -0,0 +1,224 @@ +/* + * Firmware loading and handling functions. 
+ */ + +#include <linux/sched.h> +#include <linux/firmware.h> +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/sched.h> + +#include "dev.h" +#include "decl.h" + +static void load_next_firmware_from_table(struct lbs_private *private); + +static void lbs_fw_loaded(struct lbs_private *priv, int ret, + const struct firmware *helper, const struct firmware *mainfw) +{ + unsigned long flags; + + lbs_deb_fw("firmware load complete, code %d\n", ret); + + /* User must free helper/mainfw */ + priv->fw_callback(priv, ret, helper, mainfw); + + spin_lock_irqsave(&priv->driver_lock, flags); + priv->fw_callback = NULL; + wake_up(&priv->fw_waitq); + spin_unlock_irqrestore(&priv->driver_lock, flags); +} + +static void do_load_firmware(struct lbs_private *priv, const char *name, + void (*cb)(const struct firmware *fw, void *context)) +{ + int ret; + + lbs_deb_fw("Requesting %s\n", name); + ret = request_firmware_nowait(THIS_MODULE, true, name, + priv->fw_device, GFP_KERNEL, priv, cb); + if (ret) { + lbs_deb_fw("request_firmware_nowait error %d\n", ret); + lbs_fw_loaded(priv, ret, NULL, NULL); + } +} + +static void main_firmware_cb(const struct firmware *firmware, void *context) +{ + struct lbs_private *priv = context; + + if (!firmware) { + /* Failed to find firmware: try next table entry */ + load_next_firmware_from_table(priv); + return; + } + + /* Firmware found! */ + lbs_fw_loaded(priv, 0, priv->helper_fw, firmware); +} + +static void helper_firmware_cb(const struct firmware *firmware, void *context) +{ + struct lbs_private *priv = context; + + if (!firmware) { + /* Failed to find firmware: try next table entry */ + load_next_firmware_from_table(priv); + return; + } + + /* Firmware found! */ + if (priv->fw_iter->fwname) { + priv->helper_fw = firmware; + do_load_firmware(priv, priv->fw_iter->fwname, main_firmware_cb); + } else { + /* No main firmware needed for this helper --> success! */ + lbs_fw_loaded(priv, 0, firmware, NULL); + } +} + +static void load_next_firmware_from_table(struct lbs_private *priv) +{ + const struct lbs_fw_table *iter; + + if (!priv->fw_iter) + iter = priv->fw_table; + else + iter = ++priv->fw_iter; + + if (priv->helper_fw) { + release_firmware(priv->helper_fw); + priv->helper_fw = NULL; + } + +next: + if (!iter->helper) { + /* End of table hit. */ + lbs_fw_loaded(priv, -ENOENT, NULL, NULL); + return; + } + + if (iter->model != priv->fw_model) { + iter++; + goto next; + } + + priv->fw_iter = iter; + do_load_firmware(priv, iter->helper, helper_firmware_cb); +} + +void lbs_wait_for_firmware_load(struct lbs_private *priv) +{ + wait_event(priv->fw_waitq, priv->fw_callback == NULL); +} + +/** + * lbs_get_firmware_async - Retrieves firmware asynchronously. Can load + * either a helper firmware and a main firmware (2-stage), or just the helper. + * + * @priv: Pointer to lbs_private instance + * @dev: A pointer to &device structure + * @card_model: Bus-specific card model ID used to filter firmware table + * elements + * @fw_table: Table of firmware file names and device model numbers + * terminated by an entry with a NULL helper name + * @callback: User callback to invoke when firmware load succeeds or fails. 
+ */ +int lbs_get_firmware_async(struct lbs_private *priv, struct device *device, + u32 card_model, const struct lbs_fw_table *fw_table, + lbs_fw_cb callback) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->driver_lock, flags); + if (priv->fw_callback) { + lbs_deb_fw("firmware load already in progress\n"); + spin_unlock_irqrestore(&priv->driver_lock, flags); + return -EBUSY; + } + + priv->fw_device = device; + priv->fw_callback = callback; + priv->fw_table = fw_table; + priv->fw_iter = NULL; + priv->fw_model = card_model; + spin_unlock_irqrestore(&priv->driver_lock, flags); + + lbs_deb_fw("Starting async firmware load\n"); + load_next_firmware_from_table(priv); + return 0; +} +EXPORT_SYMBOL_GPL(lbs_get_firmware_async); + +/** + * lbs_get_firmware - Retrieves two-stage firmware + * + * @dev: A pointer to &device structure + * @card_model: Bus-specific card model ID used to filter firmware table + * elements + * @fw_table: Table of firmware file names and device model numbers + * terminated by an entry with a NULL helper name + * @helper: On success, the helper firmware; caller must free + * @mainfw: On success, the main firmware; caller must free + * + * Deprecated: use lbs_get_firmware_async() instead. + * + * returns: 0 on success, non-zero on failure + */ +int lbs_get_firmware(struct device *dev, u32 card_model, + const struct lbs_fw_table *fw_table, + const struct firmware **helper, + const struct firmware **mainfw) +{ + const struct lbs_fw_table *iter; + int ret; + + BUG_ON(helper == NULL); + BUG_ON(mainfw == NULL); + + /* Search for firmware to use from the table. */ + iter = fw_table; + while (iter && iter->helper) { + if (iter->model != card_model) + goto next; + + if (*helper == NULL) { + ret = request_firmware(helper, iter->helper, dev); + if (ret) + goto next; + + /* If the device has one-stage firmware (ie cf8305) and + * we've got it then we don't need to bother with the + * main firmware. + */ + if (iter->fwname == NULL) + return 0; + } + + if (*mainfw == NULL) { + ret = request_firmware(mainfw, iter->fwname, dev); + if (ret) { + /* Clear the helper to ensure we don't have + * mismatched firmware pairs. 
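A condensed sketch of how a bus driver is expected to use lbs_get_firmware_async(), modeled on the if_cs/if_sdio/if_usb conversions elsewhere in this patch. The table contents, the model constant and the callback body are made up for illustration; only the function signatures come from this file:

    static const struct lbs_fw_table example_fw_table[] = {
            /* { model, helper, main fw }; a NULL helper terminates the table */
            { 0x42, "libertas/example_helper.bin", "libertas/example.bin" },
            { 0, NULL, NULL }
    };

    static void example_fw_loaded(struct lbs_private *priv, int ret,
                                  const struct firmware *helper,
                                  const struct firmware *mainfw)
    {
            if (ret) {
                    pr_err("failed to find firmware (%d)\n", ret);
                    return;
            }

            /* program helper/mainfw into the card, then lbs_start_card() */

            release_firmware(helper);       /* the callback owns both images */
            release_firmware(mainfw);
    }

    /* in the bus driver's probe(), after lbs_add_card(): */
    static int example_start_fw_load(struct lbs_private *priv, struct device *dev)
    {
            return lbs_get_firmware_async(priv, dev, 0x42,
                                          example_fw_table, example_fw_loaded);
    }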
+ */ + release_firmware(*helper); + *helper = NULL; + } + } + + if (*helper && *mainfw) + return 0; + + next: + iter++; + } + + /* Failed */ + release_firmware(*helper); + *helper = NULL; + release_firmware(*mainfw); + *mainfw = NULL; + + return -ENOENT; +} +EXPORT_SYMBOL_GPL(lbs_get_firmware); diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c index 234ee88dec9..16beaf39dc5 100644 --- a/drivers/net/wireless/libertas/if_cs.c +++ b/drivers/net/wireless/libertas/if_cs.c @@ -738,6 +738,50 @@ done: return ret; } +static void if_cs_prog_firmware(struct lbs_private *priv, int ret, + const struct firmware *helper, + const struct firmware *mainfw) +{ + struct if_cs_card *card = priv->card; + + if (ret) { + pr_err("failed to find firmware (%d)\n", ret); + return; + } + + /* Load the firmware */ + ret = if_cs_prog_helper(card, helper); + if (ret == 0 && (card->model != MODEL_8305)) + ret = if_cs_prog_real(card, mainfw); + if (ret) + goto out; + + /* Now actually get the IRQ */ + ret = request_irq(card->p_dev->irq, if_cs_interrupt, + IRQF_SHARED, DRV_NAME, card); + if (ret) { + pr_err("error in request_irq\n"); + goto out; + } + + /* + * Clear any interrupt cause that happened while sending + * firmware/initializing card + */ + if_cs_write16(card, IF_CS_CARD_INT_CAUSE, IF_CS_BIT_MASK); + if_cs_enable_ints(card); + + /* And finally bring the card up */ + priv->fw_ready = 1; + if (lbs_start_card(priv) != 0) { + pr_err("could not activate card\n"); + free_irq(card->p_dev->irq, card); + } + +out: + release_firmware(helper); + release_firmware(mainfw); +} /********************************************************************/ @@ -809,8 +853,6 @@ static int if_cs_probe(struct pcmcia_device *p_dev) unsigned int prod_id; struct lbs_private *priv; struct if_cs_card *card; - const struct firmware *helper = NULL; - const struct firmware *mainfw = NULL; lbs_deb_enter(LBS_DEB_CS); @@ -890,20 +932,6 @@ static int if_cs_probe(struct pcmcia_device *p_dev) goto out2; } - ret = lbs_get_firmware(&p_dev->dev, NULL, NULL, card->model, - &fw_table[0], &helper, &mainfw); - if (ret) { - pr_err("failed to find firmware (%d)\n", ret); - goto out2; - } - - /* Load the firmware early, before calling into libertas.ko */ - ret = if_cs_prog_helper(card, helper); - if (ret == 0 && (card->model != MODEL_8305)) - ret = if_cs_prog_real(card, mainfw); - if (ret) - goto out2; - /* Make this card known to the libertas driver */ priv = lbs_add_card(card, &p_dev->dev); if (!priv) { @@ -911,37 +939,22 @@ static int if_cs_probe(struct pcmcia_device *p_dev) goto out2; } - /* Finish setting up fields in lbs_private */ + /* Set up fields in lbs_private */ card->priv = priv; priv->card = card; priv->hw_host_to_card = if_cs_host_to_card; priv->enter_deep_sleep = NULL; priv->exit_deep_sleep = NULL; priv->reset_deep_sleep_wakeup = NULL; - priv->fw_ready = 1; - /* Now actually get the IRQ */ - ret = request_irq(p_dev->irq, if_cs_interrupt, - IRQF_SHARED, DRV_NAME, card); + /* Get firmware */ + ret = lbs_get_firmware_async(priv, &p_dev->dev, card->model, fw_table, + if_cs_prog_firmware); if (ret) { - pr_err("error in request_irq\n"); - goto out3; - } - - /* - * Clear any interrupt cause that happened while sending - * firmware/initializing card - */ - if_cs_write16(card, IF_CS_CARD_INT_CAUSE, IF_CS_BIT_MASK); - if_cs_enable_ints(card); - - /* And finally bring the card up */ - if (lbs_start_card(priv) != 0) { - pr_err("could not activate card\n"); + pr_err("failed to find firmware (%d)\n", ret); goto out3; } - 
ret = 0; goto out; out3: @@ -951,11 +964,6 @@ out2: out1: pcmcia_disable_device(p_dev); out: - if (helper) - release_firmware(helper); - if (mainfw) - release_firmware(mainfw); - lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); return ret; } diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c index 9804ebc892d..76caebaa439 100644 --- a/drivers/net/wireless/libertas/if_sdio.c +++ b/drivers/net/wireless/libertas/if_sdio.c @@ -65,12 +65,6 @@ static void if_sdio_interrupt(struct sdio_func *func); */ static u8 user_rmmod; -static char *lbs_helper_name = NULL; -module_param_named(helper_name, lbs_helper_name, charp, 0644); - -static char *lbs_fw_name = NULL; -module_param_named(fw_name, lbs_fw_name, charp, 0644); - static const struct sdio_device_id if_sdio_ids[] = { { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_LIBERTAS) }, @@ -123,11 +117,8 @@ struct if_sdio_card { int model; unsigned long ioport; unsigned int scratch_reg; - - const char *helper; - const char *firmware; - bool helper_allocated; - bool firmware_allocated; + bool started; + wait_queue_head_t pwron_waitq; u8 buffer[65536] __attribute__((aligned(4))); @@ -140,6 +131,9 @@ struct if_sdio_card { u8 rx_unit; }; +static void if_sdio_finish_power_on(struct if_sdio_card *card); +static int if_sdio_power_off(struct if_sdio_card *card); + /********************************************************************/ /* I/O */ /********************************************************************/ @@ -680,12 +674,39 @@ out: return ret; } +static void if_sdio_do_prog_firmware(struct lbs_private *priv, int ret, + const struct firmware *helper, + const struct firmware *mainfw) +{ + struct if_sdio_card *card = priv->card; + + if (ret) { + pr_err("failed to find firmware (%d)\n", ret); + return; + } + + ret = if_sdio_prog_helper(card, helper); + if (ret) + goto out; + + lbs_deb_sdio("Helper firmware loaded\n"); + + ret = if_sdio_prog_real(card, mainfw); + if (ret) + goto out; + + lbs_deb_sdio("Firmware loaded\n"); + if_sdio_finish_power_on(card); + +out: + release_firmware(helper); + release_firmware(mainfw); +} + static int if_sdio_prog_firmware(struct if_sdio_card *card) { int ret; u16 scratch; - const struct firmware *helper = NULL; - const struct firmware *mainfw = NULL; lbs_deb_enter(LBS_DEB_SDIO); @@ -719,43 +740,18 @@ static int if_sdio_prog_firmware(struct if_sdio_card *card) */ if (scratch == IF_SDIO_FIRMWARE_OK) { lbs_deb_sdio("firmware already loaded\n"); - goto success; + if_sdio_finish_power_on(card); + return 0; } else if ((card->model == MODEL_8686) && (scratch & 0x7fff)) { lbs_deb_sdio("firmware may be running\n"); - goto success; - } - - ret = lbs_get_firmware(&card->func->dev, lbs_helper_name, lbs_fw_name, - card->model, &fw_table[0], &helper, &mainfw); - if (ret) { - pr_err("failed to find firmware (%d)\n", ret); - goto out; + if_sdio_finish_power_on(card); + return 0; } - ret = if_sdio_prog_helper(card, helper); - if (ret) - goto out; - - lbs_deb_sdio("Helper firmware loaded\n"); - - ret = if_sdio_prog_real(card, mainfw); - if (ret) - goto out; - - lbs_deb_sdio("Firmware loaded\n"); - -success: - sdio_claim_host(card->func); - sdio_set_block_size(card->func, IF_SDIO_BLOCK_SIZE); - sdio_release_host(card->func); - ret = 0; + ret = lbs_get_firmware_async(card->priv, &card->func->dev, card->model, + fw_table, if_sdio_do_prog_firmware); out: - if (helper) - release_firmware(helper); - if (mainfw) - release_firmware(mainfw); - lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); return 
ret; } @@ -764,55 +760,15 @@ out: /* Power management */ /********************************************************************/ -static int if_sdio_power_on(struct if_sdio_card *card) +/* Finish power on sequence (after firmware is loaded) */ +static void if_sdio_finish_power_on(struct if_sdio_card *card) { struct sdio_func *func = card->func; struct lbs_private *priv = card->priv; - struct mmc_host *host = func->card->host; int ret; sdio_claim_host(func); - - ret = sdio_enable_func(func); - if (ret) - goto release; - - /* For 1-bit transfers to the 8686 model, we need to enable the - * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0 - * bit to allow access to non-vendor registers. */ - if ((card->model == MODEL_8686) && - (host->caps & MMC_CAP_SDIO_IRQ) && - (host->ios.bus_width == MMC_BUS_WIDTH_1)) { - u8 reg; - - func->card->quirks |= MMC_QUIRK_LENIENT_FN0; - reg = sdio_f0_readb(func, SDIO_CCCR_IF, &ret); - if (ret) - goto disable; - - reg |= SDIO_BUS_ECSI; - sdio_f0_writeb(func, reg, SDIO_CCCR_IF, &ret); - if (ret) - goto disable; - } - - card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret); - if (ret) - goto disable; - - card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 1, &ret) << 8; - if (ret) - goto disable; - - card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 2, &ret) << 16; - if (ret) - goto disable; - - sdio_release_host(func); - ret = if_sdio_prog_firmware(card); - sdio_claim_host(func); - if (ret) - goto disable; + sdio_set_block_size(card->func, IF_SDIO_BLOCK_SIZE); /* * Get rx_unit if the chip is SD8688 or newer. @@ -837,7 +793,7 @@ static int if_sdio_power_on(struct if_sdio_card *card) */ ret = sdio_claim_irq(func, if_sdio_interrupt); if (ret) - goto disable; + goto release; /* * Enable interrupts now that everything is set up @@ -863,11 +819,79 @@ static int if_sdio_power_on(struct if_sdio_card *card) } priv->fw_ready = 1; + wake_up(&card->pwron_waitq); - return 0; + if (!card->started) { + ret = lbs_start_card(priv); + if_sdio_power_off(card); + if (ret == 0) { + card->started = true; + /* Tell PM core that we don't need the card to be + * powered now */ + pm_runtime_put_noidle(&func->dev); + } + } + + return; release_irq: sdio_release_irq(func); +release: + sdio_release_host(func); +} + +static int if_sdio_power_on(struct if_sdio_card *card) +{ + struct sdio_func *func = card->func; + struct mmc_host *host = func->card->host; + int ret; + + sdio_claim_host(func); + + ret = sdio_enable_func(func); + if (ret) + goto release; + + /* For 1-bit transfers to the 8686 model, we need to enable the + * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0 + * bit to allow access to non-vendor registers. 
*/ + if ((card->model == MODEL_8686) && + (host->caps & MMC_CAP_SDIO_IRQ) && + (host->ios.bus_width == MMC_BUS_WIDTH_1)) { + u8 reg; + + func->card->quirks |= MMC_QUIRK_LENIENT_FN0; + reg = sdio_f0_readb(func, SDIO_CCCR_IF, &ret); + if (ret) + goto disable; + + reg |= SDIO_BUS_ECSI; + sdio_f0_writeb(func, reg, SDIO_CCCR_IF, &ret); + if (ret) + goto disable; + } + + card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret); + if (ret) + goto disable; + + card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 1, &ret) << 8; + if (ret) + goto disable; + + card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 2, &ret) << 16; + if (ret) + goto disable; + + sdio_release_host(func); + ret = if_sdio_prog_firmware(card); + if (ret) { + sdio_disable_func(func); + return ret; + } + + return 0; + disable: sdio_disable_func(func); release: @@ -1074,11 +1098,17 @@ static int if_sdio_power_save(struct lbs_private *priv) static int if_sdio_power_restore(struct lbs_private *priv) { struct if_sdio_card *card = priv->card; + int r; /* Make sure the card will not be powered off by runtime PM */ pm_runtime_get_sync(&card->func->dev); - return if_sdio_power_on(card); + r = if_sdio_power_on(card); + if (r) + return r; + + wait_event(card->pwron_waitq, priv->fw_ready); + return 0; } @@ -1179,6 +1209,7 @@ static int if_sdio_probe(struct sdio_func *func, spin_lock_init(&card->lock); card->workqueue = create_workqueue("libertas_sdio"); INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker); + init_waitqueue_head(&card->pwron_waitq); /* Check if we support this card */ for (i = 0; i < ARRAY_SIZE(fw_table); i++) { @@ -1220,14 +1251,6 @@ static int if_sdio_probe(struct sdio_func *func, if (ret) goto err_activate_card; - ret = lbs_start_card(priv); - if_sdio_power_off(card); - if (ret) - goto err_activate_card; - - /* Tell PM core that we don't need the card to be powered now */ - pm_runtime_put_noidle(&func->dev); - out: lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); @@ -1244,10 +1267,6 @@ free: kfree(packet); } - if (card->helper_allocated) - kfree(card->helper); - if (card->firmware_allocated) - kfree(card->firmware); kfree(card); goto out; @@ -1295,12 +1314,6 @@ static void if_sdio_remove(struct sdio_func *func) kfree(packet); } - if (card->helper_allocated) - kfree(card->helper); - if (card->firmware_allocated) - kfree(card->firmware); - kfree(card); - lbs_deb_leave(LBS_DEB_SDIO); } diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c index 50b1ee7721e..9604a1c4a74 100644 --- a/drivers/net/wireless/libertas/if_spi.c +++ b/drivers/net/wireless/libertas/if_spi.c @@ -1064,9 +1064,8 @@ static int if_spi_init_card(struct if_spi_card *card) goto out; } - err = lbs_get_firmware(&card->spi->dev, NULL, NULL, - card->card_id, &fw_table[0], &helper, - &mainfw); + err = lbs_get_firmware(&card->spi->dev, card->card_id, + &fw_table[0], &helper, &mainfw); if (err) { netdev_err(priv->dev, "failed to find firmware (%d)\n", err); @@ -1095,10 +1094,8 @@ static int if_spi_init_card(struct if_spi_card *card) goto out; out: - if (helper) - release_firmware(helper); - if (mainfw) - release_firmware(mainfw); + release_firmware(helper); + release_firmware(mainfw); lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err); diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c index 74da5f1ea24..75403e6e399 100644 --- a/drivers/net/wireless/libertas/if_usb.c +++ b/drivers/net/wireless/libertas/if_usb.c @@ -29,9 +29,6 @@ #define MESSAGE_HEADER_LEN 4 -static char *lbs_fw_name = 
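The runtime-PM restore path now has to synchronize with the asynchronous firmware load. Condensing the two hunks above into one producer/consumer sketch (no new API, just the pairing made explicit):

    /* consumer: if_sdio_power_restore() */
    pm_runtime_get_sync(&card->func->dev);
    r = if_sdio_power_on(card);             /* kicks off the async fw load */
    if (r)
            return r;
    wait_event(card->pwron_waitq, priv->fw_ready);

    /* producer: if_sdio_finish_power_on(), run from the firmware callback */
    priv->fw_ready = 1;
    wake_up(&card->pwron_waitq);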
NULL; -module_param_named(fw_name, lbs_fw_name, charp, 0644); - MODULE_FIRMWARE("libertas/usb8388_v9.bin"); MODULE_FIRMWARE("libertas/usb8388_v5.bin"); MODULE_FIRMWARE("libertas/usb8388.bin"); @@ -44,6 +41,16 @@ enum { MODEL_8682 = 0x2 }; +/* table of firmware file names */ +static const struct lbs_fw_table fw_table[] = { + { MODEL_8388, "libertas/usb8388_olpc.bin", NULL }, + { MODEL_8388, "libertas/usb8388_v9.bin", NULL }, + { MODEL_8388, "libertas/usb8388_v5.bin", NULL }, + { MODEL_8388, "libertas/usb8388.bin", NULL }, + { MODEL_8388, "usb8388.bin", NULL }, + { MODEL_8682, "libertas/usb8682.bin", NULL } +}; + static struct usb_device_id if_usb_table[] = { /* Enter the device signature inside */ { USB_DEVICE(0x1286, 0x2001), .driver_info = MODEL_8388 }, @@ -55,10 +62,9 @@ MODULE_DEVICE_TABLE(usb, if_usb_table); static void if_usb_receive(struct urb *urb); static void if_usb_receive_fwload(struct urb *urb); -static int __if_usb_prog_firmware(struct if_usb_card *cardp, - const char *fwname, int cmd); -static int if_usb_prog_firmware(struct if_usb_card *cardp, - const char *fwname, int cmd); +static void if_usb_prog_firmware(struct lbs_private *priv, int ret, + const struct firmware *fw, + const struct firmware *unused); static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type, uint8_t *payload, uint16_t nb); static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, @@ -67,69 +73,6 @@ static void if_usb_free(struct if_usb_card *cardp); static int if_usb_submit_rx_urb(struct if_usb_card *cardp); static int if_usb_reset_device(struct if_usb_card *cardp); -/* sysfs hooks */ - -/* - * Set function to write firmware to device's persistent memory - */ -static ssize_t if_usb_firmware_set(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct lbs_private *priv = to_net_dev(dev)->ml_priv; - struct if_usb_card *cardp = priv->card; - int ret; - - BUG_ON(buf == NULL); - - ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_FW); - if (ret == 0) - return count; - - return ret; -} - -/* - * lbs_flash_fw attribute to be exported per ethX interface through sysfs - * (/sys/class/net/ethX/lbs_flash_fw). Use this like so to write firmware to - * the device's persistent memory: - * echo usb8388-5.126.0.p5.bin > /sys/class/net/ethX/lbs_flash_fw - */ -static DEVICE_ATTR(lbs_flash_fw, 0200, NULL, if_usb_firmware_set); - -/** - * if_usb_boot2_set - write firmware to device's persistent memory - * - * @dev: target device - * @attr: device attributes - * @buf: firmware buffer to write - * @count: number of bytes to write - * - * returns: number of bytes written or negative error code - */ -static ssize_t if_usb_boot2_set(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct lbs_private *priv = to_net_dev(dev)->ml_priv; - struct if_usb_card *cardp = priv->card; - int ret; - - BUG_ON(buf == NULL); - - ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_BOOT2); - if (ret == 0) - return count; - - return ret; -} - -/* - * lbs_flash_boot2 attribute to be exported per ethX interface through sysfs - * (/sys/class/net/ethX/lbs_flash_boot2). 
Use this like so to write firmware - * to the device's persistent memory: - * echo usb8388-5.126.0.p5.bin > /sys/class/net/ethX/lbs_flash_boot2 - */ -static DEVICE_ATTR(lbs_flash_boot2, 0200, NULL, if_usb_boot2_set); - /** * if_usb_write_bulk_callback - callback function to handle the status * of the URB @@ -256,6 +199,7 @@ static int if_usb_probe(struct usb_interface *intf, struct usb_endpoint_descriptor *endpoint; struct lbs_private *priv; struct if_usb_card *cardp; + int r = -ENOMEM; int i; udev = interface_to_usbdev(intf); @@ -313,20 +257,10 @@ static int if_usb_probe(struct usb_interface *intf, goto dealloc; } - /* Upload firmware */ - kparam_block_sysfs_write(fw_name); - if (__if_usb_prog_firmware(cardp, lbs_fw_name, BOOT_CMD_FW_BY_USB)) { - kparam_unblock_sysfs_write(fw_name); - lbs_deb_usbd(&udev->dev, "FW upload failed\n"); - goto err_prog_firmware; - } - kparam_unblock_sysfs_write(fw_name); - if (!(priv = lbs_add_card(cardp, &intf->dev))) - goto err_prog_firmware; + goto err_add_card; cardp->priv = priv; - cardp->priv->fw_ready = 1; priv->hw_host_to_card = if_usb_host_to_card; priv->enter_deep_sleep = NULL; @@ -339,42 +273,25 @@ static int if_usb_probe(struct usb_interface *intf, cardp->boot2_version = udev->descriptor.bcdDevice; - if_usb_submit_rx_urb(cardp); - - if (lbs_start_card(priv)) - goto err_start_card; - - if_usb_setup_firmware(priv); - usb_get_dev(udev); usb_set_intfdata(intf, cardp); - if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_fw)) - netdev_err(priv->dev, - "cannot register lbs_flash_fw attribute\n"); - - if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2)) - netdev_err(priv->dev, - "cannot register lbs_flash_boot2 attribute\n"); - - /* - * EHS_REMOVE_WAKEUP is not supported on all versions of the firmware. - */ - priv->wol_criteria = EHS_REMOVE_WAKEUP; - if (lbs_host_sleep_cfg(priv, priv->wol_criteria, NULL)) - priv->ehs_remove_supported = false; + r = lbs_get_firmware_async(priv, &udev->dev, cardp->model, + fw_table, if_usb_prog_firmware); + if (r) + goto err_get_fw; return 0; -err_start_card: +err_get_fw: lbs_remove_card(priv); -err_prog_firmware: +err_add_card: if_usb_reset_device(cardp); dealloc: if_usb_free(cardp); error: - return -ENOMEM; + return r; } /** @@ -389,9 +306,6 @@ static void if_usb_disconnect(struct usb_interface *intf) lbs_deb_enter(LBS_DEB_MAIN); - device_remove_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2); - device_remove_file(&priv->dev->dev, &dev_attr_lbs_flash_fw); - cardp->surprise_removed = 1; if (priv) { @@ -912,121 +826,22 @@ static int check_fwfile_format(const uint8_t *data, uint32_t totlen) return ret; } - -/** -* if_usb_prog_firmware - programs the firmware subject to cmd -* -* @cardp: the if_usb_card descriptor -* @fwname: firmware or boot2 image file name -* @cmd: either BOOT_CMD_FW_BY_USB, BOOT_CMD_UPDATE_FW, -* or BOOT_CMD_UPDATE_BOOT2. -* returns: 0 or error code -*/ -static int if_usb_prog_firmware(struct if_usb_card *cardp, - const char *fwname, int cmd) -{ - struct lbs_private *priv = cardp->priv; - unsigned long flags, caps; - int ret; - - caps = priv->fwcapinfo; - if (((cmd == BOOT_CMD_UPDATE_FW) && !(caps & FW_CAPINFO_FIRMWARE_UPGRADE)) || - ((cmd == BOOT_CMD_UPDATE_BOOT2) && !(caps & FW_CAPINFO_BOOT2_UPGRADE))) - return -EOPNOTSUPP; - - /* Ensure main thread is idle. 
*/ - spin_lock_irqsave(&priv->driver_lock, flags); - while (priv->cur_cmd != NULL || priv->dnld_sent != DNLD_RES_RECEIVED) { - spin_unlock_irqrestore(&priv->driver_lock, flags); - if (wait_event_interruptible(priv->waitq, - (priv->cur_cmd == NULL && - priv->dnld_sent == DNLD_RES_RECEIVED))) { - return -ERESTARTSYS; - } - spin_lock_irqsave(&priv->driver_lock, flags); - } - priv->dnld_sent = DNLD_BOOTCMD_SENT; - spin_unlock_irqrestore(&priv->driver_lock, flags); - - ret = __if_usb_prog_firmware(cardp, fwname, cmd); - - spin_lock_irqsave(&priv->driver_lock, flags); - priv->dnld_sent = DNLD_RES_RECEIVED; - spin_unlock_irqrestore(&priv->driver_lock, flags); - - wake_up(&priv->waitq); - - return ret; -} - -/* table of firmware file names */ -static const struct { - u32 model; - const char *fwname; -} fw_table[] = { - { MODEL_8388, "libertas/usb8388_v9.bin" }, - { MODEL_8388, "libertas/usb8388_v5.bin" }, - { MODEL_8388, "libertas/usb8388.bin" }, - { MODEL_8388, "usb8388.bin" }, - { MODEL_8682, "libertas/usb8682.bin" } -}; - -#ifdef CONFIG_OLPC - -static int try_olpc_fw(struct if_usb_card *cardp) -{ - int retval = -ENOENT; - - /* try the OLPC firmware first; fall back to fw_table list */ - if (machine_is_olpc() && cardp->model == MODEL_8388) - retval = request_firmware(&cardp->fw, - "libertas/usb8388_olpc.bin", &cardp->udev->dev); - return retval; -} - -#else -static int try_olpc_fw(struct if_usb_card *cardp) { return -ENOENT; } -#endif /* !CONFIG_OLPC */ - -static int get_fw(struct if_usb_card *cardp, const char *fwname) -{ - int i; - - /* Try user-specified firmware first */ - if (fwname) - return request_firmware(&cardp->fw, fwname, &cardp->udev->dev); - - /* Handle OLPC firmware */ - if (try_olpc_fw(cardp) == 0) - return 0; - - /* Otherwise search for firmware to use */ - for (i = 0; i < ARRAY_SIZE(fw_table); i++) { - if (fw_table[i].model != cardp->model) - continue; - if (request_firmware(&cardp->fw, fw_table[i].fwname, - &cardp->udev->dev) == 0) - return 0; - } - - return -ENOENT; -} - -static int __if_usb_prog_firmware(struct if_usb_card *cardp, - const char *fwname, int cmd) +static void if_usb_prog_firmware(struct lbs_private *priv, int ret, + const struct firmware *fw, + const struct firmware *unused) { + struct if_usb_card *cardp = priv->card; int i = 0; static int reset_count = 10; - int ret = 0; lbs_deb_enter(LBS_DEB_USB); - ret = get_fw(cardp, fwname); if (ret) { pr_err("failed to find firmware (%d)\n", ret); goto done; } + cardp->fw = fw; if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) { ret = -EINVAL; goto release_fw; @@ -1053,7 +868,7 @@ restart: do { int j = 0; i++; - if_usb_issue_boot_command(cardp, cmd); + if_usb_issue_boot_command(cardp, BOOT_CMD_FW_BY_USB); /* wait for command response */ do { j++; @@ -1109,13 +924,27 @@ restart: goto release_fw; } + cardp->priv->fw_ready = 1; + if_usb_submit_rx_urb(cardp); + + if (lbs_start_card(priv)) + goto release_fw; + + if_usb_setup_firmware(priv); + + /* + * EHS_REMOVE_WAKEUP is not supported on all versions of the firmware. 
+ */ + priv->wol_criteria = EHS_REMOVE_WAKEUP; + if (lbs_host_sleep_cfg(priv, priv->wol_criteria, NULL)) + priv->ehs_remove_supported = false; + release_fw: release_firmware(cardp->fw); cardp->fw = NULL; done: - lbs_deb_leave_args(LBS_DEB_USB, "ret %d", ret); - return ret; + lbs_deb_leave(LBS_DEB_USB); } @@ -1128,8 +957,10 @@ static int if_usb_suspend(struct usb_interface *intf, pm_message_t message) lbs_deb_enter(LBS_DEB_USB); - if (priv->psstate != PS_STATE_FULL_POWER) - return -1; + if (priv->psstate != PS_STATE_FULL_POWER) { + ret = -1; + goto out; + } #ifdef CONFIG_OLPC if (machine_is_olpc()) { diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 957681dede1..e96ee0aa843 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -878,6 +878,7 @@ static int lbs_init_adapter(struct lbs_private *priv) priv->is_host_sleep_configured = 0; priv->is_host_sleep_activated = 0; init_waitqueue_head(&priv->host_sleep_q); + init_waitqueue_head(&priv->fw_waitq); mutex_init(&priv->lock); setup_timer(&priv->command_timer, lbs_cmd_timeout_handler, @@ -1033,7 +1034,11 @@ void lbs_remove_card(struct lbs_private *priv) lbs_deb_enter(LBS_DEB_MAIN); lbs_remove_mesh(priv); - lbs_scan_deinit(priv); + + if (priv->wiphy_registered) + lbs_scan_deinit(priv); + + lbs_wait_for_firmware_load(priv); /* worker thread destruction blocks on the in-flight command which * should have been cleared already in lbs_stop_card(). @@ -1128,6 +1133,11 @@ void lbs_stop_card(struct lbs_private *priv) goto out; dev = priv->dev; + /* If the netdev isn't registered, it means that lbs_start_card() was + * never called so we have nothing to do here. */ + if (dev->reg_state != NETREG_REGISTERED) + goto out; + netif_stop_queue(dev); netif_carrier_off(dev); @@ -1177,111 +1187,6 @@ void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx) } EXPORT_SYMBOL_GPL(lbs_notify_command_response); -/** - * lbs_get_firmware - Retrieves two-stage firmware - * - * @dev: A pointer to &device structure - * @user_helper: User-defined helper firmware file - * @user_mainfw: User-defined main firmware file - * @card_model: Bus-specific card model ID used to filter firmware table - * elements - * @fw_table: Table of firmware file names and device model numbers - * terminated by an entry with a NULL helper name - * @helper: On success, the helper firmware; caller must free - * @mainfw: On success, the main firmware; caller must free - * - * returns: 0 on success, non-zero on failure - */ -int lbs_get_firmware(struct device *dev, const char *user_helper, - const char *user_mainfw, u32 card_model, - const struct lbs_fw_table *fw_table, - const struct firmware **helper, - const struct firmware **mainfw) -{ - const struct lbs_fw_table *iter; - int ret; - - BUG_ON(helper == NULL); - BUG_ON(mainfw == NULL); - - /* Try user-specified firmware first */ - if (user_helper) { - ret = request_firmware(helper, user_helper, dev); - if (ret) { - dev_err(dev, "couldn't find helper firmware %s\n", - user_helper); - goto fail; - } - } - if (user_mainfw) { - ret = request_firmware(mainfw, user_mainfw, dev); - if (ret) { - dev_err(dev, "couldn't find main firmware %s\n", - user_mainfw); - goto fail; - } - } - - if (*helper && *mainfw) - return 0; - - /* Otherwise search for firmware to use. If neither the helper or - * the main firmware were specified by the user, then we need to - * make sure that found helper & main are from the same entry in - * fw_table. 
- */ - iter = fw_table; - while (iter && iter->helper) { - if (iter->model != card_model) - goto next; - - if (*helper == NULL) { - ret = request_firmware(helper, iter->helper, dev); - if (ret) - goto next; - - /* If the device has one-stage firmware (ie cf8305) and - * we've got it then we don't need to bother with the - * main firmware. - */ - if (iter->fwname == NULL) - return 0; - } - - if (*mainfw == NULL) { - ret = request_firmware(mainfw, iter->fwname, dev); - if (ret && !user_helper) { - /* Clear the helper if it wasn't user-specified - * and the main firmware load failed, to ensure - * we don't have mismatched firmware pairs. - */ - release_firmware(*helper); - *helper = NULL; - } - } - - if (*helper && *mainfw) - return 0; - - next: - iter++; - } - - fail: - /* Failed */ - if (*helper) { - release_firmware(*helper); - *helper = NULL; - } - if (*mainfw) { - release_firmware(*mainfw); - *mainfw = NULL; - } - - return -ENOENT; -} -EXPORT_SYMBOL_GPL(lbs_get_firmware); - static int __init lbs_init_module(void) { lbs_deb_enter(LBS_DEB_MAIN); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index b7ce6a6e355..03c0c6b1372 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -582,11 +582,13 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, goto nla_put_failure; } - NLA_PUT(skb, HWSIM_ATTR_ADDR_TRANSMITTER, - sizeof(struct mac_address), data->addresses[1].addr); + if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, + sizeof(struct mac_address), data->addresses[1].addr)) + goto nla_put_failure; /* We get the skb->data */ - NLA_PUT(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data); + if (nla_put(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data)) + goto nla_put_failure; /* We get the flags for this transmission, and we translate them to wmediumd flags */ @@ -597,7 +599,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, if (info->flags & IEEE80211_TX_CTL_NO_ACK) hwsim_flags |= HWSIM_TX_CTL_NO_ACK; - NLA_PUT_U32(skb, HWSIM_ATTR_FLAGS, hwsim_flags); + if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags)) + goto nla_put_failure; /* We get the tx control (rate and retries) info*/ @@ -606,12 +609,14 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, tx_attempts[i].count = info->status.rates[i].count; } - NLA_PUT(skb, HWSIM_ATTR_TX_INFO, - sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES, - tx_attempts); + if (nla_put(skb, HWSIM_ATTR_TX_INFO, + sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES, + tx_attempts)) + goto nla_put_failure; /* We create a cookie to identify this skb */ - NLA_PUT_U64(skb, HWSIM_ATTR_COOKIE, (unsigned long) my_skb); + if (nla_put_u64(skb, HWSIM_ATTR_COOKIE, (unsigned long) my_skb)) + goto nla_put_failure; genlmsg_end(skb, msg_head); genlmsg_unicast(&init_net, skb, dst_pid); @@ -632,6 +637,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_rx_status rx_status; + struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info); if (data->idle) { wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n"); @@ -666,6 +672,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, spin_lock(&hwsim_radio_lock); list_for_each_entry(data2, &hwsim_radios, list) { struct sk_buff *nskb; + struct ieee80211_mgmt *mgmt; if (data == data2) continue; @@ -683,8 +690,18 @@ static bool 
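The NLA_PUT* macros used here hid a jump to nla_put_failure inside the macro body; they are replaced with plain nla_put*() calls and explicit error checks. The pattern, reduced to two attributes from the hunk above with the message construction elided:

    if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
            goto nla_put_failure;

    if (nla_put(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data))
            goto nla_put_failure;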
mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, if (mac80211_hwsim_addr_match(data2, hdr->addr1)) ack = true; + + /* set bcn timestamp relative to receiver mactime */ rx_status.mactime = - le64_to_cpu(__mac80211_hwsim_get_tsf(data2)); + le64_to_cpu(__mac80211_hwsim_get_tsf(data2)); + mgmt = (struct ieee80211_mgmt *) nskb->data; + if (ieee80211_is_beacon(mgmt->frame_control) || + ieee80211_is_probe_resp(mgmt->frame_control)) + mgmt->u.beacon.timestamp = cpu_to_le64( + rx_status.mactime + + (data->tsf_offset - data2->tsf_offset) + + 24 * 8 * 10 / txrate->bitrate); + memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status)); ieee80211_rx_irqsafe(data2->hw, nskb); } @@ -698,12 +715,6 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) bool ack; struct ieee80211_tx_info *txi; u32 _pid; - struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) skb->data; - struct mac80211_hwsim_data *data = hw->priv; - - if (ieee80211_is_beacon(mgmt->frame_control) || - ieee80211_is_probe_resp(mgmt->frame_control)) - mgmt->u.beacon.timestamp = __mac80211_hwsim_get_tsf(data); mac80211_hwsim_monitor_rx(hw, skb); @@ -800,11 +811,9 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac, struct ieee80211_vif *vif) { struct ieee80211_hw *hw = arg; - struct mac80211_hwsim_data *data = hw->priv; struct sk_buff *skb; struct ieee80211_tx_info *info; u32 _pid; - struct ieee80211_mgmt *mgmt; hwsim_check_magic(vif); @@ -818,9 +827,6 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac, return; info = IEEE80211_SKB_CB(skb); - mgmt = (struct ieee80211_mgmt *) skb->data; - mgmt->u.beacon.timestamp = __mac80211_hwsim_get_tsf(data); - mac80211_hwsim_monitor_rx(hw, skb); /* wmediumd mode check */ @@ -1108,7 +1114,8 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw, nla_total_size(sizeof(u32))); if (!skb) return -ENOMEM; - NLA_PUT_U32(skb, HWSIM_TM_ATTR_PS, hwsim->ps); + if (nla_put_u32(skb, HWSIM_TM_ATTR_PS, hwsim->ps)) + goto nla_put_failure; return cfg80211_testmode_reply(skb); default: return -EOPNOTSUPP; @@ -1444,7 +1451,7 @@ DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group, hwsim_fops_group_read, hwsim_fops_group_write, "%llx\n"); -struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr( +static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr( struct mac_address *addr) { struct mac80211_hwsim_data *data; @@ -1789,9 +1796,11 @@ static int __init init_mac80211_hwsim(void) IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SUPPORTS_STATIC_SMPS | IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | - IEEE80211_HW_AMPDU_AGGREGATION; + IEEE80211_HW_AMPDU_AGGREGATION | + IEEE80211_HW_WANT_MONITOR_VIF; - hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; /* ask mac80211 to reserve space for magic */ hw->vif_data_size = sizeof(struct hwsim_vif_priv); diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c index a5e182b5e94..fe8ebfebcc0 100644 --- a/drivers/net/wireless/mwifiex/11n.c +++ b/drivers/net/wireless/mwifiex/11n.c @@ -350,25 +350,26 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv, ret_len += sizeof(struct mwifiex_ie_types_htcap); } - if (bss_desc->bcn_ht_info) { + if (bss_desc->bcn_ht_oper) { if (priv->bss_mode == NL80211_IFTYPE_ADHOC) { ht_info = (struct mwifiex_ie_types_htinfo *) *buffer; memset(ht_info, 0, sizeof(struct mwifiex_ie_types_htinfo)); ht_info->header.type = - cpu_to_le16(WLAN_EID_HT_INFORMATION); + cpu_to_le16(WLAN_EID_HT_OPERATION); ht_info->header.len 
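A note on the beacon timestamp fix-up added above: the receiver's mactime is corrected by the TSF offset between the two simulated radios plus the airtime of the 24-byte 802.11 management header at the chosen TX rate. Since ieee80211_rate.bitrate is in units of 100 kb/s, the term 24 * 8 * 10 / bitrate yields microseconds; for a beacon sent at 1 Mb/s (bitrate == 10) that is 24 * 8 * 10 / 10 = 192 us, i.e. the time until the timestamp field itself goes on the air.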
= - cpu_to_le16(sizeof(struct ieee80211_ht_info)); + cpu_to_le16( + sizeof(struct ieee80211_ht_operation)); memcpy((u8 *) ht_info + sizeof(struct mwifiex_ie_types_header), - (u8 *) bss_desc->bcn_ht_info + + (u8 *) bss_desc->bcn_ht_oper + sizeof(struct ieee_types_header), le16_to_cpu(ht_info->header.len)); if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) - ht_info->ht_info.ht_param &= + ht_info->ht_oper.ht_param &= ~(IEEE80211_HT_PARAM_CHAN_WIDTH_ANY | IEEE80211_HT_PARAM_CHA_SEC_OFFSET); @@ -385,16 +386,16 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv, sizeof(struct mwifiex_ie_types_chan_list_param_set) - sizeof(struct mwifiex_ie_types_header)); chan_list->chan_scan_param[0].chan_number = - bss_desc->bcn_ht_info->control_chan; + bss_desc->bcn_ht_oper->primary_chan; chan_list->chan_scan_param[0].radio_type = mwifiex_band_to_radio_type((u8) bss_desc->bss_band); if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && - bss_desc->bcn_ht_info->ht_param & + bss_desc->bcn_ht_oper->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) SET_SECONDARYCHAN(chan_list->chan_scan_param[0]. radio_type, - (bss_desc->bcn_ht_info->ht_param & + (bss_desc->bcn_ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET)); *buffer += sizeof(struct mwifiex_ie_types_chan_list_param_set); diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c index 9eefb2a0ce9..ab84eb94374 100644 --- a/drivers/net/wireless/mwifiex/11n_aggr.c +++ b/drivers/net/wireless/mwifiex/11n_aggr.c @@ -233,21 +233,27 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, skb_push(skb_aggr, headroom); - /* - * Padding per MSDU will affect the length of next - * packet and hence the exact length of next packet - * is uncertain here. - * - * Also, aggregation of transmission buffer, while - * downloading the data to the card, wont gain much - * on the AMSDU packets as the AMSDU packets utilizes - * the transmission buffer space to the maximum - * (adapter->tx_buf_size). - */ - tx_param.next_pkt_len = 0; - - ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, - skb_aggr, &tx_param); + if (adapter->iface_type == MWIFIEX_USB) { + adapter->data_sent = true; + ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA, + skb_aggr, NULL); + } else { + /* + * Padding per MSDU will affect the length of next + * packet and hence the exact length of next packet + * is uncertain here. + * + * Also, aggregation of transmission buffer, while + * downloading the data to the card, wont gain much + * on the AMSDU packets as the AMSDU packets utilizes + * the transmission buffer space to the maximum + * (adapter->tx_buf_size). + */ + tx_param.next_pkt_len = 0; + + ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, + skb_aggr, &tx_param); + } switch (ret) { case -EBUSY: spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig index 2a078cea830..8e384fae3e6 100644 --- a/drivers/net/wireless/mwifiex/Kconfig +++ b/drivers/net/wireless/mwifiex/Kconfig @@ -10,12 +10,12 @@ config MWIFIEX mwifiex. config MWIFIEX_SDIO - tristate "Marvell WiFi-Ex Driver for SD8787/SD8797" + tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797" depends on MWIFIEX && MMC select FW_LOADER ---help--- This adds support for wireless adapters based on Marvell - 8787/8797 chipsets with SDIO interface. + 8786/8787/8797 chipsets with SDIO interface. 
If you choose to build it as a module, it will be called mwifiex_sdio. @@ -30,3 +30,14 @@ config MWIFIEX_PCIE If you choose to build it as a module, it will be called mwifiex_pcie. + +config MWIFIEX_USB + tristate "Marvell WiFi-Ex Driver for USB8797" + depends on MWIFIEX && USB + select FW_LOADER + ---help--- + This adds support for wireless adapters based on Marvell + Avastar 88W8797 chipset with USB interface. + + If you choose to build it as a module, it will be called + mwifiex_usb. diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile index b0257ad1bbe..5c1a46bf1e1 100644 --- a/drivers/net/wireless/mwifiex/Makefile +++ b/drivers/net/wireless/mwifiex/Makefile @@ -42,3 +42,6 @@ obj-$(CONFIG_MWIFIEX_SDIO) += mwifiex_sdio.o mwifiex_pcie-y += pcie.o obj-$(CONFIG_MWIFIEX_PCIE) += mwifiex_pcie.o + +mwifiex_usb-y += usb.o +obj-$(CONFIG_MWIFIEX_USB) += mwifiex_usb.o diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 65050384c42..c78ea873a63 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -516,25 +516,23 @@ static int mwifiex_dump_station_info(struct mwifiex_private *priv, struct station_info *sinfo) { - struct mwifiex_ds_get_signal signal; struct mwifiex_rate_cfg rate; - int ret = 0; sinfo->filled = STATION_INFO_RX_BYTES | STATION_INFO_TX_BYTES | - STATION_INFO_RX_PACKETS | - STATION_INFO_TX_PACKETS - | STATION_INFO_SIGNAL | STATION_INFO_TX_BITRATE; + STATION_INFO_RX_PACKETS | STATION_INFO_TX_PACKETS | + STATION_INFO_TX_BITRATE | + STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG; /* Get signal information from the firmware */ - memset(&signal, 0, sizeof(struct mwifiex_ds_get_signal)); - if (mwifiex_get_signal_info(priv, &signal)) { - dev_err(priv->adapter->dev, "getting signal information\n"); - ret = -EFAULT; + if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_RSSI_INFO, + HostCmd_ACT_GEN_GET, 0, NULL)) { + dev_err(priv->adapter->dev, "failed to get signal information\n"); + return -EFAULT; } if (mwifiex_drv_get_data_rate(priv, &rate)) { dev_err(priv->adapter->dev, "getting data rate\n"); - ret = -EFAULT; + return -EFAULT; } /* Get DTIM period information from firmware */ @@ -557,11 +555,12 @@ mwifiex_dump_station_info(struct mwifiex_private *priv, sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; } + sinfo->signal_avg = priv->bcn_rssi_avg; sinfo->rx_bytes = priv->stats.rx_bytes; sinfo->tx_bytes = priv->stats.tx_bytes; sinfo->rx_packets = priv->stats.rx_packets; sinfo->tx_packets = priv->stats.tx_packets; - sinfo->signal = priv->qual_level; + sinfo->signal = priv->bcn_rssi_avg; /* bit rate is in 500 kb/s units. Convert it to 100kb/s units */ sinfo->txrate.legacy = rate.rate * 5; @@ -581,7 +580,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv, priv->curr_bss_params.bss_descriptor.beacon_period; } - return ret; + return 0; } /* @@ -604,6 +603,23 @@ mwifiex_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, return mwifiex_dump_station_info(priv, sinfo); } +/* + * CFG802.11 operation handler to dump station information. 
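The legacy bit rate handed to cfg80211 in the station-info code just above is converted from the firmware's 500 kbit/s units to the 100 kbit/s units cfg80211 expects, which is why the value is multiplied by 5. Example (illustrative only, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned int fw_rate = 108;          /* firmware units of 500 kbit/s */
        unsigned int legacy  = fw_rate * 5;  /* cfg80211 units of 100 kbit/s */

        /* 108 * 500 kbit/s = 54 Mbit/s, reported as legacy = 540 */
        printf("fw_rate %u -> legacy %u (%u.%u Mbit/s)\n",
               fw_rate, legacy, legacy / 10, legacy % 10);
        return 0;
    }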
+ */ +static int +mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *mac, struct station_info *sinfo) +{ + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + + if (!priv->media_connected || idx) + return -ENOENT; + + memcpy(mac, priv->cfg_bssid, ETH_ALEN); + + return mwifiex_dump_station_info(priv, sinfo); +} + /* Supported rates to be advertised to the cfg80211 */ static struct ieee80211_rate mwifiex_rates[] = { @@ -750,6 +766,45 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy, } /* + * CFG802.11 operation handler for connection quality monitoring. + * + * This function subscribes/unsubscribes HIGH_RSSI and LOW_RSSI + * events to FW. + */ +static int mwifiex_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy, + struct net_device *dev, + s32 rssi_thold, u32 rssi_hyst) +{ + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + struct mwifiex_ds_misc_subsc_evt subsc_evt; + + priv->cqm_rssi_thold = rssi_thold; + priv->cqm_rssi_hyst = rssi_hyst; + + memset(&subsc_evt, 0x00, sizeof(struct mwifiex_ds_misc_subsc_evt)); + subsc_evt.events = BITMASK_BCN_RSSI_LOW | BITMASK_BCN_RSSI_HIGH; + + /* Subscribe/unsubscribe low and high rssi events */ + if (rssi_thold && rssi_hyst) { + subsc_evt.action = HostCmd_ACT_BITWISE_SET; + subsc_evt.bcn_l_rssi_cfg.abs_value = abs(rssi_thold); + subsc_evt.bcn_h_rssi_cfg.abs_value = abs(rssi_thold); + subsc_evt.bcn_l_rssi_cfg.evt_freq = 1; + subsc_evt.bcn_h_rssi_cfg.evt_freq = 1; + return mwifiex_send_cmd_sync(priv, + HostCmd_CMD_802_11_SUBSCRIBE_EVENT, + 0, 0, &subsc_evt); + } else { + subsc_evt.action = HostCmd_ACT_BITWISE_CLR; + return mwifiex_send_cmd_sync(priv, + HostCmd_CMD_802_11_SUBSCRIBE_EVENT, + 0, 0, &subsc_evt); + } + + return 0; +} + +/* * CFG802.11 operation handler for disconnection request. 
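The set_cqm_rssi_config handler added above folds cfg80211's signed dBm threshold into the absolute values carried by the firmware's SUBSCRIBE_EVENT request, arming both the low and the high event at |rssi_thold| and clearing both when the threshold is zero. A minimal sketch of that mapping, using simplified stand-in types rather than the driver's structures (the numeric action and bitmask values follow the defines added to fw.h in this patch):

    #include <stdint.h>
    #include <stdlib.h>
    #include <stdio.h>

    /* Simplified stand-ins for the driver's subscribe-event request. */
    struct evt_cfg { uint8_t abs_value; uint8_t evt_freq; };
    struct subsc_req {
        uint16_t action;    /* 0x0002 bitwise set, 0x0003 bitwise clear */
        uint16_t events;    /* BIT(0) beacon RSSI low, BIT(4) beacon RSSI high */
        struct evt_cfg low, high;
    };

    static void build_cqm_request(struct subsc_req *req,
                                  int rssi_thold, unsigned int rssi_hyst)
    {
        req->events = 0x01 | 0x10;
        if (rssi_thold && rssi_hyst) {
            req->action = 0x0002;                        /* subscribe */
            req->low.abs_value  = (uint8_t)abs(rssi_thold);
            req->high.abs_value = (uint8_t)abs(rssi_thold);
            req->low.evt_freq = req->high.evt_freq = 1;
        } else {
            req->action = 0x0003;                        /* unsubscribe */
        }
    }

    int main(void)
    {
        struct subsc_req req = { 0 };

        build_cqm_request(&req, -70, 4);   /* -70 dBm threshold, 4 dB hysteresis */
        printf("action=%#x events=%#x low=%u high=%u\n",
               req.action, req.events, req.low.abs_value, req.high.abs_value);
        return 0;
    }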
* * This function does not work when there is already a disconnection @@ -1107,6 +1162,17 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev, priv->user_scan_cfg->num_ssids = request->n_ssids; priv->user_scan_cfg->ssid_list = request->ssids; + if (request->ie && request->ie_len) { + for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) { + if (priv->vs_ie[i].mask != MWIFIEX_VSIE_MASK_CLEAR) + continue; + priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_SCAN; + memcpy(&priv->vs_ie[i].ie, request->ie, + request->ie_len); + break; + } + } + for (i = 0; i < request->n_channels; i++) { chan = request->channels[i]; priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; @@ -1124,6 +1190,15 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev, if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg)) return -EFAULT; + if (request->ie && request->ie_len) { + for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) { + if (priv->vs_ie[i].mask == MWIFIEX_VSIE_MASK_SCAN) { + priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_CLEAR; + memset(&priv->vs_ie[i].ie, 0, + MWIFIEX_MAX_VSIE_LEN); + } + } + } return 0; } @@ -1340,6 +1415,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { .connect = mwifiex_cfg80211_connect, .disconnect = mwifiex_cfg80211_disconnect, .get_station = mwifiex_cfg80211_get_station, + .dump_station = mwifiex_cfg80211_dump_station, .set_wiphy_params = mwifiex_cfg80211_set_wiphy_params, .set_channel = mwifiex_cfg80211_set_channel, .join_ibss = mwifiex_cfg80211_join_ibss, @@ -1350,6 +1426,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { .set_power_mgmt = mwifiex_cfg80211_set_power_mgmt, .set_tx_power = mwifiex_cfg80211_set_tx_power, .set_bitrate_mask = mwifiex_cfg80211_set_bitrate_mask, + .set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config, }; /* @@ -1365,6 +1442,7 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv) void *wdev_priv; struct wireless_dev *wdev; struct ieee80211_sta_ht_cap *ht_info; + u8 *country_code; wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); if (!wdev) { @@ -1381,6 +1459,7 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv) } wdev->iftype = NL80211_IFTYPE_STATION; wdev->wiphy->max_scan_ssids = 10; + wdev->wiphy->max_scan_ie_len = MWIFIEX_MAX_VSIE_LEN; wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); @@ -1403,8 +1482,8 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv) memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN); wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; - /* Reserve space for bss band information */ - wdev->wiphy->bss_priv_size = sizeof(u8); + /* Reserve space for mwifiex specific private data for BSS */ + wdev->wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv); wdev->wiphy->reg_notifier = mwifiex_reg_notifier; @@ -1427,6 +1506,11 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv) "info: successfully registered wiphy device\n"); } + country_code = mwifiex_11d_code_2_region(priv->adapter->region_code); + if (country_code && regulatory_hint(wdev->wiphy, country_code)) + dev_err(priv->adapter->dev, + "%s: regulatory_hint failed\n", __func__); + priv->wdev = wdev; return ret; diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c index 2fe1c33765b..560871b0e23 100644 --- a/drivers/net/wireless/mwifiex/cfp.c +++ b/drivers/net/wireless/mwifiex/cfp.c @@ -71,6 +71,37 @@ u16 region_code_index[MWIFIEX_MAX_REGION_CODE] = { 0x10, 0x20, 0x30, static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 
0 }; +struct region_code_mapping { + u8 code; + u8 region[IEEE80211_COUNTRY_STRING_LEN]; +}; + +static struct region_code_mapping region_code_mapping_t[] = { + { 0x10, "US " }, /* US FCC */ + { 0x20, "CA " }, /* IC Canada */ + { 0x30, "EU " }, /* ETSI */ + { 0x31, "ES " }, /* Spain */ + { 0x32, "FR " }, /* France */ + { 0x40, "JP " }, /* Japan */ + { 0x41, "JP " }, /* Japan */ + { 0x50, "CN " }, /* China */ +}; + +/* This function converts integer code to region string */ +u8 *mwifiex_11d_code_2_region(u8 code) +{ + u8 i; + u8 size = sizeof(region_code_mapping_t)/ + sizeof(struct region_code_mapping); + + /* Look for code in mapping table */ + for (i = 0; i < size; i++) + if (region_code_mapping_t[i].code == code) + return region_code_mapping_t[i].region; + + return NULL; +} + /* * This function maps an index in supported rates table into * the corresponding data rate. diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index 07f6e009255..1710beffb93 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -139,6 +139,7 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, uint16_t cmd_size; struct timeval tstamp; unsigned long flags; + __le32 tmp; if (!adapter || !cmd_node) return -1; @@ -178,15 +179,28 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size, le16_to_cpu(host_cmd->seq_num)); - skb_push(cmd_node->cmd_skb, INTF_HEADER_LEN); - - ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD, - cmd_node->cmd_skb, NULL); - - skb_pull(cmd_node->cmd_skb, INTF_HEADER_LEN); + if (adapter->iface_type == MWIFIEX_USB) { + tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD); + skb_push(cmd_node->cmd_skb, MWIFIEX_TYPE_LEN); + memcpy(cmd_node->cmd_skb->data, &tmp, MWIFIEX_TYPE_LEN); + adapter->cmd_sent = true; + ret = adapter->if_ops.host_to_card(adapter, + MWIFIEX_USB_EP_CMD_EVENT, + cmd_node->cmd_skb, NULL); + skb_pull(cmd_node->cmd_skb, MWIFIEX_TYPE_LEN); + if (ret == -EBUSY) + cmd_node->cmd_skb = NULL; + } else { + skb_push(cmd_node->cmd_skb, INTF_HEADER_LEN); + ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD, + cmd_node->cmd_skb, NULL); + skb_pull(cmd_node->cmd_skb, INTF_HEADER_LEN); + } if (ret == -1) { dev_err(adapter->dev, "DNLD_CMD: host to card failed\n"); + if (adapter->iface_type == MWIFIEX_USB) + adapter->cmd_sent = false; if (cmd_node->wait_q_enabled) adapter->cmd_wait_q.status = -1; mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd); @@ -232,6 +246,9 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter) struct mwifiex_opt_sleep_confirm *sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *) adapter->sleep_cfm->data; + struct sk_buff *sleep_cfm_tmp; + __le32 tmp; + priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); sleep_cfm_buf->seq_num = @@ -240,10 +257,28 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter) priv->bss_type))); adapter->seq_num++; - skb_push(adapter->sleep_cfm, INTF_HEADER_LEN); - ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD, - adapter->sleep_cfm, NULL); - skb_pull(adapter->sleep_cfm, INTF_HEADER_LEN); + if (adapter->iface_type == MWIFIEX_USB) { + sleep_cfm_tmp = + dev_alloc_skb(sizeof(struct mwifiex_opt_sleep_confirm) + + MWIFIEX_TYPE_LEN); + skb_put(sleep_cfm_tmp, sizeof(struct mwifiex_opt_sleep_confirm) + + MWIFIEX_TYPE_LEN); + tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD); + memcpy(sleep_cfm_tmp->data, &tmp, 
MWIFIEX_TYPE_LEN); + memcpy(sleep_cfm_tmp->data + MWIFIEX_TYPE_LEN, + adapter->sleep_cfm->data, + sizeof(struct mwifiex_opt_sleep_confirm)); + ret = adapter->if_ops.host_to_card(adapter, + MWIFIEX_USB_EP_CMD_EVENT, + sleep_cfm_tmp, NULL); + if (ret != -EBUSY) + dev_kfree_skb_any(sleep_cfm_tmp); + } else { + skb_push(adapter->sleep_cfm, INTF_HEADER_LEN); + ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD, + adapter->sleep_cfm, NULL); + skb_pull(adapter->sleep_cfm, INTF_HEADER_LEN); + } if (ret == -1) { dev_err(adapter->dev, "SLEEP_CFM: failed\n"); @@ -343,7 +378,12 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) } if (!cmd_array[i].resp_skb) continue; - dev_kfree_skb_any(cmd_array[i].resp_skb); + + if (adapter->iface_type == MWIFIEX_USB) + adapter->if_ops.cmdrsp_complete(adapter, + cmd_array[i].resp_skb); + else + dev_kfree_skb_any(cmd_array[i].resp_skb); } /* Release struct cmd_ctrl_node */ if (adapter->cmd_pool) { @@ -1083,6 +1123,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter) MWIFIEX_BSS_ROLE_ANY), false); } +EXPORT_SYMBOL_GPL(mwifiex_process_hs_config); /* * This function handles the command response of a sleep confirm command. diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c index 1a845074c52..a870b5885c0 100644 --- a/drivers/net/wireless/mwifiex/debugfs.c +++ b/drivers/net/wireless/mwifiex/debugfs.c @@ -212,7 +212,7 @@ mwifiex_info_read(struct file *file, char __user *ubuf, p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid); p += sprintf(p, "bssid=\"%pM\"\n", info.bssid); p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan); - p += sprintf(p, "region_code = \"%02x\"\n", info.region_code); + p += sprintf(p, "country_code = \"%s\"\n", info.country_code); netdev_for_each_mc_addr(ha, netdev) p += sprintf(p, "multicast_address[%d]=\"%pM\"\n", diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h index be5fd1652e5..d04aba4131d 100644 --- a/drivers/net/wireless/mwifiex/decl.h +++ b/drivers/net/wireless/mwifiex/decl.h @@ -53,6 +53,7 @@ #define MWIFIEX_RATE_BITMAP_MCS127 159 #define MWIFIEX_RX_DATA_BUF_SIZE (4 * 1024) +#define MWIFIEX_RX_CMD_BUF_SIZE (2 * 1024) #define MWIFIEX_RTS_MIN_VALUE (0) #define MWIFIEX_RTS_MAX_VALUE (2347) diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index e98fc5af73d..5f6adeb9b95 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -81,6 +81,11 @@ enum KEY_TYPE_ID { #define FIRMWARE_READY_SDIO 0xfedc #define FIRMWARE_READY_PCIE 0xfedcba00 +enum mwifiex_usb_ep { + MWIFIEX_USB_EP_CMD_EVENT = 1, + MWIFIEX_USB_EP_DATA = 2, +}; + enum MWIFIEX_802_11_PRIVACY_FILTER { MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL, MWIFIEX_802_11_PRIV_FILTER_8021X_WEP @@ -92,16 +97,19 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define TLV_TYPE_KEY_MATERIAL (PROPRIETARY_TLV_BASE_ID + 0) #define TLV_TYPE_CHANLIST (PROPRIETARY_TLV_BASE_ID + 1) #define TLV_TYPE_NUMPROBES (PROPRIETARY_TLV_BASE_ID + 2) +#define TLV_TYPE_RSSI_LOW (PROPRIETARY_TLV_BASE_ID + 4) #define TLV_TYPE_PASSTHROUGH (PROPRIETARY_TLV_BASE_ID + 10) #define TLV_TYPE_WMMQSTATUS (PROPRIETARY_TLV_BASE_ID + 16) #define TLV_TYPE_WILDCARDSSID (PROPRIETARY_TLV_BASE_ID + 18) #define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19) +#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22) #define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31) #define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42) #define TLV_TYPE_RATE_DROP_CONTROL 
(PROPRIETARY_TLV_BASE_ID + 82) #define TLV_TYPE_RATE_SCOPE (PROPRIETARY_TLV_BASE_ID + 83) #define TLV_TYPE_POWER_GROUP (PROPRIETARY_TLV_BASE_ID + 84) #define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94) +#define TLV_TYPE_MGMT_IE (PROPRIETARY_TLV_BASE_ID + 105) #define TLV_TYPE_AUTO_DS_PARAM (PROPRIETARY_TLV_BASE_ID + 113) #define TLV_TYPE_PS_PARAM (PROPRIETARY_TLV_BASE_ID + 114) @@ -194,6 +202,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define HostCmd_CMD_802_11_KEY_MATERIAL 0x005e #define HostCmd_CMD_802_11_BG_SCAN_QUERY 0x006c #define HostCmd_CMD_WMM_GET_STATUS 0x0071 +#define HostCmd_CMD_802_11_SUBSCRIBE_EVENT 0x0075 #define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f #define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083 #define HostCmd_CMD_VERSION_EXT 0x0097 @@ -228,6 +237,8 @@ enum ENH_PS_MODES { #define HostCmd_RET_BIT 0x8000 #define HostCmd_ACT_GEN_GET 0x0000 #define HostCmd_ACT_GEN_SET 0x0001 +#define HostCmd_ACT_BITWISE_SET 0x0002 +#define HostCmd_ACT_BITWISE_CLR 0x0003 #define HostCmd_RESULT_OK 0x0000 #define HostCmd_ACT_MAC_RX_ON 0x0001 @@ -813,7 +824,7 @@ struct host_cmd_ds_txpwr_cfg { struct mwifiex_bcn_param { u8 bssid[ETH_ALEN]; u8 rssi; - __le32 timestamp[2]; + __le64 timestamp; __le16 beacon_period; __le16 cap_info_bitmap; } __packed; @@ -982,8 +993,7 @@ struct mwifiex_ie_types_wmm_queue_status { struct ieee_types_vendor_header { u8 element_id; u8 len; - u8 oui[3]; - u8 oui_type; + u8 oui[4]; /* 0~2: oui, 3: oui_type */ u8 oui_subtype; u8 version; } __packed; @@ -1007,7 +1017,7 @@ struct ieee_types_wmm_parameter { struct ieee_types_vendor_header vend_hdr; u8 qos_info_bitmap; u8 reserved; - struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_MAX_QUEUES]; + struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS]; } __packed; struct ieee_types_wmm_info { @@ -1028,7 +1038,7 @@ struct ieee_types_wmm_info { struct host_cmd_ds_wmm_get_status { u8 queue_status_tlv[sizeof(struct mwifiex_ie_types_wmm_queue_status) * - IEEE80211_MAX_QUEUES]; + IEEE80211_NUM_ACS]; u8 wmm_param_tlv[sizeof(struct ieee_types_wmm_parameter) + 2]; } __packed; @@ -1045,7 +1055,7 @@ struct mwifiex_ie_types_htcap { struct mwifiex_ie_types_htinfo { struct mwifiex_ie_types_header header; - struct ieee80211_ht_info ht_info; + struct ieee80211_ht_operation ht_oper; } __packed; struct mwifiex_ie_types_2040bssco { @@ -1146,6 +1156,17 @@ struct host_cmd_ds_pcie_details { u32 sleep_cookie_addr_hi; } __packed; +struct mwifiex_ie_types_rssi_threshold { + struct mwifiex_ie_types_header header; + u8 abs_value; + u8 evt_freq; +} __packed; + +struct host_cmd_ds_802_11_subsc_evt { + __le16 action; + __le16 events; +} __packed; + struct host_cmd_ds_command { __le16 command; __le16 size; @@ -1195,6 +1216,7 @@ struct host_cmd_ds_command { struct host_cmd_ds_set_bss_mode bss_mode; struct host_cmd_ds_pcie_details pcie_host_spec; struct host_cmd_ds_802_11_eeprom_access eeprom; + struct host_cmd_ds_802_11_subsc_evt subsc_evt; } params; } __packed; diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index 54bb4839b57..d440c3eb640 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -131,6 +131,8 @@ static int mwifiex_init_priv(struct mwifiex_private *priv) priv->wmm_qosinfo = 0; priv->curr_bcn_buf = NULL; priv->curr_bcn_size = 0; + priv->wps_ie = NULL; + priv->wps_ie_len = 0; priv->scan_block = false; @@ -186,10 +188,10 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) adapter->cmd_sent = false; - if (adapter->iface_type == 
MWIFIEX_PCIE) - adapter->data_sent = false; - else + if (adapter->iface_type == MWIFIEX_SDIO) adapter->data_sent = true; + else + adapter->data_sent = false; adapter->cmd_resp_received = false; adapter->event_received = false; @@ -377,7 +379,8 @@ mwifiex_free_adapter(struct mwifiex_adapter *adapter) dev_dbg(adapter->dev, "info: free scan table\n"); - adapter->if_ops.cleanup_if(adapter); + if (adapter->if_ops.cleanup_if) + adapter->if_ops.cleanup_if(adapter); if (adapter->sleep_cfm) dev_kfree_skb_any(adapter->sleep_cfm); @@ -417,6 +420,8 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter) spin_lock_init(&adapter->cmd_pending_q_lock); spin_lock_init(&adapter->scan_pending_q_lock); + skb_queue_head_init(&adapter->usb_rx_data_q); + for (i = 0; i < adapter->priv_num; ++i) { INIT_LIST_HEAD(&adapter->bss_prio_tbl[i].bss_prio_head); adapter->bss_prio_tbl[i].bss_prio_cur = NULL; @@ -572,6 +577,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) struct mwifiex_private *priv; s32 i; unsigned long flags; + struct sk_buff *skb; /* mwifiex already shutdown */ if (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY) @@ -599,6 +605,18 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) spin_lock_irqsave(&adapter->mwifiex_lock, flags); + if (adapter->if_ops.data_complete) { + while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) { + struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb); + + priv = adapter->priv[rx_info->bss_num]; + if (priv) + priv->stats.rx_dropped++; + + adapter->if_ops.data_complete(adapter, skb); + } + } + /* Free adapter structure */ mwifiex_free_adapter(adapter); @@ -628,24 +646,28 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter, int ret; u32 poll_num = 1; - adapter->winner = 0; + if (adapter->if_ops.check_fw_status) { + adapter->winner = 0; - /* Check if firmware is already running */ - ret = adapter->if_ops.check_fw_status(adapter, poll_num); - if (!ret) { - dev_notice(adapter->dev, - "WLAN FW already running! Skip FW download\n"); - goto done; - } - poll_num = MAX_FIRMWARE_POLL_TRIES; - - /* Check if we are the winner for downloading FW */ - if (!adapter->winner) { - dev_notice(adapter->dev, - "Other intf already running! Skip FW download\n"); - poll_num = MAX_MULTI_INTERFACE_POLL_TRIES; - goto poll_fw; + /* check if firmware is already running */ + ret = adapter->if_ops.check_fw_status(adapter, poll_num); + if (!ret) { + dev_notice(adapter->dev, + "WLAN FW already running! Skip FW dnld\n"); + goto done; + } + + poll_num = MAX_FIRMWARE_POLL_TRIES; + + /* check if we are the winner for downloading FW */ + if (!adapter->winner) { + dev_notice(adapter->dev, + "FW already running! 
Skip FW dnld\n"); + poll_num = MAX_MULTI_INTERFACE_POLL_TRIES; + goto poll_fw; + } } + if (pmfw) { /* Download firmware with helper */ ret = adapter->if_ops.prog_fw(adapter, pmfw); @@ -664,6 +686,8 @@ poll_fw: } done: /* re-enable host interrupt for mwifiex after fw dnld is successful */ - adapter->if_ops.enable_int(adapter); + if (adapter->if_ops.enable_int) + adapter->if_ops.enable_int(adapter); + return ret; } diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h index 7ca4e8234f3..f0f95524e96 100644 --- a/drivers/net/wireless/mwifiex/ioctl.h +++ b/drivers/net/wireless/mwifiex/ioctl.h @@ -85,34 +85,6 @@ struct mwifiex_ds_get_stats { u32 wep_icv_error[4]; }; -#define BCN_RSSI_AVG_MASK 0x00000002 -#define BCN_NF_AVG_MASK 0x00000200 -#define ALL_RSSI_INFO_MASK 0x00000fff - -struct mwifiex_ds_get_signal { - /* - * Bit0: Last Beacon RSSI, Bit1: Average Beacon RSSI, - * Bit2: Last Data RSSI, Bit3: Average Data RSSI, - * Bit4: Last Beacon SNR, Bit5: Average Beacon SNR, - * Bit6: Last Data SNR, Bit7: Average Data SNR, - * Bit8: Last Beacon NF, Bit9: Average Beacon NF, - * Bit10: Last Data NF, Bit11: Average Data NF - */ - u16 selector; - s16 bcn_rssi_last; - s16 bcn_rssi_avg; - s16 data_rssi_last; - s16 data_rssi_avg; - s16 bcn_snr_last; - s16 bcn_snr_avg; - s16 data_snr_last; - s16 data_snr_avg; - s16 bcn_nf_last; - s16 bcn_nf_avg; - s16 data_nf_last; - s16 data_nf_avg; -}; - #define MWIFIEX_MAX_VER_STR_LEN 128 struct mwifiex_ver_ext { @@ -124,7 +96,7 @@ struct mwifiex_bss_info { u32 bss_mode; struct cfg80211_ssid ssid; u32 bss_chan; - u32 region_code; + u8 country_code[3]; u32 media_connected; u32 max_power_level; u32 min_power_level; @@ -308,8 +280,30 @@ struct mwifiex_ds_misc_cmd { u8 cmd[MWIFIEX_SIZE_OF_CMD_BUFFER]; }; +#define BITMASK_BCN_RSSI_LOW BIT(0) +#define BITMASK_BCN_RSSI_HIGH BIT(4) + +enum subsc_evt_rssi_state { + EVENT_HANDLED, + RSSI_LOW_RECVD, + RSSI_HIGH_RECVD +}; + +struct subsc_evt_cfg { + u8 abs_value; + u8 evt_freq; +}; + +struct mwifiex_ds_misc_subsc_evt { + u16 action; + u16 events; + struct subsc_evt_cfg bcn_l_rssi_cfg; + struct subsc_evt_cfg bcn_h_rssi_cfg; +}; + #define MWIFIEX_MAX_VSIE_LEN (256) #define MWIFIEX_MAX_VSIE_NUM (8) +#define MWIFIEX_VSIE_MASK_CLEAR 0x00 #define MWIFIEX_VSIE_MASK_SCAN 0x01 #define MWIFIEX_VSIE_MASK_ASSOC 0x02 #define MWIFIEX_VSIE_MASK_ADHOC 0x04 diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 8f9382b9c3c..8a390982463 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c @@ -118,15 +118,15 @@ mwifiex_cmd_append_tsf_tlv(struct mwifiex_private *priv, u8 **buffer, *buffer += sizeof(tsf_tlv.header); /* TSF at the time when beacon/probe_response was received */ - tsf_val = cpu_to_le64(bss_desc->network_tsf); + tsf_val = cpu_to_le64(bss_desc->fw_tsf); memcpy(*buffer, &tsf_val, sizeof(tsf_val)); *buffer += sizeof(tsf_val); - memcpy(&tsf_val, bss_desc->time_stamp, sizeof(tsf_val)); + tsf_val = cpu_to_le64(bss_desc->timestamp); dev_dbg(priv->adapter->dev, "info: %s: TSF offset calc: %016llx - %016llx\n", - __func__, tsf_val, bss_desc->network_tsf); + __func__, bss_desc->timestamp, bss_desc->fw_tsf); memcpy(*buffer, &tsf_val, sizeof(tsf_val)); *buffer += sizeof(tsf_val); @@ -225,6 +225,48 @@ mwifiex_setup_rates_from_bssdesc(struct mwifiex_private *priv, } /* + * This function appends a WPS IE. It is called from the network join command + * preparation routine. 
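The TSF TLV assembled in the join code above now carries two 64-bit little-endian values back to back: the firmware's TSF at the time the beacon or probe response was received (fw_tsf), followed by the timestamp taken from the frame itself. A small standalone sketch of that serialization, with a hand-rolled put_le64 standing in for the kernel's cpu_to_le64/memcpy sequence:

    #include <stdint.h>
    #include <stdio.h>

    static void put_le64(uint8_t *p, uint64_t v)
    {
        int i;

        for (i = 0; i < 8; i++)
            p[i] = (uint8_t)(v >> (8 * i));
    }

    /* Append the firmware TSF first, then the beacon timestamp, in the
     * order the association command writes them after the TLV header. */
    static size_t append_tsf_pair(uint8_t *buf, uint64_t fw_tsf, uint64_t timestamp)
    {
        put_le64(buf, fw_tsf);
        put_le64(buf + 8, timestamp);
        return 16;
    }

    int main(void)
    {
        uint8_t buf[16];

        printf("appended %zu bytes\n",
               append_tsf_pair(buf, 0x123456789ULL, 0x123450000ULL));
        return 0;
    }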
+ * + * If the IE buffer has been setup by the application, this routine appends + * the buffer as a WPS TLV type to the request. + */ +static int +mwifiex_cmd_append_wps_ie(struct mwifiex_private *priv, u8 **buffer) +{ + int retLen = 0; + struct mwifiex_ie_types_header ie_header; + + if (!buffer || !*buffer) + return 0; + + /* + * If there is a wps ie buffer setup, append it to the return + * parameter buffer pointer. + */ + if (priv->wps_ie_len) { + dev_dbg(priv->adapter->dev, "cmd: append wps ie %d to %p\n", + priv->wps_ie_len, *buffer); + + /* Wrap the generic IE buffer with a pass through TLV type */ + ie_header.type = cpu_to_le16(TLV_TYPE_MGMT_IE); + ie_header.len = cpu_to_le16(priv->wps_ie_len); + memcpy(*buffer, &ie_header, sizeof(ie_header)); + *buffer += sizeof(ie_header); + retLen += sizeof(ie_header); + + memcpy(*buffer, priv->wps_ie, priv->wps_ie_len); + *buffer += priv->wps_ie_len; + retLen += priv->wps_ie_len; + + } + + kfree(priv->wps_ie); + priv->wps_ie_len = 0; + return retLen; +} + +/* * This function appends a WAPI IE. * * This function is called from the network join command preparation routine. @@ -480,6 +522,8 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv, if (priv->sec_info.wapi_enabled && priv->wapi_ie_len) mwifiex_cmd_append_wapi_ie(priv, &pos); + if (priv->wps.session_enable && priv->wps_ie_len) + mwifiex_cmd_append_wps_ie(priv, &pos); mwifiex_cmd_append_generic_ie(priv, &pos); @@ -932,20 +976,20 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, /* Fill HT INFORMATION */ ht_info = (struct mwifiex_ie_types_htinfo *) pos; memset(ht_info, 0, sizeof(struct mwifiex_ie_types_htinfo)); - ht_info->header.type = cpu_to_le16(WLAN_EID_HT_INFORMATION); + ht_info->header.type = cpu_to_le16(WLAN_EID_HT_OPERATION); ht_info->header.len = - cpu_to_le16(sizeof(struct ieee80211_ht_info)); + cpu_to_le16(sizeof(struct ieee80211_ht_operation)); - ht_info->ht_info.control_chan = + ht_info->ht_oper.primary_chan = (u8) priv->curr_bss_params.bss_descriptor.channel; if (adapter->sec_chan_offset) { - ht_info->ht_info.ht_param = adapter->sec_chan_offset; - ht_info->ht_info.ht_param |= + ht_info->ht_oper.ht_param = adapter->sec_chan_offset; + ht_info->ht_oper.ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; } - ht_info->ht_info.operation_mode = + ht_info->ht_oper.operation_mode = cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); - ht_info->ht_info.basic_set[0] = 0xff; + ht_info->ht_oper.basic_set[0] = 0xff; pos += sizeof(struct mwifiex_ie_types_htinfo); cmd_append_size += sizeof(struct mwifiex_ie_types_htinfo); diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 9d1b3ca6334..be0f0e583f7 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -58,8 +58,9 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops, memmove(&adapter->if_ops, if_ops, sizeof(struct mwifiex_if_ops)); /* card specific initialization has been deferred until now .. 
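mwifiex_cmd_append_wps_ie above wraps the application-supplied IE in a little-endian type/length header before copying the payload and advancing the command buffer pointer. The same wrap-and-append pattern in a self-contained form (the TLV type value here is only a placeholder, not the real TLV_TYPE_MGMT_IE constant):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    struct tlv_header {             /* assumes a little-endian host for brevity */
        uint16_t type;
        uint16_t len;
    };

    /* Wrap ie[0..ie_len) in a TLV header at *pos and advance the pointer. */
    static size_t append_ie_tlv(uint8_t **pos, uint16_t type,
                                const uint8_t *ie, uint16_t ie_len)
    {
        struct tlv_header hdr = { .type = type, .len = ie_len };

        memcpy(*pos, &hdr, sizeof(hdr));
        memcpy(*pos + sizeof(hdr), ie, ie_len);
        *pos += sizeof(hdr) + ie_len;
        return sizeof(hdr) + ie_len;
    }

    int main(void)
    {
        uint8_t cmd_buf[64], *pos = cmd_buf;
        const uint8_t wps_ie[] = { 0xdd, 0x05, 0x00, 0x50, 0xf2, 0x04, 0x10 };

        printf("appended %zu bytes\n",
               append_ie_tlv(&pos, 0x0123 /* placeholder type */,
                             wps_ie, sizeof(wps_ie)));
        return 0;
    }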
*/ - if (adapter->if_ops.init_if(adapter)) - goto error; + if (adapter->if_ops.init_if) + if (adapter->if_ops.init_if(adapter)) + goto error; adapter->priv_num = 0; @@ -140,6 +141,7 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter) { int ret = 0; unsigned long flags; + struct sk_buff *skb; spin_lock_irqsave(&adapter->main_proc_lock, flags); @@ -161,7 +163,8 @@ process_start: if (adapter->int_status) { if (adapter->hs_activated) mwifiex_process_hs_config(adapter); - adapter->if_ops.process_int_status(adapter); + if (adapter->if_ops.process_int_status) + adapter->if_ops.process_int_status(adapter); } /* Need to wake up the card ? */ @@ -174,6 +177,7 @@ process_start: adapter->if_ops.wakeup(adapter); continue; } + if (IS_CARD_RX_RCVD(adapter)) { adapter->pm_wakeup_fw_try = false; if (adapter->ps_state == PS_STATE_SLEEP) @@ -194,6 +198,11 @@ process_start: } } + /* Check Rx data for USB */ + if (adapter->iface_type == MWIFIEX_USB) + while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) + mwifiex_handle_rx_packet(adapter, skb); + /* Check for Cmd Resp */ if (adapter->cmd_resp_received) { adapter->cmd_resp_received = false; @@ -292,33 +301,35 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter) } /* - * This function initializes the hardware and firmware. + * This function gets firmware and initializes it. * * The main initialization steps followed are - * - Download the correct firmware to card - * - Allocate and initialize the adapter structure - * - Initialize the private structures * - Issue the init commands to firmware */ -static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter) +static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) { - int ret, err; + int ret; + char fmt[64]; + struct mwifiex_private *priv; + struct mwifiex_adapter *adapter = context; struct mwifiex_fw_image fw; - memset(&fw, 0, sizeof(struct mwifiex_fw_image)); - - err = request_firmware(&adapter->firmware, adapter->fw_name, - adapter->dev); - if (err < 0) { - dev_err(adapter->dev, "request_firmware() returned" - " error code %#x\n", err); - ret = -1; + if (!firmware) { + dev_err(adapter->dev, + "Failed to get firmware %s\n", adapter->fw_name); goto done; } + + memset(&fw, 0, sizeof(struct mwifiex_fw_image)); + adapter->firmware = firmware; fw.fw_buf = (u8 *) adapter->firmware->data; fw.fw_len = adapter->firmware->size; - ret = mwifiex_dnld_fw(adapter, &fw); + if (adapter->if_ops.dnld_fw) + ret = adapter->if_ops.dnld_fw(adapter, &fw); + else + ret = mwifiex_dnld_fw(adapter, &fw); if (ret == -1) goto done; @@ -335,17 +346,54 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter) /* Wait for mwifiex_init to complete */ wait_event_interruptible(adapter->init_wait_q, adapter->init_wait_q_woken); - if (adapter->hw_status != MWIFIEX_HW_STATUS_READY) { - ret = -1; + if (adapter->hw_status != MWIFIEX_HW_STATUS_READY) goto done; + + priv = adapter->priv[0]; + if (mwifiex_register_cfg80211(priv) != 0) { + dev_err(adapter->dev, "cannot register with cfg80211\n"); + goto err_init_fw; } - ret = 0; + rtnl_lock(); + /* Create station interface by default */ + if (!mwifiex_add_virtual_intf(priv->wdev->wiphy, "mlan%d", + NL80211_IFTYPE_STATION, NULL, NULL)) { + dev_err(adapter->dev, "cannot create default STA interface\n"); + goto err_add_intf; + } + rtnl_unlock(); + + mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1); + dev_notice(adapter->dev, "driver_version = %s\n", fmt); + goto done; + +err_add_intf: + mwifiex_del_virtual_intf(priv->wdev->wiphy, 
priv->netdev); + rtnl_unlock(); +err_init_fw: + pr_debug("info: %s: unregister device\n", __func__); + adapter->if_ops.unregister_dev(adapter); done: - if (adapter->firmware) - release_firmware(adapter->firmware); - if (ret) - ret = -1; + release_firmware(adapter->firmware); + complete(&adapter->fw_load); + return; +} + +/* + * This function initializes the hardware and gets firmware. + */ +static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter) +{ + int ret; + + init_completion(&adapter->fw_load); + ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name, + adapter->dev, GFP_KERNEL, adapter, + mwifiex_fw_dpc); + if (ret < 0) + dev_err(adapter->dev, + "request_firmware_nowait() returned error %d\n", ret); return ret; } @@ -650,8 +698,6 @@ mwifiex_add_card(void *card, struct semaphore *sem, struct mwifiex_if_ops *if_ops, u8 iface_type) { struct mwifiex_adapter *adapter; - char fmt[64]; - struct mwifiex_private *priv; if (down_interruptible(sem)) goto exit_sem_err; @@ -692,40 +738,13 @@ mwifiex_add_card(void *card, struct semaphore *sem, goto err_init_fw; } - priv = adapter->priv[0]; - - if (mwifiex_register_cfg80211(priv) != 0) { - dev_err(adapter->dev, "cannot register netdevice" - " with cfg80211\n"); - goto err_init_fw; - } - - rtnl_lock(); - /* Create station interface by default */ - if (!mwifiex_add_virtual_intf(priv->wdev->wiphy, "mlan%d", - NL80211_IFTYPE_STATION, NULL, NULL)) { - rtnl_unlock(); - dev_err(adapter->dev, "cannot create default station" - " interface\n"); - goto err_add_intf; - } - - rtnl_unlock(); - up(sem); - - mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1); - dev_notice(adapter->dev, "driver_version = %s\n", fmt); - return 0; -err_add_intf: - rtnl_lock(); - mwifiex_del_virtual_intf(priv->wdev->wiphy, priv->netdev); - rtnl_unlock(); err_init_fw: pr_debug("info: %s: unregister device\n", __func__); - adapter->if_ops.unregister_dev(adapter); + if (adapter->if_ops.unregister_dev) + adapter->if_ops.unregister_dev(adapter); err_registerdev: adapter->surprise_removed = true; mwifiex_terminate_workqueue(adapter); @@ -830,7 +849,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem) /* Unregister device */ dev_dbg(adapter->dev, "info: unregister device\n"); - adapter->if_ops.unregister_dev(adapter); + if (adapter->if_ops.unregister_dev) + adapter->if_ops.unregister_dev(adapter); /* Free adapter structure */ dev_dbg(adapter->dev, "info: free adapter\n"); mwifiex_free_adapter(adapter); diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index 35225e9b108..324ad390cac 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -92,9 +92,16 @@ enum { #define MWIFIEX_OUI_NOT_PRESENT 0 #define MWIFIEX_OUI_PRESENT 1 +/* + * Do not check for data_received for USB, as data_received + * is handled in mwifiex_usb_recv for USB + */ #define IS_CARD_RX_RCVD(adapter) (adapter->cmd_resp_received || \ - adapter->event_received || \ - adapter->data_received) + adapter->event_received || \ + ((adapter->iface_type != MWIFIEX_USB) && \ + adapter->data_received) || \ + ((adapter->iface_type == MWIFIEX_USB) && \ + !skb_queue_empty(&adapter->usb_rx_data_q))) #define MWIFIEX_TYPE_CMD 1 #define MWIFIEX_TYPE_DATA 0 @@ -110,6 +117,11 @@ enum { #define MWIFIEX_EVENT_HEADER_LEN 4 +#define MWIFIEX_TYPE_LEN 4 +#define MWIFIEX_USB_TYPE_CMD 0xF00DFACE +#define MWIFIEX_USB_TYPE_DATA 0xBEADC0DE +#define MWIFIEX_USB_TYPE_EVENT 0xBEEFFACE + struct mwifiex_dbg { u32 
num_cmd_host_to_card_failure; u32 num_cmd_sleep_cfm_host_to_card_failure; @@ -162,6 +174,7 @@ enum MWIFIEX_PS_STATE { enum mwifiex_iface_type { MWIFIEX_SDIO, MWIFIEX_PCIE, + MWIFIEX_USB }; struct mwifiex_add_ba_param { @@ -201,10 +214,10 @@ struct mwifiex_wmm_desc { u32 packets_out[MAX_NUM_TID]; /* spin lock to protect ra_list */ spinlock_t ra_list_spinlock; - struct mwifiex_wmm_ac_status ac_status[IEEE80211_MAX_QUEUES]; - enum mwifiex_wmm_ac_e ac_down_graded_vals[IEEE80211_MAX_QUEUES]; + struct mwifiex_wmm_ac_status ac_status[IEEE80211_NUM_ACS]; + enum mwifiex_wmm_ac_e ac_down_graded_vals[IEEE80211_NUM_ACS]; u32 drv_pkt_delay_max; - u8 queue_priority[IEEE80211_MAX_QUEUES]; + u8 queue_priority[IEEE80211_NUM_ACS]; u32 user_pri_pkt_tx_ctrl[WMM_HIGHEST_PRIORITY + 1]; /* UP: 0 to 7 */ /* Number of transmit packets queued */ atomic_t tx_pkts_queued; @@ -260,8 +273,8 @@ struct mwifiex_bssdescriptor { * BAND_A(0X04): 'a' band */ u16 bss_band; - u64 network_tsf; - u8 time_stamp[8]; + u64 fw_tsf; + u64 timestamp; union ieee_types_phy_param_set phy_param_set; union ieee_types_ss_param_set ss_param_set; u16 cap_info_bitmap; @@ -269,7 +282,7 @@ struct mwifiex_bssdescriptor { u8 disable_11n; struct ieee80211_ht_cap *bcn_ht_cap; u16 ht_cap_offset; - struct ieee80211_ht_info *bcn_ht_info; + struct ieee80211_ht_operation *bcn_ht_oper; u16 ht_info_offset; u8 *bcn_bss_co_2040; u16 bss_co_2040_offset; @@ -407,6 +420,8 @@ struct mwifiex_private { struct host_cmd_ds_802_11_key_material aes_key; u8 wapi_ie[256]; u8 wapi_ie_len; + u8 *wps_ie; + u8 wps_ie_len; u8 wmm_required; u8 wmm_enabled; u8 wmm_qosinfo; @@ -448,7 +463,6 @@ struct mwifiex_private { struct dentry *dfs_dev_dir; #endif u8 nick_name[16]; - u8 qual_level, qual_noise; u16 current_key_index; struct semaphore async_sem; u8 scan_pending_on_block; @@ -459,6 +473,9 @@ struct mwifiex_private { u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; struct wps wps; u8 scan_block; + s32 cqm_rssi_thold; + u32 cqm_rssi_hyst; + u8 subsc_evt_rssi_state; }; enum mwifiex_ba_status { @@ -518,6 +535,11 @@ struct cmd_ctrl_node { u8 cmd_wait_q_woken; }; +struct mwifiex_bss_priv { + u8 band; + u64 fw_tsf; +}; + struct mwifiex_if_ops { int (*init_if) (struct mwifiex_adapter *); void (*cleanup_if) (struct mwifiex_adapter *); @@ -537,6 +559,8 @@ struct mwifiex_if_ops { void (*cleanup_mpa_buf) (struct mwifiex_adapter *); int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *); int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *); + int (*data_complete) (struct mwifiex_adapter *, struct sk_buff *); + int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *); }; struct mwifiex_adapter { @@ -599,6 +623,7 @@ struct mwifiex_adapter { struct list_head scan_pending_q; /* spin lock for scan_pending_q */ spinlock_t scan_pending_q_lock; + struct sk_buff_head usb_rx_data_q; u32 scan_processing; u16 region_code; struct mwifiex_802_11d_domain_reg domain_reg; @@ -651,6 +676,7 @@ struct mwifiex_adapter { u8 scan_wait_q_woken; struct cmd_ctrl_node *cmd_queued; spinlock_t queue_lock; /* lock for tx queues */ + struct completion fw_load; }; int mwifiex_init_lock_list(struct mwifiex_adapter *adapter); @@ -896,8 +922,6 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type); int mwifiex_enable_hs(struct mwifiex_adapter *adapter); int mwifiex_disable_auto_ds(struct mwifiex_private *priv); -int mwifiex_get_signal_info(struct mwifiex_private *priv, - struct 
mwifiex_ds_get_signal *signal); int mwifiex_drv_get_data_rate(struct mwifiex_private *priv, struct mwifiex_rate_cfg *rate); int mwifiex_request_scan(struct mwifiex_private *priv, @@ -950,13 +974,10 @@ int mwifiex_bss_set_channel(struct mwifiex_private *, int mwifiex_get_bss_info(struct mwifiex_private *, struct mwifiex_bss_info *); int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, - u8 *bssid, s32 rssi, u8 *ie_buf, - size_t ie_len, u16 beacon_period, - u16 cap_info_bitmap, u8 band, + struct cfg80211_bss *bss, struct mwifiex_bssdescriptor *bss_desc); int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, - struct mwifiex_bssdescriptor *bss_entry, - u8 *ie_buf, u32 ie_len); + struct mwifiex_bssdescriptor *bss_entry); int mwifiex_check_network_compatibility(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc); @@ -965,6 +986,7 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy, u32 *flags, struct vif_params *params); int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev); +u8 *mwifiex_11d_code_2_region(u8 code); #ifdef CONFIG_DEBUG_FS void mwifiex_debugfs_init(void); diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 5867facd415..13fbc4eb159 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -119,6 +119,9 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) if (!adapter || !adapter->priv_num) return; + /* In case driver is removed when asynchronous FW load is in progress */ + wait_for_completion(&adapter->fw_load); + if (user_rmmod) { #ifdef CONFIG_PM if (adapter->is_suspended) diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index aff9cd763f2..74f04571572 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -1048,10 +1048,8 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter, * This function parses provided beacon buffer and updates * respective fields in bss descriptor structure. 
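The beacon parser named in the comment above walks the buffer as a flat sequence of (element id, length, payload) triplets and dispatches on the id. A minimal, self-contained IE walker showing that loop structure (illustrative only, not the driver's code):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Each 802.11 information element is: id (1 byte), len (1 byte), payload (len bytes). */
    static void walk_ies(const uint8_t *buf, size_t bytes_left)
    {
        while (bytes_left >= 2) {
            uint8_t id  = buf[0];
            uint8_t len = buf[1];

            if ((size_t)len + 2 > bytes_left) {
                fprintf(stderr, "truncated IE %u\n", (unsigned)id);
                return;
            }
            printf("IE %3u, %u byte(s) of payload\n", (unsigned)id, (unsigned)len);
            buf += len + 2;
            bytes_left -= len + 2;
        }
    }

    int main(void)
    {
        /* SSID "ap" followed by a supported-rates element. */
        const uint8_t ies[] = { 0, 2, 'a', 'p', 1, 4, 0x82, 0x84, 0x8b, 0x96 };

        walk_ies(ies, sizeof(ies));
        return 0;
    }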
*/ -int -mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, - struct mwifiex_bssdescriptor *bss_entry, - u8 *ie_buf, u32 ie_len) +int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, + struct mwifiex_bssdescriptor *bss_entry) { int ret = 0; u8 element_id; @@ -1073,10 +1071,8 @@ mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, found_data_rate_ie = false; rate_size = 0; - current_ptr = ie_buf; - bytes_left = ie_len; - bss_entry->beacon_buf = ie_buf; - bss_entry->beacon_buf_size = ie_len; + current_ptr = bss_entry->beacon_buf; + bytes_left = bss_entry->beacon_buf_size; /* Process variable IE */ while (bytes_left >= 2) { @@ -1221,9 +1217,9 @@ mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, sizeof(struct ieee_types_header) - bss_entry->beacon_buf); break; - case WLAN_EID_HT_INFORMATION: - bss_entry->bcn_ht_info = (struct ieee80211_ht_info *) - (current_ptr + + case WLAN_EID_HT_OPERATION: + bss_entry->bcn_ht_oper = + (struct ieee80211_ht_operation *)(current_ptr + sizeof(struct ieee_types_header)); bss_entry->ht_info_offset = (u16) (current_ptr + sizeof(struct ieee_types_header) - @@ -1447,15 +1443,12 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv, return ret; } -static int -mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid, - s32 rssi, const u8 *ie_buf, size_t ie_len, - u16 beacon_period, u16 cap_info_bitmap, u8 band) +static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv, + struct cfg80211_bss *bss) { struct mwifiex_bssdescriptor *bss_desc; int ret; unsigned long flags; - u8 *beacon_ie; /* Allocate and fill new bss descriptor */ bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), @@ -1465,16 +1458,7 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid, return -ENOMEM; } - beacon_ie = kmemdup(ie_buf, ie_len, GFP_KERNEL); - if (!beacon_ie) { - kfree(bss_desc); - dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n"); - return -ENOMEM; - } - - ret = mwifiex_fill_new_bss_desc(priv, bssid, rssi, beacon_ie, - ie_len, beacon_period, - cap_info_bitmap, band, bss_desc); + ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc); if (ret) goto done; @@ -1493,7 +1477,7 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid, priv->curr_bss_params.bss_descriptor.bcn_ht_cap = NULL; priv->curr_bss_params.bss_descriptor.ht_cap_offset = 0; - priv->curr_bss_params.bss_descriptor.bcn_ht_info = NULL; + priv->curr_bss_params.bss_descriptor.bcn_ht_oper = NULL; priv->curr_bss_params.bss_descriptor.ht_info_offset = 0; priv->curr_bss_params.bss_descriptor.bcn_bss_co_2040 = @@ -1514,7 +1498,6 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid, done: kfree(bss_desc); - kfree(beacon_ie); return 0; } @@ -1620,14 +1603,16 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, const u8 *ie_buf; size_t ie_len; u16 channel = 0; - u64 network_tsf = 0; + u64 fw_tsf = 0; u16 beacon_size = 0; u32 curr_bcn_bytes; u32 freq; u16 beacon_period; u16 cap_info_bitmap; u8 *current_ptr; + u64 timestamp; struct mwifiex_bcn_param *bcn_param; + struct mwifiex_bss_priv *bss_priv; if (bytes_left >= sizeof(beacon_size)) { /* Extract & convert beacon size from command buffer */ @@ -1667,9 +1652,11 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, memcpy(bssid, bcn_param->bssid, ETH_ALEN); - rssi = (s32) (bcn_param->rssi); - dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%02X\n", rssi); + rssi = (s32) bcn_param->rssi; + rssi = (-rssi) * 
100; /* Convert dBm to mBm */ + dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi); + timestamp = le64_to_cpu(bcn_param->timestamp); beacon_period = le16_to_cpu(bcn_param->beacon_period); cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap); @@ -1709,14 +1696,13 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, /* * If the TSF TLV was appended to the scan results, save this - * entry's TSF value in the networkTSF field.The networkTSF is - * the firmware's TSF value at the time the beacon or probe - * response was received. + * entry's TSF value in the fw_tsf field. It is the firmware's + * TSF value at the time the beacon or probe response was + * received. */ if (tsf_tlv) - memcpy(&network_tsf, - &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE], - sizeof(network_tsf)); + memcpy(&fw_tsf, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE], + sizeof(fw_tsf)); if (channel) { struct ieee80211_channel *chan; @@ -1739,21 +1725,19 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) { bss = cfg80211_inform_bss(priv->wdev->wiphy, - chan, bssid, network_tsf, + chan, bssid, timestamp, cap_info_bitmap, beacon_period, ie_buf, ie_len, rssi, GFP_KERNEL); - *(u8 *)bss->priv = band; - cfg80211_put_bss(bss); - + bss_priv = (struct mwifiex_bss_priv *)bss->priv; + bss_priv->band = band; + bss_priv->fw_tsf = fw_tsf; if (priv->media_connected && !memcmp(bssid, priv->curr_bss_params.bss_descriptor .mac_address, ETH_ALEN)) - mwifiex_update_curr_bss_params - (priv, bssid, rssi, - ie_buf, ie_len, - beacon_period, - cap_info_bitmap, band); + mwifiex_update_curr_bss_params(priv, + bss); + cfg80211_put_bss(bss); } } else { dev_dbg(adapter->dev, "missing BSS channel IE\n"); @@ -2019,8 +2003,8 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv) (curr_bss->beacon_buf + curr_bss->ht_cap_offset); - if (curr_bss->bcn_ht_info) - curr_bss->bcn_ht_info = (struct ieee80211_ht_info *) + if (curr_bss->bcn_ht_oper) + curr_bss->bcn_ht_oper = (struct ieee80211_ht_operation *) (curr_bss->beacon_buf + curr_bss->ht_info_offset); diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index f8012e2b7f7..e0377473282 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -123,6 +123,9 @@ mwifiex_sdio_remove(struct sdio_func *func) if (!adapter || !adapter->priv_num) return; + /* In case driver is removed when asynchronous FW load is in progress */ + wait_for_completion(&adapter->fw_load); + if (user_rmmod) { if (adapter->is_suspended) mwifiex_sdio_resume(adapter->dev); @@ -250,6 +253,8 @@ static int mwifiex_sdio_resume(struct device *dev) return 0; } +/* Device ID for SD8786 */ +#define SDIO_DEVICE_ID_MARVELL_8786 (0x9116) /* Device ID for SD8787 */ #define SDIO_DEVICE_ID_MARVELL_8787 (0x9119) /* Device ID for SD8797 */ @@ -257,6 +262,7 @@ static int mwifiex_sdio_resume(struct device *dev) /* WLAN IDs */ static const struct sdio_device_id mwifiex_ids[] = { + {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8786)}, {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)}, {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797)}, {}, @@ -1596,6 +1602,9 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) adapter->dev = &func->dev; switch (func->device) { + case SDIO_DEVICE_ID_MARVELL_8786: + strcpy(adapter->fw_name, SD8786_DEFAULT_FW_NAME); + break; case SDIO_DEVICE_ID_MARVELL_8797: strcpy(adapter->fw_name, SD8797_DEFAULT_FW_NAME); break; @@ -1804,5 +1813,6 
@@ MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION); MODULE_VERSION(SDIO_VERSION); MODULE_LICENSE("GPL v2"); +MODULE_FIRMWARE(SD8786_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME); diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h index a3fb322205b..21033738ef0 100644 --- a/drivers/net/wireless/mwifiex/sdio.h +++ b/drivers/net/wireless/mwifiex/sdio.h @@ -28,6 +28,7 @@ #include "main.h" +#define SD8786_DEFAULT_FW_NAME "mrvl/sd8786_uapsta.bin" #define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin" #define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin" @@ -193,7 +194,7 @@ a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt+1+(MAX_PORT - \ a->mp_end_port))); \ a->mpa_tx.pkt_cnt++; \ -} while (0); +} while (0) /* SDIO Tx aggregation limit ? */ #define MP_TX_AGGR_PKT_LIMIT_REACHED(a) \ @@ -211,7 +212,7 @@ a->mpa_tx.buf_len = 0; \ a->mpa_tx.ports = 0; \ a->mpa_tx.start_port = 0; \ -} while (0); +} while (0) /* SDIO Rx aggregation limit ? */ #define MP_RX_AGGR_PKT_LIMIT_REACHED(a) \ @@ -242,7 +243,7 @@ a->mpa_rx.skb_arr[a->mpa_rx.pkt_cnt] = skb; \ a->mpa_rx.len_arr[a->mpa_rx.pkt_cnt] = skb->len; \ a->mpa_rx.pkt_cnt++; \ -} while (0); +} while (0) /* Reset SDIO Rx aggregation buffer parameters */ #define MP_RX_AGGR_BUF_RESET(a) do { \ @@ -250,7 +251,7 @@ a->mpa_rx.buf_len = 0; \ a->mpa_rx.ports = 0; \ a->mpa_rx.start_port = 0; \ -} while (0); +} while (0) /* data structure for SDIO MPA TX */ diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index 6c8e4594b48..87ed2a1f6cd 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -907,6 +907,101 @@ mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv, } /* + * This function prepares command for event subscription, configuration + * and query. Events can be subscribed or unsubscribed. Current subscribed + * events can be queried. Also, current subscribed events are reported in + * every FW response. + */ +static int +mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd, + struct mwifiex_ds_misc_subsc_evt *subsc_evt_cfg) +{ + struct host_cmd_ds_802_11_subsc_evt *subsc_evt = &cmd->params.subsc_evt; + struct mwifiex_ie_types_rssi_threshold *rssi_tlv; + u16 event_bitmap; + u8 *pos; + + cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SUBSCRIBE_EVENT); + cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_subsc_evt) + + S_DS_GEN); + + subsc_evt->action = cpu_to_le16(subsc_evt_cfg->action); + dev_dbg(priv->adapter->dev, "cmd: action: %d\n", subsc_evt_cfg->action); + + /*For query requests, no configuration TLV structures are to be added.*/ + if (subsc_evt_cfg->action == HostCmd_ACT_GEN_GET) + return 0; + + subsc_evt->events = cpu_to_le16(subsc_evt_cfg->events); + + event_bitmap = subsc_evt_cfg->events; + dev_dbg(priv->adapter->dev, "cmd: event bitmap : %16x\n", + event_bitmap); + + if (((subsc_evt_cfg->action == HostCmd_ACT_BITWISE_CLR) || + (subsc_evt_cfg->action == HostCmd_ACT_BITWISE_SET)) && + (event_bitmap == 0)) { + dev_dbg(priv->adapter->dev, "Error: No event specified " + "for bitwise action type\n"); + return -EINVAL; + } + + /* + * Append TLV structures for each of the specified events for + * subscribing or re-configuring. This is not required for + * bitwise unsubscribing request. 
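For a bitwise-set request, each subscribed threshold appends one fixed-size RSSI TLV after the action/events header and the command length grows by that TLV size, while a query or a bitwise-clear request carries no TLVs at all. A rough size sketch, assuming the packed layouts added to fw.h (a 4-byte TLV header plus a value/frequency byte pair per threshold):

    #include <stdint.h>
    #include <stdio.h>

    #pragma pack(push, 1)
    struct subsc_evt_hdr { uint16_t action; uint16_t events; };
    struct rssi_thr_tlv  { uint16_t type; uint16_t len; uint8_t abs_value; uint8_t evt_freq; };
    #pragma pack(pop)

    int main(void)
    {
        /* Subscribing both the low and the high beacon RSSI event. */
        size_t body = sizeof(struct subsc_evt_hdr) + 2 * sizeof(struct rssi_thr_tlv);

        printf("header %zu + 2 * TLV %zu = %zu bytes of command body\n",
               sizeof(struct subsc_evt_hdr), sizeof(struct rssi_thr_tlv), body);
        return 0;
    }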
+ */ + if (subsc_evt_cfg->action == HostCmd_ACT_BITWISE_CLR) + return 0; + + pos = ((u8 *)subsc_evt) + + sizeof(struct host_cmd_ds_802_11_subsc_evt); + + if (event_bitmap & BITMASK_BCN_RSSI_LOW) { + rssi_tlv = (struct mwifiex_ie_types_rssi_threshold *) pos; + + rssi_tlv->header.type = cpu_to_le16(TLV_TYPE_RSSI_LOW); + rssi_tlv->header.len = + cpu_to_le16(sizeof(struct mwifiex_ie_types_rssi_threshold) - + sizeof(struct mwifiex_ie_types_header)); + rssi_tlv->abs_value = subsc_evt_cfg->bcn_l_rssi_cfg.abs_value; + rssi_tlv->evt_freq = subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq; + + dev_dbg(priv->adapter->dev, "Cfg Beacon Low Rssi event, " + "RSSI:-%d dBm, Freq:%d\n", + subsc_evt_cfg->bcn_l_rssi_cfg.abs_value, + subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq); + + pos += sizeof(struct mwifiex_ie_types_rssi_threshold); + le16_add_cpu(&cmd->size, + sizeof(struct mwifiex_ie_types_rssi_threshold)); + } + + if (event_bitmap & BITMASK_BCN_RSSI_HIGH) { + rssi_tlv = (struct mwifiex_ie_types_rssi_threshold *) pos; + + rssi_tlv->header.type = cpu_to_le16(TLV_TYPE_RSSI_HIGH); + rssi_tlv->header.len = + cpu_to_le16(sizeof(struct mwifiex_ie_types_rssi_threshold) - + sizeof(struct mwifiex_ie_types_header)); + rssi_tlv->abs_value = subsc_evt_cfg->bcn_h_rssi_cfg.abs_value; + rssi_tlv->evt_freq = subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq; + + dev_dbg(priv->adapter->dev, "Cfg Beacon High Rssi event, " + "RSSI:-%d dBm, Freq:%d\n", + subsc_evt_cfg->bcn_h_rssi_cfg.abs_value, + subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq); + + pos += sizeof(struct mwifiex_ie_types_rssi_threshold); + le16_add_cpu(&cmd->size, + sizeof(struct mwifiex_ie_types_rssi_threshold)); + } + + return 0; +} + +/* * This function prepares the commands before sending them to the firmware. * * This is a generic function which calls specific command preparation @@ -1086,6 +1181,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, case HostCmd_CMD_PCIE_DESC_DETAILS: ret = mwifiex_cmd_pcie_host_spec(priv, cmd_ptr, cmd_action); break; + case HostCmd_CMD_802_11_SUBSCRIBE_EVENT: + ret = mwifiex_cmd_802_11_subsc_evt(priv, cmd_ptr, data_buf); + break; default: dev_err(priv->adapter->dev, "PREP_CMD: unknown cmd- %#x\n", cmd_no); @@ -1195,7 +1293,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta) if (ret) return -1; - if (first_sta) { + if (first_sta && (priv->adapter->iface_type != MWIFIEX_USB)) { /* Enable auto deep sleep */ auto_ds.auto_ds = DEEP_SLEEP_ON; auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME; diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index 4da19ed0f07..3aa54243dea 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -119,11 +119,11 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv, * calculated SNR values. 
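The SNR values mentioned in the comment just above are obtained by subtracting the noise floor from the RSSI, both expressed in dBm. A one-line worked example:

    #include <stdio.h>

    /* SNR (dB) = RSSI (dBm) - noise floor (dBm) */
    static int cal_snr(int rssi_dbm, int nf_dbm)
    {
        return rssi_dbm - nf_dbm;
    }

    int main(void)
    {
        printf("snr = %d dB\n", cal_snr(-60, -92));   /* -> 32 dB */
        return 0;
    }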
*/ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv, - struct host_cmd_ds_command *resp, - struct mwifiex_ds_get_signal *signal) + struct host_cmd_ds_command *resp) { struct host_cmd_ds_802_11_rssi_info_rsp *rssi_info_rsp = &resp->params.rssi_info_rsp; + struct mwifiex_ds_misc_subsc_evt subsc_evt; priv->data_rssi_last = le16_to_cpu(rssi_info_rsp->data_rssi_last); priv->data_nf_last = le16_to_cpu(rssi_info_rsp->data_nf_last); @@ -137,34 +137,29 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv, priv->bcn_rssi_avg = le16_to_cpu(rssi_info_rsp->bcn_rssi_avg); priv->bcn_nf_avg = le16_to_cpu(rssi_info_rsp->bcn_nf_avg); - /* Need to indicate IOCTL complete */ - if (signal) { - memset(signal, 0, sizeof(*signal)); - - signal->selector = ALL_RSSI_INFO_MASK; - - /* RSSI */ - signal->bcn_rssi_last = priv->bcn_rssi_last; - signal->bcn_rssi_avg = priv->bcn_rssi_avg; - signal->data_rssi_last = priv->data_rssi_last; - signal->data_rssi_avg = priv->data_rssi_avg; - - /* SNR */ - signal->bcn_snr_last = - CAL_SNR(priv->bcn_rssi_last, priv->bcn_nf_last); - signal->bcn_snr_avg = - CAL_SNR(priv->bcn_rssi_avg, priv->bcn_nf_avg); - signal->data_snr_last = - CAL_SNR(priv->data_rssi_last, priv->data_nf_last); - signal->data_snr_avg = - CAL_SNR(priv->data_rssi_avg, priv->data_nf_avg); - - /* NF */ - signal->bcn_nf_last = priv->bcn_nf_last; - signal->bcn_nf_avg = priv->bcn_nf_avg; - signal->data_nf_last = priv->data_nf_last; - signal->data_nf_avg = priv->data_nf_avg; + if (priv->subsc_evt_rssi_state == EVENT_HANDLED) + return 0; + + /* Resubscribe low and high rssi events with new thresholds */ + memset(&subsc_evt, 0x00, sizeof(struct mwifiex_ds_misc_subsc_evt)); + subsc_evt.events = BITMASK_BCN_RSSI_LOW | BITMASK_BCN_RSSI_HIGH; + subsc_evt.action = HostCmd_ACT_BITWISE_SET; + if (priv->subsc_evt_rssi_state == RSSI_LOW_RECVD) { + subsc_evt.bcn_l_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg - + priv->cqm_rssi_hyst); + subsc_evt.bcn_h_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold); + } else if (priv->subsc_evt_rssi_state == RSSI_HIGH_RECVD) { + subsc_evt.bcn_l_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold); + subsc_evt.bcn_h_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg + + priv->cqm_rssi_hyst); } + subsc_evt.bcn_l_rssi_cfg.evt_freq = 1; + subsc_evt.bcn_h_rssi_cfg.evt_freq = 1; + + priv->subsc_evt_rssi_state = EVENT_HANDLED; + + mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT, + 0, 0, &subsc_evt); return 0; } @@ -785,6 +780,28 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv, } /* + * This function handles the command response for subscribe event command. + */ +static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv, + struct host_cmd_ds_command *resp, + struct mwifiex_ds_misc_subsc_evt *sub_event) +{ + struct host_cmd_ds_802_11_subsc_evt *cmd_sub_event = + (struct host_cmd_ds_802_11_subsc_evt *)&resp->params.subsc_evt; + + /* For every subscribe event command (Get/Set/Clear), FW reports the + * current set of subscribed events*/ + dev_dbg(priv->adapter->dev, "Bitmap of currently subscribed events: %16x\n", + le16_to_cpu(cmd_sub_event->events)); + + /*Return the subscribed event info for a Get request*/ + if (sub_event) + sub_event->events = le16_to_cpu(cmd_sub_event->events); + + return 0; +} + +/* * This function handles the command responses. 
* * This is a generic function, which calls command specific @@ -853,7 +870,7 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, ret = mwifiex_ret_get_log(priv, resp, data_buf); break; case HostCmd_CMD_RSSI_INFO: - ret = mwifiex_ret_802_11_rssi_info(priv, resp, data_buf); + ret = mwifiex_ret_802_11_rssi_info(priv, resp); break; case HostCmd_CMD_802_11_SNMP_MIB: ret = mwifiex_ret_802_11_snmp_mib(priv, resp, data_buf); @@ -924,6 +941,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, break; case HostCmd_CMD_PCIE_DESC_DETAILS: break; + case HostCmd_CMD_802_11_SUBSCRIBE_EVENT: + ret = mwifiex_ret_subsc_evt(priv, resp, data_buf); + break; default: dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n", resp->command); diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c index cc531b536a5..f6bbb9307f8 100644 --- a/drivers/net/wireless/mwifiex/sta_event.c +++ b/drivers/net/wireless/mwifiex/sta_event.c @@ -128,9 +128,6 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv) mwifiex_stop_net_dev_queue(priv->netdev, adapter); if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); - /* Reset wireless stats signal info */ - priv->qual_level = 0; - priv->qual_noise = 0; } /* @@ -317,6 +314,12 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_RSSI_LOW: + cfg80211_cqm_rssi_notify(priv->netdev, + NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, + GFP_KERNEL); + mwifiex_send_cmd_async(priv, HostCmd_CMD_RSSI_INFO, + HostCmd_ACT_GEN_GET, 0, NULL); + priv->subsc_evt_rssi_state = RSSI_LOW_RECVD; dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n"); break; case EVENT_SNR_LOW: @@ -326,6 +329,12 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) dev_dbg(adapter->dev, "event: MAX_FAIL\n"); break; case EVENT_RSSI_HIGH: + cfg80211_cqm_rssi_notify(priv->netdev, + NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, + GFP_KERNEL); + mwifiex_send_cmd_async(priv, HostCmd_CMD_RSSI_INFO, + HostCmd_ACT_GEN_GET, 0, NULL); + priv->subsc_evt_rssi_state = RSSI_HIGH_RECVD; dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n"); break; case EVENT_SNR_HIGH: diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index d7b11defafe..58970e0f7d1 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -155,20 +155,29 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, * information. 
*/ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, - u8 *bssid, s32 rssi, u8 *ie_buf, - size_t ie_len, u16 beacon_period, - u16 cap_info_bitmap, u8 band, + struct cfg80211_bss *bss, struct mwifiex_bssdescriptor *bss_desc) { int ret; + u8 *beacon_ie; + struct mwifiex_bss_priv *bss_priv = (void *)bss->priv; - memcpy(bss_desc->mac_address, bssid, ETH_ALEN); - bss_desc->rssi = rssi; - bss_desc->beacon_buf = ie_buf; - bss_desc->beacon_buf_size = ie_len; - bss_desc->beacon_period = beacon_period; - bss_desc->cap_info_bitmap = cap_info_bitmap; - bss_desc->bss_band = band; + beacon_ie = kmemdup(bss->information_elements, bss->len_beacon_ies, + GFP_KERNEL); + if (!beacon_ie) { + dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n"); + return -ENOMEM; + } + + memcpy(bss_desc->mac_address, bss->bssid, ETH_ALEN); + bss_desc->rssi = bss->signal; + bss_desc->beacon_buf = beacon_ie; + bss_desc->beacon_buf_size = bss->len_beacon_ies; + bss_desc->beacon_period = bss->beacon_interval; + bss_desc->cap_info_bitmap = bss->capability; + bss_desc->bss_band = bss_priv->band; + bss_desc->fw_tsf = bss_priv->fw_tsf; + bss_desc->timestamp = bss->tsf; if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) { dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n"); bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP; @@ -180,9 +189,9 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, else bss_desc->bss_mode = NL80211_IFTYPE_STATION; - ret = mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc, - ie_buf, ie_len); + ret = mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc); + kfree(beacon_ie); return ret; } @@ -197,7 +206,6 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, int ret; struct mwifiex_adapter *adapter = priv->adapter; struct mwifiex_bssdescriptor *bss_desc = NULL; - u8 *beacon_ie = NULL; priv->scan_block = false; @@ -210,19 +218,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, return -ENOMEM; } - beacon_ie = kmemdup(bss->information_elements, - bss->len_beacon_ies, GFP_KERNEL); - if (!beacon_ie) { - kfree(bss_desc); - dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n"); - return -ENOMEM; - } - - ret = mwifiex_fill_new_bss_desc(priv, bss->bssid, bss->signal, - beacon_ie, bss->len_beacon_ies, - bss->beacon_interval, - bss->capability, - *(u8 *)bss->priv, bss_desc); + ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc); if (ret) goto done; } @@ -269,7 +265,6 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor. ssid, &bss_desc->ssid))) { kfree(bss_desc); - kfree(beacon_ie); return 0; } @@ -304,7 +299,6 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, done: kfree(bss_desc); - kfree(beacon_ie); return ret; } @@ -468,7 +462,8 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv, info->bss_chan = bss_desc->channel; - info->region_code = adapter->region_code; + memcpy(info->country_code, priv->country_code, + IEEE80211_COUNTRY_STRING_LEN); info->media_connected = priv->media_connected; @@ -996,6 +991,39 @@ static int mwifiex_set_wapi_ie(struct mwifiex_private *priv, } /* + * IOCTL request handler to set/reset WPS IE. + * + * The supplied WPS IE is treated as a opaque buffer. Only the first field + * is checked to internally enable WPS. If buffer length is zero, the existing + * WPS IE is reset. 
+ */ +static int mwifiex_set_wps_ie(struct mwifiex_private *priv, + u8 *ie_data_ptr, u16 ie_len) +{ + if (ie_len) { + priv->wps_ie = kzalloc(MWIFIEX_MAX_VSIE_LEN, GFP_KERNEL); + if (!priv->wps_ie) + return -ENOMEM; + if (ie_len > sizeof(priv->wps_ie)) { + dev_dbg(priv->adapter->dev, + "info: failed to copy WPS IE, too big\n"); + kfree(priv->wps_ie); + return -1; + } + memcpy(priv->wps_ie, ie_data_ptr, ie_len); + priv->wps_ie_len = ie_len; + dev_dbg(priv->adapter->dev, "cmd: Set wps_ie_len=%d IE=%#x\n", + priv->wps_ie_len, priv->wps_ie[0]); + } else { + kfree(priv->wps_ie); + priv->wps_ie_len = ie_len; + dev_dbg(priv->adapter->dev, + "info: Reset wps_ie_len=%d\n", priv->wps_ie_len); + } + return 0; +} + +/* * IOCTL request handler to set WAPI key. * * This function prepares the correct firmware command and @@ -1185,39 +1213,6 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version, } /* - * Sends IOCTL request to get signal information. - * - * This function allocates the IOCTL request buffer, fills it - * with requisite parameters and calls the IOCTL handler. - */ -int mwifiex_get_signal_info(struct mwifiex_private *priv, - struct mwifiex_ds_get_signal *signal) -{ - int status; - - signal->selector = ALL_RSSI_INFO_MASK; - - /* Signal info can be obtained only if connected */ - if (!priv->media_connected) { - dev_dbg(priv->adapter->dev, - "info: Can not get signal in disconnected state\n"); - return -1; - } - - status = mwifiex_send_cmd_sync(priv, HostCmd_CMD_RSSI_INFO, - HostCmd_ACT_GEN_GET, 0, signal); - - if (!status) { - if (signal->selector & BCN_RSSI_AVG_MASK) - priv->qual_level = signal->bcn_rssi_avg; - if (signal->selector & BCN_NF_AVG_MASK) - priv->qual_noise = signal->bcn_nf_avg; - } - - return status; -} - -/* * Sends IOCTL request to set encoding parameters. 
* * This function allocates the IOCTL request buffer, fills it @@ -1441,6 +1436,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr, priv->wps.session_enable = true; dev_dbg(priv->adapter->dev, "info: WPS Session Enabled.\n"); + ret = mwifiex_set_wps_ie(priv, ie_data_ptr, ie_len); } /* Append the passed data to the end of the diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c index 750b695aca1..02ce3b77d3e 100644 --- a/drivers/net/wireless/mwifiex/sta_rx.c +++ b/drivers/net/wireless/mwifiex/sta_rx.c @@ -145,7 +145,12 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter, " rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len, local_rx_pd->rx_pkt_offset, local_rx_pd->rx_pkt_length); priv->stats.rx_dropped++; - dev_kfree_skb_any(skb); + + if (adapter->if_ops.data_complete) + adapter->if_ops.data_complete(adapter, skb); + else + dev_kfree_skb_any(skb); + return ret; } @@ -196,8 +201,12 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter, (u8) local_rx_pd->rx_pkt_type, skb); - if (ret || (rx_pkt_type == PKT_TYPE_BAR)) - dev_kfree_skb_any(skb); + if (ret || (rx_pkt_type == PKT_TYPE_BAR)) { + if (adapter->if_ops.data_complete) + adapter->if_ops.data_complete(adapter, skb); + else + dev_kfree_skb_any(skb); + } if (ret) priv->stats.rx_dropped++; diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c index 7af534feb42..0a046d3a0c1 100644 --- a/drivers/net/wireless/mwifiex/sta_tx.c +++ b/drivers/net/wireless/mwifiex/sta_tx.c @@ -149,10 +149,14 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags) local_tx_pd->bss_num = priv->bss_num; local_tx_pd->bss_type = priv->bss_type; - skb_push(skb, INTF_HEADER_LEN); - - ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, - skb, NULL); + if (adapter->iface_type == MWIFIEX_USB) { + ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA, + skb, NULL); + } else { + skb_push(skb, INTF_HEADER_LEN); + ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, + skb, NULL); + } switch (ret) { case -EBUSY: adapter->data_sent = true; diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c index d2af8cb9854..e2faec4db10 100644 --- a/drivers/net/wireless/mwifiex/txrx.c +++ b/drivers/net/wireless/mwifiex/txrx.c @@ -77,12 +77,23 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb, if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) local_tx_pd = (struct txpd *) (head_ptr + INTF_HEADER_LEN); - - ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, - skb, tx_param); + if (adapter->iface_type == MWIFIEX_USB) { + adapter->data_sent = true; + skb_pull(skb, INTF_HEADER_LEN); + ret = adapter->if_ops.host_to_card(adapter, + MWIFIEX_USB_EP_DATA, + skb, NULL); + } else { + ret = adapter->if_ops.host_to_card(adapter, + MWIFIEX_TYPE_DATA, + skb, tx_param); + } } switch (ret) { + case -ENOSR: + dev_err(adapter->dev, "data: -ENOSR is returned\n"); + break; case -EBUSY: if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && (adapter->pps_uapsd_mode) && (adapter->tx_lock_flag)) { @@ -135,6 +146,9 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter, if (!priv) goto done; + if (adapter->iface_type == MWIFIEX_USB) + adapter->data_sent = false; + mwifiex_set_trans_start(priv->netdev); if (!status) { priv->stats.tx_packets++; @@ -162,4 +176,5 @@ done: return 0; } +EXPORT_SYMBOL_GPL(mwifiex_write_data_complete); diff --git 
a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c new file mode 100644 index 00000000000..49ebf20c56e --- /dev/null +++ b/drivers/net/wireless/mwifiex/usb.c @@ -0,0 +1,1052 @@ +/* + * Marvell Wireless LAN device driver: USB specific handling + * + * Copyright (C) 2012, Marvell International Ltd. + * + * This software file (the "File") is distributed by Marvell International + * Ltd. under the terms of the GNU General Public License Version 2, June 1991 + * (the "License"). You may use, redistribute and/or modify this File in + * accordance with the terms and conditions of the License, a copy of which + * is available by writing to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the + * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. + * + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE + * ARE EXPRESSLY DISCLAIMED. The License provides additional details about + * this warranty disclaimer. + */ + +#include "main.h" +#include "usb.h" + +#define USB_VERSION "1.0" + +static const char usbdriver_name[] = "usb8797"; + +static u8 user_rmmod; +static struct mwifiex_if_ops usb_ops; +static struct semaphore add_remove_card_sem; + +static struct usb_device_id mwifiex_usb_table[] = { + {USB_DEVICE(USB8797_VID, USB8797_PID_1)}, + {USB_DEVICE_AND_INTERFACE_INFO(USB8797_VID, USB8797_PID_2, + USB_CLASS_VENDOR_SPEC, + USB_SUBCLASS_VENDOR_SPEC, 0xff)}, + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, mwifiex_usb_table); + +static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size); + +/* This function handles received packet. Necessary action is taken based on + * cmd/event/data. 
+ */ +static int mwifiex_usb_recv(struct mwifiex_adapter *adapter, + struct sk_buff *skb, u8 ep) +{ + struct device *dev = adapter->dev; + u32 recv_type; + __le32 tmp; + + if (adapter->hs_activated) + mwifiex_process_hs_config(adapter); + + if (skb->len < INTF_HEADER_LEN) { + dev_err(dev, "%s: invalid skb->len\n", __func__); + return -1; + } + + switch (ep) { + case MWIFIEX_USB_EP_CMD_EVENT: + dev_dbg(dev, "%s: EP_CMD_EVENT\n", __func__); + skb_copy_from_linear_data(skb, &tmp, INTF_HEADER_LEN); + recv_type = le32_to_cpu(tmp); + skb_pull(skb, INTF_HEADER_LEN); + + switch (recv_type) { + case MWIFIEX_USB_TYPE_CMD: + if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) { + dev_err(dev, "CMD: skb->len too large\n"); + return -1; + } else if (!adapter->curr_cmd) { + dev_dbg(dev, "CMD: no curr_cmd\n"); + if (adapter->ps_state == PS_STATE_SLEEP_CFM) { + mwifiex_process_sleep_confirm_resp( + adapter, skb->data, + skb->len); + return 0; + } + return -1; + } + + adapter->curr_cmd->resp_skb = skb; + adapter->cmd_resp_received = true; + break; + case MWIFIEX_USB_TYPE_EVENT: + if (skb->len < sizeof(u32)) { + dev_err(dev, "EVENT: skb->len too small\n"); + return -1; + } + skb_copy_from_linear_data(skb, &tmp, sizeof(u32)); + adapter->event_cause = le32_to_cpu(tmp); + skb_pull(skb, sizeof(u32)); + dev_dbg(dev, "event_cause %#x\n", adapter->event_cause); + + if (skb->len > MAX_EVENT_SIZE) { + dev_err(dev, "EVENT: event body too large\n"); + return -1; + } + + skb_copy_from_linear_data(skb, adapter->event_body, + skb->len); + adapter->event_received = true; + adapter->event_skb = skb; + break; + default: + dev_err(dev, "unknown recv_type %#x\n", recv_type); + return -1; + } + break; + case MWIFIEX_USB_EP_DATA: + dev_dbg(dev, "%s: EP_DATA\n", __func__); + if (skb->len > MWIFIEX_RX_DATA_BUF_SIZE) { + dev_err(dev, "DATA: skb->len too large\n"); + return -1; + } + skb_queue_tail(&adapter->usb_rx_data_q, skb); + adapter->data_received = true; + break; + default: + dev_err(dev, "%s: unknown endport %#x\n", __func__, ep); + return -1; + } + + return -EINPROGRESS; +} + +static void mwifiex_usb_rx_complete(struct urb *urb) +{ + struct urb_context *context = (struct urb_context *)urb->context; + struct mwifiex_adapter *adapter = context->adapter; + struct sk_buff *skb = context->skb; + struct usb_card_rec *card; + int recv_length = urb->actual_length; + int size, status; + + if (!adapter || !adapter->card) { + pr_err("mwifiex adapter or card structure is not valid\n"); + return; + } + + card = (struct usb_card_rec *)adapter->card; + if (card->rx_cmd_ep == context->ep) + atomic_dec(&card->rx_cmd_urb_pending); + else + atomic_dec(&card->rx_data_urb_pending); + + if (recv_length) { + if (urb->status || (adapter->surprise_removed)) { + dev_err(adapter->dev, + "URB status is failed: %d\n", urb->status); + /* Do not free skb in case of command ep */ + if (card->rx_cmd_ep != context->ep) + dev_kfree_skb_any(skb); + goto setup_for_next; + } + if (skb->len > recv_length) + skb_trim(skb, recv_length); + else + skb_put(skb, recv_length - skb->len); + + atomic_inc(&adapter->rx_pending); + status = mwifiex_usb_recv(adapter, skb, context->ep); + + dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n", + recv_length, status); + if (status == -EINPROGRESS) { + queue_work(adapter->workqueue, &adapter->main_work); + + /* urb for data_ep is re-submitted now; + * urb for cmd_ep will be re-submitted in callback + * mwifiex_usb_recv_complete + */ + if (card->rx_cmd_ep == context->ep) + return; + } else { + atomic_dec(&adapter->rx_pending); + if 
(status == -1) + dev_err(adapter->dev, + "received data processing failed!\n"); + + /* Do not free skb in case of command ep */ + if (card->rx_cmd_ep != context->ep) + dev_kfree_skb_any(skb); + } + } else if (urb->status) { + if (!adapter->is_suspended) { + dev_warn(adapter->dev, + "Card is removed: %d\n", urb->status); + adapter->surprise_removed = true; + } + dev_kfree_skb_any(skb); + return; + } else { + /* Do not free skb in case of command ep */ + if (card->rx_cmd_ep != context->ep) + dev_kfree_skb_any(skb); + + /* fall through setup_for_next */ + } + +setup_for_next: + if (card->rx_cmd_ep == context->ep) + size = MWIFIEX_RX_CMD_BUF_SIZE; + else + size = MWIFIEX_RX_DATA_BUF_SIZE; + + mwifiex_usb_submit_rx_urb(context, size); + + return; +} + +static void mwifiex_usb_tx_complete(struct urb *urb) +{ + struct urb_context *context = (struct urb_context *)(urb->context); + struct mwifiex_adapter *adapter = context->adapter; + struct usb_card_rec *card = adapter->card; + + dev_dbg(adapter->dev, "%s: status: %d\n", __func__, urb->status); + + if (context->ep == card->tx_cmd_ep) { + dev_dbg(adapter->dev, "%s: CMD\n", __func__); + atomic_dec(&card->tx_cmd_urb_pending); + adapter->cmd_sent = false; + } else { + dev_dbg(adapter->dev, "%s: DATA\n", __func__); + atomic_dec(&card->tx_data_urb_pending); + mwifiex_write_data_complete(adapter, context->skb, + urb->status ? -1 : 0); + } + + queue_work(adapter->workqueue, &adapter->main_work); + + return; +} + +static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size) +{ + struct mwifiex_adapter *adapter = ctx->adapter; + struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; + + if (card->rx_cmd_ep != ctx->ep) { + ctx->skb = dev_alloc_skb(size); + if (!ctx->skb) { + dev_err(adapter->dev, + "%s: dev_alloc_skb failed\n", __func__); + return -ENOMEM; + } + } + + usb_fill_bulk_urb(ctx->urb, card->udev, + usb_rcvbulkpipe(card->udev, ctx->ep), ctx->skb->data, + size, mwifiex_usb_rx_complete, (void *)ctx); + + if (card->rx_cmd_ep == ctx->ep) + atomic_inc(&card->rx_cmd_urb_pending); + else + atomic_inc(&card->rx_data_urb_pending); + + if (usb_submit_urb(ctx->urb, GFP_ATOMIC)) { + dev_err(adapter->dev, "usb_submit_urb failed\n"); + dev_kfree_skb_any(ctx->skb); + ctx->skb = NULL; + + if (card->rx_cmd_ep == ctx->ep) + atomic_dec(&card->rx_cmd_urb_pending); + else + atomic_dec(&card->rx_data_urb_pending); + + return -1; + } + + return 0; +} + +static void mwifiex_usb_free(struct usb_card_rec *card) +{ + int i; + + if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb) + usb_kill_urb(card->rx_cmd.urb); + + usb_free_urb(card->rx_cmd.urb); + card->rx_cmd.urb = NULL; + + if (atomic_read(&card->rx_data_urb_pending)) + for (i = 0; i < MWIFIEX_RX_DATA_URB; i++) + if (card->rx_data_list[i].urb) + usb_kill_urb(card->rx_data_list[i].urb); + + for (i = 0; i < MWIFIEX_RX_DATA_URB; i++) { + usb_free_urb(card->rx_data_list[i].urb); + card->rx_data_list[i].urb = NULL; + } + + for (i = 0; i < MWIFIEX_TX_DATA_URB; i++) { + usb_free_urb(card->tx_data_list[i].urb); + card->tx_data_list[i].urb = NULL; + } + + usb_free_urb(card->tx_cmd.urb); + card->tx_cmd.urb = NULL; + + return; +} + +/* This function probes an mwifiex device and registers it. It allocates + * the card structure, initiates the device registration and initialization + * procedure by adding a logical interface. 
+ */ +static int mwifiex_usb_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *udev = interface_to_usbdev(intf); + struct usb_host_interface *iface_desc = intf->cur_altsetting; + struct usb_endpoint_descriptor *epd; + int ret, i; + struct usb_card_rec *card; + u16 id_vendor, id_product, bcd_device, bcd_usb; + + card = kzalloc(sizeof(struct usb_card_rec), GFP_KERNEL); + if (!card) + return -ENOMEM; + + id_vendor = le16_to_cpu(udev->descriptor.idVendor); + id_product = le16_to_cpu(udev->descriptor.idProduct); + bcd_device = le16_to_cpu(udev->descriptor.bcdDevice); + bcd_usb = le16_to_cpu(udev->descriptor.bcdUSB); + pr_debug("info: VID/PID = %X/%X, Boot2 version = %X\n", + id_vendor, id_product, bcd_device); + + /* PID_1 is used for firmware downloading only */ + if (id_product == USB8797_PID_1) + card->usb_boot_state = USB8797_FW_DNLD; + else + card->usb_boot_state = USB8797_FW_READY; + + card->udev = udev; + card->intf = intf; + + pr_debug("info: bcdUSB=%#x Device Class=%#x SubClass=%#x Protocl=%#x\n", + udev->descriptor.bcdUSB, udev->descriptor.bDeviceClass, + udev->descriptor.bDeviceSubClass, + udev->descriptor.bDeviceProtocol); + + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + epd = &iface_desc->endpoint[i].desc; + if (usb_endpoint_dir_in(epd) && + usb_endpoint_num(epd) == MWIFIEX_USB_EP_CMD_EVENT && + usb_endpoint_xfer_bulk(epd)) { + pr_debug("info: bulk IN: max pkt size: %d, addr: %d\n", + le16_to_cpu(epd->wMaxPacketSize), + epd->bEndpointAddress); + card->rx_cmd_ep = usb_endpoint_num(epd); + atomic_set(&card->rx_cmd_urb_pending, 0); + } + if (usb_endpoint_dir_in(epd) && + usb_endpoint_num(epd) == MWIFIEX_USB_EP_DATA && + usb_endpoint_xfer_bulk(epd)) { + pr_debug("info: bulk IN: max pkt size: %d, addr: %d\n", + le16_to_cpu(epd->wMaxPacketSize), + epd->bEndpointAddress); + card->rx_data_ep = usb_endpoint_num(epd); + atomic_set(&card->rx_data_urb_pending, 0); + } + if (usb_endpoint_dir_out(epd) && + usb_endpoint_num(epd) == MWIFIEX_USB_EP_DATA && + usb_endpoint_xfer_bulk(epd)) { + pr_debug("info: bulk OUT: max pkt size: %d, addr: %d\n", + le16_to_cpu(epd->wMaxPacketSize), + epd->bEndpointAddress); + card->tx_data_ep = usb_endpoint_num(epd); + atomic_set(&card->tx_data_urb_pending, 0); + } + if (usb_endpoint_dir_out(epd) && + usb_endpoint_num(epd) == MWIFIEX_USB_EP_CMD_EVENT && + usb_endpoint_xfer_bulk(epd)) { + pr_debug("info: bulk OUT: max pkt size: %d, addr: %d\n", + le16_to_cpu(epd->wMaxPacketSize), + epd->bEndpointAddress); + card->tx_cmd_ep = usb_endpoint_num(epd); + atomic_set(&card->tx_cmd_urb_pending, 0); + card->bulk_out_maxpktsize = + le16_to_cpu(epd->wMaxPacketSize); + } + } + + usb_set_intfdata(intf, card); + + ret = mwifiex_add_card(card, &add_remove_card_sem, &usb_ops, + MWIFIEX_USB); + if (ret) { + pr_err("%s: mwifiex_add_card failed: %d\n", __func__, ret); + usb_reset_device(udev); + kfree(card); + return ret; + } + + usb_get_dev(udev); + + return 0; +} + +/* Kernel needs to suspend all functions separately. Therefore all + * registered functions must have drivers with suspend and resume + * methods. Failing that the kernel simply removes the whole card. + * + * If already not suspended, this function allocates and sends a + * 'host sleep activate' request to the firmware and turns off the traffic. 
+ */ +static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct usb_card_rec *card = usb_get_intfdata(intf); + struct mwifiex_adapter *adapter; + int i; + + if (!card || !card->adapter) { + pr_err("%s: card or card->adapter is NULL\n", __func__); + return 0; + } + adapter = card->adapter; + + if (unlikely(adapter->is_suspended)) + dev_warn(adapter->dev, "Device already suspended\n"); + + mwifiex_enable_hs(adapter); + + /* 'is_suspended' flag indicates device is suspended. + * It must be set here before the usb_kill_urb() calls. Reason + * is in the complete handlers, urb->status(= -ENOENT) and + * this flag is used in combination to distinguish between a + * 'suspended' state and a 'disconnect' one. + */ + adapter->is_suspended = true; + + for (i = 0; i < adapter->priv_num; i++) + netif_carrier_off(adapter->priv[i]->netdev); + + if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb) + usb_kill_urb(card->rx_cmd.urb); + + if (atomic_read(&card->rx_data_urb_pending)) + for (i = 0; i < MWIFIEX_RX_DATA_URB; i++) + if (card->rx_data_list[i].urb) + usb_kill_urb(card->rx_data_list[i].urb); + + for (i = 0; i < MWIFIEX_TX_DATA_URB; i++) + if (card->tx_data_list[i].urb) + usb_kill_urb(card->tx_data_list[i].urb); + + if (card->tx_cmd.urb) + usb_kill_urb(card->tx_cmd.urb); + + return 0; +} + +/* Kernel needs to suspend all functions separately. Therefore all + * registered functions must have drivers with suspend and resume + * methods. Failing that the kernel simply removes the whole card. + * + * If already not resumed, this function turns on the traffic and + * sends a 'host sleep cancel' request to the firmware. + */ +static int mwifiex_usb_resume(struct usb_interface *intf) +{ + struct usb_card_rec *card = usb_get_intfdata(intf); + struct mwifiex_adapter *adapter; + int i; + + if (!card || !card->adapter) { + pr_err("%s: card or card->adapter is NULL\n", __func__); + return 0; + } + adapter = card->adapter; + + if (unlikely(!adapter->is_suspended)) { + dev_warn(adapter->dev, "Device already resumed\n"); + return 0; + } + + /* Indicate device resumed. 
The netdev queue will be resumed only + * after the urbs have been re-submitted + */ + adapter->is_suspended = false; + + if (!atomic_read(&card->rx_data_urb_pending)) + for (i = 0; i < MWIFIEX_RX_DATA_URB; i++) + mwifiex_usb_submit_rx_urb(&card->rx_data_list[i], + MWIFIEX_RX_DATA_BUF_SIZE); + + if (!atomic_read(&card->rx_cmd_urb_pending)) { + card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE); + if (card->rx_cmd.skb) + mwifiex_usb_submit_rx_urb(&card->rx_cmd, + MWIFIEX_RX_CMD_BUF_SIZE); + } + + for (i = 0; i < adapter->priv_num; i++) + if (adapter->priv[i]->media_connected) + netif_carrier_on(adapter->priv[i]->netdev); + + /* Disable Host Sleep */ + if (adapter->hs_activated) + mwifiex_cancel_hs(mwifiex_get_priv(adapter, + MWIFIEX_BSS_ROLE_ANY), + MWIFIEX_ASYNC_CMD); + +#ifdef CONFIG_PM + /* Resume handler may be called due to remote wakeup, + * force to exit suspend anyway + */ + usb_disable_autosuspend(card->udev); +#endif /* CONFIG_PM */ + + return 0; +} + +static void mwifiex_usb_disconnect(struct usb_interface *intf) +{ + struct usb_card_rec *card = usb_get_intfdata(intf); + struct mwifiex_adapter *adapter; + int i; + + if (!card || !card->adapter) { + pr_err("%s: card or card->adapter is NULL\n", __func__); + return; + } + + adapter = card->adapter; + if (!adapter->priv_num) + return; + + /* In case driver is removed when asynchronous FW downloading is + * in progress + */ + wait_for_completion(&adapter->fw_load); + + if (user_rmmod) { +#ifdef CONFIG_PM + if (adapter->is_suspended) + mwifiex_usb_resume(intf); +#endif + for (i = 0; i < adapter->priv_num; i++) + if ((GET_BSS_ROLE(adapter->priv[i]) == + MWIFIEX_BSS_ROLE_STA) && + adapter->priv[i]->media_connected) + mwifiex_deauthenticate(adapter->priv[i], NULL); + + mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter, + MWIFIEX_BSS_ROLE_ANY), + MWIFIEX_FUNC_SHUTDOWN); + } + + mwifiex_usb_free(card); + + dev_dbg(adapter->dev, "%s: removing card\n", __func__); + mwifiex_remove_card(adapter, &add_remove_card_sem); + + usb_set_intfdata(intf, NULL); + usb_put_dev(interface_to_usbdev(intf)); + kfree(card); + + return; +} + +static struct usb_driver mwifiex_usb_driver = { + .name = usbdriver_name, + .probe = mwifiex_usb_probe, + .disconnect = mwifiex_usb_disconnect, + .id_table = mwifiex_usb_table, + .suspend = mwifiex_usb_suspend, + .resume = mwifiex_usb_resume, + .supports_autosuspend = 1, +}; + +static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter) +{ + struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; + int i; + + card->tx_cmd.adapter = adapter; + card->tx_cmd.ep = card->tx_cmd_ep; + + card->tx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL); + if (!card->tx_cmd.urb) { + dev_err(adapter->dev, "tx_cmd.urb allocation failed\n"); + return -ENOMEM; + } + + card->tx_data_ix = 0; + + for (i = 0; i < MWIFIEX_TX_DATA_URB; i++) { + card->tx_data_list[i].adapter = adapter; + card->tx_data_list[i].ep = card->tx_data_ep; + + card->tx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL); + if (!card->tx_data_list[i].urb) { + dev_err(adapter->dev, + "tx_data_list[] urb allocation failed\n"); + return -ENOMEM; + } + } + + return 0; +} + +static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter) +{ + struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; + int i; + + card->rx_cmd.adapter = adapter; + card->rx_cmd.ep = card->rx_cmd_ep; + + card->rx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL); + if (!card->rx_cmd.urb) { + dev_err(adapter->dev, "rx_cmd.urb allocation failed\n"); + return -ENOMEM; + } + + card->rx_cmd.skb = 
dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE); + if (!card->rx_cmd.skb) { + dev_err(adapter->dev, "rx_cmd.skb allocation failed\n"); + return -ENOMEM; + } + + if (mwifiex_usb_submit_rx_urb(&card->rx_cmd, MWIFIEX_RX_CMD_BUF_SIZE)) + return -1; + + for (i = 0; i < MWIFIEX_RX_DATA_URB; i++) { + card->rx_data_list[i].adapter = adapter; + card->rx_data_list[i].ep = card->rx_data_ep; + + card->rx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL); + if (!card->rx_data_list[i].urb) { + dev_err(adapter->dev, + "rx_data_list[] urb allocation failed\n"); + return -1; + } + if (mwifiex_usb_submit_rx_urb(&card->rx_data_list[i], + MWIFIEX_RX_DATA_BUF_SIZE)) + return -1; + } + + return 0; +} + +static int mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf, + u32 *len, u8 ep, u32 timeout) +{ + struct usb_card_rec *card = adapter->card; + int actual_length, ret; + + if (!(*len % card->bulk_out_maxpktsize)) + (*len)++; + + /* Send the data block */ + ret = usb_bulk_msg(card->udev, usb_sndbulkpipe(card->udev, ep), pbuf, + *len, &actual_length, timeout); + if (ret) { + dev_err(adapter->dev, "usb_bulk_msg for tx failed: %d\n", ret); + ret = -1; + } + + *len = actual_length; + + return ret; +} + +static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf, + u32 *len, u8 ep, u32 timeout) +{ + struct usb_card_rec *card = adapter->card; + int actual_length, ret; + + /* Receive the data response */ + ret = usb_bulk_msg(card->udev, usb_rcvbulkpipe(card->udev, ep), pbuf, + *len, &actual_length, timeout); + if (ret) { + dev_err(adapter->dev, "usb_bulk_msg for rx failed: %d\n", ret); + ret = -1; + } + + *len = actual_length; + + return ret; +} + +/* This function write a command/data packet to card. */ +static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep, + struct sk_buff *skb, + struct mwifiex_tx_param *tx_param) +{ + struct usb_card_rec *card = adapter->card; + struct urb_context *context; + u8 *data = (u8 *)skb->data; + struct urb *tx_urb; + + if (adapter->is_suspended) { + dev_err(adapter->dev, + "%s: not allowed while suspended\n", __func__); + return -1; + } + + if (adapter->surprise_removed) { + dev_err(adapter->dev, "%s: device removed\n", __func__); + return -1; + } + + if (ep == card->tx_data_ep && + atomic_read(&card->tx_data_urb_pending) >= MWIFIEX_TX_DATA_URB) { + return -EBUSY; + } + + dev_dbg(adapter->dev, "%s: ep=%d\n", __func__, ep); + + if (ep == card->tx_cmd_ep) { + context = &card->tx_cmd; + } else { + if (card->tx_data_ix >= MWIFIEX_TX_DATA_URB) + card->tx_data_ix = 0; + context = &card->tx_data_list[card->tx_data_ix++]; + } + + context->adapter = adapter; + context->ep = ep; + context->skb = skb; + tx_urb = context->urb; + + usb_fill_bulk_urb(tx_urb, card->udev, usb_sndbulkpipe(card->udev, ep), + data, skb->len, mwifiex_usb_tx_complete, + (void *)context); + + tx_urb->transfer_flags |= URB_ZERO_PACKET; + + if (ep == card->tx_cmd_ep) + atomic_inc(&card->tx_cmd_urb_pending); + else + atomic_inc(&card->tx_data_urb_pending); + + if (usb_submit_urb(tx_urb, GFP_ATOMIC)) { + dev_err(adapter->dev, "%s: usb_submit_urb failed\n", __func__); + if (ep == card->tx_cmd_ep) { + atomic_dec(&card->tx_cmd_urb_pending); + } else { + atomic_dec(&card->tx_data_urb_pending); + if (card->tx_data_ix) + card->tx_data_ix--; + else + card->tx_data_ix = MWIFIEX_TX_DATA_URB; + } + + return -1; + } else { + if (ep == card->tx_data_ep && + atomic_read(&card->tx_data_urb_pending) == + MWIFIEX_TX_DATA_URB) + return -ENOSR; + } + + return -EINPROGRESS; +} + +/* This function register 
usb device and initialize parameter. */ +static int mwifiex_register_dev(struct mwifiex_adapter *adapter) +{ + struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; + + card->adapter = adapter; + adapter->dev = &card->udev->dev; + strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME); + + return 0; +} + +/* This function reads one block of firmware data. */ +static int mwifiex_get_fw_data(struct mwifiex_adapter *adapter, + u32 offset, u32 len, u8 *buf) +{ + if (!buf || !len) + return -1; + + if (offset + len > adapter->firmware->size) + return -1; + + memcpy(buf, adapter->firmware->data + offset, len); + + return 0; +} + +static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, + struct mwifiex_fw_image *fw) +{ + int ret = 0; + u8 *firmware = fw->fw_buf, *recv_buff; + u32 retries = USB8797_FW_MAX_RETRY, dlen; + u32 fw_seqnum = 0, tlen = 0, dnld_cmd = 0; + struct fw_data *fwdata; + struct fw_sync_header sync_fw; + u8 check_winner = 1; + + if (!firmware) { + dev_err(adapter->dev, + "No firmware image found! Terminating download\n"); + ret = -1; + goto fw_exit; + } + + /* Allocate memory for transmit */ + fwdata = kzalloc(FW_DNLD_TX_BUF_SIZE, GFP_KERNEL); + if (!fwdata) + goto fw_exit; + + /* Allocate memory for receive */ + recv_buff = kzalloc(FW_DNLD_RX_BUF_SIZE, GFP_KERNEL); + if (!recv_buff) + goto cleanup; + + do { + /* Send pseudo data to check winner status first */ + if (check_winner) { + memset(&fwdata->fw_hdr, 0, sizeof(struct fw_header)); + dlen = 0; + } else { + /* copy the header of the fw_data to get the length */ + if (firmware) + memcpy(&fwdata->fw_hdr, &firmware[tlen], + sizeof(struct fw_header)); + else + mwifiex_get_fw_data(adapter, tlen, + sizeof(struct fw_header), + (u8 *)&fwdata->fw_hdr); + + dlen = le32_to_cpu(fwdata->fw_hdr.data_len); + dnld_cmd = le32_to_cpu(fwdata->fw_hdr.dnld_cmd); + tlen += sizeof(struct fw_header); + + if (firmware) + memcpy(fwdata->data, &firmware[tlen], dlen); + else + mwifiex_get_fw_data(adapter, tlen, dlen, + (u8 *)fwdata->data); + + fwdata->seq_num = cpu_to_le32(fw_seqnum); + tlen += dlen; + } + + /* If the send/receive fails or CRC occurs then retry */ + while (retries--) { + u8 *buf = (u8 *)fwdata; + u32 len = FW_DATA_XMIT_SIZE; + + /* send the firmware block */ + ret = mwifiex_write_data_sync(adapter, buf, &len, + MWIFIEX_USB_EP_CMD_EVENT, + MWIFIEX_USB_TIMEOUT); + if (ret) { + dev_err(adapter->dev, + "write_data_sync: failed: %d\n", ret); + continue; + } + + buf = recv_buff; + len = FW_DNLD_RX_BUF_SIZE; + + /* Receive the firmware block response */ + ret = mwifiex_read_data_sync(adapter, buf, &len, + MWIFIEX_USB_EP_CMD_EVENT, + MWIFIEX_USB_TIMEOUT); + if (ret) { + dev_err(adapter->dev, + "read_data_sync: failed: %d\n", ret); + continue; + } + + memcpy(&sync_fw, recv_buff, + sizeof(struct fw_sync_header)); + + /* check 1st firmware block resp for highest bit set */ + if (check_winner) { + if (le32_to_cpu(sync_fw.cmd) & 0x80000000) { + dev_warn(adapter->dev, + "USB is not the winner %#x\n", + sync_fw.cmd); + + /* returning success */ + ret = 0; + goto cleanup; + } + + dev_dbg(adapter->dev, + "USB is the winner, start to download FW\n"); + + check_winner = 0; + break; + } + + /* check the firmware block response for CRC errors */ + if (sync_fw.cmd) { + dev_err(adapter->dev, + "FW received block with CRC %#x\n", + sync_fw.cmd); + ret = -1; + continue; + } + + retries = USB8797_FW_MAX_RETRY; + break; + } + fw_seqnum++; + } while ((dnld_cmd != FW_HAS_LAST_BLOCK) && retries); + +cleanup: + dev_dbg(adapter->dev, "%s: %d bytes 
downloaded\n", __func__, tlen); + + kfree(recv_buff); + kfree(fwdata); + + if (retries) + ret = 0; +fw_exit: + return ret; +} + +static int mwifiex_usb_dnld_fw(struct mwifiex_adapter *adapter, + struct mwifiex_fw_image *fw) +{ + int ret; + struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; + + if (card->usb_boot_state == USB8797_FW_DNLD) { + ret = mwifiex_prog_fw_w_helper(adapter, fw); + if (ret) + return -1; + + /* Boot state changes after successful firmware download */ + if (card->usb_boot_state == USB8797_FW_DNLD) + return -1; + } + + ret = mwifiex_usb_rx_init(adapter); + if (!ret) + ret = mwifiex_usb_tx_init(adapter); + + return ret; +} + +static void mwifiex_submit_rx_urb(struct mwifiex_adapter *adapter, u8 ep) +{ + struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; + + skb_push(card->rx_cmd.skb, INTF_HEADER_LEN); + if ((ep == card->rx_cmd_ep) && + (!atomic_read(&card->rx_cmd_urb_pending))) + mwifiex_usb_submit_rx_urb(&card->rx_cmd, + MWIFIEX_RX_CMD_BUF_SIZE); + + return; +} + +static int mwifiex_usb_cmd_event_complete(struct mwifiex_adapter *adapter, + struct sk_buff *skb) +{ + atomic_dec(&adapter->rx_pending); + mwifiex_submit_rx_urb(adapter, MWIFIEX_USB_EP_CMD_EVENT); + + return 0; +} + +static int mwifiex_usb_data_complete(struct mwifiex_adapter *adapter, + struct sk_buff *skb) +{ + atomic_dec(&adapter->rx_pending); + dev_kfree_skb_any(skb); + + return 0; +} + +/* This function wakes up the card. */ +static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) +{ + /* Simulation of HS_AWAKE event */ + adapter->pm_wakeup_fw_try = false; + adapter->pm_wakeup_card_req = false; + adapter->ps_state = PS_STATE_AWAKE; + + return 0; +} + +static struct mwifiex_if_ops usb_ops = { + .register_dev = mwifiex_register_dev, + .wakeup = mwifiex_pm_wakeup_card, + .wakeup_complete = mwifiex_pm_wakeup_card_complete, + + /* USB specific */ + .dnld_fw = mwifiex_usb_dnld_fw, + .cmdrsp_complete = mwifiex_usb_cmd_event_complete, + .event_complete = mwifiex_usb_cmd_event_complete, + .data_complete = mwifiex_usb_data_complete, + .host_to_card = mwifiex_usb_host_to_card, +}; + +/* This function initializes the USB driver module. + * + * This initiates the semaphore and registers the device with + * USB bus. + */ +static int mwifiex_usb_init_module(void) +{ + int ret; + + pr_debug("Marvell USB8797 Driver\n"); + + sema_init(&add_remove_card_sem, 1); + + ret = usb_register(&mwifiex_usb_driver); + if (ret) + pr_err("Driver register failed!\n"); + else + pr_debug("info: Driver registered successfully!\n"); + + return ret; +} + +/* This function cleans up the USB driver. + * + * The following major steps are followed in .disconnect for cleanup: + * - Resume the device if its suspended + * - Disconnect the device if connected + * - Shutdown the firmware + * - Unregister the device from USB bus. 
+ */ +static void mwifiex_usb_cleanup_module(void) +{ + if (!down_interruptible(&add_remove_card_sem)) + up(&add_remove_card_sem); + + /* set the flag as user is removing this module */ + user_rmmod = 1; + + usb_deregister(&mwifiex_usb_driver); +} + +module_init(mwifiex_usb_init_module); +module_exit(mwifiex_usb_cleanup_module); + +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_DESCRIPTION("Marvell WiFi-Ex USB Driver version" USB_VERSION); +MODULE_VERSION(USB_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_FIRMWARE("mrvl/usb8797_uapsta.bin"); diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h new file mode 100644 index 00000000000..98c4316cd1a --- /dev/null +++ b/drivers/net/wireless/mwifiex/usb.h @@ -0,0 +1,99 @@ +/* + * This file contains definitions for mwifiex USB interface driver. + * + * Copyright (C) 2012, Marvell International Ltd. + * + * This software file (the "File") is distributed by Marvell International + * Ltd. under the terms of the GNU General Public License Version 2, June 1991 + * (the "License"). You may use, redistribute and/or modify this File in + * accordance with the terms and conditions of the License, a copy of which + * is available by writing to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the + * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. + * + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE + * ARE EXPRESSLY DISCLAIMED. The License provides additional details about + * this warranty disclaimer. + */ + +#ifndef _MWIFIEX_USB_H +#define _MWIFIEX_USB_H + +#include <linux/usb.h> + +#define USB8797_VID 0x1286 +#define USB8797_PID_1 0x2043 +#define USB8797_PID_2 0x2044 + +#define USB8797_FW_DNLD 1 +#define USB8797_FW_READY 2 +#define USB8797_FW_MAX_RETRY 3 + +#define MWIFIEX_TX_DATA_URB 6 +#define MWIFIEX_RX_DATA_URB 6 +#define MWIFIEX_USB_TIMEOUT 100 + +#define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin" + +#define FW_DNLD_TX_BUF_SIZE 620 +#define FW_DNLD_RX_BUF_SIZE 2048 +#define FW_HAS_LAST_BLOCK 0x00000004 + +#define FW_DATA_XMIT_SIZE \ + (sizeof(struct fw_header) + dlen + sizeof(u32)) + +struct urb_context { + struct mwifiex_adapter *adapter; + struct sk_buff *skb; + struct urb *urb; + u8 ep; +}; + +struct usb_card_rec { + struct mwifiex_adapter *adapter; + struct usb_device *udev; + struct usb_interface *intf; + u8 rx_cmd_ep; + struct urb_context rx_cmd; + atomic_t rx_cmd_urb_pending; + struct urb_context rx_data_list[MWIFIEX_RX_DATA_URB]; + u8 usb_boot_state; + u8 rx_data_ep; + atomic_t rx_data_urb_pending; + u8 tx_data_ep; + u8 tx_cmd_ep; + atomic_t tx_data_urb_pending; + atomic_t tx_cmd_urb_pending; + int bulk_out_maxpktsize; + struct urb_context tx_cmd; + int tx_data_ix; + struct urb_context tx_data_list[MWIFIEX_TX_DATA_URB]; +}; + +struct fw_header { + __le32 dnld_cmd; + __le32 base_addr; + __le32 data_len; + __le32 crc; +}; + +struct fw_sync_header { + __le32 cmd; + __le32 seq_num; +}; + +struct fw_data { + struct fw_header fw_hdr; + __le32 seq_num; + u8 data[1]; +}; + +/* This function is called after the card has woken up. 
*/ +static inline int +mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter) +{ + return 0; +} + +#endif /*_MWIFIEX_USB_H */ diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c index 6b399976d6c..2864c74bdb6 100644 --- a/drivers/net/wireless/mwifiex/util.c +++ b/drivers/net/wireless/mwifiex/util.c @@ -167,6 +167,28 @@ int mwifiex_recv_packet(struct mwifiex_adapter *adapter, struct sk_buff *skb) skb->dev = priv->netdev; skb->protocol = eth_type_trans(skb, priv->netdev); skb->ip_summed = CHECKSUM_NONE; + + /* This is required only in case of 11n and USB as we alloc + * a buffer of 4K only if its 11N (to be able to receive 4K + * AMSDU packets). In case of SD we allocate buffers based + * on the size of packet and hence this is not needed. + * + * Modifying the truesize here as our allocation for each + * skb is 4K but we only receive 2K packets and this cause + * the kernel to start dropping packets in case where + * application has allocated buffer based on 2K size i.e. + * if there a 64K packet received (in IP fragments and + * application allocates 64K to receive this packet but + * this packet would almost double up because we allocate + * each 1.5K fragment in 4K and pass it up. As soon as the + * 64K limit hits kernel will start to drop rest of the + * fragments. Currently we fail the Filesndl-ht.scr script + * for UDP, hence this fix + */ + if ((adapter->iface_type == MWIFIEX_USB) && + (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE)) + skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE); + priv->stats.rx_bytes += skb->len; priv->stats.rx_packets++; if (in_interrupt()) diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 5a7316c6f12..429a1dee2d2 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -1120,11 +1120,19 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv, tx_info = MWIFIEX_SKB_TXCB(skb); spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); - tx_param.next_pkt_len = - ((skb_next) ? skb_next->len + - sizeof(struct txpd) : 0); - ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, skb, - &tx_param); + + if (adapter->iface_type == MWIFIEX_USB) { + adapter->data_sent = true; + ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA, + skb, NULL); + } else { + tx_param.next_pkt_len = + ((skb_next) ? 
skb_next->len + + sizeof(struct txpd) : 0); + ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, + skb, &tx_param); + } + switch (ret) { case -EBUSY: dev_dbg(adapter->dev, "data: -EBUSY is returned\n"); diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index b48674b577e..cf7bdc66f82 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -1235,7 +1235,7 @@ mwl8k_capture_bssid(struct mwl8k_priv *priv, struct ieee80211_hdr *wh) { return priv->capture_beacon && ieee80211_is_beacon(wh->frame_control) && - !compare_ether_addr(wh->addr3, priv->capture_bssid); + ether_addr_equal(wh->addr3, priv->capture_bssid); } static inline void mwl8k_save_beacon(struct ieee80211_hw *hw, @@ -5893,18 +5893,7 @@ static struct pci_driver mwl8k_driver = { .shutdown = __devexit_p(mwl8k_shutdown), }; -static int __init mwl8k_init(void) -{ - return pci_register_driver(&mwl8k_driver); -} - -static void __exit mwl8k_exit(void) -{ - pci_unregister_driver(&mwl8k_driver); -} - -module_init(mwl8k_init); -module_exit(mwl8k_exit); +module_pci_driver(mwl8k_driver); MODULE_DESCRIPTION(MWL8K_DESC); MODULE_VERSION(MWL8K_VERSION); diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c index 4df8cf64b56..400a3521764 100644 --- a/drivers/net/wireless/orinoco/fw.c +++ b/drivers/net/wireless/orinoco/fw.c @@ -379,11 +379,8 @@ void orinoco_cache_fw(struct orinoco_private *priv, int ap) void orinoco_uncache_fw(struct orinoco_private *priv) { - if (priv->cached_pri_fw) - release_firmware(priv->cached_pri_fw); - if (priv->cached_fw) - release_firmware(priv->cached_fw); - + release_firmware(priv->cached_pri_fw); + release_firmware(priv->cached_fw); priv->cached_pri_fw = NULL; priv->cached_fw = NULL; } diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c index ee8af1f047c..7cffea795ad 100644 --- a/drivers/net/wireless/p54/main.c +++ b/drivers/net/wireless/p54/main.c @@ -796,11 +796,14 @@ int p54_register_common(struct ieee80211_hw *dev, struct device *pdev) dev_err(pdev, "Cannot register device (%d).\n", err); return err; } + priv->registered = true; #ifdef CONFIG_P54_LEDS err = p54_init_leds(priv); - if (err) + if (err) { + p54_unregister_common(dev); return err; + } #endif /* CONFIG_P54_LEDS */ dev_info(pdev, "is registered as '%s'\n", wiphy_name(dev->wiphy)); @@ -840,7 +843,11 @@ void p54_unregister_common(struct ieee80211_hw *dev) p54_unregister_leds(priv); #endif /* CONFIG_P54_LEDS */ - ieee80211_unregister_hw(dev); + if (priv->registered) { + priv->registered = false; + ieee80211_unregister_hw(dev); + } + mutex_destroy(&priv->conf_mutex); mutex_destroy(&priv->eeprom_mutex); } diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h index 452fa3a64aa..40b401ed684 100644 --- a/drivers/net/wireless/p54/p54.h +++ b/drivers/net/wireless/p54/p54.h @@ -173,6 +173,7 @@ struct p54_common { struct sk_buff_head tx_pending; struct sk_buff_head tx_queue; struct mutex conf_mutex; + bool registered; /* memory management (as seen by the firmware) */ u32 rx_start; diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index 45df728183f..89318adc8c7 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c @@ -667,15 +667,4 @@ static struct pci_driver p54p_driver = { .driver.pm = P54P_PM_OPS, }; -static int __init p54p_init(void) -{ - return pci_register_driver(&p54p_driver); -} - -static void __exit p54p_exit(void) -{ - pci_unregister_driver(&p54p_driver); -} - 
-module_init(p54p_init); -module_exit(p54p_exit); +module_pci_driver(p54p_driver); diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index f4d28c39aac..e1eac830e2f 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -117,21 +117,18 @@ static const struct { u32 intf; enum p54u_hw_type type; const char *fw; - const char *fw_legacy; char hw[20]; } p54u_fwlist[__NUM_P54U_HWTYPES] = { { .type = P54U_NET2280, .intf = FW_LM86, .fw = "isl3886usb", - .fw_legacy = "isl3890usb", .hw = "ISL3886 + net2280", }, { .type = P54U_3887, .intf = FW_LM87, .fw = "isl3887usb", - .fw_legacy = "isl3887usb_bare", .hw = "ISL3887", }, }; @@ -208,6 +205,16 @@ static void p54u_free_urbs(struct ieee80211_hw *dev) usb_kill_anchored_urbs(&priv->submitted); } +static void p54u_stop(struct ieee80211_hw *dev) +{ + /* + * TODO: figure out how to reliably stop the 3887 and net2280 so + * the hardware is still usable next time we want to start it. + * until then, we just stop listening to the hardware.. + */ + p54u_free_urbs(dev); +} + static int p54u_init_urbs(struct ieee80211_hw *dev) { struct p54u_priv *priv = dev->priv; @@ -257,6 +264,16 @@ static int p54u_init_urbs(struct ieee80211_hw *dev) return ret; } +static int p54u_open(struct ieee80211_hw *dev) +{ + /* + * TODO: Because we don't know how to reliably stop the 3887 and + * the isl3886+net2280, other than brutally cut off all + * communications. We have to reinitialize the urbs on every start. + */ + return p54u_init_urbs(dev); +} + static __le32 p54u_lm87_chksum(const __le32 *data, size_t length) { u32 chk = 0; @@ -836,70 +853,137 @@ fail: return err; } -static int p54u_load_firmware(struct ieee80211_hw *dev) +static int p54_find_type(struct p54u_priv *priv) { - struct p54u_priv *priv = dev->priv; - int err, i; - - BUILD_BUG_ON(ARRAY_SIZE(p54u_fwlist) != __NUM_P54U_HWTYPES); + int i; for (i = 0; i < __NUM_P54U_HWTYPES; i++) if (p54u_fwlist[i].type == priv->hw_type) break; - if (i == __NUM_P54U_HWTYPES) return -EOPNOTSUPP; - err = request_firmware(&priv->fw, p54u_fwlist[i].fw, &priv->udev->dev); - if (err) { - dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s " - "(%d)!\n", p54u_fwlist[i].fw, err); + return i; +} - err = request_firmware(&priv->fw, p54u_fwlist[i].fw_legacy, - &priv->udev->dev); - if (err) - return err; - } +static int p54u_start_ops(struct p54u_priv *priv) +{ + struct ieee80211_hw *dev = priv->common.hw; + int ret; - err = p54_parse_firmware(dev, priv->fw); - if (err) - goto out; + ret = p54_parse_firmware(dev, priv->fw); + if (ret) + goto err_out; + + ret = p54_find_type(priv); + if (ret < 0) + goto err_out; - if (priv->common.fw_interface != p54u_fwlist[i].intf) { + if (priv->common.fw_interface != p54u_fwlist[ret].intf) { dev_err(&priv->udev->dev, "wrong firmware, please get " "a firmware for \"%s\" and try again.\n", - p54u_fwlist[i].hw); - err = -EINVAL; + p54u_fwlist[ret].hw); + ret = -ENODEV; + goto err_out; } -out: - if (err) - release_firmware(priv->fw); + ret = priv->upload_fw(dev); + if (ret) + goto err_out; - return err; + ret = p54u_open(dev); + if (ret) + goto err_out; + + ret = p54_read_eeprom(dev); + if (ret) + goto err_stop; + + p54u_stop(dev); + + ret = p54_register_common(dev, &priv->udev->dev); + if (ret) + goto err_stop; + + return 0; + +err_stop: + p54u_stop(dev); + +err_out: + /* + * p54u_disconnect will do the rest of the + * cleanup + */ + return ret; } -static int p54u_open(struct ieee80211_hw *dev) +static void p54u_load_firmware_cb(const struct 
firmware *firmware, + void *context) { - struct p54u_priv *priv = dev->priv; + struct p54u_priv *priv = context; + struct usb_device *udev = priv->udev; int err; - err = p54u_init_urbs(dev); - if (err) { - return err; + complete(&priv->fw_wait_load); + if (firmware) { + priv->fw = firmware; + err = p54u_start_ops(priv); + } else { + err = -ENOENT; + dev_err(&udev->dev, "Firmware not found.\n"); } - priv->common.open = p54u_init_urbs; + if (err) { + struct device *parent = priv->udev->dev.parent; - return 0; + dev_err(&udev->dev, "failed to initialize device (%d)\n", err); + + if (parent) + device_lock(parent); + + device_release_driver(&udev->dev); + /* + * At this point p54u_disconnect has already freed + * the "priv" context. Do not use it anymore! + */ + priv = NULL; + + if (parent) + device_unlock(parent); + } + + usb_put_dev(udev); } -static void p54u_stop(struct ieee80211_hw *dev) +static int p54u_load_firmware(struct ieee80211_hw *dev, + struct usb_interface *intf) { - /* TODO: figure out how to reliably stop the 3887 and net2280 so - the hardware is still usable next time we want to start it. - until then, we just stop listening to the hardware.. */ - p54u_free_urbs(dev); + struct usb_device *udev = interface_to_usbdev(intf); + struct p54u_priv *priv = dev->priv; + struct device *device = &udev->dev; + int err, i; + + BUILD_BUG_ON(ARRAY_SIZE(p54u_fwlist) != __NUM_P54U_HWTYPES); + + init_completion(&priv->fw_wait_load); + i = p54_find_type(priv); + if (i < 0) + return i; + + dev_info(&priv->udev->dev, "Loading firmware file %s\n", + p54u_fwlist[i].fw); + + usb_get_dev(udev); + err = request_firmware_nowait(THIS_MODULE, 1, p54u_fwlist[i].fw, + device, GFP_KERNEL, priv, + p54u_load_firmware_cb); + if (err) { + dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s " + "(%d)!\n", p54u_fwlist[i].fw, err); + } + + return err; } static int __devinit p54u_probe(struct usb_interface *intf, @@ -969,33 +1053,7 @@ static int __devinit p54u_probe(struct usb_interface *intf, priv->common.tx = p54u_tx_net2280; priv->upload_fw = p54u_upload_firmware_net2280; } - err = p54u_load_firmware(dev); - if (err) - goto err_free_dev; - - err = priv->upload_fw(dev); - if (err) - goto err_free_fw; - - p54u_open(dev); - err = p54_read_eeprom(dev); - p54u_stop(dev); - if (err) - goto err_free_fw; - - err = p54_register_common(dev, &udev->dev); - if (err) - goto err_free_fw; - - return 0; - -err_free_fw: - release_firmware(priv->fw); - -err_free_dev: - p54_free_common(dev); - usb_set_intfdata(intf, NULL); - usb_put_dev(udev); + err = p54u_load_firmware(dev, intf); return err; } @@ -1007,9 +1065,10 @@ static void __devexit p54u_disconnect(struct usb_interface *intf) if (!dev) return; + priv = dev->priv; + wait_for_completion(&priv->fw_wait_load); p54_unregister_common(dev); - priv = dev->priv; usb_put_dev(interface_to_usbdev(intf)); release_firmware(priv->fw); p54_free_common(dev); @@ -1072,7 +1131,7 @@ static struct usb_driver p54u_driver = { .name = "p54usb", .id_table = p54u_table, .probe = p54u_probe, - .disconnect = p54u_disconnect, + .disconnect = __devexit_p(p54u_disconnect), .pre_reset = p54u_pre_reset, .post_reset = p54u_post_reset, #ifdef CONFIG_PM diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h index ed4034ade59..d273be7272b 100644 --- a/drivers/net/wireless/p54/p54usb.h +++ b/drivers/net/wireless/p54/p54usb.h @@ -143,6 +143,9 @@ struct p54u_priv { struct sk_buff_head rx_queue; struct usb_anchor submitted; const struct firmware *fw; + + /* asynchronous firmware 
callback */ + struct completion fw_wait_load; }; #endif /* P54USB_H */ diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c index a08a6f0e4dd..82a1cac920b 100644 --- a/drivers/net/wireless/p54/txrx.c +++ b/drivers/net/wireless/p54/txrx.c @@ -308,7 +308,7 @@ static void p54_pspoll_workaround(struct p54_common *priv, struct sk_buff *skb) return; /* only consider beacons from the associated BSSID */ - if (compare_ether_addr(hdr->addr3, priv->bssid)) + if (!ether_addr_equal(hdr->addr3, priv->bssid)) return; tim = p54_find_ie(skb, WLAN_EID_TIM); @@ -914,8 +914,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) txhdr->hw_queue = queue; txhdr->backlog = priv->tx_stats[queue].len - 1; memset(txhdr->durations, 0, sizeof(txhdr->durations)); - txhdr->tx_antenna = ((info->antenna_sel_tx == 0) ? - 2 : info->antenna_sel_tx - 1) & priv->tx_diversity_mask; + txhdr->tx_antenna = 2 & priv->tx_diversity_mask; if (priv->rxhw == 5) { txhdr->longbow.cts_rate = cts_rate; txhdr->longbow.output_power = cpu_to_le16(priv->output_power); diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c index 9b796cae4af..a01606b36e0 100644 --- a/drivers/net/wireless/prism54/oid_mgt.c +++ b/drivers/net/wireless/prism54/oid_mgt.c @@ -693,8 +693,6 @@ mgt_update_addr(islpci_private *priv) return ret; } -#define VEC_SIZE(a) ARRAY_SIZE(a) - int mgt_commit(islpci_private *priv) { @@ -704,10 +702,10 @@ mgt_commit(islpci_private *priv) if (islpci_get_state(priv) < PRV_STATE_INIT) return 0; - rvalue = mgt_commit_list(priv, commit_part1, VEC_SIZE(commit_part1)); + rvalue = mgt_commit_list(priv, commit_part1, ARRAY_SIZE(commit_part1)); if (priv->iw_mode != IW_MODE_MONITOR) - rvalue |= mgt_commit_list(priv, commit_part2, VEC_SIZE(commit_part2)); + rvalue |= mgt_commit_list(priv, commit_part2, ARRAY_SIZE(commit_part2)); u = OID_INL_MODE; rvalue |= mgt_commit_list(priv, &u, 1); diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index d66e2980bc2..b91d1bb30b4 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -88,49 +88,6 @@ module_param_named(workaround_interval, modparam_workaround_interval, MODULE_PARM_DESC(workaround_interval, "set stall workaround interval in msecs (0=disabled) (default: 0)"); - -/* various RNDIS OID defs */ -#define OID_GEN_LINK_SPEED cpu_to_le32(0x00010107) -#define OID_GEN_RNDIS_CONFIG_PARAMETER cpu_to_le32(0x0001021b) - -#define OID_GEN_XMIT_OK cpu_to_le32(0x00020101) -#define OID_GEN_RCV_OK cpu_to_le32(0x00020102) -#define OID_GEN_XMIT_ERROR cpu_to_le32(0x00020103) -#define OID_GEN_RCV_ERROR cpu_to_le32(0x00020104) -#define OID_GEN_RCV_NO_BUFFER cpu_to_le32(0x00020105) - -#define OID_802_3_CURRENT_ADDRESS cpu_to_le32(0x01010102) -#define OID_802_3_MULTICAST_LIST cpu_to_le32(0x01010103) -#define OID_802_3_MAXIMUM_LIST_SIZE cpu_to_le32(0x01010104) - -#define OID_802_11_BSSID cpu_to_le32(0x0d010101) -#define OID_802_11_SSID cpu_to_le32(0x0d010102) -#define OID_802_11_INFRASTRUCTURE_MODE cpu_to_le32(0x0d010108) -#define OID_802_11_ADD_WEP cpu_to_le32(0x0d010113) -#define OID_802_11_REMOVE_WEP cpu_to_le32(0x0d010114) -#define OID_802_11_DISASSOCIATE cpu_to_le32(0x0d010115) -#define OID_802_11_AUTHENTICATION_MODE cpu_to_le32(0x0d010118) -#define OID_802_11_PRIVACY_FILTER cpu_to_le32(0x0d010119) -#define OID_802_11_BSSID_LIST_SCAN cpu_to_le32(0x0d01011a) -#define OID_802_11_ENCRYPTION_STATUS cpu_to_le32(0x0d01011b) -#define OID_802_11_ADD_KEY cpu_to_le32(0x0d01011d) 
-#define OID_802_11_REMOVE_KEY cpu_to_le32(0x0d01011e) -#define OID_802_11_ASSOCIATION_INFORMATION cpu_to_le32(0x0d01011f) -#define OID_802_11_CAPABILITY cpu_to_le32(0x0d010122) -#define OID_802_11_PMKID cpu_to_le32(0x0d010123) -#define OID_802_11_NETWORK_TYPES_SUPPORTED cpu_to_le32(0x0d010203) -#define OID_802_11_NETWORK_TYPE_IN_USE cpu_to_le32(0x0d010204) -#define OID_802_11_TX_POWER_LEVEL cpu_to_le32(0x0d010205) -#define OID_802_11_RSSI cpu_to_le32(0x0d010206) -#define OID_802_11_RSSI_TRIGGER cpu_to_le32(0x0d010207) -#define OID_802_11_FRAGMENTATION_THRESHOLD cpu_to_le32(0x0d010209) -#define OID_802_11_RTS_THRESHOLD cpu_to_le32(0x0d01020a) -#define OID_802_11_SUPPORTED_RATES cpu_to_le32(0x0d01020e) -#define OID_802_11_CONFIGURATION cpu_to_le32(0x0d010211) -#define OID_802_11_POWER_MODE cpu_to_le32(0x0d010216) -#define OID_802_11_BSSID_LIST cpu_to_le32(0x0d010217) - - /* Typical noise/maximum signal level values taken from ndiswrapper iw_ndis.h */ #define WL_NOISE -96 /* typical noise level in dBm */ #define WL_SIGMAX -32 /* typical maximum signal level in dBm */ @@ -149,12 +106,6 @@ MODULE_PARM_DESC(workaround_interval, #define BCM4320_DEFAULT_TXPOWER_DBM_50 10 #define BCM4320_DEFAULT_TXPOWER_DBM_25 7 - -/* codes for "status" field of completion messages */ -#define RNDIS_STATUS_ADAPTER_NOT_READY cpu_to_le32(0xc0010011) -#define RNDIS_STATUS_ADAPTER_NOT_OPEN cpu_to_le32(0xc0010012) - - /* Known device types */ #define RNDIS_UNKNOWN 0 #define RNDIS_BCM4320A 1 @@ -515,7 +466,7 @@ struct rndis_wlan_private { int infra_mode; bool connected; u8 bssid[ETH_ALEN]; - __le32 current_command_oid; + u32 current_command_oid; /* encryption stuff */ u8 encr_tx_key_index; @@ -670,63 +621,63 @@ static int rndis_akm_suite_to_key_mgmt(u32 akm_suite) } #ifdef DEBUG -static const char *oid_to_string(__le32 oid) +static const char *oid_to_string(u32 oid) { switch (oid) { #define OID_STR(oid) case oid: return(#oid) /* from rndis_host.h */ - OID_STR(OID_802_3_PERMANENT_ADDRESS); - OID_STR(OID_GEN_MAXIMUM_FRAME_SIZE); - OID_STR(OID_GEN_CURRENT_PACKET_FILTER); - OID_STR(OID_GEN_PHYSICAL_MEDIUM); + OID_STR(RNDIS_OID_802_3_PERMANENT_ADDRESS); + OID_STR(RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE); + OID_STR(RNDIS_OID_GEN_CURRENT_PACKET_FILTER); + OID_STR(RNDIS_OID_GEN_PHYSICAL_MEDIUM); /* from rndis_wlan.c */ - OID_STR(OID_GEN_LINK_SPEED); - OID_STR(OID_GEN_RNDIS_CONFIG_PARAMETER); - - OID_STR(OID_GEN_XMIT_OK); - OID_STR(OID_GEN_RCV_OK); - OID_STR(OID_GEN_XMIT_ERROR); - OID_STR(OID_GEN_RCV_ERROR); - OID_STR(OID_GEN_RCV_NO_BUFFER); - - OID_STR(OID_802_3_CURRENT_ADDRESS); - OID_STR(OID_802_3_MULTICAST_LIST); - OID_STR(OID_802_3_MAXIMUM_LIST_SIZE); - - OID_STR(OID_802_11_BSSID); - OID_STR(OID_802_11_SSID); - OID_STR(OID_802_11_INFRASTRUCTURE_MODE); - OID_STR(OID_802_11_ADD_WEP); - OID_STR(OID_802_11_REMOVE_WEP); - OID_STR(OID_802_11_DISASSOCIATE); - OID_STR(OID_802_11_AUTHENTICATION_MODE); - OID_STR(OID_802_11_PRIVACY_FILTER); - OID_STR(OID_802_11_BSSID_LIST_SCAN); - OID_STR(OID_802_11_ENCRYPTION_STATUS); - OID_STR(OID_802_11_ADD_KEY); - OID_STR(OID_802_11_REMOVE_KEY); - OID_STR(OID_802_11_ASSOCIATION_INFORMATION); - OID_STR(OID_802_11_CAPABILITY); - OID_STR(OID_802_11_PMKID); - OID_STR(OID_802_11_NETWORK_TYPES_SUPPORTED); - OID_STR(OID_802_11_NETWORK_TYPE_IN_USE); - OID_STR(OID_802_11_TX_POWER_LEVEL); - OID_STR(OID_802_11_RSSI); - OID_STR(OID_802_11_RSSI_TRIGGER); - OID_STR(OID_802_11_FRAGMENTATION_THRESHOLD); - OID_STR(OID_802_11_RTS_THRESHOLD); - OID_STR(OID_802_11_SUPPORTED_RATES); - OID_STR(OID_802_11_CONFIGURATION); - 
OID_STR(OID_802_11_POWER_MODE); - OID_STR(OID_802_11_BSSID_LIST); + OID_STR(RNDIS_OID_GEN_LINK_SPEED); + OID_STR(RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER); + + OID_STR(RNDIS_OID_GEN_XMIT_OK); + OID_STR(RNDIS_OID_GEN_RCV_OK); + OID_STR(RNDIS_OID_GEN_XMIT_ERROR); + OID_STR(RNDIS_OID_GEN_RCV_ERROR); + OID_STR(RNDIS_OID_GEN_RCV_NO_BUFFER); + + OID_STR(RNDIS_OID_802_3_CURRENT_ADDRESS); + OID_STR(RNDIS_OID_802_3_MULTICAST_LIST); + OID_STR(RNDIS_OID_802_3_MAXIMUM_LIST_SIZE); + + OID_STR(RNDIS_OID_802_11_BSSID); + OID_STR(RNDIS_OID_802_11_SSID); + OID_STR(RNDIS_OID_802_11_INFRASTRUCTURE_MODE); + OID_STR(RNDIS_OID_802_11_ADD_WEP); + OID_STR(RNDIS_OID_802_11_REMOVE_WEP); + OID_STR(RNDIS_OID_802_11_DISASSOCIATE); + OID_STR(RNDIS_OID_802_11_AUTHENTICATION_MODE); + OID_STR(RNDIS_OID_802_11_PRIVACY_FILTER); + OID_STR(RNDIS_OID_802_11_BSSID_LIST_SCAN); + OID_STR(RNDIS_OID_802_11_ENCRYPTION_STATUS); + OID_STR(RNDIS_OID_802_11_ADD_KEY); + OID_STR(RNDIS_OID_802_11_REMOVE_KEY); + OID_STR(RNDIS_OID_802_11_ASSOCIATION_INFORMATION); + OID_STR(RNDIS_OID_802_11_CAPABILITY); + OID_STR(RNDIS_OID_802_11_PMKID); + OID_STR(RNDIS_OID_802_11_NETWORK_TYPES_SUPPORTED); + OID_STR(RNDIS_OID_802_11_NETWORK_TYPE_IN_USE); + OID_STR(RNDIS_OID_802_11_TX_POWER_LEVEL); + OID_STR(RNDIS_OID_802_11_RSSI); + OID_STR(RNDIS_OID_802_11_RSSI_TRIGGER); + OID_STR(RNDIS_OID_802_11_FRAGMENTATION_THRESHOLD); + OID_STR(RNDIS_OID_802_11_RTS_THRESHOLD); + OID_STR(RNDIS_OID_802_11_SUPPORTED_RATES); + OID_STR(RNDIS_OID_802_11_CONFIGURATION); + OID_STR(RNDIS_OID_802_11_POWER_MODE); + OID_STR(RNDIS_OID_802_11_BSSID_LIST); #undef OID_STR } return "?"; } #else -static const char *oid_to_string(__le32 oid) +static const char *oid_to_string(u32 oid) { return "?"; } @@ -736,7 +687,7 @@ static const char *oid_to_string(__le32 oid) static int rndis_error_status(__le32 rndis_status) { int ret = -EINVAL; - switch (rndis_status) { + switch (le32_to_cpu(rndis_status)) { case RNDIS_STATUS_SUCCESS: ret = 0; break; @@ -755,7 +706,7 @@ static int rndis_error_status(__le32 rndis_status) return ret; } -static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len) +static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len) { struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev); union { @@ -782,9 +733,9 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len) mutex_lock(&priv->command_lock); memset(u.get, 0, sizeof *u.get); - u.get->msg_type = RNDIS_MSG_QUERY; + u.get->msg_type = cpu_to_le32(RNDIS_MSG_QUERY); u.get->msg_len = cpu_to_le32(sizeof *u.get); - u.get->oid = oid; + u.get->oid = cpu_to_le32(oid); priv->current_command_oid = oid; ret = rndis_command(dev, u.header, buflen); @@ -839,7 +790,7 @@ exit_unlock: return ret; } -static int rndis_set_oid(struct usbnet *dev, __le32 oid, const void *data, +static int rndis_set_oid(struct usbnet *dev, u32 oid, const void *data, int len) { struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev); @@ -866,9 +817,9 @@ static int rndis_set_oid(struct usbnet *dev, __le32 oid, const void *data, mutex_lock(&priv->command_lock); memset(u.set, 0, sizeof *u.set); - u.set->msg_type = RNDIS_MSG_SET; + u.set->msg_type = cpu_to_le32(RNDIS_MSG_SET); u.set->msg_len = cpu_to_le32(sizeof(*u.set) + len); - u.set->oid = oid; + u.set->oid = cpu_to_le32(oid); u.set->len = cpu_to_le32(len); u.set->offset = cpu_to_le32(sizeof(*u.set) - 8); u.set->handle = cpu_to_le32(0); @@ -908,7 +859,7 @@ static int rndis_reset(struct usbnet *usbdev) reset = (void *)priv->command_buffer; 
memset(reset, 0, sizeof(*reset)); - reset->msg_type = RNDIS_MSG_RESET; + reset->msg_type = cpu_to_le32(RNDIS_MSG_RESET); reset->msg_len = cpu_to_le32(sizeof(*reset)); priv->current_command_oid = 0; ret = rndis_command(usbdev, (void *)reset, CONTROL_BUFFER_SIZE); @@ -994,7 +945,7 @@ static int rndis_set_config_parameter(struct usbnet *dev, char *param, } #endif - ret = rndis_set_oid(dev, OID_GEN_RNDIS_CONFIG_PARAMETER, + ret = rndis_set_oid(dev, RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER, infobuf, info_len); if (ret != 0) netdev_dbg(dev->net, "setting rndis config parameter failed, %d\n", @@ -1031,9 +982,9 @@ static int rndis_start_bssid_list_scan(struct usbnet *usbdev) { __le32 tmp; - /* Note: OID_802_11_BSSID_LIST_SCAN clears internal BSS list. */ + /* Note: RNDIS_OID_802_11_BSSID_LIST_SCAN clears internal BSS list. */ tmp = cpu_to_le32(1); - return rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp, + return rndis_set_oid(usbdev, RNDIS_OID_802_11_BSSID_LIST_SCAN, &tmp, sizeof(tmp)); } @@ -1042,7 +993,8 @@ static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid) struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); int ret; - ret = rndis_set_oid(usbdev, OID_802_11_SSID, ssid, sizeof(*ssid)); + ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_SSID, + ssid, sizeof(*ssid)); if (ret < 0) { netdev_warn(usbdev->net, "setting SSID failed (%08X)\n", ret); return ret; @@ -1059,7 +1011,8 @@ static int set_bssid(struct usbnet *usbdev, const u8 *bssid) { int ret; - ret = rndis_set_oid(usbdev, OID_802_11_BSSID, bssid, ETH_ALEN); + ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_BSSID, + bssid, ETH_ALEN); if (ret < 0) { netdev_warn(usbdev->net, "setting BSSID[%pM] failed (%08X)\n", bssid, ret); @@ -1083,7 +1036,8 @@ static int get_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN]) int ret, len; len = ETH_ALEN; - ret = rndis_query_oid(usbdev, OID_802_11_BSSID, bssid, &len); + ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_BSSID, + bssid, &len); if (ret != 0) memset(bssid, 0, ETH_ALEN); @@ -1094,8 +1048,9 @@ static int get_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN]) static int get_association_info(struct usbnet *usbdev, struct ndis_80211_assoc_info *info, int len) { - return rndis_query_oid(usbdev, OID_802_11_ASSOCIATION_INFORMATION, - info, &len); + return rndis_query_oid(usbdev, + RNDIS_OID_802_11_ASSOCIATION_INFORMATION, + info, &len); } static bool is_associated(struct usbnet *usbdev) @@ -1119,7 +1074,9 @@ static int disassociate(struct usbnet *usbdev, bool reset_ssid) int i, ret = 0; if (priv->radio_on) { - ret = rndis_set_oid(usbdev, OID_802_11_DISASSOCIATE, NULL, 0); + ret = rndis_set_oid(usbdev, + RNDIS_OID_802_11_DISASSOCIATE, + NULL, 0); if (ret == 0) { priv->radio_on = false; netdev_dbg(usbdev->net, "%s(): radio_on = false\n", @@ -1181,8 +1138,9 @@ static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version, return -ENOTSUPP; tmp = cpu_to_le32(auth_mode); - ret = rndis_set_oid(usbdev, OID_802_11_AUTHENTICATION_MODE, &tmp, - sizeof(tmp)); + ret = rndis_set_oid(usbdev, + RNDIS_OID_802_11_AUTHENTICATION_MODE, + &tmp, sizeof(tmp)); if (ret != 0) { netdev_warn(usbdev->net, "setting auth mode failed (%08X)\n", ret); @@ -1208,8 +1166,9 @@ static int set_priv_filter(struct usbnet *usbdev) else tmp = cpu_to_le32(NDIS_80211_PRIV_ACCEPT_ALL); - return rndis_set_oid(usbdev, OID_802_11_PRIVACY_FILTER, &tmp, - sizeof(tmp)); + return rndis_set_oid(usbdev, + RNDIS_OID_802_11_PRIVACY_FILTER, &tmp, + sizeof(tmp)); } static int set_encr_mode(struct usbnet *usbdev, int pairwise, int 
groupwise) @@ -1234,8 +1193,9 @@ static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise) encr_mode = NDIS_80211_ENCR_DISABLED; tmp = cpu_to_le32(encr_mode); - ret = rndis_set_oid(usbdev, OID_802_11_ENCRYPTION_STATUS, &tmp, - sizeof(tmp)); + ret = rndis_set_oid(usbdev, + RNDIS_OID_802_11_ENCRYPTION_STATUS, &tmp, + sizeof(tmp)); if (ret != 0) { netdev_warn(usbdev->net, "setting encr mode failed (%08X)\n", ret); @@ -1255,8 +1215,9 @@ static int set_infra_mode(struct usbnet *usbdev, int mode) __func__, priv->infra_mode); tmp = cpu_to_le32(mode); - ret = rndis_set_oid(usbdev, OID_802_11_INFRASTRUCTURE_MODE, &tmp, - sizeof(tmp)); + ret = rndis_set_oid(usbdev, + RNDIS_OID_802_11_INFRASTRUCTURE_MODE, + &tmp, sizeof(tmp)); if (ret != 0) { netdev_warn(usbdev->net, "setting infra mode failed (%08X)\n", ret); @@ -1282,8 +1243,9 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold) rts_threshold = 2347; tmp = cpu_to_le32(rts_threshold); - return rndis_set_oid(usbdev, OID_802_11_RTS_THRESHOLD, &tmp, - sizeof(tmp)); + return rndis_set_oid(usbdev, + RNDIS_OID_802_11_RTS_THRESHOLD, + &tmp, sizeof(tmp)); } static int set_frag_threshold(struct usbnet *usbdev, u32 frag_threshold) @@ -1296,8 +1258,9 @@ static int set_frag_threshold(struct usbnet *usbdev, u32 frag_threshold) frag_threshold = 2346; tmp = cpu_to_le32(frag_threshold); - return rndis_set_oid(usbdev, OID_802_11_FRAGMENTATION_THRESHOLD, &tmp, - sizeof(tmp)); + return rndis_set_oid(usbdev, + RNDIS_OID_802_11_FRAGMENTATION_THRESHOLD, + &tmp, sizeof(tmp)); } static void set_default_iw_params(struct usbnet *usbdev) @@ -1333,7 +1296,9 @@ static int set_channel(struct usbnet *usbdev, int channel) dsconfig = ieee80211_dsss_chan_to_freq(channel) * 1000; len = sizeof(config); - ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len); + ret = rndis_query_oid(usbdev, + RNDIS_OID_802_11_CONFIGURATION, + &config, &len); if (ret < 0) { netdev_dbg(usbdev->net, "%s(): querying configuration failed\n", __func__); @@ -1341,8 +1306,9 @@ static int set_channel(struct usbnet *usbdev, int channel) } config.ds_config = cpu_to_le32(dsconfig); - ret = rndis_set_oid(usbdev, OID_802_11_CONFIGURATION, &config, - sizeof(config)); + ret = rndis_set_oid(usbdev, + RNDIS_OID_802_11_CONFIGURATION, + &config, sizeof(config)); netdev_dbg(usbdev->net, "%s(): %d -> %d\n", __func__, channel, ret); @@ -1359,8 +1325,10 @@ static struct ieee80211_channel *get_current_channel(struct usbnet *usbdev, /* Get channel and beacon interval */ len = sizeof(config); - ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len); - netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n", + ret = rndis_query_oid(usbdev, + RNDIS_OID_802_11_CONFIGURATION, + &config, &len); + netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_CONFIGURATION -> %d\n", __func__, ret); if (ret < 0) return NULL; @@ -1413,8 +1381,9 @@ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len, ret); } - ret = rndis_set_oid(usbdev, OID_802_11_ADD_WEP, &ndis_key, - sizeof(ndis_key)); + ret = rndis_set_oid(usbdev, + RNDIS_OID_802_11_ADD_WEP, &ndis_key, + sizeof(ndis_key)); if (ret != 0) { netdev_warn(usbdev->net, "adding encryption key %d failed (%08X)\n", index + 1, ret); @@ -1504,9 +1473,10 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len, get_bssid(usbdev, ndis_key.bssid); } - ret = rndis_set_oid(usbdev, OID_802_11_ADD_KEY, &ndis_key, - le32_to_cpu(ndis_key.size)); - netdev_dbg(usbdev->net, "%s(): OID_802_11_ADD_KEY 
-> %08X\n", + ret = rndis_set_oid(usbdev, + RNDIS_OID_802_11_ADD_KEY, &ndis_key, + le32_to_cpu(ndis_key.size)); + netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_ADD_KEY -> %08X\n", __func__, ret); if (ret != 0) return ret; @@ -1594,14 +1564,16 @@ static int remove_key(struct usbnet *usbdev, u8 index, const u8 *bssid) memset(remove_key.bssid, 0xff, sizeof(remove_key.bssid)); - ret = rndis_set_oid(usbdev, OID_802_11_REMOVE_KEY, &remove_key, - sizeof(remove_key)); + ret = rndis_set_oid(usbdev, + RNDIS_OID_802_11_REMOVE_KEY, + &remove_key, sizeof(remove_key)); if (ret != 0) return ret; } else { keyindex = cpu_to_le32(index); - ret = rndis_set_oid(usbdev, OID_802_11_REMOVE_WEP, &keyindex, - sizeof(keyindex)); + ret = rndis_set_oid(usbdev, + RNDIS_OID_802_11_REMOVE_WEP, + &keyindex, sizeof(keyindex)); if (ret != 0) { netdev_warn(usbdev->net, "removing encryption key %d failed (%08X)\n", @@ -1626,14 +1598,14 @@ static void set_multicast_list(struct usbnet *usbdev) char *mc_addrs = NULL; int mc_count; - basefilter = filter = RNDIS_PACKET_TYPE_DIRECTED | - RNDIS_PACKET_TYPE_BROADCAST; + basefilter = filter = cpu_to_le32(RNDIS_PACKET_TYPE_DIRECTED | + RNDIS_PACKET_TYPE_BROADCAST); if (usbdev->net->flags & IFF_PROMISC) { - filter |= RNDIS_PACKET_TYPE_PROMISCUOUS | - RNDIS_PACKET_TYPE_ALL_LOCAL; + filter |= cpu_to_le32(RNDIS_PACKET_TYPE_PROMISCUOUS | + RNDIS_PACKET_TYPE_ALL_LOCAL); } else if (usbdev->net->flags & IFF_ALLMULTI) { - filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST; + filter |= cpu_to_le32(RNDIS_PACKET_TYPE_ALL_MULTICAST); } if (filter != basefilter) @@ -1646,7 +1618,7 @@ static void set_multicast_list(struct usbnet *usbdev) netif_addr_lock_bh(usbdev->net); mc_count = netdev_mc_count(usbdev->net); if (mc_count > priv->multicast_size) { - filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST; + filter |= cpu_to_le32(RNDIS_PACKET_TYPE_ALL_MULTICAST); } else if (mc_count) { int i = 0; @@ -1669,27 +1641,28 @@ static void set_multicast_list(struct usbnet *usbdev) goto set_filter; if (mc_count) { - ret = rndis_set_oid(usbdev, OID_802_3_MULTICAST_LIST, mc_addrs, - mc_count * ETH_ALEN); + ret = rndis_set_oid(usbdev, + RNDIS_OID_802_3_MULTICAST_LIST, + mc_addrs, mc_count * ETH_ALEN); kfree(mc_addrs); if (ret == 0) - filter |= RNDIS_PACKET_TYPE_MULTICAST; + filter |= cpu_to_le32(RNDIS_PACKET_TYPE_MULTICAST); else - filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST; + filter |= cpu_to_le32(RNDIS_PACKET_TYPE_ALL_MULTICAST); - netdev_dbg(usbdev->net, "OID_802_3_MULTICAST_LIST(%d, max: %d) -> %d\n", + netdev_dbg(usbdev->net, "RNDIS_OID_802_3_MULTICAST_LIST(%d, max: %d) -> %d\n", mc_count, priv->multicast_size, ret); } set_filter: - ret = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &filter, + ret = rndis_set_oid(usbdev, RNDIS_OID_GEN_CURRENT_PACKET_FILTER, &filter, sizeof(filter)); if (ret < 0) { netdev_warn(usbdev->net, "couldn't set packet filter: %08x\n", le32_to_cpu(filter)); } - netdev_dbg(usbdev->net, "OID_GEN_CURRENT_PACKET_FILTER(%08x) -> %d\n", + netdev_dbg(usbdev->net, "RNDIS_OID_GEN_CURRENT_PACKET_FILTER(%08x) -> %d\n", le32_to_cpu(filter), ret); } @@ -1748,9 +1721,10 @@ static struct ndis_80211_pmkid *get_device_pmkids(struct usbnet *usbdev) pmkids->length = cpu_to_le32(len); pmkids->bssid_info_count = cpu_to_le32(max_pmkids); - ret = rndis_query_oid(usbdev, OID_802_11_PMKID, pmkids, &len); + ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_PMKID, + pmkids, &len); if (ret < 0) { - netdev_dbg(usbdev->net, "%s(): OID_802_11_PMKID(%d, %d)" + netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_PMKID(%d, %d)" " -> 
%d\n", __func__, len, max_pmkids, ret); kfree(pmkids); @@ -1776,10 +1750,10 @@ static int set_device_pmkids(struct usbnet *usbdev, debug_print_pmkids(usbdev, pmkids, __func__); - ret = rndis_set_oid(usbdev, OID_802_11_PMKID, pmkids, - le32_to_cpu(pmkids->length)); + ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_PMKID, pmkids, + le32_to_cpu(pmkids->length)); if (ret < 0) { - netdev_dbg(usbdev->net, "%s(): OID_802_11_PMKID(%d, %d) -> %d" + netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_PMKID(%d, %d) -> %d" "\n", __func__, len, num_pmkids, ret); } @@ -1801,8 +1775,8 @@ static struct ndis_80211_pmkid *remove_pmkid(struct usbnet *usbdev, count = max_pmkids; for (i = 0; i < count; i++) - if (!compare_ether_addr(pmkids->bssid_info[i].bssid, - pmksa->bssid)) + if (ether_addr_equal(pmkids->bssid_info[i].bssid, + pmksa->bssid)) break; /* pmkid not found */ @@ -1843,8 +1817,8 @@ static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev, /* update with new pmkid */ for (i = 0; i < count; i++) { - if (compare_ether_addr(pmkids->bssid_info[i].bssid, - pmksa->bssid)) + if (!ether_addr_equal(pmkids->bssid_info[i].bssid, + pmksa->bssid)) continue; memcpy(pmkids->bssid_info[i].pmkid, pmksa->pmkid, @@ -2113,7 +2087,8 @@ resize_buf: * resizing until it won't get any bigger. */ new_len = len; - ret = rndis_query_oid(usbdev, OID_802_11_BSSID_LIST, buf, &new_len); + ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_BSSID_LIST, + buf, &new_len); if (ret != 0 || new_len < sizeof(struct ndis_80211_bssid_list_ex)) goto out; @@ -2139,7 +2114,7 @@ resize_buf: while (check_bssid_list_item(bssid, bssid_len, buf, len)) { if (rndis_bss_info_update(usbdev, bssid) && match_bssid && matched) { - if (compare_ether_addr(bssid->mac, match_bssid)) + if (!ether_addr_equal(bssid->mac, match_bssid)) *matched = true; } @@ -2511,14 +2486,15 @@ static void rndis_fill_station_info(struct usbnet *usbdev, memset(sinfo, 0, sizeof(*sinfo)); len = sizeof(linkspeed); - ret = rndis_query_oid(usbdev, OID_GEN_LINK_SPEED, &linkspeed, &len); + ret = rndis_query_oid(usbdev, RNDIS_OID_GEN_LINK_SPEED, &linkspeed, &len); if (ret == 0) { sinfo->txrate.legacy = le32_to_cpu(linkspeed) / 1000; sinfo->filled |= STATION_INFO_TX_BITRATE; } len = sizeof(rssi); - ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len); + ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_RSSI, + &rssi, &len); if (ret == 0) { sinfo->signal = level_to_qual(le32_to_cpu(rssi)); sinfo->filled |= STATION_INFO_SIGNAL; @@ -2531,7 +2507,7 @@ static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev, struct rndis_wlan_private *priv = wiphy_priv(wiphy); struct usbnet *usbdev = priv->usbdev; - if (compare_ether_addr(priv->bssid, mac)) + if (!ether_addr_equal(priv->bssid, mac)) return -ENOENT; rndis_fill_station_info(usbdev, sinfo); @@ -2624,7 +2600,8 @@ static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev) pmkid.length = cpu_to_le32(sizeof(pmkid)); pmkid.bssid_info_count = cpu_to_le32(0); - return rndis_set_oid(usbdev, OID_802_11_PMKID, &pmkid, sizeof(pmkid)); + return rndis_set_oid(usbdev, RNDIS_OID_802_11_PMKID, + &pmkid, sizeof(pmkid)); } static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, @@ -2654,9 +2631,10 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, priv->power_mode = power_mode; mode = cpu_to_le32(power_mode); - ret = rndis_set_oid(usbdev, OID_802_11_POWER_MODE, &mode, sizeof(mode)); + ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_POWER_MODE, + &mode, sizeof(mode)); - 
netdev_dbg(usbdev->net, "%s(): OID_802_11_POWER_MODE -> %d\n", + netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_POWER_MODE -> %d\n", __func__, ret); return ret; @@ -2693,10 +2671,11 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid, /* Get signal quality, in case of error use rssi=0 and ignore error. */ len = sizeof(rssi); rssi = 0; - ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len); + ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_RSSI, + &rssi, &len); signal = level_to_qual(le32_to_cpu(rssi)); - netdev_dbg(usbdev->net, "%s(): OID_802_11_RSSI -> %d, " + netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_RSSI -> %d, " "rssi:%d, qual: %d\n", __func__, ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi))); @@ -2720,8 +2699,9 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid, /* Get SSID, in case of error, use zero length SSID and ignore error. */ len = sizeof(ssid); memset(&ssid, 0, sizeof(ssid)); - ret = rndis_query_oid(usbdev, OID_802_11_SSID, &ssid, &len); - netdev_dbg(usbdev->net, "%s(): OID_802_11_SSID -> %d, len: %d, ssid: " + ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_SSID, + &ssid, &len); + netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_SSID -> %d, len: %d, ssid: " "'%.32s'\n", __func__, ret, le32_to_cpu(ssid.length), ssid.essid); @@ -2843,7 +2823,7 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev) * NDIS spec says: "If the device is associated, but the associated * BSSID is not in its BSSID scan list, then the driver must add an * entry for the BSSID at the end of the data that it returns in - * response to query of OID_802_11_BSSID_LIST." + * response to query of RNDIS_OID_802_11_BSSID_LIST." * * NOTE: Seems to be true for BCM4320b variant, but not BCM4320a. */ @@ -3095,15 +3075,15 @@ static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen) struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); struct rndis_indicate *msg = ind; - switch (msg->status) { + switch (le32_to_cpu(msg->status)) { case RNDIS_STATUS_MEDIA_CONNECT: - if (priv->current_command_oid == OID_802_11_ADD_KEY) { - /* OID_802_11_ADD_KEY causes sometimes extra + if (priv->current_command_oid == RNDIS_OID_802_11_ADD_KEY) { + /* RNDIS_OID_802_11_ADD_KEY causes sometimes extra * "media connect" indications which confuses driver * and userspace to think that device is * roaming/reassociating when it isn't. 
*/ - netdev_dbg(usbdev->net, "ignored OID_802_11_ADD_KEY triggered 'media connect'\n"); + netdev_dbg(usbdev->net, "ignored RNDIS_OID_802_11_ADD_KEY triggered 'media connect'\n"); return; } @@ -3148,8 +3128,9 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy) /* determine supported modes */ len = sizeof(networks_supported); - retval = rndis_query_oid(usbdev, OID_802_11_NETWORK_TYPES_SUPPORTED, - &networks_supported, &len); + retval = rndis_query_oid(usbdev, + RNDIS_OID_802_11_NETWORK_TYPES_SUPPORTED, + &networks_supported, &len); if (retval >= 0) { n = le32_to_cpu(networks_supported.num_items); if (n > 8) @@ -3173,9 +3154,11 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy) /* get device 802.11 capabilities, number of PMKIDs */ caps = (struct ndis_80211_capability *)caps_buf; len = sizeof(caps_buf); - retval = rndis_query_oid(usbdev, OID_802_11_CAPABILITY, caps, &len); + retval = rndis_query_oid(usbdev, + RNDIS_OID_802_11_CAPABILITY, + caps, &len); if (retval >= 0) { - netdev_dbg(usbdev->net, "OID_802_11_CAPABILITY -> len %d, " + netdev_dbg(usbdev->net, "RNDIS_OID_802_11_CAPABILITY -> len %d, " "ver %d, pmkids %d, auth-encr-pairs %d\n", le32_to_cpu(caps->length), le32_to_cpu(caps->version), @@ -3247,13 +3230,14 @@ static void rndis_device_poller(struct work_struct *work) } len = sizeof(rssi); - ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len); + ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_RSSI, + &rssi, &len); if (ret == 0) { priv->last_qual = level_to_qual(le32_to_cpu(rssi)); rndis_do_cqm(usbdev, le32_to_cpu(rssi)); } - netdev_dbg(usbdev->net, "dev-poller: OID_802_11_RSSI -> %d, rssi:%d, qual: %d\n", + netdev_dbg(usbdev->net, "dev-poller: RNDIS_OID_802_11_RSSI -> %d, rssi:%d, qual: %d\n", ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi))); /* Workaround transfer stalls on poor quality links. @@ -3275,15 +3259,18 @@ static void rndis_device_poller(struct work_struct *work) * working. */ tmp = cpu_to_le32(1); - rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp, - sizeof(tmp)); + rndis_set_oid(usbdev, + RNDIS_OID_802_11_BSSID_LIST_SCAN, + &tmp, sizeof(tmp)); len = CONTROL_BUFFER_SIZE; buf = kmalloc(len, GFP_KERNEL); if (!buf) goto end; - rndis_query_oid(usbdev, OID_802_11_BSSID_LIST, buf, &len); + rndis_query_oid(usbdev, + RNDIS_OID_802_11_BSSID_LIST, + buf, &len); kfree(buf); } @@ -3465,13 +3452,15 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf) */ usbdev->net->netdev_ops = &rndis_wlan_netdev_ops; - tmp = RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST; - retval = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &tmp, - sizeof(tmp)); + tmp = cpu_to_le32(RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST); + retval = rndis_set_oid(usbdev, + RNDIS_OID_GEN_CURRENT_PACKET_FILTER, + &tmp, sizeof(tmp)); len = sizeof(tmp); - retval = rndis_query_oid(usbdev, OID_802_3_MAXIMUM_LIST_SIZE, &tmp, - &len); + retval = rndis_query_oid(usbdev, + RNDIS_OID_802_3_MAXIMUM_LIST_SIZE, + &tmp, &len); priv->multicast_size = le32_to_cpu(tmp); if (retval < 0 || priv->multicast_size < 0) priv->multicast_size = 0; @@ -3601,7 +3590,7 @@ static int rndis_wlan_stop(struct usbnet *usbdev) /* Set current packet filter zero to block receiving data packets from device. 
*/ filter = 0; - rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &filter, + rndis_set_oid(usbdev, RNDIS_OID_GEN_CURRENT_PACKET_FILTER, &filter, sizeof(filter)); return retval; diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index 3a6b40239bc..5e6b5014316 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c @@ -1828,15 +1828,4 @@ static struct pci_driver rt2400pci_driver = { .resume = rt2x00pci_resume, }; -static int __init rt2400pci_init(void) -{ - return pci_register_driver(&rt2400pci_driver); -} - -static void __exit rt2400pci_exit(void) -{ - pci_unregister_driver(&rt2400pci_driver); -} - -module_init(rt2400pci_init); -module_exit(rt2400pci_exit); +module_pci_driver(rt2400pci_driver); diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index dcc0e1fcca7..136b849f11b 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -2119,15 +2119,4 @@ static struct pci_driver rt2500pci_driver = { .resume = rt2x00pci_resume, }; -static int __init rt2500pci_init(void) -{ - return pci_register_driver(&rt2500pci_driver); -} - -static void __exit rt2500pci_exit(void) -{ - pci_unregister_driver(&rt2500pci_driver); -} - -module_init(rt2500pci_init); -module_exit(rt2500pci_exit); +module_pci_driver(rt2500pci_driver); diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c index 1de9c752c88..c88fd3e6109 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c @@ -1912,7 +1912,7 @@ static struct usb_device_id rt2500usb_device_table[] = { { USB_DEVICE(0x0b05, 0x1706) }, { USB_DEVICE(0x0b05, 0x1707) }, /* Belkin */ - { USB_DEVICE(0x050d, 0x7050) }, + { USB_DEVICE(0x050d, 0x7050) }, /* FCC ID: K7SF5D7050A ver. 2.x */ { USB_DEVICE(0x050d, 0x7051) }, /* Cisco Systems */ { USB_DEVICE(0x13b1, 0x000d) }, diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index 063bfa8b91f..9348521e083 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -83,6 +83,7 @@ #define REV_RT3090E 0x0211 #define REV_RT3390E 0x0211 #define REV_RT5390F 0x0502 +#define REV_RT5390R 0x1502 /* * Signal information. @@ -98,9 +99,11 @@ #define EEPROM_BASE 0x0000 #define EEPROM_SIZE 0x0110 #define BBP_BASE 0x0000 -#define BBP_SIZE 0x0080 +#define BBP_SIZE 0x00ff #define RF_BASE 0x0004 #define RF_SIZE 0x0010 +#define RFCSR_BASE 0x0000 +#define RFCSR_SIZE 0x0040 /* * Number of TX queues. 
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 6c0a12ea6a1..dfc90d34be6 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -290,11 +290,25 @@ int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev) msleep(10); } - ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n"); + ERROR(rt2x00dev, "WPDMA TX/RX busy [0x%08x].\n", reg); return -EACCES; } EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready); +void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_DMA_BUSY, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_RX_DMA_BUSY, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); + rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); +} +EXPORT_SYMBOL_GPL(rt2800_disable_wpdma); + static bool rt2800_check_firmware_crc(const u8 *data, const size_t len) { u16 fw_crc; @@ -412,6 +426,8 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev, rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002); } + rt2800_disable_wpdma(rt2x00dev); + /* * Write firmware to the device. */ @@ -436,10 +452,7 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev, * Disable DMA, will be reenabled later when enabling * the radio. */ - rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); - rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); - rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); + rt2800_disable_wpdma(rt2x00dev); /* * Initialize firmware. @@ -823,6 +836,13 @@ const struct rt2x00debug rt2800_rt2x00debug = { .word_size = sizeof(u32), .word_count = RF_SIZE / sizeof(u32), }, + .rfcsr = { + .read = rt2800_rfcsr_read, + .write = rt2800_rfcsr_write, + .word_base = RFCSR_BASE, + .word_size = sizeof(u8), + .word_count = RFCSR_SIZE / sizeof(u8), + }, }; EXPORT_SYMBOL_GPL(rt2800_rt2x00debug); #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ @@ -2717,13 +2737,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) unsigned int i; int ret; - rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); - rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_DMA_BUSY, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_RX_DMA_BUSY, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); - rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); + rt2800_disable_wpdma(rt2x00dev); ret = rt2800_drv_init_registers(rt2x00dev); if (ret) @@ -3349,6 +3363,13 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); } + /* This chip has hardware antenna diversity*/ + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) { + rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */ + rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */ + rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */ + } + rt2800_bbp_read(rt2x00dev, 152, &value); if (ant == 0) rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); @@ -3997,10 +4018,7 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev) { u32 reg; - rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); - rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); 
- rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); + rt2800_disable_wpdma(rt2x00dev); /* Wait for DMA, ignore error */ rt2800_wait_wpdma_ready(rt2x00dev); @@ -4287,6 +4305,11 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) rt2x00dev->default_ant.rx = ANTENNA_A; } + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) { + rt2x00dev->default_ant.tx = ANTENNA_HW_DIVERSITY; /* Unused */ + rt2x00dev->default_ant.rx = ANTENNA_HW_DIVERSITY; /* Unused */ + } + /* * Determine external LNA informations. */ diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h index 419e36cb06b..18a0b67b4c6 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.h +++ b/drivers/net/wireless/rt2x00/rt2800lib.h @@ -208,5 +208,6 @@ int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u8 buf_size); int rt2800_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey); +void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev); #endif /* RT2800LIB_H */ diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index 0397bbf0ce0..931331d9521 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -361,7 +361,6 @@ static void rt2800pci_clear_entry(struct queue_entry *entry) static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev) { struct queue_entry_priv_pci *entry_priv; - u32 reg; /* * Initialize registers. @@ -394,6 +393,16 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev) rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX3, 0); rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX3, 0); + rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR4, 0); + rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT4, 0); + rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX4, 0); + rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX4, 0); + + rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR5, 0); + rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT5, 0); + rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX5, 0); + rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX5, 0); + entry_priv = rt2x00dev->rx->entries[0].priv_data; rt2x00pci_register_write(rt2x00dev, RX_BASE_PTR, entry_priv->desc_dma); rt2x00pci_register_write(rt2x00dev, RX_MAX_CNT, @@ -402,14 +411,7 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev) rt2x00dev->rx[0].limit - 1); rt2x00pci_register_write(rt2x00dev, RX_DRX_IDX, 0); - /* - * Enable global DMA configuration - */ - rt2x00pci_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); - rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); - rt2x00pci_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); + rt2800_disable_wpdma(rt2x00dev); rt2x00pci_register_write(rt2x00dev, DELAY_INT_CFG, 0); @@ -504,8 +506,10 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev) { int retval; - if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) || - rt2800pci_init_queues(rt2x00dev))) + /* Wait for DMA, ignore error until we initialize queues. 
*/ + rt2800_wait_wpdma_ready(rt2x00dev); + + if (unlikely(rt2800pci_init_queues(rt2x00dev))) return -EIO; retval = rt2800_enable_radio(rt2x00dev); @@ -1184,7 +1188,9 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = { { PCI_DEVICE(0x1814, 0x3593) }, #endif #ifdef CONFIG_RT2800PCI_RT53XX + { PCI_DEVICE(0x1814, 0x5362) }, { PCI_DEVICE(0x1814, 0x5390) }, + { PCI_DEVICE(0x1814, 0x5392) }, { PCI_DEVICE(0x1814, 0x539a) }, { PCI_DEVICE(0x1814, 0x539f) }, #endif diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 001735f7a66..5601302d09a 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -922,6 +922,7 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x1482, 0x3c09) }, /* AirTies */ { USB_DEVICE(0x1eda, 0x2012) }, + { USB_DEVICE(0x1eda, 0x2210) }, { USB_DEVICE(0x1eda, 0x2310) }, /* Allwin */ { USB_DEVICE(0x8516, 0x2070) }, @@ -991,6 +992,7 @@ static struct usb_device_id rt2800usb_device_table[] = { /* DVICO */ { USB_DEVICE(0x0fe9, 0xb307) }, /* Edimax */ + { USB_DEVICE(0x7392, 0x4085) }, { USB_DEVICE(0x7392, 0x7711) }, { USB_DEVICE(0x7392, 0x7717) }, { USB_DEVICE(0x7392, 0x7718) }, @@ -1066,6 +1068,7 @@ static struct usb_device_id rt2800usb_device_table[] = { /* Philips */ { USB_DEVICE(0x0471, 0x200f) }, /* Planex */ + { USB_DEVICE(0x2019, 0x5201) }, { USB_DEVICE(0x2019, 0xab25) }, { USB_DEVICE(0x2019, 0xed06) }, /* Quanta */ @@ -1134,6 +1137,10 @@ static struct usb_device_id rt2800usb_device_table[] = { #ifdef CONFIG_RT2800USB_RT33XX /* Belkin */ { USB_DEVICE(0x050d, 0x945b) }, + /* Panasonic */ + { USB_DEVICE(0x083a, 0xb511) }, + /* Philips */ + { USB_DEVICE(0x0471, 0x20dd) }, /* Ralink */ { USB_DEVICE(0x148f, 0x3370) }, { USB_DEVICE(0x148f, 0x8070) }, @@ -1145,6 +1152,8 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x8516, 0x3572) }, /* Askey */ { USB_DEVICE(0x1690, 0x0744) }, + { USB_DEVICE(0x1690, 0x0761) }, + { USB_DEVICE(0x1690, 0x0764) }, /* Cisco */ { USB_DEVICE(0x167b, 0x4001) }, /* EnGenius */ @@ -1159,20 +1168,25 @@ static struct usb_device_id rt2800usb_device_table[] = { /* Sitecom */ { USB_DEVICE(0x0df6, 0x0041) }, { USB_DEVICE(0x0df6, 0x0062) }, + { USB_DEVICE(0x0df6, 0x0065) }, + { USB_DEVICE(0x0df6, 0x0066) }, + { USB_DEVICE(0x0df6, 0x0068) }, /* Toshiba */ { USB_DEVICE(0x0930, 0x0a07) }, /* Zinwell */ { USB_DEVICE(0x5a57, 0x0284) }, #endif #ifdef CONFIG_RT2800USB_RT53XX - /* Alpha */ - { USB_DEVICE(0x2001, 0x3c15) }, - { USB_DEVICE(0x2001, 0x3c19) }, /* Arcadyan */ { USB_DEVICE(0x043e, 0x7a12) }, /* Azurewave */ { USB_DEVICE(0x13d3, 0x3329) }, { USB_DEVICE(0x13d3, 0x3365) }, + /* D-Link */ + { USB_DEVICE(0x2001, 0x3c15) }, + { USB_DEVICE(0x2001, 0x3c19) }, + { USB_DEVICE(0x2001, 0x3c1c) }, + { USB_DEVICE(0x2001, 0x3c1d) }, /* LG innotek */ { USB_DEVICE(0x043e, 0x7a22) }, /* Panasonic */ @@ -1224,12 +1238,8 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x07d1, 0x3c0b) }, { USB_DEVICE(0x07d1, 0x3c17) }, { USB_DEVICE(0x2001, 0x3c17) }, - /* Edimax */ - { USB_DEVICE(0x7392, 0x4085) }, /* Encore */ { USB_DEVICE(0x203d, 0x14a1) }, - /* Fujitsu Stylistic 550 */ - { USB_DEVICE(0x1690, 0x0761) }, /* Gemtek */ { USB_DEVICE(0x15a9, 0x0010) }, /* Gigabyte */ @@ -1250,7 +1260,6 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x05a6, 0x0101) }, { USB_DEVICE(0x1d4d, 0x0010) }, /* Planex */ - { USB_DEVICE(0x2019, 0x5201) }, { USB_DEVICE(0x2019, 0xab24) }, /* Qcom */ { USB_DEVICE(0x18e8, 
0x6259) }, diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index 471f87cab4a..ca36cccaba3 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -692,6 +692,8 @@ enum rt2x00_state_flags { */ CONFIG_CHANNEL_HT40, CONFIG_POWERSAVING, + CONFIG_HT_DISABLED, + CONFIG_QOS_DISABLED, /* * Mark we currently are sequentially reading TX_STA_FIFO register @@ -1280,7 +1282,7 @@ void rt2x00lib_dmadone(struct queue_entry *entry); void rt2x00lib_txdone(struct queue_entry *entry, struct txdone_entry_desc *txdesc); void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status); -void rt2x00lib_rxdone(struct queue_entry *entry); +void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp); /* * mac80211 handlers. diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c index 293676bfa57..e7361d913e8 100644 --- a/drivers/net/wireless/rt2x00/rt2x00config.c +++ b/drivers/net/wireless/rt2x00/rt2x00config.c @@ -217,6 +217,11 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, libconf.conf = conf; if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) { + if (!conf_is_ht(conf)) + set_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags); + else + clear_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags); + if (conf_is_ht40(conf)) { set_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags); hw_value = rt2x00ht_center_channel(rt2x00dev, conf); diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c index 78787fcc919..3bb8cafbac5 100644 --- a/drivers/net/wireless/rt2x00/rt2x00debug.c +++ b/drivers/net/wireless/rt2x00/rt2x00debug.c @@ -70,6 +70,7 @@ struct rt2x00debug_intf { * - eeprom offset/value files * - bbp offset/value files * - rf offset/value files + * - rfcsr offset/value files * - queue folder * - frame dump file * - queue stats file @@ -89,6 +90,8 @@ struct rt2x00debug_intf { struct dentry *bbp_val_entry; struct dentry *rf_off_entry; struct dentry *rf_val_entry; + struct dentry *rfcsr_off_entry; + struct dentry *rfcsr_val_entry; struct dentry *queue_folder; struct dentry *queue_frame_dump_entry; struct dentry *queue_stats_entry; @@ -131,6 +134,7 @@ struct rt2x00debug_intf { unsigned int offset_eeprom; unsigned int offset_bbp; unsigned int offset_rf; + unsigned int offset_rfcsr; }; void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev, @@ -525,6 +529,7 @@ RT2X00DEBUGFS_OPS(csr, "0x%.8x\n", u32); RT2X00DEBUGFS_OPS(eeprom, "0x%.4x\n", u16); RT2X00DEBUGFS_OPS(bbp, "0x%.2x\n", u8); RT2X00DEBUGFS_OPS(rf, "0x%.8x\n", u32); +RT2X00DEBUGFS_OPS(rfcsr, "0x%.2x\n", u8); static ssize_t rt2x00debug_read_dev_flags(struct file *file, char __user *buf, @@ -614,7 +619,7 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name, const struct rt2x00debug *debug = intf->debug; char *data; - data = kzalloc(8 * MAX_LINE_LENGTH, GFP_KERNEL); + data = kzalloc(9 * MAX_LINE_LENGTH, GFP_KERNEL); if (!data) return NULL; @@ -624,22 +629,22 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name, data += sprintf(data, "revision:\t%04x\n", intf->rt2x00dev->chip.rev); data += sprintf(data, "\n"); data += sprintf(data, "register\tbase\twords\twordsize\n"); - data += sprintf(data, "csr\t%d\t%d\t%d\n", - debug->csr.word_base, - debug->csr.word_count, - debug->csr.word_size); - data += sprintf(data, "eeprom\t%d\t%d\t%d\n", - debug->eeprom.word_base, - debug->eeprom.word_count, - debug->eeprom.word_size); - data += sprintf(data, "bbp\t%d\t%d\t%d\n", - 
debug->bbp.word_base, - debug->bbp.word_count, - debug->bbp.word_size); - data += sprintf(data, "rf\t%d\t%d\t%d\n", - debug->rf.word_base, - debug->rf.word_count, - debug->rf.word_size); +#define RT2X00DEBUGFS_SPRINTF_REGISTER(__name) \ +{ \ + if(debug->__name.read) \ + data += sprintf(data, __stringify(__name) \ + "\t%d\t%d\t%d\n", \ + debug->__name.word_base, \ + debug->__name.word_count, \ + debug->__name.word_size); \ +} + RT2X00DEBUGFS_SPRINTF_REGISTER(csr); + RT2X00DEBUGFS_SPRINTF_REGISTER(eeprom); + RT2X00DEBUGFS_SPRINTF_REGISTER(bbp); + RT2X00DEBUGFS_SPRINTF_REGISTER(rf); + RT2X00DEBUGFS_SPRINTF_REGISTER(rfcsr); +#undef RT2X00DEBUGFS_SPRINTF_REGISTER + blob->size = strlen(blob->data); return debugfs_create_blob(name, S_IRUSR, intf->driver_folder, blob); @@ -694,31 +699,34 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) if (IS_ERR(intf->register_folder) || !intf->register_folder) goto exit; -#define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \ -({ \ - (__intf)->__name##_off_entry = \ - debugfs_create_u32(__stringify(__name) "_offset", \ - S_IRUSR | S_IWUSR, \ - (__intf)->register_folder, \ - &(__intf)->offset_##__name); \ - if (IS_ERR((__intf)->__name##_off_entry) \ - || !(__intf)->__name##_off_entry) \ - goto exit; \ - \ - (__intf)->__name##_val_entry = \ - debugfs_create_file(__stringify(__name) "_value", \ - S_IRUSR | S_IWUSR, \ - (__intf)->register_folder, \ - (__intf), &rt2x00debug_fop_##__name);\ - if (IS_ERR((__intf)->__name##_val_entry) \ - || !(__intf)->__name##_val_entry) \ - goto exit; \ +#define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \ +({ \ + if(debug->__name.read) { \ + (__intf)->__name##_off_entry = \ + debugfs_create_u32(__stringify(__name) "_offset", \ + S_IRUSR | S_IWUSR, \ + (__intf)->register_folder, \ + &(__intf)->offset_##__name); \ + if (IS_ERR((__intf)->__name##_off_entry) \ + || !(__intf)->__name##_off_entry) \ + goto exit; \ + \ + (__intf)->__name##_val_entry = \ + debugfs_create_file(__stringify(__name) "_value", \ + S_IRUSR | S_IWUSR, \ + (__intf)->register_folder, \ + (__intf), &rt2x00debug_fop_##__name); \ + if (IS_ERR((__intf)->__name##_val_entry) \ + || !(__intf)->__name##_val_entry) \ + goto exit; \ + } \ }) RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, csr); RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, eeprom); RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, bbp); RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, rf); + RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, rfcsr); #undef RT2X00DEBUGFS_CREATE_REGISTER_ENTRY @@ -770,6 +778,8 @@ void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev) debugfs_remove(intf->queue_stats_entry); debugfs_remove(intf->queue_frame_dump_entry); debugfs_remove(intf->queue_folder); + debugfs_remove(intf->rfcsr_val_entry); + debugfs_remove(intf->rfcsr_off_entry); debugfs_remove(intf->rf_val_entry); debugfs_remove(intf->rf_off_entry); debugfs_remove(intf->bbp_val_entry); diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.h b/drivers/net/wireless/rt2x00/rt2x00debug.h index fa11409cb5c..e11d39bdfef 100644 --- a/drivers/net/wireless/rt2x00/rt2x00debug.h +++ b/drivers/net/wireless/rt2x00/rt2x00debug.h @@ -65,6 +65,7 @@ struct rt2x00debug { RT2X00DEBUGFS_REGISTER_ENTRY(eeprom, u16); RT2X00DEBUGFS_REGISTER_ENTRY(bbp, u8); RT2X00DEBUGFS_REGISTER_ENTRY(rf, u32); + RT2X00DEBUGFS_REGISTER_ENTRY(rfcsr, u8); }; #endif /* RT2X00DEBUG_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index 90cc5e77265..e5404e57625 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ 
b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -391,9 +391,10 @@ void rt2x00lib_txdone(struct queue_entry *entry, tx_info->flags |= IEEE80211_TX_STAT_AMPDU; tx_info->status.ampdu_len = 1; tx_info->status.ampdu_ack_len = success ? 1 : 0; - - if (!success) - tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; + /* + * TODO: Need to tear down BA session here + * if not successful. + */ } if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) { @@ -587,7 +588,7 @@ static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev, return 0; } -void rt2x00lib_rxdone(struct queue_entry *entry) +void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct rxdone_entry_desc rxdesc; @@ -607,7 +608,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry) * Allocate a new sk_buffer. If no new buffer available, drop the * received frame and reuse the existing buffer. */ - skb = rt2x00queue_alloc_rxskb(entry); + skb = rt2x00queue_alloc_rxskb(entry, gfp); if (!skb) goto submit_entry; diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/rt2x00/rt2x00leds.c index ca585e34d00..8679d781a26 100644 --- a/drivers/net/wireless/rt2x00/rt2x00leds.c +++ b/drivers/net/wireless/rt2x00/rt2x00leds.c @@ -124,17 +124,15 @@ static int rt2x00leds_register_led(struct rt2x00_dev *rt2x00dev, void rt2x00leds_register(struct rt2x00_dev *rt2x00dev) { - char dev_name[16]; - char name[32]; + char name[36]; int retval; unsigned long on_period; unsigned long off_period; - - snprintf(dev_name, sizeof(dev_name), "%s-%s", - rt2x00dev->ops->name, wiphy_name(rt2x00dev->hw->wiphy)); + const char *phy_name = wiphy_name(rt2x00dev->hw->wiphy); if (rt2x00dev->led_radio.flags & LED_INITIALIZED) { - snprintf(name, sizeof(name), "%s::radio", dev_name); + snprintf(name, sizeof(name), "%s-%s::radio", + rt2x00dev->ops->name, phy_name); retval = rt2x00leds_register_led(rt2x00dev, &rt2x00dev->led_radio, @@ -144,7 +142,8 @@ void rt2x00leds_register(struct rt2x00_dev *rt2x00dev) } if (rt2x00dev->led_assoc.flags & LED_INITIALIZED) { - snprintf(name, sizeof(name), "%s::assoc", dev_name); + snprintf(name, sizeof(name), "%s-%s::assoc", + rt2x00dev->ops->name, phy_name); retval = rt2x00leds_register_led(rt2x00dev, &rt2x00dev->led_assoc, @@ -154,7 +153,8 @@ void rt2x00leds_register(struct rt2x00_dev *rt2x00dev) } if (rt2x00dev->led_qual.flags & LED_INITIALIZED) { - snprintf(name, sizeof(name), "%s::quality", dev_name); + snprintf(name, sizeof(name), "%s-%s::quality", + rt2x00dev->ops->name, phy_name); retval = rt2x00leds_register_led(rt2x00dev, &rt2x00dev->led_qual, diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h index 78bd43b8961..a0935987fa3 100644 --- a/drivers/net/wireless/rt2x00/rt2x00lib.h +++ b/drivers/net/wireless/rt2x00/rt2x00lib.h @@ -103,7 +103,7 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, * rt2x00queue_alloc_rxskb - allocate a skb for RX purposes. * @entry: The entry for which the skb will be applicable. 
*/ -struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry); +struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp); /** * rt2x00queue_free_skb - free a skb diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index 2df2eb6d3e0..b49773ef72f 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -709,9 +709,19 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, rt2x00dev->intf_associated--; rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated); + + clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags); } /* + * Check for access point which do not support 802.11e . We have to + * generate data frames sequence number in S/W for such AP, because + * of H/W bug. + */ + if (changes & BSS_CHANGED_QOS && !bss_conf->qos) + set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags); + + /* * When the erp information has changed, we should perform * additional configuration steps. For all other changes we are done. */ diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index 17148bb2442..0a4653a92ca 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -92,7 +92,7 @@ bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) /* * Send the frame to rt2x00lib for further processing. */ - rt2x00lib_rxdone(entry); + rt2x00lib_rxdone(entry, GFP_ATOMIC); } return !max_rx; diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index 9b1b2b7a780..4c662eccf53 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c @@ -33,7 +33,7 @@ #include "rt2x00.h" #include "rt2x00lib.h" -struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry) +struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct sk_buff *skb; @@ -68,7 +68,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry) /* * Allocate skbuffer. */ - skb = dev_alloc_skb(frame_size + head_size + tail_size); + skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp); if (!skb) return NULL; @@ -213,8 +213,19 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); - if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) - return; + if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) { + /* + * rt2800 has a H/W (or F/W) bug, device incorrectly increase + * seqno on retransmited data (non-QOS) frames. To workaround + * the problem let's generate seqno in software if QOS is + * disabled. + */ + if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags)) + __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); + else + /* H/W will generate sequence number */ + return; + } /* * The hardware is not able to insert a sequence number. Assign a @@ -320,14 +331,6 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, txdesc->u.ht.wcid = sta_priv->wcid; } - txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */ - - /* - * Only one STBC stream is supported for now. 
- */ - if (tx_info->flags & IEEE80211_TX_CTL_STBC) - txdesc->u.ht.stbc = 1; - /* * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the * mcs rate to be used @@ -351,6 +354,24 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, txdesc->u.ht.mcs |= 0x08; } + if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) { + if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) + txdesc->u.ht.txop = TXOP_SIFS; + else + txdesc->u.ht.txop = TXOP_BACKOFF; + + /* Left zero on all other settings. */ + return; + } + + txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */ + + /* + * Only one STBC stream is supported for now. + */ + if (tx_info->flags & IEEE80211_TX_CTL_STBC) + txdesc->u.ht.stbc = 1; + /* * This frame is eligible for an AMPDU, however, don't aggregate * frames that are intended to probe a specific tx rate. @@ -1142,7 +1163,7 @@ static int rt2x00queue_alloc_rxskbs(struct data_queue *queue) struct sk_buff *skb; for (i = 0; i < queue->limit; i++) { - skb = rt2x00queue_alloc_rxskb(&queue->entries[i]); + skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL); if (!skb) return -ENOMEM; queue->entries[i].skb = skb; diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c index 66094eb21b6..d357d1ed92f 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c @@ -358,7 +358,7 @@ static void rt2x00usb_work_rxdone(struct work_struct *work) /* * Send the frame to rt2x00lib for further processing. */ - rt2x00lib_rxdone(entry); + rt2x00lib_rxdone(entry, GFP_KERNEL); } } diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index e0c6d117429..ee22bd74579 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -3092,15 +3092,4 @@ static struct pci_driver rt61pci_driver = { .resume = rt2x00pci_resume, }; -static int __init rt61pci_init(void) -{ - return pci_register_driver(&rt61pci_driver); -} - -static void __exit rt61pci_exit(void) -{ - pci_unregister_driver(&rt61pci_driver); -} - -module_init(rt61pci_init); -module_exit(rt61pci_exit); +module_pci_driver(rt61pci_driver); diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index e477a964081..155136691a3 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -2412,6 +2412,7 @@ static struct usb_device_id rt73usb_device_table[] = { { USB_DEVICE(0x0b05, 0x1723) }, { USB_DEVICE(0x0b05, 0x1724) }, /* Belkin */ + { USB_DEVICE(0x050d, 0x7050) }, /* FCC ID: K7SF5D7050B ver. 
3.x */ { USB_DEVICE(0x050d, 0x705a) }, { USB_DEVICE(0x050d, 0x905b) }, { USB_DEVICE(0x050d, 0x905c) }, diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c index 2f14a5fb0cb..2bebcb71a1e 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c @@ -1173,15 +1173,4 @@ static struct pci_driver rtl8180_driver = { #endif /* CONFIG_PM */ }; -static int __init rtl8180_init(void) -{ - return pci_register_driver(&rtl8180_driver); -} - -static void __exit rtl8180_exit(void) -{ - pci_unregister_driver(&rtl8180_driver); -} - -module_init(rtl8180_init); -module_exit(rtl8180_exit); +module_pci_driver(rtl8180_driver); diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c index cf53ac9d6f2..d8114962b0c 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c @@ -294,6 +294,7 @@ static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb) hdr->retry = cpu_to_le32((info->control.rates[0].count - 1) << 8); hdr->tx_duration = ieee80211_generic_frame_duration(dev, priv->vif, + info->band, skb->len, txrate); buf = hdr; diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index e54488db0e1..f4c852c6749 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c @@ -1460,7 +1460,7 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len) return; /* and only beacons from the associated BSSID, please */ - if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid)) + if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid)) return; if (rtl_find_221_ie(hw, data, len)) diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c index 5c7d57947d2..3d8cc4a0c86 100644 --- a/drivers/net/wireless/rtlwifi/cam.c +++ b/drivers/net/wireless/rtlwifi/cam.c @@ -328,10 +328,9 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr) RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, "sta_addr is NULL\n"); } - if ((sta_addr[0]|sta_addr[1]|sta_addr[2]|sta_addr[3]|\ - sta_addr[4]|sta_addr[5]) == 0) { + if (is_zero_ether_addr(sta_addr)) { RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, - "sta_addr is 00:00:00:00:00:00\n"); + "sta_addr is %pM\n", sta_addr); return; } /* Does STA already exist? 
*/ diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 67f9430ee19..2062ea1d7c8 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -34,6 +34,7 @@ #include "ps.h" #include "efuse.h" #include <linux/export.h> +#include <linux/kmemleak.h> static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = { PCI_VENDOR_ID_INTEL, @@ -1099,6 +1100,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw) u32 bufferaddress; if (!skb) return 0; + kmemleak_not_leak(skb); entry = &rtlpci->rx_ring[rx_queue_idx].desc[i]; /*skb->dev = dev; */ diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c index 5b9c3b5e8c9..5ae26647f34 100644 --- a/drivers/net/wireless/rtlwifi/ps.c +++ b/drivers/net/wireless/rtlwifi/ps.c @@ -480,7 +480,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len) return; /* and only beacons from the associated BSSID, please */ - if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid)) + if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid)) return; rtlpriv->psc.last_beacon = jiffies; diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c index c66f08a0524..d5cbf01da8a 100644 --- a/drivers/net/wireless/rtlwifi/rc.c +++ b/drivers/net/wireless/rtlwifi/rc.c @@ -225,8 +225,7 @@ static void rtl_rate_init(void *ppriv, static void rtl_rate_update(void *ppriv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta, - u32 changed, - enum nl80211_channel_type oper_chan_type) + u32 changed) { } diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c index 1208b753f62..f7f48c7ac85 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c @@ -33,9 +33,6 @@ #include "../pci.h" #include "../base.h" -struct dig_t dm_digtable; -static struct ps_t dm_pstable; - #define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1) #define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1) #define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1) @@ -163,33 +160,37 @@ static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = { static void rtl92c_dm_diginit(struct ieee80211_hw *hw) { - dm_digtable.dig_enable_flag = true; - dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; - dm_digtable.cur_igvalue = 0x20; - dm_digtable.pre_igvalue = 0x0; - dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT; - dm_digtable.presta_connectstate = DIG_STA_DISCONNECT; - dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT; - dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW; - dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH; - dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW; - dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH; - dm_digtable.rx_gain_range_max = DM_DIG_MAX; - dm_digtable.rx_gain_range_min = DM_DIG_MIN; - dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT; - dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX; - dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN; - dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX; - dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; + + dm_digtable->dig_enable_flag = true; + dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; + dm_digtable->cur_igvalue = 0x20; + dm_digtable->pre_igvalue = 0x0; + dm_digtable->cursta_connectctate = 
DIG_STA_DISCONNECT; + dm_digtable->presta_connectstate = DIG_STA_DISCONNECT; + dm_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT; + dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW; + dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH; + dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW; + dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH; + dm_digtable->rx_gain_range_max = DM_DIG_MAX; + dm_digtable->rx_gain_range_min = DM_DIG_MIN; + dm_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT; + dm_digtable->backoff_val_range_max = DM_DIG_BACKOFF_MAX; + dm_digtable->backoff_val_range_min = DM_DIG_BACKOFF_MIN; + dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX; + dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX; } static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; long rssi_val_min = 0; - if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) && - (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) { + if ((dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) && + (dm_digtable->cursta_connectctate == DIG_STA_CONNECT)) { if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0) rssi_val_min = (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb > @@ -198,10 +199,10 @@ static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw) rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; else rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; - } else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT || - dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) { + } else if (dm_digtable->cursta_connectctate == DIG_STA_CONNECT || + dm_digtable->cursta_connectctate == DIG_STA_BEFORE_CONNECT) { rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; - } else if (dm_digtable.curmultista_connectstate == + } else if (dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) { rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; } @@ -260,7 +261,8 @@ static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 value_igi = dm_digtable.cur_igvalue; + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; + u8 value_igi = dm_digtable->cur_igvalue; if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0) value_igi--; @@ -277,43 +279,44 @@ static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw) if (rtlpriv->falsealm_cnt.cnt_all > 10000) value_igi = 0x32; - dm_digtable.cur_igvalue = value_igi; + dm_digtable->cur_igvalue = value_igi; rtl92c_dm_write_dig(hw); } static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; - if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) { - if ((dm_digtable.backoff_val - 2) < - dm_digtable.backoff_val_range_min) - dm_digtable.backoff_val = - dm_digtable.backoff_val_range_min; + if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable->fa_highthresh) { + if ((dm_digtable->backoff_val - 2) < + dm_digtable->backoff_val_range_min) + dm_digtable->backoff_val = + dm_digtable->backoff_val_range_min; else - dm_digtable.backoff_val -= 2; - } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) { - if ((dm_digtable.backoff_val + 2) > - dm_digtable.backoff_val_range_max) - dm_digtable.backoff_val = - dm_digtable.backoff_val_range_max; + dm_digtable->backoff_val -= 2; + } else 
if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable->fa_lowthresh) { + if ((dm_digtable->backoff_val + 2) > + dm_digtable->backoff_val_range_max) + dm_digtable->backoff_val = + dm_digtable->backoff_val_range_max; else - dm_digtable.backoff_val += 2; + dm_digtable->backoff_val += 2; } - if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) > - dm_digtable.rx_gain_range_max) - dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max; - else if ((dm_digtable.rssi_val_min + 10 - - dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min) - dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min; + if ((dm_digtable->rssi_val_min + 10 - dm_digtable->backoff_val) > + dm_digtable->rx_gain_range_max) + dm_digtable->cur_igvalue = dm_digtable->rx_gain_range_max; + else if ((dm_digtable->rssi_val_min + 10 - + dm_digtable->backoff_val) < dm_digtable->rx_gain_range_min) + dm_digtable->cur_igvalue = dm_digtable->rx_gain_range_min; else - dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 - - dm_digtable.backoff_val; + dm_digtable->cur_igvalue = dm_digtable->rssi_val_min + 10 - + dm_digtable->backoff_val; RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "rssi_val_min = %x backoff_val %x\n", - dm_digtable.rssi_val_min, dm_digtable.backoff_val); + dm_digtable->rssi_val_min, dm_digtable->backoff_val); rtl92c_dm_write_dig(hw); } @@ -322,6 +325,7 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw) { static u8 initialized; /* initialized to false */ struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; bool multi_sta = false; @@ -330,68 +334,69 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw) multi_sta = true; if (!multi_sta || - dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) { + dm_digtable->cursta_connectctate != DIG_STA_DISCONNECT) { initialized = false; - dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; + dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; return; } else if (initialized == false) { initialized = true; - dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0; - dm_digtable.cur_igvalue = 0x20; + dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0; + dm_digtable->cur_igvalue = 0x20; rtl92c_dm_write_dig(hw); } - if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) { - if ((rssi_strength < dm_digtable.rssi_lowthresh) && - (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) { + if (dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) { + if ((rssi_strength < dm_digtable->rssi_lowthresh) && + (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) { - if (dm_digtable.dig_ext_port_stage == + if (dm_digtable->dig_ext_port_stage == DIG_EXT_PORT_STAGE_2) { - dm_digtable.cur_igvalue = 0x20; + dm_digtable->cur_igvalue = 0x20; rtl92c_dm_write_dig(hw); } - dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1; - } else if (rssi_strength > dm_digtable.rssi_highthresh) { - dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2; + dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_1; + } else if (rssi_strength > dm_digtable->rssi_highthresh) { + dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_2; rtl92c_dm_ctrl_initgain_by_fa(hw); } - } else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) { - dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0; - dm_digtable.cur_igvalue = 0x20; + } else if (dm_digtable->dig_ext_port_stage != 
DIG_EXT_PORT_STAGE_0) { + dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0; + dm_digtable->cur_igvalue = 0x20; rtl92c_dm_write_dig(hw); } RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "curmultista_connectstate = %x dig_ext_port_stage %x\n", - dm_digtable.curmultista_connectstate, - dm_digtable.dig_ext_port_stage); + dm_digtable->curmultista_connectstate, + dm_digtable->dig_ext_port_stage); } static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "presta_connectstate = %x, cursta_connectctate = %x\n", - dm_digtable.presta_connectstate, - dm_digtable.cursta_connectctate); + dm_digtable->presta_connectstate, + dm_digtable->cursta_connectctate); - if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate - || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT - || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) { + if (dm_digtable->presta_connectstate == dm_digtable->cursta_connectctate + || dm_digtable->cursta_connectctate == DIG_STA_BEFORE_CONNECT + || dm_digtable->cursta_connectctate == DIG_STA_CONNECT) { - if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) { - dm_digtable.rssi_val_min = + if (dm_digtable->cursta_connectctate != DIG_STA_DISCONNECT) { + dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw); rtl92c_dm_ctrl_initgain_by_rssi(hw); } } else { - dm_digtable.rssi_val_min = 0; - dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; - dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT; - dm_digtable.cur_igvalue = 0x20; - dm_digtable.pre_igvalue = 0; + dm_digtable->rssi_val_min = 0; + dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; + dm_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT; + dm_digtable->cur_igvalue = 0x20; + dm_digtable->pre_igvalue = 0; rtl92c_dm_write_dig(hw); } } @@ -400,40 +405,41 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; - if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) { - dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw); + if (dm_digtable->cursta_connectctate == DIG_STA_CONNECT) { + dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw); - if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) { - if (dm_digtable.rssi_val_min <= 25) - dm_digtable.cur_cck_pd_state = + if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) { + if (dm_digtable->rssi_val_min <= 25) + dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_LowRssi; else - dm_digtable.cur_cck_pd_state = + dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_HighRssi; } else { - if (dm_digtable.rssi_val_min <= 20) - dm_digtable.cur_cck_pd_state = + if (dm_digtable->rssi_val_min <= 20) + dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_LowRssi; else - dm_digtable.cur_cck_pd_state = + dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_HighRssi; } } else { - dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX; + dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX; } - if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) { - if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) { + if (dm_digtable->pre_cck_pd_state != dm_digtable->cur_cck_pd_state) { + if (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) { if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800) - dm_digtable.cur_cck_fa_state = + dm_digtable->cur_cck_fa_state = 
CCK_FA_STAGE_High; else - dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low; + dm_digtable->cur_cck_fa_state = CCK_FA_STAGE_Low; - if (dm_digtable.pre_cck_fa_state != - dm_digtable.cur_cck_fa_state) { - if (dm_digtable.cur_cck_fa_state == + if (dm_digtable->pre_cck_fa_state != + dm_digtable->cur_cck_fa_state) { + if (dm_digtable->cur_cck_fa_state == CCK_FA_STAGE_Low) rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83); @@ -441,8 +447,8 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd); - dm_digtable.pre_cck_fa_state = - dm_digtable.cur_cck_fa_state; + dm_digtable->pre_cck_fa_state = + dm_digtable->cur_cck_fa_state; } rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40); @@ -458,11 +464,11 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, MASKBYTE2, 0xd3); } - dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state; + dm_digtable->pre_cck_pd_state = dm_digtable->cur_cck_pd_state; } RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "CCKPDStage=%x\n", - dm_digtable.cur_cck_pd_state); + dm_digtable->cur_cck_pd_state); RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "is92C=%x\n", IS_92C_SERIAL(rtlhal->version)); @@ -470,31 +476,34 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw) { + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); if (mac->act_scanning) return; if (mac->link_state >= MAC80211_LINKED) - dm_digtable.cursta_connectctate = DIG_STA_CONNECT; + dm_digtable->cursta_connectctate = DIG_STA_CONNECT; else - dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT; + dm_digtable->cursta_connectctate = DIG_STA_DISCONNECT; rtl92c_dm_initial_gain_sta(hw); rtl92c_dm_initial_gain_multi_sta(hw); rtl92c_dm_cck_packet_detection_thresh(hw); - dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate; + dm_digtable->presta_connectstate = dm_digtable->cursta_connectctate; } static void rtl92c_dm_dig(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; if (rtlpriv->dm.dm_initialgain_enable == false) return; - if (dm_digtable.dig_enable_flag == false) + if (dm_digtable->dig_enable_flag == false) return; rtl92c_dm_ctrl_initgain_by_twoport(hw); @@ -514,23 +523,24 @@ static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw) void rtl92c_dm_write_dig(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n", - dm_digtable.cur_igvalue, dm_digtable.pre_igvalue, - dm_digtable.backoff_val); + dm_digtable->cur_igvalue, dm_digtable->pre_igvalue, + dm_digtable->backoff_val); - dm_digtable.cur_igvalue += 2; - if (dm_digtable.cur_igvalue > 0x3f) - dm_digtable.cur_igvalue = 0x3f; + dm_digtable->cur_igvalue += 2; + if (dm_digtable->cur_igvalue > 0x3f) + dm_digtable->cur_igvalue = 0x3f; - if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) { + if (dm_digtable->pre_igvalue != dm_digtable->cur_igvalue) { rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, - dm_digtable.cur_igvalue); + dm_digtable->cur_igvalue); rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, - dm_digtable.cur_igvalue); + dm_digtable->cur_igvalue); - dm_digtable.pre_igvalue = dm_digtable.cur_igvalue; + 
dm_digtable->pre_igvalue = dm_digtable->cur_igvalue; } } EXPORT_SYMBOL(rtl92c_dm_write_dig); @@ -1223,15 +1233,20 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw) { - dm_pstable.pre_ccastate = CCA_MAX; - dm_pstable.cur_ccasate = CCA_MAX; - dm_pstable.pre_rfstate = RF_MAX; - dm_pstable.cur_rfstate = RF_MAX; - dm_pstable.rssi_val_min = 0; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct ps_t *dm_pstable = &rtlpriv->dm_pstable; + + dm_pstable->pre_ccastate = CCA_MAX; + dm_pstable->cur_ccasate = CCA_MAX; + dm_pstable->pre_rfstate = RF_MAX; + dm_pstable->cur_rfstate = RF_MAX; + dm_pstable->rssi_val_min = 0; } void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal) { + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct ps_t *dm_pstable = &rtlpriv->dm_pstable; static u8 initialize; static u32 reg_874, reg_c70, reg_85c, reg_a74; @@ -1251,27 +1266,27 @@ void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal) } if (!bforce_in_normal) { - if (dm_pstable.rssi_val_min != 0) { - if (dm_pstable.pre_rfstate == RF_NORMAL) { - if (dm_pstable.rssi_val_min >= 30) - dm_pstable.cur_rfstate = RF_SAVE; + if (dm_pstable->rssi_val_min != 0) { + if (dm_pstable->pre_rfstate == RF_NORMAL) { + if (dm_pstable->rssi_val_min >= 30) + dm_pstable->cur_rfstate = RF_SAVE; else - dm_pstable.cur_rfstate = RF_NORMAL; + dm_pstable->cur_rfstate = RF_NORMAL; } else { - if (dm_pstable.rssi_val_min <= 25) - dm_pstable.cur_rfstate = RF_NORMAL; + if (dm_pstable->rssi_val_min <= 25) + dm_pstable->cur_rfstate = RF_NORMAL; else - dm_pstable.cur_rfstate = RF_SAVE; + dm_pstable->cur_rfstate = RF_SAVE; } } else { - dm_pstable.cur_rfstate = RF_MAX; + dm_pstable->cur_rfstate = RF_MAX; } } else { - dm_pstable.cur_rfstate = RF_NORMAL; + dm_pstable->cur_rfstate = RF_NORMAL; } - if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) { - if (dm_pstable.cur_rfstate == RF_SAVE) { + if (dm_pstable->pre_rfstate != dm_pstable->cur_rfstate) { + if (dm_pstable->cur_rfstate == RF_SAVE) { rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, 0x1C0000, 0x2); rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0); @@ -1293,7 +1308,7 @@ void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal) rtl_set_bbreg(hw, 0x818, BIT(28), 0x0); } - dm_pstable.pre_rfstate = dm_pstable.cur_rfstate; + dm_pstable->pre_rfstate = dm_pstable->cur_rfstate; } } EXPORT_SYMBOL(rtl92c_dm_rf_saving); @@ -1301,36 +1316,37 @@ EXPORT_SYMBOL(rtl92c_dm_rf_saving); static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct ps_t *dm_pstable = &rtlpriv->dm_pstable; struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); if (((mac->link_state == MAC80211_NOLINK)) && (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) { - dm_pstable.rssi_val_min = 0; + dm_pstable->rssi_val_min = 0; RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "Not connected to any\n"); } if (mac->link_state == MAC80211_LINKED) { if (mac->opmode == NL80211_IFTYPE_ADHOC) { - dm_pstable.rssi_val_min = + dm_pstable->rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "AP Client PWDB = 0x%lx\n", - dm_pstable.rssi_val_min); + dm_pstable->rssi_val_min); } else { - dm_pstable.rssi_val_min = + dm_pstable->rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "STA Default Port PWDB = 0x%lx\n", - dm_pstable.rssi_val_min); + 
dm_pstable->rssi_val_min); } } else { - dm_pstable.rssi_val_min = + dm_pstable->rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "AP Ext Port PWDB = 0x%lx\n", - dm_pstable.rssi_val_min); + dm_pstable->rssi_val_min); } if (IS_92C_SERIAL(rtlhal->version)) diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h index 2178e376188..518e208c018 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h +++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h @@ -91,40 +91,6 @@ #define TX_POWER_NEAR_FIELD_THRESH_LVL2 74 #define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 -struct ps_t { - u8 pre_ccastate; - u8 cur_ccasate; - u8 pre_rfstate; - u8 cur_rfstate; - long rssi_val_min; -}; - -struct dig_t { - u8 dig_enable_flag; - u8 dig_ext_port_stage; - u32 rssi_lowthresh; - u32 rssi_highthresh; - u32 fa_lowthresh; - u32 fa_highthresh; - u8 cursta_connectctate; - u8 presta_connectstate; - u8 curmultista_connectstate; - u8 pre_igvalue; - u8 cur_igvalue; - char backoff_val; - char backoff_val_range_max; - char backoff_val_range_min; - u8 rx_gain_range_max; - u8 rx_gain_range_min; - u8 rssi_val_min; - u8 pre_cck_pd_state; - u8 cur_cck_pd_state; - u8 pre_cck_fa_state; - u8 cur_cck_fa_state; - u8 pre_ccastate; - u8 cur_ccasate; -}; - struct swat_t { u8 failure_cnt; u8 try_flag; @@ -189,7 +155,6 @@ enum dm_dig_connect_e { DIG_CONNECT_MAX }; -extern struct dig_t dm_digtable; void rtl92c_dm_init(struct ieee80211_hw *hw); void rtl92c_dm_watchdog(struct ieee80211_hw *hw); void rtl92c_dm_write_dig(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c index c20b3c30f62..692c8ef5ee8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c @@ -34,6 +34,7 @@ #include "../rtl8192ce/def.h" #include "fw_common.h" #include <linux/export.h> +#include <linux/kmemleak.h> static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable) { @@ -776,6 +777,8 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) skb = dev_alloc_skb(totalpacketlen); if (!skb) return; + kmemleak_not_leak(skb); + memcpy((u8 *) skb_put(skb, totalpacketlen), &reserved_page_packet, totalpacketlen); diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c index 4c016241f34..cdcad7d9f15 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c @@ -1881,6 +1881,7 @@ void rtl92c_phy_set_io(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct dig_t dm_digtable = rtlpriv->dm_digtable; RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "--->Cmd(%#x), set_io_inprogress(%d)\n", diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h index 26747fa8600..d4a3d032c7b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h @@ -86,40 +86,6 @@ #define TX_POWER_NEAR_FIELD_THRESH_LVL2 74 #define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 -struct ps_t { - u8 pre_ccastate; - u8 cur_ccasate; - u8 pre_rfstate; - u8 cur_rfstate; - long rssi_val_min; -}; - -struct dig_t { - u8 dig_enable_flag; - u8 dig_ext_port_stage; - u32 rssi_lowthresh; - u32 rssi_highthresh; - u32 fa_lowthresh; - u32 fa_highthresh; - u8 cursta_connectctate; - 
u8 presta_connectstate; - u8 curmultista_connectstate; - u8 pre_igvalue; - u8 cur_igvalue; - char backoff_val; - char backoff_val_range_max; - char backoff_val_range_min; - u8 rx_gain_range_max; - u8 rx_gain_range_min; - u8 rssi_val_min; - u8 pre_cck_pd_state; - u8 cur_cck_pd_state; - u8 pre_cck_fa_state; - u8 cur_cck_fa_state; - u8 pre_ccastate; - u8 cur_ccasate; -}; - struct swat_t { u8 failure_cnt; u8 try_flag; @@ -184,7 +150,6 @@ enum dm_dig_connect_e { DIG_CONNECT_MAX }; -extern struct dig_t dm_digtable; void rtl92c_dm_init(struct ieee80211_hw *hw); void rtl92c_dm_watchdog(struct ieee80211_hw *hw); void rtl92c_dm_write_dig(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c index 2c3b73366cd..3aa927f8b9b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c @@ -389,21 +389,4 @@ static struct pci_driver rtl92ce_driver = { .driver.pm = &rtlwifi_pm_ops, }; -static int __init rtl92ce_module_init(void) -{ - int ret; - - ret = pci_register_driver(&rtl92ce_driver); - if (ret) - RT_ASSERT(false, "No device found\n"); - - return ret; -} - -static void __exit rtl92ce_module_exit(void) -{ - pci_unregister_driver(&rtl92ce_driver); -} - -module_init(rtl92ce_module_init); -module_exit(rtl92ce_module_exit); +module_pci_driver(rtl92ce_driver); diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c index 37b13636a77..3af874e6959 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c @@ -508,14 +508,14 @@ static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw, packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) && - (!compare_ether_addr(mac->bssid, - (c_fc & IEEE80211_FCTL_TODS) ? - hdr->addr1 : (c_fc & IEEE80211_FCTL_FROMDS) ? - hdr->addr2 : hdr->addr3)) && + ether_addr_equal(mac->bssid, + (c_fc & IEEE80211_FCTL_TODS) ? hdr->addr1 : + (c_fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : + hdr->addr3) && (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv)); packet_toself = packet_matchbssid && - (!compare_ether_addr(praddr, rtlefuse->dev_addr)); + ether_addr_equal(praddr, rtlefuse->dev_addr); if (ieee80211_is_beacon(fc)) packet_beacon = true; diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h index efb9ab27040..c4adb977736 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h @@ -530,12 +530,7 @@ SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val) #define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ -do { \ - if (_size > TX_DESC_NEXT_DESC_OFFSET) \ - memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \ - else \ - memset(__pdesc, 0, _size); \ -} while (0); + memset(__pdesc, 0, min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET)) struct rx_fwinfo_92c { u8 gain_trsw[4]; diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c index 025bdc2eba4..7e91c76582e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c @@ -1099,14 +1099,14 @@ void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw, praddr = hdr->addr1; packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) && - (!compare_ether_addr(mac->bssid, - (cpu_fc & IEEE80211_FCTL_TODS) ? - hdr->addr1 : (cpu_fc & IEEE80211_FCTL_FROMDS) ? 
- hdr->addr2 : hdr->addr3)) && + ether_addr_equal(mac->bssid, + (cpu_fc & IEEE80211_FCTL_TODS) ? hdr->addr1 : + (cpu_fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : + hdr->addr3) && (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv)); packet_toself = packet_matchbssid && - (!compare_ether_addr(praddr, rtlefuse->dev_addr)); + ether_addr_equal(praddr, rtlefuse->dev_addr); if (ieee80211_is_beacon(fc)) packet_beacon = true; _rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo, diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 82c85286ab2..7737fb0c666 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -338,6 +338,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(0x2019, 0x1201, rtl92cu_hal_cfg)}, /*Planex-Vencer*/ /****** 8192CU ********/ + {RTL_USB_DEVICE(0x050d, 0x1004, rtl92cu_hal_cfg)}, /*Belcom-SurfN300*/ {RTL_USB_DEVICE(0x050d, 0x2102, rtl92cu_hal_cfg)}, /*Belcom-Sercomm*/ {RTL_USB_DEVICE(0x050d, 0x2103, rtl92cu_hal_cfg)}, /*Belcom-Edimax*/ {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/ diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/rtlwifi/rtl8192de/def.h index eafdf76ed64..939c905f547 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/def.h +++ b/drivers/net/wireless/rtlwifi/rtl8192de/def.h @@ -151,9 +151,6 @@ enum version_8192d { /* for 92D */ #define CHIP_92D_SINGLEPHY BIT(9) -#define C_CUT_VERSION BIT(13) -#define D_CUT_VERSION ((BIT(12)|BIT(13))) -#define E_CUT_VERSION BIT(14) /* Chip specific */ #define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3) @@ -173,7 +170,10 @@ enum version_8192d { #define RF_TYPE_1T2R BIT(4) #define RF_TYPE_2T2R BIT(5) #define CHIP_VENDOR_UMC BIT(7) -#define B_CUT_VERSION BIT(12) +#define CHIP_92D_B_CUT BIT(12) +#define CHIP_92D_C_CUT BIT(13) +#define CHIP_92D_D_CUT (BIT(13)|BIT(12)) +#define CHIP_92D_E_CUT BIT(14) /* MASK */ #define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2)) @@ -205,15 +205,13 @@ enum version_8192d { CHIP_92D) ? true : false) #define IS_92D_C_CUT(version) ((IS_92D(version)) ? \ ((GET_CVID_CUT_VERSION(version) == \ - 0x2000) ? true : false) : false) + CHIP_92D_C_CUT) ? true : false) : false) #define IS_92D_D_CUT(version) ((IS_92D(version)) ? \ ((GET_CVID_CUT_VERSION(version) == \ - 0x3000) ? true : false) : false) + CHIP_92D_D_CUT) ? true : false) : false) #define IS_92D_E_CUT(version) ((IS_92D(version)) ? \ ((GET_CVID_CUT_VERSION(version) == \ - 0x4000) ? true : false) : false) -#define CHIP_92D_C_CUT BIT(10) -#define CHIP_92D_D_CUT BIT(11) + CHIP_92D_E_CUT) ? 
true : false) : false) enum rf_optype { RF_OP_BY_SW_3WIRE = 0, diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c index 4737018c9da..a7d63a84551 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c @@ -37,8 +37,6 @@ #define UNDEC_SM_PWDB entry_min_undecoratedsmoothed_pwdb -struct dig_t de_digtable; - static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = { 0x7f8001fe, /* 0, +6.0dB */ 0x788001e2, /* 1, +5.5dB */ @@ -159,27 +157,30 @@ static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = { static void rtl92d_dm_diginit(struct ieee80211_hw *hw) { - de_digtable.dig_enable_flag = true; - de_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; - de_digtable.cur_igvalue = 0x20; - de_digtable.pre_igvalue = 0x0; - de_digtable.cursta_connectctate = DIG_STA_DISCONNECT; - de_digtable.presta_connectstate = DIG_STA_DISCONNECT; - de_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT; - de_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW; - de_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH; - de_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW; - de_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH; - de_digtable.rx_gain_range_max = DM_DIG_FA_UPPER; - de_digtable.rx_gain_range_min = DM_DIG_FA_LOWER; - de_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT; - de_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX; - de_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN; - de_digtable.pre_cck_pd_state = CCK_PD_STAGE_LOWRSSI; - de_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX; - de_digtable.large_fa_hit = 0; - de_digtable.recover_cnt = 0; - de_digtable.forbidden_igi = DM_DIG_FA_LOWER; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *de_digtable = &rtlpriv->dm_digtable; + + de_digtable->dig_enable_flag = true; + de_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; + de_digtable->cur_igvalue = 0x20; + de_digtable->pre_igvalue = 0x0; + de_digtable->cursta_connectctate = DIG_STA_DISCONNECT; + de_digtable->presta_connectstate = DIG_STA_DISCONNECT; + de_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT; + de_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW; + de_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH; + de_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW; + de_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH; + de_digtable->rx_gain_range_max = DM_DIG_FA_UPPER; + de_digtable->rx_gain_range_min = DM_DIG_FA_LOWER; + de_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT; + de_digtable->backoff_val_range_max = DM_DIG_BACKOFF_MAX; + de_digtable->backoff_val_range_min = DM_DIG_BACKOFF_MIN; + de_digtable->pre_cck_pd_state = CCK_PD_STAGE_LOWRSSI; + de_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX; + de_digtable->large_fa_hit = 0; + de_digtable->recover_cnt = 0; + de_digtable->forbidden_igi = DM_DIG_FA_LOWER; } static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) @@ -266,68 +267,70 @@ static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *de_digtable = &rtlpriv->dm_digtable; struct rtl_mac *mac = rtl_mac(rtlpriv); /* Determine the minimum RSSI */ if ((mac->link_state < MAC80211_LINKED) && (rtlpriv->dm.UNDEC_SM_PWDB == 0)) { - de_digtable.min_undecorated_pwdb_for_dm = 0; + de_digtable->min_undecorated_pwdb_for_dm = 0; RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, "Not connected to any\n"); } if (mac->link_state >= 
MAC80211_LINKED) { if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC) { - de_digtable.min_undecorated_pwdb_for_dm = + de_digtable->min_undecorated_pwdb_for_dm = rtlpriv->dm.UNDEC_SM_PWDB; RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, "AP Client PWDB = 0x%lx\n", rtlpriv->dm.UNDEC_SM_PWDB); } else { - de_digtable.min_undecorated_pwdb_for_dm = + de_digtable->min_undecorated_pwdb_for_dm = rtlpriv->dm.undecorated_smoothed_pwdb; RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, "STA Default Port PWDB = 0x%x\n", - de_digtable.min_undecorated_pwdb_for_dm); + de_digtable->min_undecorated_pwdb_for_dm); } } else { - de_digtable.min_undecorated_pwdb_for_dm = + de_digtable->min_undecorated_pwdb_for_dm = rtlpriv->dm.UNDEC_SM_PWDB; RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, "AP Ext Port or disconnect PWDB = 0x%x\n", - de_digtable.min_undecorated_pwdb_for_dm); + de_digtable->min_undecorated_pwdb_for_dm); } RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n", - de_digtable.min_undecorated_pwdb_for_dm); + de_digtable->min_undecorated_pwdb_for_dm); } static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *de_digtable = &rtlpriv->dm_digtable; unsigned long flag = 0; - if (de_digtable.cursta_connectctate == DIG_STA_CONNECT) { - if (de_digtable.pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) { - if (de_digtable.min_undecorated_pwdb_for_dm <= 25) - de_digtable.cur_cck_pd_state = + if (de_digtable->cursta_connectctate == DIG_STA_CONNECT) { + if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) { + if (de_digtable->min_undecorated_pwdb_for_dm <= 25) + de_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI; else - de_digtable.cur_cck_pd_state = + de_digtable->cur_cck_pd_state = CCK_PD_STAGE_HIGHRSSI; } else { - if (de_digtable.min_undecorated_pwdb_for_dm <= 20) - de_digtable.cur_cck_pd_state = + if (de_digtable->min_undecorated_pwdb_for_dm <= 20) + de_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI; else - de_digtable.cur_cck_pd_state = + de_digtable->cur_cck_pd_state = CCK_PD_STAGE_HIGHRSSI; } } else { - de_digtable.cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI; + de_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI; } - if (de_digtable.pre_cck_pd_state != de_digtable.cur_cck_pd_state) { - if (de_digtable.cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) { + if (de_digtable->pre_cck_pd_state != de_digtable->cur_cck_pd_state) { + if (de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) { rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0x83); rtl92d_release_cckandrw_pagea_ctl(hw, &flag); @@ -336,13 +339,13 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0xcd); rtl92d_release_cckandrw_pagea_ctl(hw, &flag); } - de_digtable.pre_cck_pd_state = de_digtable.cur_cck_pd_state; + de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state; } RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n", - de_digtable.cursta_connectctate == DIG_STA_CONNECT ? + de_digtable->cursta_connectctate == DIG_STA_CONNECT ? "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT"); RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n", - de_digtable.cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ? + de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ? 
"Low RSSI " : "High RSSI "); RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n", IS_92D_SINGLEPHY(rtlpriv->rtlhal.version)); @@ -352,37 +355,40 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) void rtl92d_dm_write_dig(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *de_digtable = &rtlpriv->dm_digtable; RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n", - de_digtable.cur_igvalue, de_digtable.pre_igvalue, - de_digtable.backoff_val); - if (de_digtable.dig_enable_flag == false) { + de_digtable->cur_igvalue, de_digtable->pre_igvalue, + de_digtable->backoff_val); + if (de_digtable->dig_enable_flag == false) { RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n"); - de_digtable.pre_igvalue = 0x17; + de_digtable->pre_igvalue = 0x17; return; } - if (de_digtable.pre_igvalue != de_digtable.cur_igvalue) { + if (de_digtable->pre_igvalue != de_digtable->cur_igvalue) { rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, - de_digtable.cur_igvalue); + de_digtable->cur_igvalue); rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, - de_digtable.cur_igvalue); - de_digtable.pre_igvalue = de_digtable.cur_igvalue; + de_digtable->cur_igvalue); + de_digtable->pre_igvalue = de_digtable->cur_igvalue; } } static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv) { + struct dig_t *de_digtable = &rtlpriv->dm_digtable; + if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) && (rtlpriv->mac80211.vendor == PEER_CISCO)) { RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n"); - if (de_digtable.last_min_undecorated_pwdb_for_dm >= 50 - && de_digtable.min_undecorated_pwdb_for_dm < 50) { + if (de_digtable->last_min_undecorated_pwdb_for_dm >= 50 + && de_digtable->min_undecorated_pwdb_for_dm < 50) { rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00); RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode Off\n"); - } else if (de_digtable.last_min_undecorated_pwdb_for_dm <= 55 && - de_digtable.min_undecorated_pwdb_for_dm > 55) { + } else if (de_digtable->last_min_undecorated_pwdb_for_dm <= 55 && + de_digtable->min_undecorated_pwdb_for_dm > 55) { rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f); RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode On\n"); @@ -396,14 +402,15 @@ static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv) static void rtl92d_dm_dig(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 value_igi = de_digtable.cur_igvalue; + struct dig_t *de_digtable = &rtlpriv->dm_digtable; + u8 value_igi = de_digtable->cur_igvalue; struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n"); if (rtlpriv->rtlhal.earlymode_enable) { rtl92d_early_mode_enabled(rtlpriv); - de_digtable.last_min_undecorated_pwdb_for_dm = - de_digtable.min_undecorated_pwdb_for_dm; + de_digtable->last_min_undecorated_pwdb_for_dm = + de_digtable->min_undecorated_pwdb_for_dm; } if (!rtlpriv->dm.dm_initialgain_enable) return; @@ -421,9 +428,9 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw) RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n"); /* Decide the current status and if modify initial gain or not */ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) - de_digtable.cursta_connectctate = DIG_STA_CONNECT; + de_digtable->cursta_connectctate = DIG_STA_CONNECT; else - de_digtable.cursta_connectctate = DIG_STA_DISCONNECT; + de_digtable->cursta_connectctate = DIG_STA_DISCONNECT; /* adjust initial gain according to false alarm counter */ 
if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0) @@ -436,64 +443,64 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw) value_igi += 2; RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n", - de_digtable.large_fa_hit, de_digtable.forbidden_igi); + de_digtable->large_fa_hit, de_digtable->forbidden_igi); RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "dm_DIG() Before: Recover_cnt=%d, rx_gain_range_min=%x\n", - de_digtable.recover_cnt, de_digtable.rx_gain_range_min); + de_digtable->recover_cnt, de_digtable->rx_gain_range_min); /* deal with abnorally large false alarm */ if (falsealm_cnt->cnt_all > 10000) { RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "dm_DIG(): Abnormally false alarm case\n"); - de_digtable.large_fa_hit++; - if (de_digtable.forbidden_igi < de_digtable.cur_igvalue) { - de_digtable.forbidden_igi = de_digtable.cur_igvalue; - de_digtable.large_fa_hit = 1; + de_digtable->large_fa_hit++; + if (de_digtable->forbidden_igi < de_digtable->cur_igvalue) { + de_digtable->forbidden_igi = de_digtable->cur_igvalue; + de_digtable->large_fa_hit = 1; } - if (de_digtable.large_fa_hit >= 3) { - if ((de_digtable.forbidden_igi + 1) > DM_DIG_MAX) - de_digtable.rx_gain_range_min = DM_DIG_MAX; + if (de_digtable->large_fa_hit >= 3) { + if ((de_digtable->forbidden_igi + 1) > DM_DIG_MAX) + de_digtable->rx_gain_range_min = DM_DIG_MAX; else - de_digtable.rx_gain_range_min = - (de_digtable.forbidden_igi + 1); - de_digtable.recover_cnt = 3600; /* 3600=2hr */ + de_digtable->rx_gain_range_min = + (de_digtable->forbidden_igi + 1); + de_digtable->recover_cnt = 3600; /* 3600=2hr */ } } else { /* Recovery mechanism for IGI lower bound */ - if (de_digtable.recover_cnt != 0) { - de_digtable.recover_cnt--; + if (de_digtable->recover_cnt != 0) { + de_digtable->recover_cnt--; } else { - if (de_digtable.large_fa_hit == 0) { - if ((de_digtable.forbidden_igi - 1) < + if (de_digtable->large_fa_hit == 0) { + if ((de_digtable->forbidden_igi - 1) < DM_DIG_FA_LOWER) { - de_digtable.forbidden_igi = + de_digtable->forbidden_igi = DM_DIG_FA_LOWER; - de_digtable.rx_gain_range_min = + de_digtable->rx_gain_range_min = DM_DIG_FA_LOWER; } else { - de_digtable.forbidden_igi--; - de_digtable.rx_gain_range_min = - (de_digtable.forbidden_igi + 1); + de_digtable->forbidden_igi--; + de_digtable->rx_gain_range_min = + (de_digtable->forbidden_igi + 1); } - } else if (de_digtable.large_fa_hit == 3) { - de_digtable.large_fa_hit = 0; + } else if (de_digtable->large_fa_hit == 3) { + de_digtable->large_fa_hit = 0; } } } RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n", - de_digtable.large_fa_hit, de_digtable.forbidden_igi); + de_digtable->large_fa_hit, de_digtable->forbidden_igi); RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "dm_DIG() After: recover_cnt=%d, rx_gain_range_min=%x\n", - de_digtable.recover_cnt, de_digtable.rx_gain_range_min); + de_digtable->recover_cnt, de_digtable->rx_gain_range_min); if (value_igi > DM_DIG_MAX) value_igi = DM_DIG_MAX; - else if (value_igi < de_digtable.rx_gain_range_min) - value_igi = de_digtable.rx_gain_range_min; - de_digtable.cur_igvalue = value_igi; + else if (value_igi < de_digtable->rx_gain_range_min) + value_igi = de_digtable->rx_gain_range_min; + de_digtable->cur_igvalue = value_igi; rtl92d_dm_write_dig(hw); if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) rtl92d_dm_cck_packet_detection_thresh(hw); diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/rtlwifi/rtl8192de/dm.h index 91030ec8ac3..3fea0c11c24 100644 --- 
a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h +++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.h @@ -87,55 +87,6 @@ #define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 #define INDEX_MAPPING_NUM 13 -struct ps_t { - u8 pre_ccastate; - u8 cur_ccasate; - - u8 pre_rfstate; - u8 cur_rfstate; - - long rssi_val_min; -}; - -struct dig_t { - u8 dig_enable_flag; - u8 dig_ext_port_stage; - - u32 rssi_lowthresh; - u32 rssi_highthresh; - - u32 fa_lowthresh; - u32 fa_highthresh; - - u8 cursta_connectctate; - u8 presta_connectstate; - u8 curmultista_connectstate; - - u8 pre_igvalue; - u8 cur_igvalue; - - char backoff_val; - char backoff_val_range_max; - char backoff_val_range_min; - u8 rx_gain_range_max; - u8 rx_gain_range_min; - u8 min_undecorated_pwdb_for_dm; - long last_min_undecorated_pwdb_for_dm; - - u8 pre_cck_pd_state; - u8 cur_cck_pd_state; - - u8 pre_cck_fa_state; - u8 cur_cck_fa_state; - - u8 pre_ccastate; - u8 cur_ccasate; - - u8 large_fa_hit; - u8 forbidden_igi; - u32 recover_cnt; -}; - struct swat { u8 failure_cnt; u8 try_flag; @@ -200,8 +151,6 @@ enum dm_dig_connect { DIG_CONNECT_MAX }; -extern struct dig_t de_digtable; - void rtl92d_dm_init(struct ieee80211_hw *hw); void rtl92d_dm_watchdog(struct ieee80211_hw *hw); void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c index 509f5af38ad..b338d526c42 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c @@ -1743,9 +1743,13 @@ static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw) chipver |= CHIP_92D_D_CUT; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "D-CUT!!!\n"); break; + case 0xCC33: + chipver |= CHIP_92D_E_CUT; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "E-CUT!!!\n"); + break; default: chipver |= CHIP_92D_D_CUT; - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unkown CUT!\n"); + RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown CUT!\n"); break; } rtlpriv->rtlhal.version = chipver; diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c index 28fc5fb8057..18380a7829f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c @@ -3064,6 +3064,7 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw) static void rtl92d_phy_set_io(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *de_digtable = &rtlpriv->dm_digtable; struct rtl_phy *rtlphy = &(rtlpriv->phy); RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, @@ -3071,13 +3072,13 @@ static void rtl92d_phy_set_io(struct ieee80211_hw *hw) rtlphy->current_io_type, rtlphy->set_io_inprogress); switch (rtlphy->current_io_type) { case IO_CMD_RESUME_DM_BY_SCAN: - de_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1; + de_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1; rtl92d_dm_write_dig(hw); rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel); break; case IO_CMD_PAUSE_DM_BY_SCAN: - rtlphy->initgain_backup.xaagccore1 = de_digtable.cur_igvalue; - de_digtable.cur_igvalue = 0x37; + rtlphy->initgain_backup.xaagccore1 = de_digtable->cur_igvalue; + de_digtable->cur_igvalue = 0x37; rtl92d_dm_write_dig(hw); break; default: diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c index a7f6126e2f8..1666ef7fd87 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c @@ -466,12 +466,13 @@ static void 
_rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw, type = WLAN_FC_GET_TYPE(fc); praddr = hdr->addr1; packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) && - (!compare_ether_addr(mac->bssid, (cfc & IEEE80211_FCTL_TODS) ? - hdr->addr1 : (cfc & IEEE80211_FCTL_FROMDS) ? - hdr->addr2 : hdr->addr3)) && (!pstats->hwerror) && - (!pstats->crc) && (!pstats->icv)); + ether_addr_equal(mac->bssid, + (cfc & IEEE80211_FCTL_TODS) ? hdr->addr1 : + (cfc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : + hdr->addr3) && + (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv)); packet_toself = packet_matchbssid && - (!compare_ether_addr(praddr, rtlefuse->dev_addr)); + ether_addr_equal(praddr, rtlefuse->dev_addr); if (ieee80211_is_beacon(fc)) packet_beacon = true; _rtl92de_query_rxphystatus(hw, pstats, pdesc, p_drvinfo, diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h index 0dc736c2723..057a52431b0 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h +++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h @@ -530,12 +530,8 @@ SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val) #define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ -do { \ - if (_size > TX_DESC_NEXT_DESC_OFFSET) \ - memset((void *)__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \ - else \ - memset((void *)__pdesc, 0, _size); \ -} while (0); + memset((void *)__pdesc, 0, \ + min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET)) /* For 92D early mode */ #define SET_EARLYMODE_PKTNUM(__paddr, __value) \ diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/rtlwifi/rtl8192se/def.h index d1b0a1e1497..20afec62ce0 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/def.h +++ b/drivers/net/wireless/rtlwifi/rtl8192se/def.h @@ -252,12 +252,7 @@ * the desc is cleared. 
*/ #define TX_DESC_NEXT_DESC_OFFSET 36 #define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ -do { \ - if (_size > TX_DESC_NEXT_DESC_OFFSET) \ - memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \ - else \ - memset(__pdesc, 0, _size); \ -} while (0); + memset(__pdesc, 0, min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET)) /* Rx Desc */ #define RX_STATUS_DESC_SIZE 24 diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c index fbabae17259..2e1158026fb 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c @@ -35,7 +35,6 @@ #include "dm.h" #include "fw.h" -struct dig_t digtable; static const u32 edca_setting_dl[PEER_MAX] = { 0xa44f, /* 0 UNKNOWN */ 0x5ea44f, /* 1 REALTEK_90 */ @@ -421,62 +420,64 @@ static void _rtl92s_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) static void rtl92s_backoff_enable_flag(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *digtable = &rtlpriv->dm_digtable; struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); - if (falsealm_cnt->cnt_all > digtable.fa_highthresh) { - if ((digtable.backoff_val - 6) < - digtable.backoffval_range_min) - digtable.backoff_val = digtable.backoffval_range_min; + if (falsealm_cnt->cnt_all > digtable->fa_highthresh) { + if ((digtable->backoff_val - 6) < + digtable->backoffval_range_min) + digtable->backoff_val = digtable->backoffval_range_min; else - digtable.backoff_val -= 6; - } else if (falsealm_cnt->cnt_all < digtable.fa_lowthresh) { - if ((digtable.backoff_val + 6) > - digtable.backoffval_range_max) - digtable.backoff_val = - digtable.backoffval_range_max; + digtable->backoff_val -= 6; + } else if (falsealm_cnt->cnt_all < digtable->fa_lowthresh) { + if ((digtable->backoff_val + 6) > + digtable->backoffval_range_max) + digtable->backoff_val = + digtable->backoffval_range_max; else - digtable.backoff_val += 6; + digtable->backoff_val += 6; } } static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *digtable = &rtlpriv->dm_digtable; struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); static u8 initialized, force_write; u8 initial_gain = 0; - if ((digtable.pre_sta_connectstate == digtable.cur_sta_connectstate) || - (digtable.cur_sta_connectstate == DIG_STA_BEFORE_CONNECT)) { - if (digtable.cur_sta_connectstate == DIG_STA_BEFORE_CONNECT) { + if ((digtable->pre_sta_connectstate == digtable->cur_sta_connectstate) || + (digtable->cur_sta_connectstate == DIG_STA_BEFORE_CONNECT)) { + if (digtable->cur_sta_connectstate == DIG_STA_BEFORE_CONNECT) { if (rtlpriv->psc.rfpwr_state != ERFON) return; - if (digtable.backoff_enable_flag) + if (digtable->backoff_enable_flag) rtl92s_backoff_enable_flag(hw); else - digtable.backoff_val = DM_DIG_BACKOFF; - - if ((digtable.rssi_val + 10 - digtable.backoff_val) > - digtable.rx_gain_range_max) - digtable.cur_igvalue = - digtable.rx_gain_range_max; - else if ((digtable.rssi_val + 10 - digtable.backoff_val) - < digtable.rx_gain_range_min) - digtable.cur_igvalue = - digtable.rx_gain_range_min; + digtable->backoff_val = DM_DIG_BACKOFF; + + if ((digtable->rssi_val + 10 - digtable->backoff_val) > + digtable->rx_gain_range_max) + digtable->cur_igvalue = + digtable->rx_gain_range_max; + else if ((digtable->rssi_val + 10 - digtable->backoff_val) + < digtable->rx_gain_range_min) + digtable->cur_igvalue = + digtable->rx_gain_range_min; else - digtable.cur_igvalue = 
digtable.rssi_val + 10 - - digtable.backoff_val; + digtable->cur_igvalue = digtable->rssi_val + 10 - + digtable->backoff_val; if (falsealm_cnt->cnt_all > 10000) - digtable.cur_igvalue = - (digtable.cur_igvalue > 0x33) ? - digtable.cur_igvalue : 0x33; + digtable->cur_igvalue = + (digtable->cur_igvalue > 0x33) ? + digtable->cur_igvalue : 0x33; if (falsealm_cnt->cnt_all > 16000) - digtable.cur_igvalue = - digtable.rx_gain_range_max; + digtable->cur_igvalue = + digtable->rx_gain_range_max; /* connected -> connected or disconnected -> disconnected */ } else { /* Firmware control DIG, do nothing in driver dm */ @@ -486,31 +487,31 @@ static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw) * disconnected or beforeconnect->(dis)connected */ } else { /* Enable FW DIG */ - digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; + digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_ENABLE); - digtable.backoff_val = DM_DIG_BACKOFF; - digtable.cur_igvalue = rtlpriv->phy.default_initialgain[0]; - digtable.pre_igvalue = 0; + digtable->backoff_val = DM_DIG_BACKOFF; + digtable->cur_igvalue = rtlpriv->phy.default_initialgain[0]; + digtable->pre_igvalue = 0; return; } /* Forced writing to prevent from fw-dig overwriting. */ - if (digtable.pre_igvalue != rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, + if (digtable->pre_igvalue != rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0)) force_write = 1; - if ((digtable.pre_igvalue != digtable.cur_igvalue) || + if ((digtable->pre_igvalue != digtable->cur_igvalue) || !initialized || force_write) { /* Disable FW DIG */ rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_DISABLE); - initial_gain = (u8)digtable.cur_igvalue; + initial_gain = (u8)digtable->cur_igvalue; /* Set initial gain. */ rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0, initial_gain); rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0, initial_gain); - digtable.pre_igvalue = digtable.cur_igvalue; + digtable->pre_igvalue = digtable->cur_igvalue; initialized = 1; force_write = 0; } @@ -519,6 +520,7 @@ static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw) static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *digtable = &rtlpriv->dm_digtable; if (rtlpriv->mac80211.act_scanning) return; @@ -526,17 +528,17 @@ static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw) /* Decide the current status and if modify initial gain or not */ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED || rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC) - digtable.cur_sta_connectstate = DIG_STA_CONNECT; + digtable->cur_sta_connectstate = DIG_STA_CONNECT; else - digtable.cur_sta_connectstate = DIG_STA_DISCONNECT; + digtable->cur_sta_connectstate = DIG_STA_DISCONNECT; - digtable.rssi_val = rtlpriv->dm.undecorated_smoothed_pwdb; + digtable->rssi_val = rtlpriv->dm.undecorated_smoothed_pwdb; /* Change dig mode to rssi */ - if (digtable.cur_sta_connectstate != DIG_STA_DISCONNECT) { - if (digtable.dig_twoport_algorithm == + if (digtable->cur_sta_connectstate != DIG_STA_DISCONNECT) { + if (digtable->dig_twoport_algorithm == DIG_TWO_PORT_ALGO_FALSE_ALARM) { - digtable.dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI; + digtable->dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI; rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_MODE_SS); } } @@ -544,13 +546,14 @@ static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw) _rtl92s_dm_false_alarm_counter_statistics(hw); 
_rtl92s_dm_initial_gain_sta_beforeconnect(hw); - digtable.pre_sta_connectstate = digtable.cur_sta_connectstate; + digtable->pre_sta_connectstate = digtable->cur_sta_connectstate; } static void _rtl92s_dm_ctrl_initgain_byrssi(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct dig_t *digtable = &rtlpriv->dm_digtable; /* 2T2R TP issue */ if (rtlphy->rf_type == RF_2T2R) @@ -559,7 +562,7 @@ static void _rtl92s_dm_ctrl_initgain_byrssi(struct ieee80211_hw *hw) if (!rtlpriv->dm.dm_initialgain_enable) return; - if (digtable.dig_enable_flag == false) + if (digtable->dig_enable_flag == false) return; _rtl92s_dm_ctrl_initgain_bytwoport(hw); @@ -639,51 +642,52 @@ static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw) static void _rtl92s_dm_init_dig(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *digtable = &rtlpriv->dm_digtable; /* Disable DIG scheme now.*/ - digtable.dig_enable_flag = true; - digtable.backoff_enable_flag = true; + digtable->dig_enable_flag = true; + digtable->backoff_enable_flag = true; if ((rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) && (hal_get_firmwareversion(rtlpriv) >= 0x3c)) - digtable.dig_algorithm = DIG_ALGO_BY_TOW_PORT; + digtable->dig_algorithm = DIG_ALGO_BY_TOW_PORT; else - digtable.dig_algorithm = + digtable->dig_algorithm = DIG_ALGO_BEFORE_CONNECT_BY_RSSI_AND_ALARM; - digtable.dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI; - digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; - /* off=by real rssi value, on=by digtable.rssi_val for new dig */ - digtable.dig_dbgmode = DM_DBG_OFF; - digtable.dig_slgorithm_switch = 0; + digtable->dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI; + digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; + /* off=by real rssi value, on=by digtable->rssi_val for new dig */ + digtable->dig_dbgmode = DM_DBG_OFF; + digtable->dig_slgorithm_switch = 0; /* 2007/10/04 MH Define init gain threshol. 
*/ - digtable.dig_state = DM_STA_DIG_MAX; - digtable.dig_highpwrstate = DM_STA_DIG_MAX; + digtable->dig_state = DM_STA_DIG_MAX; + digtable->dig_highpwrstate = DM_STA_DIG_MAX; - digtable.cur_sta_connectstate = DIG_STA_DISCONNECT; - digtable.pre_sta_connectstate = DIG_STA_DISCONNECT; - digtable.cur_ap_connectstate = DIG_AP_DISCONNECT; - digtable.pre_ap_connectstate = DIG_AP_DISCONNECT; + digtable->cur_sta_connectstate = DIG_STA_DISCONNECT; + digtable->pre_sta_connectstate = DIG_STA_DISCONNECT; + digtable->cur_ap_connectstate = DIG_AP_DISCONNECT; + digtable->pre_ap_connectstate = DIG_AP_DISCONNECT; - digtable.rssi_lowthresh = DM_DIG_THRESH_LOW; - digtable.rssi_highthresh = DM_DIG_THRESH_HIGH; + digtable->rssi_lowthresh = DM_DIG_THRESH_LOW; + digtable->rssi_highthresh = DM_DIG_THRESH_HIGH; - digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW; - digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH; + digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW; + digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH; - digtable.rssi_highpower_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW; - digtable.rssi_highpower_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH; + digtable->rssi_highpower_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW; + digtable->rssi_highpower_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH; /* for dig debug rssi value */ - digtable.rssi_val = 50; - digtable.backoff_val = DM_DIG_BACKOFF; - digtable.rx_gain_range_max = DM_DIG_MAX; + digtable->rssi_val = 50; + digtable->backoff_val = DM_DIG_BACKOFF; + digtable->rx_gain_range_max = DM_DIG_MAX; - digtable.rx_gain_range_min = DM_DIG_MIN; + digtable->rx_gain_range_min = DM_DIG_MIN; - digtable.backoffval_range_max = DM_DIG_BACKOFF_MAX; - digtable.backoffval_range_min = DM_DIG_BACKOFF_MIN; + digtable->backoffval_range_max = DM_DIG_BACKOFF_MAX; + digtable->backoffval_range_min = DM_DIG_BACKOFF_MIN; } static void _rtl92s_dm_init_dynamic_txpower(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h b/drivers/net/wireless/rtlwifi/rtl8192se/dm.h index e1b19a64176..2e9052c8fe4 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h +++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.h @@ -29,48 +29,6 @@ #ifndef __RTL_92S_DM_H__ #define __RTL_92S_DM_H__ -struct dig_t { - u8 dig_enable_flag; - u8 dig_algorithm; - u8 dig_twoport_algorithm; - u8 dig_ext_port_stage; - u8 dig_dbgmode; - u8 dig_slgorithm_switch; - - long rssi_lowthresh; - long rssi_highthresh; - - u32 fa_lowthresh; - u32 fa_highthresh; - - long rssi_highpower_lowthresh; - long rssi_highpower_highthresh; - - u8 dig_state; - u8 dig_highpwrstate; - u8 cur_sta_connectstate; - u8 pre_sta_connectstate; - u8 cur_ap_connectstate; - u8 pre_ap_connectstate; - - u8 cur_pd_thstate; - u8 pre_pd_thstate; - u8 cur_cs_ratiostate; - u8 pre_cs_ratiostate; - - u32 pre_igvalue; - u32 cur_igvalue; - - u8 backoff_enable_flag; - char backoff_val; - char backoffval_range_max; - char backoffval_range_min; - u8 rx_gain_range_max; - u8 rx_gain_range_min; - - long rssi_val; -}; - enum dm_dig_alg { DIG_ALGO_BY_FALSE_ALARM = 0, DIG_ALGO_BY_RSSI = 1, @@ -154,8 +112,6 @@ enum dm_ratr_sta { #define DM_DIG_BACKOFF_MAX 12 #define DM_DIG_BACKOFF_MIN -4 -extern struct dig_t digtable; - void rtl92s_dm_watchdog(struct ieee80211_hw *hw); void rtl92s_dm_init(struct ieee80211_hw *hw); void rtl92s_dm_init_edca_turbo(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.h b/drivers/net/wireless/rtlwifi/rtl8192se/fw.h index b4afff62643..d53f4332464 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.h +++ 
b/drivers/net/wireless/rtlwifi/rtl8192se/fw.h @@ -345,7 +345,7 @@ enum fw_h2c_cmd { do { \ udelay(1000); \ rtlpriv->rtlhal.fwcmd_iomap &= (~_Bit); \ - } while (0); + } while (0) #define FW_CMD_IO_UPDATE(rtlpriv, _val) \ rtlpriv->rtlhal.fwcmd_iomap = _val; @@ -354,13 +354,13 @@ enum fw_h2c_cmd { do { \ rtl_write_word(rtlpriv, LBUS_MON_ADDR, (u16)_val); \ FW_CMD_IO_UPDATE(rtlpriv, _val); \ - } while (0); + } while (0) #define FW_CMD_PARA_SET(rtlpriv, _val) \ do { \ rtl_write_dword(rtlpriv, LBUS_ADDR_MASK, _val); \ rtlpriv->rtlhal.fwcmd_ioparam = _val; \ - } while (0); + } while (0) #define FW_CMD_IO_QUERY(rtlpriv) \ (u16)(rtlpriv->rtlhal.fwcmd_iomap) diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c index 4a499928e4c..8d7099bc472 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c @@ -1450,6 +1450,7 @@ static void _rtl92s_phy_set_fwcmd_io(struct ieee80211_hw *hw) bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *digtable = &rtlpriv->dm_digtable; struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u32 fw_param = FW_CMD_IO_PARA_QUERY(rtlpriv); @@ -1588,16 +1589,16 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio) FW_SS_CTL); if (rtlpriv->dm.dm_flag & HAL_DM_DIG_DISABLE || - !digtable.dig_enable_flag) + !digtable->dig_enable_flag) fw_cmdmap &= ~FW_DIG_ENABLE_CTL; if ((rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) || rtlpriv->dm.dynamic_txpower_enable) fw_cmdmap &= ~FW_HIGH_PWR_ENABLE_CTL; - if ((digtable.dig_ext_port_stage == + if ((digtable->dig_ext_port_stage == DIG_EXT_PORT_STAGE_0) || - (digtable.dig_ext_port_stage == + (digtable->dig_ext_port_stage == DIG_EXT_PORT_STAGE_1)) fw_cmdmap &= ~FW_DIG_ENABLE_CTL; diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c index f1b36005c6a..730bcc91952 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c @@ -450,21 +450,4 @@ static struct pci_driver rtl92se_driver = { .driver.pm = &rtlwifi_pm_ops, }; -static int __init rtl92se_module_init(void) -{ - int ret = 0; - - ret = pci_register_driver(&rtl92se_driver); - if (ret) - RT_ASSERT(false, "No device found\n"); - - return ret; -} - -static void __exit rtl92se_module_exit(void) -{ - pci_unregister_driver(&rtl92se_driver); -} - -module_init(rtl92se_module_init); -module_exit(rtl92se_module_exit); +module_pci_driver(rtl92se_driver); diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c index 2fd3d13b7ce..812b5858f14 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c @@ -492,13 +492,14 @@ static void _rtl92se_translate_rx_signal_stuff(struct ieee80211_hw *hw, praddr = hdr->addr1; packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) && - (!compare_ether_addr(mac->bssid, (cfc & IEEE80211_FCTL_TODS) ? - hdr->addr1 : (cfc & IEEE80211_FCTL_FROMDS) ? - hdr->addr2 : hdr->addr3)) && (!pstats->hwerror) && - (!pstats->crc) && (!pstats->icv)); + ether_addr_equal(mac->bssid, + (cfc & IEEE80211_FCTL_TODS) ? hdr->addr1 : + (cfc & IEEE80211_FCTL_FROMDS) ? 
hdr->addr2 : + hdr->addr3) && + (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv)); packet_toself = packet_matchbssid && - (!compare_ether_addr(praddr, rtlefuse->dev_addr)); + ether_addr_equal(praddr, rtlefuse->dev_addr); if (ieee80211_is_beacon(fc)) packet_beacon = true; diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index 28ebc69218a..bd816aef26d 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h @@ -1592,6 +1592,65 @@ struct rtl_debug { char proc_name[20]; }; +struct ps_t { + u8 pre_ccastate; + u8 cur_ccasate; + u8 pre_rfstate; + u8 cur_rfstate; + long rssi_val_min; +}; + +struct dig_t { + u32 rssi_lowthresh; + u32 rssi_highthresh; + u32 fa_lowthresh; + u32 fa_highthresh; + long last_min_undecorated_pwdb_for_dm; + long rssi_highpower_lowthresh; + long rssi_highpower_highthresh; + u32 recover_cnt; + u32 pre_igvalue; + u32 cur_igvalue; + long rssi_val; + u8 dig_enable_flag; + u8 dig_ext_port_stage; + u8 dig_algorithm; + u8 dig_twoport_algorithm; + u8 dig_dbgmode; + u8 dig_slgorithm_switch; + u8 cursta_connectctate; + u8 presta_connectstate; + u8 curmultista_connectstate; + char backoff_val; + char backoff_val_range_max; + char backoff_val_range_min; + u8 rx_gain_range_max; + u8 rx_gain_range_min; + u8 min_undecorated_pwdb_for_dm; + u8 rssi_val_min; + u8 pre_cck_pd_state; + u8 cur_cck_pd_state; + u8 pre_cck_fa_state; + u8 cur_cck_fa_state; + u8 pre_ccastate; + u8 cur_ccasate; + u8 large_fa_hit; + u8 forbidden_igi; + u8 dig_state; + u8 dig_highpwrstate; + u8 cur_sta_connectstate; + u8 pre_sta_connectstate; + u8 cur_ap_connectstate; + u8 pre_ap_connectstate; + u8 cur_pd_thstate; + u8 pre_pd_thstate; + u8 cur_cs_ratiostate; + u8 pre_cs_ratiostate; + u8 backoff_enable_flag; + char backoffval_range_max; + char backoffval_range_min; +}; + struct rtl_priv { struct completion firmware_loading_complete; struct rtl_locks locks; @@ -1629,6 +1688,10 @@ struct rtl_priv { interface or hardware */ unsigned long status; + /* tables for dm */ + struct dig_t dm_digtable; + struct ps_t dm_pstable; + /* data buffer pointer for USB reads */ __le32 *usb_data; int usb_data_index; @@ -1958,37 +2021,35 @@ static inline void rtl_write_dword(struct rtl_priv *rtlpriv, static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) { - return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_bbreg(hw, - regaddr, - bitmask); + struct rtl_priv *rtlpriv = hw->priv; + + return rtlpriv->cfg->ops->get_bbreg(hw, regaddr, bitmask); } static inline void rtl_set_bbreg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask, u32 data) { - ((struct rtl_priv *)(hw)->priv)->cfg->ops->set_bbreg(hw, - regaddr, bitmask, - data); + struct rtl_priv *rtlpriv = hw->priv; + rtlpriv->cfg->ops->set_bbreg(hw, regaddr, bitmask, data); } static inline u32 rtl_get_rfreg(struct ieee80211_hw *hw, enum radio_path rfpath, u32 regaddr, u32 bitmask) { - return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_rfreg(hw, - rfpath, - regaddr, - bitmask); + struct rtl_priv *rtlpriv = hw->priv; + + return rtlpriv->cfg->ops->get_rfreg(hw, rfpath, regaddr, bitmask); } static inline void rtl_set_rfreg(struct ieee80211_hw *hw, enum radio_path rfpath, u32 regaddr, u32 bitmask, u32 data) { - ((struct rtl_priv *)(hw)->priv)->cfg->ops->set_rfreg(hw, - rfpath, regaddr, - bitmask, data); + struct rtl_priv *rtlpriv = hw->priv; + + rtlpriv->cfg->ops->set_rfreg(hw, rfpath, regaddr, bitmask, data); } static inline bool is_hal_stop(struct rtl_hal *rtlhal) diff --git 
a/drivers/net/wireless/ti/Kconfig b/drivers/net/wireless/ti/Kconfig new file mode 100644 index 00000000000..1a72932e221 --- /dev/null +++ b/drivers/net/wireless/ti/Kconfig @@ -0,0 +1,14 @@ +menuconfig WL_TI + bool "TI Wireless LAN support" + ---help--- + This section contains support for all the wireless drivers + for Texas Instruments WLAN chips, such as wl1251 and the wl12xx + family. + +if WL_TI +source "drivers/net/wireless/ti/wl1251/Kconfig" +source "drivers/net/wireless/ti/wl12xx/Kconfig" + +# keep last for automatic dependencies +source "drivers/net/wireless/ti/wlcore/Kconfig" +endif # WL_TI diff --git a/drivers/net/wireless/ti/Makefile b/drivers/net/wireless/ti/Makefile new file mode 100644 index 00000000000..0a565622d4a --- /dev/null +++ b/drivers/net/wireless/ti/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_WLCORE) += wlcore/ +obj-$(CONFIG_WL12XX) += wl12xx/ +obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wlcore/ +obj-$(CONFIG_WL1251) += wl1251/ diff --git a/drivers/net/wireless/wl1251/Kconfig b/drivers/net/wireless/ti/wl1251/Kconfig index 1fb65849414..1fb65849414 100644 --- a/drivers/net/wireless/wl1251/Kconfig +++ b/drivers/net/wireless/ti/wl1251/Kconfig diff --git a/drivers/net/wireless/wl1251/Makefile b/drivers/net/wireless/ti/wl1251/Makefile index a5c6328b5f7..a5c6328b5f7 100644 --- a/drivers/net/wireless/wl1251/Makefile +++ b/drivers/net/wireless/ti/wl1251/Makefile diff --git a/drivers/net/wireless/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c index ad87a1ac646..ad87a1ac646 100644 --- a/drivers/net/wireless/wl1251/acx.c +++ b/drivers/net/wireless/ti/wl1251/acx.c diff --git a/drivers/net/wireless/wl1251/acx.h b/drivers/net/wireless/ti/wl1251/acx.h index c2ba100f9b1..c2ba100f9b1 100644 --- a/drivers/net/wireless/wl1251/acx.h +++ b/drivers/net/wireless/ti/wl1251/acx.h diff --git a/drivers/net/wireless/wl1251/boot.c b/drivers/net/wireless/ti/wl1251/boot.c index a2e5241382d..a2e5241382d 100644 --- a/drivers/net/wireless/wl1251/boot.c +++ b/drivers/net/wireless/ti/wl1251/boot.c diff --git a/drivers/net/wireless/wl1251/boot.h b/drivers/net/wireless/ti/wl1251/boot.h index 7661bc5e466..7661bc5e466 100644 --- a/drivers/net/wireless/wl1251/boot.h +++ b/drivers/net/wireless/ti/wl1251/boot.h diff --git a/drivers/net/wireless/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c index d14d69d733a..d14d69d733a 100644 --- a/drivers/net/wireless/wl1251/cmd.c +++ b/drivers/net/wireless/ti/wl1251/cmd.c diff --git a/drivers/net/wireless/wl1251/cmd.h b/drivers/net/wireless/ti/wl1251/cmd.h index ee4f2b39182..ee4f2b39182 100644 --- a/drivers/net/wireless/wl1251/cmd.h +++ b/drivers/net/wireless/ti/wl1251/cmd.h diff --git a/drivers/net/wireless/wl1251/debugfs.c b/drivers/net/wireless/ti/wl1251/debugfs.c index 448da1f8c22..448da1f8c22 100644 --- a/drivers/net/wireless/wl1251/debugfs.c +++ b/drivers/net/wireless/ti/wl1251/debugfs.c diff --git a/drivers/net/wireless/wl1251/debugfs.h b/drivers/net/wireless/ti/wl1251/debugfs.h index b3417c02a21..b3417c02a21 100644 --- a/drivers/net/wireless/wl1251/debugfs.h +++ b/drivers/net/wireless/ti/wl1251/debugfs.h diff --git a/drivers/net/wireless/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c index 9f15ccaf8f0..9f15ccaf8f0 100644 --- a/drivers/net/wireless/wl1251/event.c +++ b/drivers/net/wireless/ti/wl1251/event.c diff --git a/drivers/net/wireless/wl1251/event.h b/drivers/net/wireless/ti/wl1251/event.h index 30eb5d150bf..30eb5d150bf 100644 --- a/drivers/net/wireless/wl1251/event.h +++ b/drivers/net/wireless/ti/wl1251/event.h diff --git 
a/drivers/net/wireless/wl1251/init.c b/drivers/net/wireless/ti/wl1251/init.c index 89b43d35473..89b43d35473 100644 --- a/drivers/net/wireless/wl1251/init.c +++ b/drivers/net/wireless/ti/wl1251/init.c diff --git a/drivers/net/wireless/wl1251/init.h b/drivers/net/wireless/ti/wl1251/init.h index 543f17582ea..543f17582ea 100644 --- a/drivers/net/wireless/wl1251/init.h +++ b/drivers/net/wireless/ti/wl1251/init.h diff --git a/drivers/net/wireless/wl1251/io.c b/drivers/net/wireless/ti/wl1251/io.c index cdcadbf6ac2..cdcadbf6ac2 100644 --- a/drivers/net/wireless/wl1251/io.c +++ b/drivers/net/wireless/ti/wl1251/io.c diff --git a/drivers/net/wireless/wl1251/io.h b/drivers/net/wireless/ti/wl1251/io.h index d382877c34c..d382877c34c 100644 --- a/drivers/net/wireless/wl1251/io.h +++ b/drivers/net/wireless/ti/wl1251/io.h diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index d1afb8e3b2e..d1afb8e3b2e 100644 --- a/drivers/net/wireless/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c diff --git a/drivers/net/wireless/wl1251/ps.c b/drivers/net/wireless/ti/wl1251/ps.c index db719f7d269..db719f7d269 100644 --- a/drivers/net/wireless/wl1251/ps.c +++ b/drivers/net/wireless/ti/wl1251/ps.c diff --git a/drivers/net/wireless/wl1251/ps.h b/drivers/net/wireless/ti/wl1251/ps.h index 75efad246d6..75efad246d6 100644 --- a/drivers/net/wireless/wl1251/ps.h +++ b/drivers/net/wireless/ti/wl1251/ps.h diff --git a/drivers/net/wireless/wl1251/reg.h b/drivers/net/wireless/ti/wl1251/reg.h index a5809019c5c..a5809019c5c 100644 --- a/drivers/net/wireless/wl1251/reg.h +++ b/drivers/net/wireless/ti/wl1251/reg.h diff --git a/drivers/net/wireless/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c index 6af35265c90..6af35265c90 100644 --- a/drivers/net/wireless/wl1251/rx.c +++ b/drivers/net/wireless/ti/wl1251/rx.c diff --git a/drivers/net/wireless/wl1251/rx.h b/drivers/net/wireless/ti/wl1251/rx.h index 4448f635a4d..4448f635a4d 100644 --- a/drivers/net/wireless/wl1251/rx.h +++ b/drivers/net/wireless/ti/wl1251/rx.h diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c index 1b851f650e0..1b851f650e0 100644 --- a/drivers/net/wireless/wl1251/sdio.c +++ b/drivers/net/wireless/ti/wl1251/sdio.c diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c index 6248c354fc5..6248c354fc5 100644 --- a/drivers/net/wireless/wl1251/spi.c +++ b/drivers/net/wireless/ti/wl1251/spi.c diff --git a/drivers/net/wireless/wl1251/spi.h b/drivers/net/wireless/ti/wl1251/spi.h index 16d506955cc..16d506955cc 100644 --- a/drivers/net/wireless/wl1251/spi.h +++ b/drivers/net/wireless/ti/wl1251/spi.h diff --git a/drivers/net/wireless/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c index 28121c590a2..28121c590a2 100644 --- a/drivers/net/wireless/wl1251/tx.c +++ b/drivers/net/wireless/ti/wl1251/tx.c diff --git a/drivers/net/wireless/wl1251/tx.h b/drivers/net/wireless/ti/wl1251/tx.h index 81338d39b43..81338d39b43 100644 --- a/drivers/net/wireless/wl1251/tx.h +++ b/drivers/net/wireless/ti/wl1251/tx.h diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h index 9d8f5816c6f..9d8f5816c6f 100644 --- a/drivers/net/wireless/wl1251/wl1251.h +++ b/drivers/net/wireless/ti/wl1251/wl1251.h diff --git a/drivers/net/wireless/wl1251/wl12xx_80211.h b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h index 04ed5149577..04ed5149577 100644 --- a/drivers/net/wireless/wl1251/wl12xx_80211.h +++ b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h diff --git 
a/drivers/net/wireless/ti/wl12xx/Kconfig b/drivers/net/wireless/ti/wl12xx/Kconfig new file mode 100644 index 00000000000..5b92329122c --- /dev/null +++ b/drivers/net/wireless/ti/wl12xx/Kconfig @@ -0,0 +1,8 @@ +config WL12XX + tristate "TI wl12xx support" + select WLCORE + ---help--- + This module adds support for wireless adapters based on TI wl1271, + wl1273, wl1281 and wl1283 chipsets. This module does *not* include + support for wl1251. For wl1251 support, use the separate homonymous + driver instead. diff --git a/drivers/net/wireless/ti/wl12xx/Makefile b/drivers/net/wireless/ti/wl12xx/Makefile new file mode 100644 index 00000000000..87f64b14db3 --- /dev/null +++ b/drivers/net/wireless/ti/wl12xx/Makefile @@ -0,0 +1,3 @@ +wl12xx-objs = main.o cmd.o acx.o + +obj-$(CONFIG_WL12XX) += wl12xx.o diff --git a/drivers/net/wireless/ti/wl12xx/acx.c b/drivers/net/wireless/ti/wl12xx/acx.c new file mode 100644 index 00000000000..bea06b2d7bf --- /dev/null +++ b/drivers/net/wireless/ti/wl12xx/acx.c @@ -0,0 +1,53 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 2008-2009 Nokia Corporation + * Copyright (C) 2011 Texas Instruments Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "../wlcore/cmd.h" +#include "../wlcore/debug.h" +#include "../wlcore/acx.h" + +#include "acx.h" + +int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap) +{ + struct wl1271_acx_host_config_bitmap *bitmap_conf; + int ret; + + bitmap_conf = kzalloc(sizeof(*bitmap_conf), GFP_KERNEL); + if (!bitmap_conf) { + ret = -ENOMEM; + goto out; + } + + bitmap_conf->host_cfg_bitmap = cpu_to_le32(host_cfg_bitmap); + + ret = wl1271_cmd_configure(wl, ACX_HOST_IF_CFG_BITMAP, + bitmap_conf, sizeof(*bitmap_conf)); + if (ret < 0) { + wl1271_warning("wl1271 bitmap config opt failed: %d", ret); + goto out; + } + +out: + kfree(bitmap_conf); + + return ret; +} diff --git a/drivers/net/wireless/ti/wl12xx/acx.h b/drivers/net/wireless/ti/wl12xx/acx.h new file mode 100644 index 00000000000..d1f5aba0afc --- /dev/null +++ b/drivers/net/wireless/ti/wl12xx/acx.h @@ -0,0 +1,36 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 1998-2009, 2011 Texas Instruments. All rights reserved. + * Copyright (C) 2008-2010 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL12XX_ACX_H__ +#define __WL12XX_ACX_H__ + +#include "../wlcore/wlcore.h" + +struct wl1271_acx_host_config_bitmap { + struct acx_header header; + + __le32 host_cfg_bitmap; +} __packed; + +int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap); + +#endif /* __WL12XX_ACX_H__ */ diff --git a/drivers/net/wireless/ti/wl12xx/cmd.c b/drivers/net/wireless/ti/wl12xx/cmd.c new file mode 100644 index 00000000000..8ffaeb5f214 --- /dev/null +++ b/drivers/net/wireless/ti/wl12xx/cmd.c @@ -0,0 +1,254 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 2009-2010 Nokia Corporation + * Copyright (C) 2011 Texas Instruments Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "../wlcore/cmd.h" +#include "../wlcore/debug.h" + +#include "wl12xx.h" +#include "cmd.h" + +int wl1271_cmd_ext_radio_parms(struct wl1271 *wl) +{ + struct wl1271_ext_radio_parms_cmd *ext_radio_parms; + struct wl12xx_priv *priv = wl->priv; + struct wl12xx_conf_rf *rf = &priv->conf.rf; + int ret; + + if (!wl->nvs) + return -ENODEV; + + ext_radio_parms = kzalloc(sizeof(*ext_radio_parms), GFP_KERNEL); + if (!ext_radio_parms) + return -ENOMEM; + + ext_radio_parms->test.id = TEST_CMD_INI_FILE_RF_EXTENDED_PARAM; + + memcpy(ext_radio_parms->tx_per_channel_power_compensation_2, + rf->tx_per_channel_power_compensation_2, + CONF_TX_PWR_COMPENSATION_LEN_2); + memcpy(ext_radio_parms->tx_per_channel_power_compensation_5, + rf->tx_per_channel_power_compensation_5, + CONF_TX_PWR_COMPENSATION_LEN_5); + + wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_EXT_RADIO_PARAM: ", + ext_radio_parms, sizeof(*ext_radio_parms)); + + ret = wl1271_cmd_test(wl, ext_radio_parms, sizeof(*ext_radio_parms), 0); + if (ret < 0) + wl1271_warning("TEST_CMD_INI_FILE_RF_EXTENDED_PARAM failed"); + + kfree(ext_radio_parms); + return ret; +} + +int wl1271_cmd_general_parms(struct wl1271 *wl) +{ + struct wl1271_general_parms_cmd *gen_parms; + struct wl1271_ini_general_params *gp = + &((struct wl1271_nvs_file *)wl->nvs)->general_params; + bool answer = false; + int ret; + + if (!wl->nvs) + return -ENODEV; + + if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { + wl1271_warning("FEM index from INI out of bounds"); + return -EINVAL; + } + + gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); + if (!gen_parms) + return -ENOMEM; + + gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM; + + memcpy(&gen_parms->general_params, gp, sizeof(*gp)); + + if (gp->tx_bip_fem_auto_detect) + answer = true; + + /* Override the REF CLK from the NVS with the one from platform data */ + gen_parms->general_params.ref_clock = wl->ref_clock; + + ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer); 
+ if (ret < 0) { + wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed"); + goto out; + } + + gp->tx_bip_fem_manufacturer = + gen_parms->general_params.tx_bip_fem_manufacturer; + + if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { + wl1271_warning("FEM index from FW out of bounds"); + ret = -EINVAL; + goto out; + } + + wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n", + answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer); + +out: + kfree(gen_parms); + return ret; +} + +int wl128x_cmd_general_parms(struct wl1271 *wl) +{ + struct wl128x_general_parms_cmd *gen_parms; + struct wl128x_ini_general_params *gp = + &((struct wl128x_nvs_file *)wl->nvs)->general_params; + bool answer = false; + int ret; + + if (!wl->nvs) + return -ENODEV; + + if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { + wl1271_warning("FEM index from ini out of bounds"); + return -EINVAL; + } + + gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); + if (!gen_parms) + return -ENOMEM; + + gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM; + + memcpy(&gen_parms->general_params, gp, sizeof(*gp)); + + if (gp->tx_bip_fem_auto_detect) + answer = true; + + /* Replace REF and TCXO CLKs with the ones from platform data */ + gen_parms->general_params.ref_clock = wl->ref_clock; + gen_parms->general_params.tcxo_ref_clock = wl->tcxo_clock; + + ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer); + if (ret < 0) { + wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed"); + goto out; + } + + gp->tx_bip_fem_manufacturer = + gen_parms->general_params.tx_bip_fem_manufacturer; + + if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { + wl1271_warning("FEM index from FW out of bounds"); + ret = -EINVAL; + goto out; + } + + wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n", + answer ? 
"auto" : "manual", gp->tx_bip_fem_manufacturer); + +out: + kfree(gen_parms); + return ret; +} + +int wl1271_cmd_radio_parms(struct wl1271 *wl) +{ + struct wl1271_nvs_file *nvs = (struct wl1271_nvs_file *)wl->nvs; + struct wl1271_radio_parms_cmd *radio_parms; + struct wl1271_ini_general_params *gp = &nvs->general_params; + int ret; + + if (!wl->nvs) + return -ENODEV; + + radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL); + if (!radio_parms) + return -ENOMEM; + + radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM; + + /* 2.4GHz parameters */ + memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2, + sizeof(struct wl1271_ini_band_params_2)); + memcpy(&radio_parms->dyn_params_2, + &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params, + sizeof(struct wl1271_ini_fem_params_2)); + + /* 5GHz parameters */ + memcpy(&radio_parms->static_params_5, + &nvs->stat_radio_params_5, + sizeof(struct wl1271_ini_band_params_5)); + memcpy(&radio_parms->dyn_params_5, + &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params, + sizeof(struct wl1271_ini_fem_params_5)); + + wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ", + radio_parms, sizeof(*radio_parms)); + + ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0); + if (ret < 0) + wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed"); + + kfree(radio_parms); + return ret; +} + +int wl128x_cmd_radio_parms(struct wl1271 *wl) +{ + struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs; + struct wl128x_radio_parms_cmd *radio_parms; + struct wl128x_ini_general_params *gp = &nvs->general_params; + int ret; + + if (!wl->nvs) + return -ENODEV; + + radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL); + if (!radio_parms) + return -ENOMEM; + + radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM; + + /* 2.4GHz parameters */ + memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2, + sizeof(struct wl128x_ini_band_params_2)); + memcpy(&radio_parms->dyn_params_2, + &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params, + sizeof(struct wl128x_ini_fem_params_2)); + + /* 5GHz parameters */ + memcpy(&radio_parms->static_params_5, + &nvs->stat_radio_params_5, + sizeof(struct wl128x_ini_band_params_5)); + memcpy(&radio_parms->dyn_params_5, + &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params, + sizeof(struct wl128x_ini_fem_params_5)); + + radio_parms->fem_vendor_and_options = nvs->fem_vendor_and_options; + + wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ", + radio_parms, sizeof(*radio_parms)); + + ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0); + if (ret < 0) + wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed"); + + kfree(radio_parms); + return ret; +} diff --git a/drivers/net/wireless/ti/wl12xx/cmd.h b/drivers/net/wireless/ti/wl12xx/cmd.h new file mode 100644 index 00000000000..140a0e8829d --- /dev/null +++ b/drivers/net/wireless/ti/wl12xx/cmd.h @@ -0,0 +1,112 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 1998-2009, 2011 Texas Instruments. All rights reserved. + * Copyright (C) 2009 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL12XX_CMD_H__ +#define __WL12XX_CMD_H__ + +#include "conf.h" + +#define TEST_CMD_INI_FILE_RADIO_PARAM 0x19 +#define TEST_CMD_INI_FILE_GENERAL_PARAM 0x1E + +struct wl1271_general_parms_cmd { + struct wl1271_cmd_header header; + + struct wl1271_cmd_test_header test; + + struct wl1271_ini_general_params general_params; + + u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM]; + u8 sr_sen_n_p; + u8 sr_sen_n_p_gain; + u8 sr_sen_nrn; + u8 sr_sen_prn; + u8 padding[3]; +} __packed; + +struct wl128x_general_parms_cmd { + struct wl1271_cmd_header header; + + struct wl1271_cmd_test_header test; + + struct wl128x_ini_general_params general_params; + + u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM]; + u8 sr_sen_n_p; + u8 sr_sen_n_p_gain; + u8 sr_sen_nrn; + u8 sr_sen_prn; + u8 padding[3]; +} __packed; + +struct wl1271_radio_parms_cmd { + struct wl1271_cmd_header header; + + struct wl1271_cmd_test_header test; + + /* Static radio parameters */ + struct wl1271_ini_band_params_2 static_params_2; + struct wl1271_ini_band_params_5 static_params_5; + + /* Dynamic radio parameters */ + struct wl1271_ini_fem_params_2 dyn_params_2; + u8 padding2; + struct wl1271_ini_fem_params_5 dyn_params_5; + u8 padding3[2]; +} __packed; + +struct wl128x_radio_parms_cmd { + struct wl1271_cmd_header header; + + struct wl1271_cmd_test_header test; + + /* Static radio parameters */ + struct wl128x_ini_band_params_2 static_params_2; + struct wl128x_ini_band_params_5 static_params_5; + + u8 fem_vendor_and_options; + + /* Dynamic radio parameters */ + struct wl128x_ini_fem_params_2 dyn_params_2; + u8 padding2; + struct wl128x_ini_fem_params_5 dyn_params_5; +} __packed; + +#define TEST_CMD_INI_FILE_RF_EXTENDED_PARAM 0x26 + +struct wl1271_ext_radio_parms_cmd { + struct wl1271_cmd_header header; + + struct wl1271_cmd_test_header test; + + u8 tx_per_channel_power_compensation_2[CONF_TX_PWR_COMPENSATION_LEN_2]; + u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5]; + u8 padding[3]; +} __packed; + +int wl1271_cmd_general_parms(struct wl1271 *wl); +int wl128x_cmd_general_parms(struct wl1271 *wl); +int wl1271_cmd_radio_parms(struct wl1271 *wl); +int wl128x_cmd_radio_parms(struct wl1271 *wl); +int wl1271_cmd_ext_radio_parms(struct wl1271 *wl); + +#endif /* __WL12XX_CMD_H__ */ diff --git a/drivers/net/wireless/ti/wl12xx/conf.h b/drivers/net/wireless/ti/wl12xx/conf.h new file mode 100644 index 00000000000..75e29897a0f --- /dev/null +++ b/drivers/net/wireless/ti/wl12xx/conf.h @@ -0,0 +1,50 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 2011 Texas Instruments Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL12XX_CONF_H__ +#define __WL12XX_CONF_H__ + +/* these are number of channels on the band divided by two, rounded up */ +#define CONF_TX_PWR_COMPENSATION_LEN_2 7 +#define CONF_TX_PWR_COMPENSATION_LEN_5 18 + +struct wl12xx_conf_rf { + /* + * Per channel power compensation for 2.4GHz + * + * Range: s8 + */ + u8 tx_per_channel_power_compensation_2[CONF_TX_PWR_COMPENSATION_LEN_2]; + + /* + * Per channel power compensation for 5GHz + * + * Range: s8 + */ + u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5]; +}; + +struct wl12xx_priv_conf { + struct wl12xx_conf_rf rf; + struct conf_memory_settings mem_wl127x; +}; + +#endif /* __WL12XX_CONF_H__ */ diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c new file mode 100644 index 00000000000..d7dd3def07b --- /dev/null +++ b/drivers/net/wireless/ti/wl12xx/main.c @@ -0,0 +1,1388 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2010 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include <linux/module.h> +#include <linux/platform_device.h> + +#include <linux/err.h> + +#include <linux/wl12xx.h> + +#include "../wlcore/wlcore.h" +#include "../wlcore/debug.h" +#include "../wlcore/io.h" +#include "../wlcore/acx.h" +#include "../wlcore/tx.h" +#include "../wlcore/rx.h" +#include "../wlcore/io.h" +#include "../wlcore/boot.h" + +#include "wl12xx.h" +#include "reg.h" +#include "cmd.h" +#include "acx.h" + +static struct wlcore_conf wl12xx_conf = { + .sg = { + .params = { + [CONF_SG_ACL_BT_MASTER_MIN_BR] = 10, + [CONF_SG_ACL_BT_MASTER_MAX_BR] = 180, + [CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10, + [CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180, + [CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10, + [CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80, + [CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10, + [CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80, + [CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8, + [CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8, + [CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20, + [CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20, + [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20, + [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35, + [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16, + [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35, + [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32, + [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50, + [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28, + [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50, + [CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10, + [CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20, + [CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75, + [CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15, + [CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27, + [CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17, + /* active scan params */ + [CONF_SG_AUTO_SCAN_PROBE_REQ] 
= 170, + [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50, + [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100, + /* passive scan params */ + [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800, + [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200, + [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200, + /* passive scan in dual antenna params */ + [CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0, + [CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0, + [CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0, + /* general params */ + [CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1, + [CONF_SG_ANTENNA_CONFIGURATION] = 0, + [CONF_SG_BEACON_MISS_PERCENT] = 60, + [CONF_SG_DHCP_TIME] = 5000, + [CONF_SG_RXT] = 1200, + [CONF_SG_TXT] = 1000, + [CONF_SG_ADAPTIVE_RXT_TXT] = 1, + [CONF_SG_GENERAL_USAGE_BIT_MAP] = 3, + [CONF_SG_HV3_MAX_SERVED] = 6, + [CONF_SG_PS_POLL_TIMEOUT] = 10, + [CONF_SG_UPSD_TIMEOUT] = 10, + [CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2, + [CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5, + [CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30, + /* AP params */ + [CONF_AP_BEACON_MISS_TX] = 3, + [CONF_AP_RX_WINDOW_AFTER_BEACON] = 10, + [CONF_AP_BEACON_WINDOW_INTERVAL] = 2, + [CONF_AP_CONNECTION_PROTECTION_TIME] = 0, + [CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25, + [CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25, + /* CTS Diluting params */ + [CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0, + [CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0, + }, + .state = CONF_SG_PROTECTIVE, + }, + .rx = { + .rx_msdu_life_time = 512000, + .packet_detection_threshold = 0, + .ps_poll_timeout = 15, + .upsd_timeout = 15, + .rts_threshold = IEEE80211_MAX_RTS_THRESHOLD, + .rx_cca_threshold = 0, + .irq_blk_threshold = 0xFFFF, + .irq_pkt_threshold = 0, + .irq_timeout = 600, + .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY, + }, + .tx = { + .tx_energy_detection = 0, + .sta_rc_conf = { + .enabled_rates = 0, + .short_retry_limit = 10, + .long_retry_limit = 10, + .aflags = 0, + }, + .ac_conf_count = 4, + .ac_conf = { + [CONF_TX_AC_BE] = { + .ac = CONF_TX_AC_BE, + .cw_min = 15, + .cw_max = 63, + .aifsn = 3, + .tx_op_limit = 0, + }, + [CONF_TX_AC_BK] = { + .ac = CONF_TX_AC_BK, + .cw_min = 15, + .cw_max = 63, + .aifsn = 7, + .tx_op_limit = 0, + }, + [CONF_TX_AC_VI] = { + .ac = CONF_TX_AC_VI, + .cw_min = 15, + .cw_max = 63, + .aifsn = CONF_TX_AIFS_PIFS, + .tx_op_limit = 3008, + }, + [CONF_TX_AC_VO] = { + .ac = CONF_TX_AC_VO, + .cw_min = 15, + .cw_max = 63, + .aifsn = CONF_TX_AIFS_PIFS, + .tx_op_limit = 1504, + }, + }, + .max_tx_retries = 100, + .ap_aging_period = 300, + .tid_conf_count = 4, + .tid_conf = { + [CONF_TX_AC_BE] = { + .queue_id = CONF_TX_AC_BE, + .channel_type = CONF_CHANNEL_TYPE_EDCF, + .tsid = CONF_TX_AC_BE, + .ps_scheme = CONF_PS_SCHEME_LEGACY, + .ack_policy = CONF_ACK_POLICY_LEGACY, + .apsd_conf = {0, 0}, + }, + [CONF_TX_AC_BK] = { + .queue_id = CONF_TX_AC_BK, + .channel_type = CONF_CHANNEL_TYPE_EDCF, + .tsid = CONF_TX_AC_BK, + .ps_scheme = CONF_PS_SCHEME_LEGACY, + .ack_policy = CONF_ACK_POLICY_LEGACY, + .apsd_conf = {0, 0}, + }, + [CONF_TX_AC_VI] = { + .queue_id = CONF_TX_AC_VI, + .channel_type = CONF_CHANNEL_TYPE_EDCF, + .tsid = CONF_TX_AC_VI, + .ps_scheme = CONF_PS_SCHEME_LEGACY, + .ack_policy = CONF_ACK_POLICY_LEGACY, + .apsd_conf = {0, 0}, + }, + [CONF_TX_AC_VO] = { + .queue_id = CONF_TX_AC_VO, + .channel_type = CONF_CHANNEL_TYPE_EDCF, + .tsid = CONF_TX_AC_VO, + .ps_scheme = CONF_PS_SCHEME_LEGACY, + .ack_policy = CONF_ACK_POLICY_LEGACY, + .apsd_conf = {0, 0}, + }, + }, + .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD, + .tx_compl_timeout = 700, + 
.tx_compl_threshold = 4, + .basic_rate = CONF_HW_BIT_RATE_1MBPS, + .basic_rate_5 = CONF_HW_BIT_RATE_6MBPS, + .tmpl_short_retry_limit = 10, + .tmpl_long_retry_limit = 10, + .tx_watchdog_timeout = 5000, + }, + .conn = { + .wake_up_event = CONF_WAKE_UP_EVENT_DTIM, + .listen_interval = 1, + .suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM, + .suspend_listen_interval = 3, + .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED, + .bcn_filt_ie_count = 2, + .bcn_filt_ie = { + [0] = { + .ie = WLAN_EID_CHANNEL_SWITCH, + .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE, + }, + [1] = { + .ie = WLAN_EID_HT_OPERATION, + .rule = CONF_BCN_RULE_PASS_ON_CHANGE, + }, + }, + .synch_fail_thold = 10, + .bss_lose_timeout = 100, + .beacon_rx_timeout = 10000, + .broadcast_timeout = 20000, + .rx_broadcast_in_ps = 1, + .ps_poll_threshold = 10, + .bet_enable = CONF_BET_MODE_ENABLE, + .bet_max_consecutive = 50, + .psm_entry_retries = 8, + .psm_exit_retries = 16, + .psm_entry_nullfunc_retries = 3, + .dynamic_ps_timeout = 40, + .forced_ps = false, + .keep_alive_interval = 55000, + .max_listen_interval = 20, + }, + .itrim = { + .enable = false, + .timeout = 50000, + }, + .pm_config = { + .host_clk_settling_time = 5000, + .host_fast_wakeup_support = false + }, + .roam_trigger = { + .trigger_pacing = 1, + .avg_weight_rssi_beacon = 20, + .avg_weight_rssi_data = 10, + .avg_weight_snr_beacon = 20, + .avg_weight_snr_data = 10, + }, + .scan = { + .min_dwell_time_active = 7500, + .max_dwell_time_active = 30000, + .min_dwell_time_passive = 100000, + .max_dwell_time_passive = 100000, + .num_probe_reqs = 2, + .split_scan_timeout = 50000, + }, + .sched_scan = { + /* + * Values are in TU/1000 but since sched scan FW command + * params are in TUs rounding up may occur. + */ + .base_dwell_time = 7500, + .max_dwell_time_delta = 22500, + /* based on 250bits per probe @1Mbps */ + .dwell_time_delta_per_probe = 2000, + /* based on 250bits per probe @6Mbps (plus a bit more) */ + .dwell_time_delta_per_probe_5 = 350, + .dwell_time_passive = 100000, + .dwell_time_dfs = 150000, + .num_probe_reqs = 2, + .rssi_threshold = -90, + .snr_threshold = 0, + }, + .ht = { + .rx_ba_win_size = 8, + .tx_ba_win_size = 64, + .inactivity_timeout = 10000, + .tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP, + }, + /* + * Memory config for wl127x chips is given in the + * wl12xx_default_priv_conf struct. The below configuration is + * for wl128x chips. 
+ */ + .mem = { + .num_stations = 1, + .ssid_profiles = 1, + .rx_block_num = 40, + .tx_min_block_num = 40, + .dynamic_memory = 1, + .min_req_tx_blocks = 45, + .min_req_rx_blocks = 22, + .tx_min = 27, + }, + .fm_coex = { + .enable = true, + .swallow_period = 5, + .n_divider_fref_set_1 = 0xff, /* default */ + .n_divider_fref_set_2 = 12, + .m_divider_fref_set_1 = 148, + .m_divider_fref_set_2 = 0xffff, /* default */ + .coex_pll_stabilization_time = 0xffffffff, /* default */ + .ldo_stabilization_time = 0xffff, /* default */ + .fm_disturbed_band_margin = 0xff, /* default */ + .swallow_clk_diff = 0xff, /* default */ + }, + .rx_streaming = { + .duration = 150, + .queues = 0x1, + .interval = 20, + .always = 0, + }, + .fwlog = { + .mode = WL12XX_FWLOG_ON_DEMAND, + .mem_blocks = 2, + .severity = 0, + .timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED, + .output = WL12XX_FWLOG_OUTPUT_HOST, + .threshold = 0, + }, + .rate = { + .rate_retry_score = 32000, + .per_add = 8192, + .per_th1 = 2048, + .per_th2 = 4096, + .max_per = 8100, + .inverse_curiosity_factor = 5, + .tx_fail_low_th = 4, + .tx_fail_high_th = 10, + .per_alpha_shift = 4, + .per_add_shift = 13, + .per_beta1_shift = 10, + .per_beta2_shift = 8, + .rate_check_up = 2, + .rate_check_down = 12, + .rate_retry_policy = { + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, + }, + }, + .hangover = { + .recover_time = 0, + .hangover_period = 20, + .dynamic_mode = 1, + .early_termination_mode = 1, + .max_period = 20, + .min_period = 1, + .increase_delta = 1, + .decrease_delta = 2, + .quiet_time = 4, + .increase_time = 1, + .window_size = 16, + }, +}; + +static struct wl12xx_priv_conf wl12xx_default_priv_conf = { + .rf = { + .tx_per_channel_power_compensation_2 = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + .tx_per_channel_power_compensation_5 = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + }, + .mem_wl127x = { + .num_stations = 1, + .ssid_profiles = 1, + .rx_block_num = 70, + .tx_min_block_num = 40, + .dynamic_memory = 1, + .min_req_tx_blocks = 100, + .min_req_rx_blocks = 22, + .tx_min = 27, + }, + +}; + +#define WL12XX_TX_HW_BLOCK_SPARE_DEFAULT 1 +#define WL12XX_TX_HW_BLOCK_GEM_SPARE 2 +#define WL12XX_TX_HW_BLOCK_SIZE 252 + +static const u8 wl12xx_rate_to_idx_2ghz[] = { + /* MCS rates are used only with 11n */ + 7, /* WL12XX_CONF_HW_RXTX_RATE_MCS7_SGI */ + 7, /* WL12XX_CONF_HW_RXTX_RATE_MCS7 */ + 6, /* WL12XX_CONF_HW_RXTX_RATE_MCS6 */ + 5, /* WL12XX_CONF_HW_RXTX_RATE_MCS5 */ + 4, /* WL12XX_CONF_HW_RXTX_RATE_MCS4 */ + 3, /* WL12XX_CONF_HW_RXTX_RATE_MCS3 */ + 2, /* WL12XX_CONF_HW_RXTX_RATE_MCS2 */ + 1, /* WL12XX_CONF_HW_RXTX_RATE_MCS1 */ + 0, /* WL12XX_CONF_HW_RXTX_RATE_MCS0 */ + + 11, /* WL12XX_CONF_HW_RXTX_RATE_54 */ + 10, /* WL12XX_CONF_HW_RXTX_RATE_48 */ + 9, /* WL12XX_CONF_HW_RXTX_RATE_36 */ + 8, /* WL12XX_CONF_HW_RXTX_RATE_24 */ + + /* TI-specific rate */ + CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_22 */ + + 7, /* WL12XX_CONF_HW_RXTX_RATE_18 */ + 6, /* WL12XX_CONF_HW_RXTX_RATE_12 */ + 3, /* WL12XX_CONF_HW_RXTX_RATE_11 */ + 5, /* WL12XX_CONF_HW_RXTX_RATE_9 */ + 4, /* WL12XX_CONF_HW_RXTX_RATE_6 */ + 2, /* WL12XX_CONF_HW_RXTX_RATE_5_5 */ + 1, /* WL12XX_CONF_HW_RXTX_RATE_2 */ + 0 /* WL12XX_CONF_HW_RXTX_RATE_1 */ +}; + +static const u8 wl12xx_rate_to_idx_5ghz[] = { + /* MCS rates are used only with 11n */ + 7, /* WL12XX_CONF_HW_RXTX_RATE_MCS7_SGI */ + 7, /* WL12XX_CONF_HW_RXTX_RATE_MCS7 */ + 6, /* WL12XX_CONF_HW_RXTX_RATE_MCS6 */ 
+ 5, /* WL12XX_CONF_HW_RXTX_RATE_MCS5 */ + 4, /* WL12XX_CONF_HW_RXTX_RATE_MCS4 */ + 3, /* WL12XX_CONF_HW_RXTX_RATE_MCS3 */ + 2, /* WL12XX_CONF_HW_RXTX_RATE_MCS2 */ + 1, /* WL12XX_CONF_HW_RXTX_RATE_MCS1 */ + 0, /* WL12XX_CONF_HW_RXTX_RATE_MCS0 */ + + 7, /* WL12XX_CONF_HW_RXTX_RATE_54 */ + 6, /* WL12XX_CONF_HW_RXTX_RATE_48 */ + 5, /* WL12XX_CONF_HW_RXTX_RATE_36 */ + 4, /* WL12XX_CONF_HW_RXTX_RATE_24 */ + + /* TI-specific rate */ + CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_22 */ + + 3, /* WL12XX_CONF_HW_RXTX_RATE_18 */ + 2, /* WL12XX_CONF_HW_RXTX_RATE_12 */ + CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_11 */ + 1, /* WL12XX_CONF_HW_RXTX_RATE_9 */ + 0, /* WL12XX_CONF_HW_RXTX_RATE_6 */ + CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_5_5 */ + CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_2 */ + CONF_HW_RXTX_RATE_UNSUPPORTED /* WL12XX_CONF_HW_RXTX_RATE_1 */ +}; + +static const u8 *wl12xx_band_rate_to_idx[] = { + [IEEE80211_BAND_2GHZ] = wl12xx_rate_to_idx_2ghz, + [IEEE80211_BAND_5GHZ] = wl12xx_rate_to_idx_5ghz +}; + +enum wl12xx_hw_rates { + WL12XX_CONF_HW_RXTX_RATE_MCS7_SGI = 0, + WL12XX_CONF_HW_RXTX_RATE_MCS7, + WL12XX_CONF_HW_RXTX_RATE_MCS6, + WL12XX_CONF_HW_RXTX_RATE_MCS5, + WL12XX_CONF_HW_RXTX_RATE_MCS4, + WL12XX_CONF_HW_RXTX_RATE_MCS3, + WL12XX_CONF_HW_RXTX_RATE_MCS2, + WL12XX_CONF_HW_RXTX_RATE_MCS1, + WL12XX_CONF_HW_RXTX_RATE_MCS0, + WL12XX_CONF_HW_RXTX_RATE_54, + WL12XX_CONF_HW_RXTX_RATE_48, + WL12XX_CONF_HW_RXTX_RATE_36, + WL12XX_CONF_HW_RXTX_RATE_24, + WL12XX_CONF_HW_RXTX_RATE_22, + WL12XX_CONF_HW_RXTX_RATE_18, + WL12XX_CONF_HW_RXTX_RATE_12, + WL12XX_CONF_HW_RXTX_RATE_11, + WL12XX_CONF_HW_RXTX_RATE_9, + WL12XX_CONF_HW_RXTX_RATE_6, + WL12XX_CONF_HW_RXTX_RATE_5_5, + WL12XX_CONF_HW_RXTX_RATE_2, + WL12XX_CONF_HW_RXTX_RATE_1, + WL12XX_CONF_HW_RXTX_RATE_MAX, +}; + +static struct wlcore_partition_set wl12xx_ptable[PART_TABLE_LEN] = { + [PART_DOWN] = { + .mem = { + .start = 0x00000000, + .size = 0x000177c0 + }, + .reg = { + .start = REGISTERS_BASE, + .size = 0x00008800 + }, + .mem2 = { + .start = 0x00000000, + .size = 0x00000000 + }, + .mem3 = { + .start = 0x00000000, + .size = 0x00000000 + }, + }, + + [PART_BOOT] = { /* in wl12xx we can use a mix of work and down + * partition here */ + .mem = { + .start = 0x00040000, + .size = 0x00014fc0 + }, + .reg = { + .start = REGISTERS_BASE, + .size = 0x00008800 + }, + .mem2 = { + .start = 0x00000000, + .size = 0x00000000 + }, + .mem3 = { + .start = 0x00000000, + .size = 0x00000000 + }, + }, + + [PART_WORK] = { + .mem = { + .start = 0x00040000, + .size = 0x00014fc0 + }, + .reg = { + .start = REGISTERS_BASE, + .size = 0x0000a000 + }, + .mem2 = { + .start = 0x003004f8, + .size = 0x00000004 + }, + .mem3 = { + .start = 0x00040404, + .size = 0x00000000 + }, + }, + + [PART_DRPW] = { + .mem = { + .start = 0x00040000, + .size = 0x00014fc0 + }, + .reg = { + .start = DRPW_BASE, + .size = 0x00006000 + }, + .mem2 = { + .start = 0x00000000, + .size = 0x00000000 + }, + .mem3 = { + .start = 0x00000000, + .size = 0x00000000 + } + } +}; + +static const int wl12xx_rtable[REG_TABLE_LEN] = { + [REG_ECPU_CONTROL] = WL12XX_REG_ECPU_CONTROL, + [REG_INTERRUPT_NO_CLEAR] = WL12XX_REG_INTERRUPT_NO_CLEAR, + [REG_INTERRUPT_ACK] = WL12XX_REG_INTERRUPT_ACK, + [REG_COMMAND_MAILBOX_PTR] = WL12XX_REG_COMMAND_MAILBOX_PTR, + [REG_EVENT_MAILBOX_PTR] = WL12XX_REG_EVENT_MAILBOX_PTR, + [REG_INTERRUPT_TRIG] = WL12XX_REG_INTERRUPT_TRIG, + [REG_INTERRUPT_MASK] = WL12XX_REG_INTERRUPT_MASK, + [REG_PC_ON_RECOVERY] = WL12XX_SCR_PAD4, + 
[REG_CHIP_ID_B] = WL12XX_CHIP_ID_B, + [REG_CMD_MBOX_ADDRESS] = WL12XX_CMD_MBOX_ADDRESS, + + /* data access memory addresses, used with partition translation */ + [REG_SLV_MEM_DATA] = WL1271_SLV_MEM_DATA, + [REG_SLV_REG_DATA] = WL1271_SLV_REG_DATA, + + /* raw data access memory addresses */ + [REG_RAW_FW_STATUS_ADDR] = FW_STATUS_ADDR, +}; + +/* TODO: maybe move to a new header file? */ +#define WL127X_FW_NAME_MULTI "ti-connectivity/wl127x-fw-4-mr.bin" +#define WL127X_FW_NAME_SINGLE "ti-connectivity/wl127x-fw-4-sr.bin" +#define WL127X_PLT_FW_NAME "ti-connectivity/wl127x-fw-4-plt.bin" + +#define WL128X_FW_NAME_MULTI "ti-connectivity/wl128x-fw-4-mr.bin" +#define WL128X_FW_NAME_SINGLE "ti-connectivity/wl128x-fw-4-sr.bin" +#define WL128X_PLT_FW_NAME "ti-connectivity/wl128x-fw-4-plt.bin" + +static void wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len) +{ + if (wl->chip.id != CHIP_ID_1283_PG20) { + struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map; + struct wl1271_rx_mem_pool_addr rx_mem_addr; + + /* + * Choose the block we want to read + * For aggregated packets, only the first memory block + * should be retrieved. The FW takes care of the rest. + */ + u32 mem_block = rx_desc & RX_MEM_BLOCK_MASK; + + rx_mem_addr.addr = (mem_block << 8) + + le32_to_cpu(wl_mem_map->packet_memory_pool_start); + + rx_mem_addr.addr_extra = rx_mem_addr.addr + 4; + + wl1271_write(wl, WL1271_SLV_REG_DATA, + &rx_mem_addr, sizeof(rx_mem_addr), false); + } +} + +static int wl12xx_identify_chip(struct wl1271 *wl) +{ + int ret = 0; + + switch (wl->chip.id) { + case CHIP_ID_1271_PG10: + wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete", + wl->chip.id); + + /* clear the alignment quirk, since we don't support it */ + wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN; + + wl->quirks |= WLCORE_QUIRK_LEGACY_NVS; + wl->sr_fw_name = WL127X_FW_NAME_SINGLE; + wl->mr_fw_name = WL127X_FW_NAME_MULTI; + memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x, + sizeof(wl->conf.mem)); + + /* read data preparation is only needed by wl127x */ + wl->ops->prepare_read = wl127x_prepare_read; + + break; + + case CHIP_ID_1271_PG20: + wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", + wl->chip.id); + + /* clear the alignment quirk, since we don't support it */ + wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN; + + wl->quirks |= WLCORE_QUIRK_LEGACY_NVS; + wl->plt_fw_name = WL127X_PLT_FW_NAME; + wl->sr_fw_name = WL127X_FW_NAME_SINGLE; + wl->mr_fw_name = WL127X_FW_NAME_MULTI; + memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x, + sizeof(wl->conf.mem)); + + /* read data preparation is only needed by wl127x */ + wl->ops->prepare_read = wl127x_prepare_read; + + break; + + case CHIP_ID_1283_PG20: + wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)", + wl->chip.id); + wl->plt_fw_name = WL128X_PLT_FW_NAME; + wl->sr_fw_name = WL128X_FW_NAME_SINGLE; + wl->mr_fw_name = WL128X_FW_NAME_MULTI; + break; + case CHIP_ID_1283_PG10: + default: + wl1271_warning("unsupported chip id: 0x%x", wl->chip.id); + ret = -ENODEV; + goto out; + } + +out: + return ret; +} + +static void wl12xx_top_reg_write(struct wl1271 *wl, int addr, u16 val) +{ + /* write address >> 1 + 0x30000 to OCP_POR_CTR */ + addr = (addr >> 1) + 0x30000; + wl1271_write32(wl, WL12XX_OCP_POR_CTR, addr); + + /* write value to OCP_POR_WDATA */ + wl1271_write32(wl, WL12XX_OCP_DATA_WRITE, val); + + /* write 1 to OCP_CMD */ + wl1271_write32(wl, WL12XX_OCP_CMD, OCP_CMD_WRITE); +} + +static u16 wl12xx_top_reg_read(struct wl1271 *wl, int addr) +{ + u32 val; + int 
timeout = OCP_CMD_LOOP; + + /* write address >> 1 + 0x30000 to OCP_POR_CTR */ + addr = (addr >> 1) + 0x30000; + wl1271_write32(wl, WL12XX_OCP_POR_CTR, addr); + + /* write 2 to OCP_CMD */ + wl1271_write32(wl, WL12XX_OCP_CMD, OCP_CMD_READ); + + /* poll for data ready */ + do { + val = wl1271_read32(wl, WL12XX_OCP_DATA_READ); + } while (!(val & OCP_READY_MASK) && --timeout); + + if (!timeout) { + wl1271_warning("Top register access timed out."); + return 0xffff; + } + + /* check data status and return if OK */ + if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK) + return val & 0xffff; + else { + wl1271_warning("Top register access returned error."); + return 0xffff; + } +} + +static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl) +{ + u16 spare_reg; + + /* Mask bits [2] & [8:4] in the sys_clk_cfg register */ + spare_reg = wl12xx_top_reg_read(wl, WL_SPARE_REG); + if (spare_reg == 0xFFFF) + return -EFAULT; + spare_reg |= (BIT(3) | BIT(5) | BIT(6)); + wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg); + + /* Enable FREF_CLK_REQ & mux MCS and coex PLLs to FREF */ + wl12xx_top_reg_write(wl, SYS_CLK_CFG_REG, + WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF); + + /* Delay execution for 15msec, to let the HW settle */ + mdelay(15); + + return 0; +} + +static bool wl128x_is_tcxo_valid(struct wl1271 *wl) +{ + u16 tcxo_detection; + + tcxo_detection = wl12xx_top_reg_read(wl, TCXO_CLK_DETECT_REG); + if (tcxo_detection & TCXO_DET_FAILED) + return false; + + return true; +} + +static bool wl128x_is_fref_valid(struct wl1271 *wl) +{ + u16 fref_detection; + + fref_detection = wl12xx_top_reg_read(wl, FREF_CLK_DETECT_REG); + if (fref_detection & FREF_CLK_DETECT_FAIL) + return false; + + return true; +} + +static int wl128x_manually_configure_mcs_pll(struct wl1271 *wl) +{ + wl12xx_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL); + wl12xx_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL); + wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, MCS_PLL_CONFIG_REG_VAL); + + return 0; +} + +static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk) +{ + u16 spare_reg; + u16 pll_config; + u8 input_freq; + + /* Mask bits [3:1] in the sys_clk_cfg register */ + spare_reg = wl12xx_top_reg_read(wl, WL_SPARE_REG); + if (spare_reg == 0xFFFF) + return -EFAULT; + spare_reg |= BIT(2); + wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg); + + /* Handle special cases of the TCXO clock */ + if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_8 || + wl->tcxo_clock == WL12XX_TCXOCLOCK_33_6) + return wl128x_manually_configure_mcs_pll(wl); + + /* Set the input frequency according to the selected clock source */ + input_freq = (clk & 1) + 1; + + pll_config = wl12xx_top_reg_read(wl, MCS_PLL_CONFIG_REG); + if (pll_config == 0xFFFF) + return -EFAULT; + pll_config |= (input_freq << MCS_SEL_IN_FREQ_SHIFT); + pll_config |= MCS_PLL_ENABLE_HP; + wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config); + + return 0; +} + +/* + * WL128x has two clocks input - TCXO and FREF. + * TCXO is the main clock of the device, while FREF is used to sync + * between the GPS and the cellular modem. + * In cases where TCXO is 32.736MHz or 16.368MHz, the FREF will be used + * as the WLAN/BT main clock. 
+ */ +static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock) +{ + u16 sys_clk_cfg; + + /* For XTAL-only modes, FREF will be used after switching from TCXO */ + if (wl->ref_clock == WL12XX_REFCLOCK_26_XTAL || + wl->ref_clock == WL12XX_REFCLOCK_38_XTAL) { + if (!wl128x_switch_tcxo_to_fref(wl)) + return -EINVAL; + goto fref_clk; + } + + /* Query the HW, to determine which clock source we should use */ + sys_clk_cfg = wl12xx_top_reg_read(wl, SYS_CLK_CFG_REG); + if (sys_clk_cfg == 0xFFFF) + return -EINVAL; + if (sys_clk_cfg & PRCM_CM_EN_MUX_WLAN_FREF) + goto fref_clk; + + /* If TCXO is either 32.736MHz or 16.368MHz, switch to FREF */ + if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_368 || + wl->tcxo_clock == WL12XX_TCXOCLOCK_32_736) { + if (!wl128x_switch_tcxo_to_fref(wl)) + return -EINVAL; + goto fref_clk; + } + + /* TCXO clock is selected */ + if (!wl128x_is_tcxo_valid(wl)) + return -EINVAL; + *selected_clock = wl->tcxo_clock; + goto config_mcs_pll; + +fref_clk: + /* FREF clock is selected */ + if (!wl128x_is_fref_valid(wl)) + return -EINVAL; + *selected_clock = wl->ref_clock; + +config_mcs_pll: + return wl128x_configure_mcs_pll(wl, *selected_clock); +} + +static int wl127x_boot_clk(struct wl1271 *wl) +{ + u32 pause; + u32 clk; + + if (WL127X_PG_GET_MAJOR(wl->hw_pg_ver) < 3) + wl->quirks |= WLCORE_QUIRK_END_OF_TRANSACTION; + + if (wl->ref_clock == CONF_REF_CLK_19_2_E || + wl->ref_clock == CONF_REF_CLK_38_4_E || + wl->ref_clock == CONF_REF_CLK_38_4_M_XTAL) + /* ref clk: 19.2/38.4/38.4-XTAL */ + clk = 0x3; + else if (wl->ref_clock == CONF_REF_CLK_26_E || + wl->ref_clock == CONF_REF_CLK_52_E) + /* ref clk: 26/52 */ + clk = 0x5; + else + return -EINVAL; + + if (wl->ref_clock != CONF_REF_CLK_19_2_E) { + u16 val; + /* Set clock type (open drain) */ + val = wl12xx_top_reg_read(wl, OCP_REG_CLK_TYPE); + val &= FREF_CLK_TYPE_BITS; + wl12xx_top_reg_write(wl, OCP_REG_CLK_TYPE, val); + + /* Set clock pull mode (no pull) */ + val = wl12xx_top_reg_read(wl, OCP_REG_CLK_PULL); + val |= NO_PULL; + wl12xx_top_reg_write(wl, OCP_REG_CLK_PULL, val); + } else { + u16 val; + /* Set clock polarity */ + val = wl12xx_top_reg_read(wl, OCP_REG_CLK_POLARITY); + val &= FREF_CLK_POLARITY_BITS; + val |= CLK_REQ_OUTN_SEL; + wl12xx_top_reg_write(wl, OCP_REG_CLK_POLARITY, val); + } + + wl1271_write32(wl, WL12XX_PLL_PARAMETERS, clk); + + pause = wl1271_read32(wl, WL12XX_PLL_PARAMETERS); + + wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause); + + pause &= ~(WU_COUNTER_PAUSE_VAL); + pause |= WU_COUNTER_PAUSE_VAL; + wl1271_write32(wl, WL12XX_WU_COUNTER_PAUSE, pause); + + return 0; +} + +static int wl1271_boot_soft_reset(struct wl1271 *wl) +{ + unsigned long timeout; + u32 boot_data; + + /* perform soft reset */ + wl1271_write32(wl, WL12XX_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT); + + /* SOFT_RESET is self clearing */ + timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME); + while (1) { + boot_data = wl1271_read32(wl, WL12XX_SLV_SOFT_RESET); + wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data); + if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0) + break; + + if (time_after(jiffies, timeout)) { + /* 1.2 check pWhalBus->uSelfClearTime if the + * timeout was reached */ + wl1271_error("soft reset timeout"); + return -1; + } + + udelay(SOFT_RESET_STALL_TIME); + } + + /* disable Rx/Tx */ + wl1271_write32(wl, WL12XX_ENABLE, 0x0); + + /* disable auto calibration on start*/ + wl1271_write32(wl, WL12XX_SPARE_A2, 0xffff); + + return 0; +} + +static int wl12xx_pre_boot(struct wl1271 *wl) +{ + int ret = 0; + u32 clk; + int 
selected_clock = -1; + + if (wl->chip.id == CHIP_ID_1283_PG20) { + ret = wl128x_boot_clk(wl, &selected_clock); + if (ret < 0) + goto out; + } else { + ret = wl127x_boot_clk(wl); + if (ret < 0) + goto out; + } + + /* Continue the ELP wake up sequence */ + wl1271_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL); + udelay(500); + + wlcore_set_partition(wl, &wl->ptable[PART_DRPW]); + + /* Read-modify-write DRPW_SCRATCH_START register (see next state) + to be used by DRPw FW. The RTRIM value will be added by the FW + before taking DRPw out of reset */ + + clk = wl1271_read32(wl, WL12XX_DRPW_SCRATCH_START); + + wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk); + + if (wl->chip.id == CHIP_ID_1283_PG20) + clk |= ((selected_clock & 0x3) << 1) << 4; + else + clk |= (wl->ref_clock << 1) << 4; + + wl1271_write32(wl, WL12XX_DRPW_SCRATCH_START, clk); + + wlcore_set_partition(wl, &wl->ptable[PART_WORK]); + + /* Disable interrupts */ + wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL); + + ret = wl1271_boot_soft_reset(wl); + if (ret < 0) + goto out; + +out: + return ret; +} + +static void wl12xx_pre_upload(struct wl1271 *wl) +{ + u32 tmp; + + /* write firmware's last address (i.e. its length) to + * ACX_EEPROMLESS_IND_REG */ + wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG"); + + wl1271_write32(wl, WL12XX_EEPROMLESS_IND, WL12XX_EEPROMLESS_IND); + + tmp = wlcore_read_reg(wl, REG_CHIP_ID_B); + + wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp); + + /* 6. read the EEPROM parameters */ + tmp = wl1271_read32(wl, WL12XX_SCR_PAD2); + + /* WL1271: The reference driver skips steps 7 to 10 (jumps directly + * to upload_fw) */ + + if (wl->chip.id == CHIP_ID_1283_PG20) + wl12xx_top_reg_write(wl, SDIO_IO_DS, HCI_IO_DS_6MA); +} + +static void wl12xx_enable_interrupts(struct wl1271 *wl) +{ + u32 polarity; + + polarity = wl12xx_top_reg_read(wl, OCP_REG_POLARITY); + + /* We use HIGH polarity, so unset the LOW bit */ + polarity &= ~POLARITY_LOW; + wl12xx_top_reg_write(wl, OCP_REG_POLARITY, polarity); + + wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_ALL_EVENTS_VECTOR); + + wlcore_enable_interrupts(wl); + wlcore_write_reg(wl, REG_INTERRUPT_MASK, + WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK)); + + wl1271_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL); +} + +static int wl12xx_boot(struct wl1271 *wl) +{ + int ret; + + ret = wl12xx_pre_boot(wl); + if (ret < 0) + goto out; + + ret = wlcore_boot_upload_nvs(wl); + if (ret < 0) + goto out; + + wl12xx_pre_upload(wl); + + ret = wlcore_boot_upload_firmware(wl); + if (ret < 0) + goto out; + + ret = wlcore_boot_run_firmware(wl); + if (ret < 0) + goto out; + + wl12xx_enable_interrupts(wl); + +out: + return ret; +} + +static void wl12xx_trigger_cmd(struct wl1271 *wl, int cmd_box_addr, + void *buf, size_t len) +{ + wl1271_write(wl, cmd_box_addr, buf, len, false); + wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_CMD); +} + +static void wl12xx_ack_event(struct wl1271 *wl) +{ + wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_EVENT_ACK); +} + +static u32 wl12xx_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks) +{ + u32 blk_size = WL12XX_TX_HW_BLOCK_SIZE; + u32 align_len = wlcore_calc_packet_alignment(wl, len); + + return (align_len + blk_size - 1) / blk_size + spare_blks; +} + +static void +wl12xx_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc, + u32 blks, u32 spare_blks) +{ + if (wl->chip.id == CHIP_ID_1283_PG20) { + desc->wl128x_mem.total_mem_blocks = blks; + } else { + desc->wl127x_mem.extra_blocks = spare_blks; + 
desc->wl127x_mem.total_mem_blocks = blks; + } +} + +static void +wl12xx_set_tx_desc_data_len(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc, + struct sk_buff *skb) +{ + u32 aligned_len = wlcore_calc_packet_alignment(wl, skb->len); + + if (wl->chip.id == CHIP_ID_1283_PG20) { + desc->wl128x_mem.extra_bytes = aligned_len - skb->len; + desc->length = cpu_to_le16(aligned_len >> 2); + + wl1271_debug(DEBUG_TX, + "tx_fill_hdr: hlid: %d len: %d life: %d mem: %d extra: %d", + desc->hlid, + le16_to_cpu(desc->length), + le16_to_cpu(desc->life_time), + desc->wl128x_mem.total_mem_blocks, + desc->wl128x_mem.extra_bytes); + } else { + /* calculate number of padding bytes */ + int pad = aligned_len - skb->len; + desc->tx_attr |= + cpu_to_le16(pad << TX_HW_ATTR_OFST_LAST_WORD_PAD); + + /* Store the aligned length in terms of words */ + desc->length = cpu_to_le16(aligned_len >> 2); + + wl1271_debug(DEBUG_TX, + "tx_fill_hdr: pad: %d hlid: %d len: %d life: %d mem: %d", + pad, desc->hlid, + le16_to_cpu(desc->length), + le16_to_cpu(desc->life_time), + desc->wl127x_mem.total_mem_blocks); + } +} + +static enum wl_rx_buf_align +wl12xx_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc) +{ + if (rx_desc & RX_BUF_UNALIGNED_PAYLOAD) + return WLCORE_RX_BUF_UNALIGNED; + + return WLCORE_RX_BUF_ALIGNED; +} + +static u32 wl12xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data, + u32 data_len) +{ + struct wl1271_rx_descriptor *desc = rx_data; + + /* invalid packet */ + if (data_len < sizeof(*desc) || + data_len < sizeof(*desc) + desc->pad_len) + return 0; + + return data_len - sizeof(*desc) - desc->pad_len; +} + +static void wl12xx_tx_delayed_compl(struct wl1271 *wl) +{ + if (wl->fw_status->tx_results_counter == (wl->tx_results_count & 0xff)) + return; + + wl1271_tx_complete(wl); +} + +static int wl12xx_hw_init(struct wl1271 *wl) +{ + int ret; + + if (wl->chip.id == CHIP_ID_1283_PG20) { + u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE; + + ret = wl128x_cmd_general_parms(wl); + if (ret < 0) + goto out; + ret = wl128x_cmd_radio_parms(wl); + if (ret < 0) + goto out; + + if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN) + /* Enable SDIO padding */ + host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK; + + /* Must be before wl1271_acx_init_mem_config() */ + ret = wl1271_acx_host_if_cfg_bitmap(wl, host_cfg_bitmap); + if (ret < 0) + goto out; + } else { + ret = wl1271_cmd_general_parms(wl); + if (ret < 0) + goto out; + ret = wl1271_cmd_radio_parms(wl); + if (ret < 0) + goto out; + ret = wl1271_cmd_ext_radio_parms(wl); + if (ret < 0) + goto out; + } +out: + return ret; +} + +static u32 wl12xx_sta_get_ap_rate_mask(struct wl1271 *wl, + struct wl12xx_vif *wlvif) +{ + return wlvif->rate_set; +} + +static int wl12xx_identify_fw(struct wl1271 *wl) +{ + unsigned int *fw_ver = wl->chip.fw_ver; + + /* Only new station firmwares support routing fw logs to the host */ + if ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) && + (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_FWLOG_STA_MIN)) + wl->quirks |= WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED; + + /* This feature is not yet supported for AP mode */ + if (fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP) + wl->quirks |= WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED; + + return 0; +} + +static void wl12xx_conf_init(struct wl1271 *wl) +{ + struct wl12xx_priv *priv = wl->priv; + + /* apply driver default configuration */ + memcpy(&wl->conf, &wl12xx_conf, sizeof(wl12xx_conf)); + + /* apply default private configuration */ + memcpy(&priv->conf, &wl12xx_default_priv_conf, sizeof(priv->conf)); +} + +static bool 
wl12xx_mac_in_fuse(struct wl1271 *wl) +{ + bool supported = false; + u8 major, minor; + + if (wl->chip.id == CHIP_ID_1283_PG20) { + major = WL128X_PG_GET_MAJOR(wl->hw_pg_ver); + minor = WL128X_PG_GET_MINOR(wl->hw_pg_ver); + + /* in wl128x we have the MAC address if the PG is >= (2, 1) */ + if (major > 2 || (major == 2 && minor >= 1)) + supported = true; + } else { + major = WL127X_PG_GET_MAJOR(wl->hw_pg_ver); + minor = WL127X_PG_GET_MINOR(wl->hw_pg_ver); + + /* in wl127x we have the MAC address if the PG is >= (3, 1) */ + if (major == 3 && minor >= 1) + supported = true; + } + + wl1271_debug(DEBUG_PROBE, + "PG Ver major = %d minor = %d, MAC %s present", + major, minor, supported ? "is" : "is not"); + + return supported; +} + +static void wl12xx_get_fuse_mac(struct wl1271 *wl) +{ + u32 mac1, mac2; + + wlcore_set_partition(wl, &wl->ptable[PART_DRPW]); + + mac1 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1); + mac2 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2); + + /* these are the two parts of the BD_ADDR */ + wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) + + ((mac1 & 0xff000000) >> 24); + wl->fuse_nic_addr = mac1 & 0xffffff; + + wlcore_set_partition(wl, &wl->ptable[PART_DOWN]); +} + +static s8 wl12xx_get_pg_ver(struct wl1271 *wl) +{ + u32 die_info; + + if (wl->chip.id == CHIP_ID_1283_PG20) + die_info = wl12xx_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1); + else + die_info = wl12xx_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1); + + return (s8) (die_info & PG_VER_MASK) >> PG_VER_OFFSET; +} + +static void wl12xx_get_mac(struct wl1271 *wl) +{ + if (wl12xx_mac_in_fuse(wl)) + wl12xx_get_fuse_mac(wl); +} + +static struct wlcore_ops wl12xx_ops = { + .identify_chip = wl12xx_identify_chip, + .identify_fw = wl12xx_identify_fw, + .boot = wl12xx_boot, + .trigger_cmd = wl12xx_trigger_cmd, + .ack_event = wl12xx_ack_event, + .calc_tx_blocks = wl12xx_calc_tx_blocks, + .set_tx_desc_blocks = wl12xx_set_tx_desc_blocks, + .set_tx_desc_data_len = wl12xx_set_tx_desc_data_len, + .get_rx_buf_align = wl12xx_get_rx_buf_align, + .get_rx_packet_len = wl12xx_get_rx_packet_len, + .tx_immediate_compl = NULL, + .tx_delayed_compl = wl12xx_tx_delayed_compl, + .hw_init = wl12xx_hw_init, + .init_vif = NULL, + .sta_get_ap_rate_mask = wl12xx_sta_get_ap_rate_mask, + .get_pg_ver = wl12xx_get_pg_ver, + .get_mac = wl12xx_get_mac, +}; + +static struct ieee80211_sta_ht_cap wl12xx_ht_cap = { + .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | + (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT), + .ht_supported = true, + .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, + .mcs = { + .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + .rx_highest = cpu_to_le16(72), + .tx_params = IEEE80211_HT_MCS_TX_DEFINED, + }, +}; + +static int __devinit wl12xx_probe(struct platform_device *pdev) +{ + struct wl1271 *wl; + struct ieee80211_hw *hw; + struct wl12xx_priv *priv; + + hw = wlcore_alloc_hw(sizeof(*priv)); + if (IS_ERR(hw)) { + wl1271_error("can't allocate hw"); + return PTR_ERR(hw); + } + + wl = hw->priv; + wl->ops = &wl12xx_ops; + wl->ptable = wl12xx_ptable; + wl->rtable = wl12xx_rtable; + wl->num_tx_desc = 16; + wl->normal_tx_spare = WL12XX_TX_HW_BLOCK_SPARE_DEFAULT; + wl->gem_tx_spare = WL12XX_TX_HW_BLOCK_GEM_SPARE; + wl->band_rate_to_idx = wl12xx_band_rate_to_idx; + wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX; + wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0; + wl->fw_status_priv_len = 0; + memcpy(&wl->ht_cap, &wl12xx_ht_cap, sizeof(wl12xx_ht_cap)); + wl12xx_conf_init(wl); + + return 
wlcore_probe(wl, pdev); +} + +static const struct platform_device_id wl12xx_id_table[] __devinitconst = { + { "wl12xx", 0 }, + { } /* Terminating Entry */ +}; +MODULE_DEVICE_TABLE(platform, wl12xx_id_table); + +static struct platform_driver wl12xx_driver = { + .probe = wl12xx_probe, + .remove = __devexit_p(wlcore_remove), + .id_table = wl12xx_id_table, + .driver = { + .name = "wl12xx_driver", + .owner = THIS_MODULE, + } +}; + +static int __init wl12xx_init(void) +{ + return platform_driver_register(&wl12xx_driver); +} +module_init(wl12xx_init); + +static void __exit wl12xx_exit(void) +{ + platform_driver_unregister(&wl12xx_driver); +} +module_exit(wl12xx_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); +MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE); +MODULE_FIRMWARE(WL127X_FW_NAME_MULTI); +MODULE_FIRMWARE(WL127X_PLT_FW_NAME); +MODULE_FIRMWARE(WL128X_FW_NAME_SINGLE); +MODULE_FIRMWARE(WL128X_FW_NAME_MULTI); +MODULE_FIRMWARE(WL128X_PLT_FW_NAME); diff --git a/drivers/net/wireless/wl12xx/reg.h b/drivers/net/wireless/ti/wl12xx/reg.h index 340db324bc2..79ede02e258 100644 --- a/drivers/net/wireless/wl12xx/reg.h +++ b/drivers/net/wireless/ti/wl12xx/reg.h @@ -33,16 +33,8 @@ #define REGISTERS_DOWN_SIZE 0x00008800 #define REGISTERS_WORK_SIZE 0x0000b000 -#define HW_ACCESS_ELP_CTRL_REG_ADDR 0x1FFFC #define FW_STATUS_ADDR (0x14FC0 + 0xA000) -/* ELP register commands */ -#define ELPCTRL_WAKE_UP 0x1 -#define ELPCTRL_WAKE_UP_WLAN_READY 0x5 -#define ELPCTRL_SLEEP 0x0 -/* ELP WLAN_READY bit */ -#define ELPCTRL_WLAN_READY 0x2 - /*=============================================== Host Software Reset - 32bit RW ------------------------------------------ @@ -57,14 +49,14 @@ (not self-clearing), the Wlan hardware exits the software reset state. ===============================================*/ -#define ACX_REG_SLV_SOFT_RESET (REGISTERS_BASE + 0x0000) +#define WL12XX_SLV_SOFT_RESET (REGISTERS_BASE + 0x0000) #define WL1271_SLV_REG_DATA (REGISTERS_BASE + 0x0008) #define WL1271_SLV_REG_ADATA (REGISTERS_BASE + 0x000c) #define WL1271_SLV_MEM_DATA (REGISTERS_BASE + 0x0018) -#define ACX_REG_INTERRUPT_TRIG (REGISTERS_BASE + 0x0474) -#define ACX_REG_INTERRUPT_TRIG_H (REGISTERS_BASE + 0x0478) +#define WL12XX_REG_INTERRUPT_TRIG (REGISTERS_BASE + 0x0474) +#define WL12XX_REG_INTERRUPT_TRIG_H (REGISTERS_BASE + 0x0478) /*============================================= Host Interrupt Mask Register - 32bit (RW) @@ -94,7 +86,7 @@ 21- - Default: 0x0001 *==============================================*/ -#define ACX_REG_INTERRUPT_MASK (REGISTERS_BASE + 0x04DC) +#define WL12XX_REG_INTERRUPT_MASK (REGISTERS_BASE + 0x04DC) /*============================================= Host Interrupt Mask Set 16bit, (Write only) @@ -125,7 +117,7 @@ Reading this register doesn't effect its content. =============================================*/ -#define ACX_REG_INTERRUPT_NO_CLEAR (REGISTERS_BASE + 0x04E8) +#define WL12XX_REG_INTERRUPT_NO_CLEAR (REGISTERS_BASE + 0x04E8) /*============================================= Host Interrupt Status Clear on Read Register @@ -148,9 +140,9 @@ HINT_STS_ND registers, thus making the assotiated interrupt inactive. 
(0-no effect) ==============================================*/ -#define ACX_REG_INTERRUPT_ACK (REGISTERS_BASE + 0x04F0) +#define WL12XX_REG_INTERRUPT_ACK (REGISTERS_BASE + 0x04F0) -#define RX_DRIVER_COUNTER_ADDRESS (REGISTERS_BASE + 0x0538) +#define WL12XX_REG_RX_DRIVER_COUNTER (REGISTERS_BASE + 0x0538) /* Device Configuration registers*/ #define SOR_CFG (REGISTERS_BASE + 0x0800) @@ -175,9 +167,9 @@ 1 halt eCPU 0 enable eCPU ===============================================*/ -#define ACX_REG_ECPU_CONTROL (REGISTERS_BASE + 0x0804) +#define WL12XX_REG_ECPU_CONTROL (REGISTERS_BASE + 0x0804) -#define HI_CFG (REGISTERS_BASE + 0x0808) +#define WL12XX_HI_CFG (REGISTERS_BASE + 0x0808) /*=============================================== EEPROM Burst Read Start - 32bit RW @@ -196,72 +188,67 @@ *================================================*/ #define ACX_REG_EE_START (REGISTERS_BASE + 0x080C) -#define OCP_POR_CTR (REGISTERS_BASE + 0x09B4) -#define OCP_DATA_WRITE (REGISTERS_BASE + 0x09B8) -#define OCP_DATA_READ (REGISTERS_BASE + 0x09BC) -#define OCP_CMD (REGISTERS_BASE + 0x09C0) - -#define WL1271_HOST_WR_ACCESS (REGISTERS_BASE + 0x09F8) +#define WL12XX_OCP_POR_CTR (REGISTERS_BASE + 0x09B4) +#define WL12XX_OCP_DATA_WRITE (REGISTERS_BASE + 0x09B8) +#define WL12XX_OCP_DATA_READ (REGISTERS_BASE + 0x09BC) +#define WL12XX_OCP_CMD (REGISTERS_BASE + 0x09C0) -#define CHIP_ID_B (REGISTERS_BASE + 0x5674) +#define WL12XX_HOST_WR_ACCESS (REGISTERS_BASE + 0x09F8) -#define CHIP_ID_1271_PG10 (0x4030101) -#define CHIP_ID_1271_PG20 (0x4030111) -#define CHIP_ID_1283_PG10 (0x05030101) -#define CHIP_ID_1283_PG20 (0x05030111) +#define WL12XX_CHIP_ID_B (REGISTERS_BASE + 0x5674) -#define ENABLE (REGISTERS_BASE + 0x5450) +#define WL12XX_ENABLE (REGISTERS_BASE + 0x5450) /* Power Management registers */ -#define ELP_CFG_MODE (REGISTERS_BASE + 0x5804) -#define ELP_CMD (REGISTERS_BASE + 0x5808) -#define PLL_CAL_TIME (REGISTERS_BASE + 0x5810) -#define CLK_REQ_TIME (REGISTERS_BASE + 0x5814) -#define CLK_BUF_TIME (REGISTERS_BASE + 0x5818) +#define WL12XX_ELP_CFG_MODE (REGISTERS_BASE + 0x5804) +#define WL12XX_ELP_CMD (REGISTERS_BASE + 0x5808) +#define WL12XX_PLL_CAL_TIME (REGISTERS_BASE + 0x5810) +#define WL12XX_CLK_REQ_TIME (REGISTERS_BASE + 0x5814) +#define WL12XX_CLK_BUF_TIME (REGISTERS_BASE + 0x5818) -#define CFG_PLL_SYNC_CNT (REGISTERS_BASE + 0x5820) +#define WL12XX_CFG_PLL_SYNC_CNT (REGISTERS_BASE + 0x5820) /* Scratch Pad registers*/ -#define SCR_PAD0 (REGISTERS_BASE + 0x5608) -#define SCR_PAD1 (REGISTERS_BASE + 0x560C) -#define SCR_PAD2 (REGISTERS_BASE + 0x5610) -#define SCR_PAD3 (REGISTERS_BASE + 0x5614) -#define SCR_PAD4 (REGISTERS_BASE + 0x5618) -#define SCR_PAD4_SET (REGISTERS_BASE + 0x561C) -#define SCR_PAD4_CLR (REGISTERS_BASE + 0x5620) -#define SCR_PAD5 (REGISTERS_BASE + 0x5624) -#define SCR_PAD5_SET (REGISTERS_BASE + 0x5628) -#define SCR_PAD5_CLR (REGISTERS_BASE + 0x562C) -#define SCR_PAD6 (REGISTERS_BASE + 0x5630) -#define SCR_PAD7 (REGISTERS_BASE + 0x5634) -#define SCR_PAD8 (REGISTERS_BASE + 0x5638) -#define SCR_PAD9 (REGISTERS_BASE + 0x563C) +#define WL12XX_SCR_PAD0 (REGISTERS_BASE + 0x5608) +#define WL12XX_SCR_PAD1 (REGISTERS_BASE + 0x560C) +#define WL12XX_SCR_PAD2 (REGISTERS_BASE + 0x5610) +#define WL12XX_SCR_PAD3 (REGISTERS_BASE + 0x5614) +#define WL12XX_SCR_PAD4 (REGISTERS_BASE + 0x5618) +#define WL12XX_SCR_PAD4_SET (REGISTERS_BASE + 0x561C) +#define WL12XX_SCR_PAD4_CLR (REGISTERS_BASE + 0x5620) +#define WL12XX_SCR_PAD5 (REGISTERS_BASE + 0x5624) +#define WL12XX_SCR_PAD5_SET (REGISTERS_BASE + 0x5628) +#define 
WL12XX_SCR_PAD5_CLR (REGISTERS_BASE + 0x562C) +#define WL12XX_SCR_PAD6 (REGISTERS_BASE + 0x5630) +#define WL12XX_SCR_PAD7 (REGISTERS_BASE + 0x5634) +#define WL12XX_SCR_PAD8 (REGISTERS_BASE + 0x5638) +#define WL12XX_SCR_PAD9 (REGISTERS_BASE + 0x563C) /* Spare registers*/ -#define SPARE_A1 (REGISTERS_BASE + 0x0994) -#define SPARE_A2 (REGISTERS_BASE + 0x0998) -#define SPARE_A3 (REGISTERS_BASE + 0x099C) -#define SPARE_A4 (REGISTERS_BASE + 0x09A0) -#define SPARE_A5 (REGISTERS_BASE + 0x09A4) -#define SPARE_A6 (REGISTERS_BASE + 0x09A8) -#define SPARE_A7 (REGISTERS_BASE + 0x09AC) -#define SPARE_A8 (REGISTERS_BASE + 0x09B0) -#define SPARE_B1 (REGISTERS_BASE + 0x5420) -#define SPARE_B2 (REGISTERS_BASE + 0x5424) -#define SPARE_B3 (REGISTERS_BASE + 0x5428) -#define SPARE_B4 (REGISTERS_BASE + 0x542C) -#define SPARE_B5 (REGISTERS_BASE + 0x5430) -#define SPARE_B6 (REGISTERS_BASE + 0x5434) -#define SPARE_B7 (REGISTERS_BASE + 0x5438) -#define SPARE_B8 (REGISTERS_BASE + 0x543C) - -#define PLL_PARAMETERS (REGISTERS_BASE + 0x6040) -#define WU_COUNTER_PAUSE (REGISTERS_BASE + 0x6008) -#define WELP_ARM_COMMAND (REGISTERS_BASE + 0x6100) -#define DRPW_SCRATCH_START (DRPW_BASE + 0x002C) - - -#define ACX_SLV_SOFT_RESET_BIT BIT(1) +#define WL12XX_SPARE_A1 (REGISTERS_BASE + 0x0994) +#define WL12XX_SPARE_A2 (REGISTERS_BASE + 0x0998) +#define WL12XX_SPARE_A3 (REGISTERS_BASE + 0x099C) +#define WL12XX_SPARE_A4 (REGISTERS_BASE + 0x09A0) +#define WL12XX_SPARE_A5 (REGISTERS_BASE + 0x09A4) +#define WL12XX_SPARE_A6 (REGISTERS_BASE + 0x09A8) +#define WL12XX_SPARE_A7 (REGISTERS_BASE + 0x09AC) +#define WL12XX_SPARE_A8 (REGISTERS_BASE + 0x09B0) +#define WL12XX_SPARE_B1 (REGISTERS_BASE + 0x5420) +#define WL12XX_SPARE_B2 (REGISTERS_BASE + 0x5424) +#define WL12XX_SPARE_B3 (REGISTERS_BASE + 0x5428) +#define WL12XX_SPARE_B4 (REGISTERS_BASE + 0x542C) +#define WL12XX_SPARE_B5 (REGISTERS_BASE + 0x5430) +#define WL12XX_SPARE_B6 (REGISTERS_BASE + 0x5434) +#define WL12XX_SPARE_B7 (REGISTERS_BASE + 0x5438) +#define WL12XX_SPARE_B8 (REGISTERS_BASE + 0x543C) + +#define WL12XX_PLL_PARAMETERS (REGISTERS_BASE + 0x6040) +#define WL12XX_WU_COUNTER_PAUSE (REGISTERS_BASE + 0x6008) +#define WL12XX_WELP_ARM_COMMAND (REGISTERS_BASE + 0x6100) +#define WL12XX_DRPW_SCRATCH_START (DRPW_BASE + 0x002C) + +#define WL12XX_CMD_MBOX_ADDRESS 0x407B4 + #define ACX_REG_EEPROM_START_BIT BIT(1) /* Command/Information Mailbox Pointers */ @@ -279,7 +266,7 @@ the host receives the Init Complete interrupt from the Wlan hardware. ===============================================*/ -#define REG_COMMAND_MAILBOX_PTR (SCR_PAD0) +#define WL12XX_REG_COMMAND_MAILBOX_PTR (WL12XX_SCR_PAD0) /*=============================================== Information Mailbox Pointer - 32bit RW @@ -294,7 +281,7 @@ until after the host receives the Init Complete interrupt from the Wlan hardware. 
===============================================*/ -#define REG_EVENT_MAILBOX_PTR (SCR_PAD1) +#define WL12XX_REG_EVENT_MAILBOX_PTR (WL12XX_SCR_PAD1) /*=============================================== EEPROM Read/Write Request 32bit RW @@ -365,26 +352,6 @@ #define ACX_CONT_WIND_MIN_MASK 0x0000007f #define ACX_CONT_WIND_MAX 0x03ff0000 -/*=============================================== - HI_CFG Interface Configuration Register Values - ------------------------------------------ - ===============================================*/ -#define HI_CFG_UART_ENABLE 0x00000004 -#define HI_CFG_RST232_ENABLE 0x00000008 -#define HI_CFG_CLOCK_REQ_SELECT 0x00000010 -#define HI_CFG_HOST_INT_ENABLE 0x00000020 -#define HI_CFG_VLYNQ_OUTPUT_ENABLE 0x00000040 -#define HI_CFG_HOST_INT_ACTIVE_LOW 0x00000080 -#define HI_CFG_UART_TX_OUT_GPIO_15 0x00000100 -#define HI_CFG_UART_TX_OUT_GPIO_14 0x00000200 -#define HI_CFG_UART_TX_OUT_GPIO_7 0x00000400 - -#define HI_CFG_DEF_VAL \ - (HI_CFG_UART_ENABLE | \ - HI_CFG_RST232_ENABLE | \ - HI_CFG_CLOCK_REQ_SELECT | \ - HI_CFG_HOST_INT_ENABLE) - #define REF_FREQ_19_2 0 #define REF_FREQ_26_0 1 #define REF_FREQ_38_4 2 @@ -400,38 +367,19 @@ #define LUT_PARAM_BB_PLL_LOOP_FILTER 5 #define LUT_PARAM_NUM 6 -#define ACX_EEPROMLESS_IND_REG (SCR_PAD4) +#define WL12XX_EEPROMLESS_IND (WL12XX_SCR_PAD4) #define USE_EEPROM 0 -#define SOFT_RESET_MAX_TIME 1000000 -#define SOFT_RESET_STALL_TIME 1000 #define NVS_DATA_BUNDARY_ALIGNMENT 4 - -/* Firmware image load chunk size */ -#define CHUNK_SIZE 16384 - /* Firmware image header size */ #define FW_HDR_SIZE 8 -#define ECPU_CONTROL_HALT 0x00000101 - - /****************************************************************************** CHANNELS, BAND & REG DOMAINS definitions ******************************************************************************/ - -enum { - RADIO_BAND_2_4GHZ = 0, /* 2.4 Ghz band */ - RADIO_BAND_5GHZ = 1, /* 5 Ghz band */ - RADIO_BAND_JAPAN_4_9_GHZ = 2, - DEFAULT_BAND = RADIO_BAND_2_4GHZ, - INVALID_BAND = 0xFE, - MAX_RADIO_BANDS = 0xFF -}; - #define SHORT_PREAMBLE_BIT BIT(0) /* CCK or Barker depending on the rate */ #define OFDM_RATE_BIT BIT(6) #define PBCC_RATE_BIT BIT(7) @@ -465,14 +413,82 @@ b12-b0 - Supported Rate indicator bits as defined below. 
******************************************************************************/ +#define OCP_CMD_LOOP 32 +#define OCP_CMD_WRITE 0x1 +#define OCP_CMD_READ 0x2 +#define OCP_READY_MASK BIT(18) +#define OCP_STATUS_MASK (BIT(16) | BIT(17)) +#define OCP_STATUS_NO_RESP 0x00000 +#define OCP_STATUS_OK 0x10000 +#define OCP_STATUS_REQ_FAILED 0x20000 +#define OCP_STATUS_RESP_ERROR 0x30000 + +#define OCP_REG_POLARITY 0x0064 +#define OCP_REG_CLK_TYPE 0x0448 +#define OCP_REG_CLK_POLARITY 0x0cb2 +#define OCP_REG_CLK_PULL 0x0cb4 + +#define POLARITY_LOW BIT(1) +#define NO_PULL (BIT(14) | BIT(15)) + +#define FREF_CLK_TYPE_BITS 0xfffffe7f +#define CLK_REQ_PRCM 0x100 +#define FREF_CLK_POLARITY_BITS 0xfffff8ff +#define CLK_REQ_OUTN_SEL 0x700 + +#define WU_COUNTER_PAUSE_VAL 0x3FF + +/* PLL configuration algorithm for wl128x */ +#define SYS_CLK_CFG_REG 0x2200 +/* Bit[0] - 0-TCXO, 1-FREF */ +#define MCS_PLL_CLK_SEL_FREF BIT(0) +/* Bit[3:2] - 01-TCXO, 10-FREF */ +#define WL_CLK_REQ_TYPE_FREF BIT(3) +#define WL_CLK_REQ_TYPE_PG2 (BIT(3) | BIT(2)) +/* Bit[4] - 0-TCXO, 1-FREF */ +#define PRCM_CM_EN_MUX_WLAN_FREF BIT(4) + +#define TCXO_ILOAD_INT_REG 0x2264 +#define TCXO_CLK_DETECT_REG 0x2266 + +#define TCXO_DET_FAILED BIT(4) + +#define FREF_ILOAD_INT_REG 0x2084 +#define FREF_CLK_DETECT_REG 0x2086 +#define FREF_CLK_DETECT_FAIL BIT(4) + +/* Use this reg for masking during driver access */ +#define WL_SPARE_REG 0x2320 +#define WL_SPARE_VAL BIT(2) +/* Bit[6:5:3] - mask wl write SYS_CLK_CFG[8:5:2:4] */ +#define WL_SPARE_MASK_8526 (BIT(6) | BIT(5) | BIT(3)) + +#define PLL_LOCK_COUNTERS_REG 0xD8C +#define PLL_LOCK_COUNTERS_COEX 0x0F +#define PLL_LOCK_COUNTERS_MCS 0xF0 +#define MCS_PLL_OVERRIDE_REG 0xD90 +#define MCS_PLL_CONFIG_REG 0xD92 +#define MCS_SEL_IN_FREQ_MASK 0x0070 +#define MCS_SEL_IN_FREQ_SHIFT 4 +#define MCS_PLL_CONFIG_REG_VAL 0x73 +#define MCS_PLL_ENABLE_HP (BIT(0) | BIT(1)) + +#define MCS_PLL_M_REG 0xD94 +#define MCS_PLL_N_REG 0xD96 +#define MCS_PLL_M_REG_VAL 0xC8 +#define MCS_PLL_N_REG_VAL 0x07 + +#define SDIO_IO_DS 0xd14 + +/* SDIO/wSPI DS configuration values */ +enum { + HCI_IO_DS_8MA = 0, + HCI_IO_DS_4MA = 1, /* default */ + HCI_IO_DS_6MA = 2, + HCI_IO_DS_2MA = 3, +}; -/************************************************************************* - - Interrupt Trigger Register (Host -> WiLink) - -**************************************************************************/ - -/* Hardware to Embedded CPU Interrupts - first 32-bit register set */ +/* end PLL configuration algorithm for wl128x */ /* * Host Command Interrupt. Setting this bit masks @@ -480,7 +496,7 @@ b12-b0 - Supported Rate indicator bits as defined below. * the FW that it has sent a command * to the Wlan hardware Command Mailbox. */ -#define INTR_TRIG_CMD BIT(0) +#define WL12XX_INTR_TRIG_CMD BIT(0) /* * Host Event Acknowlegde Interrupt. The host @@ -488,42 +504,27 @@ b12-b0 - Supported Rate indicator bits as defined below. * the unsolicited information from the event * mailbox. */ -#define INTR_TRIG_EVENT_ACK BIT(1) - -/* - * The host sets this bit to inform the Wlan - * FW that a TX packet is in the XFER - * Buffer #0. - */ -#define INTR_TRIG_TX_PROC0 BIT(2) - -/* - * The host sets this bit to inform the FW - * that it read a packet from RX XFER - * Buffer #0. 
- */ -#define INTR_TRIG_RX_PROC0 BIT(3) - -#define INTR_TRIG_DEBUG_ACK BIT(4) +#define WL12XX_INTR_TRIG_EVENT_ACK BIT(1) -#define INTR_TRIG_STATE_CHANGED BIT(5) - - -/* Hardware to Embedded CPU Interrupts - second 32-bit register set */ - -/* - * The host sets this bit to inform the FW - * that it read a packet from RX XFER - * Buffer #1. - */ -#define INTR_TRIG_RX_PROC1 BIT(17) +/*=============================================== + HI_CFG Interface Configuration Register Values + ------------------------------------------ + ===============================================*/ +#define HI_CFG_UART_ENABLE 0x00000004 +#define HI_CFG_RST232_ENABLE 0x00000008 +#define HI_CFG_CLOCK_REQ_SELECT 0x00000010 +#define HI_CFG_HOST_INT_ENABLE 0x00000020 +#define HI_CFG_VLYNQ_OUTPUT_ENABLE 0x00000040 +#define HI_CFG_HOST_INT_ACTIVE_LOW 0x00000080 +#define HI_CFG_UART_TX_OUT_GPIO_15 0x00000100 +#define HI_CFG_UART_TX_OUT_GPIO_14 0x00000200 +#define HI_CFG_UART_TX_OUT_GPIO_7 0x00000400 -/* - * The host sets this bit to inform the Wlan - * hardware that a TX packet is in the XFER - * Buffer #1. - */ -#define INTR_TRIG_TX_PROC1 BIT(18) +#define HI_CFG_DEF_VAL \ + (HI_CFG_UART_ENABLE | \ + HI_CFG_RST232_ENABLE | \ + HI_CFG_CLOCK_REQ_SELECT | \ + HI_CFG_HOST_INT_ENABLE) #define WL127X_REG_FUSE_DATA_2_1 0x050a #define WL128X_REG_FUSE_DATA_2_1 0x2152 diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h new file mode 100644 index 00000000000..74cd332e23e --- /dev/null +++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h @@ -0,0 +1,31 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 2011 Texas Instruments Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL12XX_PRIV_H__ +#define __WL12XX_PRIV_H__ + +#include "conf.h" + +struct wl12xx_priv { + struct wl12xx_priv_conf conf; +}; + +#endif /* __WL12XX_PRIV_H__ */ diff --git a/drivers/net/wireless/ti/wlcore/Kconfig b/drivers/net/wireless/ti/wlcore/Kconfig new file mode 100644 index 00000000000..9d04c38938b --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/Kconfig @@ -0,0 +1,41 @@ +config WLCORE + tristate "TI wlcore support" + depends on WL_TI && GENERIC_HARDIRQS + depends on INET + select FW_LOADER + ---help--- + This module contains the main code for TI WLAN chips. It abstracts + hardware-specific differences among different chipset families. + Each chipset family needs to implement its own lower-level module + that will depend on this module for the common code. + + If you choose to build a module, it will be called wlcore. Say N if + unsure. + +config WLCORE_SPI + tristate "TI wlcore SPI support" + depends on WLCORE && SPI_MASTER + select CRC7 + ---help--- + This module adds support for the SPI interface of adapters using + TI WLAN chipsets. Select this if your platform is using + the SPI bus. + + If you choose to build a module, it'll be called wlcore_spi. + Say N if unsure. 
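As a rough illustration of the split described in the "TI wlcore support" help text above, a chipset-family module fills a struct wlcore_ops with its chip-specific callbacks, allocates the shared state with wlcore_alloc_hw(), and then defers to wlcore_probe(), exactly as the wl12xx probe path added earlier in this patch does. The sketch below is only a minimal outline of that pattern; the my_chip_* identifiers are hypothetical placeholders, not symbols from this series.

/* Illustrative only: a minimal family-module skeleton modelled on
 * wl12xx_probe() from this patch; my_chip_* names are hypothetical. */
static struct wlcore_ops my_chip_ops = {
	.identify_chip	= my_chip_identify_chip,	/* chip-specific hooks */
	.boot		= my_chip_boot,
	.trigger_cmd	= my_chip_trigger_cmd,
	/* ... remaining callbacks elided ... */
};

static int __devinit my_chip_probe(struct platform_device *pdev)
{
	struct ieee80211_hw *hw;
	struct wl1271 *wl;

	hw = wlcore_alloc_hw(sizeof(struct my_chip_priv));	/* common allocation */
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	wl = hw->priv;
	wl->ops = &my_chip_ops;		/* chip-specific behaviour */
	wl->ptable = my_chip_ptable;	/* partition table for register access */

	return wlcore_probe(wl, pdev);	/* wlcore drives the rest */
}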
+ +config WLCORE_SDIO + tristate "TI wlcore SDIO support" + depends on WLCORE && MMC + ---help--- + This module adds support for the SDIO interface of adapters using + TI WLAN chipsets. Select this if your platform is using + the SDIO bus. + + If you choose to build a module, it'll be called wlcore_sdio. + Say N if unsure. + +config WL12XX_PLATFORM_DATA + bool + depends on WLCORE_SDIO != n || WL1251_SDIO != n + default y diff --git a/drivers/net/wireless/ti/wlcore/Makefile b/drivers/net/wireless/ti/wlcore/Makefile new file mode 100644 index 00000000000..d9fba9e3213 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/Makefile @@ -0,0 +1,15 @@ +wlcore-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \ + boot.o init.o debugfs.o scan.o + +wlcore_spi-objs = spi.o +wlcore_sdio-objs = sdio.o + +wlcore-$(CONFIG_NL80211_TESTMODE) += testmode.o +obj-$(CONFIG_WLCORE) += wlcore.o +obj-$(CONFIG_WLCORE_SPI) += wlcore_spi.o +obj-$(CONFIG_WLCORE_SDIO) += wlcore_sdio.o + +# small builtin driver bit +obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/ti/wlcore/acx.c index bc96db0683a..5912541a925 100644 --- a/drivers/net/wireless/wl12xx/acx.c +++ b/drivers/net/wireless/ti/wlcore/acx.c @@ -28,11 +28,11 @@ #include <linux/spi/spi.h> #include <linux/slab.h> -#include "wl12xx.h" +#include "wlcore.h" #include "debug.h" #include "wl12xx_80211.h" -#include "reg.h" #include "ps.h" +#include "hw_ops.h" int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 wake_up_event, u8 listen_interval) @@ -757,7 +757,10 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif) /* configure one AP supported rate class */ acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx); - acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->rate_set); + + /* the AP policy is HW specific */ + acx->rate_policy.enabled_rates = + cpu_to_le32(wlcore_hw_sta_get_ap_rate_mask(wl, wlvif)); acx->rate_policy.short_retry_limit = c->short_retry_limit; acx->rate_policy.long_retry_limit = c->long_retry_limit; acx->rate_policy.aflags = c->aflags; @@ -969,17 +972,14 @@ int wl12xx_acx_mem_cfg(struct wl1271 *wl) goto out; } - if (wl->chip.id == CHIP_ID_1283_PG20) - mem = &wl->conf.mem_wl128x; - else - mem = &wl->conf.mem_wl127x; + mem = &wl->conf.mem; /* memory config */ mem_conf->num_stations = mem->num_stations; mem_conf->rx_mem_block_num = mem->rx_block_num; mem_conf->tx_min_mem_block_num = mem->tx_min_block_num; mem_conf->num_ssid_profiles = mem->ssid_profiles; - mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS); + mem_conf->total_tx_descriptors = cpu_to_le32(wl->num_tx_desc); mem_conf->dyn_mem_enable = mem->dynamic_memory; mem_conf->tx_free_req = mem->min_req_tx_blocks; mem_conf->rx_free_req = mem->min_req_rx_blocks; @@ -998,32 +998,6 @@ out: return ret; } -int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap) -{ - struct wl1271_acx_host_config_bitmap *bitmap_conf; - int ret; - - bitmap_conf = kzalloc(sizeof(*bitmap_conf), GFP_KERNEL); - if (!bitmap_conf) { - ret = -ENOMEM; - goto out; - } - - bitmap_conf->host_cfg_bitmap = cpu_to_le32(host_cfg_bitmap); - - ret = wl1271_cmd_configure(wl, ACX_HOST_IF_CFG_BITMAP, - bitmap_conf, sizeof(*bitmap_conf)); - if (ret < 0) { - wl1271_warning("wl1271 bitmap config opt failed: %d", ret); - goto out; - } - -out: - kfree(bitmap_conf); - - return ret; -} - int wl1271_acx_init_mem_config(struct wl1271 *wl) { int 
ret; diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/ti/wlcore/acx.h index a28fc044034..b2f88831b7a 100644 --- a/drivers/net/wireless/wl12xx/acx.h +++ b/drivers/net/wireless/ti/wlcore/acx.h @@ -25,7 +25,7 @@ #ifndef __ACX_H__ #define __ACX_H__ -#include "wl12xx.h" +#include "wlcore.h" #include "cmd.h" /************************************************************************* @@ -824,16 +824,11 @@ struct wl1271_acx_keep_alive_config { __le32 period; } __packed; +/* TODO: maybe this needs to be moved somewhere else? */ #define HOST_IF_CFG_RX_FIFO_ENABLE BIT(0) #define HOST_IF_CFG_TX_EXTRA_BLKS_SWAP BIT(1) #define HOST_IF_CFG_TX_PAD_TO_SDIO_BLK BIT(3) -struct wl1271_acx_host_config_bitmap { - struct acx_header header; - - __le32 host_cfg_bitmap; -} __packed; - enum { WL1271_ACX_TRIG_TYPE_LEVEL = 0, WL1271_ACX_TRIG_TYPE_EDGE, @@ -1274,7 +1269,6 @@ int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold); int wl1271_acx_tx_config_options(struct wl1271 *wl); int wl12xx_acx_mem_cfg(struct wl1271 *wl); int wl1271_acx_init_mem_config(struct wl1271 *wl); -int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap); int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); int wl1271_acx_smart_reflex(struct wl1271 *wl); int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif, diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c new file mode 100644 index 00000000000..3a2207db540 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/boot.c @@ -0,0 +1,443 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2010 Nokia Corporation + * + * Contact: Luciano Coelho <luciano.coelho@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include <linux/slab.h> +#include <linux/wl12xx.h> +#include <linux/export.h> + +#include "debug.h" +#include "acx.h" +#include "boot.h" +#include "io.h" +#include "event.h" +#include "rx.h" +#include "hw_ops.h" + +static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag) +{ + u32 cpu_ctrl; + + /* 10.5.0 run the firmware (I) */ + cpu_ctrl = wlcore_read_reg(wl, REG_ECPU_CONTROL); + + /* 10.5.1 run the firmware (II) */ + cpu_ctrl |= flag; + wlcore_write_reg(wl, REG_ECPU_CONTROL, cpu_ctrl); +} + +static int wlcore_parse_fw_ver(struct wl1271 *wl) +{ + int ret; + + ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u", + &wl->chip.fw_ver[0], &wl->chip.fw_ver[1], + &wl->chip.fw_ver[2], &wl->chip.fw_ver[3], + &wl->chip.fw_ver[4]); + + if (ret != 5) { + wl1271_warning("fw version incorrect value"); + memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver)); + return -EINVAL; + } + + ret = wlcore_identify_fw(wl); + if (ret < 0) + return ret; + + return 0; +} + +static int wlcore_boot_fw_version(struct wl1271 *wl) +{ + struct wl1271_static_data *static_data; + int ret; + + static_data = kmalloc(sizeof(*static_data), GFP_DMA); + if (!static_data) { + wl1271_error("Couldn't allocate memory for static data!"); + return -ENOMEM; + } + + wl1271_read(wl, wl->cmd_box_addr, static_data, sizeof(*static_data), + false); + + strncpy(wl->chip.fw_ver_str, static_data->fw_version, + sizeof(wl->chip.fw_ver_str)); + + kfree(static_data); + + /* make sure the string is NULL-terminated */ + wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0'; + + ret = wlcore_parse_fw_ver(wl); + if (ret < 0) + return ret; + + return 0; +} + +static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf, + size_t fw_data_len, u32 dest) +{ + struct wlcore_partition_set partition; + int addr, chunk_num, partition_limit; + u8 *p, *chunk; + + /* whal_FwCtrl_LoadFwImageSm() */ + + wl1271_debug(DEBUG_BOOT, "starting firmware upload"); + + wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d", + fw_data_len, CHUNK_SIZE); + + if ((fw_data_len % 4) != 0) { + wl1271_error("firmware length not multiple of four"); + return -EIO; + } + + chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL); + if (!chunk) { + wl1271_error("allocation for firmware upload chunk failed"); + return -ENOMEM; + } + + memcpy(&partition, &wl->ptable[PART_DOWN], sizeof(partition)); + partition.mem.start = dest; + wlcore_set_partition(wl, &partition); + + /* 10.1 set partition limit and chunk num */ + chunk_num = 0; + partition_limit = wl->ptable[PART_DOWN].mem.size; + + while (chunk_num < fw_data_len / CHUNK_SIZE) { + /* 10.2 update partition, if needed */ + addr = dest + (chunk_num + 2) * CHUNK_SIZE; + if (addr > partition_limit) { + addr = dest + chunk_num * CHUNK_SIZE; + partition_limit = chunk_num * CHUNK_SIZE + + wl->ptable[PART_DOWN].mem.size; + partition.mem.start = addr; + wlcore_set_partition(wl, &partition); + } + + /* 10.3 upload the chunk */ + addr = dest + chunk_num * CHUNK_SIZE; + p = buf + chunk_num * CHUNK_SIZE; + memcpy(chunk, p, CHUNK_SIZE); + wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x", + p, addr); + wl1271_write(wl, addr, chunk, CHUNK_SIZE, false); + + chunk_num++; + } + + /* 10.4 upload the last chunk */ + addr = dest + chunk_num * CHUNK_SIZE; + p = buf + chunk_num * CHUNK_SIZE; + memcpy(chunk, p, 
fw_data_len % CHUNK_SIZE); + wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x", + fw_data_len % CHUNK_SIZE, p, addr); + wl1271_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false); + + kfree(chunk); + return 0; +} + +int wlcore_boot_upload_firmware(struct wl1271 *wl) +{ + u32 chunks, addr, len; + int ret = 0; + u8 *fw; + + fw = wl->fw; + chunks = be32_to_cpup((__be32 *) fw); + fw += sizeof(u32); + + wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks); + + while (chunks--) { + addr = be32_to_cpup((__be32 *) fw); + fw += sizeof(u32); + len = be32_to_cpup((__be32 *) fw); + fw += sizeof(u32); + + if (len > 300000) { + wl1271_info("firmware chunk too long: %u", len); + return -EINVAL; + } + wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u", + chunks, addr, len); + ret = wl1271_boot_upload_firmware_chunk(wl, fw, len, addr); + if (ret != 0) + break; + fw += len; + } + + return ret; +} +EXPORT_SYMBOL_GPL(wlcore_boot_upload_firmware); + +int wlcore_boot_upload_nvs(struct wl1271 *wl) +{ + size_t nvs_len, burst_len; + int i; + u32 dest_addr, val; + u8 *nvs_ptr, *nvs_aligned; + + if (wl->nvs == NULL) + return -ENODEV; + + if (wl->quirks & WLCORE_QUIRK_LEGACY_NVS) { + struct wl1271_nvs_file *nvs = + (struct wl1271_nvs_file *)wl->nvs; + /* + * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz + * band configurations) can be removed when those NVS files stop + * floating around. + */ + if (wl->nvs_len == sizeof(struct wl1271_nvs_file) || + wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) { + if (nvs->general_params.dual_mode_select) + wl->enable_11a = true; + } + + if (wl->nvs_len != sizeof(struct wl1271_nvs_file) && + (wl->nvs_len != WL1271_INI_LEGACY_NVS_FILE_SIZE || + wl->enable_11a)) { + wl1271_error("nvs size is not as expected: %zu != %zu", + wl->nvs_len, sizeof(struct wl1271_nvs_file)); + kfree(wl->nvs); + wl->nvs = NULL; + wl->nvs_len = 0; + return -EILSEQ; + } + + /* only the first part of the NVS needs to be uploaded */ + nvs_len = sizeof(nvs->nvs); + nvs_ptr = (u8 *) nvs->nvs; + } else { + struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs; + + if (wl->nvs_len == sizeof(struct wl128x_nvs_file)) { + if (nvs->general_params.dual_mode_select) + wl->enable_11a = true; + } else { + wl1271_error("nvs size is not as expected: %zu != %zu", + wl->nvs_len, + sizeof(struct wl128x_nvs_file)); + kfree(wl->nvs); + wl->nvs = NULL; + wl->nvs_len = 0; + return -EILSEQ; + } + + /* only the first part of the NVS needs to be uploaded */ + nvs_len = sizeof(nvs->nvs); + nvs_ptr = (u8 *)nvs->nvs; + } + + /* update current MAC address to NVS */ + nvs_ptr[11] = wl->addresses[0].addr[0]; + nvs_ptr[10] = wl->addresses[0].addr[1]; + nvs_ptr[6] = wl->addresses[0].addr[2]; + nvs_ptr[5] = wl->addresses[0].addr[3]; + nvs_ptr[4] = wl->addresses[0].addr[4]; + nvs_ptr[3] = wl->addresses[0].addr[5]; + + /* + * Layout before the actual NVS tables: + * 1 byte : burst length. + * 2 bytes: destination address. + * n bytes: data to burst copy. + * + * This is ended by a 0 length, then the NVS tables. + */ + + /* FIXME: Do we need to check here whether the LSB is 1? 
*/ + while (nvs_ptr[0]) { + burst_len = nvs_ptr[0]; + dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8)); + + /* + * Due to our new wl1271_translate_reg_addr function, + * we need to add the register partition start address + * to the destination + */ + dest_addr += wl->curr_part.reg.start; + + /* We move our pointer to the data */ + nvs_ptr += 3; + + for (i = 0; i < burst_len; i++) { + if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len) + goto out_badnvs; + + val = (nvs_ptr[0] | (nvs_ptr[1] << 8) + | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24)); + + wl1271_debug(DEBUG_BOOT, + "nvs burst write 0x%x: 0x%x", + dest_addr, val); + wl1271_write32(wl, dest_addr, val); + + nvs_ptr += 4; + dest_addr += 4; + } + + if (nvs_ptr >= (u8 *) wl->nvs + nvs_len) + goto out_badnvs; + } + + /* + * We've reached the first zero length, the first NVS table + * is located at an aligned offset which is at least 7 bytes further. + * NOTE: The wl->nvs->nvs element must be first, in order to + * simplify the casting, we assume it is at the beginning of + * the wl->nvs structure. + */ + nvs_ptr = (u8 *)wl->nvs + + ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4); + + if (nvs_ptr >= (u8 *) wl->nvs + nvs_len) + goto out_badnvs; + + nvs_len -= nvs_ptr - (u8 *)wl->nvs; + + /* Now we must set the partition correctly */ + wlcore_set_partition(wl, &wl->ptable[PART_WORK]); + + /* Copy the NVS tables to a new block to ensure alignment */ + nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL); + if (!nvs_aligned) + return -ENOMEM; + + /* And finally we upload the NVS tables */ + wlcore_write_data(wl, REG_CMD_MBOX_ADDRESS, + nvs_aligned, nvs_len, false); + + kfree(nvs_aligned); + return 0; + +out_badnvs: + wl1271_error("nvs data is malformed"); + return -EILSEQ; +} +EXPORT_SYMBOL_GPL(wlcore_boot_upload_nvs); + +int wlcore_boot_run_firmware(struct wl1271 *wl) +{ + int loop, ret; + u32 chip_id, intr; + + /* Make sure we have the boot partition */ + wlcore_set_partition(wl, &wl->ptable[PART_BOOT]); + + wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT); + + chip_id = wlcore_read_reg(wl, REG_CHIP_ID_B); + + wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id); + + if (chip_id != wl->chip.id) { + wl1271_error("chip id doesn't match after firmware boot"); + return -EIO; + } + + /* wait for init to complete */ + loop = 0; + while (loop++ < INIT_LOOP) { + udelay(INIT_LOOP_DELAY); + intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR); + + if (intr == 0xffffffff) { + wl1271_error("error reading hardware complete " + "init indication"); + return -EIO; + } + /* check that ACX_INTR_INIT_COMPLETE is enabled */ + else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) { + wlcore_write_reg(wl, REG_INTERRUPT_ACK, + WL1271_ACX_INTR_INIT_COMPLETE); + break; + } + } + + if (loop > INIT_LOOP) { + wl1271_error("timeout waiting for the hardware to " + "complete initialization"); + return -EIO; + } + + /* get hardware config command mail box */ + wl->cmd_box_addr = wlcore_read_reg(wl, REG_COMMAND_MAILBOX_PTR); + + wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x", wl->cmd_box_addr); + + /* get hardware config event mail box */ + wl->mbox_ptr[0] = wlcore_read_reg(wl, REG_EVENT_MAILBOX_PTR); + wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox); + + wl1271_debug(DEBUG_MAILBOX, "MBOX ptrs: 0x%x 0x%x", + wl->mbox_ptr[0], wl->mbox_ptr[1]); + + ret = wlcore_boot_fw_version(wl); + if (ret < 0) { + wl1271_error("couldn't boot firmware"); + return ret; + } + + /* + * in case of full asynchronous mode the firmware event must be + * ready to receive event from 
the command mailbox + */ + + /* unmask required mbox events */ + wl->event_mask = BSS_LOSE_EVENT_ID | + SCAN_COMPLETE_EVENT_ID | + ROLE_STOP_COMPLETE_EVENT_ID | + RSSI_SNR_TRIGGER_0_EVENT_ID | + PSPOLL_DELIVERY_FAILURE_EVENT_ID | + SOFT_GEMINI_SENSE_EVENT_ID | + PERIODIC_SCAN_REPORT_EVENT_ID | + PERIODIC_SCAN_COMPLETE_EVENT_ID | + DUMMY_PACKET_EVENT_ID | + PEER_REMOVE_COMPLETE_EVENT_ID | + BA_SESSION_RX_CONSTRAINT_EVENT_ID | + REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID | + INACTIVE_STA_EVENT_ID | + MAX_TX_RETRY_EVENT_ID | + CHANNEL_SWITCH_COMPLETE_EVENT_ID; + + ret = wl1271_event_unmask(wl); + if (ret < 0) { + wl1271_error("EVENT mask setting failed"); + return ret; + } + + /* set the working partition to its "running" mode offset */ + wlcore_set_partition(wl, &wl->ptable[PART_WORK]); + + /* firmware startup completed */ + return 0; +} +EXPORT_SYMBOL_GPL(wlcore_boot_run_firmware); diff --git a/drivers/net/wireless/ti/wlcore/boot.h b/drivers/net/wireless/ti/wlcore/boot.h new file mode 100644 index 00000000000..094981dd222 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/boot.h @@ -0,0 +1,54 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho <luciano.coelho@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __BOOT_H__ +#define __BOOT_H__ + +#include "wlcore.h" + +int wlcore_boot_upload_firmware(struct wl1271 *wl); +int wlcore_boot_upload_nvs(struct wl1271 *wl); +int wlcore_boot_run_firmware(struct wl1271 *wl); + +#define WL1271_NO_SUBBANDS 8 +#define WL1271_NO_POWER_LEVELS 4 +#define WL1271_FW_VERSION_MAX_LEN 20 + +struct wl1271_static_data { + u8 mac_address[ETH_ALEN]; + u8 padding[2]; + u8 fw_version[WL1271_FW_VERSION_MAX_LEN]; + u32 hw_version; + u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS]; +}; + +/* number of times we try to read the INIT interrupt */ +#define INIT_LOOP 20000 + +/* delay between retries */ +#define INIT_LOOP_DELAY 50 + +#define WU_COUNTER_PAUSE_VAL 0x3FF +#define WELP_ARM_COMMAND_VAL 0x4 + +#endif diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 3414fc11e9b..5c4716c6f04 100644 --- a/drivers/net/wireless/wl12xx/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -28,9 +28,8 @@ #include <linux/ieee80211.h> #include <linux/slab.h> -#include "wl12xx.h" +#include "wlcore.h" #include "debug.h" -#include "reg.h" #include "io.h" #include "acx.h" #include "wl12xx_80211.h" @@ -67,11 +66,15 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, wl1271_write(wl, wl->cmd_box_addr, buf, len, false); - wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD); + /* + * TODO: we just need this because one bit is in a different + * place. Is there any better way? 
+ */ + wl->ops->trigger_cmd(wl, wl->cmd_box_addr, buf, len); timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT); - intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); + intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR); while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) { if (time_after(jiffies, timeout)) { wl1271_error("command complete timeout"); @@ -85,7 +88,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, else msleep(1); - intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); + intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR); } /* read back the status code of the command */ @@ -100,8 +103,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, goto fail; } - wl1271_write32(wl, ACX_REG_INTERRUPT_ACK, - WL1271_ACX_INTR_CMD_COMPLETE); + wlcore_write_reg(wl, REG_INTERRUPT_ACK, WL1271_ACX_INTR_CMD_COMPLETE); return 0; fail: @@ -110,240 +112,18 @@ fail: return ret; } -int wl1271_cmd_general_parms(struct wl1271 *wl) -{ - struct wl1271_general_parms_cmd *gen_parms; - struct wl1271_ini_general_params *gp = - &((struct wl1271_nvs_file *)wl->nvs)->general_params; - bool answer = false; - int ret; - - if (!wl->nvs) - return -ENODEV; - - if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { - wl1271_warning("FEM index from INI out of bounds"); - return -EINVAL; - } - - gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); - if (!gen_parms) - return -ENOMEM; - - gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM; - - memcpy(&gen_parms->general_params, gp, sizeof(*gp)); - - if (gp->tx_bip_fem_auto_detect) - answer = true; - - /* Override the REF CLK from the NVS with the one from platform data */ - gen_parms->general_params.ref_clock = wl->ref_clock; - - ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer); - if (ret < 0) { - wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed"); - goto out; - } - - gp->tx_bip_fem_manufacturer = - gen_parms->general_params.tx_bip_fem_manufacturer; - - if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { - wl1271_warning("FEM index from FW out of bounds"); - ret = -EINVAL; - goto out; - } - - wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n", - answer ? 
"auto" : "manual", gp->tx_bip_fem_manufacturer); - -out: - kfree(gen_parms); - return ret; -} - -int wl128x_cmd_general_parms(struct wl1271 *wl) -{ - struct wl128x_general_parms_cmd *gen_parms; - struct wl128x_ini_general_params *gp = - &((struct wl128x_nvs_file *)wl->nvs)->general_params; - bool answer = false; - int ret; - - if (!wl->nvs) - return -ENODEV; - - if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { - wl1271_warning("FEM index from ini out of bounds"); - return -EINVAL; - } - - gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); - if (!gen_parms) - return -ENOMEM; - - gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM; - - memcpy(&gen_parms->general_params, gp, sizeof(*gp)); - - if (gp->tx_bip_fem_auto_detect) - answer = true; - - /* Replace REF and TCXO CLKs with the ones from platform data */ - gen_parms->general_params.ref_clock = wl->ref_clock; - gen_parms->general_params.tcxo_ref_clock = wl->tcxo_clock; - - ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer); - if (ret < 0) { - wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed"); - goto out; - } - - gp->tx_bip_fem_manufacturer = - gen_parms->general_params.tx_bip_fem_manufacturer; - - if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { - wl1271_warning("FEM index from FW out of bounds"); - ret = -EINVAL; - goto out; - } - - wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n", - answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer); - -out: - kfree(gen_parms); - return ret; -} - -int wl1271_cmd_radio_parms(struct wl1271 *wl) -{ - struct wl1271_nvs_file *nvs = (struct wl1271_nvs_file *)wl->nvs; - struct wl1271_radio_parms_cmd *radio_parms; - struct wl1271_ini_general_params *gp = &nvs->general_params; - int ret; - - if (!wl->nvs) - return -ENODEV; - - radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL); - if (!radio_parms) - return -ENOMEM; - - radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM; - - /* 2.4GHz parameters */ - memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2, - sizeof(struct wl1271_ini_band_params_2)); - memcpy(&radio_parms->dyn_params_2, - &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params, - sizeof(struct wl1271_ini_fem_params_2)); - - /* 5GHz parameters */ - memcpy(&radio_parms->static_params_5, - &nvs->stat_radio_params_5, - sizeof(struct wl1271_ini_band_params_5)); - memcpy(&radio_parms->dyn_params_5, - &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params, - sizeof(struct wl1271_ini_fem_params_5)); - - wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ", - radio_parms, sizeof(*radio_parms)); - - ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0); - if (ret < 0) - wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed"); - - kfree(radio_parms); - return ret; -} - -int wl128x_cmd_radio_parms(struct wl1271 *wl) -{ - struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs; - struct wl128x_radio_parms_cmd *radio_parms; - struct wl128x_ini_general_params *gp = &nvs->general_params; - int ret; - - if (!wl->nvs) - return -ENODEV; - - radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL); - if (!radio_parms) - return -ENOMEM; - - radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM; - - /* 2.4GHz parameters */ - memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2, - sizeof(struct wl128x_ini_band_params_2)); - memcpy(&radio_parms->dyn_params_2, - &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params, - sizeof(struct wl128x_ini_fem_params_2)); - - /* 5GHz parameters */ - 
memcpy(&radio_parms->static_params_5, - &nvs->stat_radio_params_5, - sizeof(struct wl128x_ini_band_params_5)); - memcpy(&radio_parms->dyn_params_5, - &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params, - sizeof(struct wl128x_ini_fem_params_5)); - - radio_parms->fem_vendor_and_options = nvs->fem_vendor_and_options; - - wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ", - radio_parms, sizeof(*radio_parms)); - - ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0); - if (ret < 0) - wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed"); - - kfree(radio_parms); - return ret; -} - -int wl1271_cmd_ext_radio_parms(struct wl1271 *wl) -{ - struct wl1271_ext_radio_parms_cmd *ext_radio_parms; - struct conf_rf_settings *rf = &wl->conf.rf; - int ret; - - if (!wl->nvs) - return -ENODEV; - - ext_radio_parms = kzalloc(sizeof(*ext_radio_parms), GFP_KERNEL); - if (!ext_radio_parms) - return -ENOMEM; - - ext_radio_parms->test.id = TEST_CMD_INI_FILE_RF_EXTENDED_PARAM; - - memcpy(ext_radio_parms->tx_per_channel_power_compensation_2, - rf->tx_per_channel_power_compensation_2, - CONF_TX_PWR_COMPENSATION_LEN_2); - memcpy(ext_radio_parms->tx_per_channel_power_compensation_5, - rf->tx_per_channel_power_compensation_5, - CONF_TX_PWR_COMPENSATION_LEN_5); - - wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_EXT_RADIO_PARAM: ", - ext_radio_parms, sizeof(*ext_radio_parms)); - - ret = wl1271_cmd_test(wl, ext_radio_parms, sizeof(*ext_radio_parms), 0); - if (ret < 0) - wl1271_warning("TEST_CMD_INI_FILE_RF_EXTENDED_PARAM failed"); - - kfree(ext_radio_parms); - return ret; -} - /* * Poll the mailbox event field until any of the bits in the mask is set or a * timeout occurs (WL1271_EVENT_TIMEOUT in msecs) */ static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask) { - u32 events_vector, event; + u32 *events_vector; + u32 event; unsigned long timeout; + int ret = 0; + + events_vector = kmalloc(sizeof(*events_vector), GFP_DMA); timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT); @@ -351,21 +131,24 @@ static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask) if (time_after(jiffies, timeout)) { wl1271_debug(DEBUG_CMD, "timeout waiting for event %d", (int)mask); - return -ETIMEDOUT; + ret = -ETIMEDOUT; + goto out; } msleep(1); /* read from both event fields */ - wl1271_read(wl, wl->mbox_ptr[0], &events_vector, - sizeof(events_vector), false); - event = events_vector & mask; - wl1271_read(wl, wl->mbox_ptr[1], &events_vector, - sizeof(events_vector), false); - event |= events_vector & mask; + wl1271_read(wl, wl->mbox_ptr[0], events_vector, + sizeof(*events_vector), false); + event = *events_vector & mask; + wl1271_read(wl, wl->mbox_ptr[1], events_vector, + sizeof(*events_vector), false); + event |= *events_vector & mask; } while (!event); - return 0; +out: + kfree(events_vector); + return ret; } static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask) @@ -522,7 +305,7 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl, cmd->role_id = wlvif->dev_role_id; if (wlvif->band == IEEE80211_BAND_5GHZ) - cmd->band = WL12XX_BAND_5GHZ; + cmd->band = WLCORE_BAND_5GHZ; cmd->channel = wlvif->channel; if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) { @@ -613,7 +396,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) cmd->role_id = wlvif->role_id; if (wlvif->band == IEEE80211_BAND_5GHZ) - cmd->band = WL12XX_BAND_5GHZ; + cmd->band = WLCORE_BAND_5GHZ; cmd->channel = wlvif->channel; cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); 
cmd->sta.beacon_interval = cpu_to_le16(wlvif->beacon_int); @@ -750,14 +533,14 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) switch (wlvif->band) { case IEEE80211_BAND_2GHZ: - cmd->band = RADIO_BAND_2_4GHZ; + cmd->band = WLCORE_BAND_2_4GHZ; break; case IEEE80211_BAND_5GHZ: - cmd->band = RADIO_BAND_5GHZ; + cmd->band = WLCORE_BAND_5GHZ; break; default: wl1271_warning("ap start - unknown band: %d", (int)wlvif->band); - cmd->band = RADIO_BAND_2_4GHZ; + cmd->band = WLCORE_BAND_2_4GHZ; break; } @@ -830,7 +613,7 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif) cmd->role_id = wlvif->role_id; if (wlvif->band == IEEE80211_BAND_5GHZ) - cmd->band = WL12XX_BAND_5GHZ; + cmd->band = WLCORE_BAND_5GHZ; cmd->channel = wlvif->channel; cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); cmd->ibss.beacon_interval = cpu_to_le16(wlvif->beacon_int); @@ -904,6 +687,7 @@ int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer) return ret; } +EXPORT_SYMBOL_GPL(wl1271_cmd_test); /** * read acx from firmware @@ -960,6 +744,7 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len) return 0; } +EXPORT_SYMBOL_GPL(wl1271_cmd_configure); int wl1271_cmd_data_path(struct wl1271 *wl, bool enable) { @@ -1730,10 +1515,10 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, cmd->channel = wlvif->channel; switch (wlvif->band) { case IEEE80211_BAND_2GHZ: - cmd->band = RADIO_BAND_2_4GHZ; + cmd->band = WLCORE_BAND_2_4GHZ; break; case IEEE80211_BAND_5GHZ: - cmd->band = RADIO_BAND_5GHZ; + cmd->band = WLCORE_BAND_5GHZ; break; default: wl1271_error("roc - unknown band: %d", (int)wlvif->band); diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h index de217d92516..a46ae07cb77 100644 --- a/drivers/net/wireless/wl12xx/cmd.h +++ b/drivers/net/wireless/ti/wlcore/cmd.h @@ -25,17 +25,12 @@ #ifndef __CMD_H__ #define __CMD_H__ -#include "wl12xx.h" +#include "wlcore.h" struct acx_header; int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, size_t res_len); -int wl1271_cmd_general_parms(struct wl1271 *wl); -int wl128x_cmd_general_parms(struct wl1271 *wl); -int wl1271_cmd_radio_parms(struct wl1271 *wl); -int wl128x_cmd_radio_parms(struct wl1271 *wl); -int wl1271_cmd_ext_radio_parms(struct wl1271 *wl); int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type, u8 *role_id); int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id); @@ -262,13 +257,13 @@ struct wl12xx_cmd_role_disable { u8 padding[3]; } __packed; -enum wl12xx_band { - WL12XX_BAND_2_4GHZ = 0, - WL12XX_BAND_5GHZ = 1, - WL12XX_BAND_JAPAN_4_9_GHZ = 2, - WL12XX_BAND_DEFAULT = WL12XX_BAND_2_4GHZ, - WL12XX_BAND_INVALID = 0x7E, - WL12XX_BAND_MAX_RADIO = 0x7F, +enum wlcore_band { + WLCORE_BAND_2_4GHZ = 0, + WLCORE_BAND_5GHZ = 1, + WLCORE_BAND_JAPAN_4_9_GHZ = 2, + WLCORE_BAND_DEFAULT = WLCORE_BAND_2_4GHZ, + WLCORE_BAND_INVALID = 0x7E, + WLCORE_BAND_MAX_RADIO = 0x7F, }; struct wl12xx_cmd_role_start { @@ -494,83 +489,6 @@ enum wl1271_channel_tune_bands { #define WL1271_PD_REFERENCE_POINT_BAND_B_G 0 -#define TEST_CMD_INI_FILE_RADIO_PARAM 0x19 -#define TEST_CMD_INI_FILE_GENERAL_PARAM 0x1E -#define TEST_CMD_INI_FILE_RF_EXTENDED_PARAM 0x26 - -struct wl1271_general_parms_cmd { - struct wl1271_cmd_header header; - - struct wl1271_cmd_test_header test; - - struct wl1271_ini_general_params general_params; - - u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM]; - u8 sr_sen_n_p; - u8 sr_sen_n_p_gain; - u8 
sr_sen_nrn; - u8 sr_sen_prn; - u8 padding[3]; -} __packed; - -struct wl128x_general_parms_cmd { - struct wl1271_cmd_header header; - - struct wl1271_cmd_test_header test; - - struct wl128x_ini_general_params general_params; - - u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM]; - u8 sr_sen_n_p; - u8 sr_sen_n_p_gain; - u8 sr_sen_nrn; - u8 sr_sen_prn; - u8 padding[3]; -} __packed; - -struct wl1271_radio_parms_cmd { - struct wl1271_cmd_header header; - - struct wl1271_cmd_test_header test; - - /* Static radio parameters */ - struct wl1271_ini_band_params_2 static_params_2; - struct wl1271_ini_band_params_5 static_params_5; - - /* Dynamic radio parameters */ - struct wl1271_ini_fem_params_2 dyn_params_2; - u8 padding2; - struct wl1271_ini_fem_params_5 dyn_params_5; - u8 padding3[2]; -} __packed; - -struct wl128x_radio_parms_cmd { - struct wl1271_cmd_header header; - - struct wl1271_cmd_test_header test; - - /* Static radio parameters */ - struct wl128x_ini_band_params_2 static_params_2; - struct wl128x_ini_band_params_5 static_params_5; - - u8 fem_vendor_and_options; - - /* Dynamic radio parameters */ - struct wl128x_ini_fem_params_2 dyn_params_2; - u8 padding2; - struct wl128x_ini_fem_params_5 dyn_params_5; -} __packed; - -struct wl1271_ext_radio_parms_cmd { - struct wl1271_cmd_header header; - - struct wl1271_cmd_test_header test; - - u8 tx_per_channel_power_compensation_2[CONF_TX_PWR_COMPENSATION_LEN_2]; - u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5]; - u8 padding[3]; -} __packed; - /* * There are three types of disconnections: * diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/ti/wlcore/conf.h index 3e581e19424..fef0db4213b 100644 --- a/drivers/net/wireless/wl12xx/conf.h +++ b/drivers/net/wireless/ti/wlcore/conf.h @@ -65,36 +65,7 @@ enum { CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_54MBPS, }; -enum { - CONF_HW_RXTX_RATE_MCS7_SGI = 0, - CONF_HW_RXTX_RATE_MCS7, - CONF_HW_RXTX_RATE_MCS6, - CONF_HW_RXTX_RATE_MCS5, - CONF_HW_RXTX_RATE_MCS4, - CONF_HW_RXTX_RATE_MCS3, - CONF_HW_RXTX_RATE_MCS2, - CONF_HW_RXTX_RATE_MCS1, - CONF_HW_RXTX_RATE_MCS0, - CONF_HW_RXTX_RATE_54, - CONF_HW_RXTX_RATE_48, - CONF_HW_RXTX_RATE_36, - CONF_HW_RXTX_RATE_24, - CONF_HW_RXTX_RATE_22, - CONF_HW_RXTX_RATE_18, - CONF_HW_RXTX_RATE_12, - CONF_HW_RXTX_RATE_11, - CONF_HW_RXTX_RATE_9, - CONF_HW_RXTX_RATE_6, - CONF_HW_RXTX_RATE_5_5, - CONF_HW_RXTX_RATE_2, - CONF_HW_RXTX_RATE_1, - CONF_HW_RXTX_RATE_MAX, - CONF_HW_RXTX_RATE_UNSUPPORTED = 0xff -}; - -/* Rates between and including these are MCS rates */ -#define CONF_HW_RXTX_RATE_MCS_MIN CONF_HW_RXTX_RATE_MCS7_SGI -#define CONF_HW_RXTX_RATE_MCS_MAX CONF_HW_RXTX_RATE_MCS0 +#define CONF_HW_RXTX_RATE_UNSUPPORTED 0xff enum { CONF_SG_DISABLE = 0, @@ -1096,16 +1067,31 @@ struct conf_scan_settings { }; struct conf_sched_scan_settings { - /* minimum time to wait on the channel for active scans (in TUs) */ - u16 min_dwell_time_active; + /* + * The base time to wait on the channel for active scans (in TU/1000). + * The minimum dwell time is calculated according to this: + * min_dwell_time = base + num_of_probes_to_be_sent * delta_per_probe + * The maximum dwell time is calculated according to this: + * max_dwell_time = min_dwell_time + max_dwell_time_delta + */ + u32 base_dwell_time; - /* maximum time to wait on the channel for active scans (in TUs) */ - u16 max_dwell_time_active; + /* The delta between the min dwell time and max dwell time for + * active scans (in TU/1000s). 
The max dwell time is used by the FW once + * traffic is detected on the channel. + */ + u32 max_dwell_time_delta; + + /* Delta added to min dwell time per each probe in 2.4 GHz (TU/1000) */ + u32 dwell_time_delta_per_probe; - /* time to wait on the channel for passive scans (in TUs) */ + /* Delta added to min dwell time per each probe in 5 GHz (TU/1000) */ + u32 dwell_time_delta_per_probe_5; + + /* time to wait on the channel for passive scans (in TU/1000) */ u32 dwell_time_passive; - /* time to wait on the channel for DFS scans (in TUs) */ + /* time to wait on the channel for DFS scans (in TU/1000) */ u32 dwell_time_dfs; /* number of probe requests to send on each channel in active scans */ @@ -1118,26 +1104,6 @@ struct conf_sched_scan_settings { s8 snr_threshold; }; -/* these are number of channels on the band divided by two, rounded up */ -#define CONF_TX_PWR_COMPENSATION_LEN_2 7 -#define CONF_TX_PWR_COMPENSATION_LEN_5 18 - -struct conf_rf_settings { - /* - * Per channel power compensation for 2.4GHz - * - * Range: s8 - */ - u8 tx_per_channel_power_compensation_2[CONF_TX_PWR_COMPENSATION_LEN_2]; - - /* - * Per channel power compensation for 5GHz - * - * Range: s8 - */ - u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5]; -}; - struct conf_ht_setting { u8 rx_ba_win_size; u8 tx_ba_win_size; @@ -1286,7 +1252,7 @@ struct conf_hangover_settings { u8 window_size; }; -struct conf_drv_settings { +struct wlcore_conf { struct conf_sg_settings sg; struct conf_rx_settings rx; struct conf_tx_settings tx; @@ -1296,16 +1262,13 @@ struct conf_drv_settings { struct conf_roam_trigger_settings roam_trigger; struct conf_scan_settings scan; struct conf_sched_scan_settings sched_scan; - struct conf_rf_settings rf; struct conf_ht_setting ht; - struct conf_memory_settings mem_wl127x; - struct conf_memory_settings mem_wl128x; + struct conf_memory_settings mem; struct conf_fm_coex fm_coex; struct conf_rx_streaming_settings rx_streaming; struct conf_fwlog fwlog; struct conf_rate_policy_settings rate; struct conf_hangover_settings hangover; - u8 hci_io_ds; }; #endif diff --git a/drivers/net/wireless/wl12xx/debug.h b/drivers/net/wireless/ti/wlcore/debug.h index ec0fdc25b28..6b800b3cbea 100644 --- a/drivers/net/wireless/wl12xx/debug.h +++ b/drivers/net/wireless/ti/wlcore/debug.h @@ -52,6 +52,7 @@ enum { DEBUG_ADHOC = BIT(16), DEBUG_AP = BIT(17), DEBUG_PROBE = BIT(18), + DEBUG_IO = BIT(19), DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP), DEBUG_ALL = ~0, }; diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c index 564d49575c9..d5aea1ff5ad 100644 --- a/drivers/net/wireless/wl12xx/debugfs.c +++ b/drivers/net/wireless/ti/wlcore/debugfs.c @@ -26,7 +26,7 @@ #include <linux/skbuff.h> #include <linux/slab.h> -#include "wl12xx.h" +#include "wlcore.h" #include "debug.h" #include "acx.h" #include "ps.h" @@ -647,6 +647,7 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf, VIF_STATE_PRINT_INT(last_rssi_event); VIF_STATE_PRINT_INT(ba_support); VIF_STATE_PRINT_INT(ba_allowed); + VIF_STATE_PRINT_INT(is_gem); VIF_STATE_PRINT_LLHEX(tx_security_seq); VIF_STATE_PRINT_INT(tx_security_last_seq_lsb); } diff --git a/drivers/net/wireless/wl12xx/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h index 254c5b292cf..a8d3aef011f 100644 --- a/drivers/net/wireless/wl12xx/debugfs.h +++ b/drivers/net/wireless/ti/wlcore/debugfs.h @@ -24,7 +24,7 @@ #ifndef __DEBUGFS_H__ #define __DEBUGFS_H__ -#include "wl12xx.h" +#include "wlcore.h" int wl1271_debugfs_init(struct wl1271 
*wl); void wl1271_debugfs_exit(struct wl1271 *wl); diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/ti/wlcore/event.c index c953717f38e..292632ddf89 100644 --- a/drivers/net/wireless/wl12xx/event.c +++ b/drivers/net/wireless/ti/wlcore/event.c @@ -21,9 +21,8 @@ * */ -#include "wl12xx.h" +#include "wlcore.h" #include "debug.h" -#include "reg.h" #include "io.h" #include "event.h" #include "ps.h" @@ -98,8 +97,9 @@ static void wl1271_event_mbox_dump(struct event_mailbox *mbox) wl1271_debug(DEBUG_EVENT, "\tmask: 0x%x", mbox->events_mask); } -static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) +static int wl1271_event_process(struct wl1271 *wl) { + struct event_mailbox *mbox = wl->mbox; struct ieee80211_vif *vif; struct wl12xx_vif *wlvif; u32 vector; @@ -196,7 +196,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) bool success; if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, - &wl->flags)) + &wlvif->flags)) continue; success = mbox->channel_switch_status ? false : true; @@ -278,18 +278,8 @@ int wl1271_event_unmask(struct wl1271 *wl) return 0; } -void wl1271_event_mbox_config(struct wl1271 *wl) -{ - wl->mbox_ptr[0] = wl1271_read32(wl, REG_EVENT_MAILBOX_PTR); - wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox); - - wl1271_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x", - wl->mbox_ptr[0], wl->mbox_ptr[1]); -} - int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num) { - struct event_mailbox mbox; int ret; wl1271_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num); @@ -298,16 +288,19 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num) return -EINVAL; /* first we read the mbox descriptor */ - wl1271_read(wl, wl->mbox_ptr[mbox_num], &mbox, - sizeof(struct event_mailbox), false); + wl1271_read(wl, wl->mbox_ptr[mbox_num], wl->mbox, + sizeof(*wl->mbox), false); /* process the descriptor */ - ret = wl1271_event_process(wl, &mbox); + ret = wl1271_event_process(wl); if (ret < 0) return ret; - /* then we let the firmware know it can go on...*/ - wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK); + /* + * TODO: we just need this because one bit is in a different + * place. Is there any better way? + */ + wl->ops->ack_event(wl); return 0; } diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/ti/wlcore/event.h index 057d193d352..8adf18d6c58 100644 --- a/drivers/net/wireless/wl12xx/event.h +++ b/drivers/net/wireless/ti/wlcore/event.h @@ -132,8 +132,9 @@ struct event_mailbox { u8 reserved_8[9]; } __packed; +struct wl1271; + int wl1271_event_unmask(struct wl1271 *wl); -void wl1271_event_mbox_config(struct wl1271 *wl); int wl1271_event_handle(struct wl1271 *wl, u8 mbox); #endif diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h new file mode 100644 index 00000000000..9384b4d56c2 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/hw_ops.h @@ -0,0 +1,122 @@ +/* + * This file is part of wlcore + * + * Copyright (C) 2011 Texas Instruments Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WLCORE_HW_OPS_H__ +#define __WLCORE_HW_OPS_H__ + +#include "wlcore.h" +#include "rx.h" + +static inline u32 +wlcore_hw_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks) +{ + if (!wl->ops->calc_tx_blocks) + BUG_ON(1); + + return wl->ops->calc_tx_blocks(wl, len, spare_blks); +} + +static inline void +wlcore_hw_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc, + u32 blks, u32 spare_blks) +{ + if (!wl->ops->set_tx_desc_blocks) + BUG_ON(1); + + return wl->ops->set_tx_desc_blocks(wl, desc, blks, spare_blks); +} + +static inline void +wlcore_hw_set_tx_desc_data_len(struct wl1271 *wl, + struct wl1271_tx_hw_descr *desc, + struct sk_buff *skb) +{ + if (!wl->ops->set_tx_desc_data_len) + BUG_ON(1); + + wl->ops->set_tx_desc_data_len(wl, desc, skb); +} + +static inline enum wl_rx_buf_align +wlcore_hw_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc) +{ + + if (!wl->ops->get_rx_buf_align) + BUG_ON(1); + + return wl->ops->get_rx_buf_align(wl, rx_desc); +} + +static inline void +wlcore_hw_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len) +{ + if (wl->ops->prepare_read) + wl->ops->prepare_read(wl, rx_desc, len); +} + +static inline u32 +wlcore_hw_get_rx_packet_len(struct wl1271 *wl, void *rx_data, u32 data_len) +{ + if (!wl->ops->get_rx_packet_len) + BUG_ON(1); + + return wl->ops->get_rx_packet_len(wl, rx_data, data_len); +} + +static inline void wlcore_hw_tx_delayed_compl(struct wl1271 *wl) +{ + if (wl->ops->tx_delayed_compl) + wl->ops->tx_delayed_compl(wl); +} + +static inline void wlcore_hw_tx_immediate_compl(struct wl1271 *wl) +{ + if (wl->ops->tx_immediate_compl) + wl->ops->tx_immediate_compl(wl); +} + +static inline int +wlcore_hw_init_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + if (wl->ops->init_vif) + return wl->ops->init_vif(wl, wlvif); + + return 0; +} + +static inline u32 +wlcore_hw_sta_get_ap_rate_mask(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + if (!wl->ops->sta_get_ap_rate_mask) + BUG_ON(1); + + return wl->ops->sta_get_ap_rate_mask(wl, wlvif); +} + +static inline int wlcore_identify_fw(struct wl1271 *wl) +{ + if (wl->ops->identify_fw) + return wl->ops->identify_fw(wl); + + return 0; +} + +#endif diff --git a/drivers/net/wireless/wl12xx/ini.h b/drivers/net/wireless/ti/wlcore/ini.h index 4cf9ecc5621..4cf9ecc5621 100644 --- a/drivers/net/wireless/wl12xx/ini.h +++ b/drivers/net/wireless/ti/wlcore/ini.h diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/ti/wlcore/init.c index 203fbebf09e..9f89255eb6e 100644 --- a/drivers/net/wireless/wl12xx/init.c +++ b/drivers/net/wireless/ti/wlcore/init.c @@ -30,9 +30,9 @@ #include "wl12xx_80211.h" #include "acx.h" #include "cmd.h" -#include "reg.h" #include "tx.h" #include "io.h" +#include "hw_ops.h" int wl1271_init_templates_config(struct wl1271 *wl) { @@ -319,7 +319,7 @@ static int wl12xx_init_fwlog(struct wl1271 *wl) { int ret; - if (wl->quirks & WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED) + if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) return 0; ret = wl12xx_cmd_config_fwlog(wl); @@ -494,26 +494,6 @@ static int wl1271_set_ba_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif) return wl12xx_acx_set_ba_initiator_policy(wl, wlvif); } -int wl1271_chip_specific_init(struct wl1271 *wl) -{ - int ret = 0; - - if (wl->chip.id == CHIP_ID_1283_PG20) { - u32 
host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE; - - if (!(wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT)) - /* Enable SDIO padding */ - host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK; - - /* Must be before wl1271_acx_init_mem_config() */ - ret = wl1271_acx_host_if_cfg_bitmap(wl, host_cfg_bitmap); - if (ret < 0) - goto out; - } -out: - return ret; -} - /* vif-specifc initialization */ static int wl12xx_init_sta_role(struct wl1271 *wl, struct wl12xx_vif *wlvif) { @@ -582,10 +562,17 @@ int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif) if (ret < 0) return ret; } else if (!wl->sta_count) { - /* Configure for ELP power saving */ - ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); - if (ret < 0) - return ret; + if (wl->quirks & WLCORE_QUIRK_NO_ELP) { + /* Configure for power always on */ + ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); + if (ret < 0) + return ret; + } else { + /* Configure for ELP power saving */ + ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); + if (ret < 0) + return ret; + } } } @@ -652,6 +639,10 @@ int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif) if (ret < 0) return ret; + ret = wlcore_hw_init_vif(wl, wlvif); + if (ret < 0) + return ret; + return 0; } @@ -659,27 +650,8 @@ int wl1271_hw_init(struct wl1271 *wl) { int ret; - if (wl->chip.id == CHIP_ID_1283_PG20) { - ret = wl128x_cmd_general_parms(wl); - if (ret < 0) - return ret; - ret = wl128x_cmd_radio_parms(wl); - if (ret < 0) - return ret; - } else { - ret = wl1271_cmd_general_parms(wl); - if (ret < 0) - return ret; - ret = wl1271_cmd_radio_parms(wl); - if (ret < 0) - return ret; - ret = wl1271_cmd_ext_radio_parms(wl); - if (ret < 0) - return ret; - } - - /* Chip-specific init */ - ret = wl1271_chip_specific_init(wl); + /* Chip-specific hw init */ + ret = wl->ops->hw_init(wl); if (ret < 0) return ret; diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/ti/wlcore/init.h index 2da0f404ef6..a45fbfddec1 100644 --- a/drivers/net/wireless/wl12xx/init.h +++ b/drivers/net/wireless/ti/wlcore/init.h @@ -24,7 +24,7 @@ #ifndef __INIT_H__ #define __INIT_H__ -#include "wl12xx.h" +#include "wlcore.h" int wl1271_hw_init_power_auth(struct wl1271 *wl); int wl1271_init_templates_config(struct wl1271 *wl); diff --git a/drivers/net/wireless/wl12xx/io.c b/drivers/net/wireless/ti/wlcore/io.c index c574a3b31e3..7cd0081aede 100644 --- a/drivers/net/wireless/wl12xx/io.c +++ b/drivers/net/wireless/ti/wlcore/io.c @@ -26,84 +26,12 @@ #include <linux/spi/spi.h> #include <linux/interrupt.h> -#include "wl12xx.h" +#include "wlcore.h" #include "debug.h" #include "wl12xx_80211.h" #include "io.h" #include "tx.h" -#define OCP_CMD_LOOP 32 - -#define OCP_CMD_WRITE 0x1 -#define OCP_CMD_READ 0x2 - -#define OCP_READY_MASK BIT(18) -#define OCP_STATUS_MASK (BIT(16) | BIT(17)) - -#define OCP_STATUS_NO_RESP 0x00000 -#define OCP_STATUS_OK 0x10000 -#define OCP_STATUS_REQ_FAILED 0x20000 -#define OCP_STATUS_RESP_ERROR 0x30000 - -struct wl1271_partition_set wl12xx_part_table[PART_TABLE_LEN] = { - [PART_DOWN] = { - .mem = { - .start = 0x00000000, - .size = 0x000177c0 - }, - .reg = { - .start = REGISTERS_BASE, - .size = 0x00008800 - }, - .mem2 = { - .start = 0x00000000, - .size = 0x00000000 - }, - .mem3 = { - .start = 0x00000000, - .size = 0x00000000 - }, - }, - - [PART_WORK] = { - .mem = { - .start = 0x00040000, - .size = 0x00014fc0 - }, - .reg = { - .start = REGISTERS_BASE, - .size = 0x0000a000 - }, - .mem2 = { - .start = 0x003004f8, - .size = 0x00000004 - }, - .mem3 = { - .start = 0x00040404, - 
.size = 0x00000000 - }, - }, - - [PART_DRPW] = { - .mem = { - .start = 0x00040000, - .size = 0x00014fc0 - }, - .reg = { - .start = DRPW_BASE, - .size = 0x00006000 - }, - .mem2 = { - .start = 0x00000000, - .size = 0x00000000 - }, - .mem3 = { - .start = 0x00000000, - .size = 0x00000000 - } - } -}; - bool wl1271_set_block_size(struct wl1271 *wl) { if (wl->if_ops->set_block_size) { @@ -114,17 +42,53 @@ bool wl1271_set_block_size(struct wl1271 *wl) return false; } -void wl1271_disable_interrupts(struct wl1271 *wl) +void wlcore_disable_interrupts(struct wl1271 *wl) { disable_irq(wl->irq); } +EXPORT_SYMBOL_GPL(wlcore_disable_interrupts); -void wl1271_enable_interrupts(struct wl1271 *wl) +void wlcore_enable_interrupts(struct wl1271 *wl) { enable_irq(wl->irq); } +EXPORT_SYMBOL_GPL(wlcore_enable_interrupts); -/* Set the SPI partitions to access the chip addresses +int wlcore_translate_addr(struct wl1271 *wl, int addr) +{ + struct wlcore_partition_set *part = &wl->curr_part; + + /* + * To translate, first check to which window of addresses the + * particular address belongs. Then subtract the starting address + * of that window from the address. Then, add offset of the + * translated region. + * + * The translated regions occur next to each other in physical device + * memory, so just add the sizes of the preceding address regions to + * get the offset to the new region. + */ + if ((addr >= part->mem.start) && + (addr < part->mem.start + part->mem.size)) + return addr - part->mem.start; + else if ((addr >= part->reg.start) && + (addr < part->reg.start + part->reg.size)) + return addr - part->reg.start + part->mem.size; + else if ((addr >= part->mem2.start) && + (addr < part->mem2.start + part->mem2.size)) + return addr - part->mem2.start + part->mem.size + + part->reg.size; + else if ((addr >= part->mem3.start) && + (addr < part->mem3.start + part->mem3.size)) + return addr - part->mem3.start + part->mem.size + + part->reg.size + part->mem2.size; + + WARN(1, "HW address 0x%x out of range", addr); + return 0; +} +EXPORT_SYMBOL_GPL(wlcore_translate_addr); + +/* Set the partitions to access the chip addresses * * To simplify driver code, a fixed (virtual) memory map is defined for * register and memory addresses. 
Because in the chipset, in different stages @@ -158,33 +122,43 @@ void wl1271_enable_interrupts(struct wl1271 *wl) * | | * */ -int wl1271_set_partition(struct wl1271 *wl, - struct wl1271_partition_set *p) +void wlcore_set_partition(struct wl1271 *wl, + const struct wlcore_partition_set *p) { /* copy partition info */ - memcpy(&wl->part, p, sizeof(*p)); + memcpy(&wl->curr_part, p, sizeof(*p)); - wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", + wl1271_debug(DEBUG_IO, "mem_start %08X mem_size %08X", p->mem.start, p->mem.size); - wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", + wl1271_debug(DEBUG_IO, "reg_start %08X reg_size %08X", p->reg.start, p->reg.size); - wl1271_debug(DEBUG_SPI, "mem2_start %08X mem2_size %08X", + wl1271_debug(DEBUG_IO, "mem2_start %08X mem2_size %08X", p->mem2.start, p->mem2.size); - wl1271_debug(DEBUG_SPI, "mem3_start %08X mem3_size %08X", + wl1271_debug(DEBUG_IO, "mem3_start %08X mem3_size %08X", p->mem3.start, p->mem3.size); - /* write partition info to the chipset */ wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start); wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size); wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start); wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size); wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start); wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size); + /* + * We don't need the size of the last partition, as it is + * automatically calculated based on the total memory size and + * the sizes of the previous partitions. + */ wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start); +} +EXPORT_SYMBOL_GPL(wlcore_set_partition); - return 0; +void wlcore_select_partition(struct wl1271 *wl, u8 part) +{ + wl1271_debug(DEBUG_IO, "setting partition %d", part); + + wlcore_set_partition(wl, &wl->ptable[part]); } -EXPORT_SYMBOL_GPL(wl1271_set_partition); +EXPORT_SYMBOL_GPL(wlcore_select_partition); void wl1271_io_reset(struct wl1271 *wl) { @@ -197,48 +171,3 @@ void wl1271_io_init(struct wl1271 *wl) if (wl->if_ops->init) wl->if_ops->init(wl->dev); } - -void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val) -{ - /* write address >> 1 + 0x30000 to OCP_POR_CTR */ - addr = (addr >> 1) + 0x30000; - wl1271_write32(wl, OCP_POR_CTR, addr); - - /* write value to OCP_POR_WDATA */ - wl1271_write32(wl, OCP_DATA_WRITE, val); - - /* write 1 to OCP_CMD */ - wl1271_write32(wl, OCP_CMD, OCP_CMD_WRITE); -} - -u16 wl1271_top_reg_read(struct wl1271 *wl, int addr) -{ - u32 val; - int timeout = OCP_CMD_LOOP; - - /* write address >> 1 + 0x30000 to OCP_POR_CTR */ - addr = (addr >> 1) + 0x30000; - wl1271_write32(wl, OCP_POR_CTR, addr); - - /* write 2 to OCP_CMD */ - wl1271_write32(wl, OCP_CMD, OCP_CMD_READ); - - /* poll for data ready */ - do { - val = wl1271_read32(wl, OCP_DATA_READ); - } while (!(val & OCP_READY_MASK) && --timeout); - - if (!timeout) { - wl1271_warning("Top register access timed out."); - return 0xffff; - } - - /* check data status and return if OK */ - if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK) - return val & 0xffff; - else { - wl1271_warning("Top register access returned error."); - return 0xffff; - } -} - diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/ti/wlcore/io.h index 4fb3dab8c3b..8942954b56a 100644 --- a/drivers/net/wireless/wl12xx/io.h +++ b/drivers/net/wireless/ti/wlcore/io.h @@ -26,7 +26,6 @@ #define __IO_H__ #include <linux/irqreturn.h> -#include "reg.h" #define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0 @@ -43,15 +42,14 @@ #define HW_ACCESS_PRAM_MAX_RANGE 0x3c000 
-extern struct wl1271_partition_set wl12xx_part_table[PART_TABLE_LEN]; - struct wl1271; -void wl1271_disable_interrupts(struct wl1271 *wl); -void wl1271_enable_interrupts(struct wl1271 *wl); +void wlcore_disable_interrupts(struct wl1271 *wl); +void wlcore_enable_interrupts(struct wl1271 *wl); void wl1271_io_reset(struct wl1271 *wl); void wl1271_io_init(struct wl1271 *wl); +int wlcore_translate_addr(struct wl1271 *wl, int addr); /* Raw target IO, address is not translated */ static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf, @@ -66,6 +64,18 @@ static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf, wl->if_ops->read(wl->dev, addr, buf, len, fixed); } +static inline void wlcore_raw_read_data(struct wl1271 *wl, int reg, void *buf, + size_t len, bool fixed) +{ + wl1271_raw_read(wl, wl->rtable[reg], buf, len, fixed); +} + +static inline void wlcore_raw_write_data(struct wl1271 *wl, int reg, void *buf, + size_t len, bool fixed) +{ + wl1271_raw_write(wl, wl->rtable[reg], buf, len, fixed); +} + static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr) { wl1271_raw_read(wl, addr, &wl->buffer_32, @@ -81,36 +91,12 @@ static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val) sizeof(wl->buffer_32), false); } -/* Translated target IO */ -static inline int wl1271_translate_addr(struct wl1271 *wl, int addr) -{ - /* - * To translate, first check to which window of addresses the - * particular address belongs. Then subtract the starting address - * of that window from the address. Then, add offset of the - * translated region. - * - * The translated regions occur next to each other in physical device - * memory, so just add the sizes of the preceding address regions to - * get the offset to the new region. - * - * Currently, only the two first regions are addressed, and the - * assumption is that all addresses will fall into either of those - * two. 
- */ - if ((addr >= wl->part.reg.start) && - (addr < wl->part.reg.start + wl->part.reg.size)) - return addr - wl->part.reg.start + wl->part.mem.size; - else - return addr - wl->part.mem.start; -} - static inline void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len, bool fixed) { int physical; - physical = wl1271_translate_addr(wl, addr); + physical = wlcore_translate_addr(wl, addr); wl1271_raw_read(wl, physical, buf, len, fixed); } @@ -120,11 +106,23 @@ static inline void wl1271_write(struct wl1271 *wl, int addr, void *buf, { int physical; - physical = wl1271_translate_addr(wl, addr); + physical = wlcore_translate_addr(wl, addr); wl1271_raw_write(wl, physical, buf, len, fixed); } +static inline void wlcore_write_data(struct wl1271 *wl, int reg, void *buf, + size_t len, bool fixed) +{ + wl1271_write(wl, wl->rtable[reg], buf, len, fixed); +} + +static inline void wlcore_read_data(struct wl1271 *wl, int reg, void *buf, + size_t len, bool fixed) +{ + wl1271_read(wl, wl->rtable[reg], buf, len, fixed); +} + static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr, void *buf, size_t len, bool fixed) { @@ -134,19 +132,30 @@ static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr, /* Addresses are stored internally as addresses to 32 bytes blocks */ addr = hwaddr << 5; - physical = wl1271_translate_addr(wl, addr); + physical = wlcore_translate_addr(wl, addr); wl1271_raw_read(wl, physical, buf, len, fixed); } static inline u32 wl1271_read32(struct wl1271 *wl, int addr) { - return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr)); + return wl1271_raw_read32(wl, wlcore_translate_addr(wl, addr)); } static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val) { - wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val); + wl1271_raw_write32(wl, wlcore_translate_addr(wl, addr), val); +} + +static inline u32 wlcore_read_reg(struct wl1271 *wl, int reg) +{ + return wl1271_raw_read32(wl, + wlcore_translate_addr(wl, wl->rtable[reg])); +} + +static inline void wlcore_write_reg(struct wl1271 *wl, int reg, u32 val) +{ + wl1271_raw_write32(wl, wlcore_translate_addr(wl, wl->rtable[reg]), val); } static inline void wl1271_power_off(struct wl1271 *wl) @@ -164,13 +173,8 @@ static inline int wl1271_power_on(struct wl1271 *wl) return ret; } - -/* Top Register IO */ -void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val); -u16 wl1271_top_reg_read(struct wl1271 *wl, int addr); - -int wl1271_set_partition(struct wl1271 *wl, - struct wl1271_partition_set *p); +void wlcore_set_partition(struct wl1271 *wl, + const struct wlcore_partition_set *p); bool wl1271_set_block_size(struct wl1271 *wl); @@ -178,4 +182,6 @@ bool wl1271_set_block_size(struct wl1271 *wl); int wl1271_tx_dummy_packet(struct wl1271 *wl); +void wlcore_select_partition(struct wl1271 *wl, u8 part); + #endif diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/ti/wlcore/main.c index 39002363611..2b0f987660c 100644 --- a/drivers/net/wireless/wl12xx/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -35,10 +35,9 @@ #include <linux/sched.h> #include <linux/interrupt.h> -#include "wl12xx.h" +#include "wlcore.h" #include "debug.h" #include "wl12xx_80211.h" -#include "reg.h" #include "io.h" #include "event.h" #include "tx.h" @@ -50,342 +49,15 @@ #include "boot.h" #include "testmode.h" #include "scan.h" +#include "hw_ops.h" #define WL1271_BOOT_RETRIES 3 -static struct conf_drv_settings default_conf = { - .sg = { - .params = { - [CONF_SG_ACL_BT_MASTER_MIN_BR] = 10, - 
[CONF_SG_ACL_BT_MASTER_MAX_BR] = 180, - [CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10, - [CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180, - [CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10, - [CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80, - [CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10, - [CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80, - [CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8, - [CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8, - [CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20, - [CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20, - [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20, - [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35, - [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16, - [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35, - [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32, - [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50, - [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28, - [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50, - [CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10, - [CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20, - [CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75, - [CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15, - [CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27, - [CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17, - /* active scan params */ - [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170, - [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50, - [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100, - /* passive scan params */ - [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800, - [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200, - [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200, - /* passive scan in dual antenna params */ - [CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0, - [CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0, - [CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0, - /* general params */ - [CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1, - [CONF_SG_ANTENNA_CONFIGURATION] = 0, - [CONF_SG_BEACON_MISS_PERCENT] = 60, - [CONF_SG_DHCP_TIME] = 5000, - [CONF_SG_RXT] = 1200, - [CONF_SG_TXT] = 1000, - [CONF_SG_ADAPTIVE_RXT_TXT] = 1, - [CONF_SG_GENERAL_USAGE_BIT_MAP] = 3, - [CONF_SG_HV3_MAX_SERVED] = 6, - [CONF_SG_PS_POLL_TIMEOUT] = 10, - [CONF_SG_UPSD_TIMEOUT] = 10, - [CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2, - [CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5, - [CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30, - /* AP params */ - [CONF_AP_BEACON_MISS_TX] = 3, - [CONF_AP_RX_WINDOW_AFTER_BEACON] = 10, - [CONF_AP_BEACON_WINDOW_INTERVAL] = 2, - [CONF_AP_CONNECTION_PROTECTION_TIME] = 0, - [CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25, - [CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25, - /* CTS Diluting params */ - [CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0, - [CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0, - }, - .state = CONF_SG_PROTECTIVE, - }, - .rx = { - .rx_msdu_life_time = 512000, - .packet_detection_threshold = 0, - .ps_poll_timeout = 15, - .upsd_timeout = 15, - .rts_threshold = IEEE80211_MAX_RTS_THRESHOLD, - .rx_cca_threshold = 0, - .irq_blk_threshold = 0xFFFF, - .irq_pkt_threshold = 0, - .irq_timeout = 600, - .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY, - }, - .tx = { - .tx_energy_detection = 0, - .sta_rc_conf = { - .enabled_rates = 0, - .short_retry_limit = 10, - .long_retry_limit = 10, - .aflags = 0, - }, - .ac_conf_count = 4, - .ac_conf = { - [CONF_TX_AC_BE] = { - .ac = CONF_TX_AC_BE, - .cw_min = 15, - .cw_max = 63, - .aifsn = 3, - .tx_op_limit = 0, - }, - [CONF_TX_AC_BK] = { - .ac = CONF_TX_AC_BK, - .cw_min = 15, - .cw_max = 63, - .aifsn = 7, - .tx_op_limit = 0, - }, - [CONF_TX_AC_VI] = { - .ac = CONF_TX_AC_VI, - .cw_min = 15, - .cw_max = 63, - .aifsn = CONF_TX_AIFS_PIFS, - .tx_op_limit = 3008, - }, - [CONF_TX_AC_VO] = { - .ac = CONF_TX_AC_VO, - .cw_min = 15, - .cw_max = 63, - 
.aifsn = CONF_TX_AIFS_PIFS, - .tx_op_limit = 1504, - }, - }, - .max_tx_retries = 100, - .ap_aging_period = 300, - .tid_conf_count = 4, - .tid_conf = { - [CONF_TX_AC_BE] = { - .queue_id = CONF_TX_AC_BE, - .channel_type = CONF_CHANNEL_TYPE_EDCF, - .tsid = CONF_TX_AC_BE, - .ps_scheme = CONF_PS_SCHEME_LEGACY, - .ack_policy = CONF_ACK_POLICY_LEGACY, - .apsd_conf = {0, 0}, - }, - [CONF_TX_AC_BK] = { - .queue_id = CONF_TX_AC_BK, - .channel_type = CONF_CHANNEL_TYPE_EDCF, - .tsid = CONF_TX_AC_BK, - .ps_scheme = CONF_PS_SCHEME_LEGACY, - .ack_policy = CONF_ACK_POLICY_LEGACY, - .apsd_conf = {0, 0}, - }, - [CONF_TX_AC_VI] = { - .queue_id = CONF_TX_AC_VI, - .channel_type = CONF_CHANNEL_TYPE_EDCF, - .tsid = CONF_TX_AC_VI, - .ps_scheme = CONF_PS_SCHEME_LEGACY, - .ack_policy = CONF_ACK_POLICY_LEGACY, - .apsd_conf = {0, 0}, - }, - [CONF_TX_AC_VO] = { - .queue_id = CONF_TX_AC_VO, - .channel_type = CONF_CHANNEL_TYPE_EDCF, - .tsid = CONF_TX_AC_VO, - .ps_scheme = CONF_PS_SCHEME_LEGACY, - .ack_policy = CONF_ACK_POLICY_LEGACY, - .apsd_conf = {0, 0}, - }, - }, - .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD, - .tx_compl_timeout = 700, - .tx_compl_threshold = 4, - .basic_rate = CONF_HW_BIT_RATE_1MBPS, - .basic_rate_5 = CONF_HW_BIT_RATE_6MBPS, - .tmpl_short_retry_limit = 10, - .tmpl_long_retry_limit = 10, - .tx_watchdog_timeout = 5000, - }, - .conn = { - .wake_up_event = CONF_WAKE_UP_EVENT_DTIM, - .listen_interval = 1, - .suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM, - .suspend_listen_interval = 3, - .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED, - .bcn_filt_ie_count = 2, - .bcn_filt_ie = { - [0] = { - .ie = WLAN_EID_CHANNEL_SWITCH, - .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE, - }, - [1] = { - .ie = WLAN_EID_HT_INFORMATION, - .rule = CONF_BCN_RULE_PASS_ON_CHANGE, - }, - }, - .synch_fail_thold = 10, - .bss_lose_timeout = 100, - .beacon_rx_timeout = 10000, - .broadcast_timeout = 20000, - .rx_broadcast_in_ps = 1, - .ps_poll_threshold = 10, - .bet_enable = CONF_BET_MODE_ENABLE, - .bet_max_consecutive = 50, - .psm_entry_retries = 8, - .psm_exit_retries = 16, - .psm_entry_nullfunc_retries = 3, - .dynamic_ps_timeout = 200, - .forced_ps = false, - .keep_alive_interval = 55000, - .max_listen_interval = 20, - }, - .itrim = { - .enable = false, - .timeout = 50000, - }, - .pm_config = { - .host_clk_settling_time = 5000, - .host_fast_wakeup_support = false - }, - .roam_trigger = { - .trigger_pacing = 1, - .avg_weight_rssi_beacon = 20, - .avg_weight_rssi_data = 10, - .avg_weight_snr_beacon = 20, - .avg_weight_snr_data = 10, - }, - .scan = { - .min_dwell_time_active = 7500, - .max_dwell_time_active = 30000, - .min_dwell_time_passive = 100000, - .max_dwell_time_passive = 100000, - .num_probe_reqs = 2, - .split_scan_timeout = 50000, - }, - .sched_scan = { - /* sched_scan requires dwell times in TU instead of TU/1000 */ - .min_dwell_time_active = 30, - .max_dwell_time_active = 60, - .dwell_time_passive = 100, - .dwell_time_dfs = 150, - .num_probe_reqs = 2, - .rssi_threshold = -90, - .snr_threshold = 0, - }, - .rf = { - .tx_per_channel_power_compensation_2 = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - .tx_per_channel_power_compensation_5 = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - }, - .ht = { - .rx_ba_win_size = 8, - .tx_ba_win_size = 64, - .inactivity_timeout = 10000, - .tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP, - }, - .mem_wl127x = { - .num_stations = 1, - .ssid_profiles = 1, - .rx_block_num = 70, - .tx_min_block_num = 40, - 
.dynamic_memory = 1, - .min_req_tx_blocks = 100, - .min_req_rx_blocks = 22, - .tx_min = 27, - }, - .mem_wl128x = { - .num_stations = 1, - .ssid_profiles = 1, - .rx_block_num = 40, - .tx_min_block_num = 40, - .dynamic_memory = 1, - .min_req_tx_blocks = 45, - .min_req_rx_blocks = 22, - .tx_min = 27, - }, - .fm_coex = { - .enable = true, - .swallow_period = 5, - .n_divider_fref_set_1 = 0xff, /* default */ - .n_divider_fref_set_2 = 12, - .m_divider_fref_set_1 = 148, - .m_divider_fref_set_2 = 0xffff, /* default */ - .coex_pll_stabilization_time = 0xffffffff, /* default */ - .ldo_stabilization_time = 0xffff, /* default */ - .fm_disturbed_band_margin = 0xff, /* default */ - .swallow_clk_diff = 0xff, /* default */ - }, - .rx_streaming = { - .duration = 150, - .queues = 0x1, - .interval = 20, - .always = 0, - }, - .fwlog = { - .mode = WL12XX_FWLOG_ON_DEMAND, - .mem_blocks = 2, - .severity = 0, - .timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED, - .output = WL12XX_FWLOG_OUTPUT_HOST, - .threshold = 0, - }, - .hci_io_ds = HCI_IO_DS_6MA, - .rate = { - .rate_retry_score = 32000, - .per_add = 8192, - .per_th1 = 2048, - .per_th2 = 4096, - .max_per = 8100, - .inverse_curiosity_factor = 5, - .tx_fail_low_th = 4, - .tx_fail_high_th = 10, - .per_alpha_shift = 4, - .per_add_shift = 13, - .per_beta1_shift = 10, - .per_beta2_shift = 8, - .rate_check_up = 2, - .rate_check_down = 12, - .rate_retry_policy = { - 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, - }, - }, - .hangover = { - .recover_time = 0, - .hangover_period = 20, - .dynamic_mode = 1, - .early_termination_mode = 1, - .max_period = 20, - .min_period = 1, - .increase_delta = 1, - .decrease_delta = 2, - .quiet_time = 4, - .increase_time = 1, - .window_size = 16, - }, -}; +#define WL1271_BOOT_RETRIES 3 static char *fwlog_param; static bool bug_on_recovery; +static bool no_recovery; static void __wl1271_op_remove_interface(struct wl1271 *wl, struct ieee80211_vif *vif, @@ -628,22 +300,8 @@ out: mutex_unlock(&wl->mutex); } -static void wl1271_conf_init(struct wl1271 *wl) +static void wlcore_adjust_conf(struct wl1271 *wl) { - - /* - * This function applies the default configuration to the driver. This - * function is invoked upon driver load (spi probe.) - * - * The configuration is stored in a run-time structure in order to - * facilitate for run-time adjustment of any of the parameters. Making - * changes to the configuration structure will apply the new values on - * the next interface up (wl1271_op_start.) 
- */ - - /* apply driver default configuration */ - memcpy(&wl->conf, &default_conf, sizeof(default_conf)); - /* Adjust settings according to optional module parameters */ if (fwlog_param) { if (!strcmp(fwlog_param, "continuous")) { @@ -666,28 +324,7 @@ static int wl1271_plt_init(struct wl1271 *wl) { int ret; - if (wl->chip.id == CHIP_ID_1283_PG20) - ret = wl128x_cmd_general_parms(wl); - else - ret = wl1271_cmd_general_parms(wl); - if (ret < 0) - return ret; - - if (wl->chip.id == CHIP_ID_1283_PG20) - ret = wl128x_cmd_radio_parms(wl); - else - ret = wl1271_cmd_radio_parms(wl); - if (ret < 0) - return ret; - - if (wl->chip.id != CHIP_ID_1283_PG20) { - ret = wl1271_cmd_ext_radio_parms(wl); - if (ret < 0) - return ret; - } - - /* Chip-specific initializations */ - ret = wl1271_chip_specific_init(wl); + ret = wl->ops->hw_init(wl); if (ret < 0) return ret; @@ -750,7 +387,7 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, static void wl12xx_irq_update_links_status(struct wl1271 *wl, struct wl12xx_vif *wlvif, - struct wl12xx_fw_status *status) + struct wl_fw_status *status) { struct wl1271_link *lnk; u32 cur_fw_ps_map; @@ -770,9 +407,10 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl, for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) { lnk = &wl->links[hlid]; - cnt = status->tx_lnk_free_pkts[hlid] - lnk->prev_freed_pkts; + cnt = status->counters.tx_lnk_free_pkts[hlid] - + lnk->prev_freed_pkts; - lnk->prev_freed_pkts = status->tx_lnk_free_pkts[hlid]; + lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid]; lnk->allocated_pkts -= cnt; wl12xx_irq_ps_regulate_link(wl, wlvif, hlid, @@ -781,15 +419,19 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl, } static void wl12xx_fw_status(struct wl1271 *wl, - struct wl12xx_fw_status *status) + struct wl_fw_status *status) { struct wl12xx_vif *wlvif; struct timespec ts; u32 old_tx_blk_count = wl->tx_blocks_available; int avail, freed_blocks; int i; + size_t status_len; + + status_len = sizeof(*status) + wl->fw_status_priv_len; - wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false); + wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status, + status_len, false); wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " "drv_rx_counter = %d, tx_results_counter = %d)", @@ -801,10 +443,10 @@ static void wl12xx_fw_status(struct wl1271 *wl, for (i = 0; i < NUM_TX_QUEUES; i++) { /* prevent wrap-around in freed-packets counter */ wl->tx_allocated_pkts[i] -= - (status->tx_released_pkts[i] - + (status->counters.tx_released_pkts[i] - wl->tx_pkts_freed[i]) & 0xff; - wl->tx_pkts_freed[i] = status->tx_released_pkts[i]; + wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i]; } /* prevent wrap-around in total blocks counter */ @@ -927,6 +569,9 @@ static irqreturn_t wl1271_irq(int irq, void *cookie) smp_mb__after_clear_bit(); wl12xx_fw_status(wl, wl->fw_status); + + wlcore_hw_tx_immediate_compl(wl); + intr = le32_to_cpu(wl->fw_status->intr); intr &= WL1271_INTR_MASK; if (!intr) { @@ -963,9 +608,7 @@ static irqreturn_t wl1271_irq(int irq, void *cookie) } /* check for tx results */ - if (wl->fw_status->tx_results_counter != - (wl->tx_results_count & 0xff)) - wl1271_tx_complete(wl); + wlcore_hw_tx_delayed_compl(wl); /* Make sure the deferred queues don't get too long */ defer_count = skb_queue_len(&wl->deferred_tx_queue) + @@ -1046,10 +689,7 @@ static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt) if (plt) { fw_type = WL12XX_FW_TYPE_PLT; - if (wl->chip.id == CHIP_ID_1283_PG20) - 
fw_name = WL128X_PLT_FW_NAME; - else - fw_name = WL127X_PLT_FW_NAME; + fw_name = wl->plt_fw_name; } else { /* * we can't call wl12xx_get_vif_count() here because @@ -1057,16 +697,10 @@ static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt) */ if (wl->last_vif_count > 1) { fw_type = WL12XX_FW_TYPE_MULTI; - if (wl->chip.id == CHIP_ID_1283_PG20) - fw_name = WL128X_FW_NAME_MULTI; - else - fw_name = WL127X_FW_NAME_MULTI; + fw_name = wl->mr_fw_name; } else { fw_type = WL12XX_FW_TYPE_NORMAL; - if (wl->chip.id == CHIP_ID_1283_PG20) - fw_name = WL128X_FW_NAME_SINGLE; - else - fw_name = WL127X_FW_NAME_SINGLE; + fw_name = wl->sr_fw_name; } } @@ -1173,7 +807,7 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl) u32 first_addr; u8 *block; - if ((wl->quirks & WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED) || + if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) || (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) || (wl->conf.fwlog.mem_blocks == 0)) return; @@ -1239,11 +873,20 @@ static void wl1271_recovery_work(struct work_struct *work) wl12xx_read_fwlog_panic(wl); wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x", - wl->chip.fw_ver_str, wl1271_read32(wl, SCR_PAD4)); + wl->chip.fw_ver_str, + wlcore_read_reg(wl, REG_PC_ON_RECOVERY)); BUG_ON(bug_on_recovery && !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)); + if (no_recovery) { + wl1271_info("No recovery (chosen on module load). Fw will remain stuck."); + clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); + goto out_unlock; + } + + BUG_ON(bug_on_recovery); + /* * Advance security sequence number to overcome potential progress * in the firmware during recovery. This doens't hurt if the network is @@ -1290,10 +933,7 @@ out_unlock: static void wl1271_fw_wakeup(struct wl1271 *wl) { - u32 elp_reg; - - elp_reg = ELPCTRL_WAKE_UP; - wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg); + wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); } static int wl1271_setup(struct wl1271 *wl) @@ -1323,7 +963,7 @@ static int wl12xx_set_power_on(struct wl1271 *wl) wl1271_io_reset(wl); wl1271_io_init(wl); - wl1271_set_partition(wl, &wl12xx_part_table[PART_DOWN]); + wlcore_set_partition(wl, &wl->ptable[PART_BOOT]); /* ELP module wake up */ wl1271_fw_wakeup(wl); @@ -1348,44 +988,18 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt) * negligible, we use the same block size for all different * chip types. 
*/ - if (!wl1271_set_block_size(wl)) - wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT; - - switch (wl->chip.id) { - case CHIP_ID_1271_PG10: - wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete", - wl->chip.id); + if (wl1271_set_block_size(wl)) + wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN; - ret = wl1271_setup(wl); - if (ret < 0) - goto out; - wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT; - break; - - case CHIP_ID_1271_PG20: - wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", - wl->chip.id); - - ret = wl1271_setup(wl); - if (ret < 0) - goto out; - wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT; - break; + ret = wl->ops->identify_chip(wl); + if (ret < 0) + goto out; - case CHIP_ID_1283_PG20: - wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)", - wl->chip.id); + /* TODO: make sure the lower driver has set things up correctly */ - ret = wl1271_setup(wl); - if (ret < 0) - goto out; - break; - case CHIP_ID_1283_PG10: - default: - wl1271_warning("unsupported chip id: 0x%x", wl->chip.id); - ret = -ENODEV; + ret = wl1271_setup(wl); + if (ret < 0) goto out; - } ret = wl12xx_fetch_firmware(wl, plt); if (ret < 0) @@ -1425,7 +1039,7 @@ int wl1271_plt_start(struct wl1271 *wl) if (ret < 0) goto power_off; - ret = wl1271_boot(wl); + ret = wl->ops->boot(wl); if (ret < 0) goto power_off; @@ -1454,7 +1068,7 @@ irq_disable: work function will not do anything.) Also, any other possible concurrent operations will fail due to the current state, hence the wl1271 struct should be safe. */ - wl1271_disable_interrupts(wl); + wlcore_disable_interrupts(wl); wl1271_flush_deferred_work(wl); cancel_work_sync(&wl->netstack_work); mutex_lock(&wl->mutex); @@ -1481,7 +1095,7 @@ int wl1271_plt_stop(struct wl1271 *wl) * Otherwise, the interrupt handler might be called and exit without * reading the interrupt status. */ - wl1271_disable_interrupts(wl); + wlcore_disable_interrupts(wl); mutex_lock(&wl->mutex); if (!wl->plt) { mutex_unlock(&wl->mutex); @@ -1491,7 +1105,7 @@ int wl1271_plt_stop(struct wl1271 *wl) * may have been disabled when op_stop was called. It will, * however, balance the above call to disable_interrupts(). 
*/ - wl1271_enable_interrupts(wl); + wlcore_enable_interrupts(wl); wl1271_error("cannot power down because not in PLT " "state: %d", wl->state); @@ -1652,14 +1266,12 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl, { int ret = 0; - mutex_lock(&wl->mutex); - if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) - goto out_unlock; + goto out; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) - goto out_unlock; + goto out; ret = wl1271_acx_wake_up_conditions(wl, wlvif, wl->conf.conn.suspend_wake_up_event, @@ -1668,11 +1280,9 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl, if (ret < 0) wl1271_error("suspend: set wake up conditions failed: %d", ret); - wl1271_ps_elp_sleep(wl); -out_unlock: - mutex_unlock(&wl->mutex); +out: return ret; } @@ -1682,20 +1292,17 @@ static int wl1271_configure_suspend_ap(struct wl1271 *wl, { int ret = 0; - mutex_lock(&wl->mutex); - if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) - goto out_unlock; + goto out; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) - goto out_unlock; + goto out; ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true); wl1271_ps_elp_sleep(wl); -out_unlock: - mutex_unlock(&wl->mutex); +out: return ret; } @@ -1720,10 +1327,9 @@ static void wl1271_configure_resume(struct wl1271 *wl, if ((!is_ap) && (!is_sta)) return; - mutex_lock(&wl->mutex); ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) - goto out; + return; if (is_sta) { ret = wl1271_acx_wake_up_conditions(wl, wlvif, @@ -1739,8 +1345,6 @@ static void wl1271_configure_resume(struct wl1271 *wl, } wl1271_ps_elp_sleep(wl); -out: - mutex_unlock(&wl->mutex); } static int wl1271_op_suspend(struct ieee80211_hw *hw, @@ -1755,6 +1359,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw, wl1271_tx_flush(wl); + mutex_lock(&wl->mutex); wl->wow_enabled = true; wl12xx_for_each_wlvif(wl, wlvif) { ret = wl1271_configure_suspend(wl, wlvif); @@ -1763,6 +1368,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw, return ret; } } + mutex_unlock(&wl->mutex); /* flush any remaining work */ wl1271_debug(DEBUG_MAC80211, "flushing remaining works"); @@ -1770,7 +1376,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw, * disable and re-enable interrupts in order to flush * the threaded_irq */ - wl1271_disable_interrupts(wl); + wlcore_disable_interrupts(wl); /* * set suspended flag to avoid triggering a new threaded_irq @@ -1778,7 +1384,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw, */ set_bit(WL1271_FLAG_SUSPENDED, &wl->flags); - wl1271_enable_interrupts(wl); + wlcore_enable_interrupts(wl); flush_work(&wl->tx_work); flush_delayed_work(&wl->elp_work); @@ -1810,12 +1416,15 @@ static int wl1271_op_resume(struct ieee80211_hw *hw) wl1271_debug(DEBUG_MAC80211, "run postponed irq_work directly"); wl1271_irq(0, wl); - wl1271_enable_interrupts(wl); + wlcore_enable_interrupts(wl); } + + mutex_lock(&wl->mutex); wl12xx_for_each_wlvif(wl, wlvif) { wl1271_configure_resume(wl, wlvif); } wl->wow_enabled = false; + mutex_unlock(&wl->mutex); return 0; } @@ -1851,7 +1460,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw) * Otherwise, the interrupt handler might be called and exit without * reading the interrupt status. */ - wl1271_disable_interrupts(wl); + wlcore_disable_interrupts(wl); mutex_lock(&wl->mutex); if (wl->state == WL1271_STATE_OFF) { mutex_unlock(&wl->mutex); @@ -1861,7 +1470,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw) * may have been disabled when op_stop was called. It will, * however, balance the above call to disable_interrupts(). 
*/ - wl1271_enable_interrupts(wl); + wlcore_enable_interrupts(wl); return; } @@ -1894,7 +1503,6 @@ static void wl1271_op_stop(struct ieee80211_hw *hw) wl->tx_results_count = 0; wl->tx_packets_count = 0; wl->time_offset = 0; - wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; wl->ap_fw_ps_map = 0; wl->ap_ps_map = 0; wl->sched_scanning = false; @@ -2067,7 +1675,7 @@ static bool wl12xx_init_fw(struct wl1271 *wl) if (ret < 0) goto power_off; - ret = wl1271_boot(wl); + ret = wl->ops->boot(wl); if (ret < 0) goto power_off; @@ -2087,7 +1695,7 @@ irq_disable: work function will not do anything.) Also, any other possible concurrent operations will fail due to the current state, hence the wl1271 struct should be safe. */ - wl1271_disable_interrupts(wl); + wlcore_disable_interrupts(wl); wl1271_flush_deferred_work(wl); cancel_work_sync(&wl->netstack_work); mutex_lock(&wl->mutex); @@ -2360,10 +1968,12 @@ deinit: for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++) wl12xx_free_rate_policy(wl, &wlvif->ap.ucast_rate_idx[i]); + wl1271_free_ap_keys(wl, wlvif); } + dev_kfree_skb(wlvif->probereq); + wlvif->probereq = NULL; wl12xx_tx_reset_wlvif(wl, wlvif); - wl1271_free_ap_keys(wl, wlvif); if (wl->last_wlvif == wlvif) wl->last_wlvif = NULL; list_del(&wlvif->list); @@ -2946,6 +2556,17 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, int ret; bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); + /* + * A role set to GEM cipher requires different Tx settings (namely + * spare blocks). Note when we are in this mode so the HW can adjust. + */ + if (key_type == KEY_GEM) { + if (action == KEY_ADD_OR_REPLACE) + wlvif->is_gem = true; + else if (action == KEY_REMOVE) + wlvif->is_gem = false; + } + if (is_ap) { struct wl1271_station *wl_sta; u8 hlid; @@ -2984,17 +2605,6 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - /* - * A STA set to GEM cipher requires 2 tx spare blocks. - * Return to default value when GEM cipher key is removed - */ - if (key_type == KEY_GEM) { - if (action == KEY_ADD_OR_REPLACE) - wl->tx_spare_blocks = 2; - else if (action == KEY_REMOVE) - wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; - } - addr = sta ? 
sta->addr : bcast_addr; if (is_zero_ether_addr(addr)) { @@ -3791,8 +3401,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, wlvif->rssi_thold = bss_conf->cqm_rssi_thold; } - if (changed & BSS_CHANGED_BSSID && - (is_ibss || bss_conf->assoc)) + if (changed & BSS_CHANGED_BSSID) if (!is_zero_ether_addr(bss_conf->bssid)) { ret = wl12xx_cmd_build_null_data(wl, wlvif); if (ret < 0) @@ -3801,9 +3410,6 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, ret = wl1271_build_qos_null_data(wl, vif); if (ret < 0) goto out; - - /* Need to update the BSSID (for filtering etc) */ - do_join = true; } if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) { @@ -3830,6 +3436,7 @@ sta_not_found: int ieoffset; wlvif->aid = bss_conf->aid; wlvif->beacon_int = bss_conf->beacon_int; + do_join = true; set_assoc = true; /* @@ -4662,60 +4269,12 @@ static struct ieee80211_channel wl1271_channels[] = { { .hw_value = 14, .center_freq = 2484, .max_power = 25 }, }; -/* mapping to indexes for wl1271_rates */ -static const u8 wl1271_rate_to_idx_2ghz[] = { - /* MCS rates are used only with 11n */ - 7, /* CONF_HW_RXTX_RATE_MCS7_SGI */ - 7, /* CONF_HW_RXTX_RATE_MCS7 */ - 6, /* CONF_HW_RXTX_RATE_MCS6 */ - 5, /* CONF_HW_RXTX_RATE_MCS5 */ - 4, /* CONF_HW_RXTX_RATE_MCS4 */ - 3, /* CONF_HW_RXTX_RATE_MCS3 */ - 2, /* CONF_HW_RXTX_RATE_MCS2 */ - 1, /* CONF_HW_RXTX_RATE_MCS1 */ - 0, /* CONF_HW_RXTX_RATE_MCS0 */ - - 11, /* CONF_HW_RXTX_RATE_54 */ - 10, /* CONF_HW_RXTX_RATE_48 */ - 9, /* CONF_HW_RXTX_RATE_36 */ - 8, /* CONF_HW_RXTX_RATE_24 */ - - /* TI-specific rate */ - CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */ - - 7, /* CONF_HW_RXTX_RATE_18 */ - 6, /* CONF_HW_RXTX_RATE_12 */ - 3, /* CONF_HW_RXTX_RATE_11 */ - 5, /* CONF_HW_RXTX_RATE_9 */ - 4, /* CONF_HW_RXTX_RATE_6 */ - 2, /* CONF_HW_RXTX_RATE_5_5 */ - 1, /* CONF_HW_RXTX_RATE_2 */ - 0 /* CONF_HW_RXTX_RATE_1 */ -}; - -/* 11n STA capabilities */ -#define HW_RX_HIGHEST_RATE 72 - -#define WL12XX_HT_CAP { \ - .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | \ - (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT), \ - .ht_supported = true, \ - .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, \ - .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \ - .mcs = { \ - .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, \ - .rx_highest = cpu_to_le16(HW_RX_HIGHEST_RATE), \ - .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \ - }, \ -} - /* can't be const, mac80211 writes to this */ static struct ieee80211_supported_band wl1271_band_2ghz = { .channels = wl1271_channels, .n_channels = ARRAY_SIZE(wl1271_channels), .bitrates = wl1271_rates, .n_bitrates = ARRAY_SIZE(wl1271_rates), - .ht_cap = WL12XX_HT_CAP, }; /* 5 GHz data rates for WL1273 */ @@ -4784,48 +4343,11 @@ static struct ieee80211_channel wl1271_channels_5ghz[] = { { .hw_value = 165, .center_freq = 5825, .max_power = 25 }, }; -/* mapping to indexes for wl1271_rates_5ghz */ -static const u8 wl1271_rate_to_idx_5ghz[] = { - /* MCS rates are used only with 11n */ - 7, /* CONF_HW_RXTX_RATE_MCS7_SGI */ - 7, /* CONF_HW_RXTX_RATE_MCS7 */ - 6, /* CONF_HW_RXTX_RATE_MCS6 */ - 5, /* CONF_HW_RXTX_RATE_MCS5 */ - 4, /* CONF_HW_RXTX_RATE_MCS4 */ - 3, /* CONF_HW_RXTX_RATE_MCS3 */ - 2, /* CONF_HW_RXTX_RATE_MCS2 */ - 1, /* CONF_HW_RXTX_RATE_MCS1 */ - 0, /* CONF_HW_RXTX_RATE_MCS0 */ - - 7, /* CONF_HW_RXTX_RATE_54 */ - 6, /* CONF_HW_RXTX_RATE_48 */ - 5, /* CONF_HW_RXTX_RATE_36 */ - 4, /* CONF_HW_RXTX_RATE_24 */ - - /* TI-specific rate */ - CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */ - - 3, /* CONF_HW_RXTX_RATE_18 */ - 2, /* 
CONF_HW_RXTX_RATE_12 */ - CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_11 */ - 1, /* CONF_HW_RXTX_RATE_9 */ - 0, /* CONF_HW_RXTX_RATE_6 */ - CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_5_5 */ - CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_2 */ - CONF_HW_RXTX_RATE_UNSUPPORTED /* CONF_HW_RXTX_RATE_1 */ -}; - static struct ieee80211_supported_band wl1271_band_5ghz = { .channels = wl1271_channels_5ghz, .n_channels = ARRAY_SIZE(wl1271_channels_5ghz), .bitrates = wl1271_rates_5ghz, .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz), - .ht_cap = WL12XX_HT_CAP, -}; - -static const u8 *wl1271_band_rate_to_idx[] = { - [IEEE80211_BAND_2GHZ] = wl1271_rate_to_idx_2ghz, - [IEEE80211_BAND_5GHZ] = wl1271_rate_to_idx_5ghz }; static const struct ieee80211_ops wl1271_ops = { @@ -4862,18 +4384,18 @@ static const struct ieee80211_ops wl1271_ops = { }; -u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band) +u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band) { u8 idx; - BUG_ON(band >= sizeof(wl1271_band_rate_to_idx)/sizeof(u8 *)); + BUG_ON(band >= 2); - if (unlikely(rate >= CONF_HW_RXTX_RATE_MAX)) { + if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) { wl1271_error("Illegal RX rate from HW: %d", rate); return 0; } - idx = wl1271_band_rate_to_idx[band][rate]; + idx = wl->band_rate_to_idx[band][rate]; if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) { wl1271_error("Unsupported RX rate from HW: %d", rate); return 0; @@ -5027,34 +4549,6 @@ static struct bin_attribute fwlog_attr = { .read = wl1271_sysfs_read_fwlog, }; -static bool wl12xx_mac_in_fuse(struct wl1271 *wl) -{ - bool supported = false; - u8 major, minor; - - if (wl->chip.id == CHIP_ID_1283_PG20) { - major = WL128X_PG_GET_MAJOR(wl->hw_pg_ver); - minor = WL128X_PG_GET_MINOR(wl->hw_pg_ver); - - /* in wl128x we have the MAC address if the PG is >= (2, 1) */ - if (major > 2 || (major == 2 && minor >= 1)) - supported = true; - } else { - major = WL127X_PG_GET_MAJOR(wl->hw_pg_ver); - minor = WL127X_PG_GET_MINOR(wl->hw_pg_ver); - - /* in wl127x we have the MAC address if the PG is >= (3, 1) */ - if (major == 3 && minor >= 1) - supported = true; - } - - wl1271_debug(DEBUG_PROBE, - "PG Ver major = %d minor = %d, MAC %s present", - major, minor, supported ? 
"is" : "is not"); - - return supported; -} - static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic, int n) { @@ -5080,47 +4574,23 @@ static void wl12xx_derive_mac_addresses(struct wl1271 *wl, wl->hw->wiphy->addresses = wl->addresses; } -static void wl12xx_get_fuse_mac(struct wl1271 *wl) -{ - u32 mac1, mac2; - - wl1271_set_partition(wl, &wl12xx_part_table[PART_DRPW]); - - mac1 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1); - mac2 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2); - - /* these are the two parts of the BD_ADDR */ - wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) + - ((mac1 & 0xff000000) >> 24); - wl->fuse_nic_addr = mac1 & 0xffffff; - - wl1271_set_partition(wl, &wl12xx_part_table[PART_DOWN]); -} - static int wl12xx_get_hw_info(struct wl1271 *wl) { int ret; - u32 die_info; ret = wl12xx_set_power_on(wl); if (ret < 0) goto out; - wl->chip.id = wl1271_read32(wl, CHIP_ID_B); + wl->chip.id = wlcore_read_reg(wl, REG_CHIP_ID_B); - if (wl->chip.id == CHIP_ID_1283_PG20) - die_info = wl1271_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1); - else - die_info = wl1271_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1); + wl->fuse_oui_addr = 0; + wl->fuse_nic_addr = 0; - wl->hw_pg_ver = (s8) (die_info & PG_VER_MASK) >> PG_VER_OFFSET; + wl->hw_pg_ver = wl->ops->get_pg_ver(wl); - if (!wl12xx_mac_in_fuse(wl)) { - wl->fuse_oui_addr = 0; - wl->fuse_nic_addr = 0; - } else { - wl12xx_get_fuse_mac(wl); - } + if (wl->ops->get_mac) + wl->ops->get_mac(wl); wl1271_power_off(wl); out: @@ -5242,7 +4712,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - sizeof(struct ieee80211_header); - wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD | + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; /* make sure all our channels fit in the scanned_ch bitmask */ BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) + @@ -5254,8 +4725,12 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) */ memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz, sizeof(wl1271_band_2ghz)); + memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap, &wl->ht_cap, + sizeof(wl->ht_cap)); memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz, sizeof(wl1271_band_5ghz)); + memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap, &wl->ht_cap, + sizeof(wl->ht_cap)); wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl->bands[IEEE80211_BAND_2GHZ]; @@ -5279,14 +4754,14 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) wl->hw->sta_data_size = sizeof(struct wl1271_station); wl->hw->vif_data_size = sizeof(struct wl12xx_vif); - wl->hw->max_rx_aggregation_subframes = 8; + wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size; return 0; } #define WL1271_DEFAULT_CHANNEL 0 -static struct ieee80211_hw *wl1271_alloc_hw(void) +struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size) { struct ieee80211_hw *hw; struct wl1271 *wl; @@ -5305,6 +4780,13 @@ static struct ieee80211_hw *wl1271_alloc_hw(void) wl = hw->priv; memset(wl, 0, sizeof(*wl)); + wl->priv = kzalloc(priv_size, GFP_KERNEL); + if (!wl->priv) { + wl1271_error("could not alloc wl priv"); + ret = -ENOMEM; + goto err_priv_alloc; + } + INIT_LIST_HEAD(&wl->wlvif_list); wl->hw = hw; @@ -5341,7 +4823,6 @@ static struct ieee80211_hw *wl1271_alloc_hw(void) wl->quirks = 0; wl->platform_quirks = 0; wl->sched_scanning = false; - wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; wl->system_hlid = WL12XX_SYSTEM_HLID; wl->active_sta_count = 0; wl->fwlog_size = 0; @@ -5351,7 +4832,7 @@ static struct ieee80211_hw 
*wl1271_alloc_hw(void) __set_bit(WL12XX_SYSTEM_HLID, wl->links_map); memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map)); - for (i = 0; i < ACX_TX_DESCRIPTORS; i++) + for (i = 0; i < wl->num_tx_desc; i++) wl->tx_frames[i] = NULL; spin_lock_init(&wl->wl_lock); @@ -5360,9 +4841,6 @@ static struct ieee80211_hw *wl1271_alloc_hw(void) wl->fw_type = WL12XX_FW_TYPE_NONE; mutex_init(&wl->mutex); - /* Apply default driver configuration. */ - wl1271_conf_init(wl); - order = get_order(WL1271_AGGR_BUFFER_SIZE); wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order); if (!wl->aggr_buf) { @@ -5383,8 +4861,17 @@ static struct ieee80211_hw *wl1271_alloc_hw(void) goto err_dummy_packet; } + wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_DMA); + if (!wl->mbox) { + ret = -ENOMEM; + goto err_fwlog; + } + return hw; +err_fwlog: + free_page((unsigned long)wl->fwlog); + err_dummy_packet: dev_kfree_skb(wl->dummy_packet); @@ -5396,14 +4883,18 @@ err_wq: err_hw: wl1271_debugfs_exit(wl); + kfree(wl->priv); + +err_priv_alloc: ieee80211_free_hw(hw); err_hw_alloc: return ERR_PTR(ret); } +EXPORT_SYMBOL_GPL(wlcore_alloc_hw); -static int wl1271_free_hw(struct wl1271 *wl) +int wlcore_free_hw(struct wl1271 *wl) { /* Unblock any fwlog readers */ mutex_lock(&wl->mutex); @@ -5433,10 +4924,12 @@ static int wl1271_free_hw(struct wl1271 *wl) kfree(wl->tx_res_if); destroy_workqueue(wl->freezable_wq); + kfree(wl->priv); ieee80211_free_hw(wl->hw); return 0; } +EXPORT_SYMBOL_GPL(wlcore_free_hw); static irqreturn_t wl12xx_hardirq(int irq, void *cookie) { @@ -5467,22 +4960,22 @@ static irqreturn_t wl12xx_hardirq(int irq, void *cookie) return IRQ_WAKE_THREAD; } -static int __devinit wl12xx_probe(struct platform_device *pdev) +int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev) { struct wl12xx_platform_data *pdata = pdev->dev.platform_data; - struct ieee80211_hw *hw; - struct wl1271 *wl; unsigned long irqflags; - int ret = -ENODEV; + int ret; - hw = wl1271_alloc_hw(); - if (IS_ERR(hw)) { - wl1271_error("can't allocate hw"); - ret = PTR_ERR(hw); - goto out; + if (!wl->ops || !wl->ptable) { + ret = -EINVAL; + goto out_free_hw; } - wl = hw->priv; + BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS); + + /* adjust some runtime configuration parameters */ + wlcore_adjust_conf(wl); + wl->irq = platform_get_irq(pdev, 0); wl->ref_clock = pdata->board_ref_clock; wl->tcxo_clock = pdata->board_tcxo_clock; @@ -5511,7 +5004,7 @@ static int __devinit wl12xx_probe(struct platform_device *pdev) wl->irq_wake_enabled = true; device_init_wakeup(wl->dev, 1); if (pdata->pwr_in_suspend) - hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; + wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; } disable_irq(wl->irq); @@ -5545,7 +5038,7 @@ static int __devinit wl12xx_probe(struct platform_device *pdev) goto out_hw_pg_ver; } - return 0; + goto out; out_hw_pg_ver: device_remove_file(wl->dev, &dev_attr_hw_pg_ver); @@ -5557,13 +5050,14 @@ out_irq: free_irq(wl->irq, wl); out_free_hw: - wl1271_free_hw(wl); + wlcore_free_hw(wl); out: return ret; } +EXPORT_SYMBOL_GPL(wlcore_probe); -static int __devexit wl12xx_remove(struct platform_device *pdev) +int __devexit wlcore_remove(struct platform_device *pdev) { struct wl1271 *wl = platform_get_drvdata(pdev); @@ -5573,38 +5067,11 @@ static int __devexit wl12xx_remove(struct platform_device *pdev) } wl1271_unregister_hw(wl); free_irq(wl->irq, wl); - wl1271_free_hw(wl); + wlcore_free_hw(wl); return 0; } - -static const struct platform_device_id wl12xx_id_table[] __devinitconst = { - { "wl12xx", 0 }, - { } /* 
Terminating Entry */ -}; -MODULE_DEVICE_TABLE(platform, wl12xx_id_table); - -static struct platform_driver wl12xx_driver = { - .probe = wl12xx_probe, - .remove = __devexit_p(wl12xx_remove), - .id_table = wl12xx_id_table, - .driver = { - .name = "wl12xx_driver", - .owner = THIS_MODULE, - } -}; - -static int __init wl12xx_init(void) -{ - return platform_driver_register(&wl12xx_driver); -} -module_init(wl12xx_init); - -static void __exit wl12xx_exit(void) -{ - platform_driver_unregister(&wl12xx_driver); -} -module_exit(wl12xx_exit); +EXPORT_SYMBOL_GPL(wlcore_remove); u32 wl12xx_debug_level = DEBUG_NONE; EXPORT_SYMBOL_GPL(wl12xx_debug_level); @@ -5618,6 +5085,9 @@ MODULE_PARM_DESC(fwlog, module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR); MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery"); +module_param(no_recovery, bool, S_IRUSR | S_IWUSR); +MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck."); + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/ti/wlcore/ps.c index 78f598b4f97..756eee2257b 100644 --- a/drivers/net/wireless/wl12xx/ps.c +++ b/drivers/net/wireless/ti/wlcore/ps.c @@ -21,7 +21,6 @@ * */ -#include "reg.h" #include "ps.h" #include "io.h" #include "tx.h" @@ -62,7 +61,7 @@ void wl1271_elp_work(struct work_struct *work) } wl1271_debug(DEBUG_PSM, "chip to elp"); - wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP); + wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP); set_bit(WL1271_FLAG_IN_ELP, &wl->flags); out: @@ -74,6 +73,9 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl) { struct wl12xx_vif *wlvif; + if (wl->quirks & WLCORE_QUIRK_NO_ELP) + return; + /* we shouldn't get consecutive sleep requests */ if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))) return; @@ -125,7 +127,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl) wl->elp_compl = &compl; spin_unlock_irqrestore(&wl->wl_lock, flags); - wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP); + wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); if (!pending) { ret = wait_for_completion_timeout( diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/ti/wlcore/ps.h index 5f19d4fbbf2..de4f9da8ed2 100644 --- a/drivers/net/wireless/wl12xx/ps.h +++ b/drivers/net/wireless/ti/wlcore/ps.h @@ -24,7 +24,7 @@ #ifndef __PS_H__ #define __PS_H__ -#include "wl12xx.h" +#include "wlcore.h" #include "acx.h" int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/ti/wlcore/rx.c index cfa6071704c..89bd9385e90 100644 --- a/drivers/net/wireless/wl12xx/rx.c +++ b/drivers/net/wireless/ti/wlcore/rx.c @@ -24,34 +24,36 @@ #include <linux/gfp.h> #include <linux/sched.h> -#include "wl12xx.h" +#include "wlcore.h" #include "debug.h" #include "acx.h" -#include "reg.h" #include "rx.h" #include "tx.h" #include "io.h" +#include "hw_ops.h" -static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status, - u32 drv_rx_counter) -{ - return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & - RX_MEM_BLOCK_MASK; -} +/* + * TODO: this is here just for now, it must be removed when the data + * operations are in place. 
+ */ +#include "../wl12xx/reg.h" -static u32 wl12xx_rx_get_buf_size(struct wl12xx_fw_status *status, - u32 drv_rx_counter) +static u32 wlcore_rx_get_buf_size(struct wl1271 *wl, + u32 rx_pkt_desc) { - return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & - RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV; + if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN) + return (rx_pkt_desc & ALIGNED_RX_BUF_SIZE_MASK) >> + ALIGNED_RX_BUF_SIZE_SHIFT; + + return (rx_pkt_desc & RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV; } -static bool wl12xx_rx_get_unaligned(struct wl12xx_fw_status *status, - u32 drv_rx_counter) +static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len) { - /* Convert the value to bool */ - return !!(le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & - RX_BUF_UNALIGNED_PAYLOAD); + if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN) + return ALIGN(pkt_len, WL12XX_BUS_BLOCK_SIZE); + + return pkt_len; } static void wl1271_rx_status(struct wl1271 *wl, @@ -66,10 +68,10 @@ static void wl1271_rx_status(struct wl1271 *wl, else status->band = IEEE80211_BAND_5GHZ; - status->rate_idx = wl1271_rate_to_idx(desc->rate, status->band); + status->rate_idx = wlcore_rate_to_idx(wl, desc->rate, status->band); /* 11n support */ - if (desc->rate <= CONF_HW_RXTX_RATE_MCS0) + if (desc->rate <= wl->hw_min_ht_rate) status->flag |= RX_FLAG_HT; status->signal = desc->rssi; @@ -98,7 +100,7 @@ static void wl1271_rx_status(struct wl1271 *wl, } static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, - bool unaligned, u8 *hlid) + enum wl_rx_buf_align rx_align, u8 *hlid) { struct wl1271_rx_descriptor *desc; struct sk_buff *skb; @@ -106,8 +108,9 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, u8 *buf; u8 beacon = 0; u8 is_data = 0; - u8 reserved = unaligned ? NET_IP_ALIGN : 0; + u8 reserved = 0; u16 seq_num; + u32 pkt_data_len; /* * In PLT mode we seem to get frames and mac80211 warns about them, @@ -116,6 +119,16 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, if (unlikely(wl->plt)) return -EINVAL; + pkt_data_len = wlcore_hw_get_rx_packet_len(wl, data, length); + if (!pkt_data_len) { + wl1271_error("Invalid packet arrived from HW. length %d", + length); + return -EINVAL; + } + + if (rx_align == WLCORE_RX_BUF_UNALIGNED) + reserved = NET_IP_ALIGN; + /* the data read starts with the descriptor */ desc = (struct wl1271_rx_descriptor *) data; @@ -142,8 +155,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, return -EINVAL; } - /* skb length not included rx descriptor */ - skb = __dev_alloc_skb(length + reserved - sizeof(*desc), GFP_KERNEL); + /* skb length not including rx descriptor */ + skb = __dev_alloc_skb(pkt_data_len + reserved, GFP_KERNEL); if (!skb) { wl1271_error("Couldn't allocate RX frame"); return -ENOMEM; @@ -152,7 +165,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, /* reserve the unaligned payload(if any) */ skb_reserve(skb, reserved); - buf = skb_put(skb, length - sizeof(*desc)); + buf = skb_put(skb, pkt_data_len); /* * Copy packets from aggregation buffer to the skbs without rx @@ -160,7 +173,10 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, * packets copy the packets in offset of 2 bytes guarantee IP header * payload aligned to 4 bytes. 
*/ - memcpy(buf, data + sizeof(*desc), length - sizeof(*desc)); + memcpy(buf, data + sizeof(*desc), pkt_data_len); + if (rx_align == WLCORE_RX_BUF_PADDED) + skb_pull(skb, NET_IP_ALIGN); + *hlid = desc->hlid; hdr = (struct ieee80211_hdr *)skb->data; @@ -177,36 +193,35 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, beacon ? "beacon" : "", seq_num, *hlid); - skb_trim(skb, skb->len - desc->pad_len); - skb_queue_tail(&wl->deferred_rx_queue, skb); queue_work(wl->freezable_wq, &wl->netstack_work); return is_data; } -void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status) +void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status) { - struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map; unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0}; u32 buf_size; u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK; u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; u32 rx_counter; - u32 mem_block; - u32 pkt_length; - u32 pkt_offset; + u32 pkt_len, align_pkt_len; + u32 pkt_offset, des; u8 hlid; - bool unaligned = false; + enum wl_rx_buf_align rx_align; while (drv_rx_counter != fw_rx_counter) { buf_size = 0; rx_counter = drv_rx_counter; while (rx_counter != fw_rx_counter) { - pkt_length = wl12xx_rx_get_buf_size(status, rx_counter); - if (buf_size + pkt_length > WL1271_AGGR_BUFFER_SIZE) + des = le32_to_cpu(status->rx_pkt_descs[rx_counter]); + pkt_len = wlcore_rx_get_buf_size(wl, des); + align_pkt_len = wlcore_rx_get_align_buf_size(wl, + pkt_len); + if (buf_size + align_pkt_len > WL1271_AGGR_BUFFER_SIZE) break; - buf_size += pkt_length; + buf_size += align_pkt_len; rx_counter++; rx_counter &= NUM_RX_PKT_DESC_MOD_MASK; } @@ -216,38 +231,18 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status) break; } - if (wl->chip.id != CHIP_ID_1283_PG20) { - /* - * Choose the block we want to read - * For aggregated packets, only the first memory block - * should be retrieved. The FW takes care of the rest. 
- */ - mem_block = wl12xx_rx_get_mem_block(status, - drv_rx_counter); - - wl->rx_mem_pool_addr.addr = (mem_block << 8) + - le32_to_cpu(wl_mem_map->packet_memory_pool_start); - - wl->rx_mem_pool_addr.addr_extra = - wl->rx_mem_pool_addr.addr + 4; - - wl1271_write(wl, WL1271_SLV_REG_DATA, - &wl->rx_mem_pool_addr, - sizeof(wl->rx_mem_pool_addr), false); - } - /* Read all available packets at once */ - wl1271_read(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf, - buf_size, true); + des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]); + wlcore_hw_prepare_read(wl, des, buf_size); + wlcore_read_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf, + buf_size, true); /* Split data into separate packets */ pkt_offset = 0; while (pkt_offset < buf_size) { - pkt_length = wl12xx_rx_get_buf_size(status, - drv_rx_counter); - - unaligned = wl12xx_rx_get_unaligned(status, - drv_rx_counter); + des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]); + pkt_len = wlcore_rx_get_buf_size(wl, des); + rx_align = wlcore_hw_get_rx_buf_align(wl, des); /* * the handle data call can only fail in memory-outage @@ -256,7 +251,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status) */ if (wl1271_rx_handle_data(wl, wl->aggr_buf + pkt_offset, - pkt_length, unaligned, + pkt_len, rx_align, &hlid) == 1) { if (hlid < WL12XX_MAX_LINKS) __set_bit(hlid, active_hlids); @@ -269,7 +264,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status) wl->rx_counter++; drv_rx_counter++; drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK; - pkt_offset += pkt_length; + pkt_offset += wlcore_rx_get_align_buf_size(wl, pkt_len); } } @@ -277,8 +272,9 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status) * Write the driver's packet counter to the FW. This is only required * for older hardware revisions */ - if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION) - wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter); + if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) + wl1271_write32(wl, WL12XX_REG_RX_DRIVER_COUNTER, + wl->rx_counter); wl12xx_rearm_rx_streaming(wl, active_hlids); } diff --git a/drivers/net/wireless/wl12xx/rx.h b/drivers/net/wireless/ti/wlcore/rx.h index 86ba6b1d0cd..6e129e2a854 100644 --- a/drivers/net/wireless/wl12xx/rx.h +++ b/drivers/net/wireless/ti/wlcore/rx.h @@ -96,9 +96,19 @@ #define RX_MEM_BLOCK_MASK 0xFF #define RX_BUF_SIZE_MASK 0xFFF00 #define RX_BUF_SIZE_SHIFT_DIV 6 +#define ALIGNED_RX_BUF_SIZE_MASK 0xFFFF00 +#define ALIGNED_RX_BUF_SIZE_SHIFT 8 + /* If set, the start of IP payload is not 4 bytes aligned */ #define RX_BUF_UNALIGNED_PAYLOAD BIT(20) +/* Describes the alignment state of a Rx buffer */ +enum wl_rx_buf_align { + WLCORE_RX_BUF_ALIGNED, + WLCORE_RX_BUF_UNALIGNED, + WLCORE_RX_BUF_PADDED, +}; + enum { WL12XX_RX_CLASS_UNKNOWN, WL12XX_RX_CLASS_MANAGEMENT, @@ -126,7 +136,7 @@ struct wl1271_rx_descriptor { u8 reserved; } __packed; -void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status); +void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status); u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); #endif diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/ti/wlcore/scan.c index fcba055ef19..ade21a011c4 100644 --- a/drivers/net/wireless/wl12xx/scan.c +++ b/drivers/net/wireless/ti/wlcore/scan.c @@ -23,7 +23,7 @@ #include <linux/ieee80211.h> -#include "wl12xx.h" +#include "wlcore.h" #include "debug.h" #include "cmd.h" #include "scan.h" @@ -417,6 +417,23 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl, int i, j; u32 flags; bool force_passive = 
!req->n_ssids; + u32 min_dwell_time_active, max_dwell_time_active, delta_per_probe; + u32 dwell_time_passive, dwell_time_dfs; + + if (band == IEEE80211_BAND_5GHZ) + delta_per_probe = c->dwell_time_delta_per_probe_5; + else + delta_per_probe = c->dwell_time_delta_per_probe; + + min_dwell_time_active = c->base_dwell_time + + req->n_ssids * c->num_probe_reqs * delta_per_probe; + + max_dwell_time_active = min_dwell_time_active + c->max_dwell_time_delta; + + min_dwell_time_active = DIV_ROUND_UP(min_dwell_time_active, 1000); + max_dwell_time_active = DIV_ROUND_UP(max_dwell_time_active, 1000); + dwell_time_passive = DIV_ROUND_UP(c->dwell_time_passive, 1000); + dwell_time_dfs = DIV_ROUND_UP(c->dwell_time_dfs, 1000); for (i = 0, j = start; i < req->n_channels && j < max_channels; @@ -440,21 +457,24 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl, req->channels[i]->flags); wl1271_debug(DEBUG_SCAN, "max_power %d", req->channels[i]->max_power); + wl1271_debug(DEBUG_SCAN, "min_dwell_time %d max dwell time %d", + min_dwell_time_active, + max_dwell_time_active); if (flags & IEEE80211_CHAN_RADAR) { channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS; channels[j].passive_duration = - cpu_to_le16(c->dwell_time_dfs); + cpu_to_le16(dwell_time_dfs); } else { channels[j].passive_duration = - cpu_to_le16(c->dwell_time_passive); + cpu_to_le16(dwell_time_passive); } channels[j].min_duration = - cpu_to_le16(c->min_dwell_time_active); + cpu_to_le16(min_dwell_time_active); channels[j].max_duration = - cpu_to_le16(c->max_dwell_time_active); + cpu_to_le16(max_dwell_time_active); channels[j].tx_power_att = req->channels[i]->max_power; channels[j].channel = req->channels[i]->hw_value; diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/ti/wlcore/scan.h index 96ff457a3a0..81ee36ac207 100644 --- a/drivers/net/wireless/wl12xx/scan.h +++ b/drivers/net/wireless/ti/wlcore/scan.h @@ -24,7 +24,7 @@ #ifndef __SCAN_H__ #define __SCAN_H__ -#include "wl12xx.h" +#include "wlcore.h" int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif, const u8 *ssid, size_t ssid_len, @@ -55,7 +55,7 @@ void wl1271_scan_sched_scan_results(struct wl1271 *wl); #define WL1271_SCAN_BAND_2_4_GHZ 0 #define WL1271_SCAN_BAND_5_GHZ 1 -#define WL1271_SCAN_TIMEOUT 10000 /* msec */ +#define WL1271_SCAN_TIMEOUT 30000 /* msec */ enum { WL1271_SCAN_STATE_IDLE, diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 4b3c32774ba..0a72347cfc4 100644 --- a/drivers/net/wireless/wl12xx/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -33,7 +33,7 @@ #include <linux/wl12xx.h> #include <linux/pm_runtime.h> -#include "wl12xx.h" +#include "wlcore.h" #include "wl12xx_80211.h" #include "io.h" @@ -76,7 +76,7 @@ static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf, sdio_claim_host(func); - if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { + if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) { ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n", addr, ((u8 *)buf)[0]); @@ -105,7 +105,7 @@ static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf, sdio_claim_host(func); - if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { + if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) { sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret); dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n", addr, ((u8 *)buf)[0]); diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index 
2fc18a8dcce..553cd3cbb98 100644 --- a/drivers/net/wireless/wl12xx/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -30,12 +30,10 @@ #include <linux/platform_device.h> #include <linux/slab.h> -#include "wl12xx.h" +#include "wlcore.h" #include "wl12xx_80211.h" #include "io.h" -#include "reg.h" - #define WSPI_CMD_READ 0x40000000 #define WSPI_CMD_WRITE 0x00000000 #define WSPI_CMD_FIXED 0x20000000 diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c index 1e93bb9c024..0e59ea2cdd3 100644 --- a/drivers/net/wireless/wl12xx/testmode.c +++ b/drivers/net/wireless/ti/wlcore/testmode.c @@ -25,10 +25,9 @@ #include <linux/slab.h> #include <net/genetlink.h> -#include "wl12xx.h" +#include "wlcore.h" #include "debug.h" #include "acx.h" -#include "reg.h" #include "ps.h" #include "io.h" @@ -116,7 +115,8 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[]) goto out_sleep; } - NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf); + if (nla_put(skb, WL1271_TM_ATTR_DATA, buf_len, buf)) + goto nla_put_failure; ret = cfg80211_testmode_reply(skb); if (ret < 0) goto out_sleep; @@ -178,7 +178,8 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) goto out_free; } - NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); + if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd)) + goto nla_put_failure; ret = cfg80211_testmode_reply(skb); if (ret < 0) goto out_free; @@ -297,7 +298,8 @@ static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[]) goto out; } - NLA_PUT(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr); + if (nla_put(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr)) + goto nla_put_failure; ret = cfg80211_testmode_reply(skb); if (ret < 0) goto out; diff --git a/drivers/net/wireless/wl12xx/testmode.h b/drivers/net/wireless/ti/wlcore/testmode.h index 8071654259e..8071654259e 100644 --- a/drivers/net/wireless/wl12xx/testmode.h +++ b/drivers/net/wireless/ti/wlcore/testmode.h diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/ti/wlcore/tx.c index 43ae49143d6..6893bc20799 100644 --- a/drivers/net/wireless/wl12xx/tx.c +++ b/drivers/net/wireless/ti/wlcore/tx.c @@ -25,13 +25,19 @@ #include <linux/module.h> #include <linux/etherdevice.h> -#include "wl12xx.h" +#include "wlcore.h" #include "debug.h" #include "io.h" -#include "reg.h" #include "ps.h" #include "tx.h" #include "event.h" +#include "hw_ops.h" + +/* + * TODO: this is here just for now, it must be removed when the data + * operations are in place. 
+ */ +#include "../wl12xx/reg.h" static int wl1271_set_default_wep_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 id) @@ -56,8 +62,8 @@ static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb) { int id; - id = find_first_zero_bit(wl->tx_frames_map, ACX_TX_DESCRIPTORS); - if (id >= ACX_TX_DESCRIPTORS) + id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc); + if (id >= wl->num_tx_desc) return -EBUSY; __set_bit(id, wl->tx_frames_map); @@ -69,7 +75,7 @@ static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb) static void wl1271_free_tx_id(struct wl1271 *wl, int id) { if (__test_and_clear_bit(id, wl->tx_frames_map)) { - if (unlikely(wl->tx_frames_cnt == ACX_TX_DESCRIPTORS)) + if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc)) clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); wl->tx_frames[id] = NULL; @@ -167,14 +173,15 @@ u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, return wlvif->dev_hlid; } -static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl, - unsigned int packet_length) +unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl, + unsigned int packet_length) { - if (wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT) - return ALIGN(packet_length, WL1271_TX_ALIGN_TO); - else + if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN) return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE); + else + return ALIGN(packet_length, WL1271_TX_ALIGN_TO); } +EXPORT_SYMBOL(wlcore_calc_packet_alignment); static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct sk_buff *skb, u32 extra, u32 buf_offset, @@ -182,10 +189,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif, { struct wl1271_tx_hw_descr *desc; u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; - u32 len; u32 total_blocks; int id, ret = -EBUSY, ac; - u32 spare_blocks = wl->tx_spare_blocks; + u32 spare_blocks = wl->normal_tx_spare; bool is_dummy = false; if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE) @@ -196,30 +202,19 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif, if (id < 0) return id; - /* approximate the number of blocks required for this packet - in the firmware */ - len = wl12xx_calc_packet_alignment(wl, total_len); - - /* in case of a dummy packet, use default amount of spare mem blocks */ - if (unlikely(wl12xx_is_dummy_packet(wl, skb))) { + if (unlikely(wl12xx_is_dummy_packet(wl, skb))) is_dummy = true; - spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; - } + else if (wlvif->is_gem) + spare_blocks = wl->gem_tx_spare; - total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE + - spare_blocks; + total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks); if (total_blocks <= wl->tx_blocks_available) { desc = (struct wl1271_tx_hw_descr *)skb_push( skb, total_len - skb->len); - /* HW descriptor fields change between wl127x and wl128x */ - if (wl->chip.id == CHIP_ID_1283_PG20) { - desc->wl128x_mem.total_mem_blocks = total_blocks; - } else { - desc->wl127x_mem.extra_blocks = spare_blocks; - desc->wl127x_mem.total_mem_blocks = total_blocks; - } + wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks, + spare_blocks); desc->id = id; @@ -256,7 +251,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif, { struct timespec ts; struct wl1271_tx_hw_descr *desc; - int aligned_len, ac, rate_idx; + int ac, rate_idx; s64 hosttime; u16 tx_attr = 0; __le16 frame_control; @@ -329,44 +324,16 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct 
wl12xx_vif *wlvif, } tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY; - desc->reserved = 0; - - aligned_len = wl12xx_calc_packet_alignment(wl, skb->len); - - if (wl->chip.id == CHIP_ID_1283_PG20) { - desc->wl128x_mem.extra_bytes = aligned_len - skb->len; - desc->length = cpu_to_le16(aligned_len >> 2); - - wl1271_debug(DEBUG_TX, "tx_fill_hdr: hlid: %d " - "tx_attr: 0x%x len: %d life: %d mem: %d", - desc->hlid, tx_attr, - le16_to_cpu(desc->length), - le16_to_cpu(desc->life_time), - desc->wl128x_mem.total_mem_blocks); - } else { - int pad; - - /* Store the aligned length in terms of words */ - desc->length = cpu_to_le16(aligned_len >> 2); - - /* calculate number of padding bytes */ - pad = aligned_len - skb->len; - tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD; - - wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d " - "tx_attr: 0x%x len: %d life: %d mem: %d", pad, - desc->hlid, tx_attr, - le16_to_cpu(desc->length), - le16_to_cpu(desc->life_time), - desc->wl127x_mem.total_mem_blocks); - } /* for WEP shared auth - no fw encryption is needed */ if (ieee80211_is_auth(frame_control) && ieee80211_has_protected(frame_control)) tx_attr |= TX_HW_ATTR_HOST_ENCRYPT; + desc->reserved = 0; desc->tx_attr = cpu_to_le16(tx_attr); + + wlcore_hw_set_tx_desc_data_len(wl, desc, skb); } /* caller must hold wl->mutex */ @@ -432,7 +399,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif, * In special cases, we want to align to a specific block size * (eg. for wl128x with SDIO we align to 256). */ - total_len = wl12xx_calc_packet_alignment(wl, skb->len); + total_len = wlcore_calc_packet_alignment(wl, skb->len); memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len); memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len); @@ -718,8 +685,8 @@ void wl1271_tx_work_locked(struct wl1271 *wl) * Flush buffer and try again. */ wl1271_skb_queue_head(wl, wlvif, skb); - wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf, - buf_offset, true); + wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf, + buf_offset, true); sent_packets = true; buf_offset = 0; continue; @@ -753,8 +720,8 @@ void wl1271_tx_work_locked(struct wl1271 *wl) out_ack: if (buf_offset) { - wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf, - buf_offset, true); + wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf, + buf_offset, true); sent_packets = true; } if (sent_packets) { @@ -762,8 +729,8 @@ out_ack: * Interrupt the firmware with the new packets. This is only * required for older hardware revisions */ - if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION) - wl1271_write32(wl, WL1271_HOST_WR_ACCESS, + if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) + wl1271_write32(wl, WL12XX_HOST_WR_ACCESS, wl->tx_packets_count); wl1271_handle_tx_low_watermark(wl); @@ -792,11 +759,20 @@ static u8 wl1271_tx_get_rate_flags(u8 rate_class_index) { u8 flags = 0; - if (rate_class_index >= CONF_HW_RXTX_RATE_MCS_MIN && - rate_class_index <= CONF_HW_RXTX_RATE_MCS_MAX) + /* + * TODO: use wl12xx constants when this code is moved to wl12xx, as + * only it uses Tx-completion. + */ + if (rate_class_index <= 8) flags |= IEEE80211_TX_RC_MCS; - if (rate_class_index == CONF_HW_RXTX_RATE_MCS7_SGI) + + /* + * TODO: use wl12xx constants when this code is moved to wl12xx, as + * only it uses Tx-completion. 
+ */ + if (rate_class_index == 0) flags |= IEEE80211_TX_RC_SHORT_GI; + return flags; } @@ -813,7 +789,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl, u8 retries = 0; /* check for id legality */ - if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) { + if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) { wl1271_warning("TX result illegal id: %d", id); return; } @@ -834,7 +810,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl, if (result->status == TX_SUCCESS) { if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) info->flags |= IEEE80211_TX_STAT_ACK; - rate = wl1271_rate_to_idx(result->rate_class_index, + rate = wlcore_rate_to_idx(wl, result->rate_class_index, wlvif->band); rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index); retries = result->ack_failures; @@ -929,6 +905,7 @@ void wl1271_tx_complete(struct wl1271 *wl) wl->tx_results_count++; } } +EXPORT_SYMBOL(wl1271_tx_complete); void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid) { @@ -1006,7 +983,7 @@ void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues) if (reset_tx_queues) wl1271_handle_tx_low_watermark(wl); - for (i = 0; i < ACX_TX_DESCRIPTORS; i++) { + for (i = 0; i < wl->num_tx_desc; i++) { if (wl->tx_frames[i] == NULL) continue; diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/ti/wlcore/tx.h index 5cf8c32d40d..2fd6e5dc6f7 100644 --- a/drivers/net/wireless/wl12xx/tx.h +++ b/drivers/net/wireless/ti/wlcore/tx.h @@ -25,9 +25,6 @@ #ifndef __TX_H__ #define __TX_H__ -#define TX_HW_BLOCK_SPARE_DEFAULT 1 -#define TX_HW_BLOCK_SIZE 252 - #define TX_HW_MGMT_PKT_LIFETIME_TU 2000 #define TX_HW_AP_MODE_PKT_LIFETIME_TU 8000 @@ -212,7 +209,7 @@ void wl1271_tx_complete(struct wl1271 *wl); void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif); void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues); void wl1271_tx_flush(struct wl1271 *wl); -u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); +u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band); u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, enum ieee80211_band rate_band); u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set); @@ -224,6 +221,8 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid); void wl1271_handle_tx_low_watermark(struct wl1271 *wl); bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb); void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids); +unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl, + unsigned int packet_length); /* from main.c */ void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid); diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wlcore/wl12xx.h index 749a15a75d3..a9b220c43e5 100644 --- a/drivers/net/wireless/wl12xx/wl12xx.h +++ b/drivers/net/wireless/ti/wlcore/wl12xx.h @@ -89,8 +89,6 @@ #define WL1271_AP_BSS_INDEX 0 #define WL1271_AP_DEF_BEACON_EXP 20 -#define ACX_TX_DESCRIPTORS 16 - #define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE) enum wl1271_state { @@ -105,26 +103,6 @@ enum wl12xx_fw_type { WL12XX_FW_TYPE_PLT, }; -enum wl1271_partition_type { - PART_DOWN, - PART_WORK, - PART_DRPW, - - PART_TABLE_LEN -}; - -struct wl1271_partition { - u32 size; - u32 start; -}; - -struct wl1271_partition_set { - struct wl1271_partition mem; - struct wl1271_partition reg; - struct wl1271_partition mem2; - struct wl1271_partition mem3; -}; - struct wl1271; enum { @@ -167,8 +145,21 @@ struct wl1271_stats { 
#define AP_MAX_STATIONS 8 +struct wl_fw_packet_counters { + /* Cumulative counter of released packets per AC */ + u8 tx_released_pkts[NUM_TX_QUEUES]; + + /* Cumulative counter of freed packets per HLID */ + u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS]; + + /* Cumulative counter of released Voice memory blocks */ + u8 tx_voice_released_blks; + + u8 padding[3]; +} __packed; + /* FW status registers */ -struct wl12xx_fw_status { +struct wl_fw_status { __le32 intr; u8 fw_rx_counter; u8 drv_rx_counter; @@ -195,16 +186,12 @@ struct wl12xx_fw_status { /* Size (in Memory Blocks) of TX pool */ __le32 tx_total; - /* Cumulative counter of released packets per AC */ - u8 tx_released_pkts[NUM_TX_QUEUES]; + struct wl_fw_packet_counters counters; - /* Cumulative counter of freed packets per HLID */ - u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS]; - - /* Cumulative counter of released Voice memory blocks */ - u8 tx_voice_released_blks; - u8 padding_1[3]; __le32 log_start_addr; + + /* Private status to be used by the lower drivers */ + u8 priv[0]; } __packed; struct wl1271_rx_mem_pool_addr { @@ -292,214 +279,6 @@ struct wl1271_link { u8 ba_bitmap; }; -struct wl1271 { - struct ieee80211_hw *hw; - bool mac80211_registered; - - struct device *dev; - - void *if_priv; - - struct wl1271_if_operations *if_ops; - - void (*set_power)(bool enable); - int irq; - int ref_clock; - - spinlock_t wl_lock; - - enum wl1271_state state; - enum wl12xx_fw_type fw_type; - bool plt; - u8 last_vif_count; - struct mutex mutex; - - unsigned long flags; - - struct wl1271_partition_set part; - - struct wl1271_chip chip; - - int cmd_box_addr; - int event_box_addr; - - u8 *fw; - size_t fw_len; - void *nvs; - size_t nvs_len; - - s8 hw_pg_ver; - - /* address read from the fuse ROM */ - u32 fuse_oui_addr; - u32 fuse_nic_addr; - - /* we have up to 2 MAC addresses */ - struct mac_address addresses[2]; - int channel; - u8 system_hlid; - - unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)]; - unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)]; - unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)]; - unsigned long rate_policies_map[ - BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)]; - - struct list_head wlvif_list; - - u8 sta_count; - u8 ap_count; - - struct wl1271_acx_mem_map *target_mem_map; - - /* Accounting for allocated / available TX blocks on HW */ - u32 tx_blocks_freed; - u32 tx_blocks_available; - u32 tx_allocated_blocks; - u32 tx_results_count; - - /* amount of spare TX blocks to use */ - u32 tx_spare_blocks; - - /* Accounting for allocated / available Tx packets in HW */ - u32 tx_pkts_freed[NUM_TX_QUEUES]; - u32 tx_allocated_pkts[NUM_TX_QUEUES]; - - /* Transmitted TX packets counter for chipset interface */ - u32 tx_packets_count; - - /* Time-offset between host and chipset clocks */ - s64 time_offset; - - /* Frames scheduled for transmission, not handled yet */ - int tx_queue_count[NUM_TX_QUEUES]; - long stopped_queues_map; - - /* Frames received, not handled yet by mac80211 */ - struct sk_buff_head deferred_rx_queue; - - /* Frames sent, not returned yet to mac80211 */ - struct sk_buff_head deferred_tx_queue; - - struct work_struct tx_work; - struct workqueue_struct *freezable_wq; - - /* Pending TX frames */ - unsigned long tx_frames_map[BITS_TO_LONGS(ACX_TX_DESCRIPTORS)]; - struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS]; - int tx_frames_cnt; - - /* FW Rx counter */ - u32 rx_counter; - - /* Rx memory pool address */ - struct wl1271_rx_mem_pool_addr rx_mem_pool_addr; - - /* Intermediate buffer, used for packet aggregation */ - u8 *aggr_buf; 
- - /* Reusable dummy packet template */ - struct sk_buff *dummy_packet; - - /* Network stack work */ - struct work_struct netstack_work; - - /* FW log buffer */ - u8 *fwlog; - - /* Number of valid bytes in the FW log buffer */ - ssize_t fwlog_size; - - /* Sysfs FW log entry readers wait queue */ - wait_queue_head_t fwlog_waitq; - - /* Hardware recovery work */ - struct work_struct recovery_work; - - /* The mbox event mask */ - u32 event_mask; - - /* Mailbox pointers */ - u32 mbox_ptr[2]; - - /* Are we currently scanning */ - struct ieee80211_vif *scan_vif; - struct wl1271_scan scan; - struct delayed_work scan_complete_work; - - bool sched_scanning; - - /* The current band */ - enum ieee80211_band band; - - struct completion *elp_compl; - struct delayed_work elp_work; - - /* in dBm */ - int power_level; - - struct wl1271_stats stats; - - __le32 buffer_32; - u32 buffer_cmd; - u32 buffer_busyword[WL1271_BUSY_WORD_CNT]; - - struct wl12xx_fw_status *fw_status; - struct wl1271_tx_hw_res_if *tx_res_if; - - /* Current chipset configuration */ - struct conf_drv_settings conf; - - bool sg_enabled; - - bool enable_11a; - - /* Most recently reported noise in dBm */ - s8 noise; - - /* bands supported by this instance of wl12xx */ - struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; - - int tcxo_clock; - - /* - * wowlan trigger was configured during suspend. - * (currently, only "ANY" trigger is supported) - */ - bool wow_enabled; - bool irq_wake_enabled; - - /* - * AP-mode - links indexed by HLID. The global and broadcast links - * are always active. - */ - struct wl1271_link links[WL12XX_MAX_LINKS]; - - /* AP-mode - a bitmap of links currently in PS mode according to FW */ - u32 ap_fw_ps_map; - - /* AP-mode - a bitmap of links currently in PS mode in mac80211 */ - unsigned long ap_ps_map; - - /* Quirks of specific hardware revisions */ - unsigned int quirks; - - /* Platform limitations */ - unsigned int platform_quirks; - - /* number of currently active RX BA sessions */ - int ba_rx_session_count; - - /* AP-mode - number of currently connected stations */ - int active_sta_count; - - /* last wlvif we transmitted from */ - struct wl12xx_vif *last_wlvif; - - /* work to fire when Tx is stuck */ - struct delayed_work tx_watchdog_work; -}; - struct wl1271_station { u8 hlid; }; @@ -605,6 +384,9 @@ struct wl12xx_vif { struct work_struct rx_streaming_disable_work; struct timer_list rx_streaming_timer; + /* does the current role use GEM for encryption (AP or STA) */ + bool is_gem; + /* * This struct must be last! * data that has to be saved acrossed reconfigs (e.g. 
recovery) @@ -679,17 +461,6 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen); #define HW_BG_RATES_MASK 0xffff #define HW_HT_RATES_OFFSET 16 -/* Quirks */ - -/* Each RX/TX transaction requires an end-of-transaction transfer */ -#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0) - -/* wl127x and SPI don't support SDIO block size alignment */ -#define WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT BIT(2) - -/* Older firmwares did not implement the FW logger over bus feature */ -#define WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED BIT(4) - #define WL12XX_HW_BLOCK_SIZE 256 #endif diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h index 22b0bc98d7b..22b0bc98d7b 100644 --- a/drivers/net/wireless/wl12xx/wl12xx_80211.h +++ b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h diff --git a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c b/drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c index 998e95895f9..998e95895f9 100644 --- a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c +++ b/drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h new file mode 100644 index 00000000000..39f9fadfebd --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/wlcore.h @@ -0,0 +1,448 @@ +/* + * This file is part of wlcore + * + * Copyright (C) 2011 Texas Instruments Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WLCORE_H__ +#define __WLCORE_H__ + +#include <linux/platform_device.h> + +#include "wl12xx.h" +#include "event.h" + +/* The maximum number of Tx descriptors in all chip families */ +#define WLCORE_MAX_TX_DESCRIPTORS 32 + +/* forward declaration */ +struct wl1271_tx_hw_descr; +enum wl_rx_buf_align; + +struct wlcore_ops { + int (*identify_chip)(struct wl1271 *wl); + int (*identify_fw)(struct wl1271 *wl); + int (*boot)(struct wl1271 *wl); + void (*trigger_cmd)(struct wl1271 *wl, int cmd_box_addr, + void *buf, size_t len); + void (*ack_event)(struct wl1271 *wl); + u32 (*calc_tx_blocks)(struct wl1271 *wl, u32 len, u32 spare_blks); + void (*set_tx_desc_blocks)(struct wl1271 *wl, + struct wl1271_tx_hw_descr *desc, + u32 blks, u32 spare_blks); + void (*set_tx_desc_data_len)(struct wl1271 *wl, + struct wl1271_tx_hw_descr *desc, + struct sk_buff *skb); + enum wl_rx_buf_align (*get_rx_buf_align)(struct wl1271 *wl, + u32 rx_desc); + void (*prepare_read)(struct wl1271 *wl, u32 rx_desc, u32 len); + u32 (*get_rx_packet_len)(struct wl1271 *wl, void *rx_data, + u32 data_len); + void (*tx_delayed_compl)(struct wl1271 *wl); + void (*tx_immediate_compl)(struct wl1271 *wl); + int (*hw_init)(struct wl1271 *wl); + int (*init_vif)(struct wl1271 *wl, struct wl12xx_vif *wlvif); + u32 (*sta_get_ap_rate_mask)(struct wl1271 *wl, + struct wl12xx_vif *wlvif); + s8 (*get_pg_ver)(struct wl1271 *wl); + void (*get_mac)(struct wl1271 *wl); +}; + +enum wlcore_partitions { + PART_DOWN, + PART_WORK, + PART_BOOT, + PART_DRPW, + PART_TOP_PRCM_ELP_SOC, + PART_PHY_INIT, + + PART_TABLE_LEN, +}; + +struct wlcore_partition { + u32 size; + u32 start; +}; + +struct wlcore_partition_set { + struct wlcore_partition mem; + struct wlcore_partition reg; + struct wlcore_partition mem2; + struct wlcore_partition mem3; +}; + +enum wlcore_registers { + /* register addresses, used with partition translation */ + REG_ECPU_CONTROL, + REG_INTERRUPT_NO_CLEAR, + REG_INTERRUPT_ACK, + REG_COMMAND_MAILBOX_PTR, + REG_EVENT_MAILBOX_PTR, + REG_INTERRUPT_TRIG, + REG_INTERRUPT_MASK, + REG_PC_ON_RECOVERY, + REG_CHIP_ID_B, + REG_CMD_MBOX_ADDRESS, + + /* data access memory addresses, used with partition translation */ + REG_SLV_MEM_DATA, + REG_SLV_REG_DATA, + + /* raw data access memory addresses */ + REG_RAW_FW_STATUS_ADDR, + + REG_TABLE_LEN, +}; + +struct wl1271 { + struct ieee80211_hw *hw; + bool mac80211_registered; + + struct device *dev; + + void *if_priv; + + struct wl1271_if_operations *if_ops; + + void (*set_power)(bool enable); + int irq; + int ref_clock; + + spinlock_t wl_lock; + + enum wl1271_state state; + enum wl12xx_fw_type fw_type; + bool plt; + u8 last_vif_count; + struct mutex mutex; + + unsigned long flags; + + struct wlcore_partition_set curr_part; + + struct wl1271_chip chip; + + int cmd_box_addr; + + u8 *fw; + size_t fw_len; + void *nvs; + size_t nvs_len; + + s8 hw_pg_ver; + + /* address read from the fuse ROM */ + u32 fuse_oui_addr; + u32 fuse_nic_addr; + + /* we have up to 2 MAC addresses */ + struct mac_address addresses[2]; + int channel; + u8 system_hlid; + + unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)]; + unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)]; + unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)]; + unsigned long rate_policies_map[ + 
BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)]; + + struct list_head wlvif_list; + + u8 sta_count; + u8 ap_count; + + struct wl1271_acx_mem_map *target_mem_map; + + /* Accounting for allocated / available TX blocks on HW */ + u32 tx_blocks_freed; + u32 tx_blocks_available; + u32 tx_allocated_blocks; + u32 tx_results_count; + + /* Accounting for allocated / available Tx packets in HW */ + u32 tx_pkts_freed[NUM_TX_QUEUES]; + u32 tx_allocated_pkts[NUM_TX_QUEUES]; + + /* Transmitted TX packets counter for chipset interface */ + u32 tx_packets_count; + + /* Time-offset between host and chipset clocks */ + s64 time_offset; + + /* Frames scheduled for transmission, not handled yet */ + int tx_queue_count[NUM_TX_QUEUES]; + long stopped_queues_map; + + /* Frames received, not handled yet by mac80211 */ + struct sk_buff_head deferred_rx_queue; + + /* Frames sent, not returned yet to mac80211 */ + struct sk_buff_head deferred_tx_queue; + + struct work_struct tx_work; + struct workqueue_struct *freezable_wq; + + /* Pending TX frames */ + unsigned long tx_frames_map[BITS_TO_LONGS(WLCORE_MAX_TX_DESCRIPTORS)]; + struct sk_buff *tx_frames[WLCORE_MAX_TX_DESCRIPTORS]; + int tx_frames_cnt; + + /* FW Rx counter */ + u32 rx_counter; + + /* Rx memory pool address */ + struct wl1271_rx_mem_pool_addr rx_mem_pool_addr; + + /* Intermediate buffer, used for packet aggregation */ + u8 *aggr_buf; + + /* Reusable dummy packet template */ + struct sk_buff *dummy_packet; + + /* Network stack work */ + struct work_struct netstack_work; + + /* FW log buffer */ + u8 *fwlog; + + /* Number of valid bytes in the FW log buffer */ + ssize_t fwlog_size; + + /* Sysfs FW log entry readers wait queue */ + wait_queue_head_t fwlog_waitq; + + /* Hardware recovery work */ + struct work_struct recovery_work; + + /* Pointer that holds DMA-friendly block for the mailbox */ + struct event_mailbox *mbox; + + /* The mbox event mask */ + u32 event_mask; + + /* Mailbox pointers */ + u32 mbox_ptr[2]; + + /* Are we currently scanning */ + struct ieee80211_vif *scan_vif; + struct wl1271_scan scan; + struct delayed_work scan_complete_work; + + bool sched_scanning; + + /* The current band */ + enum ieee80211_band band; + + struct completion *elp_compl; + struct delayed_work elp_work; + + /* in dBm */ + int power_level; + + struct wl1271_stats stats; + + __le32 buffer_32; + u32 buffer_cmd; + u32 buffer_busyword[WL1271_BUSY_WORD_CNT]; + + struct wl_fw_status *fw_status; + struct wl1271_tx_hw_res_if *tx_res_if; + + /* Current chipset configuration */ + struct wlcore_conf conf; + + bool sg_enabled; + + bool enable_11a; + + /* Most recently reported noise in dBm */ + s8 noise; + + /* bands supported by this instance of wl12xx */ + struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; + + int tcxo_clock; + + /* + * wowlan trigger was configured during suspend. + * (currently, only "ANY" trigger is supported) + */ + bool wow_enabled; + bool irq_wake_enabled; + + /* + * AP-mode - links indexed by HLID. The global and broadcast links + * are always active. 
+ */ + struct wl1271_link links[WL12XX_MAX_LINKS]; + + /* AP-mode - a bitmap of links currently in PS mode according to FW */ + u32 ap_fw_ps_map; + + /* AP-mode - a bitmap of links currently in PS mode in mac80211 */ + unsigned long ap_ps_map; + + /* Quirks of specific hardware revisions */ + unsigned int quirks; + + /* Platform limitations */ + unsigned int platform_quirks; + + /* number of currently active RX BA sessions */ + int ba_rx_session_count; + + /* AP-mode - number of currently connected stations */ + int active_sta_count; + + /* last wlvif we transmitted from */ + struct wl12xx_vif *last_wlvif; + + /* work to fire when Tx is stuck */ + struct delayed_work tx_watchdog_work; + + struct wlcore_ops *ops; + /* pointer to the lower driver partition table */ + const struct wlcore_partition_set *ptable; + /* pointer to the lower driver register table */ + const int *rtable; + /* name of the firmwares to load - for PLT, single role, multi-role */ + const char *plt_fw_name; + const char *sr_fw_name; + const char *mr_fw_name; + + /* per-chip-family private structure */ + void *priv; + + /* number of TX descriptors the HW supports. */ + u32 num_tx_desc; + + /* spare Tx blocks for normal/GEM operating modes */ + u32 normal_tx_spare; + u32 gem_tx_spare; + + /* translate HW Tx rates to standard rate-indices */ + const u8 **band_rate_to_idx; + + /* size of table for HW rates that can be received from chip */ + u8 hw_tx_rate_tbl_size; + + /* this HW rate and below are considered HT rates for this chip */ + u8 hw_min_ht_rate; + + /* HW HT (11n) capabilities */ + struct ieee80211_sta_ht_cap ht_cap; + + /* size of the private FW status data */ + size_t fw_status_priv_len; +}; + +int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev); +int __devexit wlcore_remove(struct platform_device *pdev); +struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size); +int wlcore_free_hw(struct wl1271 *wl); + +/* Firmware image load chunk size */ +#define CHUNK_SIZE 16384 + +/* Quirks */ + +/* Each RX/TX transaction requires an end-of-transaction transfer */ +#define WLCORE_QUIRK_END_OF_TRANSACTION BIT(0) + +/* wl127x and SPI don't support SDIO block size alignment */ +#define WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN BIT(2) + +/* means aggregated Rx packets are aligned to a SDIO block */ +#define WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN BIT(3) + +/* Older firmwares did not implement the FW logger over bus feature */ +#define WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED BIT(4) + +/* Older firmwares use an old NVS format */ +#define WLCORE_QUIRK_LEGACY_NVS BIT(5) + +/* Some firmwares may not support ELP */ +#define WLCORE_QUIRK_NO_ELP BIT(6) + +/* TODO: move to the lower drivers when all usages are abstracted */ +#define CHIP_ID_1271_PG10 (0x4030101) +#define CHIP_ID_1271_PG20 (0x4030111) +#define CHIP_ID_1283_PG10 (0x05030101) +#define CHIP_ID_1283_PG20 (0x05030111) + +/* TODO: move all these common registers and values elsewhere */ +#define HW_ACCESS_ELP_CTRL_REG 0x1FFFC + +/* ELP register commands */ +#define ELPCTRL_WAKE_UP 0x1 +#define ELPCTRL_WAKE_UP_WLAN_READY 0x5 +#define ELPCTRL_SLEEP 0x0 +/* ELP WLAN_READY bit */ +#define ELPCTRL_WLAN_READY 0x2 + +/************************************************************************* + + Interrupt Trigger Register (Host -> WiLink) + +**************************************************************************/ + +/* Hardware to Embedded CPU Interrupts - first 32-bit register set */ + +/* + * The host sets this bit to inform the Wlan + * FW that a TX packet is in the XFER + * 
Buffer #0. + */ +#define INTR_TRIG_TX_PROC0 BIT(2) + +/* + * The host sets this bit to inform the FW + * that it read a packet from RX XFER + * Buffer #0. + */ +#define INTR_TRIG_RX_PROC0 BIT(3) + +#define INTR_TRIG_DEBUG_ACK BIT(4) + +#define INTR_TRIG_STATE_CHANGED BIT(5) + +/* Hardware to Embedded CPU Interrupts - second 32-bit register set */ + +/* + * The host sets this bit to inform the FW + * that it read a packet from RX XFER + * Buffer #1. + */ +#define INTR_TRIG_RX_PROC1 BIT(17) + +/* + * The host sets this bit to inform the Wlan + * hardware that a TX packet is in the XFER + * Buffer #1. + */ +#define INTR_TRIG_TX_PROC1 BIT(18) + +#define ACX_SLV_SOFT_RESET_BIT BIT(1) +#define SOFT_RESET_MAX_TIME 1000000 +#define SOFT_RESET_STALL_TIME 1000 + +#define ECPU_CONTROL_HALT 0x00000101 + +#define WELP_ARM_COMMAND_VAL 0x4 + +#endif /* __WLCORE_H__ */ diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig deleted file mode 100644 index af08c8609c6..00000000000 --- a/drivers/net/wireless/wl12xx/Kconfig +++ /dev/null @@ -1,48 +0,0 @@ -menuconfig WL12XX_MENU - tristate "TI wl12xx driver support" - depends on MAC80211 && EXPERIMENTAL - ---help--- - This will enable TI wl12xx driver support for the following chips: - wl1271, wl1273, wl1281 and wl1283. - The drivers make use of the mac80211 stack. - -config WL12XX - tristate "TI wl12xx support" - depends on WL12XX_MENU && GENERIC_HARDIRQS - depends on INET - select FW_LOADER - ---help--- - This module adds support for wireless adapters based on TI wl1271 and - TI wl1273 chipsets. This module does *not* include support for wl1251. - For wl1251 support, use the separate homonymous driver instead. - - If you choose to build a module, it will be called wl12xx. Say N if - unsure. - -config WL12XX_SPI - tristate "TI wl12xx SPI support" - depends on WL12XX && SPI_MASTER - select CRC7 - ---help--- - This module adds support for the SPI interface of adapters using - TI wl12xx chipsets. Select this if your platform is using - the SPI bus. - - If you choose to build a module, it'll be called wl12xx_spi. - Say N if unsure. - -config WL12XX_SDIO - tristate "TI wl12xx SDIO support" - depends on WL12XX && MMC - ---help--- - This module adds support for the SDIO interface of adapters using - TI wl12xx chipsets. Select this if your platform is using - the SDIO bus. - - If you choose to build a module, it'll be called wl12xx_sdio. - Say N if unsure. 
- -config WL12XX_PLATFORM_DATA - bool - depends on WL12XX_SDIO != n || WL1251_SDIO != n - default y diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile deleted file mode 100644 index 98f289c907a..00000000000 --- a/drivers/net/wireless/wl12xx/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -wl12xx-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \ - boot.o init.o debugfs.o scan.o - -wl12xx_spi-objs = spi.o -wl12xx_sdio-objs = sdio.o - -wl12xx-$(CONFIG_NL80211_TESTMODE) += testmode.o -obj-$(CONFIG_WL12XX) += wl12xx.o -obj-$(CONFIG_WL12XX_SPI) += wl12xx_spi.o -obj-$(CONFIG_WL12XX_SDIO) += wl12xx_sdio.o - -# small builtin driver bit -obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o - -ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c deleted file mode 100644 index 954101d03f0..00000000000 --- a/drivers/net/wireless/wl12xx/boot.c +++ /dev/null @@ -1,786 +0,0 @@ -/* - * This file is part of wl1271 - * - * Copyright (C) 2008-2010 Nokia Corporation - * - * Contact: Luciano Coelho <luciano.coelho@nokia.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - */ - -#include <linux/slab.h> -#include <linux/wl12xx.h> -#include <linux/export.h> - -#include "debug.h" -#include "acx.h" -#include "reg.h" -#include "boot.h" -#include "io.h" -#include "event.h" -#include "rx.h" - -static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag) -{ - u32 cpu_ctrl; - - /* 10.5.0 run the firmware (I) */ - cpu_ctrl = wl1271_read32(wl, ACX_REG_ECPU_CONTROL); - - /* 10.5.1 run the firmware (II) */ - cpu_ctrl |= flag; - wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl); -} - -static unsigned int wl12xx_get_fw_ver_quirks(struct wl1271 *wl) -{ - unsigned int quirks = 0; - unsigned int *fw_ver = wl->chip.fw_ver; - - /* Only new station firmwares support routing fw logs to the host */ - if ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) && - (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_FWLOG_STA_MIN)) - quirks |= WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED; - - /* This feature is not yet supported for AP mode */ - if (fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP) - quirks |= WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED; - - return quirks; -} - -static void wl1271_parse_fw_ver(struct wl1271 *wl) -{ - int ret; - - ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u", - &wl->chip.fw_ver[0], &wl->chip.fw_ver[1], - &wl->chip.fw_ver[2], &wl->chip.fw_ver[3], - &wl->chip.fw_ver[4]); - - if (ret != 5) { - wl1271_warning("fw version incorrect value"); - memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver)); - return; - } - - /* Check if any quirks are needed with older fw versions */ - wl->quirks |= wl12xx_get_fw_ver_quirks(wl); -} - -static void wl1271_boot_fw_version(struct wl1271 *wl) -{ - struct wl1271_static_data static_data; - - wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data), - false); - - 
strncpy(wl->chip.fw_ver_str, static_data.fw_version, - sizeof(wl->chip.fw_ver_str)); - - /* make sure the string is NULL-terminated */ - wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0'; - - wl1271_parse_fw_ver(wl); -} - -static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf, - size_t fw_data_len, u32 dest) -{ - struct wl1271_partition_set partition; - int addr, chunk_num, partition_limit; - u8 *p, *chunk; - - /* whal_FwCtrl_LoadFwImageSm() */ - - wl1271_debug(DEBUG_BOOT, "starting firmware upload"); - - wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d", - fw_data_len, CHUNK_SIZE); - - if ((fw_data_len % 4) != 0) { - wl1271_error("firmware length not multiple of four"); - return -EIO; - } - - chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL); - if (!chunk) { - wl1271_error("allocation for firmware upload chunk failed"); - return -ENOMEM; - } - - memcpy(&partition, &wl12xx_part_table[PART_DOWN], sizeof(partition)); - partition.mem.start = dest; - wl1271_set_partition(wl, &partition); - - /* 10.1 set partition limit and chunk num */ - chunk_num = 0; - partition_limit = wl12xx_part_table[PART_DOWN].mem.size; - - while (chunk_num < fw_data_len / CHUNK_SIZE) { - /* 10.2 update partition, if needed */ - addr = dest + (chunk_num + 2) * CHUNK_SIZE; - if (addr > partition_limit) { - addr = dest + chunk_num * CHUNK_SIZE; - partition_limit = chunk_num * CHUNK_SIZE + - wl12xx_part_table[PART_DOWN].mem.size; - partition.mem.start = addr; - wl1271_set_partition(wl, &partition); - } - - /* 10.3 upload the chunk */ - addr = dest + chunk_num * CHUNK_SIZE; - p = buf + chunk_num * CHUNK_SIZE; - memcpy(chunk, p, CHUNK_SIZE); - wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x", - p, addr); - wl1271_write(wl, addr, chunk, CHUNK_SIZE, false); - - chunk_num++; - } - - /* 10.4 upload the last chunk */ - addr = dest + chunk_num * CHUNK_SIZE; - p = buf + chunk_num * CHUNK_SIZE; - memcpy(chunk, p, fw_data_len % CHUNK_SIZE); - wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x", - fw_data_len % CHUNK_SIZE, p, addr); - wl1271_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false); - - kfree(chunk); - return 0; -} - -static int wl1271_boot_upload_firmware(struct wl1271 *wl) -{ - u32 chunks, addr, len; - int ret = 0; - u8 *fw; - - fw = wl->fw; - chunks = be32_to_cpup((__be32 *) fw); - fw += sizeof(u32); - - wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks); - - while (chunks--) { - addr = be32_to_cpup((__be32 *) fw); - fw += sizeof(u32); - len = be32_to_cpup((__be32 *) fw); - fw += sizeof(u32); - - if (len > 300000) { - wl1271_info("firmware chunk too long: %u", len); - return -EINVAL; - } - wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u", - chunks, addr, len); - ret = wl1271_boot_upload_firmware_chunk(wl, fw, len, addr); - if (ret != 0) - break; - fw += len; - } - - return ret; -} - -static int wl1271_boot_upload_nvs(struct wl1271 *wl) -{ - size_t nvs_len, burst_len; - int i; - u32 dest_addr, val; - u8 *nvs_ptr, *nvs_aligned; - - if (wl->nvs == NULL) - return -ENODEV; - - if (wl->chip.id == CHIP_ID_1283_PG20) { - struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs; - - if (wl->nvs_len == sizeof(struct wl128x_nvs_file)) { - if (nvs->general_params.dual_mode_select) - wl->enable_11a = true; - } else { - wl1271_error("nvs size is not as expected: %zu != %zu", - wl->nvs_len, - sizeof(struct wl128x_nvs_file)); - kfree(wl->nvs); - wl->nvs = NULL; - wl->nvs_len = 0; - return -EILSEQ; - } - - /* only the first part of the NVS needs 
to be uploaded */ - nvs_len = sizeof(nvs->nvs); - nvs_ptr = (u8 *)nvs->nvs; - - } else { - struct wl1271_nvs_file *nvs = - (struct wl1271_nvs_file *)wl->nvs; - /* - * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz - * band configurations) can be removed when those NVS files stop - * floating around. - */ - if (wl->nvs_len == sizeof(struct wl1271_nvs_file) || - wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) { - if (nvs->general_params.dual_mode_select) - wl->enable_11a = true; - } - - if (wl->nvs_len != sizeof(struct wl1271_nvs_file) && - (wl->nvs_len != WL1271_INI_LEGACY_NVS_FILE_SIZE || - wl->enable_11a)) { - wl1271_error("nvs size is not as expected: %zu != %zu", - wl->nvs_len, sizeof(struct wl1271_nvs_file)); - kfree(wl->nvs); - wl->nvs = NULL; - wl->nvs_len = 0; - return -EILSEQ; - } - - /* only the first part of the NVS needs to be uploaded */ - nvs_len = sizeof(nvs->nvs); - nvs_ptr = (u8 *) nvs->nvs; - } - - /* update current MAC address to NVS */ - nvs_ptr[11] = wl->addresses[0].addr[0]; - nvs_ptr[10] = wl->addresses[0].addr[1]; - nvs_ptr[6] = wl->addresses[0].addr[2]; - nvs_ptr[5] = wl->addresses[0].addr[3]; - nvs_ptr[4] = wl->addresses[0].addr[4]; - nvs_ptr[3] = wl->addresses[0].addr[5]; - - /* - * Layout before the actual NVS tables: - * 1 byte : burst length. - * 2 bytes: destination address. - * n bytes: data to burst copy. - * - * This is ended by a 0 length, then the NVS tables. - */ - - /* FIXME: Do we need to check here whether the LSB is 1? */ - while (nvs_ptr[0]) { - burst_len = nvs_ptr[0]; - dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8)); - - /* - * Due to our new wl1271_translate_reg_addr function, - * we need to add the REGISTER_BASE to the destination - */ - dest_addr += REGISTERS_BASE; - - /* We move our pointer to the data */ - nvs_ptr += 3; - - for (i = 0; i < burst_len; i++) { - if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len) - goto out_badnvs; - - val = (nvs_ptr[0] | (nvs_ptr[1] << 8) - | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24)); - - wl1271_debug(DEBUG_BOOT, - "nvs burst write 0x%x: 0x%x", - dest_addr, val); - wl1271_write32(wl, dest_addr, val); - - nvs_ptr += 4; - dest_addr += 4; - } - - if (nvs_ptr >= (u8 *) wl->nvs + nvs_len) - goto out_badnvs; - } - - /* - * We've reached the first zero length, the first NVS table - * is located at an aligned offset which is at least 7 bytes further. - * NOTE: The wl->nvs->nvs element must be first, in order to - * simplify the casting, we assume it is at the beginning of - * the wl->nvs structure. 
- */ - nvs_ptr = (u8 *)wl->nvs + - ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4); - - if (nvs_ptr >= (u8 *) wl->nvs + nvs_len) - goto out_badnvs; - - nvs_len -= nvs_ptr - (u8 *)wl->nvs; - - /* Now we must set the partition correctly */ - wl1271_set_partition(wl, &wl12xx_part_table[PART_WORK]); - - /* Copy the NVS tables to a new block to ensure alignment */ - nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL); - if (!nvs_aligned) - return -ENOMEM; - - /* And finally we upload the NVS tables */ - wl1271_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false); - - kfree(nvs_aligned); - return 0; - -out_badnvs: - wl1271_error("nvs data is malformed"); - return -EILSEQ; -} - -static void wl1271_boot_enable_interrupts(struct wl1271 *wl) -{ - wl1271_enable_interrupts(wl); - wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, - WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK)); - wl1271_write32(wl, HI_CFG, HI_CFG_DEF_VAL); -} - -static int wl1271_boot_soft_reset(struct wl1271 *wl) -{ - unsigned long timeout; - u32 boot_data; - - /* perform soft reset */ - wl1271_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT); - - /* SOFT_RESET is self clearing */ - timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME); - while (1) { - boot_data = wl1271_read32(wl, ACX_REG_SLV_SOFT_RESET); - wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data); - if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0) - break; - - if (time_after(jiffies, timeout)) { - /* 1.2 check pWhalBus->uSelfClearTime if the - * timeout was reached */ - wl1271_error("soft reset timeout"); - return -1; - } - - udelay(SOFT_RESET_STALL_TIME); - } - - /* disable Rx/Tx */ - wl1271_write32(wl, ENABLE, 0x0); - - /* disable auto calibration on start*/ - wl1271_write32(wl, SPARE_A2, 0xffff); - - return 0; -} - -static int wl1271_boot_run_firmware(struct wl1271 *wl) -{ - int loop, ret; - u32 chip_id, intr; - - wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT); - - chip_id = wl1271_read32(wl, CHIP_ID_B); - - wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id); - - if (chip_id != wl->chip.id) { - wl1271_error("chip id doesn't match after firmware boot"); - return -EIO; - } - - /* wait for init to complete */ - loop = 0; - while (loop++ < INIT_LOOP) { - udelay(INIT_LOOP_DELAY); - intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); - - if (intr == 0xffffffff) { - wl1271_error("error reading hardware complete " - "init indication"); - return -EIO; - } - /* check that ACX_INTR_INIT_COMPLETE is enabled */ - else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) { - wl1271_write32(wl, ACX_REG_INTERRUPT_ACK, - WL1271_ACX_INTR_INIT_COMPLETE); - break; - } - } - - if (loop > INIT_LOOP) { - wl1271_error("timeout waiting for the hardware to " - "complete initialization"); - return -EIO; - } - - /* get hardware config command mail box */ - wl->cmd_box_addr = wl1271_read32(wl, REG_COMMAND_MAILBOX_PTR); - - /* get hardware config event mail box */ - wl->event_box_addr = wl1271_read32(wl, REG_EVENT_MAILBOX_PTR); - - /* set the working partition to its "running" mode offset */ - wl1271_set_partition(wl, &wl12xx_part_table[PART_WORK]); - - wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 0x%x", - wl->cmd_box_addr, wl->event_box_addr); - - wl1271_boot_fw_version(wl); - - /* - * in case of full asynchronous mode the firmware event must be - * ready to receive event from the command mailbox - */ - - /* unmask required mbox events */ - wl->event_mask = BSS_LOSE_EVENT_ID | - SCAN_COMPLETE_EVENT_ID | - ROLE_STOP_COMPLETE_EVENT_ID | - 
RSSI_SNR_TRIGGER_0_EVENT_ID | - PSPOLL_DELIVERY_FAILURE_EVENT_ID | - SOFT_GEMINI_SENSE_EVENT_ID | - PERIODIC_SCAN_REPORT_EVENT_ID | - PERIODIC_SCAN_COMPLETE_EVENT_ID | - DUMMY_PACKET_EVENT_ID | - PEER_REMOVE_COMPLETE_EVENT_ID | - BA_SESSION_RX_CONSTRAINT_EVENT_ID | - REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID | - INACTIVE_STA_EVENT_ID | - MAX_TX_RETRY_EVENT_ID | - CHANNEL_SWITCH_COMPLETE_EVENT_ID; - - ret = wl1271_event_unmask(wl); - if (ret < 0) { - wl1271_error("EVENT mask setting failed"); - return ret; - } - - wl1271_event_mbox_config(wl); - - /* firmware startup completed */ - return 0; -} - -static int wl1271_boot_write_irq_polarity(struct wl1271 *wl) -{ - u32 polarity; - - polarity = wl1271_top_reg_read(wl, OCP_REG_POLARITY); - - /* We use HIGH polarity, so unset the LOW bit */ - polarity &= ~POLARITY_LOW; - wl1271_top_reg_write(wl, OCP_REG_POLARITY, polarity); - - return 0; -} - -static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl) -{ - u16 spare_reg; - - /* Mask bits [2] & [8:4] in the sys_clk_cfg register */ - spare_reg = wl1271_top_reg_read(wl, WL_SPARE_REG); - if (spare_reg == 0xFFFF) - return -EFAULT; - spare_reg |= (BIT(3) | BIT(5) | BIT(6)); - wl1271_top_reg_write(wl, WL_SPARE_REG, spare_reg); - - /* Enable FREF_CLK_REQ & mux MCS and coex PLLs to FREF */ - wl1271_top_reg_write(wl, SYS_CLK_CFG_REG, - WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF); - - /* Delay execution for 15msec, to let the HW settle */ - mdelay(15); - - return 0; -} - -static bool wl128x_is_tcxo_valid(struct wl1271 *wl) -{ - u16 tcxo_detection; - - tcxo_detection = wl1271_top_reg_read(wl, TCXO_CLK_DETECT_REG); - if (tcxo_detection & TCXO_DET_FAILED) - return false; - - return true; -} - -static bool wl128x_is_fref_valid(struct wl1271 *wl) -{ - u16 fref_detection; - - fref_detection = wl1271_top_reg_read(wl, FREF_CLK_DETECT_REG); - if (fref_detection & FREF_CLK_DETECT_FAIL) - return false; - - return true; -} - -static int wl128x_manually_configure_mcs_pll(struct wl1271 *wl) -{ - wl1271_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL); - wl1271_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL); - wl1271_top_reg_write(wl, MCS_PLL_CONFIG_REG, MCS_PLL_CONFIG_REG_VAL); - - return 0; -} - -static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk) -{ - u16 spare_reg; - u16 pll_config; - u8 input_freq; - - /* Mask bits [3:1] in the sys_clk_cfg register */ - spare_reg = wl1271_top_reg_read(wl, WL_SPARE_REG); - if (spare_reg == 0xFFFF) - return -EFAULT; - spare_reg |= BIT(2); - wl1271_top_reg_write(wl, WL_SPARE_REG, spare_reg); - - /* Handle special cases of the TCXO clock */ - if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_8 || - wl->tcxo_clock == WL12XX_TCXOCLOCK_33_6) - return wl128x_manually_configure_mcs_pll(wl); - - /* Set the input frequency according to the selected clock source */ - input_freq = (clk & 1) + 1; - - pll_config = wl1271_top_reg_read(wl, MCS_PLL_CONFIG_REG); - if (pll_config == 0xFFFF) - return -EFAULT; - pll_config |= (input_freq << MCS_SEL_IN_FREQ_SHIFT); - pll_config |= MCS_PLL_ENABLE_HP; - wl1271_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config); - - return 0; -} - -/* - * WL128x has two clocks input - TCXO and FREF. - * TCXO is the main clock of the device, while FREF is used to sync - * between the GPS and the cellular modem. - * In cases where TCXO is 32.736MHz or 16.368MHz, the FREF will be used - * as the WLAN/BT main clock. 
- */ -static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock) -{ - u16 sys_clk_cfg; - - /* For XTAL-only modes, FREF will be used after switching from TCXO */ - if (wl->ref_clock == WL12XX_REFCLOCK_26_XTAL || - wl->ref_clock == WL12XX_REFCLOCK_38_XTAL) { - if (!wl128x_switch_tcxo_to_fref(wl)) - return -EINVAL; - goto fref_clk; - } - - /* Query the HW, to determine which clock source we should use */ - sys_clk_cfg = wl1271_top_reg_read(wl, SYS_CLK_CFG_REG); - if (sys_clk_cfg == 0xFFFF) - return -EINVAL; - if (sys_clk_cfg & PRCM_CM_EN_MUX_WLAN_FREF) - goto fref_clk; - - /* If TCXO is either 32.736MHz or 16.368MHz, switch to FREF */ - if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_368 || - wl->tcxo_clock == WL12XX_TCXOCLOCK_32_736) { - if (!wl128x_switch_tcxo_to_fref(wl)) - return -EINVAL; - goto fref_clk; - } - - /* TCXO clock is selected */ - if (!wl128x_is_tcxo_valid(wl)) - return -EINVAL; - *selected_clock = wl->tcxo_clock; - goto config_mcs_pll; - -fref_clk: - /* FREF clock is selected */ - if (!wl128x_is_fref_valid(wl)) - return -EINVAL; - *selected_clock = wl->ref_clock; - -config_mcs_pll: - return wl128x_configure_mcs_pll(wl, *selected_clock); -} - -static int wl127x_boot_clk(struct wl1271 *wl) -{ - u32 pause; - u32 clk; - - if (WL127X_PG_GET_MAJOR(wl->hw_pg_ver) < 3) - wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION; - - if (wl->ref_clock == CONF_REF_CLK_19_2_E || - wl->ref_clock == CONF_REF_CLK_38_4_E || - wl->ref_clock == CONF_REF_CLK_38_4_M_XTAL) - /* ref clk: 19.2/38.4/38.4-XTAL */ - clk = 0x3; - else if (wl->ref_clock == CONF_REF_CLK_26_E || - wl->ref_clock == CONF_REF_CLK_52_E) - /* ref clk: 26/52 */ - clk = 0x5; - else - return -EINVAL; - - if (wl->ref_clock != CONF_REF_CLK_19_2_E) { - u16 val; - /* Set clock type (open drain) */ - val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE); - val &= FREF_CLK_TYPE_BITS; - wl1271_top_reg_write(wl, OCP_REG_CLK_TYPE, val); - - /* Set clock pull mode (no pull) */ - val = wl1271_top_reg_read(wl, OCP_REG_CLK_PULL); - val |= NO_PULL; - wl1271_top_reg_write(wl, OCP_REG_CLK_PULL, val); - } else { - u16 val; - /* Set clock polarity */ - val = wl1271_top_reg_read(wl, OCP_REG_CLK_POLARITY); - val &= FREF_CLK_POLARITY_BITS; - val |= CLK_REQ_OUTN_SEL; - wl1271_top_reg_write(wl, OCP_REG_CLK_POLARITY, val); - } - - wl1271_write32(wl, PLL_PARAMETERS, clk); - - pause = wl1271_read32(wl, PLL_PARAMETERS); - - wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause); - - pause &= ~(WU_COUNTER_PAUSE_VAL); - pause |= WU_COUNTER_PAUSE_VAL; - wl1271_write32(wl, WU_COUNTER_PAUSE, pause); - - return 0; -} - -/* uploads NVS and firmware */ -int wl1271_load_firmware(struct wl1271 *wl) -{ - int ret = 0; - u32 tmp, clk; - int selected_clock = -1; - - if (wl->chip.id == CHIP_ID_1283_PG20) { - ret = wl128x_boot_clk(wl, &selected_clock); - if (ret < 0) - goto out; - } else { - ret = wl127x_boot_clk(wl); - if (ret < 0) - goto out; - } - - /* Continue the ELP wake up sequence */ - wl1271_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL); - udelay(500); - - wl1271_set_partition(wl, &wl12xx_part_table[PART_DRPW]); - - /* Read-modify-write DRPW_SCRATCH_START register (see next state) - to be used by DRPw FW. 
The RTRIM value will be added by the FW - before taking DRPw out of reset */ - - wl1271_debug(DEBUG_BOOT, "DRPW_SCRATCH_START %08x", DRPW_SCRATCH_START); - clk = wl1271_read32(wl, DRPW_SCRATCH_START); - - wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk); - - if (wl->chip.id == CHIP_ID_1283_PG20) { - clk |= ((selected_clock & 0x3) << 1) << 4; - } else { - clk |= (wl->ref_clock << 1) << 4; - } - - wl1271_write32(wl, DRPW_SCRATCH_START, clk); - - wl1271_set_partition(wl, &wl12xx_part_table[PART_WORK]); - - /* Disable interrupts */ - wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL); - - ret = wl1271_boot_soft_reset(wl); - if (ret < 0) - goto out; - - /* 2. start processing NVS file */ - ret = wl1271_boot_upload_nvs(wl); - if (ret < 0) - goto out; - - /* write firmware's last address (ie. it's length) to - * ACX_EEPROMLESS_IND_REG */ - wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG"); - - wl1271_write32(wl, ACX_EEPROMLESS_IND_REG, ACX_EEPROMLESS_IND_REG); - - tmp = wl1271_read32(wl, CHIP_ID_B); - - wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp); - - /* 6. read the EEPROM parameters */ - tmp = wl1271_read32(wl, SCR_PAD2); - - /* WL1271: The reference driver skips steps 7 to 10 (jumps directly - * to upload_fw) */ - - if (wl->chip.id == CHIP_ID_1283_PG20) - wl1271_top_reg_write(wl, SDIO_IO_DS, wl->conf.hci_io_ds); - - ret = wl1271_boot_upload_firmware(wl); - if (ret < 0) - goto out; - -out: - return ret; -} -EXPORT_SYMBOL_GPL(wl1271_load_firmware); - -int wl1271_boot(struct wl1271 *wl) -{ - int ret; - - /* upload NVS and firmware */ - ret = wl1271_load_firmware(wl); - if (ret) - return ret; - - /* 10.5 start firmware */ - ret = wl1271_boot_run_firmware(wl); - if (ret < 0) - goto out; - - ret = wl1271_boot_write_irq_polarity(wl); - if (ret < 0) - goto out; - - wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, - WL1271_ACX_ALL_EVENTS_VECTOR); - - /* Enable firmware interrupts now */ - wl1271_boot_enable_interrupts(wl); - - wl1271_event_mbox_config(wl); - -out: - return ret; -} diff --git a/drivers/net/wireless/wl12xx/boot.h b/drivers/net/wireless/wl12xx/boot.h deleted file mode 100644 index c3adc09f403..00000000000 --- a/drivers/net/wireless/wl12xx/boot.h +++ /dev/null @@ -1,120 +0,0 @@ -/* - * This file is part of wl1271 - * - * Copyright (C) 2008-2009 Nokia Corporation - * - * Contact: Luciano Coelho <luciano.coelho@nokia.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - */ - -#ifndef __BOOT_H__ -#define __BOOT_H__ - -#include "wl12xx.h" - -int wl1271_boot(struct wl1271 *wl); -int wl1271_load_firmware(struct wl1271 *wl); - -#define WL1271_NO_SUBBANDS 8 -#define WL1271_NO_POWER_LEVELS 4 -#define WL1271_FW_VERSION_MAX_LEN 20 - -struct wl1271_static_data { - u8 mac_address[ETH_ALEN]; - u8 padding[2]; - u8 fw_version[WL1271_FW_VERSION_MAX_LEN]; - u32 hw_version; - u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS]; -}; - -/* number of times we try to read the INIT interrupt */ -#define INIT_LOOP 20000 - -/* delay between retries */ -#define INIT_LOOP_DELAY 50 - -#define WU_COUNTER_PAUSE_VAL 0x3FF -#define WELP_ARM_COMMAND_VAL 0x4 - -#define OCP_REG_POLARITY 0x0064 -#define OCP_REG_CLK_TYPE 0x0448 -#define OCP_REG_CLK_POLARITY 0x0cb2 -#define OCP_REG_CLK_PULL 0x0cb4 - -#define CMD_MBOX_ADDRESS 0x407B4 - -#define POLARITY_LOW BIT(1) -#define NO_PULL (BIT(14) | BIT(15)) - -#define FREF_CLK_TYPE_BITS 0xfffffe7f -#define CLK_REQ_PRCM 0x100 -#define FREF_CLK_POLARITY_BITS 0xfffff8ff -#define CLK_REQ_OUTN_SEL 0x700 - -/* PLL configuration algorithm for wl128x */ -#define SYS_CLK_CFG_REG 0x2200 -/* Bit[0] - 0-TCXO, 1-FREF */ -#define MCS_PLL_CLK_SEL_FREF BIT(0) -/* Bit[3:2] - 01-TCXO, 10-FREF */ -#define WL_CLK_REQ_TYPE_FREF BIT(3) -#define WL_CLK_REQ_TYPE_PG2 (BIT(3) | BIT(2)) -/* Bit[4] - 0-TCXO, 1-FREF */ -#define PRCM_CM_EN_MUX_WLAN_FREF BIT(4) - -#define TCXO_ILOAD_INT_REG 0x2264 -#define TCXO_CLK_DETECT_REG 0x2266 - -#define TCXO_DET_FAILED BIT(4) - -#define FREF_ILOAD_INT_REG 0x2084 -#define FREF_CLK_DETECT_REG 0x2086 -#define FREF_CLK_DETECT_FAIL BIT(4) - -/* Use this reg for masking during driver access */ -#define WL_SPARE_REG 0x2320 -#define WL_SPARE_VAL BIT(2) -/* Bit[6:5:3] - mask wl write SYS_CLK_CFG[8:5:2:4] */ -#define WL_SPARE_MASK_8526 (BIT(6) | BIT(5) | BIT(3)) - -#define PLL_LOCK_COUNTERS_REG 0xD8C -#define PLL_LOCK_COUNTERS_COEX 0x0F -#define PLL_LOCK_COUNTERS_MCS 0xF0 -#define MCS_PLL_OVERRIDE_REG 0xD90 -#define MCS_PLL_CONFIG_REG 0xD92 -#define MCS_SEL_IN_FREQ_MASK 0x0070 -#define MCS_SEL_IN_FREQ_SHIFT 4 -#define MCS_PLL_CONFIG_REG_VAL 0x73 -#define MCS_PLL_ENABLE_HP (BIT(0) | BIT(1)) - -#define MCS_PLL_M_REG 0xD94 -#define MCS_PLL_N_REG 0xD96 -#define MCS_PLL_M_REG_VAL 0xC8 -#define MCS_PLL_N_REG_VAL 0x07 - -#define SDIO_IO_DS 0xd14 - -/* SDIO/wSPI DS configuration values */ -enum { - HCI_IO_DS_8MA = 0, - HCI_IO_DS_4MA = 1, /* default */ - HCI_IO_DS_6MA = 2, - HCI_IO_DS_2MA = 3, -}; - -/* end PLL configuration algorithm for wl128x */ - -#endif diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index cb6204f7830..e6ec16d92e6 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -266,9 +266,13 @@ struct pn533 { int in_maxlen; struct pn533_frame *in_frame; - struct tasklet_struct tasklet; - struct pn533_frame *tklt_in_frame; - int tklt_in_error; + struct sk_buff_head resp_q; + + struct workqueue_struct *wq; + struct work_struct cmd_work; + struct work_struct mi_work; + struct pn533_frame *wq_in_frame; + int wq_in_error; pn533_cmd_complete_t cmd_complete; void *cmd_complete_arg; @@ -383,15 +387,21 @@ static bool pn533_rx_frame_is_cmd_response(struct pn533_frame *frame, u8 cmd) return (PN533_FRAME_CMD(frame) == PN533_CMD_RESPONSE(cmd)); } -static void pn533_tasklet_cmd_complete(unsigned long arg) + +static 
void pn533_wq_cmd_complete(struct work_struct *work) { - struct pn533 *dev = (struct pn533 *) arg; - struct pn533_frame *in_frame = dev->tklt_in_frame; + struct pn533 *dev = container_of(work, struct pn533, cmd_work); + struct pn533_frame *in_frame; int rc; - if (dev->tklt_in_error) + if (dev == NULL) + return; + + in_frame = dev->wq_in_frame; + + if (dev->wq_in_error) rc = dev->cmd_complete(dev, dev->cmd_complete_arg, NULL, - dev->tklt_in_error); + dev->wq_in_error); else rc = dev->cmd_complete(dev, dev->cmd_complete_arg, PN533_FRAME_CMD_PARAMS_PTR(in_frame), @@ -406,7 +416,7 @@ static void pn533_recv_response(struct urb *urb) struct pn533 *dev = urb->context; struct pn533_frame *in_frame; - dev->tklt_in_frame = NULL; + dev->wq_in_frame = NULL; switch (urb->status) { case 0: @@ -417,36 +427,36 @@ static void pn533_recv_response(struct urb *urb) case -ESHUTDOWN: nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with" " status: %d", urb->status); - dev->tklt_in_error = urb->status; - goto sched_tasklet; + dev->wq_in_error = urb->status; + goto sched_wq; default: nfc_dev_err(&dev->interface->dev, "Nonzero urb status received:" " %d", urb->status); - dev->tklt_in_error = urb->status; - goto sched_tasklet; + dev->wq_in_error = urb->status; + goto sched_wq; } in_frame = dev->in_urb->transfer_buffer; if (!pn533_rx_frame_is_valid(in_frame)) { nfc_dev_err(&dev->interface->dev, "Received an invalid frame"); - dev->tklt_in_error = -EIO; - goto sched_tasklet; + dev->wq_in_error = -EIO; + goto sched_wq; } if (!pn533_rx_frame_is_cmd_response(in_frame, dev->cmd)) { nfc_dev_err(&dev->interface->dev, "The received frame is not " "response to the last command"); - dev->tklt_in_error = -EIO; - goto sched_tasklet; + dev->wq_in_error = -EIO; + goto sched_wq; } nfc_dev_dbg(&dev->interface->dev, "Received a valid frame"); - dev->tklt_in_error = 0; - dev->tklt_in_frame = in_frame; + dev->wq_in_error = 0; + dev->wq_in_frame = in_frame; -sched_tasklet: - tasklet_schedule(&dev->tasklet); +sched_wq: + queue_work(dev->wq, &dev->cmd_work); } static int pn533_submit_urb_for_response(struct pn533 *dev, gfp_t flags) @@ -471,21 +481,21 @@ static void pn533_recv_ack(struct urb *urb) case -ESHUTDOWN: nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with" " status: %d", urb->status); - dev->tklt_in_error = urb->status; - goto sched_tasklet; + dev->wq_in_error = urb->status; + goto sched_wq; default: nfc_dev_err(&dev->interface->dev, "Nonzero urb status received:" " %d", urb->status); - dev->tklt_in_error = urb->status; - goto sched_tasklet; + dev->wq_in_error = urb->status; + goto sched_wq; } in_frame = dev->in_urb->transfer_buffer; if (!pn533_rx_frame_is_ack(in_frame)) { nfc_dev_err(&dev->interface->dev, "Received an invalid ack"); - dev->tklt_in_error = -EIO; - goto sched_tasklet; + dev->wq_in_error = -EIO; + goto sched_wq; } nfc_dev_dbg(&dev->interface->dev, "Received a valid ack"); @@ -494,15 +504,15 @@ static void pn533_recv_ack(struct urb *urb) if (rc) { nfc_dev_err(&dev->interface->dev, "usb_submit_urb failed with" " result %d", rc); - dev->tklt_in_error = rc; - goto sched_tasklet; + dev->wq_in_error = rc; + goto sched_wq; } return; -sched_tasklet: - dev->tklt_in_frame = NULL; - tasklet_schedule(&dev->tasklet); +sched_wq: + dev->wq_in_frame = NULL; + queue_work(dev->wq, &dev->cmd_work); } static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags) @@ -1249,6 +1259,8 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx) dev->tgt_active_prot = 0; + skb_queue_purge(&dev->resp_q); 
+ pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_RELEASE); tg = 1; @@ -1447,11 +1459,49 @@ struct pn533_data_exchange_arg { void *cb_context; }; +static struct sk_buff *pn533_build_response(struct pn533 *dev) +{ + struct sk_buff *skb, *tmp, *t; + unsigned int skb_len = 0, tmp_len = 0; + + nfc_dev_dbg(&dev->interface->dev, "%s\n", __func__); + + if (skb_queue_empty(&dev->resp_q)) + return NULL; + + if (skb_queue_len(&dev->resp_q) == 1) { + skb = skb_dequeue(&dev->resp_q); + goto out; + } + + skb_queue_walk_safe(&dev->resp_q, tmp, t) + skb_len += tmp->len; + + nfc_dev_dbg(&dev->interface->dev, "%s total length %d\n", + __func__, skb_len); + + skb = alloc_skb(skb_len, GFP_KERNEL); + if (skb == NULL) + goto out; + + skb_put(skb, skb_len); + + skb_queue_walk_safe(&dev->resp_q, tmp, t) { + memcpy(skb->data + tmp_len, tmp->data, tmp->len); + tmp_len += tmp->len; + } + +out: + skb_queue_purge(&dev->resp_q); + + return skb; +} + static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg, u8 *params, int params_len) { struct pn533_data_exchange_arg *arg = _arg; - struct sk_buff *skb_resp = arg->skb_resp; + struct sk_buff *skb = NULL, *skb_resp = arg->skb_resp; struct pn533_frame *in_frame = (struct pn533_frame *) skb_resp->data; int err = 0; u8 status; @@ -1459,15 +1509,13 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg, nfc_dev_dbg(&dev->interface->dev, "%s", __func__); - dev_kfree_skb_irq(arg->skb_out); + dev_kfree_skb(arg->skb_out); if (params_len < 0) { /* error */ err = params_len; goto error; } - skb_put(skb_resp, PN533_FRAME_SIZE(in_frame)); - status = params[0]; cmd_ret = status & PN533_CMD_RET_MASK; @@ -1478,25 +1526,27 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg, goto error; } + skb_put(skb_resp, PN533_FRAME_SIZE(in_frame)); + skb_pull(skb_resp, PN533_CMD_DATAEXCH_HEAD_LEN); + skb_trim(skb_resp, skb_resp->len - PN533_FRAME_TAIL_SIZE); + skb_queue_tail(&dev->resp_q, skb_resp); + if (status & PN533_CMD_MI_MASK) { - /* TODO: Implement support to multi-part data exchange */ - nfc_dev_err(&dev->interface->dev, "Multi-part message not yet" - " supported"); - /* Prevent the other messages from controller */ - pn533_send_ack(dev, GFP_ATOMIC); - err = -ENOSYS; - goto error; + queue_work(dev->wq, &dev->mi_work); + return -EINPROGRESS; } - skb_pull(skb_resp, PN533_CMD_DATAEXCH_HEAD_LEN); - skb_trim(skb_resp, skb_resp->len - PN533_FRAME_TAIL_SIZE); + skb = pn533_build_response(dev); + if (skb == NULL) + goto error; - arg->cb(arg->cb_context, skb_resp, 0); + arg->cb(arg->cb_context, skb, 0); kfree(arg); return 0; error: - dev_kfree_skb_irq(skb_resp); + skb_queue_purge(&dev->resp_q); + dev_kfree_skb(skb_resp); arg->cb(arg->cb_context, NULL, err); kfree(arg); return 0; @@ -1571,6 +1621,68 @@ error: return rc; } +static void pn533_wq_mi_recv(struct work_struct *work) +{ + struct pn533 *dev = container_of(work, struct pn533, mi_work); + struct sk_buff *skb_cmd; + struct pn533_data_exchange_arg *arg = dev->cmd_complete_arg; + struct pn533_frame *out_frame, *in_frame; + struct sk_buff *skb_resp; + int skb_resp_len; + int rc; + + nfc_dev_dbg(&dev->interface->dev, "%s", __func__); + + /* This is a zero payload size skb */ + skb_cmd = alloc_skb(PN533_CMD_DATAEXCH_HEAD_LEN + PN533_FRAME_TAIL_SIZE, + GFP_KERNEL); + if (skb_cmd == NULL) + goto error_cmd; + + skb_reserve(skb_cmd, PN533_CMD_DATAEXCH_HEAD_LEN); + + rc = pn533_data_exchange_tx_frame(dev, skb_cmd); + if (rc) + goto error_frame; + + skb_resp_len = PN533_CMD_DATAEXCH_HEAD_LEN + + 
PN533_CMD_DATAEXCH_DATA_MAXLEN + + PN533_FRAME_TAIL_SIZE; + skb_resp = alloc_skb(skb_resp_len, GFP_KERNEL); + if (!skb_resp) { + rc = -ENOMEM; + goto error_frame; + } + + in_frame = (struct pn533_frame *) skb_resp->data; + out_frame = (struct pn533_frame *) skb_cmd->data; + + arg->skb_resp = skb_resp; + arg->skb_out = skb_cmd; + + rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame, + skb_resp_len, + pn533_data_exchange_complete, + dev->cmd_complete_arg, GFP_KERNEL); + if (!rc) + return; + + nfc_dev_err(&dev->interface->dev, "Error %d when trying to" + " perform data_exchange", rc); + + kfree_skb(skb_resp); + +error_frame: + kfree_skb(skb_cmd); + +error_cmd: + pn533_send_ack(dev, GFP_KERNEL); + + kfree(arg); + + up(&dev->cmd_lock); +} + static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata, u8 cfgdata_len) { @@ -1668,7 +1780,15 @@ static int pn533_probe(struct usb_interface *interface, NULL, 0, pn533_send_complete, dev); - tasklet_init(&dev->tasklet, pn533_tasklet_cmd_complete, (ulong)dev); + INIT_WORK(&dev->cmd_work, pn533_wq_cmd_complete); + INIT_WORK(&dev->mi_work, pn533_wq_mi_recv); + dev->wq = alloc_workqueue("pn533", + WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, + 1); + if (dev->wq == NULL) + goto error; + + skb_queue_head_init(&dev->resp_q); usb_set_intfdata(interface, dev); @@ -1678,7 +1798,7 @@ static int pn533_probe(struct usb_interface *interface, rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame, dev->in_maxlen); if (rc) - goto kill_tasklet; + goto destroy_wq; fw_ver = (struct pn533_fw_version *) PN533_FRAME_CMD_PARAMS_PTR(dev->in_frame); @@ -1694,7 +1814,7 @@ static int pn533_probe(struct usb_interface *interface, PN533_CMD_DATAEXCH_HEAD_LEN, PN533_FRAME_TAIL_SIZE); if (!dev->nfc_dev) - goto kill_tasklet; + goto destroy_wq; nfc_set_parent_dev(dev->nfc_dev, &interface->dev); nfc_set_drvdata(dev->nfc_dev, dev); @@ -1720,8 +1840,8 @@ static int pn533_probe(struct usb_interface *interface, free_nfc_dev: nfc_free_device(dev->nfc_dev); -kill_tasklet: - tasklet_kill(&dev->tasklet); +destroy_wq: + destroy_workqueue(dev->wq); error: kfree(dev->in_frame); usb_free_urb(dev->in_urb); @@ -1744,7 +1864,9 @@ static void pn533_disconnect(struct usb_interface *interface) usb_kill_urb(dev->in_urb); usb_kill_urb(dev->out_urb); - tasklet_kill(&dev->tasklet); + destroy_workqueue(dev->wq); + + skb_queue_purge(&dev->resp_q); kfree(dev->in_frame); usb_free_urb(dev->in_urb); diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 483c0adcad8..2574abde8d9 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -45,6 +45,8 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) for (i=0; i<PHY_MAX_ADDR; i++) mdio->irq[i] = PHY_POLL; + mdio->dev.of_node = np; + /* Register the MDIO bus */ rc = mdiobus_register(mdio); if (rc) diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 4bf71028556..953ec3f0847 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2626,6 +2626,18 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374, DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375, quirk_msi_intx_disable_bug); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062, + quirk_msi_intx_disable_bug); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063, + quirk_msi_intx_disable_bug); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060, + quirk_msi_intx_disable_bug); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062, + quirk_msi_intx_disable_bug); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073, 
+ quirk_msi_intx_disable_bug); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083, + quirk_msi_intx_disable_bug); #endif /* CONFIG_PCI_MSI */ /* Allow manual resource allocation for PCI hotplug bridges diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index cd9bc3b129b..5648dad71fb 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig @@ -78,9 +78,13 @@ config PTP_1588_CLOCK_PCH depends on PCH_GBE help This driver adds support for using the PCH EG20T as a PTP - clock. This clock is only useful if your PTP programs are - getting hardware time stamps on the PTP Ethernet packets - using the SO_TIMESTAMPING API. + clock. The hardware supports time stamping of PTP packets + when using the end-to-end delay (E2E) mechansim. The peer + delay mechansim (P2P) is not supported. + + This clock is only useful if your PTP programs are getting + hardware time stamps on the PTP Ethernet packets using the + SO_TIMESTAMPING API. To compile this driver as a module, choose M here: the module will be called ptp_pch. diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index f519a131238..1e528b539a0 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -304,6 +304,12 @@ void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event) } EXPORT_SYMBOL(ptp_clock_event); +int ptp_clock_index(struct ptp_clock *ptp) +{ + return ptp->index; +} +EXPORT_SYMBOL(ptp_clock_index); + /* module operations */ static void __exit ptp_exit(void) diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c index 6f2782bb5f4..e03c40692b0 100644 --- a/drivers/ptp/ptp_ixp46x.c +++ b/drivers/ptp/ptp_ixp46x.c @@ -284,6 +284,7 @@ static void __exit ptp_ixp_exit(void) { free_irq(MASTER_IRQ, &ixp_clock); free_irq(SLAVE_IRQ, &ixp_clock); + ixp46x_phc_index = -1; ptp_clock_unregister(ixp_clock.ptp_clock); } @@ -302,6 +303,8 @@ static int __init ptp_ixp_init(void) if (IS_ERR(ixp_clock.ptp_clock)) return PTR_ERR(ixp_clock.ptp_clock); + ixp46x_phc_index = ptp_clock_index(ixp_clock.ptp_clock); + __raw_writel(DEFAULT_ADDEND, &ixp_clock.regs->addend); __raw_writel(1, &ixp_clock.regs->trgt_lo); __raw_writel(0, &ixp_clock.regs->trgt_hi); diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c index 6fff6802048..3a9c17eced1 100644 --- a/drivers/ptp/ptp_pch.c +++ b/drivers/ptp/ptp_pch.c @@ -262,6 +262,7 @@ u64 pch_rx_snap_read(struct pci_dev *pdev) ns = ((u64) hi) << 32; ns |= lo; + ns <<= TICKS_NS_SHIFT; return ns; } @@ -278,6 +279,7 @@ u64 pch_tx_snap_read(struct pci_dev *pdev) ns = ((u64) hi) << 32; ns |= lo; + ns <<= TICKS_NS_SHIFT; return ns; } @@ -307,7 +309,7 @@ static void pch_reset(struct pch_dev *chip) * traffic on the ethernet interface * @addr: dress which contain the column separated address to be used. 
*/ -static int pch_set_station_address(u8 *addr, struct pci_dev *pdev) +int pch_set_station_address(u8 *addr, struct pci_dev *pdev) { s32 i; struct pch_dev *chip = pci_get_drvdata(pdev); @@ -351,6 +353,7 @@ static int pch_set_station_address(u8 *addr, struct pci_dev *pdev) } return 0; } +EXPORT_SYMBOL(pch_set_station_address); /* * Interrupt service routine @@ -650,8 +653,6 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id) iowrite32(1, &chip->regs->trgt_lo); iowrite32(0, &chip->regs->trgt_hi); iowrite32(PCH_TSE_TTIPEND, &chip->regs->event); - /* Version: IEEE1588 v1 and IEEE1588-2008, Mode: All Evwnt, Locked */ - iowrite32(0x80020000, &chip->regs->ch_control); pch_eth_enable_set(chip); diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index 9b66d2d1809..dfda748c400 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -4,11 +4,10 @@ menu "S/390 network device drivers" config LCS def_tristate m prompt "Lan Channel Station Interface" - depends on CCW && NETDEVICES && (ETHERNET || TR || FDDI) + depends on CCW && NETDEVICES && (ETHERNET || FDDI) help Select this option if you want to use LCS networking on IBM System z. - This device driver supports Token Ring (IEEE 802.5), - FDDI (IEEE 802.7) and Ethernet. + This device driver supports FDDI (IEEE 802.7) and Ethernet. To compile as a module, choose M. The module name is lcs. If you do not know what it is, it's safe to choose Y. diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 687efe4d589..6056cf6da03 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -30,7 +30,6 @@ #include <linux/if.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> -#include <linux/trdevice.h> #include <linux/fddidevice.h> #include <linux/inetdevice.h> #include <linux/in.h> @@ -50,8 +49,7 @@ #include "lcs.h" -#if !defined(CONFIG_ETHERNET) && \ - !defined(CONFIG_TR) && !defined(CONFIG_FDDI) +#if !defined(CONFIG_ETHERNET) && !defined(CONFIG_FDDI) #error Cannot compile lcs.c without some net devices switched on. 
#endif @@ -1166,10 +1164,7 @@ static void lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev) { LCS_DBF_TEXT(4,trace, "getmac"); - if (dev->type == ARPHRD_IEEE802_TR) - ip_tr_mc_map(ipm, mac); - else - ip_eth_mc_map(ipm, mac); + ip_eth_mc_map(ipm, mac); } /** @@ -1641,12 +1636,6 @@ lcs_startlan_auto(struct lcs_card *card) return 0; #endif -#ifdef CONFIG_TR - card->lan_type = LCS_FRAME_TYPE_TR; - rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); - if (rc == 0) - return 0; -#endif #ifdef CONFIG_FDDI card->lan_type = LCS_FRAME_TYPE_FDDI; rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); @@ -2172,12 +2161,6 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) dev = alloc_etherdev(0); break; #endif -#ifdef CONFIG_TR - case LCS_FRAME_TYPE_TR: - card->lan_type_trans = tr_type_trans; - dev = alloc_trdev(0); - break; -#endif #ifdef CONFIG_FDDI case LCS_FRAME_TYPE_FDDI: card->lan_type_trans = fddi_type_trans; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index ec7921b5138..40285dc9ae5 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -13,8 +13,6 @@ #include <linux/if.h> #include <linux/if_arp.h> -#include <linux/if_tr.h> -#include <linux/trdevice.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include <linux/ctype.h> @@ -676,8 +674,6 @@ struct qeth_card_options { struct qeth_ipa_info adp; /*Adapter parameters*/ struct qeth_routing_info route6; struct qeth_ipa_info ipa6; - int broadcast_mode; - int macaddr_mode; int fake_broadcast; int add_hhlen; int layer2; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 8334dadc681..e000001539b 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1329,8 +1329,6 @@ static void qeth_set_intial_options(struct qeth_card *card) { card->options.route4.type = NO_ROUTER; card->options.route6.type = NO_ROUTER; - card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS; - card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL; card->options.fake_broadcast = 0; card->options.add_hhlen = DEFAULT_ADD_HHLEN; card->options.performance_stats = 0; @@ -4911,11 +4909,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, break; case QETH_HEADER_TYPE_LAYER3: skb_len = (*hdr)->hdr.l3.length; - if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || - (card->info.link_type == QETH_LINK_TYPE_HSTR)) - headroom = TR_HLEN; - else - headroom = ETH_HLEN; + headroom = ETH_HLEN; break; case QETH_HEADER_TYPE_OSN: skb_len = (*hdr)->hdr.osn.pdu_length; diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index ff41e42004a..a11b30c3842 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -70,16 +70,6 @@ enum qeth_link_types { QETH_LINK_TYPE_ATM_NATIVE = 0x90, }; -enum qeth_tr_macaddr_modes { - QETH_TR_MACADDR_NONCANONICAL = 0, - QETH_TR_MACADDR_CANONICAL = 1, -}; - -enum qeth_tr_broadcast_modes { - QETH_TR_BROADCAST_ALLRINGS = 0, - QETH_TR_BROADCAST_LOCAL = 1, -}; - /* * Routing stuff */ diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index f8592160768..e7ad03209cb 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -976,57 +976,6 @@ static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type) return ct | QETH_CAST_UNICAST; } -static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command, - __u32 mode) -{ - int rc; - struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd; - - 
QETH_CARD_TEXT(card, 4, "adpmode"); - - iob = qeth_get_adapter_cmd(card, command, - sizeof(struct qeth_ipacmd_setadpparms)); - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.setadapterparms.data.mode = mode; - rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb, - NULL); - return rc; -} - -static int qeth_l3_setadapter_hstr(struct qeth_card *card) -{ - int rc; - - QETH_CARD_TEXT(card, 4, "adphstr"); - - if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) { - rc = qeth_l3_send_setadp_mode(card, - IPA_SETADP_SET_BROADCAST_MODE, - card->options.broadcast_mode); - if (rc) - QETH_DBF_MESSAGE(2, "couldn't set broadcast mode on " - "device %s: x%x\n", - CARD_BUS_ID(card), rc); - rc = qeth_l3_send_setadp_mode(card, - IPA_SETADP_ALTER_MAC_ADDRESS, - card->options.macaddr_mode); - if (rc) - QETH_DBF_MESSAGE(2, "couldn't set macaddr mode on " - "device %s: x%x\n", CARD_BUS_ID(card), rc); - return rc; - } - if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL) - QETH_DBF_MESSAGE(2, "set adapter parameters not available " - "to set broadcast mode, using ALLRINGS " - "on device %s:\n", CARD_BUS_ID(card)); - if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL) - QETH_DBF_MESSAGE(2, "set adapter parameters not available " - "to set macaddr mode, using NONCANONICAL " - "on device %s:\n", CARD_BUS_ID(card)); - return 0; -} - static int qeth_l3_setadapter_parms(struct qeth_card *card) { int rc; @@ -1052,10 +1001,6 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card) " address failed\n"); } - if ((card->info.link_type == QETH_LINK_TYPE_HSTR) || - (card->info.link_type == QETH_LINK_TYPE_LANE_TR)) - rc = qeth_l3_setadapter_hstr(card); - return rc; } @@ -1671,10 +1616,7 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd) static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev) { - if (dev->type == ARPHRD_IEEE802_TR) - ip_tr_mc_map(ipm, mac); - else - ip_eth_mc_map(ipm, mac); + ip_eth_mc_map(ipm, mac); } static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev) @@ -1922,8 +1864,6 @@ static inline int qeth_l3_rebuild_skb(struct qeth_card *card, #endif case __constant_htons(ETH_P_IP): ip_hdr = (struct iphdr *)skb->data; - (card->dev->type == ARPHRD_IEEE802_TR) ? 
- ip_tr_mc_map(ip_hdr->daddr, tg_addr): ip_eth_mc_map(ip_hdr->daddr, tg_addr); break; default: @@ -1959,12 +1899,7 @@ static inline int qeth_l3_rebuild_skb(struct qeth_card *card, tg_addr, "FAKELL", card->dev->addr_len); } -#ifdef CONFIG_TR - if (card->dev->type == ARPHRD_IEEE802_TR) - skb->protocol = tr_type_trans(skb, card->dev); - else -#endif - skb->protocol = eth_type_trans(skb, card->dev); + skb->protocol = eth_type_trans(skb, card->dev); if (hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) { @@ -2138,7 +2073,7 @@ static int qeth_l3_verify_vlan_dev(struct net_device *dev, struct net_device *netdev; rcu_read_lock(); - netdev = __vlan_find_dev_deep(dev, vid); + netdev = __vlan_find_dev_deep(card->dev, vid); rcu_read_unlock(); if (netdev == dev) { rc = QETH_VLAN_CARD; @@ -2883,13 +2818,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU; memcpy(hdr->hdr.l3.dest_addr, pkey, 16); } else { - /* passthrough */ - if ((skb->dev->type == ARPHRD_IEEE802_TR) && - !memcmp(skb->data + sizeof(struct qeth_hdr) + - sizeof(__u16), skb->dev->broadcast, 6)) { - hdr->hdr.l3.flags = QETH_CAST_BROADCAST | - QETH_HDR_PASSTHRU; - } else if (!memcmp(skb->data + sizeof(struct qeth_hdr), + if (!memcmp(skb->data + sizeof(struct qeth_hdr), skb->dev->broadcast, 6)) { /* broadcast? */ hdr->hdr.l3.flags = QETH_CAST_BROADCAST | @@ -3031,10 +2960,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) skb_pull(new_skb, ETH_HLEN); } else { if (ipv == 4) { - if (card->dev->type == ARPHRD_IEEE802_TR) - skb_pull(new_skb, TR_HLEN); - else - skb_pull(new_skb, ETH_HLEN); + skb_pull(new_skb, ETH_HLEN); } if (ipv != 4 && vlan_tx_tag_present(new_skb)) { @@ -3318,12 +3244,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->info.type == QETH_CARD_TYPE_OSX) { if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || (card->info.link_type == QETH_LINK_TYPE_HSTR)) { -#ifdef CONFIG_TR - card->dev = alloc_trdev(0); -#endif - if (!card->dev) - return -ENODEV; - card->dev->netdev_ops = &qeth_l3_netdev_ops; + pr_info("qeth_l3: ignoring TR device\n"); + return -ENODEV; } else { card->dev = alloc_etherdev(0); if (!card->dev) @@ -3680,9 +3602,9 @@ static int qeth_l3_ip_event(struct notifier_block *this, return NOTIFY_DONE; card = qeth_l3_get_card_from_dev(dev); - QETH_CARD_TEXT(card, 3, "ipevent"); if (!card) return NOTIFY_DONE; + QETH_CARD_TEXT(card, 3, "ipevent"); addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); if (addr != NULL) { diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index d979bb26522..4cafedf950a 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -175,116 +175,6 @@ out: static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show, qeth_l3_dev_fake_broadcast_store); -static ssize_t qeth_l3_dev_broadcast_mode_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct qeth_card *card = dev_get_drvdata(dev); - - if (!card) - return -EINVAL; - - if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || - (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) - return sprintf(buf, "n/a\n"); - - return sprintf(buf, "%s\n", (card->options.broadcast_mode == - QETH_TR_BROADCAST_ALLRINGS)? 
- "all rings":"local"); -} - -static ssize_t qeth_l3_dev_broadcast_mode_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct qeth_card *card = dev_get_drvdata(dev); - char *tmp; - int rc = 0; - - if (!card) - return -EINVAL; - - mutex_lock(&card->conf_mutex); - if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) { - rc = -EPERM; - goto out; - } - - if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || - (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) { - rc = -EINVAL; - goto out; - } - - tmp = strsep((char **) &buf, "\n"); - - if (!strcmp(tmp, "local")) - card->options.broadcast_mode = QETH_TR_BROADCAST_LOCAL; - else if (!strcmp(tmp, "all_rings")) - card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS; - else - rc = -EINVAL; -out: - mutex_unlock(&card->conf_mutex); - return rc ? rc : count; -} - -static DEVICE_ATTR(broadcast_mode, 0644, qeth_l3_dev_broadcast_mode_show, - qeth_l3_dev_broadcast_mode_store); - -static ssize_t qeth_l3_dev_canonical_macaddr_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct qeth_card *card = dev_get_drvdata(dev); - - if (!card) - return -EINVAL; - - if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || - (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) - return sprintf(buf, "n/a\n"); - - return sprintf(buf, "%i\n", (card->options.macaddr_mode == - QETH_TR_MACADDR_CANONICAL)? 1:0); -} - -static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct qeth_card *card = dev_get_drvdata(dev); - char *tmp; - int i, rc = 0; - - if (!card) - return -EINVAL; - - mutex_lock(&card->conf_mutex); - if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) { - rc = -EPERM; - goto out; - } - - if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || - (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) { - rc = -EINVAL; - goto out; - } - - i = simple_strtoul(buf, &tmp, 16); - if ((i == 0) || (i == 1)) - card->options.macaddr_mode = i? - QETH_TR_MACADDR_CANONICAL : - QETH_TR_MACADDR_NONCANONICAL; - else - rc = -EINVAL; -out: - mutex_unlock(&card->conf_mutex); - return rc ? 
rc : count; -} - -static DEVICE_ATTR(canonical_macaddr, 0644, qeth_l3_dev_canonical_macaddr_show, - qeth_l3_dev_canonical_macaddr_store); - static ssize_t qeth_l3_dev_sniffer_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -458,8 +348,6 @@ static struct attribute *qeth_l3_device_attrs[] = { &dev_attr_route4.attr, &dev_attr_route6.attr, &dev_attr_fake_broadcast.attr, - &dev_attr_broadcast_mode.attr, - &dev_attr_canonical_macaddr.attr, &dev_attr_sniffer.attr, &dev_attr_hsuid.attr, NULL, diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 453a740fa68..922086105b4 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -662,7 +662,7 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session, /* setup Socket parameters */ sk = sock->sk; - sk->sk_reuse = 1; + sk->sk_reuse = SK_CAN_REUSE; sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */ sk->sk_allocation = GFP_ATOMIC; diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index 91a97b3e45c..5877b2c64e2 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -345,7 +345,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) } if (unlikely (skb->truesize != - sizeof(*skb) + skb_end_pointer(skb) - skb->head)) { + sizeof(*skb) + skb_end_offset(skb))) { /* printk("TX buffer truesize has been changed\n"); */ diff --git a/drivers/staging/ramster/cluster/tcp.c b/drivers/staging/ramster/cluster/tcp.c index 3af1b2c51b7..b9721c1055b 100644 --- a/drivers/staging/ramster/cluster/tcp.c +++ b/drivers/staging/ramster/cluster/tcp.c @@ -2106,7 +2106,7 @@ static int r2net_open_listening_sock(__be32 addr, __be16 port) r2net_listen_sock = sock; INIT_WORK(&r2net_listen_work, r2net_accept_many); - sock->sk->sk_reuse = 1; + sock->sk->sk_reuse = SK_CAN_REUSE; ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); if (ret < 0) { printk(KERN_ERR "ramster: Error %d while binding socket at " diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index 52343654f5d..d4f823f463e 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c @@ -637,7 +637,7 @@ static void rndis_open(struct gether *geth) DBG(cdev, "%s\n", __func__); - rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, + rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, bitrate(cdev->gadget) / 100); rndis_signal_connect(rndis->config); } @@ -648,7 +648,7 @@ static void rndis_close(struct gether *geth) DBG(geth->func.config->cdev, "%s\n", __func__); - rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 0); + rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0); rndis_signal_disconnect(rndis->config); } @@ -765,7 +765,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) goto fail; rndis->config = status; - rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 0); + rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0); rndis_set_host_mac(rndis->config, rndis->ethaddr); #if 0 diff --git a/drivers/usb/gadget/ndis.h b/drivers/usb/gadget/ndis.h index b0e52fc277b..a19f72dec0c 100644 --- a/drivers/usb/gadget/ndis.h +++ b/drivers/usb/gadget/ndis.h @@ -15,11 +15,6 @@ #ifndef _LINUX_NDIS_H #define _LINUX_NDIS_H - -#define NDIS_STATUS_MULTICAST_FULL 0xC0010009 -#define NDIS_STATUS_MULTICAST_EXISTS 0xC001000A -#define NDIS_STATUS_MULTICAST_NOT_FOUND 0xC001000B - enum NDIS_DEVICE_POWER_STATE { NdisDeviceStateUnspecified = 0, NdisDeviceStateD0, @@ -35,11 +30,6 @@ struct 
NDIS_PM_WAKE_UP_CAPABILITIES { enum NDIS_DEVICE_POWER_STATE MinLinkChangeWakeUp; }; -/* NDIS_PNP_CAPABILITIES.Flags constants */ -#define NDIS_DEVICE_WAKE_UP_ENABLE 0x00000001 -#define NDIS_DEVICE_WAKE_ON_PATTERN_MATCH_ENABLE 0x00000002 -#define NDIS_DEVICE_WAKE_ON_MAGIC_PACKET_ENABLE 0x00000004 - struct NDIS_PNP_CAPABILITIES { __le32 Flags; struct NDIS_PM_WAKE_UP_CAPABILITIES WakeUpCapabilities; @@ -54,158 +44,4 @@ struct NDIS_PM_PACKET_PATTERN { __le32 PatternFlags; }; - -/* Required Object IDs (OIDs) */ -#define OID_GEN_SUPPORTED_LIST 0x00010101 -#define OID_GEN_HARDWARE_STATUS 0x00010102 -#define OID_GEN_MEDIA_SUPPORTED 0x00010103 -#define OID_GEN_MEDIA_IN_USE 0x00010104 -#define OID_GEN_MAXIMUM_LOOKAHEAD 0x00010105 -#define OID_GEN_MAXIMUM_FRAME_SIZE 0x00010106 -#define OID_GEN_LINK_SPEED 0x00010107 -#define OID_GEN_TRANSMIT_BUFFER_SPACE 0x00010108 -#define OID_GEN_RECEIVE_BUFFER_SPACE 0x00010109 -#define OID_GEN_TRANSMIT_BLOCK_SIZE 0x0001010A -#define OID_GEN_RECEIVE_BLOCK_SIZE 0x0001010B -#define OID_GEN_VENDOR_ID 0x0001010C -#define OID_GEN_VENDOR_DESCRIPTION 0x0001010D -#define OID_GEN_CURRENT_PACKET_FILTER 0x0001010E -#define OID_GEN_CURRENT_LOOKAHEAD 0x0001010F -#define OID_GEN_DRIVER_VERSION 0x00010110 -#define OID_GEN_MAXIMUM_TOTAL_SIZE 0x00010111 -#define OID_GEN_PROTOCOL_OPTIONS 0x00010112 -#define OID_GEN_MAC_OPTIONS 0x00010113 -#define OID_GEN_MEDIA_CONNECT_STATUS 0x00010114 -#define OID_GEN_MAXIMUM_SEND_PACKETS 0x00010115 -#define OID_GEN_VENDOR_DRIVER_VERSION 0x00010116 -#define OID_GEN_SUPPORTED_GUIDS 0x00010117 -#define OID_GEN_NETWORK_LAYER_ADDRESSES 0x00010118 -#define OID_GEN_TRANSPORT_HEADER_OFFSET 0x00010119 -#define OID_GEN_MACHINE_NAME 0x0001021A -#define OID_GEN_RNDIS_CONFIG_PARAMETER 0x0001021B -#define OID_GEN_VLAN_ID 0x0001021C - -/* Optional OIDs */ -#define OID_GEN_MEDIA_CAPABILITIES 0x00010201 -#define OID_GEN_PHYSICAL_MEDIUM 0x00010202 - -/* Required statistics OIDs */ -#define OID_GEN_XMIT_OK 0x00020101 -#define OID_GEN_RCV_OK 0x00020102 -#define OID_GEN_XMIT_ERROR 0x00020103 -#define OID_GEN_RCV_ERROR 0x00020104 -#define OID_GEN_RCV_NO_BUFFER 0x00020105 - -/* Optional statistics OIDs */ -#define OID_GEN_DIRECTED_BYTES_XMIT 0x00020201 -#define OID_GEN_DIRECTED_FRAMES_XMIT 0x00020202 -#define OID_GEN_MULTICAST_BYTES_XMIT 0x00020203 -#define OID_GEN_MULTICAST_FRAMES_XMIT 0x00020204 -#define OID_GEN_BROADCAST_BYTES_XMIT 0x00020205 -#define OID_GEN_BROADCAST_FRAMES_XMIT 0x00020206 -#define OID_GEN_DIRECTED_BYTES_RCV 0x00020207 -#define OID_GEN_DIRECTED_FRAMES_RCV 0x00020208 -#define OID_GEN_MULTICAST_BYTES_RCV 0x00020209 -#define OID_GEN_MULTICAST_FRAMES_RCV 0x0002020A -#define OID_GEN_BROADCAST_BYTES_RCV 0x0002020B -#define OID_GEN_BROADCAST_FRAMES_RCV 0x0002020C -#define OID_GEN_RCV_CRC_ERROR 0x0002020D -#define OID_GEN_TRANSMIT_QUEUE_LENGTH 0x0002020E -#define OID_GEN_GET_TIME_CAPS 0x0002020F -#define OID_GEN_GET_NETCARD_TIME 0x00020210 -#define OID_GEN_NETCARD_LOAD 0x00020211 -#define OID_GEN_DEVICE_PROFILE 0x00020212 -#define OID_GEN_INIT_TIME_MS 0x00020213 -#define OID_GEN_RESET_COUNTS 0x00020214 -#define OID_GEN_MEDIA_SENSE_COUNTS 0x00020215 -#define OID_GEN_FRIENDLY_NAME 0x00020216 -#define OID_GEN_MINIPORT_INFO 0x00020217 -#define OID_GEN_RESET_VERIFY_PARAMETERS 0x00020218 - -/* IEEE 802.3 (Ethernet) OIDs */ -#define NDIS_802_3_MAC_OPTION_PRIORITY 0x00000001 - -#define OID_802_3_PERMANENT_ADDRESS 0x01010101 -#define OID_802_3_CURRENT_ADDRESS 0x01010102 -#define OID_802_3_MULTICAST_LIST 0x01010103 -#define OID_802_3_MAXIMUM_LIST_SIZE 0x01010104 
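/*
 * The private NDIS_* and OID_* constants dropped from this gadget ndis.h
 * duplicate the shared definitions in <linux/rndis.h>, which rndis.h below
 * starts including; the code switches to the RNDIS_-prefixed names from
 * that header. A minimal sketch of the rename, assuming the shared header
 * provides the equivalents used in the hunks that follow:
 *
 *	#include <linux/rndis.h>
 *
 *	oid    = RNDIS_OID_802_3_CURRENT_ADDRESS;  // was OID_802_3_CURRENT_ADDRESS
 *	medium = RNDIS_MEDIUM_802_3;               // was NDIS_MEDIUM_802_3
 */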
-#define OID_802_3_MAC_OPTIONS 0x01010105 -#define OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101 -#define OID_802_3_XMIT_ONE_COLLISION 0x01020102 -#define OID_802_3_XMIT_MORE_COLLISIONS 0x01020103 -#define OID_802_3_XMIT_DEFERRED 0x01020201 -#define OID_802_3_XMIT_MAX_COLLISIONS 0x01020202 -#define OID_802_3_RCV_OVERRUN 0x01020203 -#define OID_802_3_XMIT_UNDERRUN 0x01020204 -#define OID_802_3_XMIT_HEARTBEAT_FAILURE 0x01020205 -#define OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206 -#define OID_802_3_XMIT_LATE_COLLISIONS 0x01020207 - -/* OID_GEN_MINIPORT_INFO constants */ -#define NDIS_MINIPORT_BUS_MASTER 0x00000001 -#define NDIS_MINIPORT_WDM_DRIVER 0x00000002 -#define NDIS_MINIPORT_SG_LIST 0x00000004 -#define NDIS_MINIPORT_SUPPORTS_MEDIA_QUERY 0x00000008 -#define NDIS_MINIPORT_INDICATES_PACKETS 0x00000010 -#define NDIS_MINIPORT_IGNORE_PACKET_QUEUE 0x00000020 -#define NDIS_MINIPORT_IGNORE_REQUEST_QUEUE 0x00000040 -#define NDIS_MINIPORT_IGNORE_TOKEN_RING_ERRORS 0x00000080 -#define NDIS_MINIPORT_INTERMEDIATE_DRIVER 0x00000100 -#define NDIS_MINIPORT_IS_NDIS_5 0x00000200 -#define NDIS_MINIPORT_IS_CO 0x00000400 -#define NDIS_MINIPORT_DESERIALIZE 0x00000800 -#define NDIS_MINIPORT_REQUIRES_MEDIA_POLLING 0x00001000 -#define NDIS_MINIPORT_SUPPORTS_MEDIA_SENSE 0x00002000 -#define NDIS_MINIPORT_NETBOOT_CARD 0x00004000 -#define NDIS_MINIPORT_PM_SUPPORTED 0x00008000 -#define NDIS_MINIPORT_SUPPORTS_MAC_ADDRESS_OVERWRITE 0x00010000 -#define NDIS_MINIPORT_USES_SAFE_BUFFER_APIS 0x00020000 -#define NDIS_MINIPORT_HIDDEN 0x00040000 -#define NDIS_MINIPORT_SWENUM 0x00080000 -#define NDIS_MINIPORT_SURPRISE_REMOVE_OK 0x00100000 -#define NDIS_MINIPORT_NO_HALT_ON_SUSPEND 0x00200000 -#define NDIS_MINIPORT_HARDWARE_DEVICE 0x00400000 -#define NDIS_MINIPORT_SUPPORTS_CANCEL_SEND_PACKETS 0x00800000 -#define NDIS_MINIPORT_64BITS_DMA 0x01000000 - -#define NDIS_MEDIUM_802_3 0x00000000 -#define NDIS_MEDIUM_802_5 0x00000001 -#define NDIS_MEDIUM_FDDI 0x00000002 -#define NDIS_MEDIUM_WAN 0x00000003 -#define NDIS_MEDIUM_LOCAL_TALK 0x00000004 -#define NDIS_MEDIUM_DIX 0x00000005 -#define NDIS_MEDIUM_ARCENT_RAW 0x00000006 -#define NDIS_MEDIUM_ARCENT_878_2 0x00000007 -#define NDIS_MEDIUM_ATM 0x00000008 -#define NDIS_MEDIUM_WIRELESS_LAN 0x00000009 -#define NDIS_MEDIUM_IRDA 0x0000000A -#define NDIS_MEDIUM_BPC 0x0000000B -#define NDIS_MEDIUM_CO_WAN 0x0000000C -#define NDIS_MEDIUM_1394 0x0000000D - -#define NDIS_PACKET_TYPE_DIRECTED 0x00000001 -#define NDIS_PACKET_TYPE_MULTICAST 0x00000002 -#define NDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004 -#define NDIS_PACKET_TYPE_BROADCAST 0x00000008 -#define NDIS_PACKET_TYPE_SOURCE_ROUTING 0x00000010 -#define NDIS_PACKET_TYPE_PROMISCUOUS 0x00000020 -#define NDIS_PACKET_TYPE_SMT 0x00000040 -#define NDIS_PACKET_TYPE_ALL_LOCAL 0x00000080 -#define NDIS_PACKET_TYPE_GROUP 0x00000100 -#define NDIS_PACKET_TYPE_ALL_FUNCTIONAL 0x00000200 -#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400 -#define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800 - -#define NDIS_MEDIA_STATE_CONNECTED 0x00000000 -#define NDIS_MEDIA_STATE_DISCONNECTED 0x00000001 - -#define NDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA 0x00000001 -#define NDIS_MAC_OPTION_RECEIVE_SERIALIZED 0x00000002 -#define NDIS_MAC_OPTION_TRANSFERS_NOT_PEND 0x00000004 -#define NDIS_MAC_OPTION_NO_LOOPBACK 0x00000008 -#define NDIS_MAC_OPTION_FULL_DUPLEX 0x00000010 -#define NDIS_MAC_OPTION_EOTX_INDICATION 0x00000020 -#define NDIS_MAC_OPTION_8021P_PRIORITY 0x00000040 -#define NDIS_MAC_OPTION_RESERVED 0x80000000 - #endif /* _LINUX_NDIS_H */ diff --git a/drivers/usb/gadget/rndis.c 
b/drivers/usb/gadget/rndis.c index 73a934a170d..b35babed6fc 100644 --- a/drivers/usb/gadget/rndis.c +++ b/drivers/usb/gadget/rndis.c @@ -73,65 +73,65 @@ static rndis_resp_t *rndis_add_response(int configNr, u32 length); static const u32 oid_supported_list[] = { /* the general stuff */ - OID_GEN_SUPPORTED_LIST, - OID_GEN_HARDWARE_STATUS, - OID_GEN_MEDIA_SUPPORTED, - OID_GEN_MEDIA_IN_USE, - OID_GEN_MAXIMUM_FRAME_SIZE, - OID_GEN_LINK_SPEED, - OID_GEN_TRANSMIT_BLOCK_SIZE, - OID_GEN_RECEIVE_BLOCK_SIZE, - OID_GEN_VENDOR_ID, - OID_GEN_VENDOR_DESCRIPTION, - OID_GEN_VENDOR_DRIVER_VERSION, - OID_GEN_CURRENT_PACKET_FILTER, - OID_GEN_MAXIMUM_TOTAL_SIZE, - OID_GEN_MEDIA_CONNECT_STATUS, - OID_GEN_PHYSICAL_MEDIUM, + RNDIS_OID_GEN_SUPPORTED_LIST, + RNDIS_OID_GEN_HARDWARE_STATUS, + RNDIS_OID_GEN_MEDIA_SUPPORTED, + RNDIS_OID_GEN_MEDIA_IN_USE, + RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE, + RNDIS_OID_GEN_LINK_SPEED, + RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE, + RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE, + RNDIS_OID_GEN_VENDOR_ID, + RNDIS_OID_GEN_VENDOR_DESCRIPTION, + RNDIS_OID_GEN_VENDOR_DRIVER_VERSION, + RNDIS_OID_GEN_CURRENT_PACKET_FILTER, + RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE, + RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, + RNDIS_OID_GEN_PHYSICAL_MEDIUM, /* the statistical stuff */ - OID_GEN_XMIT_OK, - OID_GEN_RCV_OK, - OID_GEN_XMIT_ERROR, - OID_GEN_RCV_ERROR, - OID_GEN_RCV_NO_BUFFER, + RNDIS_OID_GEN_XMIT_OK, + RNDIS_OID_GEN_RCV_OK, + RNDIS_OID_GEN_XMIT_ERROR, + RNDIS_OID_GEN_RCV_ERROR, + RNDIS_OID_GEN_RCV_NO_BUFFER, #ifdef RNDIS_OPTIONAL_STATS - OID_GEN_DIRECTED_BYTES_XMIT, - OID_GEN_DIRECTED_FRAMES_XMIT, - OID_GEN_MULTICAST_BYTES_XMIT, - OID_GEN_MULTICAST_FRAMES_XMIT, - OID_GEN_BROADCAST_BYTES_XMIT, - OID_GEN_BROADCAST_FRAMES_XMIT, - OID_GEN_DIRECTED_BYTES_RCV, - OID_GEN_DIRECTED_FRAMES_RCV, - OID_GEN_MULTICAST_BYTES_RCV, - OID_GEN_MULTICAST_FRAMES_RCV, - OID_GEN_BROADCAST_BYTES_RCV, - OID_GEN_BROADCAST_FRAMES_RCV, - OID_GEN_RCV_CRC_ERROR, - OID_GEN_TRANSMIT_QUEUE_LENGTH, + RNDIS_OID_GEN_DIRECTED_BYTES_XMIT, + RNDIS_OID_GEN_DIRECTED_FRAMES_XMIT, + RNDIS_OID_GEN_MULTICAST_BYTES_XMIT, + RNDIS_OID_GEN_MULTICAST_FRAMES_XMIT, + RNDIS_OID_GEN_BROADCAST_BYTES_XMIT, + RNDIS_OID_GEN_BROADCAST_FRAMES_XMIT, + RNDIS_OID_GEN_DIRECTED_BYTES_RCV, + RNDIS_OID_GEN_DIRECTED_FRAMES_RCV, + RNDIS_OID_GEN_MULTICAST_BYTES_RCV, + RNDIS_OID_GEN_MULTICAST_FRAMES_RCV, + RNDIS_OID_GEN_BROADCAST_BYTES_RCV, + RNDIS_OID_GEN_BROADCAST_FRAMES_RCV, + RNDIS_OID_GEN_RCV_CRC_ERROR, + RNDIS_OID_GEN_TRANSMIT_QUEUE_LENGTH, #endif /* RNDIS_OPTIONAL_STATS */ /* mandatory 802.3 */ /* the general stuff */ - OID_802_3_PERMANENT_ADDRESS, - OID_802_3_CURRENT_ADDRESS, - OID_802_3_MULTICAST_LIST, - OID_802_3_MAC_OPTIONS, - OID_802_3_MAXIMUM_LIST_SIZE, + RNDIS_OID_802_3_PERMANENT_ADDRESS, + RNDIS_OID_802_3_CURRENT_ADDRESS, + RNDIS_OID_802_3_MULTICAST_LIST, + RNDIS_OID_802_3_MAC_OPTIONS, + RNDIS_OID_802_3_MAXIMUM_LIST_SIZE, /* the statistical stuff */ - OID_802_3_RCV_ERROR_ALIGNMENT, - OID_802_3_XMIT_ONE_COLLISION, - OID_802_3_XMIT_MORE_COLLISIONS, + RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT, + RNDIS_OID_802_3_XMIT_ONE_COLLISION, + RNDIS_OID_802_3_XMIT_MORE_COLLISIONS, #ifdef RNDIS_OPTIONAL_STATS - OID_802_3_XMIT_DEFERRED, - OID_802_3_XMIT_MAX_COLLISIONS, - OID_802_3_RCV_OVERRUN, - OID_802_3_XMIT_UNDERRUN, - OID_802_3_XMIT_HEARTBEAT_FAILURE, - OID_802_3_XMIT_TIMES_CRS_LOST, - OID_802_3_XMIT_LATE_COLLISIONS, + RNDIS_OID_802_3_XMIT_DEFERRED, + RNDIS_OID_802_3_XMIT_MAX_COLLISIONS, + RNDIS_OID_802_3_RCV_OVERRUN, + RNDIS_OID_802_3_XMIT_UNDERRUN, + RNDIS_OID_802_3_XMIT_HEARTBEAT_FAILURE, + 
RNDIS_OID_802_3_XMIT_TIMES_CRS_LOST, + RNDIS_OID_802_3_XMIT_LATE_COLLISIONS, #endif /* RNDIS_OPTIONAL_STATS */ #ifdef RNDIS_PM @@ -200,8 +200,8 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, /* general oids (table 4-1) */ /* mandatory */ - case OID_GEN_SUPPORTED_LIST: - pr_debug("%s: OID_GEN_SUPPORTED_LIST\n", __func__); + case RNDIS_OID_GEN_SUPPORTED_LIST: + pr_debug("%s: RNDIS_OID_GEN_SUPPORTED_LIST\n", __func__); length = sizeof(oid_supported_list); count = length / sizeof(u32); for (i = 0; i < count; i++) @@ -210,8 +210,8 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, break; /* mandatory */ - case OID_GEN_HARDWARE_STATUS: - pr_debug("%s: OID_GEN_HARDWARE_STATUS\n", __func__); + case RNDIS_OID_GEN_HARDWARE_STATUS: + pr_debug("%s: RNDIS_OID_GEN_HARDWARE_STATUS\n", __func__); /* Bogus question! * Hardware must be ready to receive high level protocols. * BTW: @@ -223,23 +223,23 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, break; /* mandatory */ - case OID_GEN_MEDIA_SUPPORTED: - pr_debug("%s: OID_GEN_MEDIA_SUPPORTED\n", __func__); + case RNDIS_OID_GEN_MEDIA_SUPPORTED: + pr_debug("%s: RNDIS_OID_GEN_MEDIA_SUPPORTED\n", __func__); *outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium); retval = 0; break; /* mandatory */ - case OID_GEN_MEDIA_IN_USE: - pr_debug("%s: OID_GEN_MEDIA_IN_USE\n", __func__); + case RNDIS_OID_GEN_MEDIA_IN_USE: + pr_debug("%s: RNDIS_OID_GEN_MEDIA_IN_USE\n", __func__); /* one medium, one transport... (maybe you do it better) */ *outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium); retval = 0; break; /* mandatory */ - case OID_GEN_MAXIMUM_FRAME_SIZE: - pr_debug("%s: OID_GEN_MAXIMUM_FRAME_SIZE\n", __func__); + case RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE: + pr_debug("%s: RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE\n", __func__); if (rndis_per_dev_params[configNr].dev) { *outbuf = cpu_to_le32( rndis_per_dev_params[configNr].dev->mtu); @@ -248,11 +248,11 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, break; /* mandatory */ - case OID_GEN_LINK_SPEED: + case RNDIS_OID_GEN_LINK_SPEED: if (rndis_debug > 1) - pr_debug("%s: OID_GEN_LINK_SPEED\n", __func__); + pr_debug("%s: RNDIS_OID_GEN_LINK_SPEED\n", __func__); if (rndis_per_dev_params[configNr].media_state - == NDIS_MEDIA_STATE_DISCONNECTED) + == RNDIS_MEDIA_STATE_DISCONNECTED) *outbuf = cpu_to_le32(0); else *outbuf = cpu_to_le32( @@ -261,8 +261,8 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, break; /* mandatory */ - case OID_GEN_TRANSMIT_BLOCK_SIZE: - pr_debug("%s: OID_GEN_TRANSMIT_BLOCK_SIZE\n", __func__); + case RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE: + pr_debug("%s: RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE\n", __func__); if (rndis_per_dev_params[configNr].dev) { *outbuf = cpu_to_le32( rndis_per_dev_params[configNr].dev->mtu); @@ -271,8 +271,8 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, break; /* mandatory */ - case OID_GEN_RECEIVE_BLOCK_SIZE: - pr_debug("%s: OID_GEN_RECEIVE_BLOCK_SIZE\n", __func__); + case RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE: + pr_debug("%s: RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE\n", __func__); if (rndis_per_dev_params[configNr].dev) { *outbuf = cpu_to_le32( rndis_per_dev_params[configNr].dev->mtu); @@ -281,16 +281,16 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, break; /* mandatory */ - case OID_GEN_VENDOR_ID: - pr_debug("%s: OID_GEN_VENDOR_ID\n", __func__); + case RNDIS_OID_GEN_VENDOR_ID: + pr_debug("%s: RNDIS_OID_GEN_VENDOR_ID\n", __func__); *outbuf = cpu_to_le32( 
rndis_per_dev_params[configNr].vendorID); retval = 0; break; /* mandatory */ - case OID_GEN_VENDOR_DESCRIPTION: - pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__); + case RNDIS_OID_GEN_VENDOR_DESCRIPTION: + pr_debug("%s: RNDIS_OID_GEN_VENDOR_DESCRIPTION\n", __func__); if (rndis_per_dev_params[configNr].vendorDescr) { length = strlen(rndis_per_dev_params[configNr]. vendorDescr); @@ -303,38 +303,38 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, retval = 0; break; - case OID_GEN_VENDOR_DRIVER_VERSION: - pr_debug("%s: OID_GEN_VENDOR_DRIVER_VERSION\n", __func__); + case RNDIS_OID_GEN_VENDOR_DRIVER_VERSION: + pr_debug("%s: RNDIS_OID_GEN_VENDOR_DRIVER_VERSION\n", __func__); /* Created as LE */ *outbuf = rndis_driver_version; retval = 0; break; /* mandatory */ - case OID_GEN_CURRENT_PACKET_FILTER: - pr_debug("%s: OID_GEN_CURRENT_PACKET_FILTER\n", __func__); + case RNDIS_OID_GEN_CURRENT_PACKET_FILTER: + pr_debug("%s: RNDIS_OID_GEN_CURRENT_PACKET_FILTER\n", __func__); *outbuf = cpu_to_le32(*rndis_per_dev_params[configNr].filter); retval = 0; break; /* mandatory */ - case OID_GEN_MAXIMUM_TOTAL_SIZE: - pr_debug("%s: OID_GEN_MAXIMUM_TOTAL_SIZE\n", __func__); + case RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE: + pr_debug("%s: RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE\n", __func__); *outbuf = cpu_to_le32(RNDIS_MAX_TOTAL_SIZE); retval = 0; break; /* mandatory */ - case OID_GEN_MEDIA_CONNECT_STATUS: + case RNDIS_OID_GEN_MEDIA_CONNECT_STATUS: if (rndis_debug > 1) - pr_debug("%s: OID_GEN_MEDIA_CONNECT_STATUS\n", __func__); + pr_debug("%s: RNDIS_OID_GEN_MEDIA_CONNECT_STATUS\n", __func__); *outbuf = cpu_to_le32(rndis_per_dev_params[configNr] .media_state); retval = 0; break; - case OID_GEN_PHYSICAL_MEDIUM: - pr_debug("%s: OID_GEN_PHYSICAL_MEDIUM\n", __func__); + case RNDIS_OID_GEN_PHYSICAL_MEDIUM: + pr_debug("%s: RNDIS_OID_GEN_PHYSICAL_MEDIUM\n", __func__); *outbuf = cpu_to_le32(0); retval = 0; break; @@ -343,20 +343,20 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, * of MS-Windows expect OIDs that aren't specified there. Other * versions emit undefined RNDIS messages. DOCUMENT ALL THESE! 
*/ - case OID_GEN_MAC_OPTIONS: /* from WinME */ - pr_debug("%s: OID_GEN_MAC_OPTIONS\n", __func__); + case RNDIS_OID_GEN_MAC_OPTIONS: /* from WinME */ + pr_debug("%s: RNDIS_OID_GEN_MAC_OPTIONS\n", __func__); *outbuf = cpu_to_le32( - NDIS_MAC_OPTION_RECEIVE_SERIALIZED - | NDIS_MAC_OPTION_FULL_DUPLEX); + RNDIS_MAC_OPTION_RECEIVE_SERIALIZED + | RNDIS_MAC_OPTION_FULL_DUPLEX); retval = 0; break; /* statistics OIDs (table 4-2) */ /* mandatory */ - case OID_GEN_XMIT_OK: + case RNDIS_OID_GEN_XMIT_OK: if (rndis_debug > 1) - pr_debug("%s: OID_GEN_XMIT_OK\n", __func__); + pr_debug("%s: RNDIS_OID_GEN_XMIT_OK\n", __func__); if (stats) { *outbuf = cpu_to_le32(stats->tx_packets - stats->tx_errors - stats->tx_dropped); @@ -365,9 +365,9 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, break; /* mandatory */ - case OID_GEN_RCV_OK: + case RNDIS_OID_GEN_RCV_OK: if (rndis_debug > 1) - pr_debug("%s: OID_GEN_RCV_OK\n", __func__); + pr_debug("%s: RNDIS_OID_GEN_RCV_OK\n", __func__); if (stats) { *outbuf = cpu_to_le32(stats->rx_packets - stats->rx_errors - stats->rx_dropped); @@ -376,9 +376,9 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, break; /* mandatory */ - case OID_GEN_XMIT_ERROR: + case RNDIS_OID_GEN_XMIT_ERROR: if (rndis_debug > 1) - pr_debug("%s: OID_GEN_XMIT_ERROR\n", __func__); + pr_debug("%s: RNDIS_OID_GEN_XMIT_ERROR\n", __func__); if (stats) { *outbuf = cpu_to_le32(stats->tx_errors); retval = 0; @@ -386,9 +386,9 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, break; /* mandatory */ - case OID_GEN_RCV_ERROR: + case RNDIS_OID_GEN_RCV_ERROR: if (rndis_debug > 1) - pr_debug("%s: OID_GEN_RCV_ERROR\n", __func__); + pr_debug("%s: RNDIS_OID_GEN_RCV_ERROR\n", __func__); if (stats) { *outbuf = cpu_to_le32(stats->rx_errors); retval = 0; @@ -396,8 +396,8 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, break; /* mandatory */ - case OID_GEN_RCV_NO_BUFFER: - pr_debug("%s: OID_GEN_RCV_NO_BUFFER\n", __func__); + case RNDIS_OID_GEN_RCV_NO_BUFFER: + pr_debug("%s: RNDIS_OID_GEN_RCV_NO_BUFFER\n", __func__); if (stats) { *outbuf = cpu_to_le32(stats->rx_dropped); retval = 0; @@ -407,8 +407,8 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, /* ieee802.3 OIDs (table 4-3) */ /* mandatory */ - case OID_802_3_PERMANENT_ADDRESS: - pr_debug("%s: OID_802_3_PERMANENT_ADDRESS\n", __func__); + case RNDIS_OID_802_3_PERMANENT_ADDRESS: + pr_debug("%s: RNDIS_OID_802_3_PERMANENT_ADDRESS\n", __func__); if (rndis_per_dev_params[configNr].dev) { length = ETH_ALEN; memcpy(outbuf, @@ -419,8 +419,8 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, break; /* mandatory */ - case OID_802_3_CURRENT_ADDRESS: - pr_debug("%s: OID_802_3_CURRENT_ADDRESS\n", __func__); + case RNDIS_OID_802_3_CURRENT_ADDRESS: + pr_debug("%s: RNDIS_OID_802_3_CURRENT_ADDRESS\n", __func__); if (rndis_per_dev_params[configNr].dev) { length = ETH_ALEN; memcpy(outbuf, @@ -431,23 +431,23 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, break; /* mandatory */ - case OID_802_3_MULTICAST_LIST: - pr_debug("%s: OID_802_3_MULTICAST_LIST\n", __func__); + case RNDIS_OID_802_3_MULTICAST_LIST: + pr_debug("%s: RNDIS_OID_802_3_MULTICAST_LIST\n", __func__); /* Multicast base address only */ *outbuf = cpu_to_le32(0xE0000000); retval = 0; break; /* mandatory */ - case OID_802_3_MAXIMUM_LIST_SIZE: - pr_debug("%s: OID_802_3_MAXIMUM_LIST_SIZE\n", __func__); + case RNDIS_OID_802_3_MAXIMUM_LIST_SIZE: + pr_debug("%s: RNDIS_OID_802_3_MAXIMUM_LIST_SIZE\n", __func__); /* Multicast 
base address only */ *outbuf = cpu_to_le32(1); retval = 0; break; - case OID_802_3_MAC_OPTIONS: - pr_debug("%s: OID_802_3_MAC_OPTIONS\n", __func__); + case RNDIS_OID_802_3_MAC_OPTIONS: + pr_debug("%s: RNDIS_OID_802_3_MAC_OPTIONS\n", __func__); *outbuf = cpu_to_le32(0); retval = 0; break; @@ -455,8 +455,8 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, /* ieee802.3 statistics OIDs (table 4-4) */ /* mandatory */ - case OID_802_3_RCV_ERROR_ALIGNMENT: - pr_debug("%s: OID_802_3_RCV_ERROR_ALIGNMENT\n", __func__); + case RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT: + pr_debug("%s: RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT\n", __func__); if (stats) { *outbuf = cpu_to_le32(stats->rx_frame_errors); retval = 0; @@ -464,15 +464,15 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, break; /* mandatory */ - case OID_802_3_XMIT_ONE_COLLISION: - pr_debug("%s: OID_802_3_XMIT_ONE_COLLISION\n", __func__); + case RNDIS_OID_802_3_XMIT_ONE_COLLISION: + pr_debug("%s: RNDIS_OID_802_3_XMIT_ONE_COLLISION\n", __func__); *outbuf = cpu_to_le32(0); retval = 0; break; /* mandatory */ - case OID_802_3_XMIT_MORE_COLLISIONS: - pr_debug("%s: OID_802_3_XMIT_MORE_COLLISIONS\n", __func__); + case RNDIS_OID_802_3_XMIT_MORE_COLLISIONS: + pr_debug("%s: RNDIS_OID_802_3_XMIT_MORE_COLLISIONS\n", __func__); *outbuf = cpu_to_le32(0); retval = 0; break; @@ -516,7 +516,7 @@ static int gen_ndis_set_resp(u8 configNr, u32 OID, u8 *buf, u32 buf_len, params = &rndis_per_dev_params[configNr]; switch (OID) { - case OID_GEN_CURRENT_PACKET_FILTER: + case RNDIS_OID_GEN_CURRENT_PACKET_FILTER: /* these NDIS_PACKET_TYPE_* bitflags are shared with * cdc_filter; it's not RNDIS-specific @@ -525,7 +525,7 @@ static int gen_ndis_set_resp(u8 configNr, u32 OID, u8 *buf, u32 buf_len, * MULTICAST, ALL_MULTICAST, BROADCAST */ *params->filter = (u16)get_unaligned_le32(buf); - pr_debug("%s: OID_GEN_CURRENT_PACKET_FILTER %08x\n", + pr_debug("%s: RNDIS_OID_GEN_CURRENT_PACKET_FILTER %08x\n", __func__, *params->filter); /* this call has a significant side effect: it's @@ -545,9 +545,9 @@ static int gen_ndis_set_resp(u8 configNr, u32 OID, u8 *buf, u32 buf_len, } break; - case OID_802_3_MULTICAST_LIST: + case RNDIS_OID_802_3_MULTICAST_LIST: /* I think we can ignore this */ - pr_debug("%s: OID_802_3_MULTICAST_LIST\n", __func__); + pr_debug("%s: RNDIS_OID_802_3_MULTICAST_LIST\n", __func__); retval = 0; break; @@ -577,7 +577,7 @@ static int rndis_init_response(int configNr, rndis_init_msg_type *buf) return -ENOMEM; resp = (rndis_init_cmplt_type *)r->buf; - resp->MessageType = cpu_to_le32(REMOTE_NDIS_INITIALIZE_CMPLT); + resp->MessageType = cpu_to_le32(RNDIS_MSG_INIT_C); resp->MessageLength = cpu_to_le32(52); resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS); @@ -621,7 +621,7 @@ static int rndis_query_response(int configNr, rndis_query_msg_type *buf) return -ENOMEM; resp = (rndis_query_cmplt_type *)r->buf; - resp->MessageType = cpu_to_le32(REMOTE_NDIS_QUERY_CMPLT); + resp->MessageType = cpu_to_le32(RNDIS_MSG_QUERY_C); resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ if (gen_ndis_query_resp(configNr, le32_to_cpu(buf->OID), @@ -668,7 +668,7 @@ static int rndis_set_response(int configNr, rndis_set_msg_type *buf) pr_debug("\n"); #endif - resp->MessageType = cpu_to_le32(REMOTE_NDIS_SET_CMPLT); + resp->MessageType = cpu_to_le32(RNDIS_MSG_SET_C); resp->MessageLength = cpu_to_le32(16); resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ if (gen_ndis_set_resp(configNr, 
le32_to_cpu(buf->OID), @@ -692,7 +692,7 @@ static int rndis_reset_response(int configNr, rndis_reset_msg_type *buf) return -ENOMEM; resp = (rndis_reset_cmplt_type *)r->buf; - resp->MessageType = cpu_to_le32(REMOTE_NDIS_RESET_CMPLT); + resp->MessageType = cpu_to_le32(RNDIS_MSG_RESET_C); resp->MessageLength = cpu_to_le32(16); resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS); /* resent information */ @@ -716,8 +716,7 @@ static int rndis_keepalive_response(int configNr, return -ENOMEM; resp = (rndis_keepalive_cmplt_type *)r->buf; - resp->MessageType = cpu_to_le32( - REMOTE_NDIS_KEEPALIVE_CMPLT); + resp->MessageType = cpu_to_le32(RNDIS_MSG_KEEPALIVE_C); resp->MessageLength = cpu_to_le32(16); resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS); @@ -745,7 +744,7 @@ static int rndis_indicate_status_msg(int configNr, u32 status) return -ENOMEM; resp = (rndis_indicate_status_msg_type *)r->buf; - resp->MessageType = cpu_to_le32(REMOTE_NDIS_INDICATE_STATUS_MSG); + resp->MessageType = cpu_to_le32(RNDIS_MSG_INDICATE); resp->MessageLength = cpu_to_le32(20); resp->Status = cpu_to_le32(status); resp->StatusBufferLength = cpu_to_le32(0); @@ -758,7 +757,7 @@ static int rndis_indicate_status_msg(int configNr, u32 status) int rndis_signal_connect(int configNr) { rndis_per_dev_params[configNr].media_state - = NDIS_MEDIA_STATE_CONNECTED; + = RNDIS_MEDIA_STATE_CONNECTED; return rndis_indicate_status_msg(configNr, RNDIS_STATUS_MEDIA_CONNECT); } @@ -766,7 +765,7 @@ int rndis_signal_connect(int configNr) int rndis_signal_disconnect(int configNr) { rndis_per_dev_params[configNr].media_state - = NDIS_MEDIA_STATE_DISCONNECTED; + = RNDIS_MEDIA_STATE_DISCONNECTED; return rndis_indicate_status_msg(configNr, RNDIS_STATUS_MEDIA_DISCONNECT); } @@ -817,15 +816,15 @@ int rndis_msg_parser(u8 configNr, u8 *buf) /* For USB: responses may take up to 10 seconds */ switch (MsgType) { - case REMOTE_NDIS_INITIALIZE_MSG: - pr_debug("%s: REMOTE_NDIS_INITIALIZE_MSG\n", + case RNDIS_MSG_INIT: + pr_debug("%s: RNDIS_MSG_INIT\n", __func__); params->state = RNDIS_INITIALIZED; return rndis_init_response(configNr, (rndis_init_msg_type *)buf); - case REMOTE_NDIS_HALT_MSG: - pr_debug("%s: REMOTE_NDIS_HALT_MSG\n", + case RNDIS_MSG_HALT: + pr_debug("%s: RNDIS_MSG_HALT\n", __func__); params->state = RNDIS_UNINITIALIZED; if (params->dev) { @@ -834,24 +833,24 @@ int rndis_msg_parser(u8 configNr, u8 *buf) } return 0; - case REMOTE_NDIS_QUERY_MSG: + case RNDIS_MSG_QUERY: return rndis_query_response(configNr, (rndis_query_msg_type *)buf); - case REMOTE_NDIS_SET_MSG: + case RNDIS_MSG_SET: return rndis_set_response(configNr, (rndis_set_msg_type *)buf); - case REMOTE_NDIS_RESET_MSG: - pr_debug("%s: REMOTE_NDIS_RESET_MSG\n", + case RNDIS_MSG_RESET: + pr_debug("%s: RNDIS_MSG_RESET\n", __func__); return rndis_reset_response(configNr, (rndis_reset_msg_type *)buf); - case REMOTE_NDIS_KEEPALIVE_MSG: + case RNDIS_MSG_KEEPALIVE: /* For USB: host does this every 5 seconds */ if (rndis_debug > 1) - pr_debug("%s: REMOTE_NDIS_KEEPALIVE_MSG\n", + pr_debug("%s: RNDIS_MSG_KEEPALIVE\n", __func__); return rndis_keepalive_response(configNr, (rndis_keepalive_msg_type *) @@ -963,7 +962,7 @@ void rndis_add_hdr(struct sk_buff *skb) return; header = (void *)skb_push(skb, sizeof(*header)); memset(header, 0, sizeof *header); - header->MessageType = cpu_to_le32(REMOTE_NDIS_PACKET_MSG); + header->MessageType = cpu_to_le32(RNDIS_MSG_PACKET); header->MessageLength = cpu_to_le32(skb->len); header->DataOffset = cpu_to_le32(36); 
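/*
 * Framing sketch for the message built in rndis_add_hdr(): a 44-byte
 * RNDIS_MSG_PACKET header is prepended to each skb. MessageLength covers
 * the whole frame, DataLength only the payload that follows the header,
 * and DataOffset appears to be counted from the DataOffset field itself
 * (8 + 36 = 44), i.e. the payload starts immediately after the header.
 */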
header->DataLength = cpu_to_le32(skb->len - sizeof(*header)); @@ -1031,7 +1030,7 @@ int rndis_rm_hdr(struct gether *port, __le32 *tmp = (void *)skb->data; /* MessageType, MessageLength */ - if (cpu_to_le32(REMOTE_NDIS_PACKET_MSG) + if (cpu_to_le32(RNDIS_MSG_PACKET) != get_unaligned(tmp++)) { dev_kfree_skb_any(skb); return -EINVAL; @@ -1173,7 +1172,7 @@ int rndis_init(void) rndis_per_dev_params[i].used = 0; rndis_per_dev_params[i].state = RNDIS_UNINITIALIZED; rndis_per_dev_params[i].media_state - = NDIS_MEDIA_STATE_DISCONNECTED; + = RNDIS_MEDIA_STATE_DISCONNECTED; INIT_LIST_HEAD(&(rndis_per_dev_params[i].resp_queue)); } diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h index 907c3300811..0647f2f34e8 100644 --- a/drivers/usb/gadget/rndis.h +++ b/drivers/usb/gadget/rndis.h @@ -15,58 +15,12 @@ #ifndef _LINUX_RNDIS_H #define _LINUX_RNDIS_H +#include <linux/rndis.h> #include "ndis.h" #define RNDIS_MAXIMUM_FRAME_SIZE 1518 #define RNDIS_MAX_TOTAL_SIZE 1558 -/* Remote NDIS Versions */ -#define RNDIS_MAJOR_VERSION 1 -#define RNDIS_MINOR_VERSION 0 - -/* Status Values */ -#define RNDIS_STATUS_SUCCESS 0x00000000U /* Success */ -#define RNDIS_STATUS_FAILURE 0xC0000001U /* Unspecified error */ -#define RNDIS_STATUS_INVALID_DATA 0xC0010015U /* Invalid data */ -#define RNDIS_STATUS_NOT_SUPPORTED 0xC00000BBU /* Unsupported request */ -#define RNDIS_STATUS_MEDIA_CONNECT 0x4001000BU /* Device connected */ -#define RNDIS_STATUS_MEDIA_DISCONNECT 0x4001000CU /* Device disconnected */ -/* For all not specified status messages: - * RNDIS_STATUS_Xxx -> NDIS_STATUS_Xxx - */ - -/* Message Set for Connectionless (802.3) Devices */ -#define REMOTE_NDIS_PACKET_MSG 0x00000001U -#define REMOTE_NDIS_INITIALIZE_MSG 0x00000002U /* Initialize device */ -#define REMOTE_NDIS_HALT_MSG 0x00000003U -#define REMOTE_NDIS_QUERY_MSG 0x00000004U -#define REMOTE_NDIS_SET_MSG 0x00000005U -#define REMOTE_NDIS_RESET_MSG 0x00000006U -#define REMOTE_NDIS_INDICATE_STATUS_MSG 0x00000007U -#define REMOTE_NDIS_KEEPALIVE_MSG 0x00000008U - -/* Message completion */ -#define REMOTE_NDIS_INITIALIZE_CMPLT 0x80000002U -#define REMOTE_NDIS_QUERY_CMPLT 0x80000004U -#define REMOTE_NDIS_SET_CMPLT 0x80000005U -#define REMOTE_NDIS_RESET_CMPLT 0x80000006U -#define REMOTE_NDIS_KEEPALIVE_CMPLT 0x80000008U - -/* Device Flags */ -#define RNDIS_DF_CONNECTIONLESS 0x00000001U -#define RNDIS_DF_CONNECTION_ORIENTED 0x00000002U - -#define RNDIS_MEDIUM_802_3 0x00000000U - -/* from drivers/net/sk98lin/h/skgepnmi.h */ -#define OID_PNP_CAPABILITIES 0xFD010100 -#define OID_PNP_SET_POWER 0xFD010101 -#define OID_PNP_QUERY_POWER 0xFD010102 -#define OID_PNP_ADD_WAKE_UP_PATTERN 0xFD010103 -#define OID_PNP_REMOVE_WAKE_UP_PATTERN 0xFD010104 -#define OID_PNP_ENABLE_WAKE_UP 0xFD010106 - - typedef struct rndis_init_msg_type { __le32 MessageType; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 5c170100de9..f82a7394756 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -167,7 +167,7 @@ static void handle_tx(struct vhost_net *net) if (wmem < sock->sk->sk_sndbuf / 2) tx_poll_stop(net); hdr_size = vq->vhost_hlen; - zcopy = vhost_sock_zcopy(sock); + zcopy = vq->ubufs; for (;;) { /* Release DMAs done buffers first */ @@ -258,7 +258,8 @@ static void handle_tx(struct vhost_net *net) UIO_MAXIOV; } vhost_discard_vq_desc(vq, 1); - tx_poll_start(net, sock); + if (err == -EAGAIN || err == -ENOBUFS) + tx_poll_start(net, sock); break; } if (err != len) @@ -266,6 +267,8 @@ static void handle_tx(struct vhost_net *net) " len %d != %zd\n", err, len); if 
(!zcopy) vhost_add_used_and_signal(&net->dev, vq, head, 0); + else + vhost_zerocopy_signal_used(vq); total_len += len; if (unlikely(total_len >= VHOST_NET_WEIGHT)) { vhost_poll_queue(&vq->poll); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 51e4c1eeec4..94dbd25caa3 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1603,6 +1603,7 @@ void vhost_zerocopy_callback(struct ubuf_info *ubuf) struct vhost_ubuf_ref *ubufs = ubuf->ctx; struct vhost_virtqueue *vq = ubufs->vq; + vhost_poll_queue(&vq->poll); /* set len = 1 to mark this desc buffers done DMA */ vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN; kref_put(&ubufs->kref, vhost_zerocopy_done_signal); diff --git a/firmware/3com/3C359.bin.ihex b/firmware/3com/3C359.bin.ihex deleted file mode 100644 index 781bac3b2a0..00000000000 --- a/firmware/3com/3C359.bin.ihex +++ /dev/null @@ -1,1573 +0,0 @@ -:10000000FE3A0000000000000000000000000000B8 -:1000100000000000000000000000000000000000E0 -:1000200000000000000000000000000000000000D0 -:1000300000000000000000000000000000000000C0 -:1000400000000000000030332F30322F39392031CA -:10005000373A3133000000000000000000000000CB -:1000600030313233343536373839414243444546EE -:10007000000007FF0200FE9F0600007C48000070A1 -:100080008200FFFF8600FFFF8800FFFF9A00FFFF4E -:10009000FFFF1100C000FFFFFFFF11223344556630 -:1000A00033434F4D20424142451140C000FFFFFF06 -:1000B000FF1122334455665374617274206F6620B9 -:1000C0004C4C43206672616D652E2020546F746124 -:1000D0006C20646174612073697A6520697320788B -:1000E000787820202042414245E8D201833EF7340F -:1000F000007521E84100833EF734007517E882005F -:10010000833EF73400750DE8BF00833EF734007579 -:1001100003E84102C31EB800F08ED833F6B9008060 -:1001200033DBAD03D8E2FB1FB8000083FB00740390 -:10013000B82200A3F734C3FABA5600B0FFEE33C0BA -:100140008EC033F6B9FF7F833EFF340074088D3EC6 -:100150003061D1EF2BCF268B1C26C704FFFF2683EF -:100160003CFF751726C704000026833C00750C264B -:10017000891C4646E2E0B80000EB03B82400A3F770 -:1001800034C3FAB4D79E733A753879367B349FB14D -:1001900005D2EC732DB040D0E071277925D0E07303 -:1001A000217B1F32C0751B32E49E721674147812C4 -:1001B0007A109FD2EC720BD0E470077505B800007E -:1001C000EB03B82600A3F734C3FABA5A0033C0EFE2 -:1001D000EFEFEFB000E656B000E654BA5200B801B7 -:1001E00001EFE8CA003C01757FE88300BA5200B80D -:1001F0000202EFE8B9003C02756EE87A00BA5200DC -:10020000B80404EFE8A8003C04755DE87100BA5238 -:1002100000B80808EFE897003C08754CE86800BA99 -:100220005200B81010EFE886003C10753BE85F0004 -:10023000BA5200B82020EFE875003C20752AE85635 -:1002400000BA5200B84040EFE864003C407519E83D -:100250004D00BA5200B88080EFE853003C8075082A -:10026000E84400B80000EB03B82800A3F734C3BA91 -:100270005A00B80080EFC3BA5A00B80180EFC3BA81 -:100280005A00B80280EFC3BA5A00B80380EFC3BA6D -:100290005A00B80480EFC3BA5A00B80580EFC3BA59 -:1002A0005A00B80680EFC3BA5A00B80780EFC3B946 -:1002B000FFFFE458E4543C0075034975F7C3FA3274 -:1002C000C0E656E4563C007403E98200B0FFE656EF -:1002D000E4563CFF7578BA5200B8FFFFEFED3CFFE3 -:1002E000756CB800FFEFED3C007563B0FFE654E4B9 -:1002F000543CFF755932C0E654E4543C00754FB08D -:100300000FE650E450240F3C0F7543B000E650E474 -:1003100050240F3C0075378CC88EC0BE7000268BF1 -:1003200014268B5C02B80000EFED23C33D0000757E -:100330001DB8FFFF23C3EF8BC8ED23C33BC1750E70 -:1003400083C60426833CFF75D5B80000EB03B82AAA -:1003500000A3F734C3FA33C0BF0020B91700F3ABD2 -:10036000BF0030B91700F3ABBF0022B94000F3ABB8 -:10037000BF0032B94000F3ABFC1E8CC88ED833C02E -:100380008EC0BE9200BF0020B91700F3A4BEA90022 -:10039000BF0022B94000F3A41FC706FB346400BAB3 -:1003A0000800B80F00EFE88201E89B01720DC70654 
-:1003B000F7342C00C706F9340400C3BA0A0033C06E -:1003C000EFE89801E8B501B81700BA9C00EFB80053 -:1003D00010BA9A00EFB81700A90100740140BA8C56 -:1003E00000EFB80018BA8600EFB80C00BA8200EF30 -:1003F000BA0200ED25F9FF0D0200EFBA060033C086 -:10040000EFBA0400B86000EFBA0000B81800EFBA05 -:100410008000B9FFFFEDA901007504E2F8EB3EBAD8 -:100420000A00EDA900407435A90020743033C0EFF4 -:1004300051B9C800E2FE591E061F268B0E023083FA -:10044000F91775184949BE0220BF0630F3A61F23CD -:10045000C9750AFF0EFB347412E94DFF1FB82C005A -:10046000BB0000A3F734891EF934C3C706FB34640C -:1004700000E8D300720DC706F7342C00C706F93424 -:100480000400C3E8D600E8F300B80300BA8200EF26 -:10049000B84080BA9800EFB80011BA9600EFB840A3 -:1004A00000A90100740140BA9200EFB80019BA8E99 -:1004B00000EFBA0200ED25F9FF0D0600EFBA0600C5 -:1004C00033C0EFBA0000B81800EFBA8000B9FFFFE0 -:1004D000EDA920007504E2F8EB43BA0A00EDA9008B -:1004E00040743AA90020743533C0EF51B9C800E216 -:1004F000FE591E061F268B0E023283F940751D49D8 -:1005000049BE0222BF0632F3A61F23C9750FFF0E94 -:10051000FB347403E95AFFB80000EB0B1FB82C0042 -:10052000BB0200891EF934A3F734C3BA0200B80035 -:100530009CEFBA0000B80084EF33C0EFBA0A00EFB6 -:10054000BA0E0033C0EFC3BA0A00B9FFFFED2500B1 -:10055000603D00607404E2F5F8C3F9C3B000E656EC -:10056000B800FFBA5200EFB9FFFFBA5800ED25EF0F -:10057000007408BA5A0033C0EFE2EFC3BA8000ED4E -:10058000BA8400EFBA8000EDC30000000000000054 -:10059000C606EC341533C08ED88EC01E8CC8BE4043 -:1005A00054BF60FE8ED8B91000F3A41FC706803672 -:1005B0001035C7068C3630358D063835A33035A357 -:1005C0003235053301A33435C70636355001C70629 -:1005D000843680FEC7068836C0FEC606C2FEFFC649 -:1005E00006933680C606923600C60680FE80C70691 -:1005F00082FE5450C70684FE2B4DE5CEA90200753D -:1006000008C60681FE23E90500C60681FE22A1F781 -:1006100034A386FEB8483486E0A388FE8D064E34A7 -:1006200086E0A38AFEB8583486E0A38CFEB89C34DA -:1006300086E0A38EFE8D06200386E0A390FE33C0E5 -:10064000BA7200EF33C0BA7400EFBA7600EFB88028 -:10065000FE86E0BA7200EFE8BF07BA0C01B840406E -:10066000EFEDBA6A00B80300C1E0080D0300EFB96E -:100670000A00E89400BA6A00B80300C1E008EFA1DC -:100680003234A3A233C706A63304008D06A033C1BB -:10069000E804CD39C7069036FFFFE9E300630D6635 -:1006A0000D660D8A0DE60E75122E0F030F500F60AA -:1006B0000D600D600DED0FE912600D600D600D60B5 -:1006C0000D600D2210600D600D600D600DFE10605C -:1006D0000D600D600D600D600D600DAF0F321037B5 -:1006E0000D600D600D600D600D600D600D600D60A2 -:1006F0000D600D600D600D600D600D600D600D6092 -:100700000D640E000F9509600A49BBFFFFBA6A002D -:10071000EDA900207438803E80FE127531E84A0051 -:10072000A13234A3A233C706A63304008D06A0333A -:10073000C1E804CD39E82200C706F3344600C706F5 -:10074000F534FFFFC7069036FFFF58E932004B83B0 -:10075000FB0075B983F90075B0C352BA6A00B803DB -:1007600000C1E0080D0300EF5AC352BA6A00B80393 -:1007700000C1E008EF5AC3000000000000000000C4 -:10078000688007A19036CD358B3624022EFFA43524 -:100790000AFA8A2694368826E834C606943600FB80 -:1007A00022E47501C3F6C420747DF6C40874058084 -:1007B0000E9236048026E834D7C41E8436268B3742 -:1007C00081E6FF0083FE207605B001E9280053068C -:1007D000D1E62EFF949D06075B268847023CFF74F6 -:1007E000073CFE7511E93B00F6069236087534F6B3 -:1007F00006923604742D80269236F3803E9536009C -:10080000752126803F057513C60695360026807F24 -:1008100006007407268B4704A29536BA0C01B8402F -:1008200040EFED8A26E834F6C4107503E95B00F664 -:10083000C4047405800E9236018026E834EBC43E71 -:100840008836268B3583E67F83FE12720826C645DE -:100850000201E9240083C620D1E62EFF949D06C440 -:100860003E8836268845023CFF750EF60692360114 -:100870007414F606923602750D80269236FCBA0C78 -:1008800001B82020EFED8A26E834F6C408742280EF -:1008900026E834F7800E923604F606923608741174 
-:1008A00080269236F3BA0C01B84040EFED8A26E874 -:1008B00034F6C40474228026E834FB800E9236019C -:1008C000F606923602751180269236FEBA0C01B8F1 -:1008D0002020EFED8A26E834F6C40174678026E80C -:1008E00034FE803EE8FF007439803EE8FF04743235 -:1008F000803EE8FF017521E580A90007740ABA9ED1 -:1009000000B80002EFE9EFFFC606E8FF03BA0C01EA -:10091000B80808EFEDE92800803EE8FF037406E917 -:100920001E00E90000BA1001B80202EFEDE5000D6B -:100930001800E700E5820D0200E782C606E8FF0422 -:100940008A26E834F6C402740D8026E834FD802639 -:100950009236BFE84F0BFAA0E83408069436C60674 -:10096000E83400FBC3E8E70FC41E84362EFF1601EF -:100970000726884702E97EFEE82D10C41E84362E25 -:10098000FF16030726884702E96BFE8E0626022E15 -:10099000FF160707C3C3833EF53400740FFF0EF341 -:1009A000347509E8C4FDC706F5340000F606933631 -:1009B000207430A1C2343B06E934A3E934742480A6 -:1009C0003E953600751DF706E63420007412A92006 -:1009D00000740D8326C234DF8326E934DFE9030087 -:1009E000E8DD09BA0601ED8BD081E200C0C1EA0E54 -:1009F00003167434C1E002110672347304FF0674E6 -:100A000034BA0201ED8BD081E200C0C1EA0E0316B8 -:100A10007034C1E00211066E347304FF067034C7EF -:100A200006A6330400C706AA3300008D06A033C112 -:100A3000E804CD39C39509950965097809950995A3 -:100A4000099107950996098B0995099509950995C5 -:100A500009950995098BC08BC08BC08BC08BC0904A -:100A6000F6069336207503E9CC008CC0408EC02674 -:100A70008B0E060086E926890E06008CC2C1E204B0 -:100A8000BE0E0026A10400D0E024C08AE0C0EC0421 -:100A90000AC426A2050026A10800A900C07403E923 -:100AA0009E0026F6061000807503E90A0026A016AF -:100AB00000241F32E403F0803EEC3406725C803E7A -:100AC00095360075668BFA33DB8EC326891D268822 -:100AD0005D045150C41E8C36B90F0033C0E82109A3 -:100AE00058590BDB7434FE0EE63A26C6078126C63B -:100AF00047010026C64702FF26C747040000268993 -:100B00004F0A86F2268957062689770826C647099E -:100B10000026C6470C02E88C09C3FF06EC338CC0E4 -:100B2000488EC0FAE89710FBE9EBFF8CC0488EC0F6 -:100B3000FAE88A10FBC38CC08EC0FAE88010FBC3B1 -:100B4000803E9536007503E9C200BF080026F60610 -:100B5000100080750503FEE90C0026A01600241F76 -:100B600032E403F003FEA095363C007503E99C00D7 -:100B70003C01740B3C0274143C03741DE98D00C6E7 -:100B800006963601E83C017227E98000C6069636D3 -:100B900002E88300721AE97300C606963601E8225D -:100BA00001720DC606963602E86C007203E95C001D -:100BB000530650C41E8C36B90B0033C0E8420858A7 -:100BC00026C6078226C64702FF8D06E0FE86C4269B -:100BD000894706A0963626884708E8C808075B8339 -:100BE00026AD36FEA1AD36E704BA1001B88080EF1D -:100BF000EDBA1001B80202EFED52BAE000B84110B0 -:100C0000EF5AB89C03CD39C6069536008CC0488E85 -:100C1000C0FAE8A90FFBC31E061F0633C08EC08BA7 -:100C2000F08D3E20F351B10A26837D0C01752A57C1 -:100C300026837D0E007406E82F00E90300E86607AE -:100C40005F731633C08ED8268B4D128D75208D3E66 -:100C5000E0FEF3A459071FF9C3FEC9740781C7203A -:100C600001E9C4FF59071FF8C35150535652573377 -:100C7000DB268A5D0E268B4D128D7D205A87D72666 -:100C80008A451487D74232FF80FF087508FECB22C1 -:100C9000DB75EA33DB23DB7406FEC7D0C8730C5068 -:100CA000268A053804587403E90A0049464723C9CF -:100CB000740AE9D3FF5A5E5B5859F8C35A5E5B5811 -:100CC00059F9C31E061F0633C08EC086CD2BCE8BAE -:100CD000F78BC133C9803CFF741680F90673093263 -:100CE000C94648742EE9EDFF3D6000730CE923000E -:100CF000FEC14648741DE9DCFFB810008D3E183473 -:100D000032EDB106F3A67403E908004823C0740766 -:100D1000E9E9FF071FF8C38D36183433C08ED88D2C -:100D20003EE0FEB81000B9060056F3A45E483D0050 -:100D30000075F3071FF9C3FF06E433C606EB340062 -:100D4000268B450686E0C1E80448068EC0FE06E60E -:100D50003AFAE8690EFB07B0FFC30000000000008C -:100D6000B001C3B000C3F6069336207503B004C3C8 -:100D70008B0E973681E18030268B4704257FCF0B81 -:100D8000C1A39736A3E634B000C3F60693362074A9 
-:100D900003B003C3268B4708A39736A3E634268AFD -:100DA0004720A2FD343C017506C706A13600002687 -:100DB0008A4721A2FE34268B470AA31834A358344D -:100DC000268B470CA31A34A35A34268B470EA31C38 -:100DD00034A35C34C6062A34C0268B4714257FFF13 -:100DE00009062C34268B471625FFFE25FFFC090635 -:100DF0002E34C6060034C0268B4710A30234268B3F -:100E00004712A304340653E8840A5B073D000075CB -:100E100007800E923608B0FEC3B90001A1AC33338F -:100E2000D2F7F9A3AE33914933D2F7E905003BA3DA -:100E30004634BF003B893E4434BA6800B8E0E0EF76 -:100E4000A1AE33E762A1AE33BA0801EFA14434E7A3 -:100E500064A14434BA0A01EFB800012D04000D006A -:100E600010E792C33D0000740A26894707E8833AD9 -:100E7000B007C3A1AE332689472BA1443426894746 -:100E80002DA146342689472F800E933620A188361F -:100E900086E026894708A1843686E02689470AA18C -:100EA000803686E02689470CB860FE86E0268947B2 -:100EB0000EA0A136268847108B36883626C64402F7 -:100EC000FFE59EA90008740CBA8400ED0D0800EF40 -:100ED000BA8E00EFE50225F9FFE702BA1001B80269 -:100EE00002EFEDB000C3F6069336207503B001C3E0 -:100EF000802693369FE88D0A800E923608B0FEC396 -:100F0000B000C3F6069336207503B004C3C6062AA4 -:100F100034C0268B4706257FFFA32C34268B470839 -:100F200025FFFE25FFFCA32E34CD52B000C3F606EC -:100F30009336207503B004C3C6060034C0268B4721 -:100F400006A30234268B4708A30434CD52B000C355 -:100F5000F6069336207503B004C3578D7F0651B94A -:100F6000070033C0F3AB598D7F06A17A34030639ED -:100F700037268805A1953726884502A180340306C7 -:100F8000763426884507A1C63426884509A1D8337A -:100F90002688450A33C0A37A34A33937A39537A3EB -:100FA0008034A37634A3C634A3D8335FB000C3F62D -:100FB000069336207503B004C3268B4F0483F906CD -:100FC000741283F904740D83F900740883F90274B0 -:100FD00003B001C3890EE83A8326AB36F9090EAB9C -:100FE00036E50225F9FF0BC1E702B000C3F6069310 -:100FF00036207503B004C3268B4F0480F9FF7408B4 -:1010000080F9007410B001C3830EAD3602A1AD3675 -:10101000E704E90A008326AD36FDA1AD36E704B04A -:1010200000C3F6069336207503B004C3E8D504B0B8 -:1010300000C3F6069336807503B001C326837F068E -:10104000057503E99D00268B5704268B47082681EA -:101050007F0600807508ED2689470AE99D002683F2 -:101060007F06017504EFE9920026817F06018075F5 -:1010700009EFED2689470AE9810026837F0602757C -:101080000726214704E9730026817F060280750C3C -:1010900026214704ED2689470AE95F0026837F065B -:1010A00003750726094704E9510026817F0603805E -:1010B000750C26094704ED2689470AE93D00268379 -:1010C0007F0604750726314704E92F0026817F0635 -:1010D0000480750C26314704ED2689470AE91B0078 -:1010E000B001C3FA53268B4F080BC9740C8D1EE058 -:1010F000FEE852FF83C308E2F85BFBB000C3F606CC -:10110000933680750AF6069336207503B001C38DB9 -:101110003EE0FEE500268905E50226894502A1ADEF -:101120003626894504E50626894506E508268945CB -:1011300008E50A2689450AE50E2689450CE5482674 -:1011400089450EE54A26894510E54C26894512A1B8 -:10115000B73626894514E55026894516E552268975 -:101160004518E5542689451AE5562689451CE55853 -:101170002689451EE56226894520E56426894522A3 -:10118000E56626894524E56826894526E56A268997 -:101190004528E56C2689452AE5702689452CE572A7 -:1011A0002689452EE57426894530E576268945321F -:1011B000E57C26894534E57E26894536E580268905 -:1011C0004538E5822689453AE5862689453CE58805 -:1011D0002689453EE59A26894540E59E2689454271 -:1011E000E5CC26894544E5CE26894546E5D02689C5 -:1011F0004548E5D22689454ABA0001ED1106663414 -:101200007304FF0668342689454CBA0201EDC1E03B -:101210000211066E347304FF0670342689454EBAF7 -:101220000401ED11066A347304FF066C3426894507 -:1012300050BA0601EDC1E002110672347304FF06D4 -:10124000743426894552BA0801ED26894554BA0AF4 -:1012500001ED26894556BA0C01ED26894558BA0E8E -:1012600001ED01067A342689455EBA1001ED268922 -:10127000455CB000C3F6069336807407F6069336D5 
-:10128000207503B001C326807F06007530803E952F -:1012900036007452C6069536008326AD36FEA1ADE3 -:1012A00036E704BA1001B88080EFEDBA1001B80239 -:1012B00002EFEDBAE000B80010EFB000C3268B4794 -:1012C000043D000074203D0300771BBA1001B802F2 -:1012D00000EFBAE000B80110EF830EAD3601A1AD0A -:1012E00036E704B000C3B006C3F606933680750334 -:1012F000B001C326837F0401740A26837F0402742D -:1013000019B006C326837F060C77F626837F0A6012 -:1013100077EFE81000720BB046C3E84E007203B0DE -:1013200046C3B000C351B10A8B3E20F326837D0C27 -:10133000027503E90E00FEC9740781C72001E9EBBD -:10134000FF59F8C3578D7D0E8D7706B91200F3A4AF -:101350008D7D208D36E0FE268B4D12F3A4FF060115 -:10136000355F26C7450C010059F9C351B10A8D3EBE -:1013700020F38D36E0FE26837D0C01751B57E82592 -:10138000005F731433C0B92001F3AA26C7450C02CD -:1013900000FF0E013559F9C3FEC9740781C720014A -:1013A000E9D3FF59F8C351268B4D128D7D20F3A64A -:1013B000740359F8C359F9C300000000000000008D -:1013C000803EEC34067233FF06F03350C41E8C3678 -:1013D000B90F0033C0E82900588126C234DF7F816D -:1013E00026E934DF7F0BDB741126C6078426C64747 -:1013F00002FF26894706E8AC00C3FF06EA33E9F599 -:10140000FF57268B3F03F9263B7F027416263B7F4E -:10141000047C2A3D000075138D7F0803F9263B7F6D -:10142000027C14FF06DE3333DB5FC3268B7F02268C -:10143000893F03F9E9060026893F26290F26C705BB -:10144000FFFF26873F26890D8D5D02508BFB83E9C8 -:101450000233C0F3AA58FE0EEC345FC38B7C023B10 -:101460003C742F833DFF750B8D7C08897C02833D86 -:10147000FF741E8A45023C81750C803EEB3400747B -:101480000533C0E90B008B0D014C028D750283E919 -:1014900002C3803EEC3406720533C0E9F3FFFF0659 -:1014A000EE33E9BEFFF6069236407401C35756513B -:1014B000528B368C36E8A4FF7503E91A00E91C004C -:1014C000FE06EC34C43E8036F3A4800E923640BA59 -:1014D0000C01B88080EFED5A595E5FC3FF06E03320 -:1014E000803C81750CFF06E233C606EB3401E9CF80 -:1014F000FF803C847507FF06E633E9C3FFFF06E87B -:1015000033E9BCFF8D3EE0FEA17234C706723400A1 -:10151000008905A17434C70674340000894502BAF5 -:101520000401ED894504C745060000A16E34C706D5 -:101530006E340000894508A17034C706703400007D -:1015400089450ABA0001ED89450CC7450E000032F5 -:10155000E4BA0E01EC894510A17E34C7067E340042 -:1015600000894512A18C34C7068C340000894514CB -:10157000A18A34C7068A340000894516A17C34C785 -:10158000067C340000894518A18834C706883400D9 -:101590000089451AA1CA33C706CA33000089451C11 -:1015A000A17834C7067834000089451EA1C634C727 -:1015B00006C6340000894520C3000000000000007A -:1015C000FA33C08ED88EC0B8A001C1E8048ED08D89 -:1015D000268000E80001E810EB8B1EF7348B16F92B -:1015E000348B36FF3433C0B9EFFF8D3E14002BCF60 -:1015F0002BCED1E9F3AB891EF7348916F93483FE7B -:1016000000740CB9EFFFBF80FE2BCFD1E9F3ABB96B -:10161000FFFF81E9003B83FE007403E91B00511EBC -:10162000B800E08ED833F68D3E00D8B9000CF3A593 -:101630001F59BEFFFF81EE00D82BCE81E100FF894C -:101640000EAC338D062002C1E804A332348ED036AE -:10165000C7061E00801836C7062200FF7F36C70661 -:101660000A00FFFF36C7061C0080008D06A002C1DD -:10167000E804A330348ED036C7061E00502836C783 -:10168000060A00FFFF36C7061C008000B8A001C193 -:10169000E804A33434A3F2338ED08D268000B80042 -:1016A00090E7028D3E70018BC7C1E804B903008941 -:1016B000450E894502C705FFFF83C710050100E2FB -:1016C000EEE85B01E5CEA3B536E82100E84501A1CF -:1016D00032348CCBCD370E58A900F0740733F6891D -:1016E00036FF34C38D3630618936FF34C333C08B47 -:1016F000D08BF2B968002E80BCAC17807501EF83E7 -:10170000C20246E2F1B80200E750B95A0033FFC7FF -:101710000565188C4D0283C704E2F433C08EC08C7B -:10172000C88ED88D3E80008D369C17B90800E837EA -:10173000008D3620218D3EC000B90D00E829008DB6 -:101740003E4001B90A00E81F00E84B0E33C08ED8B6 -:10175000C7064E376F17E748E74CB8409CE74AE5A5 -:101760004890B80070E748C3A583C702E2FAC3E512 
-:101770004CC35051565752061E33C08ED8E558D12F -:10178000E073118BF0D1E633C08ED88BB480008328 -:10179000C60BFFE61F075A5F5E5958CF581CE41C62 -:1017A0006C1C8E1AC01F401A441C6518808080FF74 -:1017B00080030280FFFFFFFFFFFFFFFFFFFFFFFF30 -:1017C000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF29 -:1017D0008003034380800280420302FF0301030170 -:1017E00001030203FFFFFFFFFFFFFFFF02030103EF -:1017F00003FF0101FF01FF0101030303FFFFFFFFDF -:10180000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE8 -:10181000FFFFFF02B80F00E784B80FF8E782C3B9F3 -:101820000800890EE63A8D0620038BD0C1E804A398 -:1018300090018BC28BD8C1E8048EC005610026A33D -:101840000000A1303426A3020083C314D1EB268903 -:101850001E080081C21006E2D926C7060000FFFF5D -:101860008C069201C35051565752061E33C08ED873 -:10187000E75AFF06BE33BAD200EDCF0000000000E9 -:101880008CCBA13034CD37E906EDB83200C3E88CFB -:1018900001FE06E234E8210175F0E8530E810EAF37 -:1018A0003600C0C706AD366000F706E63480007526 -:1018B0001AF706E63400087409C706AB360B00E9D0 -:1018C0000F00C706AB360300E90600C706AB3611AA -:1018D0009CC706A9361800F706E6348000750DF798 -:1018E00006B53602007405830EA93620A1A936E795 -:1018F00000A1AB36E702F706E6348000742EE8F26A -:101900002F33C00D4100E756A1B1360D0010E70896 -:10191000A1B336E70AA1AF36E706B84000E74E3379 -:10192000C0E70EC70626020000E92300C7064E37AF -:101930003F208E06303426F7060A00008074072602 -:10194000810E08000080C606E03401B80000C3FE26 -:1019500006E134C606E03400A126020BC07401C3C0 -:10196000E80400B80000C3A1A936E7008B1EAB361F -:1019700083E306E50225F9FF0BC30D1000E702A182 -:10198000AD36E704C3B80A00E784FE06E534C606B0 -:10199000E334018E06303426F7060A00004074074F -:1019A00026810E08000040C3C7064E376F17FE069B -:1019B000E434C606E33400C3C3F606183480750D5C -:1019C000A118340B061A340B061C347501C3A12E62 -:1019D0003425FFFE8B16E73681E200010BC2A32EF1 -:1019E000348D161000BF0000B908008B850034EF5D -:1019F00083C2108B850234EF83C2108B850434EFD1 -:101A000083C2E283C7064975E2B800008EC0BE00FB -:101A100034BFB936B91800F3A5B80000C333C08E7F -:101A2000C08D3EB033B90800F3AB8D3E3E34B903F0 -:101A300000F3ABC300000000000000000000000045 -:101A40005051565752061E33C08ED8E75AFF06BA79 -:101A500033E5560D2000E756BA7A00ED0826943695 -:101A600033C0B10832ED068EC08D3EE0FFF3AA8E82 -:101A700006323426810E0800000207E55625DFFFF6 -:101A8000E756E9F8FC00BD1B101BD91AF31A505198 -:101A9000565752061E33C08ED8E75AFF06B6335348 -:101AA0000651E580A3B4338BD88BC8251000A3ED75 -:101AB000340BC07414FF068034803EFE340074037F -:101AC000E90600B88000E89D0483E303D1E32EFF1C -:101AD00097861A59075BE9A4FCBA20008E063C34AD -:101AE000833E3C34007503E9F000C7063C34000037 -:101AF000E92A00BA10008E063A34833E3A34007563 -:101B000003E9D5FFC7063A340000E81000E9C9FF31 -:101B1000BA10008E063A34C7063A34000026A114E3 -:101B20000026A30C0026A1160026A30E0026C6063A -:101B30000A0000C1EA0223D1741CBA200026C7069D -:101B40000E00EA05260B160C002689160C00FF066F -:101B50008634FF06DC3326A10C00A9003774162654 -:101B6000C6060A0002A900307404FF067A34FF0694 -:101B7000DA33E94900C0EC0783168A340024073CB5 -:101B8000077504FF068C34FF067E34A130348CC305 -:101B90008EC08EDB26830E0800408CD82687061662 -:101BA0000026833E1400FF740A8EC0268C1E00009F -:101BB000E90500268C1E140033C08ED8C3C38CC028 -:101BC000870692013DFFFF740D8ED88C060000330E -:101BD000C08ED8E904008C069001E80100C306839A -:101BE0003E9001FF7429833E3A34007511BA860095 -:101BF000E81E008C063A34833E9001FF7411833E48 -:101C00003C3400750ABA8800E806008C063C3407AC -:101C1000C3A190018EC026A10800EF26A1000026D6 -:101C2000C7060000FFFFA390013DFFFF7503A392CD -:101C300001833EED3400740BB81000E784C706ED55 -:101C4000340000C35051565752061E33C08ED8E799 -:101C50005AFF06BC33E925FB5051565752061E3336 
-:101C6000C08ED8E75AFF06B033E911FB50515657E2 -:101C700052061E33C08ED8E75AFF06B43306FF065D -:101C80007634803EFE3400740407E9F0FAB8800030 -:101C9000E8D30207E9E6FA000000000000000000B7 -:101CA000C61D081D911E5D1E731E891E911EA81D56 -:101CB000911E911EAF1EAF1E151D151D911E991F61 -:101CC000000000000000000000040000000200000E -:101CD00000010010000100400000000000010000B1 -:101CE00007E999FA5051565752061E33C08ED8E76D -:101CF0005AFF06B2330668F61CE506A3B2338BF032 -:101D000083E61E2EFFA4A01CE50CA980007406E843 -:101D1000A401E506C353E50C8BD8A9010074148314 -:101D20003EE03A00740D8E063834E8BF06C706E080 -:101D30003A0000E5000D1800E700E5020D1100E78C -:101D4000028BC35BA901007401C38BD0B80008E704 -:101D5000848BC28E06383426A30C008BD0C1E003DE -:101D60008316883400FF067C3426833E06000A75FD -:101D7000218BC22540183D4000740C3D00107512A7 -:101D800026FE0E0A00740BF706EF3420007503E9F7 -:101D90005A068CC0268E06020026830E08002026D6 -:101DA000A3120026A31000C3FF06C433E50CA9014B -:101DB000007501C3A9F0077401C3FF06D433E50021 -:101DC0000D1800E700C3FF06CA33803EA036087531 -:101DD000148E06303426F7060A00000874072681A0 -:101DE0000E08000008E58225FDFFE782E50C50E5BE -:101DF00080250007A3E43AE58C250080A3E23A5849 -:101E0000A902007525833EE23A00751E833EE43A3E -:101E1000007517E5080D000425FF04E708E86A01CE -:101E2000E5820D0200E782E92100E81A06803EE81B -:101E3000FF00740A803EE8FF047403E90D00C60643 -:101E4000E8FF01BA0C01B80808EFED803E9F3606A6 -:101E50007505830E993640B80001E90901FF06CCEB -:101E6000338126AF36FFF7A1AF36E706FF06C6344B -:101E7000E91E00FF06CE33FF0695378126AF36FFF9 -:101E8000EFA1AF36E706E90800FF06D033FF067A78 -:101E900034FF06D233D1E68E0630342E8B84C01C3C -:101EA00026090608002E8B84C21C09066637C3E586 -:101EB0000CA98000745650E8F00058A9000175077D -:101EC000FF06C633E90800FF067834FF06C833E58D -:101ED0008225FDFFE782E86E05BA1001ED803EE83D -:101EE000FF00740A803EE8FF047403E91D00C60683 -:101EF000E8FF01BA0C01B80808EFEDE90D00C606CD -:101F0000E8FF03BA0C01B80808EFEDC3A90100749B -:101F10001CE82C00833EE03A00740F068E0638342D -:101F2000E8C904C706E03A000007E95D008BD08EDF -:101F300006383426A30C00E8060068691DE94A004B -:101F4000A90004740AB80004FF06D833E91700A9F1 -:101F50000001740AFF063937B80001E90800A9102A -:101F600000B81000741D090666378CC08E06303428 -:101F700026F7060A000001740726810E08000001FA -:101F80008EC0C3FF06C233E9F8FFE5000D1800E775 -:101F900000E5020D1100E702C358E943FDE5080D15 -:101FA000000425FF04E708E9E0FFE50EA900087535 -:101FB00001C3E9F5FF000000000000000000000080 -:101FC0005051565752061E33C08ED8E75AFF06B8F6 -:101FD00033E548065357FF164E375F5B833E80015B -:101FE000FF74588E06800126FF0E0800754D26A14D -:101FF0000000A3800126C7060000FFFF8CC0268ECC -:1020000006020026810E080080008BD02687061A63 -:102010000026833E1800FF740A8EC0268916000031 -:10202000E905002689161800833E8001FF740C8E96 -:1020300006800126833E08000074B307E93EF7E5F9 -:102040004C90E502A90020740D25FFDF0D0100E78B -:10205000020D0020E702E50A8BD8A3F43325C3570D -:102060000D0010E70AF7069B3600807437F7C300AF -:10207000807406F7C30008745D8126C2347FFFC7F1 -:102080000635370500B88003CD3981269B36FF7FA2 -:10209000C7060F370400F7069B3640007506C706D3 -:1020A0000F370300F7069B360020742AF7C3000899 -:1020B0007424803E9D36067C1DFF069434830E6694 -:1020C00037208E06303426F7060A000001740726F2 -:1020D000810E08000001F7C30020753BF7069A3710 -:1020E0008000740BFF06893733C0E70EE90400FF58 -:1020F000063B37F7069B360020741C80269E36FF71 -:1021000075158E06303426F7060A00000874072677 -:10211000810E08000008C3C300000000000000009A -:1021200002230223022302230323DD220223FD21B3 -:102130000223A424F32402238D227A23022397244A -:102140001B247524022302238E25FB8E067E01FBB1 
-:1021500026833E0000FF74F2268E060000FA268BCE -:102160001E080026231E0A0074E58CC08ED0268B24 -:102170002602008C16F23322FF756A26A11C008A03 -:10218000E38ADC22D8750DD0E824F80AC075F2B0D5 -:1021900080E9EDFFD0E824F80AC07502B08032E48F -:1021A00026A31C00F7C3080075472E8A9FC5252E5D -:1021B0008BBFC52680C310268E1D268C1E06008B65 -:1021C000160000C7060000FFFF26891583FAFF7579 -:1021D0000A2E8B97CD26262116080033C08ED826CE -:1021E000891E0400C38ADFB7002E8A9FC525E9E057 -:1021F000FF2683260800F783C310E9DEFF60061E72 -:102200006887256A001F8E06F2338B0E3434390E30 -:10221000F233740E26810E0A00000226810E080099 -:1022200000022689260200A3F2338ED08D2680007C -:10223000368926020036891E200036C706080000AF -:1022400000B90400BE00002E8BBCC52636C705FFB2 -:10225000FF36C74502FFFF83C602E2EB8E067E0112 -:10226000368B0E22008CC026833E0000FF268E0691 -:1022700000007407263B0E22007DEA368C06000023 -:102280008EC0268C160000FB36FF2E1E00061E6830 -:102290008B256A001F2609360800F7C600FF740167 -:1022A000C356522E8BB4C52581E6FF002E8BB4C5D4 -:1022B000268CC28EC026C7060000FFFF8EC2268372 -:1022C0003CFF740F8BD0268754028EC226A30000D9 -:1022D000E90700268944022689045A5EC3061E685F -:1022E0008B256A001F8E06F23326A30A0026892654 -:1022F0000200A134348ED08D2680008C16F233E992 -:102300004DFECF501E525333C08ED826833E04005C -:10231000FF26C706040000007403E91A00833EE6A6 -:102320003A027613FF06D6338CC08E063234BE4096 -:1023300000683A23E95EFFE884F85B5A1F58CFE84B -:10234000E10026C606180010268A1E2900881E1BDA -:102350003726C7060C00FF7F26A10E00E79C26A1AA -:102360000800E79AE50080FB0874090D18ACE70047 -:10237000071F58CF0D1800E9F4FF501E0633C08E1A -:10238000D8833EA1360075B7268B3606002EFF9403 -:10239000DC23071F58CFE88A00E5000D1800E7008E -:1023A000E84900C353F706EF342000752DE58C256E -:1023B00000708BD8E58C2500703BC374058BD8E981 -:1023C000F2FF3D00307510E50225EFFFE702C7067A -:1023D000E03AFFFFE90300E812005BC3A323962362 -:1023E000A423A4239623A4239623962326A029007E -:1023F000A21B3726C7060C00FF7F26A10E00E79C14 -:1024000026A10800E79AE50025FF53268B36060033 -:1024100083E60E2E0B84AD25E700C3061E688B25D0 -:102420006A001F830EEF3420830E9B3608E50025DB -:10243000EFFF0D0800E700E500A910007501C3E5F6 -:1024400000A9100075F9C350535156061E33C08EB3 -:10245000D8B80500E784E5080D000425FF04E70867 -:10246000E5000D1800E700E5020D1100E7021F0767 -:102470005E595B58C3501E33C08ED8C706EF340078 -:102480000083269B36F7E5000D1800E700E5020DF6 -:102490001100E7021F58CF60061E6887256A001FDB -:1024A000E816F5C3061E688B256A001F8EC02683BA -:1024B0003E0A00007403E8430026C7060A00FFFF37 -:1024C000268B1606008E1E8E018CD88BCA833E008A -:1024D00000FF8E1E0000740A2B16080073EB290EF5 -:1024E000080026890E0800268C1E00008ED88C0657 -:1024F0000000C360061E6887256A001F8EC08BC857 -:102500008E1E8E0126C7060A0000008CD8833E006E -:1025100000FF74253B0E00008E1E000075ED8ED866 -:1025200026A10000A300003DFFFF74568ED826A10F -:10253000080001060800E94900268E1E0200BE18A8 -:1025400000833CFF743C390C74198E1CBE00008360 -:102550003E0000FF742C390E000074078E1E000030 -:10256000E9ECFF26A10000890433C98ED93DFFFFA5 -:10257000751083FE18750B268E1E0200812608003A -:102580007FFF33C08ED8C31F0761CF1F07CF600600 -:102590001E6887256A001FE506251E003D1E007582 -:1025A000F6B90800E558E75A23C0E0F8C300000078 -:1025B000000000000000AC000000A8008C02040035 -:1025C0000008102000FF0E0C0C0A0A0A0A0808086E -:1025D0000808080808060606060606060606060691 -:1025E00006060606060404040404040404040404A1 -:1025F000040404040404040404040404040404049B -:1026000004040404040202020202020202020202A0 -:10261000020202020202020202020202020202029A -:10262000020202020202020202020202020202028A -:10263000020202020202020202020202020202027A 
-:102640000202020202000000000000000000000080 -:10265000000000000000000000000000000000007A -:10266000000000000000000000000000000000006A -:10267000000000000000000000000000000000005A -:10268000000000000000000000000000000000004A -:10269000000000000000000000000000000000003A -:1026A000000000000000000000000000000000002A -:1026B000000000000000000000000000000000001A -:1026C00000000000001800140010000C00FF7FFF45 -:1026D000BFFFDFFFEFFFF7FFFBFFFDFFFE7FFFBF49 -:1026E000FFDFFFEFFFF7FFFBFFFDFFFEFF00000036 -:1026F000803EE234017603E9A500B80000E74EB958 -:102700002800E2FEC606453702BF3F282E8B45084B -:10271000E74EB92800E2FE2E8B1DC706B3364011E6 -:10272000C706B1362700C70646370200C706483736 -:102730006400F706B5360200751C2E0B5D0281267B -:10274000B336FFFEC706B1369C00C7064637080001 -:10275000C70648379001891EB736891EFE33BE2052 -:10276000008BC3E74EB92800E2FE2E8B4504E74EEE -:10277000B92800E2FEE54E8BCB2E2345062E234DD5 -:10278000063AC174364E75D9803E453700740BC683 -:1027900006453700BF2F28E972FFC606453701F707 -:1027A00006B53602007414E5CE25FDFFE7CEE843FA -:1027B00000E5CE0D0200E7CEE83900803EE23401AC -:1027C0007601C3B8EA05E78CFAE812F4FB8D06D06F -:1027D000398BD8C1E804A338348EC0A1303426A385 -:1027E000020026C7060000FFFF83C318D1EB26892D -:1027F0001E0800C3E5020D0040E702E5000D0400DD -:10280000E700B80000E70AE50AA900807514E508AA -:102810000D0010E708E50A0D0008B90500E70AE217 -:10282000FCC3E5080D0010B90500E708E2FCC3048D -:102830000C2000010C7EFF000C0200100040000C78 -:10284000C6010000C0F7FF00C002001000400000F9 -:1028500033C08ED88D3E72498D36B037B914008B97 -:102860001E3034895C022E8B45028944062E8B056E -:1028700089440483C70483C610E2E8C6069E360E68 -:10288000E8FD26688328A1AA02CD35833EA1360043 -:102890007403E93B2733FF8E06A6028B36A4022E73 -:1028A000FFA42E30830E993604C70637370100C6C1 -:1028B00006CA3401E97D19803EA0360874E68026F8 -:1028C0009E36FF751AF7069B3600207412F7069B9A -:1028D000360300750A830E663710C606A03608E96F -:1028E000FB01803E9E360275CEC606A03606E9EC98 -:1028F00001C3E9E80126C7060A00000026FF2604F6 -:1029000000A1D1362639061A007522A1D336263900 -:10291000061C007518A1D5362639061E00750E2630 -:10292000F7060C0040007405830E663740810EAF39 -:10293000360010A1AF36E706803E9D36027506CD03 -:1029400034E9A21AC3F7069B361000755426F60622 -:102950000A00FF754C26A0190024C03C4075118068 -:102960003E953600743B26C7060400FFFFE93100A0 -:10297000E8F104F7069B360300742F8BD8B87D036B -:10298000CD3A8BC3C606A03606F7069B3602007505 -:1029900005C606A03604810E9B36800083269B3632 -:1029A000FCE92301E8871DE933015026A10C00252D -:1029B00007003D07007503E984003D05007503E944 -:1029C0007C00833EE83A047475833EE83A02746EF4 -:1029D000F706E63418807503E96A00F706E6340066 -:1029E00080743526803E290002752D5156578D364C -:1029F0003E348D3E2000B90600F3A65F5E59744553 -:102A000026A12000A33E3426A12200A3403426A103 -:102A10002400A34234E92600F706E6340800740BCC -:102A200026803E1900007403E91300F706E634100F -:102A300000741226A02800C0E80422C0740726C72C -:102A4000060400FFFF5823C07403E957FF81269B4B -:102A500036FFFE83FE067F2426A120003B06D136EA -:102A6000751A26A122003B06D336751026A1240034 -:102A70003B06D5367506810E9B36000126A1200047 -:102A8000257FFFA3B83426A12200A3BA3426A124AF -:102A900000A3BC348BC686C4A3C034D1E680FC0935 -:102AA0007403E8AA1C8BC62EFFA4304926A10C0093 -:102AB0003DFF7F740F26FF2604008E063834E8366B -:102AC00006CD50C3E91600CD34E91100CD34893666 -:102AD0003D37A19D36A33F37C606A0360CE88E00D1 -:102AE000A19F3622E47532F7064C370100752AF6AD -:102AF000069D3680740788269E36E931003A069D89 -:102B000036A39D3674288BF02EFFA40D2B4429EE9E -:102B1000421944CD442F455A453A269E367501C385 -:102B200032C086C48BF0A29E362EFFA420498B2E85 
-:102B3000993623ED7501C3BF0100BE000085FD7508 -:102B40001A46D1E7E9F6FF2A0029002800270025C8 -:102B50000005000700260006002000F7D7213E9957 -:102B600036D1E62E8BB4472BE94FFFE956FF80267E -:102B70009E36FF7517F7064C370100750FF6069D58 -:102B800036807408F7066637FFFF7507C706663795 -:102B90000000C3F70641370100750BB87F03CD393C -:102BA000C7064137010033F6B80040850666377422 -:102BB0002180BC5437FF7404FE84543780BC9634A3 -:102BC000FF7404FE84963431066637833E66370010 -:102BD000740546D1E873D4C3A1F433A90088740BFB -:102BE000A9001075098B1E4337FFE3E9D700C7061C -:102BF00035370500C70643371E2CF706F4330008A7 -:102C00007406C7064337102CB88003CD39E9CDFED2 -:102C1000A9000874D9FF0E353775EDE96600A900E3 -:102C20000875CBFF0E353775DF810EC234C000F654 -:102C3000069D36807448810E9B360080F7069B36D1 -:102C40000100741EB87D03CD3A810E9B368000834F -:102C5000269B36FEC7060F370200C606A03604E9DB -:102C60007BFE803EA036047507833E0F3701750555 -:102C7000C606A03606C7060F370200E95FFEBE0291 -:102C800000E94AFE80269E36FF753AF6069D36809C -:102C9000742DF7069B360020752BC606A03606FF5E -:102CA000069434830E6637208E06303426F7060AE3 -:102CB000000001740726810E08000001E90600BE2D -:102CC0000400E909FE810EAF360008A1AF36E70621 -:102CD000E50AA90080740E8126AF36FFF7A1AF3652 -:102CE000E706E909FFE9F5FDC70641370000830E55 -:102CF000993602E9E7FD80269E36FF751DF7069B93 -:102D00003600407505830E993608830E993620816A -:102D1000269B36FFBFB88503CD39E9C0FD803E9EB6 -:102D200036067407803E9E360A7534F6069D368058 -:102D30007506BE0700E996FDC606A03604833E0F61 -:102D40003702741BC7060F370400803E9E36067597 -:102D50000EF7069B3640007506C7060F370300E9DD -:102D60007BFD803E9D36047512810EC2340040FF0B -:102D7000069234C606A03606E962FDBE0500E94D9E -:102D8000FDF6069D36807519830EC23404BE06001A -:102D9000E93BFD80269E36FF75C5FF063137E90009 -:102DA000008326C234BFC606A03606E92FFDE50A19 -:102DB0005025C3BFE70A5880269E36FF750DA9002F -:102DC000407508C606A03606E912FDB88303CD3962 -:102DD000C3B87C03CD39F706F43300107509C70674 -:102DE00033370200E9F6FCFF0E33377403E9EDFCDC -:102DF000FF068E34E8F719830EC23408BE0300E9DB -:102E0000CCFC0000000000000000000400040405E9 -:102E1000040404000300030300000000000000009D -:102E20000004000808050808080003000303000068 -:102E3000020404040400000800000A1400001A0040 -:102E40001C001E2000000441060B08C2FFE704031B -:102E500006040405040604870403060404854EA240 -:102E600004CF04CDC706A2370000C706A63700006E -:102E700026A12000257FFFA3F53626A12200A3F777 -:102E80003626A12400A3F936E83B198BF0268B0ED9 -:102E90000E002BC883E90EB8018083F9047C51260B -:102EA0008A542888161C3740268B6C2686CD3BCD4D -:102EB00086CD890EA43775384032FF268A5C29807A -:102EC000FB15772580FB0A742080FB01741BB80476 -:102ED000802E3A97022E74072E3A97182E751133CA -:102EE000C080FB09754F8BF3C326C7060400FFFFA4 -:102EF0005052A1A43786C4263B0626007C32268188 -:102F00003E260000047E298D742A268B1422D2745A -:102F10001F80E6BF80FE097517C706A23701008033 -:102F2000FA04750C268B4402A3033786C4A3D0345D -:102F30005A58E9B1FFBD72372E8A872E2E22C074EF -:102F40001605442E8BF82E8B053E89460083C5025C -:102F500083C70222E47DEF8D742A83E9047503E9B7 -:102F6000A100268B1422D27503E97C00C706A63780 -:102F70000100BF72378B0583C70280E6BF80E43F44 -:102F800080FE09752280FA04755EC706A23701002B -:102F9000268B4402A3033786C4A3D03486C4C70655 -:102FA000A6370000E947003BFD7E15268B04A840AC -:102FB0007406B80780E938FF32C0268B04E92E007A -:102FC0003AF475B1C745FE000080FE22750D3AD077 -:102FD0007716C706A6370000E913003AD07509C76F -:102FE00006A6370000E90600B80580E902FF32F6C0 -:102FF00003F22BCAB8058023C97603E964FF740382 -:10300000E9EDFE33C0BF72378B1547473BFD7F1B91 -:10301000F6C6807416F706A63701007406B8088055 
-:10302000E9C3FEF6C64074E0B80780E9B8FE7D4209 -:10303000A34544294429B728E228EE2BF228F52895 -:103040000129AC2A4429442944294429442900005F -:10305000733600000336C535833545350735D23420 -:1030600045340000000000000000000000000000E7 -:103070000000A6380000E03800000000000000005A -:103080000000000000000000000000000000000040 -:10309000F2330000A6336033FD32BC3277323C326B -:1030A000FB316A310A31E0E0101010E0E0E0E000AE -:1030B0000000000000000000000000000000000010 -:1030C000000000000000E000E0E0E0E0E0E0E0E020 -:1030D000E033FF26F6061A0080741B2680261A00AD -:1030E0007F268B3E260083E71F740B26800E200070 -:1030F0008026013E0E00C3602E8B84A63026A318C6 -:1031000000D1E62EFF94503061C326C7060400C4E8 -:103110002A26C7060E00160026C706060006002649 -:10312000C606190000E8BF05E8980526C706260070 -:10313000000826C60628004026C60629002ABF2AFF -:103140000026C6050426C645012AA1933733DBA90C -:1031500040007502B301A900107402B788A90008E5 -:10316000740380CF4426895D02C3830EC2342026B7 -:10317000C70604006B2B26C7060E00300026C706C4 -:1031800006000A0026C7060A00040026C606190023 -:1031900000E86905E82C0526C7062600002226C699 -:1031A0000628006026C606290029BF2A0026C60573 -:1031B0000826C645012D8D7D02BE5437B90300F3A4 -:1031C000A526C6050826C645012E8D7D02BE5A37A6 -:1031D000B90300F3A5E8D405E86405B90600BE54B8 -:1031E000378D2E2C00268B4600290483C60283C50A -:1031F0000283F90475024545E2EBC326C7060400C5 -:10320000C42A26C7060E00240026C70606000600AC -:1032100026C606190000E8E404E8A70426C7062627 -:1032200000001626C60628006026C606290028BF0C -:103230002A00E85B06E87405E80405C326C706040F -:1032400000C42A26C7060E001A0026C70606000676 -:103250000026C606190000E8A304E8660426C7068F -:103260002600000C26C60628006026C60629002770 -:10327000BF2A00E82105C326C7060400C42A26C7C2 -:10328000060E00200026C70606000A0026C7060A0A -:1032900000040026C606190000E84B04E8240426B2 -:1032A000C7062600001226C60628004026C60629A4 -:1032B0000026BF2A00E8F404E88404C326C70604F5 -:1032C00000C42A26C7060E00340026C706060006DC -:1032D0000026C606190000E80D04E8E60326C70626 -:1032E0002600002626C60628004026C606290025F8 -:1032F000BF2A00E8B604E84604E8FA04C326C70675 -:103300000400C42A26C7060E003800A1A237500BBD -:10331000C0750726C7060E00340026C7060600063D -:103320000026C606190000E89903E8A4FD26C74553 -:1033300026002A580BC0750626C745260026A11C64 -:1033400037C1E0042688452826C645292483C72A94 -:10335000E82904E8A004E82205E8F803E80904C322 -:1033600026C7060400C42A26C7060E00320026C758 -:10337000060600060026C606190000E84503E850C8 -:10338000FD26C745260024A11C37C1E00426884538 -:103390002826C645292383C72AE8E003E86C04E809 -:1033A0008A04E89C04C326C7060400C42A26C7066C -:1033B0000E00340026C7060600060026C6061900C1 -:1033C00000E8FF02E80AFD26C745260026A11C37B3 -:1033D000C1E0042688452826C645292283C72AE855 -:1033E0009A03E8C703E85703E8F803E87804E88A93 -:1033F00004C326C7060400744526C7060E003E0017 -:1034000026C7060600060026C7060A00040026C6D0 -:1034100006190000E8FC02E8A902833E8D37037517 -:10342000019026C7062600003026C6062800502632 -:10343000C606290020BF2A00E8D003E80103E8B54A -:1034400003E89F03C326C70604006143B9F0008365 -:10345000E90226890E0E0026C7060600020026C6CF -:103460000619000026C7061A00000026C7061C0021 -:10347000000026C7061E000000E8470283E90E860A -:10348000CD26890E260086CD26C60628000026C633 -:1034900006290008BF2A0083E90426890D26C645AF -:1034A00001268D7D0283E902BB0100B830304B75E7 -:1034B00017BB0A008AC4268805B03180C40180FC8D -:1034C0003A750AB461E90500268805040147497583 -:1034D000DDC326C7060400044526C7060E001200F9 -:1034E00026C7060600060026C606190001E8E50103 -:1034F000E8D00126C7062600000426C606280000DC -:1035000026C606290007C326C7060400C42A26C704 
-:10351000060E00200026C7060600060026C606196D -:103520000006E80402E89B0126C7062600001226D2 -:10353000C60628000026C606290006BF2A00E86B3A -:1035400002E8FB01C326C7060400C42A26C7060EEC -:1035500000200026C7060600060026C6061900053C -:10356000E8C601E85D0126C7062600001226C60649 -:1035700028000026C606290005BF2A00E82D02E81B -:10358000BD01C3FF06823426C70604003D4126C79D -:10359000060E00200026C70606000E0026C60619E5 -:1035A0000004E88401E81B0126C706260000122655 -:1035B000C60628000026C606290004BF2A00E8EB3C -:1035C00001E87B01C326C7060400674226C7060E32 -:1035D00000200026C7060600080026C606190003BC -:1035E000E84601E8DD0026C7062600001226C606CA -:1035F00028000026C606290003BF2A00E8AD01E81E -:103600003D01C3FF06843426C7060400674226C76F -:10361000060E00240026C7060600080026C6061966 -:103620000002E80401E89B0026C7062600001626D3 -:10363000C60628000026C606290002BF2A0026C6A4 -:10364000050426C6450101A10F3786E0F6066F374F -:1036500001750F3906CC3474098BD8B88903CD397C -:103660008BC3A3CC34268945028D7D04E83D01E857 -:10367000CD00C326C7060400C42A26C7060E001CB8 -:1036800000A1A237500BC0750726C7060E00180010 -:1036900026C7060600060026C606190000E8230015 -:1036A000E82EFA26C74526000E580BC0750626C719 -:1036B0004526000A26C645290083C72AE8BD00E83A -:1036C000FF00C3565751B90300BED136BF2000F3E7 -:1036D000A5595F5EC3565751B90300BED136BF1A14 -:1036E00000F3A5595F5EC326C7061A00C00026C7AF -:1036F000061C00000026C7061E000010C326C706D1 -:103700001A00C00026C7061C00000026C7061E00BF -:103710000008C326C7061A00C00026C7061C000002 -:103720000026C7061E000002C326C7061A00C000F6 -:1037300026C7061C00FFFF26C7061E00FFFFC32684 -:10374000C6050826C64501028D7D02BE0537B903B0 -:1037500000F3A5C326C6050426C6450106A10D37FC -:10376000268945028D7D04C326C6050426C645016B -:1037700007A10B372689450283C704C3A1A2370BD3 -:10378000C0741326C6050426C6450109A1033726C1 -:1037900089450283C704C326C6050826C64501021B -:1037A0008D7D02BE0537B90300F3A5C326C6050605 -:1037B00026C645010B8D7D02BEEF36B90200F3A58A -:1037C000C326C6050626C6450120A16837268945B9 -:1037D00002A16A3726886505C1E00426884504836E -:1037E000C706C326C6050426C645012126C74502CD -:1037F000000083C704C326C6051426C64501228DD2 -:103800007D02BE1F37B90900F3A5C326C6050C26E5 -:10381000C64501238D7D021E0E1F8D364054B9030F -:1038200000F3A533C0B90200F3AB1FC326C60508D9 -:1038300026C64501288D7D02BED136B90300F3A509 -:10384000C326C6050826C6450129A1C23486E0263E -:10385000894502A19B362689450426884506268887 -:1038600045078D7D08C326C6050626C645012B8D56 -:103870007D02BEBB36B90200F3A5C326C6050626E7 -:10388000C645012C8D7D02BEE536B90200F3A5C305 -:1038900026C6050426C6450130A1373786E02689AD -:1038A00045028D7D04C326C7060E001E0026C706EE -:1038B0000600020026C606190000E86CFEE803FEBA -:1038C00026C7062600001026C60628003026C60693 -:1038D000290011BF2A00E83500E84500E85500C37B -:1038E00026C7060E00120026C7060600020026C6DE -:1038F00006190000E832FEE8C9FD26C706260000CA -:103900000426C60628003026C606290013C326C68C -:10391000050426C645010C26C74502000183C704DD -:10392000C326C6050426C645010E26C74502000269 -:1039300083C704C326C6050426C645012126C745FC -:1039400002000083C704C300000000000000000064 -:10395000B339C939833AB339B339B3391C3A1C3A4C -:10396000A3B634A1E936A31137A3D234A1EB36A311 -:103970001337A3D434A1ED36A31537A3D634A10150 -:1039800037A3CE34A1F736A31737A3DC34A1F93619 -:10399000A31937A3DE34F7069B360200750C33C03B -:1039A000A09E368BF02EFFA45039E90F01BE070010 -:1039B000E919F1F6069D368074F3C606A03602C6F4 -:1039C000066E3708C606703702B88803CD39F6068A -:1039D0006F3701754AA1D1363A06E93675413A2664 -:1039E000EA36753BA1D3363A06EB3675323A26EC09 -:1039F00036752CA1D5363A06ED3675233A26EE36C5 
-:103A0000751DC606703702FE0E6E37750FB8880337 -:103A1000CD3A830E9B3612C606A0360CE9A8F0A15B -:103A20000537263B0620007540A10737263B0622B6 -:103A3000007536A10937263B062400752CA09E365A -:103A40003C02750826F6061800087547C6066E374C -:103A500008FE0E7037751CC606703702E5020D01B0 -:103A60000425EFFFE702E95EF0C606703702C606DE -:103A70006E3708E50225FFFB0D010025EFFFE70289 -:103A8000E944F0F7069B360001742526F606180077 -:103A90000875ED81269B367FFFB88903CD3AB8843F -:103AA00003CD3AC606A036068326C234AFE917F026 -:103AB000A101373A260F377FC7E9F7FE83269B36E9 -:103AC000ECE82A0D810E9B368000BBFF7FCD53C6EC -:103AD00006A03602E9F0EF830E9B3611C606A0362B -:103AE0000CE9F9EF443B2C3BC72A6B3B443BC72A0C -:103AF000C72AC72AA3B634810EC2340020F7064174 -:103B0000370100741B8CC3C70641370000B87F0320 -:103B1000CD3A33C08EC0BF5437B90600F3AB8EC365 -:103B200033C0A09E368BF02EFFA4E43AF7069B36F6 -:103B3000000175218326C234BFA1A936E700A19BED -:103B400036E90900A19B3681269B36FFDFA90020BC -:103B50007506E96E00E96FEF830E993604C70637E4 -:103B6000370100C606CA3401E95800830E9B36406F -:103B7000E85800A105373B06E9367537A107373B02 -:103B800006EB36752EA109373B06ED367525FE0E80 -:103B90007137751CB88703CD3A830E993610A15042 -:103BA00037C7065037000009069936C606A0360802 -:103BB000E914EF830E993604C70637370300C606AB -:103BC000CA3403C606A0360AE9FCEEA1D136263B6C -:103BD0000620007515A1D336263B0622007512A1DA -:103BE000D536263B062400750FC38D362000E90B21 -:103BF000008D362200E904008D36240083C402F7CC -:103C000006E63401007415263A047708720E263A47 -:103C100064017208C606A03606E9ABEEE87C0A8CA1 -:103C2000C03DFFFF741B26C60618001026C70604F9 -:103C300000493C26C70606000C00CD50B94E00E2F4 -:103C4000FEC606A0360AE994EEE97BEE8F3C063DFF -:103C5000063D063DD23CEA3C063D063DA3B6348116 -:103C600026C234AFDFC7064C370000B88A03CD3A0E -:103C7000803E9D3604750C803E9E36067405C60651 -:103C80009F360633C0A09E368BF02EFFA44C3CF727 -:103C9000069B360020750E81269B36FFBFB88B032E -:103CA000CD3AE95400F7069B3600017403E917EE9C -:103CB000C70637370200C606CA3402830E99360497 -:103CC000830E503704F6069D3680752AE81F0BE9EF -:103CD0002700F7069B36000175D3C7063737020069 -:103CE000C606CA3402830E993604C606A03600F60C -:103CF000069D36807403E8DE0A81269B367CFFBB76 -:103D0000FFFFCD53CD54E9BEEDA3B634E8AD01B805 -:103D10008603CD39C7064C3700008126C234AFDF99 -:103D2000F6069D36807434F7069B3600207456F7ED -:103D3000069B3600017427E83501721CBE004085E1 -:103D400036C23475080936C234FF069234E88B0156 -:103D50007306810E99368000E96CEDE9B500C7065F -:103D600037370200C606CA3402830E993604830E22 -:103D7000503704803E9E36087403E85A0AE8EF0084 -:103D800072D6E9C8FF803E9E360A7512C606A03676 -:103D900000F7069B3608007402CD54E8390A8126E4 -:103DA0009B36FFBFE8C80072AFB88B03CD39E99CE2 -:103DB000FFF6069E36FF7558A3B634E8FE0081264E -:103DC000C234FFBFF6069D36807448F7069B360066 -:103DD000207422F7069B3600407508E89100723087 -:103DE000E9220026A10C00A960007524810E663727 -:103DF0000008E9D2ECC7064C370000E871007210E9 -:103E0000B88B03CD39E8D3007306810E9936800054 -:103E1000E9B4EC803E9D3604750C803E9E360674F7 -:103E200046C6069F3606F7069B360001740C803E98 -:103E30009D36087505C6069F360AE8320072D1E83D -:103E40009900803E9D36087513810E99368000F7E3 -:103E5000069B3600207508B88B03CD39E968ECC69F -:103E6000069F360AE960ECB88603CD3AE958EC269D -:103E7000A10C00A9600074088126C234FFBFF9C3F9 -:103E8000F7069B3600407413810E66370008E84A37 -:103E9000007306810E99368000F9C3810E9B3600AF -:103EA0004080266F37FE81269B367FFFC606A036F0 -:103EB00000F8C3810E99360001E921EC26A120000B -:103EC000A3FB36A3AA3426A12200A3FD36A3AC345B -:103ED00026A12400A3FF36A3AE34C3A10537263B99 -:103EE0000620007519A10737263B062200750FA191 
-:103EF0000937263B0624007505E80200F8C3511E69 -:103F0000068BC78D362000BF0537B903001E061F7C -:103F100007F3A58BF88D362000BFA034B90300F35A -:103F2000A5071F598BF8A10737A3A634A10937A30A -:103F3000A834F9C3C606B63401E98BEBE887088BD1 -:103F4000F00512002629060E00268B442A263A0682 -:103F50000E00755B26832E0E000280FC277550260E -:103F60008B442CA9FFFF75478BFE33C026F6453CDA -:103F7000807406268A453A241F03F826807D450969 -:103F8000752D8CC28E0638348EDA8B0E0E00268983 -:103F90000E0E008D742CBF1800F3A433C08ED826EB -:103FA000C7060400B53F26C70606000600CD50B878 -:103FB0000680E9EFE926A10C00A39337830E99361A -:103FC00001E900EB26803E1C00FF752F26803E1E77 -:103FD00000FF752726F7060C004000751BA1D1369F -:103FE00026A31A00A1D33626A31C00A1D53626A3EA -:103FF0001E00B80A80E83607E9E2EAFF069034BE00 -:104000000A00C606B63401F6069D36807505830E95 -:10401000C23401E9B6EA803E9D360A750F26A10C2E -:10402000002507003D04007503E87900A1F33686FA -:10403000E0E71EA3E33681260B37000381260D3708 -:104040007B7F830E0D3748E81E0026A10C00250754 -:10405000003D0400740926F7060C0020007506B820 -:104060000100E93FE9E95FEAC70641370000B87F90 -:1040700003CD3AA11D37A3C43486E0687F031FA394 -:10408000060033C08ED8A10B37A3B234A10D37A3DD -:10409000B434A1F336A3C834A1EF36A39C34A1F104 -:1040A00036A39E34C3800E9D3680BE0000E8B40760 -:1040B000B87B03CD3AB87C03CD39C706333702004D -:1040C000A1E536E72EA1E736E73EB88203CD3AF701 -:1040D000069B3600207503E8FD06A1D336A3EF3614 -:1040E000A39C34A1D536A3F136A39E34C3F6069D16 -:1040F00036807431BE2200E91700F6069D368074C2 -:1041000024BE2300E90A00F6069D36807417BE24FB -:104110000056E8A8058CC03DFFFF5E7405E8D7EFA8 -:10412000CD50E91FE8E99FE9000000000000000011 -:10413000B88403CD3AB88A03CD39E9F700803EA0B0 -:104140003608752EA9D007752CA1B1360D0004E7ED -:1041500008E50025FF73E700B88A03CD3AE8C306F7 -:1041600033C0E70EE50A25C317E70ACD54C606A0FB -:104170003600E968E9BE0400E93FE983269B36BFC3 -:10418000C606713703B88603CD3AB88803CD3AB86E -:104190008303CD3AB88703CD39810EC2340020E9BC -:1041A0009200E84906B88703CD39BBFF7FCD53B8ED -:1041B0008403CD3AB88803CD3AB88B03CD3AB8839F -:1041C00003CD3AB88603CD3AB88503CD3AC3E500AE -:1041D00025FF53E700830EC234408326C234EFE844 -:1041E0000C06BBFF7FCD53B88A03CD3AB88503CD0B -:1041F0003AB88603CD3AB88303CD3AB88703CD3AAF -:10420000B88B03CD3AB88403CD3AB88903CD3AC30D -:10421000830EC23450E81804E8D305F6066F370160 -:104220007512B88903CD39833E0F37007506C7066E -:104230000F370400A19D3680FC087405B88403CDB7 -:1042400039E5020D010825EFFFE702A19D3686E062 -:1042500032E48BF0D1EE33C00D20000906AD36A15B -:10426000AD36E704E953E8E95AE833C0A01B37D17B -:10427000E03A06A0367503E9BAFFE960E8C70641EF -:10428000370000E8C1E1E86A0633C00D4100E75697 -:10429000A1B1360D0010E708E50225F9FF0D030076 -:1042A000E702A1B336E70AA1AF36E706A1AD36E7CC -:1042B00004E87C03E89F03C7061D3700C8C7060B48 -:1042C000370003C7060D377B7F33C0A39936A39B06 -:1042D00036A39D36A39F36A34C37A3F336A3EF3600 -:1042E000A3F136E882FDC6069F3602E9EFE7E50254 -:1042F0000D018825EFFF0D00400D0004E702E8F2F4 -:1043000005E50A0D4000E70A33C0A38137A38537CE -:10431000A38337A38737A38937E5000D0084E7001F -:10432000B88C03CD39B88000CD35C706AA02FFFF8F -:10433000E50025FF7BE700810E9A378000B87E03F9 -:10434000CD3933C0E70EBE08008E063834E8A7ED3D -:104350008326EF34DFFF068137CD50830EEF342004 -:10436000C3F7069A378000743DA9D0077410A900DE -:1043700004741233C0E70EFF068737E9D2FFFF0649 -:104380008537E9CBFFFF068337E9C4FF83269A37D9 -:104390007FA18937030687373D05007F01C3BBFF37 -:1043A0007FCD53E90000E50225FFFB25EFFF0D015E -:1043B00000E702A183373B0646377F2AA185373BBA -:1043C0000648377C21A18937030687373D05007FE2 -:1043D00015C6069F3604E50225FFF70D010025EFFF 
-:1043E000FFE702E9F7E6BE0100F7069B360300741B -:1043F0000A83269B36FC830EC23404E9D0E6B87BE0 -:1044000003CD39E5020D016025EFFFE702C706F194 -:10441000342003B88E03CD39C38126C2347FFF8098 -:104420000E6F3701F7069B36030074D2B87B03CDBD -:104430003AB87D03CD3983269B36EF33C0B08AA2CC -:104440009F36A29D36C7064C370100C7060F3704BA -:1044500000F7069B3640007506C7060F370300B805 -:104460008D03CD39E800D5E5020D014025EFFF8B26 -:10447000D8B87C03CD39C706333702008BC30D0093 -:104480002025F9FF0B06E83AE702C3FF0EF1347569 -:1044900001C3E54EA901007512E500A900047505E8 -:1044A0000D0004E700B88E03CD39C3E500A9000470 -:1044B00074F325FFFBE700E9EBFFC606A036048393 -:1044C000269B36FC810E9B368000E910E6B88E03F1 -:1044D000CD3ACD54810EAF360018A1AF36E706B8FD -:1044E0007B03CD39A1D336A38F37A1D536A391371E -:1044F000C7068B370200C7068D370200830E993638 -:1045000040E9D9E5803E9F36067515A9D00775ECC0 -:10451000250018750EFF0E8B3775E1C6069F36080D -:10452000E9BAE5FF0E8D3775D3BE0800E99FE5B8FF -:104530007B03CD39F7069B3600207408C6069F36EC -:104540000AE90D00F7069B360040740BB88B03CDCB -:1045500039810E99368000E983E5B87B03CD39C7F0 -:10456000068B370400C7068D370400810E9936008C -:1045700002E969E5F6069D3680751BA9D00775EB43 -:10458000A90018750CFF0E8D3775E0E817FBE94C94 -:10459000E5B88203CD39C3FF0E8B3775CEBE090057 -:1045A000E92BE5C7063D370000C7069B360000E84B -:1045B0003C028126AF36FFE7A1AF36E70681269B96 -:1045C00036FF7FE5020D010025EFFF25FFDFE70243 -:1045D000BBFF7FCD5333C0A39D36A39F36E8500069 -:1045E000E87300B88103CD39C3F7069B3603007426 -:1045F0000DC6069F3602C606A03600E9DFE4830E2C -:104600009B3610C70699360000E8E702E5560D0212 -:1046100000E756C706A80200008B363D37E8440283 -:10462000C606A0360EE9B5E4000000000000000058 -:1046300006B88A03CD3AB88503CD3AB88603CD3A99 -:10464000B88303CD3AB88703CD3AB88B03CD3AB8D7 -:104650008803CD3A07C306B88803CD3AB87B03CDAB -:104660003AB88203CD3AB87F03CD3AB87C03CD3A4D -:10467000B87E03CD3AB88003CD3AB88103CD3AB8BD -:104680008403CD3AB88903CD3AB87D03CD3AB88DCD -:1046900003CD3AC7064137000007C3068E063834FB -:1046A0001F8B0E0E0026890E0E00BE1800BF1800CC -:1046B000F3A4061E07CD340733C08ED8C326F606F2 -:1046C000200080744433C026A02600241F8BF026CF -:1046D0008B5C28891E6A37068E0638341FC0E304B7 -:1046E00026885C288BC6B90600BE2000BF1A00F3DE -:1046F000A48BC883C706F3A426812626001F802624 -:10470000813626000080E9A9FF268B1E2800891E1D -:104710006A37068E0638341FC0E30426881E280038 -:10472000B90600BE2000BF1A00F3A4E984FF86C4C6 -:10473000A36837E887FFF7066A370F007410803EDA -:104740009E36007509BE0000E8ACE9CD50C3C350E9 -:10475000560633C026F606200080740626A02600E2 -:10476000241F8BF0268B5C2686FB83EB04744F831F -:10477000C62A8CC08ED8B9070033C08EC0BF72372E -:10478000F3AB33C98A0C80F9007503E930003BD9DB -:104790007303E929002BD98A4401253F0074193D90 -:1047A0000B007D14D1E08BF82E8BBD5C498D74021B -:1047B00083E902F3A4E9020003F123DB75C433C0EB -:1047C0008ED8075E58C333C026F6062000807406D4 -:1047D00026A02600241FC3E50A25C3BFE70AB88622 -:1047E00003CD39B88303CD3981269B367CDFB8856C -:1047F00003CD3AE50225FFF30D010025EFFFE702A7 -:10480000E50025FF53E700A1E73625FFFEA3E736C5 -:10481000E73E83269936CF810EAF360010A1AF3622 -:10482000E706C3E5020D010C25EFFFE702A1E7361D -:104830000D0001E73EA3E736810E9B360020830E74 -:1048400099362081269B367CBF810EAF360010A1A1 -:10485000AF36E706B88603CD39B88503CD39B883BE -:1048600003CD3AC30BF67549068E063234803EE01E -:104870003401751B26893606008E06323426F7066B -:104880000A000020740726810E0800002007C3805C -:104890003EE33401751926893606008E0632342629 -:1048A000F7060A000010740726810E0800001007A2 -:1048B000C3E9B4FF50515733C0B906008EC0BFD111 -:1048C00036F3AE5F740C26F6060000C07504F85986 
-:1048D00058C3F9E9F9FF8B050B45020B4504C35298 -:1048E00050E506251E003D1E0075F6B80180E75A0A -:1048F000585AC3E8E9FF50E50225FF7F0D01002566 -:10490000EFFFE7020D0080E702A1AD36E704A1AF9B -:1049100036E70658C3000000000000000000000059 -:104920002E2BCE4110427B413041A241AF4544295C -:10493000C72AC72A6039F43A5C3C093DB13D343F8F -:10494000C72A3C3FC72AC43F16401640ED40FA40F4 -:104950000741C72AC72AC72AC72AD65200000137EB -:10496000E936F336EF361D370D370B379C370337F3 -:10497000FB36622D4006D12DF401BA4440068C432B -:104980006400E82CC800D82B0500E9455000974585 -:10499000FA00AE2D04016A420200F62CBC02932DEF -:1049A000DC051D2D6400A12D1400D73A0807812DC8 -:1049B0006400B33E020030436400C52CF4018B4414 -:1049C00002000000000000000000000000000000E5 -:1049D000803EFD3402740CE82005C706A1360000B5 -:1049E000E99AF8FF06C033E810058B363D37E873C7 -:1049F000FEC3CD34E9E805C706A3360000C706416B -:104A0000370000E8EDFE33C00D4100E756A1B13696 -:104A10000D0010E708A1B336E70AA1AF36E706A1FB -:104A2000AD36E704E82B09C7061D3700C8C7060BDB -:104A3000370003C7060D377B7F33C0A39B36A39D8A -:104A400036C7064C370100C6069E36FFC706053737 -:104A50000000C70607370000C70609370000A3F3A8 -:104A600036A3EF36A3F136E8FEF5E50225F9FF0D92 -:104A700003000D008825EFFF0D00400D0004E70244 -:104A8000B88F03CD39B88000CD35C706AA02FFFF25 -:104A9000A1A936A3A7360D00A40D0008E700A3A91D -:104AA00036C706A3360100C706A5360C00833EA50F -:104AB00036007509C7063D370500E913FFFF0EA54F -:104AC00036BE1100E82205B89003CD39C3833EA35A -:104AD000360174D9C3B89003CD3A26A02B00268B9B -:104AE0001E2C00CD34833EA336017403E9F0043C50 -:104AF0000F751E81FB0002751826A12000A3053743 -:104B000026A12200A3073726A12400A30937E9091B -:104B100000C7063D370100E9B6FEC706A33602000E -:104B2000C6069E36FFE8CBFDE81CD933C0A3853707 -:104B3000A38337A38737A38937B89103CD39B880CA -:104B400000CD35C706AA02FFFFE50025FF53E700A9 -:104B5000810E9A378000B89203CD3933C0E70EBE7C -:104B600008008E063834E88EE526C70604007D4B23 -:104B70008326EF34DFCD50830EEF3420C3F7069A3F -:104B80003780007432A9D007740CA90004740E3366 -:104B9000C0E70EE9DAFFFF068537E9D3FFFF06839A -:104BA00037E9CCFFC7063D370100E936FE83269A78 -:104BB000377FBBFF7FCD53E5000D00ACE700E5027A -:104BC00025FFFB25EFFF25FFF70D0100E702A1837D -:104BD000373B0646377FCDA185373B0648377CC437 -:104BE000C706A3360300BE1300E8FD03B89303CD48 -:104BF00039B89403CD39B89603CD39B89503CD397A -:104C0000BE0600E8E303E9D603833EA3360374013E -:104C1000C3BE1300E8D203B89403CD39C3B89403DC -:104C2000CD3A26A02B00268B1E2C00CD34833EA32C -:104C300036037403E9A8033C0D753E83FB00753908 -:104C4000E5020D0020E702B89303CD3AC706A3366C -:104C50000400BE0000E80CFCC6069D3680C6069E19 -:104C60003600C70633370200B89A03CD39E8FC0096 -:104C7000C7064C370000E96603C7063D370800E960 -:104C800061FD833EA336037509C7063D370500E97C -:104C900051FDE94A03833EA336047412833EA336D2 -:104CA00005740BCD34C7063D370700E935FDC7064F -:104CB000A3360600C6069E36FFB89A03CD3AB899C9 -:104CC00003CD3AB89603CD3AB89703CD39B89803D7 -:104CD000CD39B89B03CD39E918FDCD34833EA336D9 -:104CE000047718833EA336037508F7069B36000148 -:104CF0007509C7063D370100E9E8FCE9E102CD345A -:104D0000833EA336027709C7063D370100E9D3FC8D -:104D1000833EA336047705B89603CD39E9C00283F4 -:104D20003EA33603751026A10C00250700503D0454 -:104D3000007503E83600A1F33686E0E71EA3E336EC -:104D400081260B37000381260D377B7F830E0D37BD -:104D500048E814F3583D0400740926F7060C0020B7 -:104D6000007506B80100E97A02E986FCA1E536E79C -:104D70002EA1E736E73EA1D336A39C34A1D536A3B6 -:104D80009E34C326803E1C00FF752F26803E1E00E9 -:104D9000FF752726F7060C004000751BA1D13626AB -:104DA000A31A00A1D33626A31C00A1D53626A31E24 -:104DB00000B80A80E92C02E938FCFF069034BE0AEC 
-:104DC00000C606B63401F6069D36807505830EC210 -:104DD0003401CD34E90CFC833EA336037509C706C4 -:104DE0003D370500E9FCFBE5020D03000D00880DD1 -:104DF00000400D0004E702C706A3360500C6069E64 -:104E000036FFBE0200E8E101B88903CD3AB89A0343 -:104E1000CD3AB89903CD39B89703CD39B89803CDB9 -:104E200039E9BB01833EA33603740A833EA33604EB -:104E30007403E9AA01BE0600E8AE01B89503CD39B6 -:104E4000E99C01833EA336057403E99201BE02008A -:104E5000E89601B89903CD39E98401C7060F3705F3 -:104E600000E97B01E50225FFDFE702C706A336075D -:104E700000C7060F370500E96501E8D504C6069DA1 -:104E80003600C7069B360000C7060F370500C70669 -:104E9000A8020000C7064C370100E50225F9FF0D06 -:104EA00003000D008825EFFF0D00400D0004E70210 -:104EB000E967FCB89A03CD39F706F4330010750999 -:104EC000C70633370200E91601FF0E33377403E9D2 -:104ED0000D01FF068E34830EC23408C7063D37032A -:104EE00000E9FFFAC35250BAE000B80010EF585A78 -:104EF000C3C7063D370000E9E9FAFAE85404B88070 -:104F0000038EC026C7060400D82BB87F038EC026A8 -:104F1000C7060400E82C33C08EC0A1A736A3A9366B -:104F2000A1A936E700A1AB36E702C70605370000A6 -:104F3000C70607370000C70609370000C6069D36BA -:104F400000C6069E36FFC7069B360000C706A3367E -:104F50000000C7060F370000C706A8020000C706FA -:104F60004C3701008126AF36FFE7A1AF36E706BB1D -:104F7000FF7FCD53E87CF9E5560D0200E756FBC3F1 -:104F80008D3EC0538D36F038B90E008B1E303489FB -:104F90005C022E8B45028944062E8B0589440483CE -:104FA000C70483C610E2E8B880038EC026C7060493 -:104FB00000E251B87F038EC026C7060400B2523308 -:104FC000C08EC0C706A1360100C7060F370500C353 -:104FD00033FF8E06A6028B36A4022EFFA4A053E850 -:104FE0008CDBC3E848F7E9F6FF8E063834E807E1C2 -:104FF00026C7060400DF4FCD50C326C7060A0000AF -:105000000026FF260400CD34E9D4FFA1D13626398D -:10501000061A007522A1D3362639061C007518A180 -:10502000D5362639061E00750E26F7060C00400000 -:105030007405830E663740810EAF360010A1AF367F -:10504000E706833EA336027505CD34E956FB833E61 -:10505000A3360074B1833EA3360577AA26F6060A66 -:1050600000FF75A2E8FDDD50F6069336207503E9D2 -:105070008C0026A10C002507003D07007503E9768A -:10508000003D05007503E96E00F706E634188075EB -:1050900003E96A00F706E6340080743526803E296D -:1050A0000002752D5156578D363E348D3E2000B985 -:1050B0000600F3A65F5E59754526A12000A33E3485 -:1050C00026A12200A3403426A12400A34234E926CD -:1050D00000F706E6340800740B26803E19000074C1 -:1050E00003E91300F706E6341000741226A0280026 -:1050F000C0E80422C0740726C7060400FFFF582337 -:10510000C07403E9DDFE81269B36FFFE26A1200048 -:105110003B06D136751A26A122003B06D336751000 -:1051200026A124003B06D5367506810E9B3600016C -:1051300026A12000257FFFA3B83426A12200A3BA10 -:105140003426A12400A3BC348BC686C4A3C034D1AA -:10515000E680FC097403E8F6F5A105370B0607376E -:105160000B060937743E26A120003B06053775174C -:1051700026A122003B060737750D26A124003B0619 -:1051800009377503E91D0026A02800240F3C03748D -:105190001B3C00750F833EA336047410F7069B3644 -:1051A000000174082EFF94F853E933FECD34C7068E -:1051B0003D370100E92CF8833EA336057410833E89 -:1051C000A336017E0983EE162EFF942454C3CD34FA -:1051D000C326A10C003DFF7F740526FF260400E9CD -:1051E000FDFDA1F433A90088740BA9001075098B8B -:1051F0001E4337FFE3E99700C70635370500C706AA -:1052000043372852F706F43300087406C7064337BD -:105210001A52B88003CD39E9C5FDA9000874D9FF39 -:105220000E353775EDE93000A9000875CBFF0E3556 -:105230003775DF810EC234C000F6069D3680740FCC -:10524000810E9B360080C7060F370200E990FDC72C -:10525000063D370200E98BF780269E36FF7530F653 -:10526000069D36807420FF069434830E6637208EA8 -:1052700006303426F7060A000001740726810E085E -:10528000000001E90900C7063D370400E954F78131 -:105290000EAF360008A1AF36E706E50AA900807414 -:1052A0000E8126AF36FFF7A1AF36E706E949FFE9E1 
-:1052B0002DFDC70641370000BE2900E82BFDE91E81 -:1052C000FDCD34833EA336047709C7063D37010080 -:1052D000E910F7E909FDCD34C3C7069B360000E8A5 -:1052E0000CF58126AF36FFE7A1AF36E70681269B96 -:1052F00036FF7FE5020D010025EFFF25FFDFE70206 -:10530000BBFF7FCD5333C0A39D36A39F36E820F368 -:10531000E843F3830E9B3610C70699360000E8D2A7 -:10532000F5E5560D0200E756C706A8020000BE00CC -:1053300000E830F5C606A0360EB89C03CD39B8801B -:1053400000CD35C706AA02FFFFC706A1360100E956 -:10535000A5F606B88F03CD3AB89003CD3AB89103BD -:10536000CD3AB89203CD3AB89303CD3AB89403CD71 -:105370003AB89503CD3AB89603CD3AB89703CD3AEB -:10538000B89803CD3AB89903CD3AB89A03CD3AB854 -:105390009B03CD3AB87F03CD3AB88003CD3A07C31B -:1053A000F749F14EDF4FDF4FDF4FDF4FF851DF4F4F -:1053B000FA4F0B50D151DF4FDF4FDF4FDF4FDF4F41 -:1053C000E44E0600CD4A0400E44E1900AD4BFA004D -:1053D000824C0807094C1400244E6400D74DF40198 -:1053E000644EBC027A4EE803434E0200B34EF40111 -:1053F0005B4EF401E54E140006500650954CC15228 -:10540000C152FE4CDA4C0650065006500650B751B9 -:10541000B751B751B751B751B7510650D54A065099 -:105420001D4C0650834D1F4D1F4DED40FA40074166 -:1054300037372E3737202079792F79792F797920CE -:1054400030312E3930202030322F31372F3939206A -:10545000000000000000000000000000000000004C -:10546000000000000000000000000000000000003C -:10547000000000000000000000000000000000002C -:10548000000000000000000000000000000000001C -:10549000000000000000000000000000000000000C -:1054A00000000000000000000000000000000000FC -:1054B00000000000000000000000000000000000EC -:1054C00000000000000000000000000000000000DC -:1054D00000000000000000000000000000000000CC -:1054E00000000000000000000000000000000000BC -:1054F00000000000000000000000000000000000AC -:10550000000000000000000000000000000000009B -:10551000000000000000000000000000000000008B -:10552000000000000000000000000000000000007B -:10553000000000000000000000000000000000006B -:10554000000000000000000000000000000000005B -:10555000000000000000000000000000000000004B -:10556000000000000000000000000000000000003B -:10557000000000000000000000000000000000002B -:10558000000000000000000000000000000000001B -:10559000000000000000000000000000000000000B -:1055A00000000000000000000000000000000000FB -:1055B00000000000000000000000000000000000EB -:1055C00000000000000000000000000000000000DB -:1055D00000000000000000000000000000000000CB -:1055E00000000000000000000000000000000000BB -:1055F00000000000000000000000000000000000AB -:10560000000000000000000000000000000000009A -:10561000000000000000000000000000000000008A -:10562000000000000000000000000000000000007A -:10563000000000000000000000000000000000006A -:10564000000000000000000000000000000000005A -:10565000000000000000000000000000000000004A -:10566000000000000000000000000000000000003A -:10567000000000000000000000000000000000002A -:10568000000000000000000000000000000000001A -:10569000000000000000000000000000000000000A -:1056A00000000000000000000000000000000000FA -:1056B00000000000000000000000000000000000EA -:1056C00000000000000000000000000000000000DA -:1056D00000000000000000000000000000000000CA -:1056E00000000000000000000000000000000000BA -:1056F00000000000000000000000000000000000AA -:105700000000000000000000000000000000000099 -:105710000000000000000000000000000000000089 -:105720000000000000000000000000000000000079 -:105730000000000000000000000000000000000069 -:105740000000000000000000000000000000000059 -:105750000000000000000000000000000000000049 -:105760000000000000000000000000000000000039 -:105770000000000000000000000000000000000029 -:105780000000000000000000000000000000000019 -:105790000000000000000000000000000000000009 
-:1057A00000000000000000000000000000000000F9 -:1057B00000000000000000000000000000000000E9 -:1057C00000000000000000000000000000000000D9 -:1057D00000000000000000000000000000000000C9 -:1057E00000000000000000000000000000000000B9 -:1057F00000000000000000000000000000000000A9 -:105800000000000000000000000000000000000098 -:105810000000000000000000000000000000000088 -:105820000000000000000000000000000000000078 -:105830000000000000000000000000000000000068 -:105840000000000000000000000000000000000058 -:105850000000000000000000000000000000000048 -:105860000000000000000000000000000000000038 -:105870000000000000000000000000000000000028 -:105880000000000000000000000000000000000018 -:105890000000000000000000000000000000000008 -:1058A00000000000000000000000000000000000F8 -:1058B00000000000000000000000000000000000E8 -:1058C00000000000000000000000000000000000D8 -:1058D00000000000000000000000000000000000C8 -:1058E00000000000000000000000000000000000B8 -:1058F00000000000000000000000000000000000A8 -:105900000000000000000000000000000000000097 -:105910000000000000000000000000000000000087 -:105920000000000000000000000000000000000077 -:105930000000000000000000000000000000000067 -:105940000000000000000000000000000000000057 -:105950000000000000000000000000000000000047 -:105960000000000000000000000000000000000037 -:105970000000000000000000000000000000000027 -:105980000000000000000000000000000000000017 -:105990000000000000000000000000000000000007 -:1059A00000000000000000000000000000000000F7 -:1059B00000000000000000000000000000000000E7 -:1059C00000000000000000000000000000000000D7 -:1059D00000000000000000000000000000000000C7 -:1059E00000000000000000000000000000000000B7 -:1059F00000000000000000000000000000000000A7 -:105A00000000000000000000000000000000000096 -:105A10000000000000000000000000000000000086 -:105A20000000000000000000000000000000000076 -:105A30000000000000000000000000000000000066 -:105A40000000000000000000000000000000000056 -:105A50000000000000000000000000000000000046 -:105A60000000000000000000000000000000000036 -:105A70000000000000000000000000000000000026 -:105A80000000000000000000000000000000000016 -:105A90000000000000000000000000000000000006 -:105AA00000000000000000000000000000000000F6 -:105AB00000000000000000000000000000000000E6 -:105AC00000000000000000000000000000000000D6 -:105AD00000000000000000000000000000000000C6 -:105AE00000000000000000000000000000000000B6 -:105AF00000000000000000000000000000000000A6 -:105B00000000000000000000000000000000000095 -:105B10000000000000000000000000000000000085 -:105B20000000000000000000000000000000000075 -:105B30000000000000000000000000000000000065 -:105B40000000000000000000000000000000000055 -:105B50000000000000000000000000000000000045 -:105B60000000000000000000000000000000000035 -:105B70000000000000000000000000000000000025 -:105B80000000000000000000000000000000000015 -:105B90000000000000000000000000000000000005 -:105BA00000000000000000000000000000000000F5 -:105BB00000000000000000000000000000000000E5 -:105BC00000000000000000000000000000000000D5 -:105BD00000000000000000000000000000000000C5 -:105BE00000000000000000000000000000000000B5 -:105BF00000000000000000000000000000000000A5 -:105C00000000000000000000000000000000000094 -:105C10000000000000000000000000000000000084 -:105C20000000000000000000000000000000000074 -:105C30000000000000000000000000000000000064 -:105C40000000000000000000000000000000000054 -:105C50000000000000000000000000000000000044 -:105C60000000000000000000000000000000000034 -:105C70000000000000000000000000000000000024 -:105C80000000000000000000000000000000000014 
-:105C90000000000000000000000000000000000004 -:105CA00000000000000000000000000000000000F4 -:105CB00000000000000000000000000000000000E4 -:105CC00000000000000000000000000000000000D4 -:105CD00000000000000000000000000000000000C4 -:105CE00000000000000000000000000000000000B4 -:105CF00000000000000000000000000000000000A4 -:105D00000000000000000000000000000000000093 -:105D10000000000000000000000000000000000083 -:105D20000000000000000000000000000000000073 -:105D30000000000000000000000000000000000063 -:105D40000000000000000000000000000000000053 -:105D50000000000000000000000000000000000043 -:105D60000000000000000000000000000000000033 -:105D70000000000000000000000000000000000023 -:105D80000000000000000000000000000000000013 -:105D90000000000000000000000000000000000003 -:105DA00000000000000000000000000000000000F3 -:105DB00000000000000000000000000000000000E3 -:105DC00000000000000000000000000000000000D3 -:105DD00000000000000000000000000000000000C3 -:105DE00000000000000000000000000000000000B3 -:105DF00000000000000000000000000000000000A3 -:105E00000000000000000000000000000000000092 -:105E10000000000000000000000000000000000082 -:105E20000000000000000000000000000000000072 -:105E30000000000000000000000000000000000062 -:105E40000000000000000000000000000000000052 -:105E50000000000000000000000000000000000042 -:105E60000000000000000000000000000000000032 -:105E70000000000000000000000000000000000022 -:105E80000000000000000000000000000000000012 -:105E90000000000000000000000000000000000002 -:105EA00000000000000000000000000000000000F2 -:105EB00000000000000000000000000000000000E2 -:105EC00000000000000000000000000000000000D2 -:105ED00000000000000000000000000000000000C2 -:105EE00000000000000000000000000000000000B2 -:105EF00000000000000000000000000000000000A2 -:105F00000000000000000000000000000000000091 -:105F10000000000000000000000000000000000081 -:105F20000000000000000000000000000000000071 -:105F30000000000000000000000000000000000061 -:105F40000000000000000000000000000000000051 -:105F50000000000000000000000000000000000041 -:105F60000000000000000000000000000000000031 -:105F70000000000000000000000000000000000021 -:105F80000000000000000000000000000000000011 -:105F90000000000000000000000000000000000001 -:105FA00000000000000000000000000000000000F1 -:105FB00000000000000000000000000000000000E1 -:105FC00000000000000000000000000000000000D1 -:105FD00000000000000000000000000000000000C1 -:105FE00000000000000000000000000000000000B1 -:105FF00000000000000000000000000000000000A1 -:106000000000000000000000000000000000000090 -:106010000000000000000000000000000000000080 -:106020000000000000000000000000000000000070 -:106030000000000000000000000000000000000060 -:106040000000000000000000000000000000000050 -:106050000000000000000000000000000000000040 -:106060000000000000000000000000000000000030 -:106070000000000000000000000000000000000020 -:106080000000000000000000000000000000000010 -:106090000000000000000000000000000000000000 -:1060A00000000000000000000000000000000000F0 -:1060B00000000000000000000000000000000000E0 -:1060C00000000000000000000000000000000000D0 -:1060D00000000000000000000000000000000000C0 -:1060E00000000000000000000000000000000000B0 -:1060F00000000000000000000000000000000000A0 -:10610000000000000000000000000000000000008F -:10611000000000000000000000000000000000007F -:1061200090EAC01500000000000000000000130607 -:00000001FF -/* - * The firmware this driver downloads into the tokenring card is a - * separate program and is not GPL'd source code, even though the Linux - * side driver and the routine that loads this data into the card 
are. - * - * This firmware is licensed to you strictly for use in conjunction - * with the use of 3Com 3C359 TokenRing adapters. There is no - * waranty expressed or implied about its fitness for any purpose. - */ - -/* 3c359_microcode.mac: 3Com 3C359 Tokenring microcode. - * - * Notes: - * - Loaded from xl_init upon adapter initialization. - * - * Available from 3Com as part of their standard 3C359 driver. - */ diff --git a/firmware/Makefile b/firmware/Makefile index 0d15a3d113a..344713b1166 100644 --- a/firmware/Makefile +++ b/firmware/Makefile @@ -26,7 +26,6 @@ fw-shipped- += acenic/tg1.bin else acenic-objs := acenic/tg1.bin acenic/tg2.bin endif -fw-shipped-$(CONFIG_3C359) += 3com/3C359.bin fw-shipped-$(CONFIG_ACENIC) += $(acenic-objs) fw-shipped-$(CONFIG_ADAPTEC_STARFIRE) += adaptec/starfire_rx.bin \ adaptec/starfire_tx.bin @@ -86,7 +85,6 @@ fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \ qlogic/12160.bin fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin fw-shipped-$(CONFIG_INFINIBAND_QIB) += qlogic/sd7220.fw -fw-shipped-$(CONFIG_SMCTR) += tr_smctr.bin fw-shipped-$(CONFIG_SND_KORG1212) += korg/k1212.dsp fw-shipped-$(CONFIG_SND_MAESTRO3) += ess/maestro3_assp_kernel.fw \ ess/maestro3_assp_minisrc.fw diff --git a/firmware/WHENCE b/firmware/WHENCE index 182ecb6c275..8388f02de2b 100644 --- a/firmware/WHENCE +++ b/firmware/WHENCE @@ -89,18 +89,6 @@ Licence: Allegedly GPLv2+, but no source visible. Marked: Copyright (C) 2001 Qlogic Corporation (www.qlogic.com) -------------------------------------------------------------------------- -Driver: smctr -- SMC ISA/MCA Token Ring adapter - -File: tr_smctr.bin -Info: MCT.BIN v6.3C1 03/01/95 - -Original licence info: - - * This firmware is licensed to you strictly for use in conjunction - * with the use of SMC TokenRing adapters. There is no waranty - * expressed or implied about its fitness for any purpose. - --------------------------------------------------------------------------- Driver: kaweth -- USB KLSI KL5USB101-based Ethernet device @@ -567,32 +555,6 @@ Found in hex form in kernel source. -------------------------------------------------------------------------- -Driver: 3C359 - 3Com 3C359 Token Link Velocity XL adapter - -File: 3com/3C359.bin - -Licence: -/* - * The firmware this driver downloads into the tokenring card is a - * separate program and is not GPL'd source code, even though the Linux - * side driver and the routine that loads this data into the card are. - * - * This firmware is licensed to you strictly for use in conjunction - * with the use of 3Com 3C359 TokenRing adapters. There is no - * waranty expressed or implied about its fitness for any purpose. - */ -/* 3c359_microcode.mac: 3Com 3C359 Tokenring microcode. - * - * Notes: - * - Loaded from xl_init upon adapter initialization. - * - * Available from 3Com as part of their standard 3C359 driver. - */ - -Found in hex form in kernel source. 
- --------------------------------------------------------------------------- - Driver: PCMCIA_PCNET - NE2000 compatible PCMCIA adapter File: cis/LA-PCM.cis diff --git a/firmware/tr_smctr.bin.ihex b/firmware/tr_smctr.bin.ihex deleted file mode 100644 index 6797451ffa9..00000000000 --- a/firmware/tr_smctr.bin.ihex +++ /dev/null @@ -1,477 +0,0 @@ -:10000000BC1D123B63B4E900001F000101000205A2 -:10001000010006030100040901000A070100080BA2 -:1000200001000C000000000F0100100D01000E1374 -:10003000010014110100120000050015010016193D -:1000400001001A1701001800000E00000001000056 -:100050000004001B01001C0000070000000F00004E -:10006000000B001D01001E0000080000000200003F -:10007000000C000000060000000D0000000300005E -:10008000000A00000009000478C6BC0194049380B3 -:10009000C84062E9DA1C2C1555555555555555582B -:1000A0000BE9E5D595C19D77CEBBA06E1C05F67713 -:1000B000C602FA9670E81DC0170E02FA587DC05F9E -:1000C00072CEECA4C384907A30CD8D7919E76C247C -:1000D000279C08390738A84A4CEA4D989B244CC005 -:1000E00026D3E7545A4DF24C0C13234990326EA498 -:1000F000DF9371137726E126F8260C4C12260809A7 -:10010000828260A9307936B0B2A8A772648F9B331F -:1001100033F9B839D51173AA75265D2651932A494A -:1001200094C99589BC4DC89B809BA099064C862696 -:10013000589BA49B9937626C679B3330BF366661CE -:10014000BF36ECC5BD66825A5031D59D9818293C02 -:1001500098864C17263E2CB8693B492EB408431AA2 -:10016000A4F9B351F110F343CD086F6379B3330EA3 -:100170001398499804DA7CE05279310C982E4DACF2 -:100180002C8414EE4CFE675EE49A7529D7A9353AA3 -:10019000945BD59B58B4AF7566AF14A9EF40952515 -:1001A00008B9AD42FCD8D98C330E1398661E45AC05 -:1001B000B00C42D3CCA61262DEB4B180497DA2DE7F -:1001C000B418C02484E654F5834601681A630CC64B -:1001D0001264FA4C351C2C0EAAAAAAAAAAAAAAAA88 -:1001E000AAAAAAAAAAADD70270E04CF3A1C1D5C0B1 -:1001F0003CB96939604E58770267933C99E4CF382F -:100200001C972E401B903146A35E0E88346A35E061 -:10021000E8AA351AA9F51546A3EA7D4AA351AA9F73 -:100220007054A6572EB4CDC8A30CC1DAC6E1CB7A60 -:10023000D41C68FFCF55A8C02D851117442A300B58 -:100240004A88C24DB520D5260169516952195260BC -:100250001695168296549805A545F3DD6AF9281877 -:10026000EF003030514E445D12D143E6126F9EBA1A -:10027000CCDF25031DE006060A30CCA9EB2D008655 -:10028000A612654F56D665495F3DE837C940C77825 -:100290000181828C33184980AE40C518059C6D18C9 -:1002A000660EF3A0C61262DEF504B4AC6BC61991FB -:1002B0007305482E72948073A1C8473666642F3642 -:1002C0006664079902918E72D10F9D063173A0C3A7 -:1002D000516A1A20BF3A0C2C7387435E600223FCDC -:1002E000E0D635EF9EF5EF92818EF0030305186698 -:1002F00045CC0B482E700A4039D0E4239B3332178B -:100300009B333203CC8548C73814A5CE297ED280D2 -:10031000A1A8B448882FCE830B1CE1D0D7980488BD -:1003200087CE963173A58FF38358D7BE7B82AF9269 -:10033000818EF0030305186645CC1520B9C8290045 -:10034000E743908E6CCCC85E6CCCC80F3205231C82 -:10035000E450D45A17882FCE8310F9D023173A04CB -:1003600035E600221639C3A3FCE0D635E0BFF41809 -:10037000F22D4D43516E5A221F30D417E74191732D -:1003800005482E776900E743908E6CCCC85E6CCC34 -:10039000C80F3205231CEF4C4E0604C99E0BFF41CB -:1003A0008F22D4D43516E5A221F35A82FCE8322EEE -:1003B00060A905CE1348073A1C8473666642F3664B -:1003C000664079902918E70A989C0A9EB5125C7CD1 -:1003D000C3318B982A7CD3ED38E9D34E74ED499E16 -:1003E0000BFF418F22D4D43516E5A22DEB45338F78 -:1003F000FCF7A05F25031DE40E060A30CC0CF3EBDE -:1004000040DE61A870920A00E1241E00E1241E0073 -:10041000E1241E00E1241E00E1241E010F982A0B96 -:10042000F3A0C8B9A2A4173A6900E743908E7548B3 -:100430005E706901E6005231CC1814A5CC09829493 -:10044000730CA091F525CC070684849F30A2A47D6F -:100450005075A665014A8EB4CCC435547566A49710 -:100460007A895053138019E3495C6DCEA940350653 
-:1004700078D25706F1B32A8D972362925D69991C51 -:100480006A36E6CD46126F9EE1ABE4A30CC0DEAC4B -:10049000D40D281BD012A500F84BAD332806A0DEE2 -:1004A00014973A895DC00DE30690925D699866B92C -:1004B0001995E4A8CF9D331849BE7B86AF928C3343 -:1004C00024140CF4832421C270BFF418F22D4D4380 -:1004D000516E5A221F32A82FCE8322E605A4173A66 -:1004E0006900E743908E75485E706901E642A46337 -:1004F0009802294B9A2978E9405313818132678207 -:10050000FFD063C8B5350D45AE50087CE0D05F9D87 -:100510000645CC01A4173A6900E743908E75485E02 -:10052000706901E659A463981C52973B30528E7D46 -:100530002A091F51EBA4A40AB99487AEC531380229 -:10054000FFD063C8B5350D45AE50087CEA20BF3AF0 -:100550000C8B9A16905CE9A4039D0E4239D5217943 -:1005600095480F300A918E60EB297300095404CA34 -:1005700082655265E4CA226572650932E099724C5F -:10058000C4E00BFF418F22D4D43516B94021F38A41 -:1005900082FCE8322E60A905CE9A4039D0E4239D32 -:1005A00052179954619901E640A4639804B1849864 -:1005B00018EF2D0305313802FFD063C8B5350D455E -:1005C000B968887CE0505F9D0645CC81482E713427 -:1005D0008F48014815210521E90A5203CE5A4639B0 -:1005E000CF478E60AB1AF35343EB3524B81B30076B -:1005F000098A742F7E41741E1D0D874649D595D1F9 -:10060000D5D5BBA94E829D053A0A7414E829D0427B -:10061000745BCE50C40745BCE20C40745BCE8304CF -:10062000F9954D13635E6F313BA08BA2C5398D7870 -:100630003A22A0006BC1D1546016D991A2E7438C35 -:1006400024DC1CE05117396B3BCC4B422E6B50BF66 -:100650003636654F7A185525789823E7503EF38152 -:100660004C026D3E7153AF78A9D4A629B1BCD9997B -:10067000B28E628F222E7516B0B2AB23281654525A -:1006800031BCD999B28E6619022E7516502CA9C8A4 -:10069000C6F520D3E47F4F9C0AD6167F90EE4CEB34 -:1006A000CFE288BA2F4286AEBDE5A7529F93637909 -:1006B000EB3308F9945247CD99256F3A0C13E65560 -:1006C000344C5A4DB52395A548115A0A4395AC2C84 -:1006D000BA240549B1BCCAA7726C6BC5BDE83169C3 -:1006E000525D0612653EB1504C7D4FAC0A300B3660 -:1006F0006411738A838E75129F7BD29958EE822E75 -:1007000077A0E39D5D4FBC2A532953DE9324BAB3EF -:1007100036AA4AC679D4B9DE625A11735050BF372F -:10072000366F1323BA0C24CEBDE2A752B28E6B6093 -:10073000622E751330ACA059CA646379B333651C5B -:10074000CC32045CEA2CA059DF231BD4835247DD52 -:100750007996D49EB3524BA25A1A8D5D7B82A752D2 -:10076000B28E6619022E7516502C8C321D7B8EA708 -:1007700052B1BCD9999804DA7CE2ACFE6619022E1B -:100780006550BF336664FE7418864C1726D6165221 -:100790003918DE7ACCC23E651491F36649086E833F -:1007A0000933AF31ED0D9D0612622A318D6DE7419F -:1007B000827CCAA68987092E29B1AF1039D66497E1 -:1007C000301D42759344028C24D27AB350F68905C9 -:1007D000435E6198C02C92253C8B2489490549E7EA -:1007E0000CB98498B7AD3344AE5A5186609F38A98E -:1007F000A26C6BC48EF45E49461262DEB4CD215CFD -:10080000B4A30CC13E7229A26C6BC6126247F0E819 -:10081000C33204354092A4828810927CCBD42FA49A -:1008200002118498B7AD3344AE5A5186609F38A9FF -:10083000A26C6BC48EF45E494408493E65EA17D247 -:100840000108C24C5BD699A42B9694619827CE459B -:10085000344D8D78810927CCBD12286C58AFB6F382 -:10086000A0C13E655344D8D7928E7D4BC2FA612613 -:10087000063AB36B030549E70CB96F5A66955CB449 -:10088000A30CC13E7029A26EA4DF9371137726E1F9 -:1008900026F826C6BC9473F92F0BE9849818EACC85 -:1008A000EC0C15279C32FF3D56AF928B7AD335D591 -:1008B000CB4A30CC13E7029A26C6BC947341979179 -:1008C000F483CE0420628B0516498C24C0C7569051 -:1008D000C0C15279C32E5BD5A672D294FAAD58C866 -:1008E000FA9F54B3324BB954A651866B79D0609FAE -:1008F0003205344D8D7A4D1E7AB35100A93D59A869 -:100900007B4482A1AF4A8D52A95241494F3A2E40B1 -:10091000A49950BE90085279C32E61262DEB4CD07D -:1009200015CB4A30CC13E7029A26C6BC48FE1D25DB -:1009300046A954A920A4A79D1720524CA85F48049B -:100940002309316F5A6680AE5A5186609F3814D1A0 -:100950003635E4A79D1720524CA2450D8B15F49116 
-:10096000DE8BC928C24C5BD699A95CB4A30CD6F324 -:10097000A0C13E640A689B1AF16D4CAA92E03694BD -:10098000709B297813AEB3AA85D44375093AC9EB95 -:100990003524B81B328E13487E4EFD40FD40FD408D -:1009A000FD40FD40FC13F421F917458A300B335FFD -:1009B00083A22A300B335F83A2A8C02DB32070928C -:1009C000139ADE741827CCAA689B1AF70745518042 -:1009D0005B66470738A823E751113FE0E8854601E9 -:1009E0006D990612654F7A2024BAB33215257BAD76 -:1009F0003378AE0E73D047CEA730CC44FF83A2A885 -:100A0000C02CD991C1D11518059B3208BA2C518040 -:100A100059B3207092E29889FDBCEE1890FC8BA22D -:100A2000C52B0D783A22A561AF074551805B66441E -:100A30009EB3524B83ADC709BE1F9F74655D0A17F5 -:100A40007CABA0C24C3849122E384907A30CC13EDA -:100A5000655344D8D7ADE700324B9B33344A03008B -:100A60009D25CE8324B819998C02124BA199D8C028 -:100A7000274973CFF93CF47CE79804E92E7F39E3EA -:100A80004F4653C06013A4B9E53C03DE8F9CF300CE -:100A90009C6FCF3E85F9A336021E6038923E631AE2 -:100AA000109FCF181092BCD0A40CDCC00F9C9734C0 -:100AB00062B6E7F3F3A5CF1842341CC2CAFA8E68B7 -:100AC0005206AF3CA30DBF9E50E1D173CAE03AFC81 -:100AD000C1091A1E6A5C5B8E634E7773CC6167DD59 -:100AE000E66C48D1F31B24695108D4421BF467D14A -:100AF000804E2FD08CD83009C21E801C46013A4748 -:100B0000D031A106013A7F4630211804E95E8429DC -:100B100000C027CDD0007C9804F92E84628C027D21 -:100B2000BA3E7E4C027D2E8C61083009F41D0165B1 -:100B300073009F51D085201804FABD194618C027AC -:100B4000DFD194384C027D174657013009F5FA0180 -:100B50000906013E87A14B88C027DC740D39D300FC -:100B60009F73D030B39804FBBD06C483009F47D069 -:100B70003648CC0271BF3F9A17E63F0821E692A49F -:100B80008F9A1031A7F310B184AF3AACDCF773F24F -:100B90005CC62ADB9E7E7E97310863D0737B43A8B8 -:100BA000E63D34EAF3E315BF9F185F45CFE89F5F4A -:100BB0009A5B03D0F3D3CE371CD00FBB9E68783B33 -:100BC000BCCA31E8F9A20212A27351086FD1F346F0 -:100BD0000138BF40FC23009C21E84951804E91F42C -:100BE000210319804E9FD0216306013A568C02746E -:100BF000FE75495E63D34A54423513A7D1804E95A2 -:100C0000E81E9A4C027CDD1BB9E6013E4BA062A3B4 -:100C1000009F6E8CFCF3009F4BA04218CC027D0716 -:100C200043DA13009F51D03D349804FABD1C628C06 -:100C3000027DFD1C6173009F45D1F44E6013EBF4FF -:100C400025B033009F43D1A79C1804FB8E8403E991 -:100C5000804FB9E843C13009F77A0A319804FA3E67 -:100C6000844041804E82E7418709230423009D058B -:100C7000CE961C248C108C0274173A043849182123 -:100C80001804E82E7450E12460846013A0B9D411D4 -:100C9000C248C108C0274173A82384918211804EA5 -:100CA00082E7528E12460846013A0B9D401C248C66 -:100CB000108C0274173A090E12460846013A0B9836 -:100CC0006A1C24B0E11804E82E6B50E1258708C0A7 -:100CD000274173054384961C23009D05CCAA1C2440 -:100CE000B0E11804E82E70687092C3846013E54484 -:100CF000F9409D05CE5A1C24B0E11804F9D13E708C -:100D000027CF13E5442CA042CB89F2213A0B9C0A51 -:100D10001C24B0E11804F9D10B3810B3C4213936C2 -:100D20005C42C8842B79D061C2741524BAD331E5F2 -:100D300059082908E066634295128100290BC151C8 -:100D400024B81999902290B418A0914101414141D1 -:100D50005283CA4028682908BA16109C990B5694E9 -:100D600090521574C0271A2AD29025D3009D28AB23 -:100D70004A42174C0270D4842E9804E12A42174C40 -:100D8000027082904BA60138514842E9804E15A46A -:100D90002174C0270FA412E9804E82AC80ACA0ACB5 -:100DA000A959E5644565CAC84ACE0ACE4ACE95918E -:100DB000959495932925C0CCCC88A4975636647217 -:100DC00090548A9C4508B9B766129309C9B2748ECB -:100DD000BA6013E5348EBA6013E4748EBA6013E51A -:100DE000691D74C027CA291D74C027CED225D3001F -:100DF0009F38A44BA6013E5E912E9804F915225D02 -:100E00003009F3E912E9804F905225D3009DC5487F -:100E100025D3009C45CECD09C9B21A44BA6013E768 -:100E2000348974C0271C27B79C80C2D776599B93FE -:100E30000C64C31D1BF4454BC7C63A37E8814BC74A -:100E4000C63A37E8914BC7C632618EB3BCC34A225B 
-:100E5000E6B5249771C987B431AE73A2CF39D25D9C -:100E6000044442C0D6DE710616BBDBCE830C64C3DD -:100E70001D311304F9954D133293635E6614CC292A -:100E80002A5330A6614CC299853A72CCC299850624 -:100E90001BB30A661414249985330A08B186614C81 -:100EA000C2842168733B30A661414EA5985330AC93 -:100EB0005976614CC2B08DD6614CC2B02CF6614CF3 -:100EC000C2B18CA5985330AC0F24CC2998560F286A -:100ED0006615921A1985330ACA850CC2998565C3AD -:100EE000D985330ACE7086614CC2B397710C993B99 -:100EF000CC83580BEA779D064ABE047460E0D14E5D -:100F0000384C3EEE3EEE3EEE3EEE30BBCAE11F7781 -:100F10001F771F771F7727708FBB800E11F771F730 -:100F20007C6F3CB33602FB8DE655707F2D246955EE -:100F30004F58A9231F54F78A95252B750CCCAC5616 -:100F400051CC51E445CEA21239C0A0AF566A497FB8 -:100F5000028C09F80BEBAF56766752B28E69A71177 -:100F600073A8B1BCCAA0A936502C98E70AF566A4AC -:100F700097E25A3027BAF7834EA5330A66158DE6F5 -:100F80005539D2A7AC546016701B728E628F222E18 -:100F9000751602FB8DE60A953D62A300B701B553B5 -:100FA000DE2A5494ADD43332B15947314791173AC0 -:100FB0008848E702B017DC679D4B8DE752AA7BD4C7 -:100FC000AA92BDD699BC5602FB8CF36666C6F36640 -:100FD0006662992AF8186870B08A0D5555555552B1 -:100FE00032E1405C380BEA9B87017DC05F7017DC03 -:100FF00005F5DC9B017D614D80BEA77982A21F5063 -:10100000152A8F8B1CE5A5138458E702915405021D -:101010004BBD221A947F9C1AC05F421A21D180597D -:10102000C06D1C2C0A83555555555555555555556C -:1010300055541CB85C6E179C2F385E70E7B85E7014 -:10104000BCE179C2F385E70BCE179C299C299C292A -:101050009C230F5814EE357726219305C9B017D27B -:101060001D188A219305C9B017D187AC0A740FAE39 -:10107000F55A82A3E43A3114BBD7599974A21930B6 -:101080005C9B017D187AC0A740F843D4638925D0C2 -:1010900010D61C6A10F5558925D151661F51F5915E -:1010A000492E8915986AA3E08A9465640E1317384F -:1010B000A8864C1726C05F461EB028631F087A8C8E -:1010C0007124BA021AD00D421EAAB124BA2A2D31B7 -:1010D000F51F587492E8875A6352DEF451694A3E0C -:1010E00009694650F0E131730545BD598D8B4A7C45 -:1010F000D3ED38E9D34E74ED443260B93602FA5B71 -:10110000DE8A2D29D0E121F5A39221F219305C9BD2 -:10111000017D21F5A0C6016701B445CEA51239D4E1 -:101120001C05F440A1C2C3506AAAAAAAAAAAAAAAE4 -:10113000AAAAAAAAAAAA81AF869F191BE781F3656A -:10114000F280BE7017DFDF380BEB0DC380BEA70F38 -:10115000954F5A94C02CD8B1A7CE5A1173A83AC251 -:10116000CCB63017DC6F35A9804DA7CE2A1879C5CB -:1011700049DE61A822E75033F9986408B99542FC2A -:10118000CCD9953D62A248D448E70288B9C1A0E312 -:101190009D4E62E6CCC66BCE8310C982E4DAC2C82B -:1011A0001EC3B93602FAA9EB4E3030FA0DF0A9EBA6 -:1011B00040B90FAA7AD2C2C8FAA7AD410A47D53DB5 -:1011C00068ACF1F54F5A97547D4FA8AA551F11737B -:1011D0005AB017DE5D59A925D0552A46BCB822AEB3 -:1011E00045293E14FAE19994CA4ABE3DD699925DCA -:1011F0001517C8D7DC15178A401F0A9EACC9654968 -:101200005C1D10684A3E5BDE83169580BE91745863 -:10121000A4007C38E7563017DF75A6649745209DFB -:10122000035F70545E291DF0A9EACC865495C1D1A4 -:1012300006830FAA7BD0654945BDE962D291DF04E0 -:101240005D16291C7D4FAC1A471AA9F5676653280D -:10125000B7BD2C5A523BE3DD59A925D1A8AC086B88 -:10126000EE08ABC5202F854F566675495C1C181DCE -:1012700081C26405F080BE355CD017C255F0957C04 -:10128000255F080BE1017C7BAB3524BA1055931A1E -:10129000FB822AF148D7C2A7AB31B2A4AC639D4A06 -:1012A0008D7C7BAB3524BA1054308D7DC11578AC64 -:1012B0006F5A94601AE379D4AA4F854F5666D54980 -:1012C00058C73A9549F045D1629486BC1D13D29017 -:1012D000FFCF7A83F25031DE006060A11735A85F3E -:1012E0009B1B3707441A300B380DBC1CE0D047CE8F -:1012F000A0AA7AA1986A92953D6831805B80DAA9AC -:10130000EF41952516F7A58B4AC679B333602FAA0E -:101310009EB15180599ECAA7AC0A300B67B2ADD5B9 -:10132000DA925D17A300B32D956E08A958A1173A5C -:101330008B017D54F78E9525081CE05602FBC1D128 
-:10134000151805926B3C1D1228C02CA56C11701746 -:10135000B2384D80BEE02FB4EC4AEDB39E02FB8064 -:10136000BEE02FB139933E6DE710609F32A9A26CA9 -:1013700005F440E60A953D6A2300B380DAA7D62A31 -:10138000030D7017D22E76294FBC54A6516F7A5890 -:10139000B4AC05F48BA2F40E350D492EB4CC18A5CF -:1013A000C8F84A9723E1052E47C28A5C8F85697287 -:1013B0003E1F4AC3551F5643328CA35E60A845CEDC -:1013C0000D602FA3849DD8F017D22E0E1B2384D836 -:1013D0000BEB89F380BEE02FBB3985DF2203E701E9 -:1013E0007DC05F7017D11738145BD6A2740D4B7A8D -:1013F000B33196946BCC3523D749481573290F5DCB -:101400008AC05F4D79843580BE881CC3529F59685D -:10141000C02CE036AA7BCD4A92BEF3814A7D5B594F -:1014200094CA1C24EEC780BE881CC3529F5968C052 -:101430002CE036AA7BCD4A92BEF38143849C7B3854 -:101440000BEBAF70D4EA53009B4F9C5430F38A945B -:10145000FAB6B3299422E61A85F9B05993F9D2C4A1 -:101460003260B936B0B390D977261C2722E896B4FB -:1014700023EA9EB511805965862073968D79AD5803 -:101480000BE917448A4A07D77A82A190FAEF0154F0 -:10149000BA50D4591E2CE9F38A99856B0B23159702 -:1014A00072611730D42C738748AA028125DE910D12 -:1014B0004AC05F7ED280A53EB2D0C86B80BE881C79 -:1014C000EA0917441A371A917458A371AF074454A4 -:1014D0006E35E0E8AA640F90FAD06300B380DA2C8E -:1014E000738748AA028125DE910D4AC05F48BA275A -:1014F000A300B701B74F9CB46BCC3516F566632DCE -:10150000291EBA4A40AB99487AEC508B9C0822FCC1 -:10151000F9B2553D62A92351239C0A3C730D445CEA -:10152000E15071CEA11FE7156B0B25ED0B93602FDA -:10153000AA9EAC3665495F7A2050087FEF3914497E -:10154000011181046040CC59C0AD23EB41B081F260 -:101550003A41AA5043E4D48654A087C152CA9301A9 -:1015600032549D2402000052AF1646A7916708B47A -:101570000451F16519B46E2DC0AD490092571B742A -:10158000455F2351B7440A1006A36E8B6B081F19E1 -:10159000D1E680828054042A4591A9E459C22D01E4 -:1015A000140450D3FC558461D980512FE21F465F4B -:1015B00040E020154ABC591A9E459C22D01148CBC8 -:1015C000E81408015415E2C8D4F22CE116808A46CA -:1015D0005F527CD9A8F888D05A3CD25C5B80DAA7ED -:1015E000D65A0886A45D17A0C3522E88A8221F537E -:1015F000EADACCA650E127763C05F54FAB6B329981 -:1016000043849C7B380BE927ACD492E00EDA384D4A -:1016100080BEE67D50BA51AE66EFBCDC7B871E0211 -:10162000FA93E6CD47C443CD0F349DA300B05501D6 -:10163000AE038404CE01D0E17002800E89E9221F3E -:10164000E0E896B011F4C2CE036A442DC06D48059F -:10165000B80DA300B776D5DEB150DC7D77BC54BAA7 -:10166000527F5814340F9AF381580BEAEF581460E4 -:1016700016A56C2EF7814BA56F7D5DEEB52E95807E -:10168000BEF073BD047CEAFEEB4CDE2953DD6A54E8 -:1016900094A9EA0A8C02D64C3C05F400EACD56AF78 -:1016A000C047D29C8D29CAE02FAEBD75999D4AF9DD -:1016B000EF517C940C77801818292AF8E0E8AA30BA -:1016C0000B2A987C1D1151805954C351F51B3324AA -:1016D000BB82A5195C1D1028C02C9AC7C1D1228CD1 -:1016E00002C994645C0CD68E13602FB80BEA30E309 -:1016F000C05F48DC780BE800E3C05F6C38D52E355E -:101700004F5A8A61AA9F561B32994642C8010C451E -:10171000CEA517E6C6CEA9EB151646A24738144348 -:101720002622E73D602FAA9EB512E07F017DE3E708 -:101730000293F995445CE5A0E39D4A7F9C54A9EB94 -:10174000510546B9FCC01B222E64542FCD46CCA7B0 -:10175000D586CCA65055C645CE5A0E39D4A7F9C564 -:101760004A9EB5118059C06DCFE600D9117322A1F0 -:101770007E6A36653EAC366532B017DD3E72D27990 -:10178000310C982E4C20732A8FF38AADE741827C6E -:10179000CAA689B5859FB0F017D51F5454251AA83D -:1017A000FF2A946511D74944D5CCA055D8AE0E88F0 -:1017B0001460164D6322E07286384D80BEE02FB86B -:1017C0000BEE02FB8138F017D7D71E02FAFAE3C0FE -:1017D0005F4C85900218C85B80DA300B701B4C227E -:1017E000D34C33038C2E4C4326D0F56366D095A79B -:1017F000CE45330AD61642386EE4CEBD592CD2AB54 -:10180000BA949DE61AB017D54F5A8B091A88B9C5F4 -:10181000424730D43216728865BD599925A5602F8C -:10182000B860F308B74A1A8FAB0D994651AF38A884 
-:101830008E9065135218A054B1422E61A848E72D2E -:1018400016F7A805A5602FA475D251357328157613 -:101850002B83A20518059358C8B806286384D80BB3 -:10186000EE02FB80BEE02FA043A7017D4CE3C05FEA -:101870007017DC05F4642DC06D1805B80DA5BD6AA0 -:101880002386AA9EB511A46AA3EA8A8D23E117389C -:101890003469719845A6986A3EAC36651946BCE233 -:1018A000A23A41944D48628152C516F7A88B4A541A -:1018B000F5A88C02DC06D1039CB4A9EE0A95252A72 -:1018C0007AAD46016701B5D7AC0A300B6C4935E6F5 -:1018D000B567F3006C88B99150BF311B32A7B86867 -:1018E00095257BAD3378A7CD3ED38E9D34E74ED47E -:1018F00022E706848E60A8FF38AB839C2A08F9D4BF -:101900002063BC1A060AC05F4642DC06D1805B80B9 -:10191000DA22E61A848E72D16F5A80871AAA7AD494 -:1019200048C8D547D5152323E11738348CBA4B7BEB -:10193000D402D28C22DC06D51F561B328CA35E71DA -:10194000511D20CA26A43140A962B017DF9EF4B70A -:10195000C940C778018182B83839491C26C05F70F8 -:1019600017D4ABE12AF84ABE12AF8F974FCBA7012D -:101970007DDA80AA91647F4A81D522C8FE828025C3 -:1019800048B23EBBDC352E9407E88A9C03E24BA5A7 -:1019900077ABB332E94BBD598684977A04BA53E1E9 -:1019A00032EF50D4E63553EB029CC7D77AB330D22E -:1019B0005DEA02E9445D1628C02CE0369174455971 -:1019C00018D54FAC0AC435308B38692BBD5998698E -:1019D0002EF512E958674AEF50D58E3E1CA4B0CEC2 -:1019E00093216E1A481FA22AC30D577AB30D092EF0 -:1019F000F4435D288B832092384D80BEE02FAC17D6 -:101A000049B3A582E93EE93674E02FA6CE9C05F4E1 -:101A1000C22C8C52577AD48D48FAEF50D5AE35533C -:101A2000EB028621AAEF56661A4BBD44BA50C4E9B0 -:101A300053EB028681F5DEA1A8621F5DFEA25D293F -:101A400077A86A618D40FD11530C6AA7D60530C78F -:101A5000D77FA9574A5DEB481B0C7C8B9D8A53EFBF -:101A60006694CA54F5A0C6016E036A9F5676653225 -:101A70008B7BD2C5A5602FAA7D65A300B701B4C832 -:101A80005A078FED01D527916701B48B9C541C73C5 -:101A9000A8845CC150BF365660AB8C8B9C541C73C1 -:101AA000A8845CC150BF36566C05F553D6A2300BE6 -:101AB000295B19FCF69445CF150BF33CB32A7AC584 -:101AC0004601648A31239C0A5DEA34332E95C7CEE1 -:101AD0002A4FE65020B9310C9BEF391445CE45070B -:101AE0001CEA4687AB1B3684A75EAC966752B017DC -:101AF000DCFE7B4A22E78A85F99E59977A8D0CCBCA -:101B0000A527F3A0443260B937DE72288B9C8A0E79 -:101B100039D48C05F7E7B82AF92818EF0030305788 -:101B200007440A508FF07391411F3A9045C0BB188B -:101B3000E13602FBFB9E02FAEEE7F5CF017D105C79 -:101B4000F017D105CF017D53EB2D1805B80DA64236 -:101B5000DC06D31735A88B9C0A0E39D40CFE7B4AC1 -:101B600022E6550BF331B3602FBC7CE2A4FE655135 -:101B70001738141C73A819FCF69445CCAA17E66311 -:101B8000660AB8CC85A158F6A23548487F4A89959F -:101B90002121FD0502549E45910E3C05F507405557 -:101BA00048523E86A07548523EB5004A9C006BC71D -:101BB000CE4527F32A843735DEA0AB231AAEF58352 -:101BC0005918D743DE2AD094EBDE053A959FCCC353 -:101BD0002045CCAA17E666CC43264FE741222E705B -:101BE0006838E753E02FABBC12D2E9580BEAA7AD37 -:101BF00045A11FC05F7839C8A08F9D481C24EEC73F -:101C000080BEBAF56D6649770D4EA53009B4F9C5A9 -:101C1000430F38A93F9D02FBCE4511739141C73A4E -:101C2000919FCF69445CF150BF33CB32A7AC549045 -:101C30008D448E702977A8D0CCBA56B0B29D8C86D0 -:101C40004C172677261C271C249E2361BE8E124F1C -:101C500011871CEA5C05F5D7B86A752977AB0D9931 -:101C600074A54F72A0AA4AC6F36666C63982AF75DC -:101C7000A66F146BCE05707396823E7528E13AA765 -:101C8000AD44601652B61D7AB6B324BB86A75298EF -:101C900004DA7CE2A1879C55F79CB5AC2C9533B94E -:101CA0003105D953D6A2300B295B022E615A17E6B3 -:101CB0009CB32A7AC54021A891CE0527F3A5886454 -:101CC000C172654F58140C8D7EF381445CEF41C79F -:101CD0003ABE02FAA9EACECCA92953D6A24647DDDC -:101CE0007AC0A30086E29B29788B810998709B2992 -:101CF000795DD972ED94BCB976133B2A5DB29795A4 -:101D00002ED94BCA7D5B5994CA1C24EEC794BCC023 -:101D100026D3E7150C3CE2ACFE7B4A22E78A85F924 
-:101D20009E59977A8D0CCBA527F3A0417262193783 -:101D3000DE70288B9C8A0E39D48D0F56366D094E75 -:101D4000BD592CCEA56B0B22D99DC9B297BEF3818C -:101D50004A7D65A300938F672978C24DC1D1068261 -:101D600031AF07383411F3A82A9EA8661AA4A54FEC -:101D70005A0C118FAA7BD0654945BDE962D2B19E4C -:101D80006CCCC6198709C38E75411F3AA513D5556A -:101D900055555555555555555555555555555555F3 -:101DA00055555555555555555555555555555555E3 -:0E1DB00055555555555555555555555ACC90C8 -:00000001FF diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 044e7b58d31..1bfe8802cc1 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -2005,7 +2005,7 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port) o2net_listen_sock = sock; INIT_WORK(&o2net_listen_work, o2net_accept_many); - sock->sk->sk_reuse = 1; + sock->sk->sk_reuse = SK_CAN_REUSE; ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); if (ret < 0) { printk(KERN_ERR "o2net: Error %d while binding socket at " diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 3c9b616c834..b5d568fa19e 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -167,7 +167,6 @@ header-y += if_arp.h header-y += if_bonding.h header-y += if_bridge.h header-y += if_cablemodem.h -header-y += if_ec.h header-y += if_eql.h header-y += if_ether.h header-y += if_fc.h @@ -186,7 +185,6 @@ header-y += if_pppox.h header-y += if_slip.h header-y += if_strip.h header-y += if_team.h -header-y += if_tr.h header-y += if_tun.h header-y += if_tunnel.h header-y += if_vlan.h diff --git a/include/linux/atmlec.h b/include/linux/atmlec.h index 39c917fd1b9..302791e3ab2 100644 --- a/include/linux/atmlec.h +++ b/include/linux/atmlec.h @@ -21,13 +21,6 @@ /* Maximum number of LEC interfaces (tweakable) */ #define MAX_LEC_ITF 48 -/* - * From the total of MAX_LEC_ITF, last NUM_TR_DEVS are reserved for Token Ring. - * E.g. if MAX_LEC_ITF = 48 and NUM_TR_DEVS = 8, then lec0-lec39 are for - * Ethernet ELANs and lec40-lec47 are for Token Ring ELANS. - */ -#define NUM_TR_DEVS 8 - typedef enum { l_set_mac_addr, l_del_mac_addr, diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h index 65a2562f66b..6bb43382f3f 100644 --- a/include/linux/dcbnl.h +++ b/include/linux/dcbnl.h @@ -67,6 +67,17 @@ struct ieee_ets { __u8 reco_prio_tc[IEEE_8021QAZ_MAX_TCS]; }; +/* This structure contains rate limit extension to the IEEE 802.1Qaz ETS + * managed object. + * Values are 64 bits long and specified in Kbps to enable usage over both + * slow and very fast networks. 
+ * + * @tc_maxrate: maximal tc tx bandwidth indexed by traffic class + */ +struct ieee_maxrate { + __u64 tc_maxrate[IEEE_8021QAZ_MAX_TCS]; +}; + /* This structure contains the IEEE 802.1Qaz PFC managed object * * @pfc_cap: Indicates the number of traffic classes on the local device @@ -321,6 +332,7 @@ enum ieee_attrs { DCB_ATTR_IEEE_PEER_ETS, DCB_ATTR_IEEE_PEER_PFC, DCB_ATTR_IEEE_PEER_APP, + DCB_ATTR_IEEE_MAXRATE, __DCB_ATTR_IEEE_MAX }; #define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1) diff --git a/include/linux/dccp.h b/include/linux/dccp.h index eaf95a023af..d16294e2a11 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -549,6 +549,8 @@ static inline const char *dccp_role(const struct sock *sk) return NULL; } +extern void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req); + #endif /* __KERNEL__ */ #endif /* _LINUX_DCCP_H */ diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index fe5136d8145..3d406e0ede6 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -18,8 +18,6 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * - * WARNING: This move may well be temporary. This file will get merged with others RSN. - * */ #ifndef _LINUX_ETHERDEVICE_H #define _LINUX_ETHERDEVICE_H @@ -59,7 +57,7 @@ extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, * * Return true if the address is all zeroes. */ -static inline int is_zero_ether_addr(const u8 *addr) +static inline bool is_zero_ether_addr(const u8 *addr) { return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); } @@ -71,7 +69,7 @@ static inline int is_zero_ether_addr(const u8 *addr) * Return true if the address is a multicast address. * By definition the broadcast address is also a multicast address. */ -static inline int is_multicast_ether_addr(const u8 *addr) +static inline bool is_multicast_ether_addr(const u8 *addr) { return 0x01 & addr[0]; } @@ -82,7 +80,7 @@ static inline int is_multicast_ether_addr(const u8 *addr) * * Return true if the address is a local address. */ -static inline int is_local_ether_addr(const u8 *addr) +static inline bool is_local_ether_addr(const u8 *addr) { return 0x02 & addr[0]; } @@ -93,7 +91,7 @@ static inline int is_local_ether_addr(const u8 *addr) * * Return true if the address is the broadcast address. */ -static inline int is_broadcast_ether_addr(const u8 *addr) +static inline bool is_broadcast_ether_addr(const u8 *addr) { return (addr[0] & addr[1] & addr[2] & addr[3] & addr[4] & addr[5]) == 0xff; } @@ -104,7 +102,7 @@ static inline int is_broadcast_ether_addr(const u8 *addr) * * Return true if the address is a unicast address. */ -static inline int is_unicast_ether_addr(const u8 *addr) +static inline bool is_unicast_ether_addr(const u8 *addr) { return !is_multicast_ether_addr(addr); } @@ -118,7 +116,7 @@ static inline int is_unicast_ether_addr(const u8 *addr) * * Return true if the address is valid. */ -static inline int is_valid_ether_addr(const u8 *addr) +static inline bool is_valid_ether_addr(const u8 *addr) { /* FF:FF:FF:FF:FF:FF is a multicast address so we don't need to * explicitly check for it here. */ @@ -159,7 +157,7 @@ static inline void eth_hw_addr_random(struct net_device *dev) * @addr1: Pointer to a six-byte array containing the Ethernet address * @addr2: Pointer other six-byte array containing the Ethernet address * - * Compare two ethernet addresses, returns 0 if equal, non-zero otherwise. 
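The struct ieee_maxrate object and DCB_ATTR_IEEE_MAXRATE attribute added in the dcbnl.h hunk above carry per-traffic-class caps in Kbps. A minimal illustrative sketch, not part of the patch (how the structure reaches a driver, e.g. through a dcbnl rate-limit callback, is an assumption here):

/*
 * Illustration only: values are in Kbps per the comment above, so
 * 1000000 caps a traffic class at 1 Gbit/s.
 */
#include <linux/dcbnl.h>
#include <linux/string.h>

static void example_fill_maxrate(struct ieee_maxrate *mr)
{
	memset(mr, 0, sizeof(*mr));
	mr->tc_maxrate[0] = 1000000ULL;		/* TC0: 1 Gbit/s */
	mr->tc_maxrate[1] = 10000000ULL;	/* TC1: 10 Gbit/s */
}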
+ * Compare two Ethernet addresses, returns 0 if equal, non-zero otherwise. * Unlike memcmp(), it doesn't return a value suitable for sorting. */ static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2) @@ -171,6 +169,18 @@ static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2) return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; } +/** + * ether_addr_equal - Compare two Ethernet addresses + * @addr1: Pointer to a six-byte array containing the Ethernet address + * @addr2: Pointer other six-byte array containing the Ethernet address + * + * Compare two Ethernet addresses, returns true if equal + */ +static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ + return !compare_ether_addr(addr1, addr2); +} + static inline unsigned long zap_last_2bytes(unsigned long value) { #ifdef __BIG_ENDIAN @@ -181,34 +191,34 @@ static inline unsigned long zap_last_2bytes(unsigned long value) } /** - * compare_ether_addr_64bits - Compare two Ethernet addresses + * ether_addr_equal_64bits - Compare two Ethernet addresses * @addr1: Pointer to an array of 8 bytes * @addr2: Pointer to an other array of 8 bytes * - * Compare two ethernet addresses, returns 0 if equal, non-zero otherwise. - * Unlike memcmp(), it doesn't return a value suitable for sorting. + * Compare two Ethernet addresses, returns true if equal, false otherwise. + * * The function doesn't need any conditional branches and possibly uses * word memory accesses on CPU allowing cheap unaligned memory reads. - * arrays = { byte1, byte2, byte3, byte4, byte6, byte7, pad1, pad2} + * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 } * - * Please note that alignment of addr1 & addr2 is only guaranted to be 16 bits. + * Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits. */ -static inline unsigned compare_ether_addr_64bits(const u8 addr1[6+2], - const u8 addr2[6+2]) +static inline bool ether_addr_equal_64bits(const u8 addr1[6+2], + const u8 addr2[6+2]) { #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS unsigned long fold = ((*(unsigned long *)addr1) ^ (*(unsigned long *)addr2)); if (sizeof(fold) == 8) - return zap_last_2bytes(fold) != 0; + return zap_last_2bytes(fold) == 0; fold |= zap_last_2bytes((*(unsigned long *)(addr1 + 4)) ^ (*(unsigned long *)(addr2 + 4))); - return fold != 0; + return fold == 0; #else - return compare_ether_addr(addr1, addr2); + return ether_addr_equal(addr1, addr2); #endif } @@ -220,23 +230,23 @@ static inline unsigned compare_ether_addr_64bits(const u8 addr1[6+2], * Compare passed address with all addresses of the device. Return true if the * address if one of the device addresses. * - * Note that this function calls compare_ether_addr_64bits() so take care of + * Note that this function calls ether_addr_equal_64bits() so take care of * the right padding. */ static inline bool is_etherdev_addr(const struct net_device *dev, const u8 addr[6 + 2]) { struct netdev_hw_addr *ha; - int res = 1; + bool res = false; rcu_read_lock(); for_each_dev_addr(dev, ha) { - res = compare_ether_addr_64bits(addr, ha->addr); - if (!res) + res = ether_addr_equal_64bits(addr, ha->addr); + if (res) break; } rcu_read_unlock(); - return !res; + return res; } #endif /* __KERNEL__ */ @@ -245,7 +255,7 @@ static inline bool is_etherdev_addr(const struct net_device *dev, * @a: Pointer to Ethernet header * @b: Pointer to Ethernet header * - * Compare two ethernet headers, returns 0 if equal. + * Compare two Ethernet headers, returns 0 if equal. 
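The etherdevice.h changes above turn the address predicates into bool and add ether_addr_equal()/ether_addr_equal_64bits(), which invert the old compare_ether_addr() zero-means-equal convention. A hypothetical receive-filter helper, purely illustrative, shows how the boolean form reads:

/*
 * Hypothetical helper, not from the patch: with ether_addr_equal() the
 * intent reads directly instead of the old !compare_ether_addr(a, b)
 * double negative.
 */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static bool example_frame_is_for_us(const struct net_device *dev,
				    const u8 *daddr)
{
	if (is_broadcast_ether_addr(daddr) || is_multicast_ether_addr(daddr))
		return true;
	return ether_addr_equal(daddr, dev->dev_addr);
}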
* This assumes that the network header (i.e., IP header) is 4-byte * aligned OR the platform can handle unaligned access. This is the * case for all packets coming into netif_receive_skb or similar diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index f5647b59a90..e17fa714058 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -137,6 +137,23 @@ struct ethtool_eeprom { }; /** + * struct ethtool_modinfo - plugin module eeprom information + * @cmd: %ETHTOOL_GMODULEINFO + * @type: Standard the module information conforms to %ETH_MODULE_SFF_xxxx + * @eeprom_len: Length of the eeprom + * + * This structure is used to return the information to + * properly size memory for a subsequent call to %ETHTOOL_GMODULEEEPROM. + * The type code indicates the eeprom data format + */ +struct ethtool_modinfo { + __u32 cmd; + __u32 type; + __u32 eeprom_len; + __u32 reserved[8]; +}; + +/** * struct ethtool_coalesce - coalescing parameters for IRQs and stats updates * @cmd: ETHTOOL_{G,S}COALESCE * @rx_coalesce_usecs: How many usecs to delay an RX interrupt after @@ -661,12 +678,17 @@ struct ethtool_flash { * %ETHTOOL_SET_DUMP * @version: FW version of the dump, filled in by driver * @flag: driver dependent flag for dump setting, filled in by driver during - * get and filled in by ethtool for set operation + * get and filled in by ethtool for set operation. + * flag must be initialized by macro ETH_FW_DUMP_DISABLE value when + * firmware dump is disabled. * @len: length of dump data, used as the length of the user buffer on entry to * %ETHTOOL_GET_DUMP_DATA and this is returned as dump length by driver * for %ETHTOOL_GET_DUMP_FLAG command * @data: data collected for get dump data operation */ + +#define ETH_FW_DUMP_DISABLE 0 + struct ethtool_dump { __u32 cmd; __u32 version; @@ -726,6 +748,29 @@ struct ethtool_sfeatures { struct ethtool_set_features_block features[0]; }; +/** + * struct ethtool_ts_info - holds a device's timestamping and PHC association + * @cmd: command number = %ETHTOOL_GET_TS_INFO + * @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags + * @phc_index: device index of the associated PHC, or -1 if there is none + * @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values + * @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values + * + * The bits in the 'tx_types' and 'rx_filters' fields correspond to + * the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values, + * respectively. For example, if the device supports HWTSTAMP_TX_ON, + * then (1 << HWTSTAMP_TX_ON) in 'tx_types' will be set. + */ +struct ethtool_ts_info { + __u32 cmd; + __u32 so_timestamping; + __s32 phc_index; + __u32 tx_types; + __u32 tx_reserved[3]; + __u32 rx_filters; + __u32 rx_reserved[3]; +}; + /* * %ETHTOOL_SFEATURES changes features present in features[].valid to the * values of corresponding bits in features[].requested. Bits in .requested @@ -788,6 +833,7 @@ struct net_device; /* Some generic methods drivers may use in their ethtool_ops */ u32 ethtool_op_get_link(struct net_device *dev); +int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti); /** * ethtool_rxfh_indir_default - get default value for RX flow hash indirection @@ -893,6 +939,12 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) * and flag of the device. * @get_dump_data: Get dump data. * @set_dump: Set dump specific flags to the device. 
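A minimal driver-side sketch of the new ethtool_ops hooks (hypothetical driver, not part of the patch): the @get_ts_info entry documented just below recommends pointing the callback at ethtool_op_get_ts_info() when only software transmit timestamps are supported, and the module-EEPROM hooks can simply be left unset since all operations are optional.

/*
 * Hypothetical driver skeleton, for illustration only. ethtool_op_get_link()
 * and ethtool_op_get_ts_info() are the generic helpers declared above;
 * .get_module_info/.get_module_eeprom are omitted for a device without a
 * pluggable module cage.
 */
#include <linux/ethtool.h>

static const struct ethtool_ops example_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
};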
+ * @get_ts_info: Get the time stamping and PTP hardware clock capabilities. + * Drivers supporting transmit time stamps in software should set this to + * ethtool_op_get_ts_info(). + * @get_module_info: Get the size and type of the eeprom contained within + * a plug-in module. + * @get_module_eeprom: Get the eeprom information from the plug-in module * * All operations are optional (i.e. the function pointer may be set * to %NULL) and callers must take this into account. Callers must @@ -954,6 +1006,12 @@ struct ethtool_ops { int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); + int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); + int (*get_module_info)(struct net_device *, + struct ethtool_modinfo *); + int (*get_module_eeprom)(struct net_device *, + struct ethtool_eeprom *, u8 *); + }; #endif /* __KERNEL__ */ @@ -1028,6 +1086,9 @@ struct ethtool_ops { #define ETHTOOL_SET_DUMP 0x0000003e /* Set dump settings */ #define ETHTOOL_GET_DUMP_FLAG 0x0000003f /* Get dump settings */ #define ETHTOOL_GET_DUMP_DATA 0x00000040 /* Get dump data */ +#define ETHTOOL_GET_TS_INFO 0x00000041 /* Get time stamping and PHC info */ +#define ETHTOOL_GMODULEINFO 0x00000042 /* Get plug-in module information */ +#define ETHTOOL_GMODULEEEPROM 0x00000043 /* Get plug-in module eeprom */ /* compatibility with older code */ #define SPARC_ETH_GSET ETHTOOL_GSET @@ -1177,6 +1238,12 @@ struct ethtool_ops { #define RX_CLS_LOC_FIRST 0xfffffffe #define RX_CLS_LOC_LAST 0xfffffffd +/* EEPROM Standards for plug in modules */ +#define ETH_MODULE_SFF_8079 0x1 +#define ETH_MODULE_SFF_8079_LEN 256 +#define ETH_MODULE_SFF_8472 0x2 +#define ETH_MODULE_SFF_8472_LEN 512 + /* Reset flags */ /* The reset() operation must clear the flags for the components which * were actually reset. On successful return, the flags indicate the diff --git a/include/linux/filter.h b/include/linux/filter.h index 8eeb205f298..72090994d78 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -126,7 +126,8 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. 
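The ETHTOOL_GMODULEINFO and ETHTOOL_GMODULEEEPROM commands defined above are meant to be used as a pair, exactly as the ethtool_modinfo kdoc describes: query the module type and EEPROM length first, then size the buffer for the actual read. A userspace sketch, illustrative only, with error handling omitted (the SIOCETHTOOL/ifreq plumbing is the long-standing ethtool ioctl convention, not something introduced by this patch):

/*
 * Userspace illustration of the two-step plug-in module EEPROM dump.
 * fd is an ordinary AF_INET datagram socket; error checks are skipped
 * for brevity.
 */
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

static struct ethtool_eeprom *example_read_module_eeprom(int fd,
							 const char *ifname)
{
	struct ethtool_modinfo modinfo = { .cmd = ETHTOOL_GMODULEINFO };
	struct ethtool_eeprom *ee;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	/* Step 1: which SFF standard, and how long is the EEPROM? */
	ifr.ifr_data = (void *)&modinfo;
	ioctl(fd, SIOCETHTOOL, &ifr);

	/* Step 2: size the buffer from eeprom_len and fetch the contents. */
	ee = calloc(1, sizeof(*ee) + modinfo.eeprom_len);
	ee->cmd = ETHTOOL_GMODULEEEPROM;
	ee->len = modinfo.eeprom_len;
	ifr.ifr_data = (void *)ee;
	ioctl(fd, SIOCETHTOOL, &ifr);

	return ee;	/* ee->data[] holds the raw module EEPROM */
}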
*/ #define SKF_AD_HATYPE 28 #define SKF_AD_RXHASH 32 #define SKF_AD_CPU 36 -#define SKF_AD_MAX 40 +#define SKF_AD_ALU_XOR_X 40 +#define SKF_AD_MAX 44 #define SKF_NET_OFF (-0x100000) #define SKF_LL_OFF (-0x200000) @@ -153,6 +154,9 @@ static inline unsigned int sk_filter_len(const struct sk_filter *fp) extern int sk_filter(struct sock *sk, struct sk_buff *skb); extern unsigned int sk_run_filter(const struct sk_buff *skb, const struct sock_filter *filter); +extern int sk_unattached_filter_create(struct sk_filter **pfp, + struct sock_fprog *fprog); +extern void sk_unattached_filter_destroy(struct sk_filter *fp); extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); extern int sk_detach_filter(struct sock *sk); extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen); @@ -228,6 +232,7 @@ enum { BPF_S_ANC_HATYPE, BPF_S_ANC_RXHASH, BPF_S_ANC_CPU, + BPF_S_ANC_ALU_XOR_X, }; #endif /* __KERNEL__ */ diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 5852545e6bb..6af8738ae7e 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -274,6 +274,33 @@ struct hv_ring_buffer_debug_info { u32 bytes_avail_towrite; }; + +/* + * + * hv_get_ringbuffer_availbytes() + * + * Get number of bytes available to read and to write to + * for the specified ring buffer + */ +static inline void +hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi, + u32 *read, u32 *write) +{ + u32 read_loc, write_loc, dsize; + + smp_read_barrier_depends(); + + /* Capture the read/write indices before they changed */ + read_loc = rbi->ring_buffer->read_index; + write_loc = rbi->ring_buffer->write_index; + dsize = rbi->ring_datasize; + + *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) : + read_loc - write_loc; + *read = dsize - *write; +} + + /* * We use the same version numbering for all Hyper-V modules. * diff --git a/include/linux/ibmtr.h b/include/linux/ibmtr.h deleted file mode 100644 index 06695b74d40..00000000000 --- a/include/linux/ibmtr.h +++ /dev/null @@ -1,373 +0,0 @@ -#ifndef __LINUX_IBMTR_H__ -#define __LINUX_IBMTR_H__ - -/* Definitions for an IBM Token Ring card. */ -/* This file is distributed under the GNU GPL */ - -/* ported to the Alpha architecture 02/20/96 (just used the HZ macro) */ - -#define TR_RETRY_INTERVAL (30*HZ) /* 500 on PC = 5 s */ -#define TR_RST_TIME (msecs_to_jiffies(50)) /* 5 on PC = 50 ms */ -#define TR_BUSY_INTERVAL (msecs_to_jiffies(200)) /* 5 on PC = 200 ms */ -#define TR_SPIN_INTERVAL (3*HZ) /* 3 seconds before init timeout */ - -#define TR_ISA 1 -#define TR_MCA 2 -#define TR_ISAPNP 3 -#define NOTOK 0 - -#define IBMTR_SHARED_RAM_SIZE 0x10000 -#define IBMTR_IO_EXTENT 4 -#define IBMTR_MAX_ADAPTERS 4 - -#define CHANNEL_ID 0X1F30 -#define AIP 0X1F00 -#define AIPADAPTYPE 0X1FA0 -#define AIPDATARATE 0X1FA2 -#define AIPEARLYTOKEN 0X1FA4 -#define AIPAVAILSHRAM 0X1FA6 -#define AIPSHRAMPAGE 0X1FA8 -#define AIP4MBDHB 0X1FAA -#define AIP16MBDHB 0X1FAC -#define AIPFID 0X1FBA - -#define ADAPTRESET 0x1 /* Control Adapter reset (add to base) */ -#define ADAPTRESETREL 0x2 /* Release Adapter from reset ( """) */ -#define ADAPTINTREL 0x3 /* Adapter interrupt release */ - -#define GLOBAL_INT_ENABLE 0x02f0 - -/* MMIO bits 0-4 select register */ -#define RRR_EVEN 0x00 /* Shared RAM relocation registers - even and odd */ -/* Used to set the starting address of shared RAM */ -/* Bits 1 through 7 of this register map to bits 13 through 19 of the shared - RAM address.*/ -/* ie: 0x02 sets RAM address to ...ato! issy su wazzoo !! GODZILLA!!! 
*/ -#define RRR_ODD 0x01 -/* Bits 2 and 3 of this register can be read to determine shared RAM size */ -/* 00 for 8k, 01 for 16k, 10 for 32k, 11 for 64k */ -#define WRBR_EVEN 0x02 /* Write region base registers - even and odd */ -#define WRBR_ODD 0x03 -#define WWOR_EVEN 0x04 /* Write window open registers - even and odd */ -#define WWOR_ODD 0x05 -#define WWCR_EVEN 0x06 /* Write window close registers - even and odd */ -#define WWCR_ODD 0x07 - -/* Interrupt status registers - PC system - even and odd */ -#define ISRP_EVEN 0x08 - -#define TCR_INT 0x10 /* Bit 4 - Timer interrupt. The TVR_EVEN timer has - expired. */ -#define ERR_INT 0x08 /* Bit 3 - Error interrupt. The adapter has had an - internal error. */ -#define ACCESS_INT 0x04 /* Bit 2 - Access interrupt. You have attempted to - write to an invalid area of shared RAM - or an invalid register within the MMIO. */ -/* In addition, the following bits within ISRP_EVEN can be turned on or off */ -/* by you to control the interrupt processing: */ -#define INT_ENABLE 0x40 /* Bit 6 - Interrupt enable. If 0, no interrupts will - occur. If 1, interrupts will occur normally. - Normally set to 1. */ -/* Bit 0 - Primary or alternate adapter. Set to zero if this adapter is the - primary adapter, 1 if this adapter is the alternate adapter. */ - - -#define ISRP_ODD 0x09 - -#define ADAP_CHK_INT 0x40 /* Bit 6 - Adapter check. the adapter has - encountered a serious problem and has closed - itself. Whoa. */ -#define SRB_RESP_INT 0x20 /* Bit 5 - SRB response. The adapter has accepted - an SRB request and set the return code within - the SRB. */ -#define ASB_FREE_INT 0x10 /* Bit 4 - ASB free. The adapter has read the ASB - and this area can be safely reused. This interrupt - is only used if your application has set the ASB - free request bit in ISRA_ODD or if an error was - detected in your response. */ -#define ARB_CMD_INT 0x08 /* Bit 3 - ARB command. The adapter has given you a - command for action. The command is located in the - ARB area of shared memory. */ -#define SSB_RESP_INT 0x04 /* Bit 2 - SSB response. The adapter has posted a - response to your SRB (the response is located in - the SSB area of shared memory). */ -/* Bit 1 - Bridge frame forward complete. */ - - - -#define ISRA_EVEN 0x0A /*Interrupt status registers - adapter - even and odd */ -/* Bit 7 - Internal parity error (on adapter's internal bus) */ -/* Bit 6 - Timer interrupt pending */ -/* Bit 5 - Access interrupt (attempt by adapter to access illegal address) */ -/* Bit 4 - Adapter microcode problem (microcode dead-man timer expired) */ -/* Bit 3 - Adapter processor check status */ -/* Bit 2 - Reserved */ -/* Bit 1 - Adapter hardware interrupt mask (prevents internal interrupts) */ -/* Bit 0 - Adapter software interrupt mask (prevents internal software ints) */ - -#define ISRA_ODD 0x0B -#define CMD_IN_SRB 0x20 /* Bit 5 - Indicates that you have placed a new - command in the SRB and are ready for the adapter to - process the command. */ -#define RESP_IN_ASB 0x10 /* Bit 4 - Indicates that you have placed a response - (an ASB) in the shared RAM which is available for - the adapter's use. */ -/* Bit 3 - Indicates that you are ready to put an SRB in the shared RAM, but - that a previous command is still pending. The adapter will then - interrupt you when the previous command is completed */ -/* Bit 2 - Indicates that you are ready to put an ASB in the shared RAM, but - that a previous ASB is still pending. The adapter will then interrupt - you when the previous ASB is copied. 
*/ -#define ARB_FREE 0x2 -#define SSB_FREE 0x1 - -#define TCR_EVEN 0x0C /* Timer control registers - even and odd */ -#define TCR_ODD 0x0D -#define TVR_EVEN 0x0E /* Timer value registers - even and odd */ -#define TVR_ODD 0x0F -#define SRPR_EVEN 0x18 /* Shared RAM paging registers - even and odd */ -#define SRPR_ENABLE_PAGING 0xc0 -#define SRPR_ODD 0x19 /* Not used. */ -#define TOKREAD 0x60 -#define TOKOR 0x40 -#define TOKAND 0x20 -#define TOKWRITE 0x00 - -/* MMIO bits 5-6 select operation */ -/* 00 is used to write to a register */ -/* 01 is used to bitwise AND a byte with a register */ -/* 10 is used to bitwise OR a byte with a register */ -/* 11 is used to read from a register */ - -/* MMIO bits 7-8 select area of interest.. see below */ -/* 00 selects attachment control area. */ -/* 01 is reserved. */ -/* 10 selects adapter identification area A containing the adapter encoded - address. */ -/* 11 selects the adapter identification area B containing test patterns. */ - -#define PCCHANNELID 5049434F3631313039393020 -#define MCCHANNELID 4D4152533633583435313820 - -#define ACA_OFFSET 0x1e00 -#define ACA_SET 0x40 -#define ACA_RESET 0x20 -#define ACA_RW 0x00 - -#ifdef ENABLE_PAGING -#define SET_PAGE(x) (writeb((x), ti->mmio + ACA_OFFSET+ ACA_RW + SRPR_EVEN)) -#else -#define SET_PAGE(x) -#endif - -/* do_tok_int possible values */ -#define FIRST_INT 1 -#define NOT_FIRST 2 - -typedef enum { CLOSED, OPEN } open_state; -//staic const char *printstate[] = { "CLOSED","OPEN"}; - -struct tok_info { - unsigned char irq; - void __iomem *mmio; - unsigned char hw_address[32]; - unsigned char adapter_type; - unsigned char data_rate; - unsigned char token_release; - unsigned char avail_shared_ram; - unsigned char shared_ram_paging; - unsigned char turbo; - unsigned short dhb_size4mb; - unsigned short rbuf_len4; - unsigned short rbuf_cnt4; - unsigned short maxmtu4; - unsigned short dhb_size16mb; - unsigned short rbuf_len16; - unsigned short rbuf_cnt16; - unsigned short maxmtu16; - /* Additions by David Morris */ - unsigned char do_tok_int; - wait_queue_head_t wait_for_reset; - unsigned char sram_base; - /* Additions by Peter De Schrijver */ - unsigned char page_mask; /* mask to select RAM page to Map*/ - unsigned char mapped_ram_size; /* size of RAM page */ - __u32 sram_phys; /* Shared memory base address */ - void __iomem *sram_virt; /* Shared memory base address */ - void __iomem *init_srb; /* Initial System Request Block address */ - void __iomem *srb; /* System Request Block address */ - void __iomem *ssb; /* System Status Block address */ - void __iomem *arb; /* Adapter Request Block address */ - void __iomem *asb; /* Adapter Status Block address */ - __u8 init_srb_page; - __u8 srb_page; - __u8 ssb_page; - __u8 arb_page; - __u8 asb_page; - unsigned short exsap_station_id; - unsigned short global_int_enable; - struct sk_buff *current_skb; - - unsigned char auto_speedsave; - open_state open_status, sap_status; - enum {MANUAL, AUTOMATIC} open_mode; - enum {FAIL, RESTART, REOPEN} open_action; - enum {NO, YES} open_failure; - unsigned char readlog_pending; - unsigned short adapter_int_enable; /* Adapter-specific int enable */ - struct timer_list tr_timer; - unsigned char ring_speed; - spinlock_t lock; /* SMP protection */ -}; - -/* token ring adapter commands */ -#define DIR_INTERRUPT 0x00 /* struct srb_interrupt */ -#define DIR_MOD_OPEN_PARAMS 0x01 -#define DIR_OPEN_ADAPTER 0x03 /* struct dir_open_adapter */ -#define DIR_CLOSE_ADAPTER 0x04 -#define DIR_SET_GRP_ADDR 0x06 -#define DIR_SET_FUNC_ADDR 0x07 /* 
struct srb_set_funct_addr */ -#define DIR_READ_LOG 0x08 /* struct srb_read_log */ -#define DLC_OPEN_SAP 0x15 /* struct dlc_open_sap */ -#define DLC_CLOSE_SAP 0x16 -#define DATA_LOST 0x20 /* struct asb_rec */ -#define REC_DATA 0x81 /* struct arb_rec_req */ -#define XMIT_DATA_REQ 0x82 /* struct arb_xmit_req */ -#define DLC_STATUS 0x83 /* struct arb_dlc_status */ -#define RING_STAT_CHANGE 0x84 /* struct dlc_open_sap ??? */ - -/* DIR_OPEN_ADAPTER options */ -#define OPEN_PASS_BCON_MAC 0x0100 -#define NUM_RCV_BUF 2 -#define RCV_BUF_LEN 1024 -#define DHB_LENGTH 2048 -#define NUM_DHB 2 -#define DLC_MAX_SAP 2 -#define DLC_MAX_STA 1 - -/* DLC_OPEN_SAP options */ -#define MAX_I_FIELD 0x0088 -#define SAP_OPEN_IND_SAP 0x04 -#define SAP_OPEN_PRIORITY 0x20 -#define SAP_OPEN_STATION_CNT 0x1 -#define XMIT_DIR_FRAME 0x0A -#define XMIT_UI_FRAME 0x0d -#define XMIT_XID_CMD 0x0e -#define XMIT_TEST_CMD 0x11 - -/* srb close return code */ -#define SIGNAL_LOSS 0x8000 -#define HARD_ERROR 0x4000 -#define XMIT_BEACON 0x1000 -#define LOBE_FAULT 0x0800 -#define AUTO_REMOVAL 0x0400 -#define REMOVE_RECV 0x0100 -#define LOG_OVERFLOW 0x0080 -#define RING_RECOVER 0x0020 - -struct srb_init_response { - unsigned char command; - unsigned char init_status; - unsigned char init_status_2; - unsigned char reserved[3]; - __u16 bring_up_code; - __u16 encoded_address; - __u16 level_address; - __u16 adapter_address; - __u16 parms_address; - __u16 mac_address; -}; - -struct dir_open_adapter { - unsigned char command; - char reserved[7]; - __u16 open_options; - unsigned char node_address[6]; - unsigned char group_address[4]; - unsigned char funct_address[4]; - __u16 num_rcv_buf; - __u16 rcv_buf_len; - __u16 dhb_length; - unsigned char num_dhb; - char reserved2; - unsigned char dlc_max_sap; - unsigned char dlc_max_sta; - unsigned char dlc_max_gsap; - unsigned char dlc_max_gmem; - unsigned char dlc_t1_tick_1; - unsigned char dlc_t2_tick_1; - unsigned char dlc_ti_tick_1; - unsigned char dlc_t1_tick_2; - unsigned char dlc_t2_tick_2; - unsigned char dlc_ti_tick_2; - unsigned char product_id[18]; -}; - -struct dlc_open_sap { - unsigned char command; - unsigned char reserved1; - unsigned char ret_code; - unsigned char reserved2; - __u16 station_id; - unsigned char timer_t1; - unsigned char timer_t2; - unsigned char timer_ti; - unsigned char maxout; - unsigned char maxin; - unsigned char maxout_incr; - unsigned char max_retry_count; - unsigned char gsap_max_mem; - __u16 max_i_field; - unsigned char sap_value; - unsigned char sap_options; - unsigned char station_count; - unsigned char sap_gsap_mem; - unsigned char gsap[0]; -}; - -struct srb_xmit { - unsigned char command; - unsigned char cmd_corr; - unsigned char ret_code; - unsigned char reserved1; - __u16 station_id; -}; - -struct arb_rec_req { - unsigned char command; - unsigned char reserved1[3]; - __u16 station_id; - __u16 rec_buf_addr; - unsigned char lan_hdr_len; - unsigned char dlc_hdr_len; - __u16 frame_len; - unsigned char msg_type; -}; - -struct asb_rec { - unsigned char command; - unsigned char reserved1; - unsigned char ret_code; - unsigned char reserved2; - __u16 station_id; - __u16 rec_buf_addr; -}; - -struct rec_buf { - unsigned char reserved1[2]; - __u16 buf_ptr; - unsigned char reserved2; - unsigned char receive_fs; - __u16 buf_len; - unsigned char data[0]; -}; - -struct srb_set_funct_addr { - unsigned char command; - unsigned char reserved1; - unsigned char ret_code; - unsigned char reserved2[3]; - unsigned char funct_address[4]; -}; - -#endif diff --git 
a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 210e2c32553..ce9af891851 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -640,9 +640,9 @@ struct ieee80211_rann_ie { u8 rann_hopcount; u8 rann_ttl; u8 rann_addr[6]; - u32 rann_seq; - u32 rann_interval; - u32 rann_metric; + __le32 rann_seq; + __le32 rann_interval; + __le32 rann_metric; } __attribute__ ((packed)); enum ieee80211_rann_flags { @@ -1007,13 +1007,13 @@ enum ieee80211_min_mpdu_spacing { }; /** - * struct ieee80211_ht_info - HT information + * struct ieee80211_ht_operation - HT operation IE * - * This structure is the "HT information element" as - * described in 802.11n D5.0 7.3.2.58 + * This structure is the "HT operation element" as + * described in 802.11n-2009 7.3.2.57 */ -struct ieee80211_ht_info { - u8 control_chan; +struct ieee80211_ht_operation { + u8 primary_chan; u8 ht_param; __le16 operation_mode; __le16 stbc_param; @@ -1027,8 +1027,6 @@ struct ieee80211_ht_info { #define IEEE80211_HT_PARAM_CHA_SEC_BELOW 0x03 #define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY 0x04 #define IEEE80211_HT_PARAM_RIFS_MODE 0x08 -#define IEEE80211_HT_PARAM_SPSMP_SUPPORT 0x10 -#define IEEE80211_HT_PARAM_SERV_INTERVAL_GRAN 0xE0 /* for operation_mode */ #define IEEE80211_HT_OP_MODE_PROTECTION 0x0003 @@ -1301,7 +1299,7 @@ enum ieee80211_eid { WLAN_EID_EXT_SUPP_RATES = 50, WLAN_EID_HT_CAPABILITY = 45, - WLAN_EID_HT_INFORMATION = 61, + WLAN_EID_HT_OPERATION = 61, WLAN_EID_RSN = 48, WLAN_EID_MMIE = 76, @@ -1441,6 +1439,18 @@ enum ieee80211_tdls_actioncode { #define WLAN_TDLS_SNAP_RFTYPE 0x2 /** + * enum - mesh synchronization method identifier + * + * @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method + * @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method + * that will be specified in a vendor specific information element + */ +enum { + IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1, + IEEE80211_SYNC_METHOD_VENDOR = 255, +}; + +/** * enum - mesh path selection protocol identifier * * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol @@ -1448,7 +1458,7 @@ enum ieee80211_tdls_actioncode { * be specified in a vendor specific information element */ enum { - IEEE80211_PATH_PROTOCOL_HWMP = 0, + IEEE80211_PATH_PROTOCOL_HWMP = 1, IEEE80211_PATH_PROTOCOL_VENDOR = 255, }; @@ -1460,7 +1470,7 @@ enum { * specified in a vendor specific information element */ enum { - IEEE80211_PATH_METRIC_AIRTIME = 0, + IEEE80211_PATH_METRIC_AIRTIME = 1, IEEE80211_PATH_METRIC_VENDOR = 255, }; diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index 6d722f41ee7..26cb3c2c5c7 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -82,11 +82,12 @@ #define ARPHRD_FCPL 786 /* Fibrechannel public loop */ #define ARPHRD_FCFABRIC 787 /* Fibrechannel fabric */ /* 787->799 reserved for fibrechannel media types */ -#define ARPHRD_IEEE802_TR 800 /* Magic type ident for TR */ +/* 800 used to be used for token ring */ #define ARPHRD_IEEE80211 801 /* IEEE 802.11 */ #define ARPHRD_IEEE80211_PRISM 802 /* IEEE 802.11 + Prism2 header */ #define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */ #define ARPHRD_IEEE802154 804 +#define ARPHRD_IEEE802154_MONITOR 805 /* IEEE 802.15.4 network monitor */ #define ARPHRD_PHONET 820 /* PhoNet media type */ #define ARPHRD_PHONET_PIPE 821 /* PhoNet pipe header */ diff --git a/include/linux/if_ec.h b/include/linux/if_ec.h deleted file mode 100644 index d85f9f48129..00000000000 --- a/include/linux/if_ec.h +++ /dev/null @@ -1,68 +0,0 @@ -/* 
Definitions for Econet sockets. */ - -#ifndef __LINUX_IF_EC -#define __LINUX_IF_EC - -/* User visible stuff. Glibc provides its own but libc5 folk will use these */ - -struct ec_addr { - unsigned char station; /* Station number. */ - unsigned char net; /* Network number. */ -}; - -struct sockaddr_ec { - unsigned short sec_family; - unsigned char port; /* Port number. */ - unsigned char cb; /* Control/flag byte. */ - unsigned char type; /* Type of message. */ - struct ec_addr addr; - unsigned long cookie; -}; - -#define ECTYPE_PACKET_RECEIVED 0 /* Packet received */ -#define ECTYPE_TRANSMIT_STATUS 0x10 /* Transmit completed, - low nibble holds status */ - -#define ECTYPE_TRANSMIT_OK 1 -#define ECTYPE_TRANSMIT_NOT_LISTENING 2 -#define ECTYPE_TRANSMIT_NET_ERROR 3 -#define ECTYPE_TRANSMIT_NO_CLOCK 4 -#define ECTYPE_TRANSMIT_LINE_JAMMED 5 -#define ECTYPE_TRANSMIT_NOT_PRESENT 6 - -#ifdef __KERNEL__ - -#define EC_HLEN 6 - -/* This is what an Econet frame looks like on the wire. */ -struct ec_framehdr { - unsigned char dst_stn; - unsigned char dst_net; - unsigned char src_stn; - unsigned char src_net; - unsigned char cb; - unsigned char port; -}; - -struct econet_sock { - /* struct sock has to be the first member of econet_sock */ - struct sock sk; - unsigned char cb; - unsigned char port; - unsigned char station; - unsigned char net; - unsigned short num; -}; - -static inline struct econet_sock *ec_sk(const struct sock *sk) -{ - return (struct econet_sock *)sk; -} - -struct ec_device { - unsigned char station, net; /* Econet protocol address */ -}; - -#endif - -#endif diff --git a/include/linux/if_link.h b/include/linux/if_link.h index 4b24ff453ae..f715750d0b8 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -138,6 +138,8 @@ enum { IFLA_GROUP, /* Group the device belongs to */ IFLA_NET_NS_FD, IFLA_EXT_MASK, /* Extended info mask, VFs, etc */ + IFLA_PROMISCUITY, /* Promiscuity count: > 0 means acts PROMISC */ +#define IFLA_PROMISCUITY IFLA_PROMISCUITY __IFLA_MAX }; @@ -253,6 +255,7 @@ struct ifla_vlan_qos_mapping { enum { IFLA_MACVLAN_UNSPEC, IFLA_MACVLAN_MODE, + IFLA_MACVLAN_FLAGS, __IFLA_MACVLAN_MAX, }; @@ -265,6 +268,8 @@ enum macvlan_mode { MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */ }; +#define MACVLAN_FLAG_NOPROMISC 1 + /* SR-IOV virtual function management section */ enum { diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index d103dca5c56..f65e8d250f7 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -60,6 +60,7 @@ struct macvlan_dev { struct net_device *lowerdev; struct macvlan_pcpu_stats __percpu *pcpu_stats; enum macvlan_mode mode; + u16 flags; int (*receive)(struct sk_buff *skb); int (*forward)(struct net_device *dev, struct sk_buff *skb); struct macvtap_queue *taps[MAX_MACVTAP_QUEUES]; diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h index 23cefa1111b..b4775418d52 100644 --- a/include/linux/if_pppol2tp.h +++ b/include/linux/if_pppol2tp.h @@ -19,10 +19,11 @@ #ifdef __KERNEL__ #include <linux/in.h> +#include <linux/in6.h> #endif /* Structure used to connect() the socket to a particular tunnel UDP - * socket. + * socket over IPv4. */ struct pppol2tp_addr { __kernel_pid_t pid; /* pid that owns the fd. @@ -35,6 +36,20 @@ struct pppol2tp_addr { __u16 d_tunnel, d_session; /* For sending outgoing packets */ }; +/* Structure used to connect() the socket to a particular tunnel UDP + * socket over IPv6. + */ +struct pppol2tpin6_addr { + __kernel_pid_t pid; /* pid that owns the fd. 
+ * 0 => current */ + int fd; /* FD of UDP socket to use */ + + __u16 s_tunnel, s_session; /* For matching incoming packets */ + __u16 d_tunnel, d_session; /* For sending outgoing packets */ + + struct sockaddr_in6 addr; /* IP address and port to send to */ +}; + /* The L2TPv3 protocol changes tunnel and session ids from 16 to 32 * bits. So we need a different sockaddr structure. */ @@ -49,6 +64,17 @@ struct pppol2tpv3_addr { __u32 d_tunnel, d_session; /* For sending outgoing packets */ }; +struct pppol2tpv3in6_addr { + __kernel_pid_t pid; /* pid that owns the fd. + * 0 => current */ + int fd; /* FD of UDP or IP socket to use */ + + __u32 s_tunnel, s_session; /* For matching incoming packets */ + __u32 d_tunnel, d_session; /* For sending outgoing packets */ + + struct sockaddr_in6 addr; /* IP address and port to send to */ +}; + /* Socket options: * DEBUG - bitmask of debug message categories * SENDSEQ - 0 => don't send packets with sequence numbers diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index b5f927f59f2..09c474c480c 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -70,7 +70,7 @@ struct sockaddr_pppox { struct pppoe_addr pppoe; struct pptp_addr pptp; } sa_addr; -} __attribute__((packed)); +} __packed; /* The use of the above union isn't viable because the size of this * struct must stay fixed over time -- applications use sizeof(struct @@ -81,7 +81,13 @@ struct sockaddr_pppol2tp { __kernel_sa_family_t sa_family; /* address family, AF_PPPOX */ unsigned int sa_protocol; /* protocol identifier */ struct pppol2tp_addr pppol2tp; -} __attribute__((packed)); +} __packed; + +struct sockaddr_pppol2tpin6 { + __kernel_sa_family_t sa_family; /* address family, AF_PPPOX */ + unsigned int sa_protocol; /* protocol identifier */ + struct pppol2tpin6_addr pppol2tp; +} __packed; /* The L2TPv3 protocol changes tunnel and session ids from 16 to 32 * bits. So we need a different sockaddr structure. @@ -90,7 +96,13 @@ struct sockaddr_pppol2tpv3 { __kernel_sa_family_t sa_family; /* address family, AF_PPPOX */ unsigned int sa_protocol; /* protocol identifier */ struct pppol2tpv3_addr pppol2tp; -} __attribute__((packed)); +} __packed; + +struct sockaddr_pppol2tpv3in6 { + __kernel_sa_family_t sa_family; /* address family, AF_PPPOX */ + unsigned int sa_protocol; /* protocol identifier */ + struct pppol2tpv3in6_addr pppol2tp; +} __packed; /********************************************************************* * @@ -140,7 +152,7 @@ struct pppoe_hdr { __be16 sid; __be16 length; struct pppoe_tag tag[0]; -} __attribute__((packed)); +} __packed; /* Length of entire PPPoE + PPP header */ #define PPPOE_SES_HLEN 8 diff --git a/include/linux/if_team.h b/include/linux/if_team.h index 58404b0c501..8185f57a9c7 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -28,10 +28,28 @@ struct team; struct team_port { struct net_device *dev; - struct hlist_node hlist; /* node in hash list */ + struct hlist_node hlist; /* node in enabled ports hash list */ struct list_head list; /* node in ordinary list */ struct team *team; - int index; + int index; /* index of enabled port. 
If disabled, it's set to -1 */ + + bool linkup; /* either state.linkup or user.linkup */ + + struct { + bool linkup; + u32 speed; + u8 duplex; + } state; + + /* Values set by userspace */ + struct { + bool linkup; + bool linkup_enabled; + } user; + + /* Custom gennetlink interface related flags */ + bool changed; + bool removed; /* * A place for storing original values of the device before it @@ -42,14 +60,6 @@ struct team_port { unsigned int mtu; } orig; - bool linkup; - u32 speed; - u8 duplex; - - /* Custom gennetlink interface related flags */ - bool changed; - bool removed; - struct rcu_head rcu; }; @@ -68,18 +78,30 @@ struct team_mode_ops { enum team_option_type { TEAM_OPTION_TYPE_U32, TEAM_OPTION_TYPE_STRING, + TEAM_OPTION_TYPE_BINARY, + TEAM_OPTION_TYPE_BOOL, +}; + +struct team_gsetter_ctx { + union { + u32 u32_val; + const char *str_val; + struct { + const void *ptr; + u32 len; + } bin_val; + bool bool_val; + } data; + struct team_port *port; }; struct team_option { struct list_head list; const char *name; + bool per_port; enum team_option_type type; - int (*getter)(struct team *team, void *arg); - int (*setter)(struct team *team, void *arg); - - /* Custom gennetlink interface related flags */ - bool changed; - bool removed; + int (*getter)(struct team *team, struct team_gsetter_ctx *ctx); + int (*setter)(struct team *team, struct team_gsetter_ctx *ctx); }; struct team_mode { @@ -103,13 +125,15 @@ struct team { struct mutex lock; /* used for overall locking, e.g. port lists write */ /* - * port lists with port count + * List of enabled ports and their count */ - int port_count; - struct hlist_head port_hlist[TEAM_PORT_HASHENTRIES]; - struct list_head port_list; + int en_port_count; + struct hlist_head en_port_hlist[TEAM_PORT_HASHENTRIES]; + + struct list_head port_list; /* list of all ports */ struct list_head option_list; + struct list_head option_inst_list; /* list of option instances */ const struct team_mode *mode; struct team_mode_ops ops; @@ -119,7 +143,7 @@ struct team { static inline struct hlist_head *team_port_index_hash(struct team *team, int port_index) { - return &team->port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)]; + return &team->en_port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)]; } static inline struct team_port *team_get_port_by_index(struct team *team, @@ -216,6 +240,7 @@ enum { TEAM_ATTR_OPTION_TYPE, /* u8 */ TEAM_ATTR_OPTION_DATA, /* dynamic */ TEAM_ATTR_OPTION_REMOVED, /* flag */ + TEAM_ATTR_OPTION_PORT_IFINDEX, /* u32 */ /* for per-port options */ __TEAM_ATTR_OPTION_MAX, TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1, diff --git a/include/linux/if_tr.h b/include/linux/if_tr.h deleted file mode 100644 index fc23aeb0f20..00000000000 --- a/include/linux/if_tr.h +++ /dev/null @@ -1,103 +0,0 @@ -/* - * INET An implementation of the TCP/IP protocol suite for the LINUX - * operating system. INET is implemented using the BSD Socket - * interface as the means of communication with the user level. - * - * Global definitions for the Token-Ring IEEE 802.5 interface. - * - * Version: @(#)if_tr.h 0.0 07/11/94 - * - * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> - * Donald Becker, <becker@super.org> - * Peter De Schrijver, <stud11@cc4.kuleuven.ac.be> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
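Returning to the if_team.h hunk above: options now exchange values through struct team_gsetter_ctx and gain TEAM_OPTION_TYPE_BOOL and per-port support. A hedged sketch of what a mode-private bool option could look like under the new getter/setter signatures (the option name and the static flag it toggles are invented for illustration; registration with the team core is omitted):

/*
 * Hypothetical team-mode option, illustration only. Real modes would keep
 * the flag in their private state; a file-static stands in for that here.
 */
#include <linux/if_team.h>

static bool example_enabled;

static int example_enabled_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	ctx->data.bool_val = example_enabled;
	return 0;
}

static int example_enabled_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	example_enabled = ctx->data.bool_val;
	return 0;
}

static struct team_option example_options[] = {
	{
		.name	= "example_enabled",
		.type	= TEAM_OPTION_TYPE_BOOL,
		.getter	= example_enabled_get,
		.setter	= example_enabled_set,
	},
};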
- */ -#ifndef _LINUX_IF_TR_H -#define _LINUX_IF_TR_H - -#include <linux/types.h> -#include <asm/byteorder.h> /* For __be16 */ - -/* IEEE 802.5 Token-Ring magic constants. The frame sizes omit the preamble - and FCS/CRC (frame check sequence). */ -#define TR_ALEN 6 /* Octets in one token-ring addr */ -#define TR_HLEN (sizeof(struct trh_hdr)+sizeof(struct trllc)) -#define AC 0x10 -#define LLC_FRAME 0x40 - -/* LLC and SNAP constants */ -#define EXTENDED_SAP 0xAA -#define UI_CMD 0x03 - -/* This is an Token-Ring frame header. */ -struct trh_hdr { - __u8 ac; /* access control field */ - __u8 fc; /* frame control field */ - __u8 daddr[TR_ALEN]; /* destination address */ - __u8 saddr[TR_ALEN]; /* source address */ - __be16 rcf; /* route control field */ - __be16 rseg[8]; /* routing registers */ -}; - -#ifdef __KERNEL__ -#include <linux/skbuff.h> - -static inline struct trh_hdr *tr_hdr(const struct sk_buff *skb) -{ - return (struct trh_hdr *)skb_mac_header(skb); -} -#endif - -/* This is an Token-Ring LLC structure */ -struct trllc { - __u8 dsap; /* destination SAP */ - __u8 ssap; /* source SAP */ - __u8 llc; /* LLC control field */ - __u8 protid[3]; /* protocol id */ - __be16 ethertype; /* ether type field */ -}; - -/* Token-Ring statistics collection data. */ -struct tr_statistics { - unsigned long rx_packets; /* total packets received */ - unsigned long tx_packets; /* total packets transmitted */ - unsigned long rx_bytes; /* total bytes received */ - unsigned long tx_bytes; /* total bytes transmitted */ - unsigned long rx_errors; /* bad packets received */ - unsigned long tx_errors; /* packet transmit problems */ - unsigned long rx_dropped; /* no space in linux buffers */ - unsigned long tx_dropped; /* no space available in linux */ - unsigned long multicast; /* multicast packets received */ - unsigned long transmit_collision; - - /* detailed Token-Ring errors. See IBM Token-Ring Network - Architecture for more info */ - - unsigned long line_errors; - unsigned long internal_errors; - unsigned long burst_errors; - unsigned long A_C_errors; - unsigned long abort_delimiters; - unsigned long lost_frames; - unsigned long recv_congest_count; - unsigned long frame_copied_errors; - unsigned long frequency_errors; - unsigned long token_errors; - unsigned long dummy1; -}; - -/* source routing stuff */ -#define TR_RII 0x80 -#define TR_RCF_DIR_BIT 0x80 -#define TR_RCF_LEN_MASK 0x1f00 -#define TR_RCF_BROADCAST 0x8000 /* all-routes broadcast */ -#define TR_RCF_LIMITED_BROADCAST 0xC000 /* single-route broadcast */ -#define TR_RCF_FRAME2K 0x20 -#define TR_RCF_BROADCAST_MASK 0xC000 -#define TR_MAXRIFLEN 18 - -#endif /* _LINUX_IF_TR_H */ diff --git a/include/linux/in6.h b/include/linux/in6.h index 5c83d9e3eb8..cba469ba11a 100644 --- a/include/linux/in6.h +++ b/include/linux/in6.h @@ -142,7 +142,7 @@ struct in6_flowlabel_req { /* * IPv6 TLV options. 
*/ -#define IPV6_TLV_PAD0 0 +#define IPV6_TLV_PAD1 0 #define IPV6_TLV_PADN 1 #define IPV6_TLV_ROUTERALERT 5 #define IPV6_TLV_JUMBO 194 diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h index 4deb3834d62..8a2d438dc49 100644 --- a/include/linux/ip_vs.h +++ b/include/linux/ip_vs.h @@ -89,6 +89,7 @@ #define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */ #define IP_VS_CONN_F_ONE_PACKET 0x2000 /* forward only one packet */ +/* Initial bits allowed in backup server */ #define IP_VS_CONN_F_BACKUP_MASK (IP_VS_CONN_F_FWD_MASK | \ IP_VS_CONN_F_NOOUTPUT | \ IP_VS_CONN_F_INACTIVE | \ @@ -97,6 +98,10 @@ IP_VS_CONN_F_TEMPLATE \ ) +/* Bits allowed to update in backup server */ +#define IP_VS_CONN_F_BACKUP_UPD_MASK (IP_VS_CONN_F_INACTIVE | \ + IP_VS_CONN_F_SEQ_MASK) + /* Flags that are not sent to backup server start from bit 16 */ #define IP_VS_CONN_F_NFCT (1 << 16) /* use netfilter conntrack */ @@ -125,8 +130,8 @@ struct ip_vs_service_user { /* virtual service options */ char sched_name[IP_VS_SCHEDNAME_MAXLEN]; - unsigned flags; /* virtual service flags */ - unsigned timeout; /* persistent timeout in sec */ + unsigned int flags; /* virtual service flags */ + unsigned int timeout; /* persistent timeout in sec */ __be32 netmask; /* persistent netmask */ }; @@ -137,7 +142,7 @@ struct ip_vs_dest_user { __be16 port; /* real server options */ - unsigned conn_flags; /* connection flags */ + unsigned int conn_flags; /* connection flags */ int weight; /* destination weight */ /* thresholds for active connections */ @@ -187,8 +192,8 @@ struct ip_vs_service_entry { /* service options */ char sched_name[IP_VS_SCHEDNAME_MAXLEN]; - unsigned flags; /* virtual service flags */ - unsigned timeout; /* persistent timeout */ + unsigned int flags; /* virtual service flags */ + unsigned int timeout; /* persistent timeout */ __be32 netmask; /* persistent netmask */ /* number of real servers */ @@ -202,7 +207,7 @@ struct ip_vs_service_entry { struct ip_vs_dest_entry { __be32 addr; /* destination address */ __be16 port; - unsigned conn_flags; /* connection flags */ + unsigned int conn_flags; /* connection flags */ int weight; /* destination weight */ __u32 u_threshold; /* upper threshold */ diff --git a/include/linux/ipx.h b/include/linux/ipx.h index 3d48014cdd7..8f0243982eb 100644 --- a/include/linux/ipx.h +++ b/include/linux/ipx.h @@ -38,7 +38,7 @@ struct ipx_interface_definition { #define IPX_FRAME_8022 2 #define IPX_FRAME_ETHERII 3 #define IPX_FRAME_8023 4 -#define IPX_FRAME_TR_8022 5 /* obsolete */ +/* obsolete token ring was 5 */ unsigned char ipx_special; #define IPX_SPECIAL_NONE 0 #define IPX_PRIMARY 1 diff --git a/include/linux/l2tp.h b/include/linux/l2tp.h index e77d7f9bb24..7eab668f60f 100644 --- a/include/linux/l2tp.h +++ b/include/linux/l2tp.h @@ -11,6 +11,7 @@ #include <linux/socket.h> #ifdef __KERNEL__ #include <linux/in.h> +#include <linux/in6.h> #else #include <netinet/in.h> #endif @@ -39,6 +40,22 @@ struct sockaddr_l2tpip { sizeof(__u32)]; }; +/** + * struct sockaddr_l2tpip6 - the sockaddr structure for L2TP-over-IPv6 sockets + * @l2tp_family: address family number AF_L2TPIP. 
+ * @l2tp_addr: protocol specific address information + * @l2tp_conn_id: connection id of tunnel + */ +struct sockaddr_l2tpip6 { + /* The first fields must match struct sockaddr_in6 */ + __kernel_sa_family_t l2tp_family; /* AF_INET6 */ + __be16 l2tp_unused; /* INET port number (unused) */ + __be32 l2tp_flowinfo; /* IPv6 flow information */ + struct in6_addr l2tp_addr; /* IPv6 address */ + __u32 l2tp_scope_id; /* scope id (new in RFC2553) */ + __u32 l2tp_conn_id; /* Connection ID of tunnel */ +}; + /***************************************************************************** * NETLINK_GENERIC netlink family. *****************************************************************************/ @@ -108,6 +125,8 @@ enum { L2TP_ATTR_MTU, /* u16 */ L2TP_ATTR_MRU, /* u16 */ L2TP_ATTR_STATS, /* nested */ + L2TP_ATTR_IP6_SADDR, /* struct in6_addr */ + L2TP_ATTR_IP6_DADDR, /* struct in6_addr */ __L2TP_ATTR_MAX, }; diff --git a/include/linux/mISDNhw.h b/include/linux/mISDNhw.h index 4af841408fb..d0752eca9b4 100644 --- a/include/linux/mISDNhw.h +++ b/include/linux/mISDNhw.h @@ -72,7 +72,9 @@ #define FLG_LL_OK 24 #define FLG_LL_CONN 25 #define FLG_DTMFSEND 26 - +#define FLG_TX_EMPTY 27 +/* stop sending received data upstream */ +#define FLG_RX_OFF 28 /* workq events */ #define FLG_RECVQUEUE 30 #define FLG_PHCHANGE 31 @@ -135,10 +137,14 @@ extern int create_l1(struct dchannel *, dchannel_l1callback *); #define HW_TESTRX_RAW 0x9602 #define HW_TESTRX_HDLC 0x9702 #define HW_TESTRX_OFF 0x9802 +#define HW_TIMER3_IND 0x9902 +#define HW_TIMER3_VALUE 0x9a00 +#define HW_TIMER3_VMASK 0x00FF struct layer1; extern int l1_event(struct layer1 *, u_int); +#define MISDN_BCH_FILL_SIZE 4 struct bchannel { struct mISDNchannel ch; @@ -150,8 +156,14 @@ struct bchannel { int slot; /* multiport card channel slot */ struct timer_list timer; /* receive data */ + u8 fill[MISDN_BCH_FILL_SIZE]; struct sk_buff *rx_skb; - int maxlen; + unsigned short maxlen; + unsigned short init_maxlen; /* initial value */ + unsigned short next_maxlen; /* pending value */ + unsigned short minlen; /* for transparent data */ + unsigned short init_minlen; /* initial value */ + unsigned short next_minlen; /* pending value */ /* send data */ struct sk_buff *next_skb; struct sk_buff *tx_skb; @@ -163,23 +175,26 @@ struct bchannel { int err_crc; int err_tx; int err_rx; + int dropcnt; }; extern int mISDN_initdchannel(struct dchannel *, int, void *); -extern int mISDN_initbchannel(struct bchannel *, int); +extern int mISDN_initbchannel(struct bchannel *, unsigned short, + unsigned short); extern int mISDN_freedchannel(struct dchannel *); extern void mISDN_clear_bchannel(struct bchannel *); extern int mISDN_freebchannel(struct bchannel *); +extern int mISDN_ctrl_bchannel(struct bchannel *, struct mISDN_ctrl_req *); extern void queue_ch_frame(struct mISDNchannel *, u_int, int, struct sk_buff *); extern int dchannel_senddata(struct dchannel *, struct sk_buff *); extern int bchannel_senddata(struct bchannel *, struct sk_buff *); +extern int bchannel_get_rxbuf(struct bchannel *, int); extern void recv_Dchannel(struct dchannel *); extern void recv_Echannel(struct dchannel *, struct dchannel *); -extern void recv_Bchannel(struct bchannel *, unsigned int id); +extern void recv_Bchannel(struct bchannel *, unsigned int, bool); extern void recv_Dchannel_skb(struct dchannel *, struct sk_buff *); extern void recv_Bchannel_skb(struct bchannel *, struct sk_buff *); -extern void confirm_Bsend(struct bchannel *bch); extern int get_next_bframe(struct bchannel *); extern int 
get_next_dframe(struct dchannel *); diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h index b5e7f220248..246a3529ecf 100644 --- a/include/linux/mISDNif.h +++ b/include/linux/mISDNif.h @@ -37,7 +37,7 @@ */ #define MISDN_MAJOR_VERSION 1 #define MISDN_MINOR_VERSION 1 -#define MISDN_RELEASE 21 +#define MISDN_RELEASE 29 /* primitives for information exchange * generell format @@ -115,6 +115,11 @@ #define MDL_ERROR_IND 0x1F04 #define MDL_ERROR_RSP 0x5F04 +/* intern layer 2 */ +#define DL_TIMER200_IND 0x7004 +#define DL_TIMER203_IND 0x7304 +#define DL_INTERN_MSG 0x7804 + /* DL_INFORMATION_IND types */ #define DL_INFO_L2_CONNECT 0x0001 #define DL_INFO_L2_REMOVED 0x0002 @@ -360,6 +365,7 @@ clear_channelmap(u_int nr, u_char *map) #define MISDN_CTRL_LOOP 0x0001 #define MISDN_CTRL_CONNECT 0x0002 #define MISDN_CTRL_DISCONNECT 0x0004 +#define MISDN_CTRL_RX_BUFFER 0x0008 #define MISDN_CTRL_PCMCONNECT 0x0010 #define MISDN_CTRL_PCMDISCONNECT 0x0020 #define MISDN_CTRL_SETPEER 0x0040 @@ -367,6 +373,7 @@ clear_channelmap(u_int nr, u_char *map) #define MISDN_CTRL_RX_OFF 0x0100 #define MISDN_CTRL_FILL_EMPTY 0x0200 #define MISDN_CTRL_GETPEER 0x0400 +#define MISDN_CTRL_L1_TIMER3 0x0800 #define MISDN_CTRL_HW_FEATURES_OP 0x2000 #define MISDN_CTRL_HW_FEATURES 0x2001 #define MISDN_CTRL_HFC_OP 0x4000 @@ -381,6 +388,12 @@ clear_channelmap(u_int nr, u_char *map) #define MISDN_CTRL_HFC_WD_INIT 0x4009 #define MISDN_CTRL_HFC_WD_RESET 0x400A +/* special RX buffer value for MISDN_CTRL_RX_BUFFER request.p1 is the minimum + * buffer size request.p2 the maximum. Using MISDN_CTRL_RX_SIZE_IGNORE will + * not change the value, but still read back the actual stetting. + */ +#define MISDN_CTRL_RX_SIZE_IGNORE -1 + /* socket options */ #define MISDN_TIME_STAMP 0x0001 @@ -585,6 +598,7 @@ static inline struct mISDNdevice *dev_to_mISDN(struct device *dev) extern void set_channel_address(struct mISDNchannel *, u_int, u_int); extern void mISDN_clock_update(struct mISDNclock *, int, struct timeval *); extern unsigned short mISDN_clock_get(void); +extern const char *mISDNDevName4ch(struct mISDNchannel *); #endif /* __KERNEL__ */ #endif /* mISDNIF_H */ diff --git a/include/linux/mdio-mux.h b/include/linux/mdio-mux.h new file mode 100644 index 00000000000..a243dbba865 --- /dev/null +++ b/include/linux/mdio-mux.h @@ -0,0 +1,21 @@ +/* + * MDIO bus multiplexer framwork. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2011, 2012 Cavium, Inc. 
+ */ +#ifndef __LINUX_MDIO_MUX_H +#define __LINUX_MDIO_MUX_H +#include <linux/device.h> + +int mdio_mux_init(struct device *dev, + int (*switch_fn) (int cur, int desired, void *data), + void **mux_handle, + void *data); + +void mdio_mux_uninit(void *mux_handle); + +#endif /* __LINUX_MDIO_MUX_H */ diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 9958ff2cad3..1f3860a8a10 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -150,6 +150,10 @@ enum { /* statistics commands */ MLX4_CMD_QUERY_IF_STAT = 0X54, MLX4_CMD_SET_IF_STAT = 0X55, + + /* set port opcode modifiers */ + MLX4_SET_PORT_PRIO2TC = 0x8, + MLX4_SET_PORT_SCHEDULER = 0x9, }; enum { diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 834c96c5d87..6d028247f79 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -628,6 +628,9 @@ int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx); int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, u8 promisc); +int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc); +int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, + u8 *pg, u16 *ratelimit); int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index); diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 091f9e7dc8b..96005d75893 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -139,7 +139,8 @@ struct mlx4_qp_path { u8 rgid[16]; u8 sched_queue; u8 vlan_index; - u8 reserved3[2]; + u8 feup; + u8 reserved3; u8 reserved4[2]; u8 dmac[6]; }; diff --git a/include/linux/neighbour.h b/include/linux/neighbour.h index b188f68a08c..275e5d65dcb 100644 --- a/include/linux/neighbour.h +++ b/include/linux/neighbour.h @@ -33,6 +33,9 @@ enum { #define NTF_PROXY 0x08 /* == ATF_PUBL */ #define NTF_ROUTER 0x80 +#define NTF_SELF 0x02 +#define NTF_MASTER 0x04 + /* * Neighbor Cache Entry States. */ diff --git a/include/linux/net.h b/include/linux/net.h index be60c7f5e14..2d7510f3893 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -250,6 +250,29 @@ extern struct socket *sockfd_lookup(int fd, int *err); #define sockfd_put(sock) fput(sock->file) extern int net_ratelimit(void); +#define net_ratelimited_function(function, ...) \ +do { \ + if (net_ratelimit()) \ + function(__VA_ARGS__); \ +} while (0) + +#define net_emerg_ratelimited(fmt, ...) \ + net_ratelimited_function(pr_emerg, fmt, ##__VA_ARGS__) +#define net_alert_ratelimited(fmt, ...) \ + net_ratelimited_function(pr_alert, fmt, ##__VA_ARGS__) +#define net_crit_ratelimited(fmt, ...) \ + net_ratelimited_function(pr_crit, fmt, ##__VA_ARGS__) +#define net_err_ratelimited(fmt, ...) \ + net_ratelimited_function(pr_err, fmt, ##__VA_ARGS__) +#define net_notice_ratelimited(fmt, ...) \ + net_ratelimited_function(pr_notice, fmt, ##__VA_ARGS__) +#define net_warn_ratelimited(fmt, ...) \ + net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__) +#define net_info_ratelimited(fmt, ...) \ + net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__) +#define net_dbg_ratelimited(fmt, ...) 
\ + net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__) + #define net_random() random32() #define net_srandom(seed) srandom32((__force u32)seed) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 33900a53c99..e7fd468f712 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -54,6 +54,7 @@ #include <net/netprio_cgroup.h> #include <linux/netdev_features.h> +#include <linux/neighbour.h> struct netpoll_info; struct device; @@ -288,7 +289,7 @@ struct hh_cache { struct header_ops { int (*create) (struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, - const void *saddr, unsigned len); + const void *saddr, unsigned int len); int (*parse)(const struct sk_buff *skb, unsigned char *haddr); int (*rebuild)(struct sk_buff *skb); int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); @@ -905,6 +906,16 @@ struct netdev_fcoe_hbainfo { * feature set might be less than what was returned by ndo_fix_features()). * Must return >0 or -errno if it changed dev->features itself. * + * int (*ndo_fdb_add)(struct ndmsg *ndm, struct net_device *dev, + * unsigned char *addr, u16 flags) + * Adds an FDB entry to dev for addr. + * int (*ndo_fdb_del)(struct ndmsg *ndm, struct net_device *dev, + * unsigned char *addr) + * Deletes the FDB entry from dev coresponding to addr. + * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, + * struct net_device *dev, int idx) + * Used to add FDB entries to dump requests. Implementers should add + * entries to skb and update idx with the number of entries. */ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1002,6 +1013,18 @@ struct net_device_ops { netdev_features_t features); int (*ndo_neigh_construct)(struct neighbour *n); void (*ndo_neigh_destroy)(struct neighbour *n); + + int (*ndo_fdb_add)(struct ndmsg *ndm, + struct net_device *dev, + unsigned char *addr, + u16 flags); + int (*ndo_fdb_del)(struct ndmsg *ndm, + struct net_device *dev, + unsigned char *addr); + int (*ndo_fdb_dump)(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + int idx); }; /* @@ -1132,7 +1155,6 @@ struct net_device { struct in_device __rcu *ip_ptr; /* IPv4 specific data */ struct dn_dev __rcu *dn_ptr; /* DECnet specific data */ struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */ - void *ec_ptr; /* Econet specific data */ void *ax25_ptr; /* AX.25 specific data */ struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data, assign before registering */ @@ -1477,6 +1499,8 @@ struct napi_gro_cb { /* Free the skb? 
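
The net_<level>_ratelimited() helpers introduced above simply guard an existing pr_<level>() call with net_ratelimit(). A minimal usage sketch, not taken from this patch; the driver function and message are hypothetical:

#include <linux/net.h>
#include <linux/netdevice.h>

static void example_rx_error(struct net_device *dev, int err)
{
	/* printed at most as often as net_ratelimit() allows */
	net_warn_ratelimited("%s: receive error %d\n", dev->name, err);
}
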
*/ int free; +#define NAPI_GRO_FREE 1 +#define NAPI_GRO_FREE_STOLEN_HEAD 2 }; #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) @@ -1680,7 +1704,7 @@ static inline void *skb_gro_network_header(struct sk_buff *skb) static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, - unsigned len) + unsigned int len) { if (!dev->header_ops || !dev->header_ops->create) return 0; @@ -1731,7 +1755,7 @@ struct softnet_data { unsigned int input_queue_head; unsigned int input_queue_tail; #endif - unsigned dropped; + unsigned int dropped; struct sk_buff_head input_pkt_queue; struct napi_struct backlog; }; @@ -1916,7 +1940,7 @@ static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) } static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, - unsigned pkts, unsigned bytes) + unsigned int pkts, unsigned int bytes) { #ifdef CONFIG_BQL if (unlikely(!bytes)) @@ -1940,7 +1964,7 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, } static inline void netdev_completed_queue(struct net_device *dev, - unsigned pkts, unsigned bytes) + unsigned int pkts, unsigned int bytes) { netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); } @@ -2118,7 +2142,6 @@ extern struct sk_buff * napi_get_frags(struct napi_struct *napi); extern gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, gro_result_t ret); -extern struct sk_buff * napi_frags_skb(struct napi_struct *napi); extern gro_result_t napi_gro_frags(struct napi_struct *napi); static inline void napi_free_frags(struct napi_struct *napi) @@ -2135,9 +2158,9 @@ extern void netdev_rx_handler_unregister(struct net_device *dev); extern bool dev_valid_name(const char *name); extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); extern int dev_ethtool(struct net *net, struct ifreq *); -extern unsigned dev_get_flags(const struct net_device *); +extern unsigned int dev_get_flags(const struct net_device *); extern int __dev_change_flags(struct net_device *, unsigned int flags); -extern int dev_change_flags(struct net_device *, unsigned); +extern int dev_change_flags(struct net_device *, unsigned int); extern void __dev_notify_flags(struct net_device *, unsigned int old_flags); extern int dev_change_name(struct net_device *, const char *); extern int dev_set_alias(struct net_device *, const char *, size_t); @@ -2537,6 +2560,7 @@ extern int dev_addr_init(struct net_device *dev); /* Functions used for unicast addresses handling */ extern int dev_uc_add(struct net_device *dev, unsigned char *addr); +extern int dev_uc_add_excl(struct net_device *dev, unsigned char *addr); extern int dev_uc_del(struct net_device *dev, unsigned char *addr); extern int dev_uc_sync(struct net_device *to, struct net_device *from); extern void dev_uc_unsync(struct net_device *to, struct net_device *from); @@ -2546,6 +2570,7 @@ extern void dev_uc_init(struct net_device *dev); /* Functions used for multicast addresses handling */ extern int dev_mc_add(struct net_device *dev, unsigned char *addr); extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr); +extern int dev_mc_add_excl(struct net_device *dev, unsigned char *addr); extern int dev_mc_del(struct net_device *dev, unsigned char *addr); extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr); extern int dev_mc_sync(struct net_device *to, struct net_device *from); diff --git a/include/linux/netfilter.h 
b/include/linux/netfilter.h index 29734be334c..ff9c84c29b2 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -154,12 +154,6 @@ void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n); int nf_register_sockopt(struct nf_sockopt_ops *reg); void nf_unregister_sockopt(struct nf_sockopt_ops *reg); -#ifdef CONFIG_SYSCTL -/* Sysctl registration */ -extern struct ctl_path nf_net_netfilter_sysctl_path[]; -extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[]; -#endif /* CONFIG_SYSCTL */ - extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; #if defined(CONFIG_JUMP_LABEL) diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 2f8e18a2322..2edc64cab73 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -411,26 +411,32 @@ ip_set_get_h16(const struct nlattr *attr) #define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED) #define ipset_nest_end(skb, start) nla_nest_end(skb, start) -#define NLA_PUT_IPADDR4(skb, type, ipaddr) \ -do { \ - struct nlattr *__nested = ipset_nest_start(skb, type); \ - \ - if (!__nested) \ - goto nla_put_failure; \ - NLA_PUT_NET32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr); \ - ipset_nest_end(skb, __nested); \ -} while (0) - -#define NLA_PUT_IPADDR6(skb, type, ipaddrptr) \ -do { \ - struct nlattr *__nested = ipset_nest_start(skb, type); \ - \ - if (!__nested) \ - goto nla_put_failure; \ - NLA_PUT(skb, IPSET_ATTR_IPADDR_IPV6, \ - sizeof(struct in6_addr), ipaddrptr); \ - ipset_nest_end(skb, __nested); \ -} while (0) +static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr) +{ + struct nlattr *__nested = ipset_nest_start(skb, type); + int ret; + + if (!__nested) + return -EMSGSIZE; + ret = nla_put_net32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr); + if (!ret) + ipset_nest_end(skb, __nested); + return ret; +} + +static inline int nla_put_ipaddr6(struct sk_buff *skb, int type, const struct in6_addr *ipaddrptr) +{ + struct nlattr *__nested = ipset_nest_start(skb, type); + int ret; + + if (!__nested) + return -EMSGSIZE; + ret = nla_put(skb, IPSET_ATTR_IPADDR_IPV6, + sizeof(struct in6_addr), ipaddrptr); + if (!ret) + ipset_nest_end(skb, __nested); + return ret; +} /* Get address from skbuff */ static inline __be32 @@ -472,8 +478,8 @@ union ip_set_name_index { #define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */ struct ip_set_req_get_set { - unsigned op; - unsigned version; + unsigned int op; + unsigned int version; union ip_set_name_index set; }; @@ -482,8 +488,8 @@ struct ip_set_req_get_set { #define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */ struct ip_set_req_version { - unsigned op; - unsigned version; + unsigned int op; + unsigned int version; }; #endif /*_IP_SET_H */ diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h index 230a290e197..b114d35aea5 100644 --- a/include/linux/netfilter/ipset/ip_set_ahash.h +++ b/include/linux/netfilter/ipset/ip_set_ahash.h @@ -610,17 +610,20 @@ type_pf_head(struct ip_set *set, struct sk_buff *skb) nested = ipset_nest_start(skb, IPSET_ATTR_DATA); if (!nested) goto nla_put_failure; - NLA_PUT_NET32(skb, IPSET_ATTR_HASHSIZE, - htonl(jhash_size(h->table->htable_bits))); - NLA_PUT_NET32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)); + if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE, + htonl(jhash_size(h->table->htable_bits))) || + nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem))) + goto nla_put_failure; 
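
With the NLA_PUT_IPADDR* macros above converted to nla_put_ipaddr4()/nla_put_ipaddr6(), callers check a return value instead of relying on an implicit jump to nla_put_failure. A minimal sketch of a caller under the new convention; the function and the choice of IPSET_ATTR_IP/IPSET_ATTR_IP2 are only illustrative:

static int example_put_elem(struct sk_buff *skb, __be32 ip4,
			    const struct in6_addr *ip6)
{
	/* both helpers return 0 on success and -EMSGSIZE on failure */
	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, ip4) ||
	    nla_put_ipaddr6(skb, IPSET_ATTR_IP2, ip6))
		return -EMSGSIZE;
	return 0;
}
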
#ifdef IP_SET_HASH_WITH_NETMASK - if (h->netmask != HOST_MASK) - NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask); + if (h->netmask != HOST_MASK && + nla_put_u8(skb, IPSET_ATTR_NETMASK, h->netmask)) + goto nla_put_failure; #endif - NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); - NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)); - if (with_timeout(h->timeout)) - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout)); + if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || + nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) || + (with_timeout(h->timeout) && + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout)))) + goto nla_put_failure; ipset_nest_end(skb, nested); return 0; diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h index 47923205a4a..41d9cfa0816 100644 --- a/include/linux/netfilter/ipset/ip_set_timeout.h +++ b/include/linux/netfilter/ipset/ip_set_timeout.h @@ -30,6 +30,10 @@ ip_set_timeout_uget(struct nlattr *tb) { unsigned int timeout = ip_set_get_h32(tb); + /* Normalize to fit into jiffies */ + if (timeout > UINT_MAX/MSEC_PER_SEC) + timeout = UINT_MAX/MSEC_PER_SEC; + /* Userspace supplied TIMEOUT parameter: adjust crazy size */ return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout; } diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index 0d3dd66322e..d146872a0b9 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -83,6 +83,10 @@ enum ip_conntrack_status { /* Conntrack is a fake untracked entry */ IPS_UNTRACKED_BIT = 12, IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT), + + /* Conntrack got a helper explicitly attached via CT target. 
*/ + IPS_HELPER_BIT = 13, + IPS_HELPER = (1 << IPS_HELPER_BIT), }; /* Connection tracking event types */ diff --git a/include/linux/netfilter/nf_conntrack_h323_types.h b/include/linux/netfilter/nf_conntrack_h323_types.h index f35b6b4801e..b0821f45fbe 100644 --- a/include/linux/netfilter/nf_conntrack_h323_types.h +++ b/include/linux/netfilter/nf_conntrack_h323_types.h @@ -7,12 +7,12 @@ typedef struct TransportAddress_ipAddress { /* SEQUENCE */ int options; /* No use */ - unsigned ip; + unsigned int ip; } TransportAddress_ipAddress; typedef struct TransportAddress_ip6Address { /* SEQUENCE */ int options; /* No use */ - unsigned ip; + unsigned int ip; } TransportAddress_ip6Address; typedef struct TransportAddress { /* CHOICE */ @@ -96,12 +96,12 @@ typedef struct DataType { /* CHOICE */ typedef struct UnicastAddress_iPAddress { /* SEQUENCE */ int options; /* No use */ - unsigned network; + unsigned int network; } UnicastAddress_iPAddress; typedef struct UnicastAddress_iP6Address { /* SEQUENCE */ int options; /* No use */ - unsigned network; + unsigned int network; } UnicastAddress_iP6Address; typedef struct UnicastAddress { /* CHOICE */ @@ -698,7 +698,7 @@ typedef struct RegistrationRequest { /* SEQUENCE */ } options; RegistrationRequest_callSignalAddress callSignalAddress; RegistrationRequest_rasAddress rasAddress; - unsigned timeToLive; + unsigned int timeToLive; } RegistrationRequest; typedef struct RegistrationConfirm_callSignalAddress { /* SEQUENCE OF */ @@ -730,7 +730,7 @@ typedef struct RegistrationConfirm { /* SEQUENCE */ eRegistrationConfirm_genericData = (1 << 12), } options; RegistrationConfirm_callSignalAddress callSignalAddress; - unsigned timeToLive; + unsigned int timeToLive; } RegistrationConfirm; typedef struct UnregistrationRequest_callSignalAddress { /* SEQUENCE OF */ diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 6fd1f0d07e6..a1048c1587d 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -80,7 +80,7 @@ extern int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n); extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n); extern int nfnetlink_has_listeners(struct net *net, unsigned int group); -extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, +extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo, gfp_t flags); extern int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error); extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags); diff --git a/include/linux/netfilter/xt_HMARK.h b/include/linux/netfilter/xt_HMARK.h new file mode 100644 index 00000000000..abb1650940d --- /dev/null +++ b/include/linux/netfilter/xt_HMARK.h @@ -0,0 +1,45 @@ +#ifndef XT_HMARK_H_ +#define XT_HMARK_H_ + +#include <linux/types.h> + +enum { + XT_HMARK_SADDR_MASK, + XT_HMARK_DADDR_MASK, + XT_HMARK_SPI, + XT_HMARK_SPI_MASK, + XT_HMARK_SPORT, + XT_HMARK_DPORT, + XT_HMARK_SPORT_MASK, + XT_HMARK_DPORT_MASK, + XT_HMARK_PROTO_MASK, + XT_HMARK_RND, + XT_HMARK_MODULUS, + XT_HMARK_OFFSET, + XT_HMARK_CT, + XT_HMARK_METHOD_L3, + XT_HMARK_METHOD_L3_4, +}; +#define XT_HMARK_FLAG(flag) (1 << flag) + +union hmark_ports { + struct { + __u16 src; + __u16 dst; + } p16; + __u32 v32; +}; + +struct xt_hmark_info { + union nf_inet_addr src_mask; + union nf_inet_addr dst_mask; + union hmark_ports port_mask; + union hmark_ports port_set; + __u32 flags; + __u16 proto_mask; 
+ __u32 hashrnd; + __u32 hmodulus; + __u32 hoffset; /* Mark offset to start from */ +}; + +#endif /* XT_HMARK_H_ */ diff --git a/include/linux/netfilter/xt_hashlimit.h b/include/linux/netfilter/xt_hashlimit.h index b1925b5925e..c42e52f39f8 100644 --- a/include/linux/netfilter/xt_hashlimit.h +++ b/include/linux/netfilter/xt_hashlimit.h @@ -6,7 +6,11 @@ /* timings are in milliseconds. */ #define XT_HASHLIMIT_SCALE 10000 /* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490 - seconds, or one every 59 hours. */ + * seconds, or one packet every 59 hours. + */ + +/* packet length accounting is done in 16-byte steps */ +#define XT_HASHLIMIT_BYTE_SHIFT 4 /* details of this structure hidden by the implementation */ struct xt_hashlimit_htable; @@ -17,7 +21,13 @@ enum { XT_HASHLIMIT_HASH_SIP = 1 << 2, XT_HASHLIMIT_HASH_SPT = 1 << 3, XT_HASHLIMIT_INVERT = 1 << 4, + XT_HASHLIMIT_BYTES = 1 << 5, }; +#ifdef __KERNEL__ +#define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \ + XT_HASHLIMIT_HASH_SIP | XT_HASHLIMIT_HASH_SPT | \ + XT_HASHLIMIT_INVERT | XT_HASHLIMIT_BYTES) +#endif struct hashlimit_cfg { __u32 mode; /* bitmask of XT_HASHLIMIT_HASH_* */ diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild index 31f8bec9565..c61b8fb1a9e 100644 --- a/include/linux/netfilter_ipv4/Kbuild +++ b/include/linux/netfilter_ipv4/Kbuild @@ -1,4 +1,3 @@ -header-y += ip_queue.h header-y += ip_tables.h header-y += ipt_CLUSTERIP.h header-y += ipt_ECN.h diff --git a/include/linux/netfilter_ipv4/ip_queue.h b/include/linux/netfilter_ipv4/ip_queue.h deleted file mode 100644 index a03507f465f..00000000000 --- a/include/linux/netfilter_ipv4/ip_queue.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * This is a module which is used for queueing IPv4 packets and - * communicating with userspace via netlink. - * - * (C) 2000 James Morris, this code is GPL. - */ -#ifndef _IP_QUEUE_H -#define _IP_QUEUE_H - -#ifdef __KERNEL__ -#ifdef DEBUG_IPQ -#define QDEBUG(x...) printk(KERN_DEBUG ## x) -#else -#define QDEBUG(x...) -#endif /* DEBUG_IPQ */ -#else -#include <net/if.h> -#endif /* ! 
__KERNEL__ */ - -/* Messages sent from kernel */ -typedef struct ipq_packet_msg { - unsigned long packet_id; /* ID of queued packet */ - unsigned long mark; /* Netfilter mark value */ - long timestamp_sec; /* Packet arrival time (seconds) */ - long timestamp_usec; /* Packet arrvial time (+useconds) */ - unsigned int hook; /* Netfilter hook we rode in on */ - char indev_name[IFNAMSIZ]; /* Name of incoming interface */ - char outdev_name[IFNAMSIZ]; /* Name of outgoing interface */ - __be16 hw_protocol; /* Hardware protocol (network order) */ - unsigned short hw_type; /* Hardware type */ - unsigned char hw_addrlen; /* Hardware address length */ - unsigned char hw_addr[8]; /* Hardware address */ - size_t data_len; /* Length of packet data */ - unsigned char payload[0]; /* Optional packet data */ -} ipq_packet_msg_t; - -/* Messages sent from userspace */ -typedef struct ipq_mode_msg { - unsigned char value; /* Requested mode */ - size_t range; /* Optional range of packet requested */ -} ipq_mode_msg_t; - -typedef struct ipq_verdict_msg { - unsigned int value; /* Verdict to hand to netfilter */ - unsigned long id; /* Packet ID for this verdict */ - size_t data_len; /* Length of replacement data */ - unsigned char payload[0]; /* Optional replacement packet */ -} ipq_verdict_msg_t; - -typedef struct ipq_peer_msg { - union { - ipq_verdict_msg_t verdict; - ipq_mode_msg_t mode; - } msg; -} ipq_peer_msg_t; - -/* Packet delivery modes */ -enum { - IPQ_COPY_NONE, /* Initial mode, packets are dropped */ - IPQ_COPY_META, /* Copy metadata */ - IPQ_COPY_PACKET /* Copy metadata + packet (range) */ -}; -#define IPQ_COPY_MAX IPQ_COPY_PACKET - -/* Types of messages */ -#define IPQM_BASE 0x10 /* standard netlink messages below this */ -#define IPQM_MODE (IPQM_BASE + 1) /* Mode request from peer */ -#define IPQM_VERDICT (IPQM_BASE + 2) /* Verdict from peer */ -#define IPQM_PACKET (IPQM_BASE + 3) /* Packet from kernel */ -#define IPQM_MAX (IPQM_BASE + 4) - -#endif /*_IP_QUEUE_H*/ diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 1bc898b14a8..08c2cbbaa32 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -298,9 +298,14 @@ ip6t_ext_hdr(u8 nexthdr) (nexthdr == IPPROTO_DSTOPTS); } +enum { + IP6T_FH_F_FRAG = (1 << 0), + IP6T_FH_F_AUTH = (1 << 1), +}; + /* find specified header and get offset to it */ extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, - int target, unsigned short *fragoff); + int target, unsigned short *fragoff, int *fragflg); #ifdef CONFIG_COMPAT #include <net/compat.h> diff --git a/include/linux/netlink.h b/include/linux/netlink.h index a2092f582a7..0f628ffa420 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -7,7 +7,7 @@ #define NETLINK_ROUTE 0 /* Routing/device hook */ #define NETLINK_UNUSED 1 /* Unused number */ #define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */ -#define NETLINK_FIREWALL 3 /* Firewalling hook */ +#define NETLINK_FIREWALL 3 /* Unused number, formerly ip_queue */ #define NETLINK_SOCK_DIAG 4 /* socket monitoring */ #define NETLINK_NFLOG 5 /* netfilter/iptables ULOG */ #define NETLINK_XFRM 6 /* ipsec */ diff --git a/include/linux/nfc.h b/include/linux/nfc.h index 39c1fcf089c..0ae9b5857c8 100644 --- a/include/linux/nfc.h +++ b/include/linux/nfc.h @@ -70,6 +70,7 @@ enum nfc_commands { NFC_EVENT_TARGETS_FOUND, NFC_EVENT_DEVICE_ADDED, NFC_EVENT_DEVICE_REMOVED, + NFC_EVENT_TARGET_LOST, /* private: internal use only */ 
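
ipv6_find_hdr() above grows a fifth parameter that reports IP6T_FH_F_* flags seen while walking the extension header chain. A minimal sketch of a caller, assuming the established convention that a target of -1 asks for the last (transport) header; the wrapper function itself is hypothetical:

static bool example_skb_is_ipv6_fragment(const struct sk_buff *skb)
{
	unsigned int thoff = 0;
	unsigned short fragoff = 0;
	int flags = 0;
	int proto;

	proto = ipv6_find_hdr(skb, &thoff, -1, &fragoff, &flags);
	if (proto < 0)
		return false;

	/* IP6T_FH_F_FRAG is set when a fragment header was encountered */
	return (flags & IP6T_FH_F_FRAG) != 0;
}
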
__NFC_CMD_AFTER_LAST }; diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index e474f6e780c..2540e86d99a 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h @@ -548,6 +548,11 @@ * @NL80211_CMD_SET_NOACK_MAP: sets a bitmap for the individual TIDs whether * No Acknowledgement Policy should be applied. * + * @NL80211_CMD_CH_SWITCH_NOTIFY: An AP or GO may decide to switch channels + * independently of the userspace SME, send this event indicating + * %NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ with + * %NL80211_ATTR_WIPHY_CHANNEL_TYPE. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -689,6 +694,8 @@ enum nl80211_commands { NL80211_CMD_SET_NOACK_MAP, + NL80211_CMD_CH_SWITCH_NOTIFY, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -1685,6 +1692,7 @@ enum nl80211_sta_bss_param { * @NL80211_STA_INFO_CONNECTED_TIME: time since the station is last connected * @NL80211_STA_INFO_STA_FLAGS: Contains a struct nl80211_sta_flag_update. * @NL80211_STA_INFO_BEACON_LOSS: count of times beacon loss was detected (u32) + * @NL80211_STA_INFO_T_OFFSET: timing offset with respect to this STA (s64) * @__NL80211_STA_INFO_AFTER_LAST: internal * @NL80211_STA_INFO_MAX: highest possible station info attribute */ @@ -1708,6 +1716,7 @@ enum nl80211_sta_info { NL80211_STA_INFO_CONNECTED_TIME, NL80211_STA_INFO_STA_FLAGS, NL80211_STA_INFO_BEACON_LOSS, + NL80211_STA_INFO_T_OFFSET, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, @@ -2142,6 +2151,11 @@ enum nl80211_mntr_flags { * * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute * + * @NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR: maximum number of neighbors + * to synchronize to for 11s default synchronization method (see 11C.12.2.2) + * + * @NL80211_MESHCONF_HT_OPMODE: set mesh HT protection mode. + * * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use */ enum nl80211_meshconf_params { @@ -2166,6 +2180,8 @@ enum nl80211_meshconf_params { NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, NL80211_MESHCONF_FORWARDING, NL80211_MESHCONF_RSSI_THRESHOLD, + NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, + NL80211_MESHCONF_HT_OPMODE, /* keep last */ __NL80211_MESHCONF_ATTR_AFTER_LAST, @@ -2205,6 +2221,11 @@ enum nl80211_meshconf_params { * complete (unsecured) mesh peering without the need of a userspace daemon. 
* * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number + * + * @NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC: Enable this option to use a + * vendor specific synchronization method or disable it to use the default + * neighbor offset synchronization + * * @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use */ enum nl80211_mesh_setup_params { @@ -2214,6 +2235,7 @@ enum nl80211_mesh_setup_params { NL80211_MESH_SETUP_IE, NL80211_MESH_SETUP_USERSPACE_AUTH, NL80211_MESH_SETUP_USERSPACE_AMPE, + NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC, /* keep last */ __NL80211_MESH_SETUP_ATTR_AFTER_LAST, @@ -2223,7 +2245,7 @@ enum nl80211_mesh_setup_params { /** * enum nl80211_txq_attr - TX queue parameter attributes * @__NL80211_TXQ_ATTR_INVALID: Attribute number 0 is reserved - * @NL80211_TXQ_ATTR_QUEUE: TX queue identifier (NL80211_TXQ_Q_*) + * @NL80211_TXQ_ATTR_AC: AC identifier (NL80211_AC_*) * @NL80211_TXQ_ATTR_TXOP: Maximum burst time in units of 32 usecs, 0 meaning * disabled * @NL80211_TXQ_ATTR_CWMIN: Minimum contention window [a value of the form @@ -2236,7 +2258,7 @@ enum nl80211_mesh_setup_params { */ enum nl80211_txq_attr { __NL80211_TXQ_ATTR_INVALID, - NL80211_TXQ_ATTR_QUEUE, + NL80211_TXQ_ATTR_AC, NL80211_TXQ_ATTR_TXOP, NL80211_TXQ_ATTR_CWMIN, NL80211_TXQ_ATTR_CWMAX, @@ -2247,13 +2269,21 @@ enum nl80211_txq_attr { NL80211_TXQ_ATTR_MAX = __NL80211_TXQ_ATTR_AFTER_LAST - 1 }; -enum nl80211_txq_q { - NL80211_TXQ_Q_VO, - NL80211_TXQ_Q_VI, - NL80211_TXQ_Q_BE, - NL80211_TXQ_Q_BK +enum nl80211_ac { + NL80211_AC_VO, + NL80211_AC_VI, + NL80211_AC_BE, + NL80211_AC_BK, + NL80211_NUM_ACS }; +/* backward compat */ +#define NL80211_TXQ_ATTR_QUEUE NL80211_TXQ_ATTR_AC +#define NL80211_TXQ_Q_VO NL80211_AC_VO +#define NL80211_TXQ_Q_VI NL80211_AC_VI +#define NL80211_TXQ_Q_BE NL80211_AC_BE +#define NL80211_TXQ_Q_BK NL80211_AC_BK + enum nl80211_channel_type { NL80211_CHAN_NO_HT, NL80211_CHAN_HT20, diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h index 33d9f517510..5a3db3aa5f1 100644 --- a/include/linux/nl802154.h +++ b/include/linux/nl802154.h @@ -68,6 +68,7 @@ enum { IEEE802154_ATTR_CHANNEL_PAGE_LIST, IEEE802154_ATTR_PHY_NAME, + IEEE802154_ATTR_DEV_TYPE, __IEEE802154_ATTR_MAX, }; @@ -126,4 +127,23 @@ enum { #define IEEE802154_CMD_MAX (__IEEE802154_CMD_MAX - 1) +enum { + __IEEE802154_DEV_INVALID = -1, + + /* TODO: + * Nowadays three device types supported by this stack at linux-zigbee + * project: WPAN = 0, MONITOR = 1 and SMAC = 2. + * + * Since this stack implementation exists many years, it's definitely + * bad idea to change the assigned values due to they are already used + * by third-party userspace software like: iz-tools, wireshark... + * + * Currently only monitor device is added and initialized by '1' for + * compatibility. 
+ */ + IEEE802154_DEV_MONITOR = 1, + + __IEEE802154_DEV_MAX, +}; + #endif diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index 53b94e025c7..912c27a0f7e 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -22,4 +22,6 @@ extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev, void (*hndlr)(struct net_device *), phy_interface_t iface); +extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); + #endif /* __LINUX_OF_MDIO_H */ diff --git a/include/linux/phy.h b/include/linux/phy.h index 6fe0a37d4ab..c291cae8ce3 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -412,6 +412,9 @@ struct phy_driver { /* Clears up any memory if needed */ void (*remove)(struct phy_device *phydev); + /* Handles ethtool queries for hardware time stamping. */ + int (*ts_info)(struct phy_device *phydev, struct ethtool_ts_info *ti); + /* Handles SIOCSHWTSTAMP ioctl for hardware time stamping. */ int (*hwtstamp)(struct phy_device *phydev, struct ifreq *ifr); @@ -477,7 +480,6 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val) return mdiobus_write(phydev->bus, phydev->addr, regnum, val); } -int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id); struct phy_device* get_phy_device(struct mii_bus *bus, int addr); int phy_device_register(struct phy_device *phy); int phy_init_hw(struct phy_device *phydev); diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h index 410b33d014d..32aef0a439e 100644 --- a/include/linux/pkt_sched.h +++ b/include/linux/pkt_sched.h @@ -509,6 +509,7 @@ enum { TCA_NETEM_CORRUPT, TCA_NETEM_LOSS, TCA_NETEM_RATE, + TCA_NETEM_ECN, __TCA_NETEM_MAX, }; @@ -654,4 +655,84 @@ struct tc_qfq_stats { __u32 lmax; }; +/* CODEL */ + +enum { + TCA_CODEL_UNSPEC, + TCA_CODEL_TARGET, + TCA_CODEL_LIMIT, + TCA_CODEL_INTERVAL, + TCA_CODEL_ECN, + __TCA_CODEL_MAX +}; + +#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1) + +struct tc_codel_xstats { + __u32 maxpacket; /* largest packet we've seen so far */ + __u32 count; /* how many drops we've done since the last time we + * entered dropping state + */ + __u32 lastcount; /* count at entry to dropping state */ + __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */ + __s32 drop_next; /* time to drop next packet */ + __u32 drop_overlimit; /* number of time max qdisc packet limit was hit */ + __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */ + __u32 dropping; /* are we in dropping state ? 
*/ +}; + +/* FQ_CODEL */ + +enum { + TCA_FQ_CODEL_UNSPEC, + TCA_FQ_CODEL_TARGET, + TCA_FQ_CODEL_LIMIT, + TCA_FQ_CODEL_INTERVAL, + TCA_FQ_CODEL_ECN, + TCA_FQ_CODEL_FLOWS, + TCA_FQ_CODEL_QUANTUM, + __TCA_FQ_CODEL_MAX +}; + +#define TCA_FQ_CODEL_MAX (__TCA_FQ_CODEL_MAX - 1) + +enum { + TCA_FQ_CODEL_XSTATS_QDISC, + TCA_FQ_CODEL_XSTATS_CLASS, +}; + +struct tc_fq_codel_qd_stats { + __u32 maxpacket; /* largest packet we've seen so far */ + __u32 drop_overlimit; /* number of time max qdisc + * packet limit was hit + */ + __u32 ecn_mark; /* number of packets we ECN marked + * instead of being dropped + */ + __u32 new_flow_count; /* number of time packets + * created a 'new flow' + */ + __u32 new_flows_len; /* count of flows in new list */ + __u32 old_flows_len; /* count of flows in old list */ +}; + +struct tc_fq_codel_cl_stats { + __s32 deficit; + __u32 ldelay; /* in-queue delay seen by most recently + * dequeued packet + */ + __u32 count; + __u32 lastcount; + __u32 dropping; + __s32 drop_next; +}; + +struct tc_fq_codel_xstats { + __u32 type; + union { + struct tc_fq_codel_qd_stats qdisc_stats; + struct tc_fq_codel_cl_stats class_stats; + }; +}; + #endif diff --git a/include/linux/platform_data/wiznet.h b/include/linux/platform_data/wiznet.h new file mode 100644 index 00000000000..b5d8c192d84 --- /dev/null +++ b/include/linux/platform_data/wiznet.h @@ -0,0 +1,24 @@ +/* + * Ethernet driver for the WIZnet W5x00 chip. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef PLATFORM_DATA_WIZNET_H +#define PLATFORM_DATA_WIZNET_H + +#include <linux/if_ether.h> + +struct wiznet_platform_data { + int link_gpio; + u8 mac_addr[ETH_ALEN]; +}; + +#ifndef CONFIG_WIZNET_BUS_SHIFT +#define CONFIG_WIZNET_BUS_SHIFT 0 +#endif + +#define W5100_BUS_DIRECT_SIZE (0x8000 << CONFIG_WIZNET_BUS_SHIFT) +#define W5300_BUS_DIRECT_SIZE (0x0400 << CONFIG_WIZNET_BUS_SHIFT) + +#endif /* PLATFORM_DATA_WIZNET_H */ diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index dd2e44fba63..945704c2ed6 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -136,4 +136,12 @@ struct ptp_clock_event { extern void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event); +/** + * ptp_clock_index() - obtain the device index of a PTP clock + * + * @ptp: The clock obtained from ptp_clock_register(). + */ + +extern int ptp_clock_index(struct ptp_clock *ptp); + #endif diff --git a/include/linux/rndis.h b/include/linux/rndis.h new file mode 100644 index 00000000000..0c8dc7195cd --- /dev/null +++ b/include/linux/rndis.h @@ -0,0 +1,390 @@ +/* + * Remote Network Driver Interface Specification (RNDIS) + * definitions of the magic numbers used by this protocol + */ + +/* Remote NDIS Versions */ +#define RNDIS_MAJOR_VERSION 0x00000001 +#define RNDIS_MINOR_VERSION 0x00000000 + +/* Device Flags */ +#define RNDIS_DF_CONNECTIONLESS 0x00000001U +#define RNDIS_DF_CONNECTION_ORIENTED 0x00000002U +#define RNDIS_DF_RAW_DATA 0x00000004U + +/* + * Codes for "msg_type" field of rndis messages; + * only the data channel uses packet messages (maybe batched); + * everything else goes on the control channel. 
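
ptp_clock_index() above returns the device index of a registered PTP clock, which is what ethtool time-stamping queries report as phc_index. A minimal sketch, not part of this patch, of a PHY driver wiring it into the phy_driver ts_info hook added earlier in this series; the private structure, its ptp member, and the advertised capabilities are assumptions:

#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/ptp_clock_kernel.h>

struct example_phy_priv {
	struct ptp_clock *ptp;	/* obtained from ptp_clock_register() */
};

static int example_ts_info(struct phy_device *phydev,
			   struct ethtool_ts_info *info)
{
	struct example_phy_priv *priv = phydev->priv;

	info->phc_index = ptp_clock_index(priv->ptp);
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}
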
+ */ +#define RNDIS_MSG_COMPLETION 0x80000000 +#define RNDIS_MSG_PACKET 0x00000001 /* 1-N packets */ +#define RNDIS_MSG_INIT 0x00000002 +#define RNDIS_MSG_INIT_C (RNDIS_MSG_INIT|RNDIS_MSG_COMPLETION) +#define RNDIS_MSG_HALT 0x00000003 +#define RNDIS_MSG_QUERY 0x00000004 +#define RNDIS_MSG_QUERY_C (RNDIS_MSG_QUERY|RNDIS_MSG_COMPLETION) +#define RNDIS_MSG_SET 0x00000005 +#define RNDIS_MSG_SET_C (RNDIS_MSG_SET|RNDIS_MSG_COMPLETION) +#define RNDIS_MSG_RESET 0x00000006 +#define RNDIS_MSG_RESET_C (RNDIS_MSG_RESET|RNDIS_MSG_COMPLETION) +#define RNDIS_MSG_INDICATE 0x00000007 +#define RNDIS_MSG_KEEPALIVE 0x00000008 +#define RNDIS_MSG_KEEPALIVE_C (RNDIS_MSG_KEEPALIVE|RNDIS_MSG_COMPLETION) +/* + * Reserved message type for private communication between lower-layer host + * driver and remote device, if necessary. + */ +#define RNDIS_MSG_BUS 0xff000001 + +/* codes for "status" field of completion messages */ +#define RNDIS_STATUS_SUCCESS 0x00000000 +#define RNDIS_STATUS_PENDING 0x00000103 + +/* Status codes */ +#define RNDIS_STATUS_NOT_RECOGNIZED 0x00010001 +#define RNDIS_STATUS_NOT_COPIED 0x00010002 +#define RNDIS_STATUS_NOT_ACCEPTED 0x00010003 +#define RNDIS_STATUS_CALL_ACTIVE 0x00010007 + +#define RNDIS_STATUS_ONLINE 0x40010003 +#define RNDIS_STATUS_RESET_START 0x40010004 +#define RNDIS_STATUS_RESET_END 0x40010005 +#define RNDIS_STATUS_RING_STATUS 0x40010006 +#define RNDIS_STATUS_CLOSED 0x40010007 +#define RNDIS_STATUS_WAN_LINE_UP 0x40010008 +#define RNDIS_STATUS_WAN_LINE_DOWN 0x40010009 +#define RNDIS_STATUS_WAN_FRAGMENT 0x4001000A +#define RNDIS_STATUS_MEDIA_CONNECT 0x4001000B +#define RNDIS_STATUS_MEDIA_DISCONNECT 0x4001000C +#define RNDIS_STATUS_HARDWARE_LINE_UP 0x4001000D +#define RNDIS_STATUS_HARDWARE_LINE_DOWN 0x4001000E +#define RNDIS_STATUS_INTERFACE_UP 0x4001000F +#define RNDIS_STATUS_INTERFACE_DOWN 0x40010010 +#define RNDIS_STATUS_MEDIA_BUSY 0x40010011 +#define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION 0x40010012 +#define RNDIS_STATUS_WW_INDICATION RDIA_SPECIFIC_INDICATION +#define RNDIS_STATUS_LINK_SPEED_CHANGE 0x40010013L + +#define RNDIS_STATUS_NOT_RESETTABLE 0x80010001 +#define RNDIS_STATUS_SOFT_ERRORS 0x80010003 +#define RNDIS_STATUS_HARD_ERRORS 0x80010004 +#define RNDIS_STATUS_BUFFER_OVERFLOW 0x80000005 + +#define RNDIS_STATUS_FAILURE 0xC0000001 +#define RNDIS_STATUS_RESOURCES 0xC000009A +#define RNDIS_STATUS_NOT_SUPPORTED 0xc00000BB +#define RNDIS_STATUS_CLOSING 0xC0010002 +#define RNDIS_STATUS_BAD_VERSION 0xC0010004 +#define RNDIS_STATUS_BAD_CHARACTERISTICS 0xC0010005 +#define RNDIS_STATUS_ADAPTER_NOT_FOUND 0xC0010006 +#define RNDIS_STATUS_OPEN_FAILED 0xC0010007 +#define RNDIS_STATUS_DEVICE_FAILED 0xC0010008 +#define RNDIS_STATUS_MULTICAST_FULL 0xC0010009 +#define RNDIS_STATUS_MULTICAST_EXISTS 0xC001000A +#define RNDIS_STATUS_MULTICAST_NOT_FOUND 0xC001000B +#define RNDIS_STATUS_REQUEST_ABORTED 0xC001000C +#define RNDIS_STATUS_RESET_IN_PROGRESS 0xC001000D +#define RNDIS_STATUS_CLOSING_INDICATING 0xC001000E +#define RNDIS_STATUS_INVALID_PACKET 0xC001000F +#define RNDIS_STATUS_OPEN_LIST_FULL 0xC0010010 +#define RNDIS_STATUS_ADAPTER_NOT_READY 0xC0010011 +#define RNDIS_STATUS_ADAPTER_NOT_OPEN 0xC0010012 +#define RNDIS_STATUS_NOT_INDICATING 0xC0010013 +#define RNDIS_STATUS_INVALID_LENGTH 0xC0010014 +#define RNDIS_STATUS_INVALID_DATA 0xC0010015 +#define RNDIS_STATUS_BUFFER_TOO_SHORT 0xC0010016 +#define RNDIS_STATUS_INVALID_OID 0xC0010017 +#define RNDIS_STATUS_ADAPTER_REMOVED 0xC0010018 +#define RNDIS_STATUS_UNSUPPORTED_MEDIA 0xC0010019 +#define RNDIS_STATUS_GROUP_ADDRESS_IN_USE 0xC001001A 
+#define RNDIS_STATUS_FILE_NOT_FOUND 0xC001001B +#define RNDIS_STATUS_ERROR_READING_FILE 0xC001001C +#define RNDIS_STATUS_ALREADY_MAPPED 0xC001001D +#define RNDIS_STATUS_RESOURCE_CONFLICT 0xC001001E +#define RNDIS_STATUS_NO_CABLE 0xC001001F + +#define RNDIS_STATUS_INVALID_SAP 0xC0010020 +#define RNDIS_STATUS_SAP_IN_USE 0xC0010021 +#define RNDIS_STATUS_INVALID_ADDRESS 0xC0010022 +#define RNDIS_STATUS_VC_NOT_ACTIVATED 0xC0010023 +#define RNDIS_STATUS_DEST_OUT_OF_ORDER 0xC0010024 +#define RNDIS_STATUS_VC_NOT_AVAILABLE 0xC0010025 +#define RNDIS_STATUS_CELLRATE_NOT_AVAILABLE 0xC0010026 +#define RNDIS_STATUS_INCOMPATABLE_QOS 0xC0010027 +#define RNDIS_STATUS_AAL_PARAMS_UNSUPPORTED 0xC0010028 +#define RNDIS_STATUS_NO_ROUTE_TO_DESTINATION 0xC0010029 + +#define RNDIS_STATUS_TOKEN_RING_OPEN_ERROR 0xC0011000 + +/* codes for RNDIS_OID_GEN_PHYSICAL_MEDIUM */ +#define RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED 0x00000000 +#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN 0x00000001 +#define RNDIS_PHYSICAL_MEDIUM_CABLE_MODEM 0x00000002 +#define RNDIS_PHYSICAL_MEDIUM_PHONE_LINE 0x00000003 +#define RNDIS_PHYSICAL_MEDIUM_POWER_LINE 0x00000004 +#define RNDIS_PHYSICAL_MEDIUM_DSL 0x00000005 +#define RNDIS_PHYSICAL_MEDIUM_FIBRE_CHANNEL 0x00000006 +#define RNDIS_PHYSICAL_MEDIUM_1394 0x00000007 +#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_WAN 0x00000008 +#define RNDIS_PHYSICAL_MEDIUM_MAX 0x00000009 + +/* Remote NDIS medium types. */ +#define RNDIS_MEDIUM_UNSPECIFIED 0x00000000 +#define RNDIS_MEDIUM_802_3 0x00000000 +#define RNDIS_MEDIUM_802_5 0x00000001 +#define RNDIS_MEDIUM_FDDI 0x00000002 +#define RNDIS_MEDIUM_WAN 0x00000003 +#define RNDIS_MEDIUM_LOCAL_TALK 0x00000004 +#define RNDIS_MEDIUM_ARCNET_RAW 0x00000006 +#define RNDIS_MEDIUM_ARCNET_878_2 0x00000007 +#define RNDIS_MEDIUM_ATM 0x00000008 +#define RNDIS_MEDIUM_WIRELESS_LAN 0x00000009 +#define RNDIS_MEDIUM_IRDA 0x0000000A +#define RNDIS_MEDIUM_BPC 0x0000000B +#define RNDIS_MEDIUM_CO_WAN 0x0000000C +#define RNDIS_MEDIUM_1394 0x0000000D +/* Not a real medium, defined as an upper-bound */ +#define RNDIS_MEDIUM_MAX 0x0000000E + +/* Remote NDIS medium connection states. 
*/ +#define RNDIS_MEDIA_STATE_CONNECTED 0x00000000 +#define RNDIS_MEDIA_STATE_DISCONNECTED 0x00000001 + +/* packet filter bits used by RNDIS_OID_GEN_CURRENT_PACKET_FILTER */ +#define RNDIS_PACKET_TYPE_DIRECTED 0x00000001 +#define RNDIS_PACKET_TYPE_MULTICAST 0x00000002 +#define RNDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004 +#define RNDIS_PACKET_TYPE_BROADCAST 0x00000008 +#define RNDIS_PACKET_TYPE_SOURCE_ROUTING 0x00000010 +#define RNDIS_PACKET_TYPE_PROMISCUOUS 0x00000020 +#define RNDIS_PACKET_TYPE_SMT 0x00000040 +#define RNDIS_PACKET_TYPE_ALL_LOCAL 0x00000080 +#define RNDIS_PACKET_TYPE_GROUP 0x00001000 +#define RNDIS_PACKET_TYPE_ALL_FUNCTIONAL 0x00002000 +#define RNDIS_PACKET_TYPE_FUNCTIONAL 0x00004000 +#define RNDIS_PACKET_TYPE_MAC_FRAME 0x00008000 + +/* RNDIS_OID_GEN_MINIPORT_INFO constants */ +#define RNDIS_MINIPORT_BUS_MASTER 0x00000001 +#define RNDIS_MINIPORT_WDM_DRIVER 0x00000002 +#define RNDIS_MINIPORT_SG_LIST 0x00000004 +#define RNDIS_MINIPORT_SUPPORTS_MEDIA_QUERY 0x00000008 +#define RNDIS_MINIPORT_INDICATES_PACKETS 0x00000010 +#define RNDIS_MINIPORT_IGNORE_PACKET_QUEUE 0x00000020 +#define RNDIS_MINIPORT_IGNORE_REQUEST_QUEUE 0x00000040 +#define RNDIS_MINIPORT_IGNORE_TOKEN_RING_ERRORS 0x00000080 +#define RNDIS_MINIPORT_INTERMEDIATE_DRIVER 0x00000100 +#define RNDIS_MINIPORT_IS_NDIS_5 0x00000200 +#define RNDIS_MINIPORT_IS_CO 0x00000400 +#define RNDIS_MINIPORT_DESERIALIZE 0x00000800 +#define RNDIS_MINIPORT_REQUIRES_MEDIA_POLLING 0x00001000 +#define RNDIS_MINIPORT_SUPPORTS_MEDIA_SENSE 0x00002000 +#define RNDIS_MINIPORT_NETBOOT_CARD 0x00004000 +#define RNDIS_MINIPORT_PM_SUPPORTED 0x00008000 +#define RNDIS_MINIPORT_SUPPORTS_MAC_ADDRESS_OVERWRITE 0x00010000 +#define RNDIS_MINIPORT_USES_SAFE_BUFFER_APIS 0x00020000 +#define RNDIS_MINIPORT_HIDDEN 0x00040000 +#define RNDIS_MINIPORT_SWENUM 0x00080000 +#define RNDIS_MINIPORT_SURPRISE_REMOVE_OK 0x00100000 +#define RNDIS_MINIPORT_NO_HALT_ON_SUSPEND 0x00200000 +#define RNDIS_MINIPORT_HARDWARE_DEVICE 0x00400000 +#define RNDIS_MINIPORT_SUPPORTS_CANCEL_SEND_PACKETS 0x00800000 +#define RNDIS_MINIPORT_64BITS_DMA 0x01000000 + +#define RNDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA 0x00000001 +#define RNDIS_MAC_OPTION_RECEIVE_SERIALIZED 0x00000002 +#define RNDIS_MAC_OPTION_TRANSFERS_NOT_PEND 0x00000004 +#define RNDIS_MAC_OPTION_NO_LOOPBACK 0x00000008 +#define RNDIS_MAC_OPTION_FULL_DUPLEX 0x00000010 +#define RNDIS_MAC_OPTION_EOTX_INDICATION 0x00000020 +#define RNDIS_MAC_OPTION_8021P_PRIORITY 0x00000040 +#define RNDIS_MAC_OPTION_RESERVED 0x80000000 + +/* Object Identifiers used by NdisRequest Query/Set Information */ +/* General (Required) Objects */ +#define RNDIS_OID_GEN_SUPPORTED_LIST 0x00010101 +#define RNDIS_OID_GEN_HARDWARE_STATUS 0x00010102 +#define RNDIS_OID_GEN_MEDIA_SUPPORTED 0x00010103 +#define RNDIS_OID_GEN_MEDIA_IN_USE 0x00010104 +#define RNDIS_OID_GEN_MAXIMUM_LOOKAHEAD 0x00010105 +#define RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE 0x00010106 +#define RNDIS_OID_GEN_LINK_SPEED 0x00010107 +#define RNDIS_OID_GEN_TRANSMIT_BUFFER_SPACE 0x00010108 +#define RNDIS_OID_GEN_RECEIVE_BUFFER_SPACE 0x00010109 +#define RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE 0x0001010A +#define RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE 0x0001010B +#define RNDIS_OID_GEN_VENDOR_ID 0x0001010C +#define RNDIS_OID_GEN_VENDOR_DESCRIPTION 0x0001010D +#define RNDIS_OID_GEN_CURRENT_PACKET_FILTER 0x0001010E +#define RNDIS_OID_GEN_CURRENT_LOOKAHEAD 0x0001010F +#define RNDIS_OID_GEN_DRIVER_VERSION 0x00010110 +#define RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE 0x00010111 +#define RNDIS_OID_GEN_PROTOCOL_OPTIONS 0x00010112 +#define 
RNDIS_OID_GEN_MAC_OPTIONS 0x00010113 +#define RNDIS_OID_GEN_MEDIA_CONNECT_STATUS 0x00010114 +#define RNDIS_OID_GEN_MAXIMUM_SEND_PACKETS 0x00010115 +#define RNDIS_OID_GEN_VENDOR_DRIVER_VERSION 0x00010116 +#define RNDIS_OID_GEN_SUPPORTED_GUIDS 0x00010117 +#define RNDIS_OID_GEN_NETWORK_LAYER_ADDRESSES 0x00010118 +#define RNDIS_OID_GEN_TRANSPORT_HEADER_OFFSET 0x00010119 +#define RNDIS_OID_GEN_PHYSICAL_MEDIUM 0x00010202 +#define RNDIS_OID_GEN_MACHINE_NAME 0x0001021A +#define RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER 0x0001021B +#define RNDIS_OID_GEN_VLAN_ID 0x0001021C + +/* Optional OIDs */ +#define RNDIS_OID_GEN_MEDIA_CAPABILITIES 0x00010201 + +/* Required statistics OIDs */ +#define RNDIS_OID_GEN_XMIT_OK 0x00020101 +#define RNDIS_OID_GEN_RCV_OK 0x00020102 +#define RNDIS_OID_GEN_XMIT_ERROR 0x00020103 +#define RNDIS_OID_GEN_RCV_ERROR 0x00020104 +#define RNDIS_OID_GEN_RCV_NO_BUFFER 0x00020105 + +/* Optional statistics OIDs */ +#define RNDIS_OID_GEN_DIRECTED_BYTES_XMIT 0x00020201 +#define RNDIS_OID_GEN_DIRECTED_FRAMES_XMIT 0x00020202 +#define RNDIS_OID_GEN_MULTICAST_BYTES_XMIT 0x00020203 +#define RNDIS_OID_GEN_MULTICAST_FRAMES_XMIT 0x00020204 +#define RNDIS_OID_GEN_BROADCAST_BYTES_XMIT 0x00020205 +#define RNDIS_OID_GEN_BROADCAST_FRAMES_XMIT 0x00020206 +#define RNDIS_OID_GEN_DIRECTED_BYTES_RCV 0x00020207 +#define RNDIS_OID_GEN_DIRECTED_FRAMES_RCV 0x00020208 +#define RNDIS_OID_GEN_MULTICAST_BYTES_RCV 0x00020209 +#define RNDIS_OID_GEN_MULTICAST_FRAMES_RCV 0x0002020A +#define RNDIS_OID_GEN_BROADCAST_BYTES_RCV 0x0002020B +#define RNDIS_OID_GEN_BROADCAST_FRAMES_RCV 0x0002020C + +#define RNDIS_OID_GEN_RCV_CRC_ERROR 0x0002020D +#define RNDIS_OID_GEN_TRANSMIT_QUEUE_LENGTH 0x0002020E + +#define RNDIS_OID_GEN_GET_TIME_CAPS 0x0002020F +#define RNDIS_OID_GEN_GET_NETCARD_TIME 0x00020210 + +#define RNDIS_OID_GEN_NETCARD_LOAD 0x00020211 +#define RNDIS_OID_GEN_DEVICE_PROFILE 0x00020212 +#define RNDIS_OID_GEN_INIT_TIME_MS 0x00020213 +#define RNDIS_OID_GEN_RESET_COUNTS 0x00020214 +#define RNDIS_OID_GEN_MEDIA_SENSE_COUNTS 0x00020215 +#define RNDIS_OID_GEN_FRIENDLY_NAME 0x00020216 +#define RNDIS_OID_GEN_MINIPORT_INFO 0x00020217 +#define RNDIS_OID_GEN_RESET_VERIFY_PARAMETERS 0x00020218 + +/* These are connection-oriented general OIDs. */ +/* These replace the above OIDs for connection-oriented media. */ +#define RNDIS_OID_GEN_CO_SUPPORTED_LIST 0x00010101 +#define RNDIS_OID_GEN_CO_HARDWARE_STATUS 0x00010102 +#define RNDIS_OID_GEN_CO_MEDIA_SUPPORTED 0x00010103 +#define RNDIS_OID_GEN_CO_MEDIA_IN_USE 0x00010104 +#define RNDIS_OID_GEN_CO_LINK_SPEED 0x00010105 +#define RNDIS_OID_GEN_CO_VENDOR_ID 0x00010106 +#define RNDIS_OID_GEN_CO_VENDOR_DESCRIPTION 0x00010107 +#define RNDIS_OID_GEN_CO_DRIVER_VERSION 0x00010108 +#define RNDIS_OID_GEN_CO_PROTOCOL_OPTIONS 0x00010109 +#define RNDIS_OID_GEN_CO_MAC_OPTIONS 0x0001010A +#define RNDIS_OID_GEN_CO_MEDIA_CONNECT_STATUS 0x0001010B +#define RNDIS_OID_GEN_CO_VENDOR_DRIVER_VERSION 0x0001010C +#define RNDIS_OID_GEN_CO_MINIMUM_LINK_SPEED 0x0001010D + +#define RNDIS_OID_GEN_CO_GET_TIME_CAPS 0x00010201 +#define RNDIS_OID_GEN_CO_GET_NETCARD_TIME 0x00010202 + +/* These are connection-oriented statistics OIDs. 
*/ +#define RNDIS_OID_GEN_CO_XMIT_PDUS_OK 0x00020101 +#define RNDIS_OID_GEN_CO_RCV_PDUS_OK 0x00020102 +#define RNDIS_OID_GEN_CO_XMIT_PDUS_ERROR 0x00020103 +#define RNDIS_OID_GEN_CO_RCV_PDUS_ERROR 0x00020104 +#define RNDIS_OID_GEN_CO_RCV_PDUS_NO_BUFFER 0x00020105 + + +#define RNDIS_OID_GEN_CO_RCV_CRC_ERROR 0x00020201 +#define RNDIS_OID_GEN_CO_TRANSMIT_QUEUE_LENGTH 0x00020202 +#define RNDIS_OID_GEN_CO_BYTES_XMIT 0x00020203 +#define RNDIS_OID_GEN_CO_BYTES_RCV 0x00020204 +#define RNDIS_OID_GEN_CO_BYTES_XMIT_OUTSTANDING 0x00020205 +#define RNDIS_OID_GEN_CO_NETCARD_LOAD 0x00020206 + +/* These are objects for Connection-oriented media call-managers. */ +#define RNDIS_OID_CO_ADD_PVC 0xFF000001 +#define RNDIS_OID_CO_DELETE_PVC 0xFF000002 +#define RNDIS_OID_CO_GET_CALL_INFORMATION 0xFF000003 +#define RNDIS_OID_CO_ADD_ADDRESS 0xFF000004 +#define RNDIS_OID_CO_DELETE_ADDRESS 0xFF000005 +#define RNDIS_OID_CO_GET_ADDRESSES 0xFF000006 +#define RNDIS_OID_CO_ADDRESS_CHANGE 0xFF000007 +#define RNDIS_OID_CO_SIGNALING_ENABLED 0xFF000008 +#define RNDIS_OID_CO_SIGNALING_DISABLED 0xFF000009 + +/* 802.3 Objects (Ethernet) */ +#define RNDIS_OID_802_3_PERMANENT_ADDRESS 0x01010101 +#define RNDIS_OID_802_3_CURRENT_ADDRESS 0x01010102 +#define RNDIS_OID_802_3_MULTICAST_LIST 0x01010103 +#define RNDIS_OID_802_3_MAXIMUM_LIST_SIZE 0x01010104 +#define RNDIS_OID_802_3_MAC_OPTIONS 0x01010105 + +#define RNDIS_802_3_MAC_OPTION_PRIORITY 0x00000001 + +#define RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101 +#define RNDIS_OID_802_3_XMIT_ONE_COLLISION 0x01020102 +#define RNDIS_OID_802_3_XMIT_MORE_COLLISIONS 0x01020103 + +#define RNDIS_OID_802_3_XMIT_DEFERRED 0x01020201 +#define RNDIS_OID_802_3_XMIT_MAX_COLLISIONS 0x01020202 +#define RNDIS_OID_802_3_RCV_OVERRUN 0x01020203 +#define RNDIS_OID_802_3_XMIT_UNDERRUN 0x01020204 +#define RNDIS_OID_802_3_XMIT_HEARTBEAT_FAILURE 0x01020205 +#define RNDIS_OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206 +#define RNDIS_OID_802_3_XMIT_LATE_COLLISIONS 0x01020207 + +#define RNDIS_OID_802_11_BSSID 0x0d010101 +#define RNDIS_OID_802_11_SSID 0x0d010102 +#define RNDIS_OID_802_11_INFRASTRUCTURE_MODE 0x0d010108 +#define RNDIS_OID_802_11_ADD_WEP 0x0d010113 +#define RNDIS_OID_802_11_REMOVE_WEP 0x0d010114 +#define RNDIS_OID_802_11_DISASSOCIATE 0x0d010115 +#define RNDIS_OID_802_11_AUTHENTICATION_MODE 0x0d010118 +#define RNDIS_OID_802_11_PRIVACY_FILTER 0x0d010119 +#define RNDIS_OID_802_11_BSSID_LIST_SCAN 0x0d01011a +#define RNDIS_OID_802_11_ENCRYPTION_STATUS 0x0d01011b +#define RNDIS_OID_802_11_ADD_KEY 0x0d01011d +#define RNDIS_OID_802_11_REMOVE_KEY 0x0d01011e +#define RNDIS_OID_802_11_ASSOCIATION_INFORMATION 0x0d01011f +#define RNDIS_OID_802_11_CAPABILITY 0x0d010122 +#define RNDIS_OID_802_11_PMKID 0x0d010123 +#define RNDIS_OID_802_11_NETWORK_TYPES_SUPPORTED 0x0d010203 +#define RNDIS_OID_802_11_NETWORK_TYPE_IN_USE 0x0d010204 +#define RNDIS_OID_802_11_TX_POWER_LEVEL 0x0d010205 +#define RNDIS_OID_802_11_RSSI 0x0d010206 +#define RNDIS_OID_802_11_RSSI_TRIGGER 0x0d010207 +#define RNDIS_OID_802_11_FRAGMENTATION_THRESHOLD 0x0d010209 +#define RNDIS_OID_802_11_RTS_THRESHOLD 0x0d01020a +#define RNDIS_OID_802_11_SUPPORTED_RATES 0x0d01020e +#define RNDIS_OID_802_11_CONFIGURATION 0x0d010211 +#define RNDIS_OID_802_11_POWER_MODE 0x0d010216 +#define RNDIS_OID_802_11_BSSID_LIST 0x0d010217 + +/* Plug and Play capabilities */ +#define RNDIS_OID_PNP_CAPABILITIES 0xFD010100 +#define RNDIS_OID_PNP_SET_POWER 0xFD010101 +#define RNDIS_OID_PNP_QUERY_POWER 0xFD010102 +#define RNDIS_OID_PNP_ADD_WAKE_UP_PATTERN 0xFD010103 +#define 
RNDIS_OID_PNP_REMOVE_WAKE_UP_PATTERN 0xFD010104 +#define RNDIS_OID_PNP_ENABLE_WAKE_UP 0xFD010106 + +/* RNDIS_PNP_CAPABILITIES.Flags constants */ +#define RNDIS_DEVICE_WAKE_UP_ENABLE 0x00000001 +#define RNDIS_DEVICE_WAKE_ON_PATTERN_MATCH_ENABLE 0x00000002 +#define RNDIS_DEVICE_WAKE_ON_MAGIC_PACKET_ENABLE 0x00000004 + +#define REMOTE_CONDIS_MP_CREATE_VC_MSG 0x00008001 +#define REMOTE_CONDIS_MP_DELETE_VC_MSG 0x00008002 +#define REMOTE_CONDIS_MP_ACTIVATE_VC_MSG 0x00008005 +#define REMOTE_CONDIS_MP_DEACTIVATE_VC_MSG 0x00008006 +#define REMOTE_CONDIS_INDICATE_STATUS_MSG 0x00008007 + +#define REMOTE_CONDIS_MP_CREATE_VC_CMPLT 0x80008001 +#define REMOTE_CONDIS_MP_DELETE_VC_CMPLT 0x80008002 +#define REMOTE_CONDIS_MP_ACTIVATE_VC_CMPLT 0x80008005 +#define REMOTE_CONDIS_MP_DEACTIVATE_VC_CMPLT 0x80008006 diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 577592ea0ea..2c1de8982c8 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -801,6 +801,10 @@ rtattr_failure: return table; } +extern int ndo_dflt_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + int idx); #endif /* __KERNEL__ */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 111f26b6e28..0e501714d47 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -117,11 +117,11 @@ struct nf_conntrack { #ifdef CONFIG_BRIDGE_NETFILTER struct nf_bridge_info { - atomic_t use; - struct net_device *physindev; - struct net_device *physoutdev; - unsigned int mask; - unsigned long data[32 / sizeof(unsigned long)]; + atomic_t use; + unsigned int mask; + struct net_device *physindev; + struct net_device *physoutdev; + unsigned long data[32 / sizeof(unsigned long)]; }; #endif @@ -470,7 +470,8 @@ struct sk_buff { __u8 wifi_acked_valid:1; __u8 wifi_acked:1; __u8 no_fcs:1; - /* 9/11 bit hole (depending on ndisc_nodetype presence) */ + __u8 head_frag:1; + /* 8/10 bit hole (depending on ndisc_nodetype presence) */ kmemcheck_bitfield_end(flags2); #ifdef CONFIG_NET_DMA @@ -560,9 +561,15 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb) extern void kfree_skb(struct sk_buff *skb); extern void consume_skb(struct sk_buff *skb); extern void __kfree_skb(struct sk_buff *skb); +extern struct kmem_cache *skbuff_head_cache; + +extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen); +extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, + bool *fragstolen, int *delta_truesize); + extern struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int fclone, int node); -extern struct sk_buff *build_skb(void *data); +extern struct sk_buff *build_skb(void *data, unsigned int frag_size); static inline struct sk_buff *alloc_skb(unsigned int size, gfp_t priority) { @@ -643,11 +650,21 @@ static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) { return skb->head + skb->end; } + +static inline unsigned int skb_end_offset(const struct sk_buff *skb) +{ + return skb->end; +} #else static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) { return skb->end; } + +static inline unsigned int skb_end_offset(const struct sk_buff *skb) +{ + return skb->end - skb->head; +} #endif /* Internal */ @@ -881,10 +898,11 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb, */ static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_) { - struct sk_buff *list = ((const struct sk_buff *)list_)->next; - if (list == (struct sk_buff *)list_) - list = NULL; - return list; + struct sk_buff *skb = 
list_->next; + + if (skb == (struct sk_buff *)list_) + skb = NULL; + return skb; } /** @@ -900,6 +918,7 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb, const struct sk_buff_head *list_) { struct sk_buff *next = skb->next; + if (next == (struct sk_buff *)list_) next = NULL; return next; @@ -920,10 +939,12 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb, */ static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_) { - struct sk_buff *list = ((const struct sk_buff *)list_)->prev; - if (list == (struct sk_buff *)list_) - list = NULL; - return list; + struct sk_buff *skb = list_->prev; + + if (skb == (struct sk_buff *)list_) + skb = NULL; + return skb; + } /** @@ -1664,31 +1685,11 @@ static inline void __skb_queue_purge(struct sk_buff_head *list) kfree_skb(skb); } -/** - * __dev_alloc_skb - allocate an skbuff for receiving - * @length: length to allocate - * @gfp_mask: get_free_pages mask, passed to alloc_skb - * - * Allocate a new &sk_buff and assign it a usage count of one. The - * buffer has unspecified headroom built in. Users should allocate - * the headroom they think they need without accounting for the - * built in space. The built in space is used for optimisations. - * - * %NULL is returned if there is no free memory. - */ -static inline struct sk_buff *__dev_alloc_skb(unsigned int length, - gfp_t gfp_mask) -{ - struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask); - if (likely(skb)) - skb_reserve(skb, NET_SKB_PAD); - return skb; -} - -extern struct sk_buff *dev_alloc_skb(unsigned int length); +extern void *netdev_alloc_frag(unsigned int fragsz); extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev, - unsigned int length, gfp_t gfp_mask); + unsigned int length, + gfp_t gfp_mask); /** * netdev_alloc_skb - allocate an skbuff for rx on a specific device @@ -1704,11 +1705,25 @@ extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev, * allocates memory it can be called from an interrupt. 
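(Illustrative usage sketch, hypothetical caller code rather than anything from this patch: the reworked skb_peek() still returns NULL on an empty queue and leaves the skb on the list, so the peek must happen with the queue protected, e.g.

static void example_peek(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	skb = skb_peek(list);		/* NULL if the queue is empty */
	if (skb)
		pr_debug("head skb, len=%u\n", skb->len);
	spin_unlock_irqrestore(&list->lock, flags);
}
)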
*/ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, - unsigned int length) + unsigned int length) { return __netdev_alloc_skb(dev, length, GFP_ATOMIC); } +/* legacy helper around __netdev_alloc_skb() */ +static inline struct sk_buff *__dev_alloc_skb(unsigned int length, + gfp_t gfp_mask) +{ + return __netdev_alloc_skb(NULL, length, gfp_mask); +} + +/* legacy helper around netdev_alloc_skb() */ +static inline struct sk_buff *dev_alloc_skb(unsigned int length) +{ + return netdev_alloc_skb(NULL, length); +} + + static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev, unsigned int length, gfp_t gfp) { @@ -1963,8 +1978,8 @@ static inline int skb_add_data(struct sk_buff *skb, return -EFAULT; } -static inline int skb_can_coalesce(struct sk_buff *skb, int i, - const struct page *page, int off) +static inline bool skb_can_coalesce(struct sk_buff *skb, int i, + const struct page *page, int off) { if (i) { const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; @@ -1972,7 +1987,7 @@ static inline int skb_can_coalesce(struct sk_buff *skb, int i, return page == skb_frag_page(frag) && off == frag->page_offset + skb_frag_size(frag); } - return 0; + return false; } static inline int __skb_linearize(struct sk_buff *skb) @@ -2552,7 +2567,7 @@ static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size) return false; skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); - if (skb_end_pointer(skb) - skb->head < skb_size) + if (skb_end_offset(skb) < skb_size) return false; if (skb_shared(skb) || skb_cloned(skb)) @@ -2560,5 +2575,19 @@ static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size) return true; } + +/** + * skb_head_is_locked - Determine if the skb->head is locked down + * @skb: skb to check + * + * The head on skbs build around a head frag can be removed if they are + * not cloned. This function returns true if the skb head is locked down + * due to either being allocated via kmalloc, or by being a clone with + * multiple references to the head. 
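(Illustrative sketch of the new netdev_alloc_frag()/build_skb() pairing; hypothetical driver code, not taken from this patch. The second build_skb() argument now carries the fragment size, presumably so the skb can record that its head lives in a page fragment rather than a kmalloc()ed buffer:

static struct sk_buff *example_rx_build(unsigned int len)
{
	unsigned int fragsz = SKB_DATA_ALIGN(len + NET_SKB_PAD) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct sk_buff *skb;
	void *data;

	data = netdev_alloc_frag(fragsz);
	if (!data)
		return NULL;

	skb = build_skb(data, fragsz);
	if (!skb) {
		put_page(virt_to_head_page(data));	/* drop the fragment's page reference */
		return NULL;
	}
	skb_reserve(skb, NET_SKB_PAD);
	return skb;
}
)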
+ */ +static inline bool skb_head_is_locked(const struct sk_buff *skb) +{ + return !skb->head_frag || skb_cloned(skb); +} #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h index 251729a4788..db4bae78bda 100644 --- a/include/linux/sock_diag.h +++ b/include/linux/sock_diag.h @@ -32,8 +32,8 @@ struct sock_diag_handler { int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); }; -int sock_diag_register(struct sock_diag_handler *h); -void sock_diag_unregister(struct sock_diag_handler *h); +int sock_diag_register(const struct sock_diag_handler *h); +void sock_diag_unregister(const struct sock_diag_handler *h); void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); diff --git a/include/linux/socket.h b/include/linux/socket.h index b84bbd48b87..25d6322fb63 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -68,13 +68,13 @@ struct msghdr { __kernel_size_t msg_iovlen; /* Number of blocks */ void * msg_control; /* Per protocol magic (eg BSD file descriptor passing) */ __kernel_size_t msg_controllen; /* Length of cmsg list */ - unsigned msg_flags; + unsigned int msg_flags; }; /* For recvmmsg/sendmmsg */ struct mmsghdr { struct msghdr msg_hdr; - unsigned msg_len; + unsigned int msg_len; }; /* diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 0dddc9e42b6..b69bdb1e08b 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -28,6 +28,51 @@ #include <linux/platform_device.h> +#define STMMAC_RX_COE_NONE 0 +#define STMMAC_RX_COE_TYPE1 1 +#define STMMAC_RX_COE_TYPE2 2 + +/* Define the macros for CSR clock range parameters to be passed by + * platform code. + * This could also be configured at run time using CPU freq framework. */ + +/* MDC Clock Selection define*/ +#define STMMAC_CSR_60_100M 0x0 /* MDC = clk_scr_i/42 */ +#define STMMAC_CSR_100_150M 0x1 /* MDC = clk_scr_i/62 */ +#define STMMAC_CSR_20_35M 0x2 /* MDC = clk_scr_i/16 */ +#define STMMAC_CSR_35_60M 0x3 /* MDC = clk_scr_i/26 */ +#define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */ +#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/122 */ + +/* The MDC clock could be set higher than the IEEE 802.3 + * specified frequency limit 0f 2.5 MHz, by programming a clock divider + * of value different than the above defined values. The resultant MDIO + * clock frequency of 12.5 MHz is applicable for the interfacing chips + * supporting higher MDC clocks. + * The MDC clock selection macros need to be defined for MDC clock rate + * of 12.5 MHz, corresponding to the following selection. 
+ */ +#define STMMAC_CSR_I_4 0x8 /* clk_csr_i/4 */ +#define STMMAC_CSR_I_6 0x9 /* clk_csr_i/6 */ +#define STMMAC_CSR_I_8 0xA /* clk_csr_i/8 */ +#define STMMAC_CSR_I_10 0xB /* clk_csr_i/10 */ +#define STMMAC_CSR_I_12 0xC /* clk_csr_i/12 */ +#define STMMAC_CSR_I_14 0xD /* clk_csr_i/14 */ +#define STMMAC_CSR_I_16 0xE /* clk_csr_i/16 */ +#define STMMAC_CSR_I_18 0xF /* clk_csr_i/18 */ + +/* AXI DMA Burst length suported */ +#define DMA_AXI_BLEN_4 (1 << 1) +#define DMA_AXI_BLEN_8 (1 << 2) +#define DMA_AXI_BLEN_16 (1 << 3) +#define DMA_AXI_BLEN_32 (1 << 4) +#define DMA_AXI_BLEN_64 (1 << 5) +#define DMA_AXI_BLEN_128 (1 << 6) +#define DMA_AXI_BLEN_256 (1 << 7) +#define DMA_AXI_BLEN_ALL (DMA_AXI_BLEN_4 | DMA_AXI_BLEN_8 | DMA_AXI_BLEN_16 \ + | DMA_AXI_BLEN_32 | DMA_AXI_BLEN_64 \ + | DMA_AXI_BLEN_128 | DMA_AXI_BLEN_256) + /* Platfrom data for platform device structure's platform_data field */ struct stmmac_mdio_bus_data { @@ -38,16 +83,25 @@ struct stmmac_mdio_bus_data { int probed_phy_irq; }; +struct stmmac_dma_cfg { + int pbl; + int fixed_burst; + int mixed_burst; + int burst_len; +}; + struct plat_stmmacenet_data { + char *phy_bus_name; int bus_id; int phy_addr; int interface; struct stmmac_mdio_bus_data *mdio_bus_data; - int pbl; + struct stmmac_dma_cfg *dma_cfg; int clk_csr; int has_gmac; int enh_desc; int tx_coe; + int rx_coe; int bugged_jumbo; int pmt; int force_sf_dma_mode; @@ -56,6 +110,7 @@ struct plat_stmmacenet_data { int (*init)(struct platform_device *pdev); void (*exit)(struct platform_device *pdev); void *custom_cfg; + void *custom_data; void *bsp_priv; }; #endif diff --git a/include/linux/tcp.h b/include/linux/tcp.h index b6c62d29438..d9b42c5be08 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -106,6 +106,22 @@ enum { #define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/ #define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */ #define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */ +#define TCP_REPAIR 19 /* TCP sock is under repair right now */ +#define TCP_REPAIR_QUEUE 20 +#define TCP_QUEUE_SEQ 21 +#define TCP_REPAIR_OPTIONS 22 + +struct tcp_repair_opt { + __u32 opt_code; + __u32 opt_val; +}; + +enum { + TCP_NO_QUEUE, + TCP_RECV_QUEUE, + TCP_SEND_QUEUE, + TCP_QUEUES_NR, +}; /* for TCP_INFO socket option */ #define TCPI_OPT_TIMESTAMPS 1 @@ -353,7 +369,11 @@ struct tcp_sock { u8 nonagle : 4,/* Disable Nagle algorithm? */ thin_lto : 1,/* Use linear timeouts for thin streams */ thin_dupack : 1,/* Fast retransmit on first dupack */ - unused : 2; + repair : 1, + unused : 1; + u8 repair_queue; + u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */ + early_retrans_delayed:1; /* Delayed ER timer installed */ /* RTT measurement */ u32 srtt; /* smoothed round trip time << 3 */ diff --git a/include/linux/trdevice.h b/include/linux/trdevice.h deleted file mode 100644 index bfc84a7aecc..00000000000 --- a/include/linux/trdevice.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * INET An implementation of the TCP/IP protocol suite for the LINUX - * operating system. NET is implemented using the BSD Socket - * interface as the means of communication with the user level. - * - * Definitions for the Token-ring handlers. - * - * Version: @(#)eth.h 1.0.4 05/13/93 - * - * Authors: Ross Biro - * Fred N. 
van Kempen, <waltje@uWalt.NL.Mugnet.ORG> - * - * Relocated to include/linux where it belongs by Alan Cox - * <gw4pts@gw4pts.ampr.org> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - * WARNING: This move may well be temporary. This file will get merged with others RSN. - * - */ -#ifndef _LINUX_TRDEVICE_H -#define _LINUX_TRDEVICE_H - - -#include <linux/if_tr.h> - -#ifdef __KERNEL__ -extern __be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev); -extern void tr_source_route(struct sk_buff *skb, struct trh_hdr *trh, struct net_device *dev); -extern struct net_device *alloc_trdev(int sizeof_priv); - -#endif - -#endif /* _LINUX_TRDEVICE_H */ diff --git a/include/linux/usb/rndis_host.h b/include/linux/usb/rndis_host.h index 88fceb718c7..d44ef85db17 100644 --- a/include/linux/usb/rndis_host.h +++ b/include/linux/usb/rndis_host.h @@ -20,6 +20,8 @@ #ifndef __LINUX_USB_RNDIS_HOST_H #define __LINUX_USB_RNDIS_HOST_H +#include <linux/rndis.h> + /* * CONTROL uses CDC "encapsulated commands" with funky notifications. * - control-out: SEND_ENCAPSULATED @@ -49,47 +51,6 @@ struct rndis_msg_hdr { */ #define RNDIS_CONTROL_TIMEOUT_MS (5 * 1000) -#define RNDIS_MSG_COMPLETION cpu_to_le32(0x80000000) - -/* codes for "msg_type" field of rndis messages; - * only the data channel uses packet messages (maybe batched); - * everything else goes on the control channel. - */ -#define RNDIS_MSG_PACKET cpu_to_le32(0x00000001) /* 1-N packets */ -#define RNDIS_MSG_INIT cpu_to_le32(0x00000002) -#define RNDIS_MSG_INIT_C (RNDIS_MSG_INIT|RNDIS_MSG_COMPLETION) -#define RNDIS_MSG_HALT cpu_to_le32(0x00000003) -#define RNDIS_MSG_QUERY cpu_to_le32(0x00000004) -#define RNDIS_MSG_QUERY_C (RNDIS_MSG_QUERY|RNDIS_MSG_COMPLETION) -#define RNDIS_MSG_SET cpu_to_le32(0x00000005) -#define RNDIS_MSG_SET_C (RNDIS_MSG_SET|RNDIS_MSG_COMPLETION) -#define RNDIS_MSG_RESET cpu_to_le32(0x00000006) -#define RNDIS_MSG_RESET_C (RNDIS_MSG_RESET|RNDIS_MSG_COMPLETION) -#define RNDIS_MSG_INDICATE cpu_to_le32(0x00000007) -#define RNDIS_MSG_KEEPALIVE cpu_to_le32(0x00000008) -#define RNDIS_MSG_KEEPALIVE_C (RNDIS_MSG_KEEPALIVE|RNDIS_MSG_COMPLETION) - -/* codes for "status" field of completion messages */ -#define RNDIS_STATUS_SUCCESS cpu_to_le32(0x00000000) -#define RNDIS_STATUS_FAILURE cpu_to_le32(0xc0000001) -#define RNDIS_STATUS_INVALID_DATA cpu_to_le32(0xc0010015) -#define RNDIS_STATUS_NOT_SUPPORTED cpu_to_le32(0xc00000bb) -#define RNDIS_STATUS_MEDIA_CONNECT cpu_to_le32(0x4001000b) -#define RNDIS_STATUS_MEDIA_DISCONNECT cpu_to_le32(0x4001000c) -#define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION cpu_to_le32(0x40010012) - -/* codes for OID_GEN_PHYSICAL_MEDIUM */ -#define RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED cpu_to_le32(0x00000000) -#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN cpu_to_le32(0x00000001) -#define RNDIS_PHYSICAL_MEDIUM_CABLE_MODEM cpu_to_le32(0x00000002) -#define RNDIS_PHYSICAL_MEDIUM_PHONE_LINE cpu_to_le32(0x00000003) -#define RNDIS_PHYSICAL_MEDIUM_POWER_LINE cpu_to_le32(0x00000004) -#define RNDIS_PHYSICAL_MEDIUM_DSL cpu_to_le32(0x00000005) -#define RNDIS_PHYSICAL_MEDIUM_FIBRE_CHANNEL cpu_to_le32(0x00000006) -#define RNDIS_PHYSICAL_MEDIUM_1394 cpu_to_le32(0x00000007) -#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_WAN cpu_to_le32(0x00000008) -#define RNDIS_PHYSICAL_MEDIUM_MAX cpu_to_le32(0x00000009) - struct rndis_data_hdr { __le32 msg_type; 
/* RNDIS_MSG_PACKET */ __le32 msg_len; /* rndis_data_hdr + data_len + pad */ @@ -222,29 +183,6 @@ struct rndis_keepalive_c { /* IN (optionally OUT) */ __le32 status; } __attribute__ ((packed)); -/* NOTE: about 30 OIDs are "mandatory" for peripherals to support ... and - * there are gobs more that may optionally be supported. We'll avoid as much - * of that mess as possible. - */ -#define OID_802_3_PERMANENT_ADDRESS cpu_to_le32(0x01010101) -#define OID_GEN_MAXIMUM_FRAME_SIZE cpu_to_le32(0x00010106) -#define OID_GEN_CURRENT_PACKET_FILTER cpu_to_le32(0x0001010e) -#define OID_GEN_PHYSICAL_MEDIUM cpu_to_le32(0x00010202) - -/* packet filter bits used by OID_GEN_CURRENT_PACKET_FILTER */ -#define RNDIS_PACKET_TYPE_DIRECTED cpu_to_le32(0x00000001) -#define RNDIS_PACKET_TYPE_MULTICAST cpu_to_le32(0x00000002) -#define RNDIS_PACKET_TYPE_ALL_MULTICAST cpu_to_le32(0x00000004) -#define RNDIS_PACKET_TYPE_BROADCAST cpu_to_le32(0x00000008) -#define RNDIS_PACKET_TYPE_SOURCE_ROUTING cpu_to_le32(0x00000010) -#define RNDIS_PACKET_TYPE_PROMISCUOUS cpu_to_le32(0x00000020) -#define RNDIS_PACKET_TYPE_SMT cpu_to_le32(0x00000040) -#define RNDIS_PACKET_TYPE_ALL_LOCAL cpu_to_le32(0x00000080) -#define RNDIS_PACKET_TYPE_GROUP cpu_to_le32(0x00001000) -#define RNDIS_PACKET_TYPE_ALL_FUNCTIONAL cpu_to_le32(0x00002000) -#define RNDIS_PACKET_TYPE_FUNCTIONAL cpu_to_le32(0x00004000) -#define RNDIS_PACKET_TYPE_MAC_FRAME cpu_to_le32(0x00008000) - /* default filter used with RNDIS devices */ #define RNDIS_DEFAULT_FILTER ( \ RNDIS_PACKET_TYPE_DIRECTED | \ diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 970d5a2a904..2470f541af5 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -49,8 +49,11 @@ #define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */ #define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */ #define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */ +#define VIRTIO_NET_F_GUEST_ANNOUNCE 21 /* Guest can announce device on the + * network */ #define VIRTIO_NET_S_LINK_UP 1 /* Link is up */ +#define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */ struct virtio_net_config { /* The config defining mac address (if VIRTIO_NET_F_MAC) */ @@ -152,4 +155,15 @@ struct virtio_net_ctrl_mac { #define VIRTIO_NET_CTRL_VLAN_ADD 0 #define VIRTIO_NET_CTRL_VLAN_DEL 1 +/* + * Control link announce acknowledgement + * + * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that + * driver has recevied the notification; device would clear the + * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives + * this command. 
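(Illustrative sketch, not from this patch: the acknowledgement is an ordinary control-virtqueue command built with the existing struct virtio_net_ctrl_hdr and the new class/command values defined just below; after receiving it the device clears VIRTIO_NET_S_ANNOUNCE in its status field.

	struct virtio_net_ctrl_hdr ack = {
		.class = VIRTIO_NET_CTRL_ANNOUNCE,
		.cmd   = VIRTIO_NET_CTRL_ANNOUNCE_ACK,
	};
	/* queue &ack on the control virtqueue as usual; plumbing omitted */
)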
+ */ +#define VIRTIO_NET_CTRL_ANNOUNCE 3 + #define VIRTIO_NET_CTRL_ANNOUNCE_ACK 0 + #endif /* _LINUX_VIRTIO_NET_H */ diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 757a17638b1..f2b801c4b55 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -92,7 +92,7 @@ extern void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr); static inline unsigned long addrconf_timeout_fixup(u32 timeout, - unsigned unit) + unsigned int unit) { if (timeout == 0xffffffff) return ~0UL; @@ -131,9 +131,9 @@ extern int ipv6_sock_mc_join(struct sock *sk, int ifindex, extern int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); extern void ipv6_sock_mc_close(struct sock *sk); -extern int inet6_mc_check(struct sock *sk, - const struct in6_addr *mc_addr, - const struct in6_addr *src_addr); +extern bool inet6_mc_check(struct sock *sk, + const struct in6_addr *mc_addr, + const struct in6_addr *src_addr); extern int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr); extern int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr); @@ -146,10 +146,10 @@ extern void ipv6_mc_init_dev(struct inet6_dev *idev); extern void ipv6_mc_destroy_dev(struct inet6_dev *idev); extern void addrconf_dad_failure(struct inet6_ifaddr *ifp); -extern int ipv6_chk_mcast_addr(struct net_device *dev, - const struct in6_addr *group, - const struct in6_addr *src_addr); -extern int ipv6_is_mld(struct sk_buff *skb, int nexthdr); +extern bool ipv6_chk_mcast_addr(struct net_device *dev, + const struct in6_addr *group, + const struct in6_addr *src_addr); +extern bool ipv6_is_mld(struct sk_buff *skb, int nexthdr); extern void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao); @@ -163,8 +163,8 @@ extern void ipv6_sock_ac_close(struct sock *sk); extern int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr); extern int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr); -extern int ipv6_chk_acast_addr(struct net *net, struct net_device *dev, - const struct in6_addr *addr); +extern bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev, + const struct in6_addr *addr); /* Device notifier */ diff --git a/include/net/af_unix.h b/include/net/af_unix.h index ca68e2cef23..2ee33da36a7 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -22,7 +22,7 @@ extern struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1]; struct unix_address { atomic_t refcnt; int len; - unsigned hash; + unsigned int hash; struct sockaddr_un name[0]; }; diff --git a/include/net/ax25.h b/include/net/ax25.h index 94e09d361bb..5d2352154cf 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -215,7 +215,7 @@ typedef struct ax25_dev { struct ax25_dev *next; struct net_device *dev; struct net_device *forward; - struct ctl_table *systable; + struct ctl_table_header *sysheader; int values[AX25_MAX_VALUES]; #if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER) ax25_dama_info dama; @@ -441,11 +441,11 @@ extern void ax25_uid_free(void); /* sysctl_net_ax25.c */ #ifdef CONFIG_SYSCTL -extern void ax25_register_sysctl(void); -extern void ax25_unregister_sysctl(void); +extern int ax25_register_dev_sysctl(ax25_dev *ax25_dev); +extern void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev); #else -static inline void ax25_register_sysctl(void) {}; -static inline void ax25_unregister_sysctl(void) {}; +static inline int ax25_register_dev_sysctl(ax25_dev *ax25_dev) { return 0; } +static 
inline void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev) {} #endif /* CONFIG_SYSCTL */ #endif diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h index 6db8ecf52aa..439dadc8102 100644 --- a/include/net/caif/caif_hsi.h +++ b/include/net/caif/caif_hsi.h @@ -123,12 +123,21 @@ struct cfhsi_rx_state { bool piggy_desc; }; +/* Priority mapping */ +enum { + CFHSI_PRIO_CTL = 0, + CFHSI_PRIO_VI, + CFHSI_PRIO_VO, + CFHSI_PRIO_BEBK, + CFHSI_PRIO_LAST, +}; + /* Structure implemented by CAIF HSI drivers. */ struct cfhsi { struct caif_dev_common cfdev; struct net_device *ndev; struct platform_device *pdev; - struct sk_buff_head qhead; + struct sk_buff_head qhead[CFHSI_PRIO_LAST]; struct cfhsi_drv drv; struct cfhsi_dev *dev; int tx_state; @@ -151,8 +160,14 @@ struct cfhsi { wait_queue_head_t wake_up_wait; wait_queue_head_t wake_down_wait; wait_queue_head_t flush_fifo_wait; - struct timer_list timer; + struct timer_list inactivity_timer; struct timer_list rx_slowpath_timer; + + /* TX aggregation */ + unsigned long aggregation_timeout; + int aggregation_len; + struct timer_list aggregation_timer; + unsigned long bits; }; diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h index 6bd200a4754..83a89ba3005 100644 --- a/include/net/caif/cfpkt.h +++ b/include/net/caif/cfpkt.h @@ -188,11 +188,18 @@ struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt); */ void *cfpkt_tonative(struct cfpkt *pkt); - /* * Returns packet information for a packet. * pkt Packet to get info from; * @return Packet information */ struct caif_payload_info *cfpkt_info(struct cfpkt *pkt); + +/** cfpkt_set_prio - set priority for a CAIF packet. + * + * @pkt: The CAIF packet to be adjusted. + * @prio: one of TC_PRIO_ constants. + */ +void cfpkt_set_prio(struct cfpkt *pkt, int prio); + #endif /* CFPKT_H_ */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 83d800c31e3..adb2320bccd 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -521,6 +521,7 @@ struct station_parameters { * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled * @STATION_INFO_STA_FLAGS: @sta_flags filled * @STATION_INFO_BEACON_LOSS_COUNT: @beacon_loss_count filled + * @STATION_INFO_T_OFFSET: @t_offset filled */ enum station_info_flags { STATION_INFO_INACTIVE_TIME = 1<<0, @@ -542,7 +543,8 @@ enum station_info_flags { STATION_INFO_CONNECTED_TIME = 1<<16, STATION_INFO_ASSOC_REQ_IES = 1<<17, STATION_INFO_STA_FLAGS = 1<<18, - STATION_INFO_BEACON_LOSS_COUNT = 1<<19 + STATION_INFO_BEACON_LOSS_COUNT = 1<<19, + STATION_INFO_T_OFFSET = 1<<20, }; /** @@ -643,6 +645,7 @@ struct sta_bss_parameters { * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets. * @sta_flags: station flags mask & values * @beacon_loss_count: Number of times beacon loss event has triggered. + * @t_offset: Time offset of the station relative to this host. 
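(Illustrative sketch, hypothetical driver code rather than part of the patch: a driver reporting the new timing-offset field from its get_station()/dump_station() handlers sets the matching filled bit along with the value.

static void example_fill_sinfo(struct station_info *sinfo, s64 t_offset)
{
	sinfo->filled |= STATION_INFO_T_OFFSET;
	sinfo->t_offset = t_offset;
}
)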
*/ struct station_info { u32 filled; @@ -671,6 +674,7 @@ struct station_info { size_t assoc_req_ies_len; u32 beacon_loss_count; + s64 t_offset; /* * Note: Add a new enum station_info_flags value for each new field and @@ -798,6 +802,8 @@ struct mesh_config { /* ttl used in path selection information elements */ u8 element_ttl; bool auto_open_plinks; + /* neighbor offset synchronization */ + u32 dot11MeshNbrOffsetMaxNeighbor; /* HWMP parameters */ u8 dot11MeshHWMPmaxPREQretries; u32 path_refresh_time; @@ -815,12 +821,14 @@ struct mesh_config { bool dot11MeshGateAnnouncementProtocol; bool dot11MeshForwarding; s32 rssi_threshold; + u16 ht_opmode; }; /** * struct mesh_setup - 802.11s mesh setup configuration * @mesh_id: the mesh ID * @mesh_id_len: length of the mesh ID, at least 1 and at most 32 bytes + * @sync_method: which synchronization method to use * @path_sel_proto: which path selection protocol to use * @path_metric: which metric to use * @ie: vendor information elements (optional) @@ -834,8 +842,9 @@ struct mesh_config { struct mesh_setup { const u8 *mesh_id; u8 mesh_id_len; - u8 path_sel_proto; - u8 path_metric; + u8 sync_method; + u8 path_sel_proto; + u8 path_metric; const u8 *ie; u8 ie_len; bool is_authenticated; @@ -845,7 +854,7 @@ struct mesh_setup { /** * struct ieee80211_txq_params - TX queue parameters - * @queue: TX queue identifier (NL80211_TXQ_Q_*) + * @ac: AC identifier * @txop: Maximum burst time in units of 32 usecs, 0 meaning disabled * @cwmin: Minimum contention window [a value of the form 2^n-1 in the range * 1..32767] @@ -854,7 +863,7 @@ struct mesh_setup { * @aifs: Arbitration interframe space [0..255] */ struct ieee80211_txq_params { - enum nl80211_txq_q queue; + enum nl80211_ac ac; u16 txop; u16 cwmin; u16 cwmax; @@ -1336,6 +1345,9 @@ struct cfg80211_gtk_rekey_data { * be %NULL or contain the enabled Wake-on-Wireless triggers that are * configured for the device. * @resume: wiphy device needs to be resumed + * @set_wakeup: Called when WoWLAN is enabled/disabled, use this callback + * to call device_set_wakeup_enable() to enable/disable wakeup from + * the device. * * @add_virtual_intf: create a new virtual interface with the given name, * must set the struct wireless_dev's iftype. Beware: You must create @@ -1503,10 +1515,21 @@ struct cfg80211_gtk_rekey_data { * later passes to cfg80211_probe_status(). * * @set_noack_map: Set the NoAck Map for the TIDs. + * + * @get_et_sset_count: Ethtool API to get string-set count. + * See @ethtool_ops.get_sset_count + * + * @get_et_stats: Ethtool API to get a set of u64 stats. + * See @ethtool_ops.get_ethtool_stats + * + * @get_et_strings: Ethtool API to get a set of strings to describe stats + * and perhaps other supported types of ethtool data-sets. 
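(Rough sketch of the new ethtool glue described above; hypothetical driver code, not from this patch, assuming the usual ETH_SS_STATS string-set semantics of ethtool_ops:

static const char example_gstrings[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets",
};

static int example_get_et_sset_count(struct wiphy *wiphy,
				     struct net_device *dev, int sset)
{
	return sset == ETH_SS_STATS ? ARRAY_SIZE(example_gstrings) : 0;
}

static void example_get_et_strings(struct wiphy *wiphy, struct net_device *dev,
				   u32 sset, u8 *data)
{
	if (sset == ETH_SS_STATS)
		memcpy(data, example_gstrings, sizeof(example_gstrings));
}
)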
+ * See @ethtool_ops.get_strings */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); int (*resume)(struct wiphy *wiphy); + void (*set_wakeup)(struct wiphy *wiphy, bool enabled); struct net_device * (*add_virtual_intf)(struct wiphy *wiphy, char *name, @@ -1698,7 +1721,15 @@ struct cfg80211_ops { struct net_device *dev, u16 noack_map); - struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy); + struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy, + enum nl80211_channel_type *type); + + int (*get_et_sset_count)(struct wiphy *wiphy, + struct net_device *dev, int sset); + void (*get_et_stats)(struct wiphy *wiphy, struct net_device *dev, + struct ethtool_stats *stats, u64 *data); + void (*get_et_strings)(struct wiphy *wiphy, struct net_device *dev, + u32 sset, u8 *data); }; /* @@ -1732,10 +1763,6 @@ struct cfg80211_ops { * hints read the documenation for regulatory_hint_found_beacon() * @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this * wiphy at all - * @WIPHY_FLAG_ENFORCE_COMBINATIONS: Set this flag to enforce interface - * combinations for this device. This flag is used for backward - * compatibility only until all drivers advertise combinations and - * they will always be enforced. * @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled * by default -- this flag will be set depending on the kernel's default * on wiphy_new(), but can be changed by the driver if it has a good @@ -1780,7 +1807,7 @@ enum wiphy_flags { WIPHY_FLAG_IBSS_RSN = BIT(8), WIPHY_FLAG_MESH_AUTH = BIT(10), WIPHY_FLAG_SUPPORTS_SCHED_SCAN = BIT(11), - WIPHY_FLAG_ENFORCE_COMBINATIONS = BIT(12), + /* use hole at 12 */ WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13), WIPHY_FLAG_AP_UAPSD = BIT(14), WIPHY_FLAG_SUPPORTS_TDLS = BIT(15), @@ -3343,6 +3370,17 @@ int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy, enum nl80211_channel_type channel_type); /* + * cfg80211_ch_switch_notify - update wdev channel and notify userspace + * @dev: the device which switched channels + * @freq: new channel frequency (in MHz) + * @type: channel type + * + * Acquires wdev_lock, so must only be called from sleepable driver context! + */ +void cfg80211_ch_switch_notify(struct net_device *dev, int freq, + enum nl80211_channel_type type); + +/* * cfg80211_calculate_bitrate - calculate actual bitrate (in 100Kbps units) * @rate: given rate_info to calculate bitrate from * diff --git a/include/net/codel.h b/include/net/codel.h new file mode 100644 index 00000000000..550debfc240 --- /dev/null +++ b/include/net/codel.h @@ -0,0 +1,342 @@ +#ifndef __NET_SCHED_CODEL_H +#define __NET_SCHED_CODEL_H + +/* + * Codel - The Controlled-Delay Active Queue Management algorithm + * + * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com> + * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net> + * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net> + * Copyright (C) 2012 Eric Dumazet <edumazet@google.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
The names of the authors may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * Alternatively, provided that this notice is retained in full, this + * software may be distributed under the terms of the GNU General + * Public License ("GPL") version 2, in which case the provisions of the + * GPL apply INSTEAD OF those given above. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + */ + +#include <linux/types.h> +#include <linux/ktime.h> +#include <linux/skbuff.h> +#include <net/pkt_sched.h> +#include <net/inet_ecn.h> +#include <linux/reciprocal_div.h> + +/* Controlling Queue Delay (CoDel) algorithm + * ========================================= + * Source : Kathleen Nichols and Van Jacobson + * http://queue.acm.org/detail.cfm?id=2209336 + * + * Implemented on linux by Dave Taht and Eric Dumazet + */ + + +/* CoDel uses a 1024 nsec clock, encoded in u32 + * This gives a range of 2199 seconds, because of signed compares + */ +typedef u32 codel_time_t; +typedef s32 codel_tdiff_t; +#define CODEL_SHIFT 10 +#define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT) + +static inline codel_time_t codel_get_time(void) +{ + u64 ns = ktime_to_ns(ktime_get()); + + return ns >> CODEL_SHIFT; +} + +#define codel_time_after(a, b) ((s32)(a) - (s32)(b) > 0) +#define codel_time_after_eq(a, b) ((s32)(a) - (s32)(b) >= 0) +#define codel_time_before(a, b) ((s32)(a) - (s32)(b) < 0) +#define codel_time_before_eq(a, b) ((s32)(a) - (s32)(b) <= 0) + +/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */ +struct codel_skb_cb { + codel_time_t enqueue_time; +}; + +static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb) +{ + qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb)); + return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data; +} + +static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb) +{ + return get_codel_cb(skb)->enqueue_time; +} + +static void codel_set_enqueue_time(struct sk_buff *skb) +{ + get_codel_cb(skb)->enqueue_time = codel_get_time(); +} + +static inline u32 codel_time_to_us(codel_time_t val) +{ + u64 valns = ((u64)val << CODEL_SHIFT); + + do_div(valns, NSEC_PER_USEC); + return (u32)valns; +} + +/** + * struct codel_params - contains codel parameters + * @target: target queue size (in time units) + * @interval: width of moving time window + * @ecn: is Explicit Congestion Notification enabled + */ +struct codel_params { + codel_time_t target; + codel_time_t interval; + bool ecn; +}; + +/** + * struct codel_vars - contains codel variables + * @count: how many drops we've done since the last time we + * entered dropping state + * @lastcount: count at entry to dropping state + * @dropping: set to true if in dropping state + * @rec_inv_sqrt: 
reciprocal value of sqrt(count) >> 1 + * @first_above_time: when we went (or will go) continuously above target + * for interval + * @drop_next: time to drop next packet, or when we dropped last + * @ldelay: sojourn time of last dequeued packet + */ +struct codel_vars { + u32 count; + u32 lastcount; + bool dropping; + u16 rec_inv_sqrt; + codel_time_t first_above_time; + codel_time_t drop_next; + codel_time_t ldelay; +}; + +#define REC_INV_SQRT_BITS (8 * sizeof(u16)) /* or sizeof_in_bits(rec_inv_sqrt) */ +/* needed shift to get a Q0.32 number from rec_inv_sqrt */ +#define REC_INV_SQRT_SHIFT (32 - REC_INV_SQRT_BITS) + +/** + * struct codel_stats - contains codel shared variables and stats + * @maxpacket: largest packet we've seen so far + * @drop_count: temp count of dropped packets in dequeue() + * ecn_mark: number of packets we ECN marked instead of dropping + */ +struct codel_stats { + u32 maxpacket; + u32 drop_count; + u32 ecn_mark; +}; + +static void codel_params_init(struct codel_params *params) +{ + params->interval = MS2TIME(100); + params->target = MS2TIME(5); + params->ecn = false; +} + +static void codel_vars_init(struct codel_vars *vars) +{ + memset(vars, 0, sizeof(*vars)); +} + +static void codel_stats_init(struct codel_stats *stats) +{ + stats->maxpacket = 256; +} + +/* + * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots + * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2) + * + * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32 + */ +static void codel_Newton_step(struct codel_vars *vars) +{ + u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT; + u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32; + u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2); + + val >>= 2; /* avoid overflow in following multiply */ + val = (val * invsqrt) >> (32 - 2 + 1); + + vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT; +} + +/* + * CoDel control_law is t + interval/sqrt(count) + * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid + * both sqrt() and divide operation. + */ +static codel_time_t codel_control_law(codel_time_t t, + codel_time_t interval, + u32 rec_inv_sqrt) +{ + return t + reciprocal_divide(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT); +} + + +static bool codel_should_drop(const struct sk_buff *skb, + struct Qdisc *sch, + struct codel_vars *vars, + struct codel_params *params, + struct codel_stats *stats, + codel_time_t now) +{ + bool ok_to_drop; + + if (!skb) { + vars->first_above_time = 0; + return false; + } + + vars->ldelay = now - codel_get_enqueue_time(skb); + sch->qstats.backlog -= qdisc_pkt_len(skb); + + if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket)) + stats->maxpacket = qdisc_pkt_len(skb); + + if (codel_time_before(vars->ldelay, params->target) || + sch->qstats.backlog <= stats->maxpacket) { + /* went below - stay below for at least interval */ + vars->first_above_time = 0; + return false; + } + ok_to_drop = false; + if (vars->first_above_time == 0) { + /* just went above from below. 
If we stay above + * for at least interval we'll say it's ok to drop + */ + vars->first_above_time = now + params->interval; + } else if (codel_time_after(now, vars->first_above_time)) { + ok_to_drop = true; + } + return ok_to_drop; +} + +typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars, + struct Qdisc *sch); + +static struct sk_buff *codel_dequeue(struct Qdisc *sch, + struct codel_params *params, + struct codel_vars *vars, + struct codel_stats *stats, + codel_skb_dequeue_t dequeue_func) +{ + struct sk_buff *skb = dequeue_func(vars, sch); + codel_time_t now; + bool drop; + + if (!skb) { + vars->dropping = false; + return skb; + } + now = codel_get_time(); + drop = codel_should_drop(skb, sch, vars, params, stats, now); + if (vars->dropping) { + if (!drop) { + /* sojourn time below target - leave dropping state */ + vars->dropping = false; + } else if (codel_time_after_eq(now, vars->drop_next)) { + /* It's time for the next drop. Drop the current + * packet and dequeue the next. The dequeue might + * take us out of dropping state. + * If not, schedule the next drop. + * A large backlog might result in drop rates so high + * that the next drop should happen now, + * hence the while loop. + */ + while (vars->dropping && + codel_time_after_eq(now, vars->drop_next)) { + vars->count++; /* dont care of possible wrap + * since there is no more divide + */ + codel_Newton_step(vars); + if (params->ecn && INET_ECN_set_ce(skb)) { + stats->ecn_mark++; + vars->drop_next = + codel_control_law(vars->drop_next, + params->interval, + vars->rec_inv_sqrt); + goto end; + } + qdisc_drop(skb, sch); + stats->drop_count++; + skb = dequeue_func(vars, sch); + if (!codel_should_drop(skb, sch, + vars, params, stats, now)) { + /* leave dropping state */ + vars->dropping = false; + } else { + /* and schedule the next drop */ + vars->drop_next = + codel_control_law(vars->drop_next, + params->interval, + vars->rec_inv_sqrt); + } + } + } + } else if (drop) { + if (params->ecn && INET_ECN_set_ce(skb)) { + stats->ecn_mark++; + } else { + qdisc_drop(skb, sch); + stats->drop_count++; + + skb = dequeue_func(vars, sch); + drop = codel_should_drop(skb, sch, vars, params, + stats, now); + } + vars->dropping = true; + /* if min went above target close to when we last went below it + * assume that the drop rate that controlled the queue on the + * last cycle is a good starting point to control it now. + */ + if (codel_time_before(now - vars->drop_next, + 16 * params->interval)) { + vars->count = (vars->count - vars->lastcount) | 1; + /* we dont care if rec_inv_sqrt approximation + * is not very precise : + * Next Newton steps will correct it quadratically. 
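(Illustrative calculation, not from the patch: the control law schedules drop_next = t + interval/sqrt(count), so with the default 100 ms interval the spacing between drops shrinks as the drop count grows: 100 ms after the 1st drop, 50 ms after the 4th, 25 ms after the 16th, 10 ms after the 100th. rec_inv_sqrt merely caches 1/sqrt(count) as a 16-bit fixed-point value, expanded to Q0.32 for reciprocal_divide(), and is refreshed by one Newton step, invsqrt' = (invsqrt / 2) * (3 - count * invsqrt^2), per drop, so the dequeue path needs neither a divide nor a square root.)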
+ */ + codel_Newton_step(vars); + } else { + vars->count = 1; + vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT; + } + vars->lastcount = vars->count; + vars->drop_next = codel_control_law(now, params->interval, + vars->rec_inv_sqrt); + } +end: + return skb; +} +#endif diff --git a/include/net/compat.h b/include/net/compat.h index a974ae92d18..6e956532498 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -42,12 +42,12 @@ extern int compat_sock_get_timestampns(struct sock *, struct timespec __user *); extern int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *); extern int verify_compat_iovec(struct msghdr *, struct iovec *, struct sockaddr_storage *, int); -extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned); +extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned int); extern asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *, - unsigned, unsigned); -extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned); + unsigned int, unsigned int); +extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned int); extern asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *, - unsigned, unsigned, + unsigned int, unsigned int, struct compat_timespec __user *); extern asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, int __user *); extern int put_cmsg_compat(struct msghdr*, int, int, int, void *); diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h index f55c980d8e2..fc5d5dcebb0 100644 --- a/include/net/dcbnl.h +++ b/include/net/dcbnl.h @@ -48,6 +48,8 @@ struct dcbnl_rtnl_ops { /* IEEE 802.1Qaz std */ int (*ieee_getets) (struct net_device *, struct ieee_ets *); int (*ieee_setets) (struct net_device *, struct ieee_ets *); + int (*ieee_getmaxrate) (struct net_device *, struct ieee_maxrate *); + int (*ieee_setmaxrate) (struct net_device *, struct ieee_maxrate *); int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *); int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *); int (*ieee_getapp) (struct net_device *, struct dcb_app *); diff --git a/include/net/dn.h b/include/net/dn.h index 814af0b9387..c88bf4ebd33 100644 --- a/include/net/dn.h +++ b/include/net/dn.h @@ -199,7 +199,7 @@ static inline void dn_sk_ports_copy(struct flowidn *fld, struct dn_scp *scp) fld->fld_dport = scp->addrrem; } -extern unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu); +extern unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu); #define DN_MENUVER_ACC 0x01 #define DN_MENUVER_USR 0x02 diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h index 782ef7cb493..1ee9d4bda30 100644 --- a/include/net/dn_fib.h +++ b/include/net/dn_fib.h @@ -31,7 +31,7 @@ struct dn_fib_res { struct dn_fib_nh { struct net_device *nh_dev; - unsigned nh_flags; + unsigned int nh_flags; unsigned char nh_scope; int nh_weight; int nh_power; @@ -45,7 +45,7 @@ struct dn_fib_info { int fib_treeref; atomic_t fib_clntref; int fib_dead; - unsigned fib_flags; + unsigned int fib_flags; int fib_protocol; __le16 fib_prefsrc; __u32 fib_priority; @@ -140,7 +140,7 @@ extern void dn_fib_table_cleanup(void); */ extern void dn_fib_rules_init(void); extern void dn_fib_rules_cleanup(void); -extern unsigned dnet_addr_type(__le16 addr); +extern unsigned int dnet_addr_type(__le16 addr); extern int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res); extern int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb); diff --git 
a/include/net/dn_route.h b/include/net/dn_route.h index 81712cfa1dd..c507e05d172 100644 --- a/include/net/dn_route.h +++ b/include/net/dn_route.h @@ -76,8 +76,8 @@ struct dn_route { __le16 rt_src_map; __le16 rt_dst_map; - unsigned rt_flags; - unsigned rt_type; + unsigned int rt_flags; + unsigned int rt_type; }; static inline bool dn_is_input_route(struct dn_route *rt) diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index e1c2ee0eef4..3682a0a076c 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -12,7 +12,7 @@ struct sk_buff; struct dst_ops { unsigned short family; __be16 protocol; - unsigned gc_thresh; + unsigned int gc_thresh; int (*gc)(struct dst_ops *ops); struct dst_entry * (*check)(struct dst_entry *, __u32 cookie); diff --git a/include/net/icmp.h b/include/net/icmp.h index 75d61564907..9ac2524d140 100644 --- a/include/net/icmp.h +++ b/include/net/icmp.h @@ -25,7 +25,7 @@ struct icmp_err { int errno; - unsigned fatal:1; + unsigned int fatal:1; }; extern const struct icmp_err icmp_err_convert[]; @@ -41,7 +41,6 @@ struct net; extern void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info); extern int icmp_rcv(struct sk_buff *skb); -extern int icmp_ioctl(struct sock *sk, int cmd, unsigned long arg); extern int icmp_init(void); extern void icmp_out_count(struct net *net, unsigned char type); diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h index 57430555487..d104c882fc2 100644 --- a/include/net/ieee802154_netdev.h +++ b/include/net/ieee802154_netdev.h @@ -1,7 +1,7 @@ /* * An interface between IEEE802.15.4 device and rest of the kernel. * - * Copyright (C) 2007, 2008, 2009 Siemens AG + * Copyright (C) 2007-2012 Siemens AG * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 @@ -21,11 +21,14 @@ * Maxim Gorbachyov <maxim.gorbachev@siemens.com> * Maxim Osipov <maxim.osipov@siemens.com> * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> */ #ifndef IEEE802154_NETDEVICE_H #define IEEE802154_NETDEVICE_H +#include <net/af_ieee802154.h> + /* * A control block of skb passed between the ARPHRD_IEEE802154 device * and other stack parts. 
@@ -110,12 +113,26 @@ struct ieee802154_mlme_ops { u8 (*get_bsn)(const struct net_device *dev); }; -static inline struct ieee802154_mlme_ops *ieee802154_mlme_ops( - const struct net_device *dev) +/* The IEEE 802.15.4 standard defines 2 type of the devices: + * - FFD - full functionality device + * - RFD - reduce functionality device + * + * So 2 sets of mlme operations are needed + */ +struct ieee802154_reduced_mlme_ops { + struct wpan_phy *(*get_phy)(const struct net_device *dev); +}; + +static inline struct ieee802154_mlme_ops * +ieee802154_mlme_ops(const struct net_device *dev) { return dev->ml_priv; } -#endif - +static inline struct ieee802154_reduced_mlme_ops * +ieee802154_reduced_mlme_ops(const struct net_device *dev) +{ + return dev->ml_priv; +} +#endif diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 51a7031b4aa..93563221d29 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -120,7 +120,7 @@ struct ifmcaddr6 { unsigned char mca_crcount; unsigned long mca_sfcount[2]; struct timer_list mca_timer; - unsigned mca_flags; + unsigned int mca_flags; int mca_users; atomic_t mca_refcnt; spinlock_t mca_lock; @@ -209,60 +209,6 @@ static inline void ipv6_eth_mc_map(const struct in6_addr *addr, char *buf) memcpy(buf + 2, &addr->s6_addr32[3], sizeof(__u32)); } -static inline void ipv6_tr_mc_map(const struct in6_addr *addr, char *buf) -{ - /* All nodes FF01::1, FF02::1, FF02::1:FFxx:xxxx */ - - if (((addr->s6_addr[0] == 0xFF) && - ((addr->s6_addr[1] == 0x01) || (addr->s6_addr[1] == 0x02)) && - (addr->s6_addr16[1] == 0) && - (addr->s6_addr32[1] == 0) && - (addr->s6_addr32[2] == 0) && - (addr->s6_addr16[6] == 0) && - (addr->s6_addr[15] == 1)) || - ((addr->s6_addr[0] == 0xFF) && - (addr->s6_addr[1] == 0x02) && - (addr->s6_addr16[1] == 0) && - (addr->s6_addr32[1] == 0) && - (addr->s6_addr16[4] == 0) && - (addr->s6_addr[10] == 0) && - (addr->s6_addr[11] == 1) && - (addr->s6_addr[12] == 0xff))) - { - buf[0]=0xC0; - buf[1]=0x00; - buf[2]=0x01; - buf[3]=0x00; - buf[4]=0x00; - buf[5]=0x00; - /* All routers FF0x::2 */ - } else if ((addr->s6_addr[0] ==0xff) && - ((addr->s6_addr[1] & 0xF0) == 0) && - (addr->s6_addr16[1] == 0) && - (addr->s6_addr32[1] == 0) && - (addr->s6_addr32[2] == 0) && - (addr->s6_addr16[6] == 0) && - (addr->s6_addr[15] == 2)) - { - buf[0]=0xC0; - buf[1]=0x00; - buf[2]=0x02; - buf[3]=0x00; - buf[4]=0x00; - buf[5]=0x00; - } else { - unsigned char i ; - - i = addr->s6_addr[15] & 7 ; - buf[0]=0xC0; - buf[1]=0x00; - buf[2]=0x00; - buf[3]=0x01 << i ; - buf[4]=0x00; - buf[5]=0x00; - } -} - static inline void ipv6_arcnet_mc_map(const struct in6_addr *addr, char *buf) { buf[0] = 0x00; diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h index 3207e58ee01..1866a676c81 100644 --- a/include/net/inet6_connection_sock.h +++ b/include/net/inet6_connection_sock.h @@ -23,7 +23,7 @@ struct sock; struct sockaddr; extern int inet6_csk_bind_conflict(const struct sock *sk, - const struct inet_bind_bucket *tb); + const struct inet_bind_bucket *tb, bool relax); extern struct dst_entry* inet6_csk_route_req(struct sock *sk, const struct request_sock *req); diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index dbf9aab34c8..7d83f90f203 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -45,6 +45,7 @@ struct inet_connection_sock_af_ops { struct dst_entry *dst); struct inet_peer *(*get_peer)(struct sock *sk, bool *release_it); u16 net_header_len; + u16 net_frag_header_len; 
u16 sockaddr_len; int (*setsockopt)(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); @@ -60,7 +61,7 @@ struct inet_connection_sock_af_ops { #endif void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); int (*bind_conflict)(const struct sock *sk, - const struct inet_bind_bucket *tb); + const struct inet_bind_bucket *tb, bool relax); }; /** inet_connection_sock - INET connection oriented sock @@ -245,7 +246,7 @@ extern struct request_sock *inet_csk_search_req(const struct sock *sk, const __be32 raddr, const __be32 laddr); extern int inet_csk_bind_conflict(const struct sock *sk, - const struct inet_bind_bucket *tb); + const struct inet_bind_bucket *tb, bool relax); extern int inet_csk_get_port(struct sock *sk, unsigned short snum); extern struct dst_entry* inet_csk_route_req(struct sock *sk, diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 16ff29a7bb3..2431cf83aec 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -46,8 +46,7 @@ struct inet_frags { void *arg); void (*destructor)(struct inet_frag_queue *); void (*skb_free)(struct sk_buff *); - int (*match)(struct inet_frag_queue *q, - void *arg); + bool (*match)(struct inet_frag_queue *q, void *arg); void (*frag_expire)(unsigned long data); }; diff --git a/include/net/ip.h b/include/net/ip.h index b53d65f24f7..83e0619f59d 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -141,23 +141,6 @@ static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4) extern int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); -/* - * Map a multicast IP onto multicast MAC for type Token Ring. - * This conforms to RFC1469 Option 2 Multicasting i.e. - * using a functional address to transmit / receive - * multicast packets. 
- */ - -static inline void ip_tr_mc_map(__be32 addr, char *buf) -{ - buf[0]=0xC0; - buf[1]=0x00; - buf[2]=0x00; - buf[3]=0x04; - buf[4]=0x00; - buf[5]=0x00; -} - struct ip_reply_arg { struct kvec iov[1]; int flags; @@ -222,9 +205,6 @@ static inline int inet_is_reserved_local_port(int port) extern int sysctl_ip_nonlocal_bind; -extern struct ctl_path net_core_path[]; -extern struct ctl_path net_ipv4_ctl_path[]; - /* From inetpeer.c */ extern int inet_peer_threshold; extern int inet_peer_minttl; diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 2ad92ca4e6f..37c1a1ed82c 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -146,7 +146,7 @@ struct rt6_rtnl_dump_arg { extern int rt6_dump_route(struct rt6_info *rt, void *p_arg); extern void rt6_ifdown(struct net *net, struct net_device *dev); -extern void rt6_mtu_change(struct net_device *dev, unsigned mtu); +extern void rt6_mtu_change(struct net_device *dev, unsigned int mtu); extern void rt6_remove_prefsrc(struct inet6_ifaddr *ifp); @@ -175,7 +175,7 @@ static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, spin_unlock(&sk->sk_dst_lock); } -static inline int ipv6_unicast_destination(struct sk_buff *skb) +static inline bool ipv6_unicast_destination(const struct sk_buff *skb) { struct rt6_info *rt = (struct rt6_info *) skb_dst(skb); diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 10422ef14e2..78df0866cc3 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -49,7 +49,7 @@ struct fib_nh { struct net_device *nh_dev; struct hlist_node nh_hash; struct fib_info *nh_parent; - unsigned nh_flags; + unsigned int nh_flags; unsigned char nh_scope; #ifdef CONFIG_IP_ROUTE_MULTIPATH int nh_weight; @@ -74,7 +74,7 @@ struct fib_info { struct net *fib_net; int fib_treeref; atomic_t fib_clntref; - unsigned fib_flags; + unsigned int fib_flags; unsigned char fib_dead; unsigned char fib_protocol; unsigned char fib_scope; diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 72522f08737..d6146b4811c 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -10,7 +10,6 @@ #include <asm/types.h> /* for __uXX types */ -#include <linux/sysctl.h> /* for ctl_path */ #include <linux/list.h> /* for struct list_head */ #include <linux/spinlock.h> /* for struct rwlock_t */ #include <linux/atomic.h> /* for struct atomic_t */ @@ -505,6 +504,7 @@ struct ip_vs_conn { * state transition triggerd * synchronization */ + unsigned long sync_endtime; /* jiffies + sent_retries */ /* Control members */ struct ip_vs_conn *control; /* Master control connection */ @@ -580,8 +580,8 @@ struct ip_vs_service_user_kern { /* virtual service options */ char *sched_name; char *pe_name; - unsigned flags; /* virtual service flags */ - unsigned timeout; /* persistent timeout in sec */ + unsigned int flags; /* virtual service flags */ + unsigned int timeout; /* persistent timeout in sec */ u32 netmask; /* persistent netmask */ }; @@ -592,7 +592,7 @@ struct ip_vs_dest_user_kern { u16 port; /* real server options */ - unsigned conn_flags; /* connection flags */ + unsigned int conn_flags; /* connection flags */ int weight; /* destination weight */ /* thresholds for active connections */ @@ -616,8 +616,8 @@ struct ip_vs_service { union nf_inet_addr addr; /* IP address for virtual service */ __be16 port; /* port number for the service */ __u32 fwmark; /* firewall mark of the service */ - unsigned flags; /* service status flags */ - unsigned timeout; /* persistent timeout in ticks */ + unsigned int flags; /* service status 
flags */ + unsigned int timeout; /* persistent timeout in ticks */ __be32 netmask; /* grouping granularity */ struct net *net; @@ -647,7 +647,7 @@ struct ip_vs_dest { u16 af; /* address family */ __be16 port; /* port number of the server */ union nf_inet_addr addr; /* IP address of the server */ - volatile unsigned flags; /* dest status flags */ + volatile unsigned int flags; /* dest status flags */ atomic_t conn_flags; /* flags to copy to conn */ atomic_t weight; /* server weight */ @@ -784,6 +784,16 @@ struct ip_vs_app { void (*timeout_change)(struct ip_vs_app *app, int flags); }; +struct ipvs_master_sync_state { + struct list_head sync_queue; + struct ip_vs_sync_buff *sync_buff; + int sync_queue_len; + unsigned int sync_queue_delay; + struct task_struct *master_thread; + struct delayed_work master_wakeup_work; + struct netns_ipvs *ipvs; +}; + /* IPVS in network namespace */ struct netns_ipvs { int gen; /* Generation */ @@ -870,10 +880,15 @@ struct netns_ipvs { #endif int sysctl_snat_reroute; int sysctl_sync_ver; + int sysctl_sync_ports; + int sysctl_sync_qlen_max; + int sysctl_sync_sock_size; int sysctl_cache_bypass; int sysctl_expire_nodest_conn; int sysctl_expire_quiescent_template; int sysctl_sync_threshold[2]; + unsigned int sysctl_sync_refresh_period; + int sysctl_sync_retries; int sysctl_nat_icmp_send; /* ip_vs_lblc */ @@ -889,13 +904,11 @@ struct netns_ipvs { spinlock_t est_lock; struct timer_list est_timer; /* Estimation timer */ /* ip_vs_sync */ - struct list_head sync_queue; spinlock_t sync_lock; - struct ip_vs_sync_buff *sync_buff; + struct ipvs_master_sync_state *ms; spinlock_t sync_buff_lock; - struct sockaddr_in sync_mcast_addr; - struct task_struct *master_thread; - struct task_struct *backup_thread; + struct task_struct **backup_threads; + int threads_mask; int send_mesg_maxlen; int recv_mesg_maxlen; volatile int sync_state; @@ -912,6 +925,14 @@ struct netns_ipvs { #define DEFAULT_SYNC_THRESHOLD 3 #define DEFAULT_SYNC_PERIOD 50 #define DEFAULT_SYNC_VER 1 +#define DEFAULT_SYNC_REFRESH_PERIOD (0U * HZ) +#define DEFAULT_SYNC_RETRIES 0 +#define IPVS_SYNC_WAKEUP_RATE 8 +#define IPVS_SYNC_QLEN_MAX (IPVS_SYNC_WAKEUP_RATE * 4) +#define IPVS_SYNC_SEND_DELAY (HZ / 50) +#define IPVS_SYNC_CHECK_PERIOD HZ +#define IPVS_SYNC_FLUSH_TIME (HZ * 2) +#define IPVS_SYNC_PORTS_MAX (1 << 6) #ifdef CONFIG_SYSCTL @@ -922,7 +943,17 @@ static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) static inline int sysctl_sync_period(struct netns_ipvs *ipvs) { - return ipvs->sysctl_sync_threshold[1]; + return ACCESS_ONCE(ipvs->sysctl_sync_threshold[1]); +} + +static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs) +{ + return ACCESS_ONCE(ipvs->sysctl_sync_refresh_period); +} + +static inline int sysctl_sync_retries(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_sync_retries; } static inline int sysctl_sync_ver(struct netns_ipvs *ipvs) @@ -930,6 +961,21 @@ static inline int sysctl_sync_ver(struct netns_ipvs *ipvs) return ipvs->sysctl_sync_ver; } +static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) +{ + return ACCESS_ONCE(ipvs->sysctl_sync_ports); +} + +static inline int sysctl_sync_qlen_max(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_sync_qlen_max; +} + +static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_sync_sock_size; +} + #else static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) @@ -942,18 +988,43 @@ static inline int sysctl_sync_period(struct netns_ipvs *ipvs) return DEFAULT_SYNC_PERIOD; } +static 
inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs) +{ + return DEFAULT_SYNC_REFRESH_PERIOD; +} + +static inline int sysctl_sync_retries(struct netns_ipvs *ipvs) +{ + return DEFAULT_SYNC_RETRIES & 3; +} + static inline int sysctl_sync_ver(struct netns_ipvs *ipvs) { return DEFAULT_SYNC_VER; } +static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) +{ + return 1; +} + +static inline int sysctl_sync_qlen_max(struct netns_ipvs *ipvs) +{ + return IPVS_SYNC_QLEN_MAX; +} + +static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs) +{ + return 0; +} + #endif /* * IPVS core functions * (from ip_vs_core.c) */ -extern const char *ip_vs_proto_name(unsigned proto); +extern const char *ip_vs_proto_name(unsigned int proto); extern void ip_vs_init_hash_table(struct list_head *table, int rows); #define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t))) @@ -1014,7 +1085,7 @@ extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport); struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, const union nf_inet_addr *daddr, - __be16 dport, unsigned flags, + __be16 dport, unsigned int flags, struct ip_vs_dest *dest, __u32 fwmark); extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp); @@ -1184,10 +1255,8 @@ extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg); * IPVS control data and functions (from ip_vs_ctl.c) */ extern struct ip_vs_stats ip_vs_stats; -extern const struct ctl_path net_vs_ctl_path[]; extern int sysctl_ip_vs_sync_ver; -extern void ip_vs_sync_switch_mode(struct net *net, int mode); extern struct ip_vs_service * ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol, const union nf_inet_addr *vaddr, __be16 vport); @@ -1221,7 +1290,7 @@ extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp); extern int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid); extern int stop_sync_thread(struct net *net, int state); -extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp); +extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts); /* diff --git a/include/net/ipip.h b/include/net/ipip.h index a32654d5273..a93cf6d7e94 100644 --- a/include/net/ipip.h +++ b/include/net/ipip.h @@ -54,8 +54,10 @@ struct ip_tunnel_prl_entry { \ err = ip_local_out(skb); \ if (likely(net_xmit_eval(err) == 0)) { \ + u64_stats_update_begin(&(stats1)->syncp); \ (stats1)->tx_bytes += pkt_len; \ (stats1)->tx_packets++; \ + u64_stats_update_end(&(stats1)->syncp); \ } else { \ (stats2)->tx_errors++; \ (stats2)->tx_aborted_errors++; \ diff --git a/include/net/ipv6.h b/include/net/ipv6.h index e4170a22fc6..aecf88436ab 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -113,7 +113,6 @@ struct frag_hdr { /* sysctls */ extern int sysctl_mld_max_msf; -extern struct ctl_path net_ipv6_ctl_path[]; #define _DEVINC(net, statname, modifier, idev, field) \ ({ \ @@ -264,7 +263,7 @@ extern struct ipv6_txoptions * ipv6_renew_options(struct sock *sk, struct ipv6_t struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt); -extern int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb); +extern bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb); int ip6_frag_nqueues(struct net *net); int ip6_frag_mem(struct net *net); @@ -333,8 +332,8 @@ static inline void ipv6_addr_set(struct in6_addr *addr, addr->s6_addr32[3] = w4; } -static inline int ipv6_addr_equal(const struct in6_addr *a1, - const 
struct in6_addr *a2) +static inline bool ipv6_addr_equal(const struct in6_addr *a1, + const struct in6_addr *a2) { return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) | (a1->s6_addr32[1] ^ a2->s6_addr32[1]) | @@ -342,27 +341,27 @@ static inline int ipv6_addr_equal(const struct in6_addr *a1, (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0; } -static inline int __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2, - unsigned int prefixlen) +static inline bool __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2, + unsigned int prefixlen) { - unsigned pdw, pbi; + unsigned int pdw, pbi; /* check complete u32 in prefix */ pdw = prefixlen >> 5; if (pdw && memcmp(a1, a2, pdw << 2)) - return 0; + return false; /* check incomplete u32 in prefix */ pbi = prefixlen & 0x1f; if (pbi && ((a1[pdw] ^ a2[pdw]) & htonl((0xffffffff) << (32 - pbi)))) - return 0; + return false; - return 1; + return true; } -static inline int ipv6_prefix_equal(const struct in6_addr *a1, - const struct in6_addr *a2, - unsigned int prefixlen) +static inline bool ipv6_prefix_equal(const struct in6_addr *a1, + const struct in6_addr *a2, + unsigned int prefixlen) { return __ipv6_prefix_equal(a1->s6_addr32, a2->s6_addr32, prefixlen); @@ -388,21 +387,21 @@ struct ip6_create_arg { }; void ip6_frag_init(struct inet_frag_queue *q, void *a); -int ip6_frag_match(struct inet_frag_queue *q, void *a); +bool ip6_frag_match(struct inet_frag_queue *q, void *a); -static inline int ipv6_addr_any(const struct in6_addr *a) +static inline bool ipv6_addr_any(const struct in6_addr *a) { return (a->s6_addr32[0] | a->s6_addr32[1] | a->s6_addr32[2] | a->s6_addr32[3]) == 0; } -static inline int ipv6_addr_loopback(const struct in6_addr *a) +static inline bool ipv6_addr_loopback(const struct in6_addr *a) { return (a->s6_addr32[0] | a->s6_addr32[1] | a->s6_addr32[2] | (a->s6_addr32[3] ^ htonl(1))) == 0; } -static inline int ipv6_addr_v4mapped(const struct in6_addr *a) +static inline bool ipv6_addr_v4mapped(const struct in6_addr *a) { return (a->s6_addr32[0] | a->s6_addr32[1] | (a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0; @@ -412,7 +411,7 @@ static inline int ipv6_addr_v4mapped(const struct in6_addr *a) * Check for a RFC 4843 ORCHID address * (Overlay Routable Cryptographic Hash Identifiers) */ -static inline int ipv6_addr_orchid(const struct in6_addr *a) +static inline bool ipv6_addr_orchid(const struct in6_addr *a) { return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010); } @@ -560,7 +559,7 @@ extern void ipv6_push_frag_opts(struct sk_buff *skb, extern int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp, __be16 *frag_offp); -extern int ipv6_ext_hdr(u8 nexthdr); +extern bool ipv6_ext_hdr(u8 nexthdr); extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type); @@ -661,8 +660,6 @@ extern struct ctl_table *ipv6_icmp_sysctl_init(struct net *net); extern struct ctl_table *ipv6_route_sysctl_init(struct net *net); extern int ipv6_sysctl_register(void); extern void ipv6_sysctl_unregister(void); -extern int ipv6_static_sysctl_register(void); -extern void ipv6_static_sysctl_unregister(void); #endif #endif /* _NET_IPV6_H */ diff --git a/include/net/lapb.h b/include/net/lapb.h index fd2bf572ee1..df892a94f2c 100644 --- a/include/net/lapb.h +++ b/include/net/lapb.h @@ -149,4 +149,10 @@ extern int lapb_t1timer_running(struct lapb_cb *lapb); */ #define LAPB_DEBUG 0 +#define lapb_dbg(level, fmt, ...) 
\ +do { \ + if (level < LAPB_DEBUG) \ + pr_debug(fmt, ##__VA_ARGS__); \ +} while (0) + #endif diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h index 23a409381fa..6ca3113df39 100644 --- a/include/net/llc_c_ev.h +++ b/include/net/llc_c_ev.h @@ -264,6 +264,6 @@ extern int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb) { return atomic_read(&sk->sk_rmem_alloc) + skb->truesize < - (unsigned)sk->sk_rcvbuf; + (unsigned int)sk->sk_rcvbuf; } #endif /* LLC_C_EV_H */ diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index f57e7d46a45..5a93d13ac95 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h @@ -13,7 +13,6 @@ */ #include <linux/if_ether.h> -#include <linux/if_tr.h> /* Lengths of frame formats */ #define LLC_PDU_LEN_I 4 /* header and 2 control bytes */ @@ -253,10 +252,6 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa) { if (skb->protocol == htons(ETH_P_802_2)) memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN); - else if (skb->protocol == htons(ETH_P_TR_802_2)) { - memcpy(sa, tr_hdr(skb)->saddr, ETH_ALEN); - *sa &= 0x7F; - } } /** @@ -270,8 +265,6 @@ static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da) { if (skb->protocol == htons(ETH_P_802_2)) memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN); - else if (skb->protocol == htons(ETH_P_TR_802_2)) - memcpy(da, tr_hdr(skb)->daddr, ETH_ALEN); } /** diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 9210bdc7bd8..4d6e6c6818d 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -95,9 +95,11 @@ struct device; * @IEEE80211_MAX_QUEUES: Maximum number of regular device queues. */ enum ieee80211_max_queues { - IEEE80211_MAX_QUEUES = 4, + IEEE80211_MAX_QUEUES = 16, }; +#define IEEE80211_INVAL_HW_QUEUE 0xff + /** * enum ieee80211_ac_numbers - AC numbers as used in mac80211 * @IEEE80211_AC_VO: voice @@ -244,7 +246,7 @@ enum ieee80211_rssi_event { * @channel_type: Channel type for this BSS -- the hardware might be * configured for HT40+ while this BSS only uses no-HT, for * example. - * @ht_operation_mode: HT operation mode (like in &struct ieee80211_ht_info). + * @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation. * This field is only valid when the channel type is one of the HT types. 
* @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value * implies disabled @@ -522,7 +524,7 @@ struct ieee80211_tx_rate { * * @flags: transmit info flags, defined above * @band: the band to transmit on (use for checking for races) - * @antenna_sel_tx: antenna to use, 0 for automatic diversity + * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC * @ack_frame_id: internal frame ID for TX status, used internally * @control: union for control data * @status: union for status data @@ -538,7 +540,7 @@ struct ieee80211_tx_info { u32 flags; u8 band; - u8 antenna_sel_tx; + u8 hw_queue; u16 ack_frame_id; @@ -564,7 +566,8 @@ struct ieee80211_tx_info { u8 ampdu_ack_len; int ack_signal; u8 ampdu_len; - /* 15 bytes free */ + u8 antenna; + /* 14 bytes free */ } status; struct { struct ieee80211_tx_rate driver_rates[ @@ -888,6 +891,8 @@ enum ieee80211_vif_flags { * these need to be set (or cleared) when the interface is added * or, if supported by the driver, the interface type is changed * at runtime, mac80211 will never touch this field + * @hw_queue: hardware queue for each AC + * @cab_queue: content-after-beacon (DTIM beacon really) queue, AP mode only * @drv_priv: data area for driver use, will always be aligned to * sizeof(void *). */ @@ -896,7 +901,12 @@ struct ieee80211_vif { struct ieee80211_bss_conf bss_conf; u8 addr[ETH_ALEN]; bool p2p; + + u8 cab_queue; + u8 hw_queue[IEEE80211_NUM_ACS]; + u32 driver_flags; + /* must be last */ u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *)))); }; @@ -1174,6 +1184,15 @@ enum sta_notify_cmd { * @IEEE80211_HW_SCAN_WHILE_IDLE: The device can do hw scan while * being idle (i.e. mac80211 doesn't have to go idle-off during the * the scan). + * + * @IEEE80211_HW_WANT_MONITOR_VIF: The driver would like to be informed of + * a virtual monitor interface when monitor interfaces are the only + * active interfaces. + * + * @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface + * queue mapping in order to use different queues (not just one per AC) + * for different virtual interfaces. See the doc section on HW queue + * control for more details. */ enum ieee80211_hw_flags { IEEE80211_HW_HAS_RATE_CONTROL = 1<<0, @@ -1190,13 +1209,13 @@ enum ieee80211_hw_flags { IEEE80211_HW_PS_NULLFUNC_STACK = 1<<11, IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12, IEEE80211_HW_MFP_CAPABLE = 1<<13, - /* reuse bit 14 */ + IEEE80211_HW_WANT_MONITOR_VIF = 1<<14, IEEE80211_HW_SUPPORTS_STATIC_SMPS = 1<<15, IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS = 1<<16, IEEE80211_HW_SUPPORTS_UAPSD = 1<<17, IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18, IEEE80211_HW_CONNECTION_MONITOR = 1<<19, - /* reuse bit 20 */ + IEEE80211_HW_QUEUE_CONTROL = 1<<20, IEEE80211_HW_SUPPORTS_PER_STA_GTK = 1<<21, IEEE80211_HW_AP_LINK_PS = 1<<22, IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23, @@ -1266,6 +1285,9 @@ enum ieee80211_hw_flags { * @max_tx_aggregation_subframes: maximum number of subframes in an * aggregate an HT driver will transmit, used by the peer as a * hint to size its reorder buffer. 
+ * + * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX + * (if %IEEE80211_HW_QUEUE_CONTROL is set) */ struct ieee80211_hw { struct ieee80211_conf conf; @@ -1286,6 +1308,7 @@ struct ieee80211_hw { u8 max_rate_tries; u8 max_rx_aggregation_subframes; u8 max_tx_aggregation_subframes; + u8 offchannel_tx_hw_queue; }; /** @@ -1694,6 +1717,61 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); */ /** + * DOC: HW queue control + * + * Before HW queue control was introduced, mac80211 only had a single static + * assignment of per-interface AC software queues to hardware queues. This + * was problematic for a few reasons: + * 1) off-channel transmissions might get stuck behind other frames + * 2) multiple virtual interfaces couldn't be handled correctly + * 3) after-DTIM frames could get stuck behind other frames + * + * To solve this, hardware typically uses multiple different queues for all + * the different usages, and this needs to be propagated into mac80211 so it + * won't have the same problem with the software queues. + * + * Therefore, mac80211 now offers the %IEEE80211_HW_QUEUE_CONTROL capability + * flag that tells it that the driver implements its own queue control. To do + * so, the driver will set up the various queues in each &struct ieee80211_vif + * and the offchannel queue in &struct ieee80211_hw. In response, mac80211 will + * use those queue IDs in the hw_queue field of &struct ieee80211_tx_info and + * if necessary will queue the frame on the right software queue that mirrors + * the hardware queue. + * Additionally, the driver has to then use these HW queue IDs for the queue + * management functions (ieee80211_stop_queue() et al.) + * + * The driver is free to set up the queue mappings as needed, multiple virtual + * interfaces may map to the same hardware queues if needed. The setup has to + * happen during add_interface or change_interface callbacks. For example, a + * driver supporting station+station and station+AP modes might decide to have + * 10 hardware queues to handle different scenarios: + * + * 4 AC HW queues for 1st vif: 0, 1, 2, 3 + * 4 AC HW queues for 2nd vif: 4, 5, 6, 7 + * after-DTIM queue for AP: 8 + * off-channel queue: 9 + * + * It would then set up the hardware like this: + * hw.offchannel_tx_hw_queue = 9 + * + * and the first virtual interface that is added as follows: + * vif.hw_queue[IEEE80211_AC_VO] = 0 + * vif.hw_queue[IEEE80211_AC_VI] = 1 + * vif.hw_queue[IEEE80211_AC_BE] = 2 + * vif.hw_queue[IEEE80211_AC_BK] = 3 + * vif.cab_queue = 8 // if AP mode, otherwise %IEEE80211_INVAL_HW_QUEUE + * and the second virtual interface with 4-7. + * + * If queue 6 gets full, for example, mac80211 would only stop the second + * virtual interface's BE queue since virtual interface queues are per AC. + * + * Note that the vif.cab_queue value should be set to %IEEE80211_INVAL_HW_QUEUE + * whenever the queue is not used (i.e. the interface is not in AP mode) if the + * queue could potentially be shared since mac80211 will look at cab_queue when + * a queue is stopped/woken even if the interface is not in AP mode. + */ + +/** * enum ieee80211_filter_flags - hardware filter flags * * These flags determine what the filter in hardware should be @@ -1780,6 +1858,18 @@ enum ieee80211_frame_release_type { }; /** + * enum ieee80211_rate_control_changed - flags to indicate what changed + * + * @IEEE80211_RC_BW_CHANGED: The bandwidth that can be used to transmit + * to this station changed. 
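The "HW queue control" documentation above already spells out a concrete layout: HW queues 0-3 and 4-7 for two virtual interfaces, queue 8 for after-DTIM traffic and queue 9 for off-channel transmissions. A minimal sketch, assuming a hypothetical driver-private structure with a vif counter, of how an add_interface callback could apply that layout; everything named example_* is illustrative and not part of this patch:

	struct example_priv {
		int vif_count;			/* hypothetical bookkeeping */
	};

	static int example_add_interface(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif)
	{
		struct example_priv *priv = hw->priv;
		/* first vif uses HW queues 0-3, the second 4-7, as described above */
		int base = priv->vif_count * IEEE80211_NUM_ACS;
		int ac;

		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			vif->hw_queue[ac] = base + ac;

		/* queue 8 carries after-DTIM frames only when acting as an AP */
		vif->cab_queue = vif->type == NL80211_IFTYPE_AP ?
				 8 : IEEE80211_INVAL_HW_QUEUE;

		priv->vif_count++;
		return 0;
	}

Before ieee80211_register_hw() such a driver would also set hw->offchannel_tx_hw_queue = 9 and advertise IEEE80211_HW_QUEUE_CONTROL in hw->flags, so that mac80211 mirrors these IDs into the hw_queue field of struct ieee80211_tx_info.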
+ * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed. + */ +enum ieee80211_rate_control_changed { + IEEE80211_RC_BW_CHANGED = BIT(0), + IEEE80211_RC_SMPS_CHANGED = BIT(1), +}; + +/** * struct ieee80211_ops - callbacks from mac80211 to the driver * * This structure contains various callbacks that the driver may @@ -1980,6 +2070,14 @@ enum ieee80211_frame_release_type { * up the list of states. * The callback can sleep. * + * @sta_rc_update: Notifies the driver of changes to the bitrates that can be + * used to transmit to the station. The changes are advertised with bits + * from &enum ieee80211_rate_control_changed and the values are reflected + * in the station data. This callback should only be used when the driver + * uses hardware rate control (%IEEE80211_HW_HAS_RATE_CONTROL) since + * otherwise the rate control algorithm is notified directly. + * Must be atomic. + * * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max), * bursting) for a hardware TX queue. * Returns a negative error code on failure. @@ -2125,6 +2223,14 @@ enum ieee80211_frame_release_type { * The @tids parameter is a bitmap and tells the driver which TIDs the * frames will be on; it will at most have two bits set. * This callback must be atomic. + * + * @get_et_sset_count: Ethtool API to get string-set count. + * + * @get_et_stats: Ethtool API to get a set of u64 stats. + * + * @get_et_strings: Ethtool API to get a set of strings to describe stats + * and perhaps other supported types of ethtool data-sets. + * */ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); @@ -2135,6 +2241,7 @@ struct ieee80211_ops { #ifdef CONFIG_PM int (*suspend)(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); int (*resume)(struct ieee80211_hw *hw); + void (*set_wakeup)(struct ieee80211_hw *hw, bool enabled); #endif int (*add_interface)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); @@ -2196,8 +2303,12 @@ struct ieee80211_ops { struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state); + void (*sta_rc_update)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 changed); int (*conf_tx)(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u16 queue, + struct ieee80211_vif *vif, u16 ac, const struct ieee80211_tx_queue_params *params); u64 (*get_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void (*set_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -2250,6 +2361,15 @@ struct ieee80211_ops { u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data); + + int (*get_et_sset_count)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int sset); + void (*get_et_stats)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ethtool_stats *stats, u64 *data); + void (*get_et_strings)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 sset, u8 *data); }; /** @@ -2844,6 +2964,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, */ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum ieee80211_band band, size_t frame_len, struct ieee80211_rate *rate); @@ -3512,19 +3633,6 @@ void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn); /* Rate control API */ /** - * enum rate_control_changed - flags to indicate which parameter changed - * - * @IEEE80211_RC_HT_CHANGED: The HT parameters of the operating channel have - * changed, rate control algorithm can update 
its internal state if needed. - * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed, the rate - * control algorithm needs to adjust accordingly. - */ -enum rate_control_changed { - IEEE80211_RC_HT_CHANGED = BIT(0), - IEEE80211_RC_SMPS_CHANGED = BIT(1), -}; - -/** * struct ieee80211_tx_rate_control - rate control information for/from RC algo * * @hw: The hardware the algorithm is invoked for. @@ -3569,9 +3677,8 @@ struct rate_control_ops { void (*rate_init)(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta); void (*rate_update)(void *priv, struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, - void *priv_sta, u32 changed, - enum nl80211_channel_type oper_chan_type); + struct ieee80211_sta *sta, void *priv_sta, + u32 changed); void (*free_sta)(void *priv, struct ieee80211_sta *sta, void *priv_sta); @@ -3706,8 +3813,20 @@ void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif, void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif); -int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb); +int ieee80211_add_srates_ie(struct ieee80211_vif *vif, + struct sk_buff *skb, bool need_basic); int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, - struct sk_buff *skb); + struct sk_buff *skb, bool need_basic); + +/** + * ieee80211_ave_rssi - report the average rssi for the specified interface + * + * @vif: the specified virtual interface + * + * This function return the average rssi value for the requested interface. + * It assumes that the given vif is valid. + */ +int ieee80211_ave_rssi(struct ieee80211_vif *vif); + #endif /* MAC80211_H */ diff --git a/include/net/mac802154.h b/include/net/mac802154.h new file mode 100644 index 00000000000..c9f8ab5cc68 --- /dev/null +++ b/include/net/mac802154.h @@ -0,0 +1,136 @@ +/* + * IEEE802.15.4-2003 specification + * + * Copyright (C) 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ +#ifndef NET_MAC802154_H +#define NET_MAC802154_H + +#include <net/af_ieee802154.h> + +/* The following flags are used to indicate changed address settings from + * the stack to the hardware. + */ + +/* indicates that the Short Address changed */ +#define IEEE802515_AFILT_SADDR_CHANGED 0x00000001 +/* indicates that the IEEE Address changed */ +#define IEEE802515_AFILT_IEEEADDR_CHANGED 0x00000002 +/* indicates that the PAN ID changed */ +#define IEEE802515_AFILT_PANID_CHANGED 0x00000004 +/* indicates that PAN Coordinator status changed */ +#define IEEE802515_AFILT_PANC_CHANGED 0x00000008 + +struct ieee802154_hw_addr_filt { + __le16 pan_id; /* Each independent PAN selects a unique + * identifier. This PAN id allows communication + * between devices within a network using short + * addresses and enables transmissions between + * devices across independent networks. 
+ */ + __le16 short_addr; + u8 ieee_addr[IEEE802154_ADDR_LEN]; + u8 pan_coord; +}; + +struct ieee802154_dev { + /* filled by the driver */ + int extra_tx_headroom; + u32 flags; + struct device *parent; + + /* filled by mac802154 core */ + struct ieee802154_hw_addr_filt hw_filt; + void *priv; + struct wpan_phy *phy; +}; + +/* Checksum is in hardware and is omitted from a packet + * + * These following flags are used to indicate hardware capabilities to + * the stack. Generally, flags here should have their meaning + * done in a way that the simplest hardware doesn't need setting + * any particular flags. There are some exceptions to this rule, + * however, so you are advised to review these flags carefully. + */ + +/* Indicates that receiver omits FCS and xmitter will add FCS on it's own. */ +#define IEEE802154_HW_OMIT_CKSUM 0x00000001 +/* Indicates that receiver will autorespond with ACK frames. */ +#define IEEE802154_HW_AACK 0x00000002 + +/* struct ieee802154_ops - callbacks from mac802154 to the driver + * + * This structure contains various callbacks that the driver may + * handle or, in some cases, must handle, for example to transmit + * a frame. + * + * start: Handler that 802.15.4 module calls for device initialization. + * This function is called before the first interface is attached. + * + * stop: Handler that 802.15.4 module calls for device cleanup. + * This function is called after the last interface is removed. + * + * xmit: Handler that 802.15.4 module calls for each transmitted frame. + * skb cntains the buffer starting from the IEEE 802.15.4 header. + * The low-level driver should send the frame based on available + * configuration. + * This function should return zero or negative errno. Called with + * pib_lock held. + * + * ed: Handler that 802.15.4 module calls for Energy Detection. + * This function should place the value for detected energy + * (usually device-dependant) in the level pointer and return + * either zero or negative errno. Called with pib_lock held. + * + * set_channel: + * Set radio for listening on specific channel. + * Set the device for listening on specified channel. + * Returns either zero, or negative errno. Called with pib_lock held. + * + * set_hw_addr_filt: + * Set radio for listening on specific address. + * Set the device for listening on specified address. + * Returns either zero, or negative errno. 
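The callbacks described above map one-to-one onto struct ieee802154_ops declared just below; a minimal sketch, with all hardware access stubbed out and every foo_* name hypothetical, of how a radio driver could wire them up and register with the mac802154 core (this assumes ieee802154_alloc_device() returns the driver-private area, sized by its first argument, through dev->priv):

	struct foo_priv {
		int irq;			/* hypothetical driver state */
	};

	static int foo_start(struct ieee802154_dev *dev)
	{
		return 0;			/* power up the transceiver */
	}

	static void foo_stop(struct ieee802154_dev *dev)
	{
	}

	static int foo_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
	{
		/* skb starts at the IEEE 802.15.4 header; push it to the hardware */
		return 0;
	}

	static int foo_ed(struct ieee802154_dev *dev, u8 *level)
	{
		*level = 0;			/* report detected energy */
		return 0;
	}

	static int foo_set_channel(struct ieee802154_dev *dev, int page, int channel)
	{
		return 0;			/* retune the radio */
	}

	static struct ieee802154_ops foo_ops = {
		.owner		= THIS_MODULE,
		.start		= foo_start,
		.stop		= foo_stop,
		.xmit		= foo_xmit,
		.ed		= foo_ed,
		.set_channel	= foo_set_channel,
	};

	static int foo_probe(void)
	{
		struct ieee802154_dev *dev;
		int err;

		dev = ieee802154_alloc_device(sizeof(struct foo_priv), &foo_ops);
		if (!dev)
			return -ENOMEM;
		dev->flags = IEEE802154_HW_OMIT_CKSUM;
		err = ieee802154_register_device(dev);
		if (err)
			ieee802154_free_device(dev);
		return err;
	}

Received frames would then be handed to the stack from the driver's interrupt path with ieee802154_rx_irqsafe(dev, skb, lqi).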
+ */ +struct ieee802154_ops { + struct module *owner; + int (*start)(struct ieee802154_dev *dev); + void (*stop)(struct ieee802154_dev *dev); + int (*xmit)(struct ieee802154_dev *dev, + struct sk_buff *skb); + int (*ed)(struct ieee802154_dev *dev, u8 *level); + int (*set_channel)(struct ieee802154_dev *dev, + int page, + int channel); + int (*set_hw_addr_filt)(struct ieee802154_dev *dev, + struct ieee802154_hw_addr_filt *filt, + unsigned long changed); + int (*ieee_addr)(struct ieee802154_dev *dev, + u8 addr[IEEE802154_ADDR_LEN]); +}; + +/* Basic interface to register ieee802154 device */ +struct ieee802154_dev * +ieee802154_alloc_device(size_t priv_data_lex, struct ieee802154_ops *ops); +void ieee802154_free_device(struct ieee802154_dev *dev); +int ieee802154_register_device(struct ieee802154_dev *dev); +void ieee802154_unregister_device(struct ieee802154_dev *dev); + +void ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb, + u8 lqi); + +#endif /* NET_MAC802154_H */ diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 6f9c25a76cd..c02b6ad3f6c 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -34,6 +34,7 @@ enum { __ND_OPT_ARRAY_MAX, ND_OPT_ROUTE_INFO = 24, /* RFC4191 */ ND_OPT_RDNSS = 25, /* RFC5006 */ + ND_OPT_DNSSL = 31, /* RFC6106 */ __ND_OPT_MAX }; diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 34c996f4618..6cdfeedb650 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -195,7 +195,6 @@ static inline void *neighbour_priv(const struct neighbour *n) #define NEIGH_UPDATE_F_ADMIN 0x80000000 extern void neigh_table_init(struct neigh_table *tbl); -extern void neigh_table_init_no_netlink(struct neigh_table *tbl); extern int neigh_table_clear(struct neigh_table *tbl); extern struct neighbour * neigh_lookup(struct neigh_table *tbl, const void *pkey, @@ -323,7 +322,7 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) #ifdef CONFIG_BRIDGE_NETFILTER static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) { - unsigned seq, hh_alen; + unsigned int seq, hh_alen; do { seq = read_seqbegin(&hh->hh_lock); @@ -336,7 +335,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) { - unsigned seq; + unsigned int seq; int hh_len; do { diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index ee547c14981..ac9195e6a06 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -279,14 +279,25 @@ extern void unregister_pernet_subsys(struct pernet_operations *); extern int register_pernet_device(struct pernet_operations *); extern void unregister_pernet_device(struct pernet_operations *); -struct ctl_path; struct ctl_table; struct ctl_table_header; -extern struct ctl_table_header *register_net_sysctl_table(struct net *net, - const struct ctl_path *path, struct ctl_table *table); -extern struct ctl_table_header *register_net_sysctl_rotable( - const struct ctl_path *path, struct ctl_table *table); +#ifdef CONFIG_SYSCTL +extern int net_sysctl_init(void); +extern struct ctl_table_header *register_net_sysctl(struct net *net, + const char *path, struct ctl_table *table); extern void unregister_net_sysctl_table(struct ctl_table_header *header); +#else +static inline int net_sysctl_init(void) { return 0; } +static inline struct ctl_table_header *register_net_sysctl(struct net *net, + const char *path, struct ctl_table *table) +{ + return NULL; +} +static 
inline void unregister_net_sysctl_table(struct ctl_table_header *header) +{ +} +#endif + #endif /* __NET_NET_NAMESPACE_H */ diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index ab86036bbf0..cce7f6a798b 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -321,14 +321,8 @@ extern unsigned int nf_conntrack_max; extern unsigned int nf_conntrack_hash_rnd; void init_nf_conntrack_hash_rnd(void); -#define NF_CT_STAT_INC(net, count) \ - __this_cpu_inc((net)->ct.stat->count) -#define NF_CT_STAT_INC_ATOMIC(net, count) \ -do { \ - local_bh_disable(); \ - __this_cpu_inc((net)->ct.stat->count); \ - local_bh_enable(); \ -} while (0) +#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) +#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count) #define MODULE_ALIAS_NFCT_HELPER(helper) \ MODULE_ALIAS("nfct-helper-" helper) diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h index 5767dc242de..1d1889409b9 100644 --- a/include/net/netfilter/nf_conntrack_helper.h +++ b/include/net/netfilter/nf_conntrack_helper.h @@ -60,8 +60,8 @@ static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct) return nf_ct_ext_find(ct, NF_CT_EXT_HELPER); } -extern int nf_conntrack_helper_init(void); -extern void nf_conntrack_helper_fini(void); +extern int nf_conntrack_helper_init(struct net *net); +extern void nf_conntrack_helper_fini(struct net *net); extern int nf_conntrack_broadcast_help(struct sk_buff *skb, unsigned int protoff, diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h index e8010f445ae..9699c028b74 100644 --- a/include/net/netfilter/nf_conntrack_l3proto.h +++ b/include/net/netfilter/nf_conntrack_l3proto.h @@ -65,7 +65,7 @@ struct nf_conntrack_l3proto { #ifdef CONFIG_SYSCTL struct ctl_table_header *ctl_table_header; - struct ctl_path *ctl_table_path; + const char *ctl_table_path; struct ctl_table *ctl_table; #endif /* CONFIG_SYSCTL */ diff --git a/include/net/netlink.h b/include/net/netlink.h index f394fe5d764..785f37a3b44 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -102,20 +102,6 @@ * nla_put_flag(skb, type) add flag attribute to skb * nla_put_msecs(skb, type, jiffies) add msecs attribute to skb * - * Exceptions Based Attribute Construction: - * NLA_PUT(skb, type, len, data) add attribute to skb - * NLA_PUT_U8(skb, type, value) add u8 attribute to skb - * NLA_PUT_U16(skb, type, value) add u16 attribute to skb - * NLA_PUT_U32(skb, type, value) add u32 attribute to skb - * NLA_PUT_U64(skb, type, value) add u64 attribute to skb - * NLA_PUT_STRING(skb, type, str) add string attribute to skb - * NLA_PUT_FLAG(skb, type) add flag attribute to skb - * NLA_PUT_MSECS(skb, type, jiffies) add msecs attribute to skb - * - * The meaning of these functions is equal to their lower case - * variants but they jump to the label nla_put_failure in case - * of a failure. 
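With the exception-based NLA_PUT*() wrappers removed, message construction checks the return value of the lower-case nla_put_*() helpers directly and branches to the error label by hand. A minimal sketch of the converted pattern; the two attribute constants are hypothetical placeholders, not attributes defined by this patch:

	static int example_fill_info(struct sk_buff *skb, u32 value, __be16 port)
	{
		/* each nla_put_*() helper returns a negative error (-EMSGSIZE)
		 * when the message has no room left for the attribute
		 */
		if (nla_put_u32(skb, EXAMPLE_ATTR_VALUE, value) ||
		    nla_put_net16(skb, EXAMPLE_ATTR_PORT, port))
			goto nla_put_failure;

		return 0;

	nla_put_failure:
		return -EMSGSIZE;
	}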
- * * Nested Attributes Construction: * nla_nest_start(skb, type) start a nested attribute * nla_nest_end(skb, nla) finalize a nested attribute @@ -772,6 +758,39 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value) } /** + * nla_put_be16 - Add a __be16 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value) +{ + return nla_put(skb, attrtype, sizeof(__be16), &value); +} + +/** + * nla_put_net16 - Add 16-bit network byte order netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value) +{ + return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value); +} + +/** + * nla_put_le16 - Add a __le16 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value) +{ + return nla_put(skb, attrtype, sizeof(__le16), &value); +} + +/** * nla_put_u32 - Add a u32 netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type @@ -783,7 +802,40 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value) } /** - * nla_put_64 - Add a u64 netlink attribute to a socket buffer + * nla_put_be32 - Add a __be32 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value) +{ + return nla_put(skb, attrtype, sizeof(__be32), &value); +} + +/** + * nla_put_net32 - Add 32-bit network byte order netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value) +{ + return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value); +} + +/** + * nla_put_le32 - Add a __le32 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value) +{ + return nla_put(skb, attrtype, sizeof(__le32), &value); +} + +/** + * nla_put_u64 - Add a u64 netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type * @value: numeric value @@ -794,6 +846,39 @@ static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value) } /** + * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value) +{ + return nla_put(skb, attrtype, sizeof(__be64), &value); +} + +/** + * nla_put_net64 - Add 64-bit network byte order netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value) +{ + return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value); +} + +/** + * nla_put_le64 - Add a __le64 netlink attribute to a 
socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value) +{ + return nla_put(skb, attrtype, sizeof(__le64), &value); +} + +/** * nla_put_string - Add a string netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type @@ -828,60 +913,6 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype, return nla_put(skb, attrtype, sizeof(u64), &tmp); } -#define NLA_PUT(skb, attrtype, attrlen, data) \ - do { \ - if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \ - goto nla_put_failure; \ - } while(0) - -#define NLA_PUT_TYPE(skb, type, attrtype, value) \ - do { \ - type __tmp = value; \ - NLA_PUT(skb, attrtype, sizeof(type), &__tmp); \ - } while(0) - -#define NLA_PUT_U8(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, u8, attrtype, value) - -#define NLA_PUT_U16(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, u16, attrtype, value) - -#define NLA_PUT_LE16(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, __le16, attrtype, value) - -#define NLA_PUT_BE16(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, __be16, attrtype, value) - -#define NLA_PUT_NET16(skb, attrtype, value) \ - NLA_PUT_BE16(skb, attrtype | NLA_F_NET_BYTEORDER, value) - -#define NLA_PUT_U32(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, u32, attrtype, value) - -#define NLA_PUT_BE32(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, __be32, attrtype, value) - -#define NLA_PUT_NET32(skb, attrtype, value) \ - NLA_PUT_BE32(skb, attrtype | NLA_F_NET_BYTEORDER, value) - -#define NLA_PUT_U64(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, u64, attrtype, value) - -#define NLA_PUT_BE64(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, __be64, attrtype, value) - -#define NLA_PUT_NET64(skb, attrtype, value) \ - NLA_PUT_BE64(skb, attrtype | NLA_F_NET_BYTEORDER, value) - -#define NLA_PUT_STRING(skb, attrtype, value) \ - NLA_PUT(skb, attrtype, strlen(value) + 1, value) - -#define NLA_PUT_FLAG(skb, attrtype) \ - NLA_PUT(skb, attrtype, 0, NULL) - -#define NLA_PUT_MSECS(skb, attrtype, jiffies) \ - NLA_PUT_U64(skb, attrtype, jiffies_to_msecs(jiffies)) - /** * nla_get_u32 - return payload of u32 attribute * @nla: u32 netlink attribute diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index 7a911eca0f1..a053a19870c 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -26,11 +26,14 @@ struct netns_ct { int sysctl_tstamp; int sysctl_checksum; unsigned int sysctl_log_invalid; /* Log invalid packets */ + int sysctl_auto_assign_helper; + bool auto_assign_helper_warned; #ifdef CONFIG_SYSCTL struct ctl_table_header *sysctl_header; struct ctl_table_header *acct_sysctl_header; struct ctl_table_header *tstamp_sysctl_header; struct ctl_table_header *event_sysctl_header; + struct ctl_table_header *helper_sysctl_header; #endif char *slabname; }; diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h index 548d78f2cc4..c06ac58ca10 100644 --- a/include/net/netns/hash.h +++ b/include/net/netns/hash.h @@ -5,7 +5,7 @@ struct net; -static inline unsigned net_hash_mix(struct net *net) +static inline unsigned int net_hash_mix(struct net *net) { #ifdef CONFIG_NET_NS /* diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 81abfcb2eb4..b42be53587b 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -12,7 +12,9 @@ struct ctl_table_header; struct netns_sysctl_ipv6 { #ifdef CONFIG_SYSCTL - struct ctl_table_header 
*table; + struct ctl_table_header *hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *icmp_hdr; struct ctl_table_header *frags_hdr; #endif int bindv6only; diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h new file mode 100644 index 00000000000..aca65a5a9d0 --- /dev/null +++ b/include/net/nfc/hci.h @@ -0,0 +1,198 @@ +/* + * Copyright (C) 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef __NET_HCI_H +#define __NET_HCI_H + +#include <linux/skbuff.h> + +#include <net/nfc/nfc.h> + +struct nfc_hci_dev; + +struct nfc_hci_ops { + int (*open) (struct nfc_hci_dev *hdev); + void (*close) (struct nfc_hci_dev *hdev); + int (*hci_ready) (struct nfc_hci_dev *hdev); + int (*xmit) (struct nfc_hci_dev *hdev, struct sk_buff *skb); + int (*start_poll) (struct nfc_hci_dev *hdev, u32 protocols); + int (*target_from_gate) (struct nfc_hci_dev *hdev, u8 gate, + struct nfc_target *target); + int (*complete_target_discovered) (struct nfc_hci_dev *hdev, u8 gate, + struct nfc_target *target); + int (*data_exchange) (struct nfc_hci_dev *hdev, + struct nfc_target *target, + struct sk_buff *skb, struct sk_buff **res_skb); +}; + +#define NFC_HCI_MAX_CUSTOM_GATES 15 +struct nfc_hci_init_data { + u8 gate_count; + u8 gates[NFC_HCI_MAX_CUSTOM_GATES]; + char session_id[9]; +}; + +typedef int (*xmit) (struct sk_buff *skb, void *cb_data); + +#define NFC_HCI_MAX_GATES 256 + +struct nfc_hci_dev { + struct nfc_dev *ndev; + + u32 max_data_link_payload; + + struct mutex msg_tx_mutex; + + struct list_head msg_tx_queue; + + struct workqueue_struct *msg_tx_wq; + struct work_struct msg_tx_work; + + struct timer_list cmd_timer; + struct hci_msg *cmd_pending_msg; + + struct sk_buff_head rx_hcp_frags; + + struct workqueue_struct *msg_rx_wq; + struct work_struct msg_rx_work; + + struct sk_buff_head msg_rx_queue; + + struct nfc_hci_ops *ops; + + struct nfc_hci_init_data init_data; + + void *clientdata; + + u8 gate2pipe[NFC_HCI_MAX_GATES]; + + bool poll_started; + struct nfc_target *targets; + int target_count; + + u8 sw_romlib; + u8 sw_patch; + u8 sw_flashlib_major; + u8 sw_flashlib_minor; + + u8 hw_derivative; + u8 hw_version; + u8 hw_mpw; + u8 hw_software; + u8 hw_bsid; +}; + +/* hci device allocation */ +struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, + struct nfc_hci_init_data *init_data, + u32 protocols, + int tx_headroom, + int tx_tailroom, + int max_link_payload); +void nfc_hci_free_device(struct nfc_hci_dev *hdev); + +int nfc_hci_register_device(struct nfc_hci_dev *hdev); +void nfc_hci_unregister_device(struct nfc_hci_dev *hdev); + +void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata); +void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev); + +/* Host IDs */ +#define NFC_HCI_HOST_CONTROLLER_ID 0x00 +#define NFC_HCI_TERMINAL_HOST_ID 0x01 
+#define NFC_HCI_UICC_HOST_ID 0x02 + +/* Host Controller Gates and registry indexes */ +#define NFC_HCI_ADMIN_GATE 0x00 +#define NFC_HCI_ADMIN_SESSION_IDENTITY 0x01 +#define NFC_HCI_ADMIN_MAX_PIPE 0x02 +#define NFC_HCI_ADMIN_WHITELIST 0x03 +#define NFC_HCI_ADMIN_HOST_LIST 0x04 + +#define NFC_HCI_LOOPBACK_GATE 0x04 + +#define NFC_HCI_ID_MGMT_GATE 0x05 +#define NFC_HCI_ID_MGMT_VERSION_SW 0x01 +#define NFC_HCI_ID_MGMT_VERSION_HW 0x03 +#define NFC_HCI_ID_MGMT_VENDOR_NAME 0x04 +#define NFC_HCI_ID_MGMT_MODEL_ID 0x05 +#define NFC_HCI_ID_MGMT_HCI_VERSION 0x02 +#define NFC_HCI_ID_MGMT_GATES_LIST 0x06 + +#define NFC_HCI_LINK_MGMT_GATE 0x06 +#define NFC_HCI_LINK_MGMT_REC_ERROR 0x01 + +#define NFC_HCI_RF_READER_B_GATE 0x11 +#define NFC_HCI_RF_READER_B_PUPI 0x03 +#define NFC_HCI_RF_READER_B_APPLICATION_DATA 0x04 +#define NFC_HCI_RF_READER_B_AFI 0x02 +#define NFC_HCI_RF_READER_B_HIGHER_LAYER_RESPONSE 0x01 +#define NFC_HCI_RF_READER_B_HIGHER_LAYER_DATA 0x05 + +#define NFC_HCI_RF_READER_A_GATE 0x13 +#define NFC_HCI_RF_READER_A_UID 0x02 +#define NFC_HCI_RF_READER_A_ATQA 0x04 +#define NFC_HCI_RF_READER_A_APPLICATION_DATA 0x05 +#define NFC_HCI_RF_READER_A_SAK 0x03 +#define NFC_HCI_RF_READER_A_FWI_SFGT 0x06 +#define NFC_HCI_RF_READER_A_DATARATE_MAX 0x01 + +#define NFC_HCI_TYPE_A_SEL_PROT(x) (((x) & 0x60) >> 5) +#define NFC_HCI_TYPE_A_SEL_PROT_MIFARE 0 +#define NFC_HCI_TYPE_A_SEL_PROT_ISO14443 1 +#define NFC_HCI_TYPE_A_SEL_PROT_DEP 2 +#define NFC_HCI_TYPE_A_SEL_PROT_ISO14443_DEP 3 + +/* Generic events */ +#define NFC_HCI_EVT_HCI_END_OF_OPERATION 0x01 +#define NFC_HCI_EVT_POST_DATA 0x02 +#define NFC_HCI_EVT_HOT_PLUG 0x03 + +/* Reader RF gates events */ +#define NFC_HCI_EVT_READER_REQUESTED 0x10 +#define NFC_HCI_EVT_END_OPERATION 0x11 + +/* Reader Application gate events */ +#define NFC_HCI_EVT_TARGET_DISCOVERED 0x10 + +/* receiving messages from lower layer */ +void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result, + struct sk_buff *skb); +void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, + struct sk_buff *skb); +void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, + struct sk_buff *skb); +void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb); + +/* connecting to gates and sending hci instructions */ +int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate); +int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate); +int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev); +int nfc_hci_get_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx, + struct sk_buff **skb); +int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx, + const u8 *param, size_t param_len); +int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd, + const u8 *param, size_t param_len, struct sk_buff **skb); +int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response, + const u8 *param, size_t param_len); +int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event, + const u8 *param, size_t param_len); + +#endif /* __NET_HCI_H */ diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index bac070bf351..9a2505a5b8d 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -62,10 +62,12 @@ struct nfc_ops { int (*data_exchange)(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context); + int (*check_presence)(struct nfc_dev *dev, u32 target_idx); }; #define NFC_TARGET_IDX_ANY -1 #define NFC_MAX_GT_LEN 48 +#define NFC_TARGET_IDX_NONE 0xffffffff struct nfc_target { u32 
idx; @@ -78,6 +80,8 @@ struct nfc_target { u8 sensb_res[NFC_SENSB_RES_MAXSIZE]; u8 sensf_res_len; u8 sensf_res[NFC_SENSF_RES_MAXSIZE]; + u8 hci_reader_gate; + u8 logical_idx; }; struct nfc_genl_data { @@ -86,7 +90,8 @@ struct nfc_genl_data { }; struct nfc_dev { - unsigned idx; + unsigned int idx; + u32 target_next_idx; struct nfc_target *targets; int n_targets; int targets_generation; @@ -94,7 +99,7 @@ struct nfc_dev { struct device dev; bool dev_up; bool polling; - bool remote_activated; + u32 activated_target_idx; bool dep_link_up; u32 dep_rf_mode; struct nfc_genl_data genl_data; @@ -103,6 +108,10 @@ struct nfc_dev { int tx_headroom; int tx_tailroom; + struct timer_list check_pres_timer; + struct workqueue_struct *check_pres_wq; + struct work_struct check_pres_work; + struct nfc_ops *ops; }; #define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev) @@ -181,6 +190,7 @@ int nfc_set_remote_general_bytes(struct nfc_dev *dev, int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets, int ntargets); +int nfc_target_lost(struct nfc_dev *dev, u32 target_idx); int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx, u8 comm_mode, u8 rf_mode); diff --git a/include/net/nfc/shdlc.h b/include/net/nfc/shdlc.h new file mode 100644 index 00000000000..1071987d040 --- /dev/null +++ b/include/net/nfc/shdlc.h @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef __NFC_SHDLC_H +#define __NFC_SHDLC_H + +struct nfc_shdlc; + +struct nfc_shdlc_ops { + int (*open) (struct nfc_shdlc *shdlc); + void (*close) (struct nfc_shdlc *shdlc); + int (*hci_ready) (struct nfc_shdlc *shdlc); + int (*xmit) (struct nfc_shdlc *shdlc, struct sk_buff *skb); + int (*start_poll) (struct nfc_shdlc *shdlc, u32 protocols); + int (*target_from_gate) (struct nfc_shdlc *shdlc, u8 gate, + struct nfc_target *target); + int (*complete_target_discovered) (struct nfc_shdlc *shdlc, u8 gate, + struct nfc_target *target); + int (*data_exchange) (struct nfc_shdlc *shdlc, + struct nfc_target *target, + struct sk_buff *skb, struct sk_buff **res_skb); +}; + +enum shdlc_state { + SHDLC_DISCONNECTED = 0, + SHDLC_CONNECTING = 1, + SHDLC_NEGOCIATING = 2, + SHDLC_CONNECTED = 3 +}; + +struct nfc_shdlc { + struct mutex state_mutex; + enum shdlc_state state; + int hard_fault; + + struct nfc_hci_dev *hdev; + + wait_queue_head_t *connect_wq; + int connect_tries; + int connect_result; + struct timer_list connect_timer;/* aka T3 in spec 10.6.1 */ + + u8 w; /* window size */ + bool srej_support; + + struct timer_list t1_timer; /* send ack timeout */ + bool t1_active; + + struct timer_list t2_timer; /* guard/retransmit timeout */ + bool t2_active; + + int ns; /* next seq num for send */ + int nr; /* next expected seq num for receive */ + int dnr; /* oldest sent unacked seq num */ + + struct sk_buff_head rcv_q; + + struct sk_buff_head send_q; + bool rnr; /* other side is not ready to receive */ + + struct sk_buff_head ack_pending_q; + + struct workqueue_struct *sm_wq; + struct work_struct sm_work; + + struct nfc_shdlc_ops *ops; + + int client_headroom; + int client_tailroom; + + void *clientdata; +}; + +void nfc_shdlc_recv_frame(struct nfc_shdlc *shdlc, struct sk_buff *skb); + +struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops, + struct nfc_hci_init_data *init_data, + u32 protocols, + int tx_headroom, int tx_tailroom, + int max_link_payload, const char *devname); + +void nfc_shdlc_free(struct nfc_shdlc *shdlc); + +void nfc_shdlc_set_clientdata(struct nfc_shdlc *shdlc, void *clientdata); +void *nfc_shdlc_get_clientdata(struct nfc_shdlc *shdlc); +struct nfc_hci_dev *nfc_shdlc_get_hci_dev(struct nfc_shdlc *shdlc); + +#endif /* __NFC_SHDLC_H */ diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index fffdc603f4c..66f5ac370f9 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -107,7 +107,7 @@ extern int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp, /* Calculate maximal size of packet seen by hard_start_xmit routine of this device. 
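A driver for an sHDLC-framed HCI chip hands its link handling to the shdlc layer above rather than talking to nfc_hci_* directly; a minimal sketch of that flow, with every foo_* symbol hypothetical, the callbacks reduced to stubs, and the protocol/headroom/payload arguments left as placeholder values:

	static int foo_shdlc_open(struct nfc_shdlc *shdlc)
	{
		return 0;			/* bring up the physical link, e.g. I2C */
	}

	static void foo_shdlc_close(struct nfc_shdlc *shdlc)
	{
	}

	static int foo_shdlc_xmit(struct nfc_shdlc *shdlc, struct sk_buff *skb)
	{
		/* write the raw sHDLC frame to the chip */
		return 0;
	}

	static struct nfc_shdlc_ops foo_shdlc_ops = {
		.open	= foo_shdlc_open,
		.close	= foo_shdlc_close,
		.xmit	= foo_shdlc_xmit,
		/* remaining callbacks omitted from this sketch */
	};

	static struct nfc_hci_init_data foo_init_data = {
		.gate_count	= 0,
		.session_id	= "sketch01",
	};

	static int foo_probe(void)
	{
		struct nfc_shdlc *shdlc;

		shdlc = nfc_shdlc_allocate(&foo_shdlc_ops, &foo_init_data,
					   0 /* protocols */, 0, 0, 32, "foo-nfc");
		if (!shdlc)
			return -ENOMEM;

		return 0;
	}

Frames read back from the chip would then be fed to nfc_shdlc_recv_frame(), which runs the sHDLC state machine before handing data on to the HCI core.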
*/ -static inline unsigned psched_mtu(const struct net_device *dev) +static inline unsigned int psched_mtu(const struct net_device *dev) { return dev->mtu + dev->hard_header_len; } diff --git a/include/net/rawv6.h b/include/net/rawv6.h index cf757723445..e7ea660e4db 100644 --- a/include/net/rawv6.h +++ b/include/net/rawv6.h @@ -5,7 +5,7 @@ void raw6_icmp_error(struct sk_buff *, int nexthdr, u8 type, u8 code, int inner_offset, __be32); -int raw6_local_deliver(struct sk_buff *, int); +bool raw6_local_deliver(struct sk_buff *, int); extern int rawv6_rcv(struct sock *sk, struct sk_buff *skb); diff --git a/include/net/route.h b/include/net/route.h index b1c0d5b564c..ed2b78e2375 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -50,7 +50,7 @@ struct rtable { __be32 rt_key_src; int rt_genid; - unsigned rt_flags; + unsigned int rt_flags; __u16 rt_type; __u8 rt_key_tos; @@ -185,8 +185,8 @@ extern unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph unsigned short new_mtu, struct net_device *dev); extern void ip_rt_send_redirect(struct sk_buff *skb); -extern unsigned inet_addr_type(struct net *net, __be32 addr); -extern unsigned inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr); +extern unsigned int inet_addr_type(struct net *net, __be32 addr); +extern unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr); extern void ip_rt_multicast_event(struct in_device *); extern int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg); extern void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt); diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 37029390197..bbcfd099343 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -41,9 +41,11 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh) * @get_size: Function to calculate required room for dumping device * specific netlink attributes * @fill_info: Function to dump device specific netlink attributes - * @get_xstats_size: Function to calculate required room for dumping devic + * @get_xstats_size: Function to calculate required room for dumping device * specific statistics * @fill_xstats: Function to dump device specific statistics + * @get_tx_queues: Function to determine number of transmit queues to create when + * creating a new device. */ struct rtnl_link_ops { struct list_head list; @@ -75,9 +77,8 @@ struct rtnl_link_ops { size_t (*get_xstats_size)(const struct net_device *dev); int (*fill_xstats)(struct sk_buff *skb, const struct net_device *dev); - int (*get_tx_queues)(struct net *net, struct nlattr *tb[], - unsigned int *tx_queues, - unsigned int *real_tx_queues); + int (*get_tx_queues)(struct net *net, + struct nlattr *tb[]); }; extern int __rtnl_link_register(struct rtnl_link_ops *ops); @@ -94,7 +95,7 @@ extern void rtnl_link_unregister(struct rtnl_link_ops *ops); * @fill_link_af: Function to fill IFLA_AF_SPEC with address family * specific netlink attributes. * @get_link_af_size: Function to calculate size of address family specific - * netlink attributes exlusive the container attribute. + * netlink attributes. * @validate_link_af: Validate a IFLA_AF_SPEC attribute, must check attr * for invalid configuration settings. 
* @set_link_af: Function to parse a IFLA_AF_SPEC attribute and modify diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 88949a99453..e4652fe5895 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1145,10 +1145,10 @@ struct sctp_outq { /* Data pending that has never been transmitted. */ struct list_head out_chunk_list; - unsigned out_qlen; /* Total length of queued data chunks. */ + unsigned int out_qlen; /* Total length of queued data chunks. */ /* Error of send failed, may used in SCTP_SEND_FAILED event. */ - unsigned error; + unsigned int error; /* These are control chunks we want to send. */ struct list_head control_chunk_list; @@ -2000,8 +2000,8 @@ void sctp_assoc_update(struct sctp_association *old, __u32 sctp_association_get_next_tsn(struct sctp_association *); void sctp_assoc_sync_pmtu(struct sctp_association *); -void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned); -void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned); +void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int); +void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int); void sctp_assoc_set_primary(struct sctp_association *, struct sctp_transport *); void sctp_assoc_del_nonprimary_peers(struct sctp_association *, diff --git a/include/net/sock.h b/include/net/sock.h index 5a0a58ac412..da931555e00 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -97,7 +97,7 @@ void mem_cgroup_sockets_destroy(struct cgroup *cgrp) #else /* Validate arguments and do nothing */ static inline __printf(2, 3) -void SOCK_DEBUG(struct sock *sk, const char *msg, ...) +void SOCK_DEBUG(const struct sock *sk, const char *msg, ...) { } #endif @@ -372,11 +372,22 @@ struct sock { void (*sk_data_ready)(struct sock *sk, int bytes); void (*sk_write_space)(struct sock *sk); void (*sk_error_report)(struct sock *sk); - int (*sk_backlog_rcv)(struct sock *sk, - struct sk_buff *skb); + int (*sk_backlog_rcv)(struct sock *sk, + struct sk_buff *skb); void (*sk_destruct)(struct sock *sk); }; +/* + * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK + * or not whether his port will be reused by someone else. SK_FORCE_REUSE + * on a socket means that the socket will reuse everybody else's port + * without looking at the other's sk_reuse value. 
+ */ + +#define SK_NO_REUSE 0 +#define SK_CAN_REUSE 1 +#define SK_FORCE_REUSE 2 + static inline int sk_peek_offset(struct sock *sk, int flags) { if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0)) @@ -443,40 +454,40 @@ static inline struct sock *sk_nulls_next(const struct sock *sk) NULL; } -static inline int sk_unhashed(const struct sock *sk) +static inline bool sk_unhashed(const struct sock *sk) { return hlist_unhashed(&sk->sk_node); } -static inline int sk_hashed(const struct sock *sk) +static inline bool sk_hashed(const struct sock *sk) { return !sk_unhashed(sk); } -static __inline__ void sk_node_init(struct hlist_node *node) +static inline void sk_node_init(struct hlist_node *node) { node->pprev = NULL; } -static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node) +static inline void sk_nulls_node_init(struct hlist_nulls_node *node) { node->pprev = NULL; } -static __inline__ void __sk_del_node(struct sock *sk) +static inline void __sk_del_node(struct sock *sk) { __hlist_del(&sk->sk_node); } /* NB: equivalent to hlist_del_init_rcu */ -static __inline__ int __sk_del_node_init(struct sock *sk) +static inline bool __sk_del_node_init(struct sock *sk) { if (sk_hashed(sk)) { __sk_del_node(sk); sk_node_init(&sk->sk_node); - return 1; + return true; } - return 0; + return false; } /* Grab socket reference count. This operation is valid only @@ -498,9 +509,9 @@ static inline void __sock_put(struct sock *sk) atomic_dec(&sk->sk_refcnt); } -static __inline__ int sk_del_node_init(struct sock *sk) +static inline bool sk_del_node_init(struct sock *sk) { - int rc = __sk_del_node_init(sk); + bool rc = __sk_del_node_init(sk); if (rc) { /* paranoid for a while -acme */ @@ -511,18 +522,18 @@ static __inline__ int sk_del_node_init(struct sock *sk) } #define sk_del_node_init_rcu(sk) sk_del_node_init(sk) -static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk) +static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk) { if (sk_hashed(sk)) { hlist_nulls_del_init_rcu(&sk->sk_nulls_node); - return 1; + return true; } - return 0; + return false; } -static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk) +static inline bool sk_nulls_del_node_init_rcu(struct sock *sk) { - int rc = __sk_nulls_del_node_init_rcu(sk); + bool rc = __sk_nulls_del_node_init_rcu(sk); if (rc) { /* paranoid for a while -acme */ @@ -532,40 +543,40 @@ static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk) return rc; } -static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list) +static inline void __sk_add_node(struct sock *sk, struct hlist_head *list) { hlist_add_head(&sk->sk_node, list); } -static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list) +static inline void sk_add_node(struct sock *sk, struct hlist_head *list) { sock_hold(sk); __sk_add_node(sk, list); } -static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) +static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) { sock_hold(sk); hlist_add_head_rcu(&sk->sk_node, list); } -static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) +static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) { hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); } -static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) +static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) { sock_hold(sk); __sk_nulls_add_node_rcu(sk, list); } 
-static __inline__ void __sk_del_bind_node(struct sock *sk) +static inline void __sk_del_bind_node(struct sock *sk) { __hlist_del(&sk->sk_bind_node); } -static __inline__ void sk_add_bind_node(struct sock *sk, +static inline void sk_add_bind_node(struct sock *sk, struct hlist_head *list) { hlist_add_head(&sk->sk_bind_node, list); @@ -639,7 +650,7 @@ static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag) __clear_bit(flag, &sk->sk_flags); } -static inline int sock_flag(struct sock *sk, enum sock_flags flag) +static inline bool sock_flag(const struct sock *sk, enum sock_flags flag) { return test_bit(flag, &sk->sk_flags); } @@ -654,7 +665,7 @@ static inline void sk_acceptq_added(struct sock *sk) sk->sk_ack_backlog++; } -static inline int sk_acceptq_is_full(struct sock *sk) +static inline bool sk_acceptq_is_full(const struct sock *sk) { return sk->sk_ack_backlog > sk->sk_max_ack_backlog; } @@ -662,19 +673,19 @@ static inline int sk_acceptq_is_full(struct sock *sk) /* * Compute minimal free write space needed to queue new packets. */ -static inline int sk_stream_min_wspace(struct sock *sk) +static inline int sk_stream_min_wspace(const struct sock *sk) { return sk->sk_wmem_queued >> 1; } -static inline int sk_stream_wspace(struct sock *sk) +static inline int sk_stream_wspace(const struct sock *sk) { return sk->sk_sndbuf - sk->sk_wmem_queued; } extern void sk_stream_write_space(struct sock *sk); -static inline int sk_stream_memory_free(struct sock *sk) +static inline bool sk_stream_memory_free(const struct sock *sk) { return sk->sk_wmem_queued < sk->sk_sndbuf; } @@ -699,17 +710,19 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) * Do not take into account this skb truesize, * to allow even a single big packet to come. */ -static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb) +static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb, + unsigned int limit) { unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); - return qsize > sk->sk_rcvbuf; + return qsize > limit; } /* The per-socket spinlock must be held here. 
*/ -static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb) +static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb, + unsigned int limit) { - if (sk_rcvqueues_full(sk, skb)) + if (sk_rcvqueues_full(sk, skb, limit)) return -ENOBUFS; __sk_add_backlog(sk, skb); @@ -796,26 +809,26 @@ struct module; * transport -> network interface is defined by struct inet_proto */ struct proto { - void (*close)(struct sock *sk, + void (*close)(struct sock *sk, long timeout); int (*connect)(struct sock *sk, - struct sockaddr *uaddr, + struct sockaddr *uaddr, int addr_len); int (*disconnect)(struct sock *sk, int flags); - struct sock * (*accept) (struct sock *sk, int flags, int *err); + struct sock * (*accept)(struct sock *sk, int flags, int *err); int (*ioctl)(struct sock *sk, int cmd, unsigned long arg); int (*init)(struct sock *sk); void (*destroy)(struct sock *sk); void (*shutdown)(struct sock *sk, int how); - int (*setsockopt)(struct sock *sk, int level, + int (*setsockopt)(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); - int (*getsockopt)(struct sock *sk, int level, - int optname, char __user *optval, - int __user *option); + int (*getsockopt)(struct sock *sk, int level, + int optname, char __user *optval, + int __user *option); #ifdef CONFIG_COMPAT int (*compat_setsockopt)(struct sock *sk, int level, @@ -832,14 +845,14 @@ struct proto { struct msghdr *msg, size_t len); int (*recvmsg)(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len, int noblock, int flags, - int *addr_len); + size_t len, int noblock, int flags, + int *addr_len); int (*sendpage)(struct sock *sk, struct page *page, int offset, size_t size, int flags); - int (*bind)(struct sock *sk, + int (*bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len); - int (*backlog_rcv) (struct sock *sk, + int (*backlog_rcv) (struct sock *sk, struct sk_buff *skb); /* Keeping track of sk's, looking them up, and port selection methods. */ @@ -1160,7 +1173,7 @@ proto_memory_pressure(struct proto *prot) extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc); extern int sock_prot_inuse_get(struct net *net, struct proto *proto); #else -static void inline sock_prot_inuse_add(struct net *net, struct proto *prot, +static inline void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc) { } @@ -1247,24 +1260,24 @@ static inline int sk_mem_pages(int amt) return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT; } -static inline int sk_has_account(struct sock *sk) +static inline bool sk_has_account(struct sock *sk) { /* return true if protocol supports memory accounting */ return !!sk->sk_prot->memory_allocated; } -static inline int sk_wmem_schedule(struct sock *sk, int size) +static inline bool sk_wmem_schedule(struct sock *sk, int size) { if (!sk_has_account(sk)) - return 1; + return true; return size <= sk->sk_forward_alloc || __sk_mem_schedule(sk, size, SK_MEM_SEND); } -static inline int sk_rmem_schedule(struct sock *sk, int size) +static inline bool sk_rmem_schedule(struct sock *sk, int size) { if (!sk_has_account(sk)) - return 1; + return true; return size <= sk->sk_forward_alloc || __sk_mem_schedule(sk, size, SK_MEM_RECV); } @@ -1329,7 +1342,7 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) * Mark both the sk_lock and the sk_lock.slock as a * per-address-family lock class. 
*/ -#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ +#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ do { \ sk->sk_lock.owned = 0; \ init_waitqueue_head(&sk->sk_lock.wq); \ @@ -1337,7 +1350,7 @@ do { \ debug_check_no_locks_freed((void *)&(sk)->sk_lock, \ sizeof((sk)->sk_lock)); \ lockdep_set_class_and_name(&(sk)->sk_lock.slock, \ - (skey), (sname)); \ + (skey), (sname)); \ lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ } while (0) @@ -1397,13 +1410,13 @@ extern int sock_setsockopt(struct socket *sock, int level, unsigned int optlen); extern int sock_getsockopt(struct socket *sock, int level, - int op, char __user *optval, + int op, char __user *optval, int __user *optlen); -extern struct sk_buff *sock_alloc_send_skb(struct sock *sk, +extern struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, int noblock, int *errcode); -extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk, +extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, unsigned long data_len, int noblock, @@ -1425,7 +1438,7 @@ static inline void sock_update_classid(struct sock *sk) * Functions to fill in entries in struct proto_ops when a protocol * does not implement a particular function. */ -extern int sock_no_bind(struct socket *, +extern int sock_no_bind(struct socket *, struct sockaddr *, int); extern int sock_no_connect(struct socket *, struct sockaddr *, int, int); @@ -1454,7 +1467,7 @@ extern int sock_no_mmap(struct file *file, struct vm_area_struct *vma); extern ssize_t sock_no_sendpage(struct socket *sock, struct page *page, - int offset, size_t size, + int offset, size_t size, int flags); /* @@ -1477,7 +1490,7 @@ extern void sk_common_release(struct sock *sk); /* * Default socket callbacks and setup code */ - + /* Initialise core socket variables */ extern void sock_init_data(struct socket *sock, struct sock *sk); @@ -1677,7 +1690,7 @@ extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); -static inline int sk_can_gso(const struct sock *sk) +static inline bool sk_can_gso(const struct sock *sk) { return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); } @@ -1794,7 +1807,7 @@ static inline int sk_rmem_alloc_get(const struct sock *sk) * * Returns true if socket has write or read allocations */ -static inline int sk_has_allocations(const struct sock *sk) +static inline bool sk_has_allocations(const struct sock *sk) { return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk); } @@ -1833,9 +1846,7 @@ static inline int sk_has_allocations(const struct sock *sk) */ static inline bool wq_has_sleeper(struct socket_wq *wq) { - - /* - * We need to be sure we are in sync with the + /* We need to be sure we are in sync with the * add_wait_queue modifications to the wait queue. * * This memory barrier is paired in the sock_poll_wait. @@ -1857,22 +1868,21 @@ static inline void sock_poll_wait(struct file *filp, { if (!poll_does_not_wait(p) && wait_address) { poll_wait(filp, wait_address, p); - /* - * We need to be sure we are in sync with the + /* We need to be sure we are in sync with the * socket flags modification. * * This memory barrier is paired in the wq_has_sleeper. - */ + */ smp_mb(); } } /* - * Queue a received datagram if it will fit. Stream and sequenced + * Queue a received datagram if it will fit. Stream and sequenced * protocols can't normally use this as they need to fit buffers in * and play with them. 
* - * Inlined as it's very short and called for pretty much every + * Inlined as it's very short and called for pretty much every * packet ever received. */ @@ -1898,10 +1908,10 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) sk_mem_charge(sk, skb->truesize); } -extern void sk_reset_timer(struct sock *sk, struct timer_list* timer, +extern void sk_reset_timer(struct sock *sk, struct timer_list *timer, unsigned long expires); -extern void sk_stop_timer(struct sock *sk, struct timer_list* timer); +extern void sk_stop_timer(struct sock *sk, struct timer_list *timer); extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); @@ -1910,7 +1920,7 @@ extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); /* * Recover an error report and clear atomically */ - + static inline int sock_error(struct sock *sk) { int err; @@ -1926,7 +1936,7 @@ static inline unsigned long sock_wspace(struct sock *sk) if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); - if (amt < 0) + if (amt < 0) amt = 0; } return amt; @@ -1970,7 +1980,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk) /* * Default write policy as shown to user space via poll/select/SIGIO */ -static inline int sock_writeable(const struct sock *sk) +static inline bool sock_writeable(const struct sock *sk) { return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1); } @@ -1980,12 +1990,12 @@ static inline gfp_t gfp_any(void) return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; } -static inline long sock_rcvtimeo(const struct sock *sk, int noblock) +static inline long sock_rcvtimeo(const struct sock *sk, bool noblock) { return noblock ? 0 : sk->sk_rcvtimeo; } -static inline long sock_sndtimeo(const struct sock *sk, int noblock) +static inline long sock_sndtimeo(const struct sock *sk, bool noblock) { return noblock ? 0 : sk->sk_sndtimeo; } @@ -2008,7 +2018,7 @@ extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, struct sk_buff *skb); -static __inline__ void +static inline void sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { ktime_t kt = skb->tstamp; @@ -2049,7 +2059,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, (1UL << SOCK_RCVTSTAMP) | \ (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \ (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \ - (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ + (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE)) if (sk->sk_flags & FLAGS_TS_OR_DROPS) @@ -2078,7 +2088,7 @@ extern int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags); * locked so that the sk_buff queue operation is ok. 
*/ #ifdef CONFIG_NET_DMA -static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early) +static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early) { __skb_unlink(skb, &sk->sk_receive_queue); if (!copied_early) @@ -2087,7 +2097,7 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_e __skb_queue_tail(&sk->sk_async_wait_queue, skb); } #else -static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early) +static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early) { __skb_unlink(skb, &sk->sk_receive_queue); __kfree_skb(skb); @@ -2134,8 +2144,8 @@ extern void sock_enable_timestamp(struct sock *sk, int flag); extern int sock_get_timestamp(struct sock *, struct timeval __user *); extern int sock_get_timestampns(struct sock *, struct timespec __user *); -/* - * Enable debug/info messages +/* + * Enable debug/info messages */ extern int net_msg_warn; #define NETDEBUG(fmt, args...) \ diff --git a/include/net/tcp.h b/include/net/tcp.h index f75a04d752c..e79aa48d9fc 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -123,7 +123,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); #endif #define TCP_RTO_MAX ((unsigned)(120*HZ)) #define TCP_RTO_MIN ((unsigned)(HZ/5)) -#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC2988bis initial RTO value */ +#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */ #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now * used as a fallback RTO for the * initial data transmission if no @@ -252,6 +252,7 @@ extern int sysctl_tcp_max_ssthresh; extern int sysctl_tcp_cookie_size; extern int sysctl_tcp_thin_linear_timeouts; extern int sysctl_tcp_thin_dupack; +extern int sysctl_tcp_early_retrans; extern atomic_long_t tcp_memory_allocated; extern struct percpu_counter tcp_sockets_allocated; @@ -262,14 +263,14 @@ extern int tcp_memory_pressure; * and worry about wraparound (automatic with unsigned arithmetic). */ -static inline int before(__u32 seq1, __u32 seq2) +static inline bool before(__u32 seq1, __u32 seq2) { return (__s32)(seq1-seq2) < 0; } #define after(seq2, seq1) before(seq1, seq2) /* is s2<=s1<=s3 ? */ -static inline int between(__u32 seq1, __u32 seq2, __u32 seq3) +static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3) { return seq3 - seq2 >= seq1 - seq2; } @@ -304,7 +305,7 @@ static inline void tcp_synq_overflow(struct sock *sk) } /* syncookies: no recent synqueue overflow on this listening socket? 
*/ -static inline int tcp_synq_no_recent_overflow(const struct sock *sk) +static inline bool tcp_synq_no_recent_overflow(const struct sock *sk) { unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK); @@ -366,13 +367,6 @@ static inline void tcp_dec_quickack_mode(struct sock *sk, #define TCP_ECN_DEMAND_CWR 4 #define TCP_ECN_SEEN 8 -static __inline__ void -TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th) -{ - if (sysctl_tcp_ecn && th->ece && th->cwr) - inet_rsk(req)->ecn_ok = 1; -} - enum tcp_tw_status { TCP_TW_SUCCESS = 0, TCP_TW_RST = 1, @@ -389,12 +383,13 @@ extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb, struct request_sock **prev); extern int tcp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb); -extern int tcp_use_frto(struct sock *sk); +extern bool tcp_use_frto(struct sock *sk); extern void tcp_enter_frto(struct sock *sk); extern void tcp_enter_loss(struct sock *sk, int how); extern void tcp_clear_retrans(struct tcp_sock *tp); extern void tcp_update_metrics(struct sock *sk); extern void tcp_close(struct sock *sk, long timeout); +extern void tcp_init_sock(struct sock *sk); extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait); extern int tcp_getsockopt(struct sock *sk, int level, int optname, @@ -435,6 +430,9 @@ extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, struct request_values *rvp); extern int tcp_disconnect(struct sock *sk, int flags); +void tcp_connect_init(struct sock *sk); +void tcp_finish_connect(struct sock *sk, struct sk_buff *skb); +int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size); /* From syncookies.c */ extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; @@ -472,7 +470,7 @@ static inline __u32 cookie_v6_init_sequence(struct sock *sk, extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, int nonagle); -extern int tcp_may_send_now(struct sock *sk); +extern bool tcp_may_send_now(struct sock *sk); extern int tcp_retransmit_skb(struct sock *, struct sk_buff *); extern void tcp_retransmit_timer(struct sock *sk); extern void tcp_xmit_retransmit_queue(struct sock *); @@ -486,15 +484,17 @@ extern int tcp_write_wakeup(struct sock *); extern void tcp_send_fin(struct sock *sk); extern void tcp_send_active_reset(struct sock *sk, gfp_t priority); extern int tcp_send_synack(struct sock *); -extern int tcp_syn_flood_action(struct sock *sk, - const struct sk_buff *skb, - const char *proto); +extern bool tcp_syn_flood_action(struct sock *sk, + const struct sk_buff *skb, + const char *proto); extern void tcp_push_one(struct sock *, unsigned int mss_now); extern void tcp_send_ack(struct sock *sk); extern void tcp_send_delayed_ack(struct sock *sk); /* tcp_input.c */ extern void tcp_cwnd_application_limited(struct sock *sk); +extern void tcp_resume_early_retransmit(struct sock *sk); +extern void tcp_rearm_rto(struct sock *sk); /* tcp_timer.c */ extern void tcp_init_xmit_timers(struct sock *); @@ -540,8 +540,8 @@ extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, extern void tcp_initialize_rcv_mss(struct sock *sk); -extern int tcp_mtu_to_mss(const struct sock *sk, int pmtu); -extern int tcp_mss_to_mtu(const struct sock *sk, int mss); +extern int tcp_mtu_to_mss(struct sock *sk, int pmtu); +extern int tcp_mss_to_mtu(struct sock *sk, int mss); extern void tcp_mtup_init(struct sock *sk); extern void 
tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt); @@ -609,6 +609,8 @@ static inline u32 tcp_receive_window(const struct tcp_sock *tp) */ extern u32 __tcp_select_window(struct sock *sk); +void tcp_send_window_probe(struct sock *sk); + /* TCP timestamps are only 32-bits, this causes a slight * complication on 64-bit systems since we store a snapshot * of jiffies in the buffer control blocks below. We decided @@ -645,21 +647,38 @@ struct tcp_skb_cb { __u32 end_seq; /* SEQ + FIN + SYN + datalen */ __u32 when; /* used to compute rtt's */ __u8 tcp_flags; /* TCP header flags. (tcp[13]) */ + __u8 sacked; /* State flags for SACK/FACK. */ #define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */ #define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */ #define TCPCB_LOST 0x04 /* SKB is lost */ #define TCPCB_TAGBITS 0x07 /* All tag bits */ - __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ - /* 1 byte hole */ #define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */ #define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS) + __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ + /* 1 byte hole */ __u32 ack_seq; /* Sequence number ACK'd */ }; #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) +/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set + * + * If we receive a SYN packet with these bits set, it means a network is + * playing bad games with TOS bits. In order to avoid possible false congestion + * notifications, we disable TCP ECN negociation. + */ +static inline void +TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb) +{ + const struct tcphdr *th = tcp_hdr(skb); + + if (sysctl_tcp_ecn && th->ece && th->cwr && + INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield)) + inet_rsk(req)->ecn_ok = 1; +} + /* Due to TSO, an SKB can be composed of multiple actual * packets. To keep these tracked properly, we use this. */ @@ -775,12 +794,12 @@ static inline int tcp_is_sack(const struct tcp_sock *tp) return tp->rx_opt.sack_ok; } -static inline int tcp_is_reno(const struct tcp_sock *tp) +static inline bool tcp_is_reno(const struct tcp_sock *tp) { return !tcp_is_sack(tp); } -static inline int tcp_is_fack(const struct tcp_sock *tp) +static inline bool tcp_is_fack(const struct tcp_sock *tp) { return tp->rx_opt.sack_ok & TCP_FACK_ENABLED; } @@ -790,6 +809,21 @@ static inline void tcp_enable_fack(struct tcp_sock *tp) tp->rx_opt.sack_ok |= TCP_FACK_ENABLED; } +/* TCP early-retransmit (ER) is similar to but more conservative than + * the thin-dupack feature. Enable ER only if thin-dupack is disabled. 
+ */ +static inline void tcp_enable_early_retrans(struct tcp_sock *tp) +{ + tp->do_early_retrans = sysctl_tcp_early_retrans && + !sysctl_tcp_thin_dupack && sysctl_tcp_reordering == 3; + tp->early_retrans_delayed = 0; +} + +static inline void tcp_disable_early_retrans(struct tcp_sock *tp) +{ + tp->do_early_retrans = 0; +} + static inline unsigned int tcp_left_out(const struct tcp_sock *tp) { return tp->sacked_out + tp->lost_out; @@ -867,7 +901,7 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp) { return tp->snd_una + tp->snd_wnd; } -extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight); +extern bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight); static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss, const struct sk_buff *skb) @@ -910,7 +944,7 @@ static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb) return __skb_checksum_complete(skb); } -static inline int tcp_checksum_complete(struct sk_buff *skb) +static inline bool tcp_checksum_complete(struct sk_buff *skb) { return !skb_csum_unnecessary(skb) && __tcp_checksum_complete(skb); @@ -940,12 +974,12 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp) * * NOTE: is this not too big to inline? */ -static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb) +static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); if (sysctl_tcp_low_latency || !tp->ucopy.task) - return 0; + return false; __skb_queue_tail(&tp->ucopy.prequeue, skb); tp->ucopy.memory += skb->truesize; @@ -969,7 +1003,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb) (3 * tcp_rto_min(sk)) / 4, TCP_RTO_MAX); } - return 1; + return true; } @@ -1074,28 +1108,28 @@ static inline int tcp_fin_time(const struct sock *sk) return fin_timeout; } -static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, - int paws_win) +static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt, + int paws_win) { if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win) - return 1; + return true; if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)) - return 1; + return true; /* * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0, * then following tcp messages have valid values. Ignore 0 value, * or else 'negative' tsval might forbid us to accept their packets. */ if (!rx_opt->ts_recent) - return 1; - return 0; + return true; + return false; } -static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt, - int rst) +static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt, + int rst) { if (tcp_paws_check(rx_opt, 0)) - return 0; + return false; /* RST segments are not recommended to carry timestamp, and, if they do, it is recommended to ignore PAWS because @@ -1110,8 +1144,8 @@ static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt, However, we can relax time bounds for RST segments to MSL. 
*/ if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL) - return 0; - return 1; + return false; + return true; } static inline void tcp_mib_init(struct net *net) @@ -1226,7 +1260,7 @@ extern void tcp_put_md5sig_pool(void); extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *); extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *, - unsigned header_len); + unsigned int header_len); extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key); @@ -1349,7 +1383,7 @@ static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk) __skb_unlink(skb, &sk->sk_write_queue); } -static inline int tcp_write_queue_empty(struct sock *sk) +static inline bool tcp_write_queue_empty(struct sock *sk) { return skb_queue_empty(&sk->sk_write_queue); } @@ -1406,7 +1440,7 @@ static inline void tcp_highest_sack_combine(struct sock *sk, /* Determines whether this is a thin stream (which may suffer from * increased latency). Used to trigger latency-reducing mechanisms. */ -static inline unsigned int tcp_stream_is_thin(struct tcp_sock *tp) +static inline bool tcp_stream_is_thin(struct tcp_sock *tp) { return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp); } diff --git a/include/net/udp.h b/include/net/udp.h index 5d606d9da9e..065f379c650 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -81,7 +81,7 @@ struct udp_table { extern struct udp_table udp_table; extern void udp_table_init(struct udp_table *, const char *); static inline struct udp_hslot *udp_hashslot(struct udp_table *table, - struct net *net, unsigned num) + struct net *net, unsigned int num) { return &table->hash[udp_hashfn(net, num, table->mask)]; } @@ -267,4 +267,8 @@ extern void udp_init(void); extern int udp4_ufo_send_check(struct sk_buff *skb); extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, netdev_features_t features); +extern void udp_encap_enable(void); +#if IS_ENABLED(CONFIG_IPV6) +extern void udpv6_encap_enable(void); +#endif #endif /* _UDP_H */ diff --git a/include/net/wimax.h b/include/net/wimax.h index 322ff4fbdb4..bbb74f990ca 100644 --- a/include/net/wimax.h +++ b/include/net/wimax.h @@ -423,8 +423,8 @@ struct wimax_dev { int (*op_reset)(struct wimax_dev *wimax_dev); struct rfkill *rfkill; - unsigned rf_hw; - unsigned rf_sw; + unsigned int rf_hw; + unsigned int rf_sw; char name[32]; struct dentry *debugfs_dentry; diff --git a/include/net/wpan-phy.h b/include/net/wpan-phy.h index ff27f1b078d..b52bda8d13b 100644 --- a/include/net/wpan-phy.h +++ b/include/net/wpan-phy.h @@ -25,6 +25,14 @@ #include <linux/mutex.h> #include <linux/bug.h> +/* According to the IEEE 802.15.4 stadard the upper most significant bits of + * the 32-bit channel bitmaps shall be used as an integer value to specify 32 + * possible channel pages. The lower 27 bits of the channel bit map shall be + * used as a bit mask to specify channel numbers within a channel page. 
+ */ +#define WPAN_NUM_CHANNELS 27 +#define WPAN_NUM_PAGES 32 + struct wpan_phy { struct mutex pib_lock; @@ -43,7 +51,7 @@ struct wpan_phy { int idx; struct net_device *(*add_iface)(struct wpan_phy *phy, - const char *name); + const char *name, int type); void (*del_iface)(struct wpan_phy *phy, struct net_device *dev); char priv[0] __attribute__((__aligned__(NETDEV_ALIGN))); diff --git a/include/net/x25.h b/include/net/x25.h index a06119a0512..b4a8a892312 100644 --- a/include/net/x25.h +++ b/include/net/x25.h @@ -305,7 +305,7 @@ static inline void x25_unregister_sysctl(void) {}; #endif /* CONFIG_SYSCTL */ struct x25_skb_cb { - unsigned flags; + unsigned int flags; }; #define X25_SKB_CB(s) ((struct x25_skb_cb *) ((s)->cb)) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 96239e78e62..e0a55df5bde 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -886,15 +886,15 @@ __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli) return port; } -extern int xfrm_selector_match(const struct xfrm_selector *sel, - const struct flowi *fl, - unsigned short family); +extern bool xfrm_selector_match(const struct xfrm_selector *sel, + const struct flowi *fl, + unsigned short family); #ifdef CONFIG_SECURITY_NETWORK_XFRM /* If neither has a context --> match * Otherwise, both must have a context and the sids, doi, alg must match */ -static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2) +static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2) { return ((!s1 && !s2) || (s1 && s2 && @@ -903,9 +903,9 @@ static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ct (s1->ctx_alg == s2->ctx_alg))); } #else -static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2) +static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2) { - return 1; + return true; } #endif @@ -1682,8 +1682,9 @@ static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m) static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m) { - if (m->m | m->v) - NLA_PUT(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m); + if ((m->m | m->v) && + nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m)) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/802/Makefile b/net/802/Makefile index 7893d679910..a30d6e385ae 100644 --- a/net/802/Makefile +++ b/net/802/Makefile @@ -4,7 +4,6 @@ # Check the p8022 selections against net/core/Makefile. 
obj-$(CONFIG_LLC) += p8022.o psnap.o -obj-$(CONFIG_TR) += p8022.o psnap.o tr.o obj-$(CONFIG_NET_FC) += fc.o obj-$(CONFIG_FDDI) += fddi.o obj-$(CONFIG_HIPPI) += hippi.o diff --git a/net/802/fc.c b/net/802/fc.c index b324e31401a..05eea6b98bb 100644 --- a/net/802/fc.c +++ b/net/802/fc.c @@ -35,7 +35,7 @@ static int fc_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - const void *daddr, const void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned int len) { struct fch_hdr *fch; int hdr_len; diff --git a/net/802/fddi.c b/net/802/fddi.c index 5ab25cd4314..9cda40661e0 100644 --- a/net/802/fddi.c +++ b/net/802/fddi.c @@ -51,7 +51,7 @@ static int fddi_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - const void *daddr, const void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned int len) { int hl = FDDI_K_SNAP_HLEN; struct fddihdr *fddi; diff --git a/net/802/garp.c b/net/802/garp.c index a5c22483043..8456f5d98b8 100644 --- a/net/802/garp.c +++ b/net/802/garp.c @@ -157,9 +157,9 @@ static struct garp_attr *garp_attr_lookup(const struct garp_applicant *app, while (parent) { attr = rb_entry(parent, struct garp_attr, node); d = garp_attr_cmp(attr, data, len, type); - if (d < 0) + if (d > 0) parent = parent->rb_left; - else if (d > 0) + else if (d < 0) parent = parent->rb_right; else return attr; @@ -178,9 +178,9 @@ static struct garp_attr *garp_attr_create(struct garp_applicant *app, parent = *p; attr = rb_entry(parent, struct garp_attr, node); d = garp_attr_cmp(attr, data, len, type); - if (d < 0) + if (d > 0) p = &parent->rb_left; - else if (d > 0) + else if (d < 0) p = &parent->rb_right; else { /* The attribute already exists; re-use it. */ diff --git a/net/802/hippi.c b/net/802/hippi.c index 056794e6637..51a1f530417 100644 --- a/net/802/hippi.c +++ b/net/802/hippi.c @@ -45,7 +45,7 @@ static int hippi_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - const void *daddr, const void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned int len) { struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN); struct hippi_cb *hcb = (struct hippi_cb *) skb->cb; diff --git a/net/802/p8022.c b/net/802/p8022.c index 7f353c4f437..0bda8de7df5 100644 --- a/net/802/p8022.c +++ b/net/802/p8022.c @@ -1,6 +1,5 @@ /* - * NET3: Support for 802.2 demultiplexing off Ethernet (Token ring - * is kept separate see p8022tr.c) + * NET3: Support for 802.2 demultiplexing off Ethernet * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version diff --git a/net/802/stp.c b/net/802/stp.c index 15540b7323c..2c40ba0ec11 100644 --- a/net/802/stp.c +++ b/net/802/stp.c @@ -46,7 +46,7 @@ static int stp_pdu_rcv(struct sk_buff *skb, struct net_device *dev, proto = rcu_dereference(garp_protos[eh->h_dest[5] - GARP_ADDR_MIN]); if (proto && - compare_ether_addr(eh->h_dest, proto->group_address)) + !ether_addr_equal(eh->h_dest, proto->group_address)) goto err; } else proto = rcu_dereference(stp_proto); diff --git a/net/802/tr.c b/net/802/tr.c deleted file mode 100644 index b9a3a145e34..00000000000 --- a/net/802/tr.c +++ /dev/null @@ -1,676 +0,0 @@ -/* - * NET3: Token ring device handling subroutines - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software 
Foundation; either version - * 2 of the License, or (at your option) any later version. - * - * Fixes: 3 Feb 97 Paul Norton <pnorton@cts.com> Minor routing fixes. - * Added rif table to /proc/net/tr_rif and rif timeout to - * /proc/sys/net/token-ring/rif_timeout. - * 22 Jun 98 Paul Norton <p.norton@computer.org> Rearranged - * tr_header and tr_type_trans to handle passing IPX SNAP and - * 802.2 through the correct layers. Eliminated tr_reformat. - * - */ - -#include <asm/uaccess.h> -#include <linux/module.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/jiffies.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/socket.h> -#include <linux/in.h> -#include <linux/inet.h> -#include <linux/netdevice.h> -#include <linux/trdevice.h> -#include <linux/skbuff.h> -#include <linux/errno.h> -#include <linux/timer.h> -#include <linux/net.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/init.h> -#include <linux/sysctl.h> -#include <linux/slab.h> -#include <net/arp.h> -#include <net/net_namespace.h> - -static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev); -static void rif_check_expire(unsigned long dummy); - -#define TR_SR_DEBUG 0 - -/* - * Each RIF entry we learn is kept this way - */ - -struct rif_cache { - unsigned char addr[TR_ALEN]; - int iface; - __be16 rcf; - __be16 rseg[8]; - struct rif_cache *next; - unsigned long last_used; - unsigned char local_ring; -}; - -#define RIF_TABLE_SIZE 32 - -/* - * We hash the RIF cache 32 ways. We do after all have to look it - * up a lot. - */ - -static struct rif_cache *rif_table[RIF_TABLE_SIZE]; - -static DEFINE_SPINLOCK(rif_lock); - - -/* - * Garbage disposal timer. - */ - -static struct timer_list rif_timer; - -static int sysctl_tr_rif_timeout = 60*10*HZ; - -static inline unsigned long rif_hash(const unsigned char *addr) -{ - unsigned long x; - - x = addr[0]; - x = (x << 2) ^ addr[1]; - x = (x << 2) ^ addr[2]; - x = (x << 2) ^ addr[3]; - x = (x << 2) ^ addr[4]; - x = (x << 2) ^ addr[5]; - - x ^= x >> 8; - - return x & (RIF_TABLE_SIZE - 1); -} - -/* - * Put the headers on a token ring packet. Token ring source routing - * makes this a little more exciting than on ethernet. - */ - -static int tr_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, - const void *daddr, const void *saddr, unsigned len) -{ - struct trh_hdr *trh; - int hdr_len; - - /* - * Add the 802.2 SNAP header if IP as the IPv4/IPv6 code calls - * dev->hard_header directly. - */ - if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP) - { - struct trllc *trllc; - - hdr_len = sizeof(struct trh_hdr) + sizeof(struct trllc); - trh = (struct trh_hdr *)skb_push(skb, hdr_len); - trllc = (struct trllc *)(trh+1); - trllc->dsap = trllc->ssap = EXTENDED_SAP; - trllc->llc = UI_CMD; - trllc->protid[0] = trllc->protid[1] = trllc->protid[2] = 0x00; - trllc->ethertype = htons(type); - } - else - { - hdr_len = sizeof(struct trh_hdr); - trh = (struct trh_hdr *)skb_push(skb, hdr_len); - } - - trh->ac=AC; - trh->fc=LLC_FRAME; - - if(saddr) - memcpy(trh->saddr,saddr,dev->addr_len); - else - memcpy(trh->saddr,dev->dev_addr,dev->addr_len); - - /* - * Build the destination and then source route the frame - */ - - if(daddr) - { - memcpy(trh->daddr,daddr,dev->addr_len); - tr_source_route(skb, trh, dev); - return hdr_len; - } - - return -hdr_len; -} - -/* - * A neighbour discovery of some species (eg arp) has completed. We - * can now send the packet. 
- */ - -static int tr_rebuild_header(struct sk_buff *skb) -{ - struct trh_hdr *trh=(struct trh_hdr *)skb->data; - struct trllc *trllc=(struct trllc *)(skb->data+sizeof(struct trh_hdr)); - struct net_device *dev = skb->dev; - - /* - * FIXME: We don't yet support IPv6 over token rings - */ - - if(trllc->ethertype != htons(ETH_P_IP)) { - printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(trllc->ethertype)); - return 0; - } - -#ifdef CONFIG_INET - if(arp_find(trh->daddr, skb)) { - return 1; - } - else -#endif - { - tr_source_route(skb,trh,dev); - return 0; - } -} - -/* - * Some of this is a bit hackish. We intercept RIF information - * used for source routing. We also grab IP directly and don't feed - * it via SNAP. - */ - -__be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev) -{ - - struct trh_hdr *trh; - struct trllc *trllc; - unsigned riflen=0; - - skb->dev = dev; - skb_reset_mac_header(skb); - trh = tr_hdr(skb); - - if(trh->saddr[0] & TR_RII) - riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8; - - trllc = (struct trllc *)(skb->data+sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen); - - skb_pull(skb,sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen); - - if(*trh->daddr & 0x80) - { - if(!memcmp(trh->daddr,dev->broadcast,TR_ALEN)) - skb->pkt_type=PACKET_BROADCAST; - else - skb->pkt_type=PACKET_MULTICAST; - } - else if ( (trh->daddr[0] & 0x01) && (trh->daddr[1] & 0x00) && (trh->daddr[2] & 0x5E)) - { - skb->pkt_type=PACKET_MULTICAST; - } - else if(dev->flags & IFF_PROMISC) - { - if(memcmp(trh->daddr, dev->dev_addr, TR_ALEN)) - skb->pkt_type=PACKET_OTHERHOST; - } - - if ((skb->pkt_type != PACKET_BROADCAST) && - (skb->pkt_type != PACKET_MULTICAST)) - tr_add_rif_info(trh,dev) ; - - /* - * Strip the SNAP header from ARP packets since we don't - * pass them through to the 802.2/SNAP layers. - */ - - if (trllc->dsap == EXTENDED_SAP && - (trllc->ethertype == htons(ETH_P_IP) || - trllc->ethertype == htons(ETH_P_IPV6) || - trllc->ethertype == htons(ETH_P_ARP))) - { - skb_pull(skb, sizeof(struct trllc)); - return trllc->ethertype; - } - - return htons(ETH_P_TR_802_2); -} - -/* - * We try to do source routing... - */ - -void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh, - struct net_device *dev) -{ - int slack; - unsigned int hash; - struct rif_cache *entry; - unsigned char *olddata; - unsigned long flags; - static const unsigned char mcast_func_addr[] - = {0xC0,0x00,0x00,0x04,0x00,0x00}; - - spin_lock_irqsave(&rif_lock, flags); - - /* - * Broadcasts are single route as stated in RFC 1042 - */ - if( (!memcmp(&(trh->daddr[0]),&(dev->broadcast[0]),TR_ALEN)) || - (!memcmp(&(trh->daddr[0]),&(mcast_func_addr[0]), TR_ALEN)) ) - { - trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK) - | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST); - trh->saddr[0]|=TR_RII; - } - else - { - hash = rif_hash(trh->daddr); - /* - * Walk the hash table and look for an entry - */ - for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->daddr[0]),TR_ALEN);entry=entry->next); - - /* - * If we found an entry we can route the frame. 
- */ - if(entry) - { -#if TR_SR_DEBUG -printk("source routing for %pM\n", trh->daddr); -#endif - if(!entry->local_ring && (ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8) - { - trh->rcf=entry->rcf; - memcpy(&trh->rseg[0],&entry->rseg[0],8*sizeof(unsigned short)); - trh->rcf^=htons(TR_RCF_DIR_BIT); - trh->rcf&=htons(0x1fff); /* Issam Chehab <ichehab@madge1.demon.co.uk> */ - - trh->saddr[0]|=TR_RII; -#if TR_SR_DEBUG - printk("entry found with rcf %04x\n", entry->rcf); - } - else - { - printk("entry found but without rcf length, local=%02x\n", entry->local_ring); -#endif - } - entry->last_used=jiffies; - } - else - { - /* - * Without the information we simply have to shout - * on the wire. The replies should rapidly clean this - * situation up. - */ - trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK) - | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST); - trh->saddr[0]|=TR_RII; -#if TR_SR_DEBUG - printk("no entry in rif table found - broadcasting frame\n"); -#endif - } - } - - /* Compress the RIF here so we don't have to do it in the driver(s) */ - if (!(trh->saddr[0] & 0x80)) - slack = 18; - else - slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8); - olddata = skb->data; - spin_unlock_irqrestore(&rif_lock, flags); - - skb_pull(skb, slack); - memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack); -} - -/* - * We have learned some new RIF information for our source - * routing. - */ - -static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev) -{ - unsigned int hash, rii_p = 0; - unsigned long flags; - struct rif_cache *entry; - unsigned char saddr0; - - spin_lock_irqsave(&rif_lock, flags); - saddr0 = trh->saddr[0]; - - /* - * Firstly see if the entry exists - */ - - if(trh->saddr[0] & TR_RII) - { - trh->saddr[0]&=0x7f; - if (((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8) > 2) - { - rii_p = 1; - } - } - - hash = rif_hash(trh->saddr); - for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);entry=entry->next); - - if(entry==NULL) - { -#if TR_SR_DEBUG - printk("adding rif_entry: addr:%pM rcf:%04X\n", - trh->saddr, ntohs(trh->rcf)); -#endif - /* - * Allocate our new entry. A failure to allocate loses - * use the information. This is harmless. - * - * FIXME: We ought to keep some kind of cache size - * limiting and adjust the timers to suit. - */ - entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC); - - if(!entry) - { - printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n"); - spin_unlock_irqrestore(&rif_lock, flags); - return; - } - - memcpy(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN); - entry->iface = dev->ifindex; - entry->next=rif_table[hash]; - entry->last_used=jiffies; - rif_table[hash]=entry; - - if (rii_p) - { - entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK); - memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short)); - entry->local_ring = 0; - } - else - { - entry->local_ring = 1; - } - } - else /* Y. 
Tahara added */ - { - /* - * Update existing entries - */ - if (!entry->local_ring) - if (entry->rcf != (trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK)) && - !(trh->rcf & htons(TR_RCF_BROADCAST_MASK))) - { -#if TR_SR_DEBUG -printk("updating rif_entry: addr:%pM rcf:%04X\n", - trh->saddr, ntohs(trh->rcf)); -#endif - entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK); - memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short)); - } - entry->last_used=jiffies; - } - trh->saddr[0]=saddr0; /* put the routing indicator back for tcpdump */ - spin_unlock_irqrestore(&rif_lock, flags); -} - -/* - * Scan the cache with a timer and see what we need to throw out. - */ - -static void rif_check_expire(unsigned long dummy) -{ - int i; - unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2; - - spin_lock_irqsave(&rif_lock, flags); - - for(i =0; i < RIF_TABLE_SIZE; i++) { - struct rif_cache *entry, **pentry; - - pentry = rif_table+i; - while((entry=*pentry) != NULL) { - unsigned long expires - = entry->last_used + sysctl_tr_rif_timeout; - - if (time_before_eq(expires, jiffies)) { - *pentry = entry->next; - kfree(entry); - } else { - pentry = &entry->next; - - if (time_before(expires, next_interval)) - next_interval = expires; - } - } - } - - spin_unlock_irqrestore(&rif_lock, flags); - - mod_timer(&rif_timer, next_interval); - -} - -/* - * Generate the /proc/net information for the token ring RIF - * routing. - */ - -#ifdef CONFIG_PROC_FS - -static struct rif_cache *rif_get_idx(loff_t pos) -{ - int i; - struct rif_cache *entry; - loff_t off = 0; - - for(i = 0; i < RIF_TABLE_SIZE; i++) - for(entry = rif_table[i]; entry; entry = entry->next) { - if (off == pos) - return entry; - ++off; - } - - return NULL; -} - -static void *rif_seq_start(struct seq_file *seq, loff_t *pos) - __acquires(&rif_lock) -{ - spin_lock_irq(&rif_lock); - - return *pos ? 
rif_get_idx(*pos - 1) : SEQ_START_TOKEN; -} - -static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - int i; - struct rif_cache *ent = v; - - ++*pos; - - if (v == SEQ_START_TOKEN) { - i = -1; - goto scan; - } - - if (ent->next) - return ent->next; - - i = rif_hash(ent->addr); - scan: - while (++i < RIF_TABLE_SIZE) { - if ((ent = rif_table[i]) != NULL) - return ent; - } - return NULL; -} - -static void rif_seq_stop(struct seq_file *seq, void *v) - __releases(&rif_lock) -{ - spin_unlock_irq(&rif_lock); -} - -static int rif_seq_show(struct seq_file *seq, void *v) -{ - int j, rcf_len, segment, brdgnmb; - struct rif_cache *entry = v; - - if (v == SEQ_START_TOKEN) - seq_puts(seq, - "if TR address TTL rcf routing segments\n"); - else { - struct net_device *dev = dev_get_by_index(&init_net, entry->iface); - long ttl = (long) (entry->last_used + sysctl_tr_rif_timeout) - - (long) jiffies; - - seq_printf(seq, "%s %pM %7li ", - dev?dev->name:"?", - entry->addr, - ttl/HZ); - - if (entry->local_ring) - seq_puts(seq, "local\n"); - else { - - seq_printf(seq, "%04X", ntohs(entry->rcf)); - rcf_len = ((ntohs(entry->rcf) & TR_RCF_LEN_MASK)>>8)-2; - if (rcf_len) - rcf_len >>= 1; - for(j = 1; j < rcf_len; j++) { - if(j==1) { - segment=ntohs(entry->rseg[j-1])>>4; - seq_printf(seq," %03X",segment); - } - - segment=ntohs(entry->rseg[j])>>4; - brdgnmb=ntohs(entry->rseg[j-1])&0x00f; - seq_printf(seq,"-%01X-%03X",brdgnmb,segment); - } - seq_putc(seq, '\n'); - } - - if (dev) - dev_put(dev); - } - return 0; -} - - -static const struct seq_operations rif_seq_ops = { - .start = rif_seq_start, - .next = rif_seq_next, - .stop = rif_seq_stop, - .show = rif_seq_show, -}; - -static int rif_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &rif_seq_ops); -} - -static const struct file_operations rif_seq_fops = { - .owner = THIS_MODULE, - .open = rif_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -#endif - -static const struct header_ops tr_header_ops = { - .create = tr_header, - .rebuild= tr_rebuild_header, -}; - -static void tr_setup(struct net_device *dev) -{ - /* - * Configure and register - */ - - dev->header_ops = &tr_header_ops; - - dev->type = ARPHRD_IEEE802_TR; - dev->hard_header_len = TR_HLEN; - dev->mtu = 2000; - dev->addr_len = TR_ALEN; - dev->tx_queue_len = 100; /* Long queues on tr */ - - memset(dev->broadcast,0xFF, TR_ALEN); - - /* New-style flags. */ - dev->flags = IFF_BROADCAST | IFF_MULTICAST ; -} - -/** - * alloc_trdev - Register token ring device - * @sizeof_priv: Size of additional driver-private structure to be allocated - * for this token ring device - * - * Fill in the fields of the device structure with token ring-generic values. - * - * Constructs a new net device, complete with a private data area of - * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for - * this private data area. - */ -struct net_device *alloc_trdev(int sizeof_priv) -{ - return alloc_netdev(sizeof_priv, "tr%d", tr_setup); -} - -#ifdef CONFIG_SYSCTL -static struct ctl_table tr_table[] = { - { - .procname = "rif_timeout", - .data = &sysctl_tr_rif_timeout, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec - }, - { }, -}; - -static __initdata struct ctl_path tr_path[] = { - { .procname = "net", }, - { .procname = "token-ring", }, - { } -}; -#endif - -/* - * Called during bootup. We don't actually have to initialise - * too much for this. 
- */ - -static int __init rif_init(void) -{ - rif_timer.expires = jiffies + sysctl_tr_rif_timeout; - setup_timer(&rif_timer, rif_check_expire, 0); - add_timer(&rif_timer); -#ifdef CONFIG_SYSCTL - register_sysctl_paths(tr_path, tr_table); -#endif - proc_net_fops_create(&init_net, "tr_rif", S_IRUGO, &rif_seq_fops); - return 0; -} - -module_init(rif_init); - -EXPORT_SYMBOL(tr_type_trans); -EXPORT_SYMBOL(alloc_trdev); - -MODULE_LICENSE("GPL"); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index efea35b02e7..6089f0cf23b 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -266,19 +266,19 @@ static void vlan_sync_address(struct net_device *dev, struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); /* May be called without an actual change */ - if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr)) + if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr)) return; /* vlan address was different from the old address and is equal to * the new address */ - if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && - !compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) + if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && + ether_addr_equal(vlandev->dev_addr, dev->dev_addr)) dev_uc_del(dev, vlandev->dev_addr); /* vlan address was equal to the old address and is different from * the new address */ - if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && - compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) + if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && + !ether_addr_equal(vlandev->dev_addr, dev->dev_addr)) dev_uc_add(dev, vlandev->dev_addr); memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN); diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 4d39d802be2..8ca533c95de 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -31,8 +31,7 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler) /* Our lower layer thinks this is not local, let's make sure. * This allows the VLAN to have a different MAC than the * underlying device, and still route correctly. 
*/ - if (!compare_ether_addr(eth_hdr(skb)->h_dest, - vlan_dev->dev_addr)) + if (ether_addr_equal(eth_hdr(skb)->h_dest, vlan_dev->dev_addr)) skb->pkt_type = PACKET_HOST; } diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 9757c193c86..da1bc9c3cf3 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -277,7 +277,7 @@ static int vlan_dev_open(struct net_device *dev) !(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) return -ENETDOWN; - if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) { + if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) { err = dev_uc_add(real_dev, dev->dev_addr); if (err < 0) goto out; @@ -307,7 +307,7 @@ clear_allmulti: if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(real_dev, -1); del_unicast: - if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) + if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); out: netif_carrier_off(dev); @@ -326,7 +326,7 @@ static int vlan_dev_stop(struct net_device *dev) if (dev->flags & IFF_PROMISC) dev_set_promiscuity(real_dev, -1); - if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) + if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); netif_carrier_off(dev); @@ -345,13 +345,13 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p) if (!(dev->flags & IFF_UP)) goto out; - if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) { + if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) { err = dev_uc_add(real_dev, addr->sa_data); if (err < 0) return err; } - if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) + if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); out: diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c index 50711368ad6..708c80ea187 100644 --- a/net/8021q/vlan_netlink.c +++ b/net/8021q/vlan_netlink.c @@ -166,11 +166,13 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev) struct nlattr *nest; unsigned int i; - NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id); + if (nla_put_u16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id)) + goto nla_put_failure; if (vlan->flags) { f.flags = vlan->flags; f.mask = ~0; - NLA_PUT(skb, IFLA_VLAN_FLAGS, sizeof(f), &f); + if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(f), &f)) + goto nla_put_failure; } if (vlan->nr_ingress_mappings) { nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS); @@ -183,8 +185,9 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev) m.from = i; m.to = vlan->ingress_priority_map[i]; - NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING, - sizeof(m), &m); + if (nla_put(skb, IFLA_VLAN_QOS_MAPPING, + sizeof(m), &m)) + goto nla_put_failure; } nla_nest_end(skb, nest); } @@ -202,8 +205,9 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev) m.from = pm->priority; m.to = (pm->vlan_qos >> 13) & 0x7; - NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING, - sizeof(m), &m); + if (nla_put(skb, IFLA_VLAN_QOS_MAPPING, + sizeof(m), &m)) + goto nla_put_failure; } } nla_nest_end(skb, nest); diff --git a/net/9p/client.c b/net/9p/client.c index b23a17c431c..a170893d70e 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -1530,7 +1530,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset, p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n", - fid->fid, (long long unsigned) offset, count); + fid->fid, (unsigned long long) offset, count); err = 0; clnt = fid->clnt; @@ -1605,7 +1605,7 @@ 
p9_client_write(struct p9_fid *fid, char *data, const char __user *udata, struct p9_req_t *req; p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n", - fid->fid, (long long unsigned) offset, count); + fid->fid, (unsigned long long) offset, count); err = 0; clnt = fid->clnt; @@ -2040,7 +2040,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset) char *dataptr; p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n", - fid->fid, (long long unsigned) offset, count); + fid->fid, (unsigned long long) offset, count); err = 0; clnt = fid->clnt; diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index fccae26fa67..6449bae1570 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -513,7 +513,7 @@ error: clear_bit(Wworksched, &m->wsched); } -static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) +static int p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key) { struct p9_poll_wait *pwait = container_of(wait, struct p9_poll_wait, wait); diff --git a/net/Kconfig b/net/Kconfig index e07272d0bb2..1e47bd03dde 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -207,10 +207,10 @@ source "net/ipx/Kconfig" source "drivers/net/appletalk/Kconfig" source "net/x25/Kconfig" source "net/lapb/Kconfig" -source "net/econet/Kconfig" source "net/wanrouter/Kconfig" source "net/phonet/Kconfig" source "net/ieee802154/Kconfig" +source "net/mac802154/Kconfig" source "net/sched/Kconfig" source "net/dcb/Kconfig" source "net/dns_resolver/Kconfig" @@ -295,7 +295,7 @@ config NET_TCPPROBE module will be called tcp_probe. config NET_DROP_MONITOR - boolean "Network packet drop alerting service" + tristate "Network packet drop alerting service" depends on INET && EXPERIMENTAL && TRACEPOINTS ---help--- This feature provides an alerting service to userspace in the diff --git a/net/Makefile b/net/Makefile index ad432fa4d93..4f4ee083064 100644 --- a/net/Makefile +++ b/net/Makefile @@ -40,7 +40,6 @@ obj-$(CONFIG_AF_RXRPC) += rxrpc/ obj-$(CONFIG_ATM) += atm/ obj-$(CONFIG_L2TP) += l2tp/ obj-$(CONFIG_DECNET) += decnet/ -obj-$(CONFIG_ECONET) += econet/ obj-$(CONFIG_PHONET) += phonet/ ifneq ($(CONFIG_VLAN_8021Q),) obj-y += 8021q/ @@ -60,6 +59,7 @@ ifneq ($(CONFIG_DCB),) obj-y += dcb/ endif obj-$(CONFIG_IEEE802154) += ieee802154/ +obj-$(CONFIG_MAC802154) += mac802154/ ifeq ($(CONFIG_NET),y) obj-$(CONFIG_SYSCTL) += sysctl_net.o diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index bfa9ab93eda..0301b328cf0 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -63,7 +63,7 @@ #include <net/tcp_states.h> #include <net/route.h> #include <linux/atalk.h> -#include "../core/kmap_skb.h" +#include <linux/highmem.h> struct datalink_proto *ddp_dl, *aarp_dl; static const struct proto_ops atalk_dgram_ops; @@ -960,10 +960,10 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset, if (copy > len) copy = len; - vaddr = kmap_skb_frag(frag); + vaddr = kmap_atomic(skb_frag_page(frag)); sum = atalk_sum_partial(vaddr + frag->page_offset + offset - start, copy, sum); - kunmap_skb_frag(vaddr); + kunmap_atomic(vaddr); if (!(len -= copy)) return sum; diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c index 04e9c0da7aa..ebb864361f7 100644 --- a/net/appletalk/sysctl_net_atalk.c +++ b/net/appletalk/sysctl_net_atalk.c @@ -42,20 +42,14 @@ static struct ctl_table atalk_table[] = { { }, }; -static struct ctl_path atalk_path[] = { - { .procname = "net", }, - { .procname = "appletalk", }, - { } -}; - static struct 
ctl_table_header *atalk_table_header; void atalk_register_sysctl(void) { - atalk_table_header = register_sysctl_paths(atalk_path, atalk_table); + atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table); } void atalk_unregister_sysctl(void) { - unregister_sysctl_table(atalk_table_header); + unregister_net_sysctl_table(atalk_table_header); } diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 353fccf1cde..4819d31533e 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -73,7 +73,7 @@ struct br2684_vcc { #ifdef CONFIG_ATM_BR2684_IPFILTER struct br2684_filter filter; #endif /* CONFIG_ATM_BR2684_IPFILTER */ - unsigned copies_needed, copies_failed; + unsigned int copies_needed, copies_failed; }; struct br2684_dev { diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c index 62dc8bfe6fe..bbd3b639992 100644 --- a/net/atm/ioctl.c +++ b/net/atm/ioctl.c @@ -97,9 +97,8 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd, error = sock_get_timestampns(sk, argp); goto done; case ATM_SETSC: - if (net_ratelimit()) - pr_warning("ATM_SETSC is obsolete; used by %s:%d\n", - current->comm, task_pid_nr(current)); + net_warn_ratelimited("ATM_SETSC is obsolete; used by %s:%d\n", + current->comm, task_pid_nr(current)); error = 0; goto done; case ATMSIGD_CTRL: @@ -123,8 +122,7 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd, work for 32-bit userspace. TBH I don't really want to think about it at all. dwmw2. */ if (compat) { - if (net_ratelimit()) - pr_warning("32-bit task cannot be atmsigd\n"); + net_warn_ratelimited("32-bit task cannot be atmsigd\n"); error = -EINVAL; goto done; } diff --git a/net/atm/lec.c b/net/atm/lec.c index f1964caa0f8..a7d172105c9 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -26,11 +26,6 @@ #include <linux/spinlock.h> #include <linux/seq_file.h> -/* TokenRing if needed */ -#ifdef CONFIG_TR -#include <linux/trdevice.h> -#endif - /* And atm device */ #include <linux/atmdev.h> #include <linux/atmlec.h> @@ -163,50 +158,6 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev) #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ /* - * Modelled after tr_type_trans - * All multicast and ARE or STE frames go to BUS. - * Non source routed frames go by destination address. - * Last hop source routed frames go by destination address. - * Not last hop source routed frames go by _next_ route descriptor. - * Returns pointer to destination MAC address or fills in rdesc - * and returns NULL. 
- */ -#ifdef CONFIG_TR -static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc) -{ - struct trh_hdr *trh; - unsigned int riflen, num_rdsc; - - trh = (struct trh_hdr *)packet; - if (trh->daddr[0] & (uint8_t) 0x80) - return bus_mac; /* multicast */ - - if (trh->saddr[0] & TR_RII) { - riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8; - if ((ntohs(trh->rcf) >> 13) != 0) - return bus_mac; /* ARE or STE */ - } else - return trh->daddr; /* not source routed */ - - if (riflen < 6) - return trh->daddr; /* last hop, source routed */ - - /* riflen is 6 or more, packet has more than one route descriptor */ - num_rdsc = (riflen / 2) - 1; - memset(rdesc, 0, ETH_ALEN); - /* offset 4 comes from LAN destination field in LE control frames */ - if (trh->rcf & htons((uint16_t) TR_RCF_DIR_BIT)) - memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(__be16)); - else { - memcpy(&rdesc[4], &trh->rseg[1], sizeof(__be16)); - rdesc[5] = ((ntohs(trh->rseg[0]) & 0x000f) | (rdesc[5] & 0xf0)); - } - - return NULL; -} -#endif /* CONFIG_TR */ - -/* * Open/initialize the netdevice. This is called (in the current kernel) * sometime after booting when the 'ifconfig' program is run. * @@ -257,9 +208,6 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb, struct lec_arp_table *entry; unsigned char *dst; int min_frame_size; -#ifdef CONFIG_TR - unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */ -#endif int is_rdesc; pr_debug("called\n"); @@ -290,24 +238,10 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb, } skb_push(skb, 2); - /* Put le header to place, works for TokenRing too */ + /* Put le header to place */ lec_h = (struct lecdatahdr_8023 *)skb->data; lec_h->le_header = htons(priv->lecid); -#ifdef CONFIG_TR - /* - * Ugly. Use this to realign Token Ring packets for - * e.g. PCA-200E driver. 
- */ - if (priv->is_trdev) { - skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN); - kfree_skb(skb); - if (skb2 == NULL) - return NETDEV_TX_OK; - skb = skb2; - } -#endif - #if DUMP_PACKETS >= 2 #define MAX_DUMP_SKB 99 #elif DUMP_PACKETS >= 1 @@ -321,12 +255,7 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb, #endif /* DUMP_PACKETS >= 1 */ /* Minimum ethernet-frame size */ -#ifdef CONFIG_TR - if (priv->is_trdev) - min_frame_size = LEC_MINIMUM_8025_SIZE; - else -#endif - min_frame_size = LEC_MINIMUM_8023_SIZE; + min_frame_size = LEC_MINIMUM_8023_SIZE; if (skb->len < min_frame_size) { if ((skb->len + skb_tailroom(skb)) < min_frame_size) { skb2 = skb_copy_expand(skb, 0, @@ -345,15 +274,6 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb, /* Send to right vcc */ is_rdesc = 0; dst = lec_h->h_dest; -#ifdef CONFIG_TR - if (priv->is_trdev) { - dst = get_tr_dst(skb->data + 2, rdesc); - if (dst == NULL) { - dst = rdesc; - is_rdesc = 1; - } - } -#endif entry = NULL; vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry); pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n", @@ -710,12 +630,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb) dev_kfree_skb(skb); return; } -#ifdef CONFIG_TR - if (priv->is_trdev) - dst = ((struct lecdatahdr_8025 *)skb->data)->h_dest; - else -#endif - dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest; + dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest; /* * If this is a Data Direct VCC, and the VCC does not match @@ -723,16 +638,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb) */ spin_lock_irqsave(&priv->lec_arp_lock, flags); if (lec_is_data_direct(vcc)) { -#ifdef CONFIG_TR - if (priv->is_trdev) - src = - ((struct lecdatahdr_8025 *)skb->data)-> - h_source; - else -#endif - src = - ((struct lecdatahdr_8023 *)skb->data)-> - h_source; + src = ((struct lecdatahdr_8023 *)skb->data)->h_source; entry = lec_arp_find(priv, src); if (entry && entry->vcc != vcc) { lec_arp_remove(priv, entry); @@ -750,12 +656,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb) if (!hlist_empty(&priv->lec_arp_empty_ones)) lec_arp_check_empties(priv, vcc, skb); skb_pull(skb, 2); /* skip lec_id */ -#ifdef CONFIG_TR - if (priv->is_trdev) - skb->protocol = tr_type_trans(skb, dev); - else -#endif - skb->protocol = eth_type_trans(skb, dev); + skb->protocol = eth_type_trans(skb, dev); dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); @@ -827,27 +728,13 @@ static int lecd_attach(struct atm_vcc *vcc, int arg) i = 0; else i = arg; -#ifdef CONFIG_TR if (arg >= MAX_LEC_ITF) return -EINVAL; -#else /* Reserve the top NUM_TR_DEVS for TR */ - if (arg >= (MAX_LEC_ITF - NUM_TR_DEVS)) - return -EINVAL; -#endif if (!dev_lec[i]) { - int is_trdev, size; - - is_trdev = 0; - if (i >= (MAX_LEC_ITF - NUM_TR_DEVS)) - is_trdev = 1; + int size; size = sizeof(struct lec_priv); -#ifdef CONFIG_TR - if (is_trdev) - dev_lec[i] = alloc_trdev(size); - else -#endif - dev_lec[i] = alloc_etherdev(size); + dev_lec[i] = alloc_etherdev(size); if (!dev_lec[i]) return -ENOMEM; dev_lec[i]->netdev_ops = &lec_netdev_ops; @@ -858,7 +745,6 @@ static int lecd_attach(struct atm_vcc *vcc, int arg) } priv = netdev_priv(dev_lec[i]); - priv->is_trdev = is_trdev; } else { priv = netdev_priv(dev_lec[i]); if (priv->lecd) @@ -1255,7 +1141,7 @@ static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst, struct sk_buff *skb; struct lec_priv *priv = netdev_priv(dev); - if (compare_ether_addr(lan_dst, dev->dev_addr)) + if 
(!ether_addr_equal(lan_dst, dev->dev_addr)) return 0; /* not our mac address */ kfree(priv->tlvs); /* NULL if there was no previous association */ @@ -1662,7 +1548,7 @@ static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])]; hlist_for_each_entry(entry, node, head, next) { - if (!compare_ether_addr(mac_addr, entry->mac_addr)) + if (ether_addr_equal(mac_addr, entry->mac_addr)) return entry; } return NULL; @@ -1849,7 +1735,7 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, case 1: return priv->mcast_vcc; case 2: /* LANE2 wants arp for multicast addresses */ - if (!compare_ether_addr(mac_to_find, bus_mac)) + if (ether_addr_equal(mac_to_find, bus_mac)) return priv->mcast_vcc; break; default: @@ -2372,15 +2258,7 @@ lec_arp_check_empties(struct lec_priv *priv, struct hlist_node *node, *next; struct lec_arp_table *entry, *tmp; struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data; - unsigned char *src; -#ifdef CONFIG_TR - struct lecdatahdr_8025 *tr_hdr = (struct lecdatahdr_8025 *)skb->data; - - if (priv->is_trdev) - src = tr_hdr->h_source; - else -#endif - src = hdr->h_source; + unsigned char *src = hdr->h_source; spin_lock_irqsave(&priv->lec_arp_lock, flags); hlist_for_each_entry_safe(entry, node, next, diff --git a/net/atm/lec.h b/net/atm/lec.h index dfc07196646..c730e57de19 100644 --- a/net/atm/lec.h +++ b/net/atm/lec.h @@ -142,7 +142,6 @@ struct lec_priv { int itfnum; /* e.g. 2 for lec2, 5 for lec5 */ struct lane2_ops *lane2_ops; /* can be NULL for LANE v1 */ int is_proxy; /* bridge between ATM and Ethernet */ - int is_trdev; /* Device type, 0 = Ethernet, 1 = TokenRing */ }; struct lec_vcc_priv { diff --git a/net/atm/mpc.c b/net/atm/mpc.c index aa972409f09..d4cc1be5c36 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -592,8 +592,7 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb, goto non_ip; while (i < mpc->number_of_mps_macs) { - if (!compare_ether_addr(eth->h_dest, - (mpc->mps_macs + i*ETH_ALEN))) + if (ether_addr_equal(eth->h_dest, mpc->mps_macs + i * ETH_ALEN)) if (send_via_shortcut(skb, mpc) == 0) /* try shortcut */ return NETDEV_TX_OK; i++; diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c index 53e50029227..5bdd300db0f 100644 --- a/net/atm/mpoa_proc.c +++ b/net/atm/mpoa_proc.c @@ -207,7 +207,7 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff, size_t nbytes, loff_t *ppos) { char *page, *p; - unsigned len; + unsigned int len; if (nbytes == 0) return 0; diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c index 614d3fc47ed..ce1e59fdae7 100644 --- a/net/atm/pppoatm.c +++ b/net/atm/pppoatm.c @@ -62,12 +62,25 @@ struct pppoatm_vcc { void (*old_pop)(struct atm_vcc *, struct sk_buff *); /* keep old push/pop for detaching */ enum pppoatm_encaps encaps; + atomic_t inflight; + unsigned long blocked; int flags; /* SC_COMP_PROT - compress protocol */ struct ppp_channel chan; /* interface to generic ppp layer */ struct tasklet_struct wakeup_tasklet; }; /* + * We want to allow two packets in the queue. The one that's currently in + * flight, and *one* queued up ready for the ATM device to send immediately + * from its TX done IRQ. We want to be able to use atomic_inc_not_zero(), so + * inflight == -2 represents an empty queue, -1 one packet, and zero means + * there are two packets in the queue. 
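The counting scheme described in that comment is easiest to see in isolation. Below is a minimal, hypothetical userspace sketch of the same idea: a plain int stands in for the atomic_t, and helper names such as may_send() and tx_done() are invented for illustration only, they are not the pppoatm functions themselves.

#include <stdbool.h>
#include <stdio.h>

#define NONE_INFLIGHT -2	/* empty queue, as in the comment above */

static int inflight = NONE_INFLIGHT;

/* stand-in for atomic_inc_not_zero(): increment unless the value is 0 */
static bool inc_not_zero(int *v)
{
	if (*v == 0)
		return false;
	(*v)++;
	return true;
}

static bool may_send(void)	/* loosely models the "may send" check */
{
	return inc_not_zero(&inflight);
}

static void tx_done(void)	/* loosely models the pop path */
{
	inflight--;
}

int main(void)
{
	printf("%d\n", may_send());	/* 1: inflight -2 -> -1, one packet in flight */
	printf("%d\n", may_send());	/* 1: inflight -1 -> 0, one more queued up    */
	printf("%d\n", may_send());	/* 0: counter is 0, the third packet refused  */
	tx_done();			/* TX-done: inflight 0 -> -1                   */
	printf("%d\n", may_send());	/* 1: there is room again                      */
	return 0;
}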
+ */ +#define NONE_INFLIGHT -2 + +#define BLOCKED 0 + +/* * Header used for LLC Encapsulated PPP (4 bytes) followed by the LCP protocol * ID (0xC021) used in autodetection */ @@ -102,16 +115,30 @@ static void pppoatm_wakeup_sender(unsigned long arg) static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb) { struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc); + pvcc->old_pop(atmvcc, skb); + atomic_dec(&pvcc->inflight); + /* - * We don't really always want to do this since it's - * really inefficient - it would be much better if we could - * test if we had actually throttled the generic layer. - * Unfortunately then there would be a nasty SMP race where - * we could clear that flag just as we refuse another packet. - * For now we do the safe thing. + * We always used to run the wakeup tasklet unconditionally here, for + * fear of race conditions where we clear the BLOCKED flag just as we + * refuse another packet in pppoatm_send(). This was quite inefficient. + * + * In fact it's OK. The PPP core will only ever call pppoatm_send() + * while holding the channel->downl lock. And ppp_output_wakeup() as + * called by the tasklet will *also* grab that lock. So even if another + * CPU is in pppoatm_send() right now, the tasklet isn't going to race + * with it. The wakeup *will* happen after the other CPU is safely out + * of pppoatm_send() again. + * + * So if the CPU in pppoatm_send() has already set the BLOCKED bit and + * it about to return, that's fine. We trigger a wakeup which will + * happen later. And if the CPU in pppoatm_send() *hasn't* set the + * BLOCKED bit yet, that's fine too because of the double check in + * pppoatm_may_send() which is commented there. */ - tasklet_schedule(&pvcc->wakeup_tasklet); + if (test_and_clear_bit(BLOCKED, &pvcc->blocked)) + tasklet_schedule(&pvcc->wakeup_tasklet); } /* @@ -184,6 +211,51 @@ error: ppp_input_error(&pvcc->chan, 0); } +static inline int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size) +{ + /* + * It's not clear that we need to bother with using atm_may_send() + * to check we don't exceed sk->sk_sndbuf. If userspace sets a + * value of sk_sndbuf which is lower than the MTU, we're going to + * block for ever. But the code always did that before we introduced + * the packet count limit, so... + */ + if (atm_may_send(pvcc->atmvcc, size) && + atomic_inc_not_zero_hint(&pvcc->inflight, NONE_INFLIGHT)) + return 1; + + /* + * We use test_and_set_bit() rather than set_bit() here because + * we need to ensure there's a memory barrier after it. The bit + * *must* be set before we do the atomic_inc() on pvcc->inflight. + * There's no smp_mb__after_set_bit(), so it's this or abuse + * smp_mb__after_clear_bit(). + */ + test_and_set_bit(BLOCKED, &pvcc->blocked); + + /* + * We may have raced with pppoatm_pop(). If it ran for the + * last packet in the queue, *just* before we set the BLOCKED + * bit, then it might never run again and the channel could + * remain permanently blocked. Cope with that race by checking + * *again*. If it did run in that window, we'll have space on + * the queue now and can return success. It's harmless to leave + * the BLOCKED flag set, since it's only used as a trigger to + * run the wakeup tasklet. Another wakeup will never hurt. + * If pppoatm_pop() is running but hasn't got as far as making + * space on the queue yet, then it hasn't checked the BLOCKED + * flag yet either, so we're safe in that case too. It'll issue + * an "immediate" wakeup... 
where "immediate" actually involves + * taking the PPP channel's ->downl lock, which is held by the + * code path that calls pppoatm_send(), and is thus going to + * wait for us to finish. + */ + if (atm_may_send(pvcc->atmvcc, size) && + atomic_inc_not_zero(&pvcc->inflight)) + return 1; + + return 0; +} /* * Called by the ppp_generic.c to send a packet - returns true if packet * was accepted. If we return false, then it's our job to call @@ -207,7 +279,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb) struct sk_buff *n; n = skb_realloc_headroom(skb, LLC_LEN); if (n != NULL && - !atm_may_send(pvcc->atmvcc, n->truesize)) { + !pppoatm_may_send(pvcc, n->truesize)) { kfree_skb(n); goto nospace; } @@ -215,12 +287,12 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb) skb = n; if (skb == NULL) return DROP_PACKET; - } else if (!atm_may_send(pvcc->atmvcc, skb->truesize)) + } else if (!pppoatm_may_send(pvcc, skb->truesize)) goto nospace; memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN); break; case e_vc: - if (!atm_may_send(pvcc->atmvcc, skb->truesize)) + if (!pppoatm_may_send(pvcc, skb->truesize)) goto nospace; break; case e_autodetect: @@ -285,6 +357,9 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg) if (pvcc == NULL) return -ENOMEM; pvcc->atmvcc = atmvcc; + + /* Maximum is zero, so that we can use atomic_inc_not_zero() */ + atomic_set(&pvcc->inflight, NONE_INFLIGHT); pvcc->old_push = atmvcc->push; pvcc->old_pop = atmvcc->pop; pvcc->encaps = (enum pppoatm_encaps) be.encaps; diff --git a/net/atm/signaling.c b/net/atm/signaling.c index 509c8ac02b6..86767ca908a 100644 --- a/net/atm/signaling.c +++ b/net/atm/signaling.c @@ -166,7 +166,7 @@ void sigd_enq2(struct atm_vcc *vcc, enum atmsvc_msg_type type, { struct sk_buff *skb; struct atmsvc_msg *msg; - static unsigned session = 0; + static unsigned int session = 0; pr_debug("%d (0x%p)\n", (int)type, vcc); while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL))) diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 9d9a6a3edbd..051f7abae66 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1990,7 +1990,6 @@ static int __init ax25_init(void) sock_register(&ax25_family_ops); dev_add_pack(&ax25_packet_type); register_netdevice_notifier(&ax25_dev_notifier); - ax25_register_sysctl(); proc_net_fops_create(&init_net, "ax25_route", S_IRUGO, &ax25_route_fops); proc_net_fops_create(&init_net, "ax25", S_IRUGO, &ax25_info_fops); @@ -2013,7 +2012,6 @@ static void __exit ax25_exit(void) proc_net_remove(&init_net, "ax25_calls"); unregister_netdevice_notifier(&ax25_dev_notifier); - ax25_unregister_sysctl(); dev_remove_pack(&ax25_packet_type); diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c index d0de30e8959..3d106767b27 100644 --- a/net/ax25/ax25_dev.c +++ b/net/ax25/ax25_dev.c @@ -59,8 +59,6 @@ void ax25_dev_device_up(struct net_device *dev) return; } - ax25_unregister_sysctl(); - dev->ax25_ptr = ax25_dev; ax25_dev->dev = dev; dev_hold(dev); @@ -90,7 +88,7 @@ void ax25_dev_device_up(struct net_device *dev) ax25_dev_list = ax25_dev; spin_unlock_bh(&ax25_dev_lock); - ax25_register_sysctl(); + ax25_register_dev_sysctl(ax25_dev); } void ax25_dev_device_down(struct net_device *dev) @@ -100,7 +98,7 @@ void ax25_dev_device_down(struct net_device *dev) if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) return; - ax25_unregister_sysctl(); + ax25_unregister_dev_sysctl(ax25_dev); spin_lock_bh(&ax25_dev_lock); @@ -120,7 +118,6 @@ void ax25_dev_device_down(struct net_device *dev) 
spin_unlock_bh(&ax25_dev_lock); dev_put(dev); kfree(ax25_dev); - ax25_register_sysctl(); return; } @@ -130,7 +127,6 @@ void ax25_dev_device_down(struct net_device *dev) spin_unlock_bh(&ax25_dev_lock); dev_put(dev); kfree(ax25_dev); - ax25_register_sysctl(); return; } @@ -138,8 +134,6 @@ void ax25_dev_device_down(struct net_device *dev) } spin_unlock_bh(&ax25_dev_lock); dev->ax25_ptr = NULL; - - ax25_register_sysctl(); } int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd) diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c index 846ae4e2b11..67de6b33f2c 100644 --- a/net/ax25/ax25_ip.c +++ b/net/ax25/ax25_ip.c @@ -48,7 +48,7 @@ int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, - const void *saddr, unsigned len) + const void *saddr, unsigned int len) { unsigned char *buff; @@ -219,7 +219,7 @@ put: int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, - const void *saddr, unsigned len) + const void *saddr, unsigned int len) { return -AX25_HEADER_LEN; } diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c index ebe0ef3f1d8..d5744b75251 100644 --- a/net/ax25/sysctl_net_ax25.c +++ b/net/ax25/sysctl_net_ax25.c @@ -29,17 +29,6 @@ static int min_proto[1], max_proto[] = { AX25_PROTO_MAX }; static int min_ds_timeout[1], max_ds_timeout[] = {65535000}; #endif -static struct ctl_table_header *ax25_table_header; - -static ctl_table *ax25_table; -static int ax25_table_size; - -static struct ctl_path ax25_path[] = { - { .procname = "net", }, - { .procname = "ax25", }, - { } -}; - static const ctl_table ax25_param_table[] = { { .procname = "ip_default_mode", @@ -159,52 +148,37 @@ static const ctl_table ax25_param_table[] = { { } /* that's all, folks! 
*/ }; -void ax25_register_sysctl(void) +int ax25_register_dev_sysctl(ax25_dev *ax25_dev) { - ax25_dev *ax25_dev; - int n, k; - - spin_lock_bh(&ax25_dev_lock); - for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) - ax25_table_size += sizeof(ctl_table); - - if ((ax25_table = kzalloc(ax25_table_size, GFP_ATOMIC)) == NULL) { - spin_unlock_bh(&ax25_dev_lock); - return; - } - - for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) { - struct ctl_table *child = kmemdup(ax25_param_table, - sizeof(ax25_param_table), - GFP_ATOMIC); - if (!child) { - while (n--) - kfree(ax25_table[n].child); - kfree(ax25_table); - spin_unlock_bh(&ax25_dev_lock); - return; - } - ax25_table[n].child = ax25_dev->systable = child; - ax25_table[n].procname = ax25_dev->dev->name; - ax25_table[n].mode = 0555; - - - for (k = 0; k < AX25_MAX_VALUES; k++) - child[k].data = &ax25_dev->values[k]; - - n++; + char path[sizeof("net/ax25/") + IFNAMSIZ]; + int k; + struct ctl_table *table; + + table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + for (k = 0; k < AX25_MAX_VALUES; k++) + table[k].data = &ax25_dev->values[k]; + + snprintf(path, sizeof(path), "net/ax25/%s", ax25_dev->dev->name); + ax25_dev->sysheader = register_net_sysctl(&init_net, path, table); + if (!ax25_dev->sysheader) { + kfree(table); + return -ENOMEM; } - spin_unlock_bh(&ax25_dev_lock); - - ax25_table_header = register_sysctl_paths(ax25_path, ax25_table); + return 0; } -void ax25_unregister_sysctl(void) +void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev) { - ctl_table *p; - unregister_sysctl_table(ax25_table_header); - - for (p = ax25_table; p->procname; p++) - kfree(p->child); - kfree(ax25_table); + struct ctl_table_header *header = ax25_dev->sysheader; + struct ctl_table *table; + + if (header) { + ax25_dev->sysheader = NULL; + table = header->ctl_table_arg; + unregister_net_sysctl_table(header); + kfree(table); + } } diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig index 2b68d068eaf..53f5244e28f 100644 --- a/net/batman-adv/Kconfig +++ b/net/batman-adv/Kconfig @@ -7,19 +7,28 @@ config BATMAN_ADV depends on NET select CRC16 default n - ---help--- + help + B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is + a routing protocol for multi-hop ad-hoc mesh networks. The + networks may be wired or wireless. See + http://www.open-mesh.org/ for more information and user space + tools. - B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is - a routing protocol for multi-hop ad-hoc mesh networks. The - networks may be wired or wireless. See - http://www.open-mesh.org/ for more information and user space - tools. +config BATMAN_ADV_BLA + bool "Bridge Loop Avoidance" + depends on BATMAN_ADV && INET + default y + help + This option enables BLA (Bridge Loop Avoidance), a mechanism + to avoid Ethernet frames looping when mesh nodes are connected + to both the same LAN and the same mesh. If you will never use + more than one mesh node in the same LAN, you can safely remove + this feature and save some space. config BATMAN_ADV_DEBUG bool "B.A.T.M.A.N. debugging" - depends on BATMAN_ADV != n - ---help--- - + depends on BATMAN_ADV + help This is an option for use by developers; most people should say N here. This enables compilation of support for outputting debugging information to the kernel log. 
The diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile index 4e392ebedb6..6d5c1940667 100644 --- a/net/batman-adv/Makefile +++ b/net/batman-adv/Makefile @@ -23,6 +23,7 @@ batman-adv-y += bat_debugfs.o batman-adv-y += bat_iv_ogm.o batman-adv-y += bat_sysfs.o batman-adv-y += bitarray.o +batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o batman-adv-y += gateway_client.o batman-adv-y += gateway_common.o batman-adv-y += hard-interface.o diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c index c3b0548b175..3b588f86d77 100644 --- a/net/batman-adv/bat_debugfs.c +++ b/net/batman-adv/bat_debugfs.c @@ -32,6 +32,7 @@ #include "soft-interface.h" #include "vis.h" #include "icmp_socket.h" +#include "bridge_loop_avoidance.h" static struct dentry *bat_debugfs; @@ -82,8 +83,8 @@ int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) va_start(args, fmt); vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args); - fdebug_log(bat_priv->debug_log, "[%10lu] %s", - (jiffies / HZ), tmp_log_buf); + fdebug_log(bat_priv->debug_log, "[%10u] %s", + jiffies_to_msecs(jiffies), tmp_log_buf); va_end(args); return 0; @@ -238,17 +239,19 @@ static int gateways_open(struct inode *inode, struct file *file) return single_open(file, gw_client_seq_print_text, net_dev); } -static int softif_neigh_open(struct inode *inode, struct file *file) +static int transtable_global_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; - return single_open(file, softif_neigh_seq_print_text, net_dev); + return single_open(file, tt_global_seq_print_text, net_dev); } -static int transtable_global_open(struct inode *inode, struct file *file) +#ifdef CONFIG_BATMAN_ADV_BLA +static int bla_claim_table_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; - return single_open(file, tt_global_seq_print_text, net_dev); + return single_open(file, bla_claim_table_seq_print_text, net_dev); } +#endif static int transtable_local_open(struct inode *inode, struct file *file) { @@ -282,16 +285,20 @@ struct bat_debuginfo bat_debuginfo_##_name = { \ static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open); static BAT_DEBUGINFO(originators, S_IRUGO, originators_open); static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open); -static BAT_DEBUGINFO(softif_neigh, S_IRUGO, softif_neigh_open); static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open); +#ifdef CONFIG_BATMAN_ADV_BLA +static BAT_DEBUGINFO(bla_claim_table, S_IRUGO, bla_claim_table_open); +#endif static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open); static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open); static struct bat_debuginfo *mesh_debuginfos[] = { &bat_debuginfo_originators, &bat_debuginfo_gateways, - &bat_debuginfo_softif_neigh, &bat_debuginfo_transtable_global, +#ifdef CONFIG_BATMAN_ADV_BLA + &bat_debuginfo_bla_claim_table, +#endif &bat_debuginfo_transtable_local, &bat_debuginfo_vis_data, NULL, diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index a6d5d63fb6a..dc53798ebb4 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -30,33 +30,69 @@ #include "send.h" #include "bat_algo.h" -static void bat_iv_ogm_init(struct hard_iface *hard_iface) +static struct neigh_node *bat_iv_ogm_neigh_new(struct hard_iface *hard_iface, + const uint8_t *neigh_addr, + struct orig_node *orig_node, + struct orig_node *orig_neigh, + uint32_t seqno) +{ + struct 
neigh_node *neigh_node; + + neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, seqno); + if (!neigh_node) + goto out; + + INIT_LIST_HEAD(&neigh_node->bonding_list); + + neigh_node->orig_node = orig_neigh; + neigh_node->if_incoming = hard_iface; + + spin_lock_bh(&orig_node->neigh_list_lock); + hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); + spin_unlock_bh(&orig_node->neigh_list_lock); + +out: + return neigh_node; +} + +static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface) { struct batman_ogm_packet *batman_ogm_packet; + uint32_t random_seqno; + int res = -1; - hard_iface->packet_len = BATMAN_OGM_LEN; + /* randomize initial seqno to avoid collision */ + get_random_bytes(&random_seqno, sizeof(random_seqno)); + atomic_set(&hard_iface->seqno, random_seqno); + + hard_iface->packet_len = BATMAN_OGM_HLEN; hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC); + if (!hard_iface->packet_buff) + goto out; + batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; - batman_ogm_packet->header.packet_type = BAT_OGM; + batman_ogm_packet->header.packet_type = BAT_IV_OGM; batman_ogm_packet->header.version = COMPAT_VERSION; batman_ogm_packet->header.ttl = 2; batman_ogm_packet->flags = NO_FLAGS; batman_ogm_packet->tq = TQ_MAX_VALUE; batman_ogm_packet->tt_num_changes = 0; batman_ogm_packet->ttvn = 0; + + res = 0; + +out: + return res; } -static void bat_iv_ogm_init_primary(struct hard_iface *hard_iface) +static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface) { - struct batman_ogm_packet *batman_ogm_packet; - - batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; - batman_ogm_packet->flags = PRIMARIES_FIRST_HOP; - batman_ogm_packet->header.ttl = TTL; + kfree(hard_iface->packet_buff); + hard_iface->packet_buff = NULL; } -static void bat_iv_ogm_update_mac(struct hard_iface *hard_iface) +static void bat_iv_ogm_iface_update_mac(struct hard_iface *hard_iface) { struct batman_ogm_packet *batman_ogm_packet; @@ -67,6 +103,15 @@ static void bat_iv_ogm_update_mac(struct hard_iface *hard_iface) hard_iface->net_dev->dev_addr, ETH_ALEN); } +static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface) +{ + struct batman_ogm_packet *batman_ogm_packet; + + batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; + batman_ogm_packet->flags = PRIMARIES_FIRST_HOP; + batman_ogm_packet->header.ttl = TTL; +} + /* when do we schedule our own ogm to be sent */ static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv) { @@ -92,7 +137,7 @@ static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv) static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len, int tt_num_changes) { - int next_buff_pos = buff_pos + BATMAN_OGM_LEN + tt_len(tt_num_changes); + int next_buff_pos = buff_pos + BATMAN_OGM_HLEN + tt_len(tt_num_changes); return (next_buff_pos <= packet_len) && (next_buff_pos <= MAX_AGGREGATION_BYTES); @@ -132,7 +177,7 @@ static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet, "Sending own" : "Forwarding")); bat_dbg(DBG_BATMAN, bat_priv, - "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n", + "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n", fwd_str, (packet_num > 0 ? 
"aggregated " : ""), batman_ogm_packet->orig, ntohl(batman_ogm_packet->seqno), @@ -142,7 +187,7 @@ static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet, batman_ogm_packet->ttvn, hard_iface->net_dev->name, hard_iface->net_dev->dev_addr); - buff_pos += BATMAN_OGM_LEN + + buff_pos += BATMAN_OGM_HLEN + tt_len(batman_ogm_packet->tt_num_changes); packet_num++; batman_ogm_packet = (struct batman_ogm_packet *) @@ -191,7 +236,7 @@ static void bat_iv_ogm_emit(struct forw_packet *forw_packet) /* FIXME: what about aggregated packets ? */ bat_dbg(DBG_BATMAN, bat_priv, - "%s packet (originator %pM, seqno %d, TTL %d) on interface %s [%pM]\n", + "%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n", (forw_packet->own ? "Sending own" : "Forwarding"), batman_ogm_packet->orig, ntohl(batman_ogm_packet->seqno), @@ -335,10 +380,9 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff, if ((atomic_read(&bat_priv->aggregated_ogms)) && (packet_len < MAX_AGGREGATION_BYTES)) forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES + - sizeof(struct ethhdr)); + ETH_HLEN); else - forw_packet_aggr->skb = dev_alloc_skb(packet_len + - sizeof(struct ethhdr)); + forw_packet_aggr->skb = dev_alloc_skb(packet_len + ETH_HLEN); if (!forw_packet_aggr->skb) { if (!own_packet) @@ -346,7 +390,7 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff, kfree(forw_packet_aggr); goto out; } - skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr)); + skb_reserve(forw_packet_aggr->skb, ETH_HLEN); INIT_HLIST_NODE(&forw_packet_aggr->list); @@ -461,11 +505,11 @@ static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv, static void bat_iv_ogm_forward(struct orig_node *orig_node, const struct ethhdr *ethhdr, struct batman_ogm_packet *batman_ogm_packet, - int directlink, struct hard_iface *if_incoming) + bool is_single_hop_neigh, + bool is_from_best_next_hop, + struct hard_iface *if_incoming) { struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); - struct neigh_node *router; - uint8_t in_tq, in_ttl, tq_avg = 0; uint8_t tt_num_changes; if (batman_ogm_packet->header.ttl <= 1) { @@ -473,54 +517,43 @@ static void bat_iv_ogm_forward(struct orig_node *orig_node, return; } - router = orig_node_get_router(orig_node); + if (!is_from_best_next_hop) { + /* Mark the forwarded packet when it is not coming from our + * best next hop. We still need to forward the packet for our + * neighbor link quality detection to work in case the packet + * originated from a single hop neighbor. Otherwise we can + * simply drop the ogm. 
+ */ + if (is_single_hop_neigh) + batman_ogm_packet->flags |= NOT_BEST_NEXT_HOP; + else + return; + } - in_tq = batman_ogm_packet->tq; - in_ttl = batman_ogm_packet->header.ttl; tt_num_changes = batman_ogm_packet->tt_num_changes; batman_ogm_packet->header.ttl--; memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN); - /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast - * of our best tq value */ - if (router && router->tq_avg != 0) { - - /* rebroadcast ogm of best ranking neighbor as is */ - if (!compare_eth(router->addr, ethhdr->h_source)) { - batman_ogm_packet->tq = router->tq_avg; - - if (router->last_ttl) - batman_ogm_packet->header.ttl = - router->last_ttl - 1; - } - - tq_avg = router->tq_avg; - } - - if (router) - neigh_node_free_ref(router); - /* apply hop penalty */ batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv); bat_dbg(DBG_BATMAN, bat_priv, - "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n", - in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1, - batman_ogm_packet->header.ttl); + "Forwarding packet: tq: %i, ttl: %i\n", + batman_ogm_packet->tq, batman_ogm_packet->header.ttl); batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno); batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc); /* switch of primaries first hop flag when forwarding */ batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP; - if (directlink) + if (is_single_hop_neigh) batman_ogm_packet->flags |= DIRECTLINK; else batman_ogm_packet->flags &= ~DIRECTLINK; bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet, - BATMAN_OGM_LEN + tt_len(tt_num_changes), + BATMAN_OGM_HLEN + tt_len(tt_num_changes), if_incoming, 0, bat_iv_ogm_fwd_send_time()); } @@ -603,12 +636,12 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, if (is_duplicate) continue; - spin_lock_bh(&tmp_neigh_node->tq_lock); + spin_lock_bh(&tmp_neigh_node->lq_update_lock); ring_buffer_set(tmp_neigh_node->tq_recv, &tmp_neigh_node->tq_index, 0); tmp_neigh_node->tq_avg = ring_buffer_avg(tmp_neigh_node->tq_recv); - spin_unlock_bh(&tmp_neigh_node->tq_lock); + spin_unlock_bh(&tmp_neigh_node->lq_update_lock); } if (!neigh_node) { @@ -618,8 +651,9 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, if (!orig_tmp) goto unlock; - neigh_node = create_neighbor(orig_node, orig_tmp, - ethhdr->h_source, if_incoming); + neigh_node = bat_iv_ogm_neigh_new(if_incoming, ethhdr->h_source, + orig_node, orig_tmp, + batman_ogm_packet->seqno); orig_node_free_ref(orig_tmp); if (!neigh_node) @@ -631,14 +665,14 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, rcu_read_unlock(); orig_node->flags = batman_ogm_packet->flags; - neigh_node->last_valid = jiffies; + neigh_node->last_seen = jiffies; - spin_lock_bh(&neigh_node->tq_lock); + spin_lock_bh(&neigh_node->lq_update_lock); ring_buffer_set(neigh_node->tq_recv, &neigh_node->tq_index, batman_ogm_packet->tq); neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv); - spin_unlock_bh(&neigh_node->tq_lock); + spin_unlock_bh(&neigh_node->lq_update_lock); if (!is_duplicate) { orig_node->last_ttl = batman_ogm_packet->header.ttl; @@ -744,19 +778,20 @@ static int bat_iv_ogm_calc_tq(struct orig_node *orig_node, rcu_read_unlock(); if (!neigh_node) - neigh_node = create_neighbor(orig_neigh_node, - orig_neigh_node, - orig_neigh_node->orig, - if_incoming); + neigh_node = bat_iv_ogm_neigh_new(if_incoming, + orig_neigh_node->orig, + orig_neigh_node, + orig_neigh_node, + batman_ogm_packet->seqno); if 
(!neigh_node) goto out; - /* if orig_node is direct neighbor update neigh_node last_valid */ + /* if orig_node is direct neighbor update neigh_node last_seen */ if (orig_node == orig_neigh_node) - neigh_node->last_valid = jiffies; + neigh_node->last_seen = jiffies; - orig_node->last_valid = jiffies; + orig_node->last_seen = jiffies; /* find packet count of corresponding one hop neighbor */ spin_lock_bh(&orig_node->ogm_cnt_lock); @@ -842,7 +877,8 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno; /* signalize caller that the packet is to be dropped. */ - if (window_protected(bat_priv, seq_diff, + if (!hlist_empty(&orig_node->neigh_list) && + window_protected(bat_priv, seq_diff, &orig_node->batman_seqno_reset)) goto out; @@ -850,9 +886,9 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, hlist_for_each_entry_rcu(tmp_neigh_node, node, &orig_node->neigh_list, list) { - is_duplicate |= get_bit_status(tmp_neigh_node->real_bits, - orig_node->last_real_seqno, - batman_ogm_packet->seqno); + is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits, + orig_node->last_real_seqno, + batman_ogm_packet->seqno); if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) && (tmp_neigh_node->if_incoming == if_incoming)) @@ -866,13 +902,14 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, seq_diff, set_mark); tmp_neigh_node->real_packet_count = - bit_packet_count(tmp_neigh_node->real_bits); + bitmap_weight(tmp_neigh_node->real_bits, + TQ_LOCAL_WINDOW_SIZE); } rcu_read_unlock(); if (need_update) { bat_dbg(DBG_BATMAN, bat_priv, - "updating last_seqno: old %d, new %d\n", + "updating last_seqno: old %u, new %u\n", orig_node->last_real_seqno, batman_ogm_packet->seqno); orig_node->last_real_seqno = batman_ogm_packet->seqno; } @@ -897,7 +934,9 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, struct neigh_node *orig_neigh_router = NULL; int has_directlink_flag; int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; - int is_broadcast = 0, is_bidirectional, is_single_hop_neigh; + int is_broadcast = 0, is_bidirectional; + bool is_single_hop_neigh = false; + bool is_from_best_next_hop = false; int is_duplicate; uint32_t if_incoming_seqno; @@ -913,7 +952,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, * packet in an aggregation. Here we expect that the padding * is always zero (or not 0x01) */ - if (batman_ogm_packet->header.packet_type != BAT_OGM) + if (batman_ogm_packet->header.packet_type != BAT_IV_OGM) return; /* could be changed by schedule_own_packet() */ @@ -921,11 +960,11 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0); - is_single_hop_neigh = (compare_eth(ethhdr->h_source, - batman_ogm_packet->orig) ? 
1 : 0); + if (compare_eth(ethhdr->h_source, batman_ogm_packet->orig)) + is_single_hop_neigh = true; bat_dbg(DBG_BATMAN, bat_priv, - "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n", + "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n", ethhdr->h_source, if_incoming->net_dev->name, if_incoming->net_dev->dev_addr, batman_ogm_packet->orig, batman_ogm_packet->prev_sender, batman_ogm_packet->seqno, @@ -998,11 +1037,11 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, spin_lock_bh(&orig_neigh_node->ogm_cnt_lock); word = &(orig_neigh_node->bcast_own[offset]); - bit_mark(word, - if_incoming_seqno - + bat_set_bit(word, + if_incoming_seqno - batman_ogm_packet->seqno - 2); orig_neigh_node->bcast_own_sum[if_incoming->if_num] = - bit_packet_count(word); + bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE); spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock); } @@ -1019,6 +1058,13 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, return; } + if (batman_ogm_packet->flags & NOT_BEST_NEXT_HOP) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n", + ethhdr->h_source); + return; + } + orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig); if (!orig_node) return; @@ -1043,6 +1089,10 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, if (router) router_router = orig_node_get_router(router->orig_node); + if ((router && router->tq_avg != 0) && + (compare_eth(router->addr, ethhdr->h_source))) + is_from_best_next_hop = true; + /* avoid temporary routing loops */ if (router && router_router && (compare_eth(router->addr, batman_ogm_packet->prev_sender)) && @@ -1093,7 +1143,8 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, /* mark direct link on incoming interface */ bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet, - 1, if_incoming); + is_single_hop_neigh, is_from_best_next_hop, + if_incoming); bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: rebroadcast neighbor packet with direct link flag\n"); @@ -1116,7 +1167,8 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: rebroadcast originator packet\n"); bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet, - 0, if_incoming); + is_single_hop_neigh, is_from_best_next_hop, + if_incoming); out_neigh: if ((orig_neigh_node) && (!is_single_hop_neigh)) @@ -1132,13 +1184,25 @@ out: orig_node_free_ref(orig_node); } -static void bat_iv_ogm_receive(struct hard_iface *if_incoming, - struct sk_buff *skb) +static int bat_iv_ogm_receive(struct sk_buff *skb, + struct hard_iface *if_incoming) { + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batman_ogm_packet *batman_ogm_packet; struct ethhdr *ethhdr; int buff_pos = 0, packet_len; unsigned char *tt_buff, *packet_buff; + bool ret; + + ret = check_management_packet(skb, if_incoming, BATMAN_OGM_HLEN); + if (!ret) + return NET_RX_DROP; + + /* did we receive a B.A.T.M.A.N. IV OGM packet on an interface + * that does not have B.A.T.M.A.N. IV enabled ? 
+ */ + if (bat_priv->bat_algo_ops->bat_ogm_emit != bat_iv_ogm_emit) + return NET_RX_DROP; packet_len = skb_headlen(skb); ethhdr = (struct ethhdr *)skb_mac_header(skb); @@ -1152,31 +1216,50 @@ static void bat_iv_ogm_receive(struct hard_iface *if_incoming, batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno); batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc); - tt_buff = packet_buff + buff_pos + BATMAN_OGM_LEN; + tt_buff = packet_buff + buff_pos + BATMAN_OGM_HLEN; bat_iv_ogm_process(ethhdr, batman_ogm_packet, tt_buff, if_incoming); - buff_pos += BATMAN_OGM_LEN + + buff_pos += BATMAN_OGM_HLEN + tt_len(batman_ogm_packet->tt_num_changes); batman_ogm_packet = (struct batman_ogm_packet *) (packet_buff + buff_pos); } while (bat_iv_ogm_aggr_packet(buff_pos, packet_len, batman_ogm_packet->tt_num_changes)); + + kfree_skb(skb); + return NET_RX_SUCCESS; } static struct bat_algo_ops batman_iv __read_mostly = { .name = "BATMAN IV", - .bat_ogm_init = bat_iv_ogm_init, - .bat_ogm_init_primary = bat_iv_ogm_init_primary, - .bat_ogm_update_mac = bat_iv_ogm_update_mac, + .bat_iface_enable = bat_iv_ogm_iface_enable, + .bat_iface_disable = bat_iv_ogm_iface_disable, + .bat_iface_update_mac = bat_iv_ogm_iface_update_mac, + .bat_primary_iface_set = bat_iv_ogm_primary_iface_set, .bat_ogm_schedule = bat_iv_ogm_schedule, .bat_ogm_emit = bat_iv_ogm_emit, - .bat_ogm_receive = bat_iv_ogm_receive, }; int __init bat_iv_init(void) { - return bat_algo_register(&batman_iv); + int ret; + + /* batman originator packet */ + ret = recv_handler_register(BAT_IV_OGM, bat_iv_ogm_receive); + if (ret < 0) + goto out; + + ret = bat_algo_register(&batman_iv); + if (ret < 0) + goto handler_unregister; + + goto out; + +handler_unregister: + recv_handler_unregister(BAT_IV_OGM); +out: + return ret; } diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c index 68ff759fc30..5bc7b66d32d 100644 --- a/net/batman-adv/bat_sysfs.c +++ b/net/batman-adv/bat_sysfs.c @@ -63,7 +63,7 @@ struct bat_attribute bat_attr_##_name = { \ .store = _store, \ }; -#define BAT_ATTR_STORE_BOOL(_name, _post_func) \ +#define BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ char *buff, size_t count) \ { \ @@ -73,9 +73,9 @@ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ &bat_priv->_name, net_dev); \ } -#define BAT_ATTR_SHOW_BOOL(_name) \ -ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \ - char *buff) \ +#define BAT_ATTR_SIF_SHOW_BOOL(_name) \ +ssize_t show_##_name(struct kobject *kobj, \ + struct attribute *attr, char *buff) \ { \ struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \ return sprintf(buff, "%s\n", \ @@ -83,16 +83,17 @@ ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \ "disabled" : "enabled"); \ } \ -/* Use this, if you are going to turn a [name] in bat_priv on or off */ -#define BAT_ATTR_BOOL(_name, _mode, _post_func) \ - static BAT_ATTR_STORE_BOOL(_name, _post_func) \ - static BAT_ATTR_SHOW_BOOL(_name) \ +/* Use this, if you are going to turn a [name] in the soft-interface + * (bat_priv) on or off */ +#define BAT_ATTR_SIF_BOOL(_name, _mode, _post_func) \ + static BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \ + static BAT_ATTR_SIF_SHOW_BOOL(_name) \ static BAT_ATTR(_name, _mode, show_##_name, store_##_name) -#define BAT_ATTR_STORE_UINT(_name, _min, _max, _post_func) \ +#define BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ - 
char *buff, size_t count) \ + char *buff, size_t count) \ { \ struct net_device *net_dev = kobj_to_netdev(kobj); \ struct bat_priv *bat_priv = netdev_priv(net_dev); \ @@ -100,19 +101,62 @@ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ attr, &bat_priv->_name, net_dev); \ } -#define BAT_ATTR_SHOW_UINT(_name) \ -ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \ - char *buff) \ +#define BAT_ATTR_SIF_SHOW_UINT(_name) \ +ssize_t show_##_name(struct kobject *kobj, \ + struct attribute *attr, char *buff) \ { \ struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \ return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \ } \ -/* Use this, if you are going to set [name] in bat_priv to unsigned integer - * values only */ -#define BAT_ATTR_UINT(_name, _mode, _min, _max, _post_func) \ - static BAT_ATTR_STORE_UINT(_name, _min, _max, _post_func) \ - static BAT_ATTR_SHOW_UINT(_name) \ +/* Use this, if you are going to set [name] in the soft-interface + * (bat_priv) to an unsigned integer value */ +#define BAT_ATTR_SIF_UINT(_name, _mode, _min, _max, _post_func) \ + static BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \ + static BAT_ATTR_SIF_SHOW_UINT(_name) \ + static BAT_ATTR(_name, _mode, show_##_name, store_##_name) + + +#define BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \ +ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ + char *buff, size_t count) \ +{ \ + struct net_device *net_dev = kobj_to_netdev(kobj); \ + struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \ + ssize_t length; \ + \ + if (!hard_iface) \ + return 0; \ + \ + length = __store_uint_attr(buff, count, _min, _max, _post_func, \ + attr, &hard_iface->_name, net_dev); \ + \ + hardif_free_ref(hard_iface); \ + return length; \ +} + +#define BAT_ATTR_HIF_SHOW_UINT(_name) \ +ssize_t show_##_name(struct kobject *kobj, \ + struct attribute *attr, char *buff) \ +{ \ + struct net_device *net_dev = kobj_to_netdev(kobj); \ + struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \ + ssize_t length; \ + \ + if (!hard_iface) \ + return 0; \ + \ + length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_name));\ + \ + hardif_free_ref(hard_iface); \ + return length; \ +} + +/* Use this, if you are going to set [name] in hard_iface to an + * unsigned integer value*/ +#define BAT_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func) \ + static BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \ + static BAT_ATTR_HIF_SHOW_UINT(_name) \ static BAT_ATTR(_name, _mode, show_##_name, store_##_name) @@ -149,7 +193,7 @@ static int store_bool_attr(char *buff, size_t count, atomic_read(attr) == 1 ? "enabled" : "disabled", enabled == 1 ? "enabled" : "disabled"); - atomic_set(attr, (unsigned)enabled); + atomic_set(attr, (unsigned int)enabled); return count; } @@ -268,7 +312,7 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr, "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ? 
"client" : "server"); - atomic_set(&bat_priv->vis_mode, (unsigned)vis_mode_tmp); + atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp); return count; } @@ -354,7 +398,7 @@ static ssize_t store_gw_mode(struct kobject *kobj, struct attribute *attr, curr_gw_mode_str, buff); gw_deselect(bat_priv); - atomic_set(&bat_priv->gw_mode, (unsigned)gw_mode_tmp); + atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp); return count; } @@ -384,26 +428,32 @@ static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr, return gw_bandwidth_set(net_dev, buff, count); } -BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL); -BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL); -BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu); -BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL); +BAT_ATTR_SIF_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL); +BAT_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL); +#ifdef CONFIG_BATMAN_ADV_BLA +BAT_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL); +#endif +BAT_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu); +BAT_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL); static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode); static BAT_ATTR(routing_algo, S_IRUGO, show_bat_algo, NULL); static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode); -BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL); -BAT_ATTR_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL); -BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE, - post_gw_deselect); +BAT_ATTR_SIF_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL); +BAT_ATTR_SIF_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL); +BAT_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE, + post_gw_deselect); static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth, store_gw_bwidth); #ifdef CONFIG_BATMAN_ADV_DEBUG -BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 7, NULL); +BAT_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL); #endif static struct bat_attribute *mesh_attrs[] = { &bat_attr_aggregated_ogms, &bat_attr_bonding, +#ifdef CONFIG_BATMAN_ADV_BLA + &bat_attr_bridge_loop_avoidance, +#endif &bat_attr_fragmentation, &bat_attr_ap_isolation, &bat_attr_vis_mode, diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c index 6d0aa216b23..07ae6e1b8ac 100644 --- a/net/batman-adv/bitarray.c +++ b/net/batman-adv/bitarray.c @@ -24,100 +24,13 @@ #include <linux/bitops.h> -/* returns true if the corresponding bit in the given seq_bits indicates true - * and curr_seqno is within range of last_seqno */ -int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno, - uint32_t curr_seqno) -{ - int32_t diff, word_offset, word_num; - - diff = last_seqno - curr_seqno; - if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE) { - return 0; - } else { - /* which word */ - word_num = (last_seqno - curr_seqno) / WORD_BIT_SIZE; - /* which position in the selected word */ - word_offset = (last_seqno - curr_seqno) % WORD_BIT_SIZE; - - if (test_bit(word_offset, &seq_bits[word_num])) - return 1; - else - return 0; - } -} - -/* turn corresponding bit on, so we can remember that we got the packet */ -void bit_mark(unsigned long *seq_bits, int32_t n) -{ - int32_t word_offset, word_num; - - /* if too old, just drop it */ - if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE) - return; - - /* which word */ - word_num = n / WORD_BIT_SIZE; - /* which position in the selected word */ - 
word_offset = n % WORD_BIT_SIZE; - - set_bit(word_offset, &seq_bits[word_num]); /* turn the position on */ -} - /* shift the packet array by n places. */ -static void bit_shift(unsigned long *seq_bits, int32_t n) +static void bat_bitmap_shift_left(unsigned long *seq_bits, int32_t n) { - int32_t word_offset, word_num; - int32_t i; - if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE) return; - word_offset = n % WORD_BIT_SIZE;/* shift how much inside each word */ - word_num = n / WORD_BIT_SIZE; /* shift over how much (full) words */ - - for (i = NUM_WORDS - 1; i > word_num; i--) { - /* going from old to new, so we don't overwrite the data we copy - * from. - * - * left is high, right is low: FEDC BA98 7654 3210 - * ^^ ^^ - * vvvv - * ^^^^ = from, vvvvv =to, we'd have word_num==1 and - * word_offset==WORD_BIT_SIZE/2 ????? in this example. - * (=24 bits) - * - * our desired output would be: 9876 5432 1000 0000 - * */ - - seq_bits[i] = - (seq_bits[i - word_num] << word_offset) + - /* take the lower port from the left half, shift it left - * to its final position */ - (seq_bits[i - word_num - 1] >> - (WORD_BIT_SIZE-word_offset)); - /* and the upper part of the right half and shift it left to - * its position */ - /* for our example that would be: word[0] = 9800 + 0076 = - * 9876 */ - } - /* now for our last word, i==word_num, we only have its "left" half. - * that's the 1000 word in our example.*/ - - seq_bits[i] = (seq_bits[i - word_num] << word_offset); - - /* pad the rest with 0, if there is anything */ - i--; - - for (; i >= 0; i--) - seq_bits[i] = 0; -} - -static void bit_reset_window(unsigned long *seq_bits) -{ - int i; - for (i = 0; i < NUM_WORDS; i++) - seq_bits[i] = 0; + bitmap_shift_left(seq_bits, seq_bits, n, TQ_LOCAL_WINDOW_SIZE); } @@ -137,7 +50,7 @@ int bit_get_packet(void *priv, unsigned long *seq_bits, if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) { if (set_mark) - bit_mark(seq_bits, -seq_num_diff); + bat_set_bit(seq_bits, -seq_num_diff); return 0; } @@ -145,10 +58,10 @@ int bit_get_packet(void *priv, unsigned long *seq_bits, * set the mark if required */ if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) { - bit_shift(seq_bits, seq_num_diff); + bat_bitmap_shift_left(seq_bits, seq_num_diff); if (set_mark) - bit_mark(seq_bits, 0); + bat_set_bit(seq_bits, 0); return 1; } @@ -159,9 +72,9 @@ int bit_get_packet(void *priv, unsigned long *seq_bits, bat_dbg(DBG_BATMAN, bat_priv, "We missed a lot of packets (%i) !\n", seq_num_diff - 1); - bit_reset_window(seq_bits); + bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE); if (set_mark) - bit_mark(seq_bits, 0); + bat_set_bit(seq_bits, 0); return 1; } @@ -176,9 +89,9 @@ int bit_get_packet(void *priv, unsigned long *seq_bits, bat_dbg(DBG_BATMAN, bat_priv, "Other host probably restarted!\n"); - bit_reset_window(seq_bits); + bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE); if (set_mark) - bit_mark(seq_bits, 0); + bat_set_bit(seq_bits, 0); return 1; } @@ -186,16 +99,3 @@ int bit_get_packet(void *priv, unsigned long *seq_bits, /* never reached */ return 0; } - -/* count the hamming weight, how many good packets did we receive? just count - * the 1's. 
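The conversion above swaps the hand-rolled word shifting for the generic bitmap helpers without changing the sliding-window behaviour. A minimal userspace sketch of that window follows, assuming a window of at most 64 sequence numbers so a single uint64_t can stand in for the bitmap; WINDOW_SIZE and the function names are illustrative stand-ins, not the batman-adv identifiers.

#include <stdio.h>
#include <stdint.h>

#define WINDOW_SIZE 64	/* stand-in for TQ_LOCAL_WINDOW_SIZE */

static uint64_t seq_bits;	/* bit i set => seqno (last_seqno - i) was seen */
static uint32_t last_seqno;

/* mirrors the bat_test_bit() idea: in range of the window and marked? */
static int window_test(uint32_t curr_seqno)
{
	int32_t diff = last_seqno - curr_seqno;

	if (diff < 0 || diff >= WINDOW_SIZE)
		return 0;
	return (seq_bits >> diff) & 1;
}

/* mirrors the "newer seqno" branch of bit_get_packet(): shift the
 * window left and mark bit 0 for the packet just received */
static void window_advance(uint32_t new_seqno)
{
	uint32_t shift = new_seqno - last_seqno;

	seq_bits = (shift >= WINDOW_SIZE) ? 0 : seq_bits << shift;
	seq_bits |= 1;
	last_seqno = new_seqno;
}

int main(void)
{
	window_advance(100);
	window_advance(101);
	window_advance(105);
	printf("%d %d %d\n",
	       window_test(100),	/* 1: still inside the window */
	       window_test(103),	/* 0: never received          */
	       window_test(105));	/* 1: newest                  */
	/* popcount replaces the removed bit_packet_count() loop */
	printf("received in window: %d\n", __builtin_popcountll(seq_bits));
	return 0;
}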
- */ -int bit_packet_count(const unsigned long *seq_bits) -{ - int i, hamming = 0; - - for (i = 0; i < NUM_WORDS; i++) - hamming += hweight_long(seq_bits[i]); - - return hamming; -} diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h index c6135728a68..1835c15cda4 100644 --- a/net/batman-adv/bitarray.h +++ b/net/batman-adv/bitarray.h @@ -22,23 +22,33 @@ #ifndef _NET_BATMAN_ADV_BITARRAY_H_ #define _NET_BATMAN_ADV_BITARRAY_H_ -#define WORD_BIT_SIZE (sizeof(unsigned long) * 8) - /* returns true if the corresponding bit in the given seq_bits indicates true * and curr_seqno is within range of last_seqno */ -int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno, - uint32_t curr_seqno); +static inline int bat_test_bit(const unsigned long *seq_bits, + uint32_t last_seqno, uint32_t curr_seqno) +{ + int32_t diff; + + diff = last_seqno - curr_seqno; + if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE) + return 0; + else + return test_bit(diff, seq_bits); +} /* turn corresponding bit on, so we can remember that we got the packet */ -void bit_mark(unsigned long *seq_bits, int32_t n); +static inline void bat_set_bit(unsigned long *seq_bits, int32_t n) +{ + /* if too old, just drop it */ + if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE) + return; + set_bit(n, seq_bits); /* turn the position on */ +} /* receive and process one packet, returns 1 if received seq_num is considered * new, 0 if old */ int bit_get_packet(void *priv, unsigned long *seq_bits, int32_t seq_num_diff, int set_mark); -/* count the hamming weight, how many good packets did we receive? */ -int bit_packet_count(const unsigned long *seq_bits); - #endif /* _NET_BATMAN_ADV_BITARRAY_H_ */ diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c new file mode 100644 index 00000000000..8bf97515a77 --- /dev/null +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -0,0 +1,1580 @@ +/* + * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors: + * + * Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "hash.h" +#include "hard-interface.h" +#include "originator.h" +#include "bridge_loop_avoidance.h" +#include "translation-table.h" +#include "send.h" + +#include <linux/etherdevice.h> +#include <linux/crc16.h> +#include <linux/if_arp.h> +#include <net/arp.h> +#include <linux/if_vlan.h> + +static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05}; + +static void bla_periodic_work(struct work_struct *work); +static void bla_send_announce(struct bat_priv *bat_priv, + struct backbone_gw *backbone_gw); + +/* return the index of the claim */ +static inline uint32_t choose_claim(const void *data, uint32_t size) +{ + const unsigned char *key = data; + uint32_t hash = 0; + size_t i; + + for (i = 0; i < ETH_ALEN + sizeof(short); i++) { + hash += key[i]; + hash += (hash << 10); + hash ^= (hash >> 6); + } + + hash += (hash << 3); + hash ^= (hash >> 11); + hash += (hash << 15); + + return hash % size; +} + +/* return the index of the backbone gateway */ +static inline uint32_t choose_backbone_gw(const void *data, uint32_t size) +{ + const unsigned char *key = data; + uint32_t hash = 0; + size_t i; + + for (i = 0; i < ETH_ALEN + sizeof(short); i++) { + hash += key[i]; + hash += (hash << 10); + hash ^= (hash >> 6); + } + + hash += (hash << 3); + hash ^= (hash >> 11); + hash += (hash << 15); + + return hash % size; +} + + +/* compares address and vid of two backbone gws */ +static int compare_backbone_gw(const struct hlist_node *node, const void *data2) +{ + const void *data1 = container_of(node, struct backbone_gw, + hash_entry); + + return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0); +} + +/* compares address and vid of two claims */ +static int compare_claim(const struct hlist_node *node, const void *data2) +{ + const void *data1 = container_of(node, struct claim, + hash_entry); + + return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0); +} + +/* free a backbone gw */ +static void backbone_gw_free_ref(struct backbone_gw *backbone_gw) +{ + if (atomic_dec_and_test(&backbone_gw->refcount)) + kfree_rcu(backbone_gw, rcu); +} + +/* finally deinitialize the claim */ +static void claim_free_rcu(struct rcu_head *rcu) +{ + struct claim *claim; + + claim = container_of(rcu, struct claim, rcu); + + backbone_gw_free_ref(claim->backbone_gw); + kfree(claim); +} + +/* free a claim, call claim_free_rcu if its the last reference */ +static void claim_free_ref(struct claim *claim) +{ + if (atomic_dec_and_test(&claim->refcount)) + call_rcu(&claim->rcu, claim_free_rcu); +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @data: search data (may be local/static data) + * + * looks for a claim in the hash, and returns it if found + * or NULL otherwise. 
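choose_claim() and choose_backbone_gw() above are the classic Jenkins one-at-a-time hash run over the first ETH_ALEN + sizeof(short) bytes of the entry, i.e. the client or originator MAC plus the VLAN id. The sketch below shows that bucket selection in isolation; the key layout and bucket count are illustrative stand-ins, not the batman-adv structures.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Jenkins one-at-a-time hash over a MAC + VLAN id key, mirroring the
 * structure of choose_claim()/choose_backbone_gw() above */
static uint32_t one_at_a_time(const uint8_t *key, size_t len, uint32_t buckets)
{
	uint32_t hash = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		hash += key[i];
		hash += hash << 10;
		hash ^= hash >> 6;
	}
	hash += hash << 3;
	hash ^= hash >> 11;
	hash += hash << 15;

	return hash % buckets;
}

int main(void)
{
	uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	short vid = 5;
	uint8_t key[6 + sizeof(short)];

	/* MAC followed by vid, as in the first bytes of the hashed entry */
	memcpy(key, mac, sizeof(mac));
	memcpy(key + sizeof(mac), &vid, sizeof(vid));

	printf("bucket = %u\n", one_at_a_time(key, sizeof(key), 128));
	return 0;
}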
+ */ +static struct claim *claim_hash_find(struct bat_priv *bat_priv, + struct claim *data) +{ + struct hashtable_t *hash = bat_priv->claim_hash; + struct hlist_head *head; + struct hlist_node *node; + struct claim *claim; + struct claim *claim_tmp = NULL; + int index; + + if (!hash) + return NULL; + + index = choose_claim(data, hash->size); + head = &hash->table[index]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(claim, node, head, hash_entry) { + if (!compare_claim(&claim->hash_entry, data)) + continue; + + if (!atomic_inc_not_zero(&claim->refcount)) + continue; + + claim_tmp = claim; + break; + } + rcu_read_unlock(); + + return claim_tmp; +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @addr: the address of the originator + * @vid: the VLAN ID + * + * looks for a claim in the hash, and returns it if found + * or NULL otherwise. + */ +static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv, + uint8_t *addr, short vid) +{ + struct hashtable_t *hash = bat_priv->backbone_hash; + struct hlist_head *head; + struct hlist_node *node; + struct backbone_gw search_entry, *backbone_gw; + struct backbone_gw *backbone_gw_tmp = NULL; + int index; + + if (!hash) + return NULL; + + memcpy(search_entry.orig, addr, ETH_ALEN); + search_entry.vid = vid; + + index = choose_backbone_gw(&search_entry, hash->size); + head = &hash->table[index]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { + if (!compare_backbone_gw(&backbone_gw->hash_entry, + &search_entry)) + continue; + + if (!atomic_inc_not_zero(&backbone_gw->refcount)) + continue; + + backbone_gw_tmp = backbone_gw; + break; + } + rcu_read_unlock(); + + return backbone_gw_tmp; +} + +/* delete all claims for a backbone */ +static void bla_del_backbone_claims(struct backbone_gw *backbone_gw) +{ + struct hashtable_t *hash; + struct hlist_node *node, *node_tmp; + struct hlist_head *head; + struct claim *claim; + int i; + spinlock_t *list_lock; /* protects write access to the hash lists */ + + hash = backbone_gw->bat_priv->claim_hash; + if (!hash) + return; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + list_lock = &hash->list_locks[i]; + + spin_lock_bh(list_lock); + hlist_for_each_entry_safe(claim, node, node_tmp, + head, hash_entry) { + + if (claim->backbone_gw != backbone_gw) + continue; + + claim_free_ref(claim); + hlist_del_rcu(node); + } + spin_unlock_bh(list_lock); + } + + /* all claims gone, intialize CRC */ + backbone_gw->crc = BLA_CRC_INIT; +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @orig: the mac address to be announced within the claim + * @vid: the VLAN ID + * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...) + * + * sends a claim frame according to the provided info. 
+ */ +static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac, + short vid, int claimtype) +{ + struct sk_buff *skb; + struct ethhdr *ethhdr; + struct hard_iface *primary_if; + struct net_device *soft_iface; + uint8_t *hw_src; + struct bla_claim_dst local_claim_dest; + uint32_t zeroip = 0; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + return; + + memcpy(&local_claim_dest, &bat_priv->claim_dest, + sizeof(local_claim_dest)); + local_claim_dest.type = claimtype; + + soft_iface = primary_if->soft_iface; + + skb = arp_create(ARPOP_REPLY, ETH_P_ARP, + /* IP DST: 0.0.0.0 */ + zeroip, + primary_if->soft_iface, + /* IP SRC: 0.0.0.0 */ + zeroip, + /* Ethernet DST: Broadcast */ + NULL, + /* Ethernet SRC/HW SRC: originator mac */ + primary_if->net_dev->dev_addr, + /* HW DST: FF:43:05:XX:00:00 + * with XX = claim type + * and YY:YY = group id + */ + (uint8_t *)&local_claim_dest); + + if (!skb) + goto out; + + ethhdr = (struct ethhdr *)skb->data; + hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr); + + /* now we pretend that the client would have sent this ... */ + switch (claimtype) { + case CLAIM_TYPE_ADD: + /* normal claim frame + * set Ethernet SRC to the clients mac + */ + memcpy(ethhdr->h_source, mac, ETH_ALEN); + bat_dbg(DBG_BLA, bat_priv, + "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid); + break; + case CLAIM_TYPE_DEL: + /* unclaim frame + * set HW SRC to the clients mac + */ + memcpy(hw_src, mac, ETH_ALEN); + bat_dbg(DBG_BLA, bat_priv, + "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid); + break; + case CLAIM_TYPE_ANNOUNCE: + /* announcement frame + * set HW SRC to the special mac containg the crc + */ + memcpy(hw_src, mac, ETH_ALEN); + bat_dbg(DBG_BLA, bat_priv, + "bla_send_claim(): ANNOUNCE of %pM on vid %d\n", + ethhdr->h_source, vid); + break; + case CLAIM_TYPE_REQUEST: + /* request frame + * set HW SRC to the special mac containg the crc + */ + memcpy(hw_src, mac, ETH_ALEN); + memcpy(ethhdr->h_dest, mac, ETH_ALEN); + bat_dbg(DBG_BLA, bat_priv, + "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n", + ethhdr->h_source, ethhdr->h_dest, vid); + break; + + } + + if (vid != -1) + skb = vlan_insert_tag(skb, vid); + + skb_reset_mac_header(skb); + skb->protocol = eth_type_trans(skb, soft_iface); + bat_priv->stats.rx_packets++; + bat_priv->stats.rx_bytes += skb->len + ETH_HLEN; + soft_iface->last_rx = jiffies; + + netif_rx(skb); +out: + if (primary_if) + hardif_free_ref(primary_if); +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @orig: the mac address of the originator + * @vid: the VLAN ID + * + * searches for the backbone gw or creates a new one if it could not + * be found. 
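
bla_send_claim() above piggybacks the claim protocol on ordinary ARP replies: the target hardware address is overloaded to carry a three-byte magic (FF:43:05), the claim type and the 16-bit group id, matching struct bla_claim_dst added to packet.h later in this series. Example (illustrative userspace C, not part of the patch; the helper names are invented):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* mirrors struct bla_claim_dst from packet.h: magic, claim type, group id */
    struct claim_dst {
        uint8_t  magic[3];  /* FF:43:05 */
        uint8_t  type;      /* CLAIM_TYPE_ADD/_DEL/_ANNOUNCE/_REQUEST */
        uint16_t group;     /* group id, network byte order on the wire */
    } __attribute__((packed));

    static void pack_claim_dst(uint8_t dst[ETH_ALEN], uint8_t type, uint16_t group)
    {
        struct claim_dst cd = { { 0xff, 0x43, 0x05 }, type, group };

        memcpy(dst, &cd, ETH_ALEN);
    }

    static int is_claim_dst(const uint8_t dst[ETH_ALEN])
    {
        static const uint8_t magic[3] = { 0xff, 0x43, 0x05 };

        return memcmp(dst, magic, sizeof(magic)) == 0;
    }

    int main(void)
    {
        uint8_t dst[ETH_ALEN];

        pack_claim_dst(dst, 0x02 /* announce */, 0xbeef);
        printf("claim frame: %s, type 0x%02x\n",
               is_claim_dst(dst) ? "yes" : "no", dst[3]);
        return 0;
    }
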
+ */ +static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv, + uint8_t *orig, short vid) +{ + struct backbone_gw *entry; + struct orig_node *orig_node; + int hash_added; + + entry = backbone_hash_find(bat_priv, orig, vid); + + if (entry) + return entry; + + bat_dbg(DBG_BLA, bat_priv, + "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n", + orig, vid); + + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + return NULL; + + entry->vid = vid; + entry->lasttime = jiffies; + entry->crc = BLA_CRC_INIT; + entry->bat_priv = bat_priv; + atomic_set(&entry->request_sent, 0); + memcpy(entry->orig, orig, ETH_ALEN); + + /* one for the hash, one for returning */ + atomic_set(&entry->refcount, 2); + + hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw, + choose_backbone_gw, entry, &entry->hash_entry); + + if (unlikely(hash_added != 0)) { + /* hash failed, free the structure */ + kfree(entry); + return NULL; + } + + /* this is a gateway now, remove any tt entries */ + orig_node = orig_hash_find(bat_priv, orig); + if (orig_node) { + tt_global_del_orig(bat_priv, orig_node, + "became a backbone gateway"); + orig_node_free_ref(orig_node); + } + return entry; +} + +/* update or add the own backbone gw to make sure we announce + * where we receive other backbone gws + */ +static void bla_update_own_backbone_gw(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + short vid) +{ + struct backbone_gw *backbone_gw; + + backbone_gw = bla_get_backbone_gw(bat_priv, + primary_if->net_dev->dev_addr, vid); + if (unlikely(!backbone_gw)) + return; + + backbone_gw->lasttime = jiffies; + backbone_gw_free_ref(backbone_gw); +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @vid: the vid where the request came on + * + * Repeat all of our own claims, and finally send an ANNOUNCE frame + * to allow the requester another check if the CRC is correct now. + */ +static void bla_answer_request(struct bat_priv *bat_priv, + struct hard_iface *primary_if, short vid) +{ + struct hlist_node *node; + struct hlist_head *head; + struct hashtable_t *hash; + struct claim *claim; + struct backbone_gw *backbone_gw; + int i; + + bat_dbg(DBG_BLA, bat_priv, + "bla_answer_request(): received a claim request, send all of our own claims again\n"); + + backbone_gw = backbone_hash_find(bat_priv, + primary_if->net_dev->dev_addr, vid); + if (!backbone_gw) + return; + + hash = bat_priv->claim_hash; + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(claim, node, head, hash_entry) { + /* only own claims are interesting */ + if (claim->backbone_gw != backbone_gw) + continue; + + bla_send_claim(bat_priv, claim->addr, claim->vid, + CLAIM_TYPE_ADD); + } + rcu_read_unlock(); + } + + /* finally, send an announcement frame */ + bla_send_announce(bat_priv, backbone_gw); + backbone_gw_free_ref(backbone_gw); +} + +/** + * @backbone_gw: the backbone gateway from whom we are out of sync + * + * When the crc is wrong, ask the backbone gateway for a full table update. + * After the request, it will repeat all of his own claims and finally + * send an announcement claim with which we can check again. 
+ */ +static void bla_send_request(struct backbone_gw *backbone_gw) +{ + /* first, remove all old entries */ + bla_del_backbone_claims(backbone_gw); + + bat_dbg(DBG_BLA, backbone_gw->bat_priv, + "Sending REQUEST to %pM\n", + backbone_gw->orig); + + /* send request */ + bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig, + backbone_gw->vid, CLAIM_TYPE_REQUEST); + + /* no local broadcasts should be sent or received, for now. */ + if (!atomic_read(&backbone_gw->request_sent)) { + atomic_inc(&backbone_gw->bat_priv->bla_num_requests); + atomic_set(&backbone_gw->request_sent, 1); + } +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @backbone_gw: our backbone gateway which should be announced + * + * This function sends an announcement. It is called from multiple + * places. + */ +static void bla_send_announce(struct bat_priv *bat_priv, + struct backbone_gw *backbone_gw) +{ + uint8_t mac[ETH_ALEN]; + uint16_t crc; + + memcpy(mac, announce_mac, 4); + crc = htons(backbone_gw->crc); + memcpy(&mac[4], (uint8_t *)&crc, 2); + + bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE); + +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @mac: the mac address of the claim + * @vid: the VLAN ID of the frame + * @backbone_gw: the backbone gateway which claims it + * + * Adds a claim in the claim hash. + */ +static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac, + const short vid, struct backbone_gw *backbone_gw) +{ + struct claim *claim; + struct claim search_claim; + int hash_added; + + memcpy(search_claim.addr, mac, ETH_ALEN); + search_claim.vid = vid; + claim = claim_hash_find(bat_priv, &search_claim); + + /* create a new claim entry if it does not exist yet. */ + if (!claim) { + claim = kzalloc(sizeof(*claim), GFP_ATOMIC); + if (!claim) + return; + + memcpy(claim->addr, mac, ETH_ALEN); + claim->vid = vid; + claim->lasttime = jiffies; + claim->backbone_gw = backbone_gw; + + atomic_set(&claim->refcount, 2); + bat_dbg(DBG_BLA, bat_priv, + "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n", + mac, vid); + hash_added = hash_add(bat_priv->claim_hash, compare_claim, + choose_claim, claim, &claim->hash_entry); + + if (unlikely(hash_added != 0)) { + /* only local changes happened. */ + kfree(claim); + return; + } + } else { + claim->lasttime = jiffies; + if (claim->backbone_gw == backbone_gw) + /* no need to register a new backbone */ + goto claim_free_ref; + + bat_dbg(DBG_BLA, bat_priv, + "bla_add_claim(): changing ownership for %pM, vid %d\n", + mac, vid); + + claim->backbone_gw->crc ^= + crc16(0, claim->addr, ETH_ALEN); + backbone_gw_free_ref(claim->backbone_gw); + + } + /* set (new) backbone gw */ + atomic_inc(&backbone_gw->refcount); + claim->backbone_gw = backbone_gw; + + backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); + backbone_gw->lasttime = jiffies; + +claim_free_ref: + claim_free_ref(claim); +} + +/* Delete a claim from the claim hash which has the + * given mac address and vid. 
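
The per-gateway checksum maintained by bla_add_claim(), bla_del_claim() and announced by bla_send_announce() is not a CRC over one buffer but an XOR accumulation of crc16() over each claimed client MAC. Because XOR is its own inverse, claiming and unclaiming use the identical update, and two gateways holding the same set of claims converge on the same value regardless of insertion order. Example (illustrative userspace C, not part of the patch; the bitwise CRC-16 below uses the reflected polynomial 0xA001, the same family as the kernel's crc16()):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ETH_ALEN 6

    /* bitwise CRC-16, reflected polynomial 0xA001 */
    static uint16_t crc16(uint16_t crc, const uint8_t *buf, size_t len)
    {
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
            crc ^= buf[i];
            for (bit = 0; bit < 8; bit++)
                crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
        }
        return crc;
    }

    int main(void)
    {
        const uint8_t client_a[ETH_ALEN] = { 0x02, 0xaa, 0xbb, 0xcc, 0xdd, 0x01 };
        const uint8_t client_b[ETH_ALEN] = { 0x02, 0xaa, 0xbb, 0xcc, 0xdd, 0x02 };
        uint16_t crc = 0;  /* BLA_CRC_INIT */

        crc ^= crc16(0, client_a, ETH_ALEN);  /* claim A */
        crc ^= crc16(0, client_b, ETH_ALEN);  /* claim B */
        crc ^= crc16(0, client_a, ETH_ALEN);  /* unclaim A: identical update */

        /* only B is left, so the running value equals B's CRC alone */
        printf("match: %d\n", crc == crc16(0, client_b, ETH_ALEN));
        return 0;
    }
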
+ */ +static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac, + const short vid) +{ + struct claim search_claim, *claim; + + memcpy(search_claim.addr, mac, ETH_ALEN); + search_claim.vid = vid; + claim = claim_hash_find(bat_priv, &search_claim); + if (!claim) + return; + + bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid); + + hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim); + claim_free_ref(claim); /* reference from the hash is gone */ + + claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); + + /* don't need the reference from hash_find() anymore */ + claim_free_ref(claim); +} + +/* check for ANNOUNCE frame, return 1 if handled */ +static int handle_announce(struct bat_priv *bat_priv, + uint8_t *an_addr, uint8_t *backbone_addr, short vid) +{ + struct backbone_gw *backbone_gw; + uint16_t crc; + + if (memcmp(an_addr, announce_mac, 4) != 0) + return 0; + + backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid); + + if (unlikely(!backbone_gw)) + return 1; + + + /* handle as ANNOUNCE frame */ + backbone_gw->lasttime = jiffies; + crc = ntohs(*((uint16_t *)(&an_addr[4]))); + + bat_dbg(DBG_BLA, bat_priv, + "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n", + vid, backbone_gw->orig, crc); + + if (backbone_gw->crc != crc) { + bat_dbg(DBG_BLA, backbone_gw->bat_priv, + "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n", + backbone_gw->orig, backbone_gw->vid, backbone_gw->crc, + crc); + + bla_send_request(backbone_gw); + } else { + /* if we have sent a request and the crc was OK, + * we can allow traffic again. + */ + if (atomic_read(&backbone_gw->request_sent)) { + atomic_dec(&backbone_gw->bat_priv->bla_num_requests); + atomic_set(&backbone_gw->request_sent, 0); + } + } + + backbone_gw_free_ref(backbone_gw); + return 1; +} + +/* check for REQUEST frame, return 1 if handled */ +static int handle_request(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + uint8_t *backbone_addr, + struct ethhdr *ethhdr, short vid) +{ + /* check for REQUEST frame */ + if (!compare_eth(backbone_addr, ethhdr->h_dest)) + return 0; + + /* sanity check, this should not happen on a normal switch, + * we ignore it in this case. 
+ */ + if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr)) + return 1; + + bat_dbg(DBG_BLA, bat_priv, + "handle_request(): REQUEST vid %d (sent by %pM)...\n", + vid, ethhdr->h_source); + + bla_answer_request(bat_priv, primary_if, vid); + return 1; +} + +/* check for UNCLAIM frame, return 1 if handled */ +static int handle_unclaim(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + uint8_t *backbone_addr, + uint8_t *claim_addr, short vid) +{ + struct backbone_gw *backbone_gw; + + /* unclaim in any case if it is our own */ + if (primary_if && compare_eth(backbone_addr, + primary_if->net_dev->dev_addr)) + bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL); + + backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid); + + if (!backbone_gw) + return 1; + + /* this must be an UNCLAIM frame */ + bat_dbg(DBG_BLA, bat_priv, + "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n", + claim_addr, vid, backbone_gw->orig); + + bla_del_claim(bat_priv, claim_addr, vid); + backbone_gw_free_ref(backbone_gw); + return 1; +} + +/* check for CLAIM frame, return 1 if handled */ +static int handle_claim(struct bat_priv *bat_priv, + struct hard_iface *primary_if, uint8_t *backbone_addr, + uint8_t *claim_addr, short vid) +{ + struct backbone_gw *backbone_gw; + + /* register the gateway if not yet available, and add the claim. */ + + backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid); + + if (unlikely(!backbone_gw)) + return 1; + + /* this must be a CLAIM frame */ + bla_add_claim(bat_priv, claim_addr, vid, backbone_gw); + if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr)) + bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD); + + /* TODO: we could call something like tt_local_del() here. */ + + backbone_gw_free_ref(backbone_gw); + return 1; +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @hw_src: the Hardware source in the ARP Header + * @hw_dst: the Hardware destination in the ARP Header + * @ethhdr: pointer to the Ethernet header of the claim frame + * + * checks if it is a claim packet and if its on the same group. + * This function also applies the group ID of the sender + * if it is in the same mesh. + * + * returns: + * 2 - if it is a claim packet and on the same group + * 1 - if is a claim packet from another group + * 0 - if it is not a claim packet + */ +static int check_claim_group(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + uint8_t *hw_src, uint8_t *hw_dst, + struct ethhdr *ethhdr) +{ + uint8_t *backbone_addr; + struct orig_node *orig_node; + struct bla_claim_dst *bla_dst, *bla_dst_own; + + bla_dst = (struct bla_claim_dst *)hw_dst; + bla_dst_own = &bat_priv->claim_dest; + + /* check if it is a claim packet in general */ + if (memcmp(bla_dst->magic, bla_dst_own->magic, + sizeof(bla_dst->magic)) != 0) + return 0; + + /* if announcement packet, use the source, + * otherwise assume it is in the hw_src + */ + switch (bla_dst->type) { + case CLAIM_TYPE_ADD: + backbone_addr = hw_src; + break; + case CLAIM_TYPE_REQUEST: + case CLAIM_TYPE_ANNOUNCE: + case CLAIM_TYPE_DEL: + backbone_addr = ethhdr->h_source; + break; + default: + return 0; + } + + /* don't accept claim frames from ourselves */ + if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr)) + return 0; + + /* if its already the same group, it is fine. 
*/ + if (bla_dst->group == bla_dst_own->group) + return 2; + + /* lets see if this originator is in our mesh */ + orig_node = orig_hash_find(bat_priv, backbone_addr); + + /* dont accept claims from gateways which are not in + * the same mesh or group. + */ + if (!orig_node) + return 1; + + /* if our mesh friends mac is bigger, use it for ourselves. */ + if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) { + bat_dbg(DBG_BLA, bat_priv, + "taking other backbones claim group: %04x\n", + ntohs(bla_dst->group)); + bla_dst_own->group = bla_dst->group; + } + + orig_node_free_ref(orig_node); + + return 2; +} + + +/** + * @bat_priv: the bat priv with all the soft interface information + * @skb: the frame to be checked + * + * Check if this is a claim frame, and process it accordingly. + * + * returns 1 if it was a claim frame, otherwise return 0 to + * tell the callee that it can use the frame on its own. + */ +static int bla_process_claim(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + struct sk_buff *skb) +{ + struct ethhdr *ethhdr; + struct vlan_ethhdr *vhdr; + struct arphdr *arphdr; + uint8_t *hw_src, *hw_dst; + struct bla_claim_dst *bla_dst; + uint16_t proto; + int headlen; + short vid = -1; + int ret; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) { + vhdr = (struct vlan_ethhdr *)ethhdr; + vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; + proto = ntohs(vhdr->h_vlan_encapsulated_proto); + headlen = sizeof(*vhdr); + } else { + proto = ntohs(ethhdr->h_proto); + headlen = ETH_HLEN; + } + + if (proto != ETH_P_ARP) + return 0; /* not a claim frame */ + + /* this must be a ARP frame. check if it is a claim. */ + + if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev)))) + return 0; + + /* pskb_may_pull() may have modified the pointers, get ethhdr again */ + ethhdr = (struct ethhdr *)skb_mac_header(skb); + arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen); + + /* Check whether the ARP frame carries a valid + * IP information + */ + + if (arphdr->ar_hrd != htons(ARPHRD_ETHER)) + return 0; + if (arphdr->ar_pro != htons(ETH_P_IP)) + return 0; + if (arphdr->ar_hln != ETH_ALEN) + return 0; + if (arphdr->ar_pln != 4) + return 0; + + hw_src = (uint8_t *)arphdr + sizeof(struct arphdr); + hw_dst = hw_src + ETH_ALEN + 4; + bla_dst = (struct bla_claim_dst *)hw_dst; + + /* check if it is a claim frame. */ + ret = check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr); + if (ret == 1) + bat_dbg(DBG_BLA, bat_priv, + "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", + ethhdr->h_source, vid, hw_src, hw_dst); + + if (ret < 2) + return ret; + + /* become a backbone gw ourselves on this vlan if not happened yet */ + bla_update_own_backbone_gw(bat_priv, primary_if, vid); + + /* check for the different types of claim frames ... */ + switch (bla_dst->type) { + case CLAIM_TYPE_ADD: + if (handle_claim(bat_priv, primary_if, hw_src, + ethhdr->h_source, vid)) + return 1; + break; + case CLAIM_TYPE_DEL: + if (handle_unclaim(bat_priv, primary_if, + ethhdr->h_source, hw_src, vid)) + return 1; + break; + + case CLAIM_TYPE_ANNOUNCE: + if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid)) + return 1; + break; + case CLAIM_TYPE_REQUEST: + if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid)) + return 1; + break; + } + + bat_dbg(DBG_BLA, bat_priv, + "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. 
eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", + ethhdr->h_source, vid, hw_src, hw_dst); + return 1; +} + +/* Check when we last heard from other nodes, and remove them in case of + * a time out, or clean all backbone gws if now is set. + */ +static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now) +{ + struct backbone_gw *backbone_gw; + struct hlist_node *node, *node_tmp; + struct hlist_head *head; + struct hashtable_t *hash; + spinlock_t *list_lock; /* protects write access to the hash lists */ + int i; + + hash = bat_priv->backbone_hash; + if (!hash) + return; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + list_lock = &hash->list_locks[i]; + + spin_lock_bh(list_lock); + hlist_for_each_entry_safe(backbone_gw, node, node_tmp, + head, hash_entry) { + if (now) + goto purge_now; + if (!has_timed_out(backbone_gw->lasttime, + BLA_BACKBONE_TIMEOUT)) + continue; + + bat_dbg(DBG_BLA, backbone_gw->bat_priv, + "bla_purge_backbone_gw(): backbone gw %pM timed out\n", + backbone_gw->orig); + +purge_now: + /* don't wait for the pending request anymore */ + if (atomic_read(&backbone_gw->request_sent)) + atomic_dec(&bat_priv->bla_num_requests); + + bla_del_backbone_claims(backbone_gw); + + hlist_del_rcu(node); + backbone_gw_free_ref(backbone_gw); + } + spin_unlock_bh(list_lock); + } +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @primary_if: the selected primary interface, may be NULL if now is set + * @now: whether the whole hash shall be wiped now + * + * Check when we heard last time from our own claims, and remove them in case of + * a time out, or clean all claims if now is set + */ +static void bla_purge_claims(struct bat_priv *bat_priv, + struct hard_iface *primary_if, int now) +{ + struct claim *claim; + struct hlist_node *node; + struct hlist_head *head; + struct hashtable_t *hash; + int i; + + hash = bat_priv->claim_hash; + if (!hash) + return; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(claim, node, head, hash_entry) { + if (now) + goto purge_now; + if (!compare_eth(claim->backbone_gw->orig, + primary_if->net_dev->dev_addr)) + continue; + if (!has_timed_out(claim->lasttime, + BLA_CLAIM_TIMEOUT)) + continue; + + bat_dbg(DBG_BLA, bat_priv, + "bla_purge_claims(): %pM, vid %d, time out\n", + claim->addr, claim->vid); + +purge_now: + handle_unclaim(bat_priv, primary_if, + claim->backbone_gw->orig, + claim->addr, claim->vid); + } + rcu_read_unlock(); + } +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @primary_if: the new selected primary_if + * @oldif: the old primary interface, may be NULL + * + * Update the backbone gateways when the own orig address changes. + * + */ +void bla_update_orig_address(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + struct hard_iface *oldif) +{ + struct backbone_gw *backbone_gw; + struct hlist_node *node; + struct hlist_head *head; + struct hashtable_t *hash; + int i; + + /* reset bridge loop avoidance group id */ + bat_priv->claim_dest.group = + htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN)); + + if (!oldif) { + bla_purge_claims(bat_priv, NULL, 1); + bla_purge_backbone_gw(bat_priv, 1); + return; + } + + hash = bat_priv->backbone_hash; + if (!hash) + return; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { + /* own orig still holds the old value. 
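
The group id that check_claim_group() compares is seeded from crc16() of the node's own primary MAC (see bla_update_orig_address() above and bla_init() below) and is replaced by a peer's id whenever the peer's value is numerically larger, so all nodes of one mesh eventually settle on the same maximum. Example (illustrative userspace C, not part of the patch; the function and values are invented):

    #include <stdint.h>
    #include <stdio.h>

    /* adopt the peer's group id whenever it is numerically larger */
    static uint16_t merge_group_id(uint16_t own, uint16_t peer)
    {
        return peer > own ? peer : own;
    }

    int main(void)
    {
        /* three nodes, each seeded from (say) crc16 of its own primary MAC */
        uint16_t node[3] = { 0x1a2b, 0x9f00, 0x4444 };
        int i, j;

        /* once every node has seen a claim frame from every other node ... */
        for (i = 0; i < 3; i++)
            for (j = 0; j < 3; j++)
                node[i] = merge_group_id(node[i], node[j]);

        /* ... all of them carry the maximum, 0x9f00 */
        for (i = 0; i < 3; i++)
            printf("node %d: group %#06x\n", i, (unsigned)node[i]);
        return 0;
    }
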
*/ + if (!compare_eth(backbone_gw->orig, + oldif->net_dev->dev_addr)) + continue; + + memcpy(backbone_gw->orig, + primary_if->net_dev->dev_addr, ETH_ALEN); + /* send an announce frame so others will ask for our + * claims and update their tables. + */ + bla_send_announce(bat_priv, backbone_gw); + } + rcu_read_unlock(); + } +} + + + +/* (re)start the timer */ +static void bla_start_timer(struct bat_priv *bat_priv) +{ + INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work); + queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work, + msecs_to_jiffies(BLA_PERIOD_LENGTH)); +} + +/* periodic work to do: + * * purge structures when they are too old + * * send announcements + */ +static void bla_periodic_work(struct work_struct *work) +{ + struct delayed_work *delayed_work = + container_of(work, struct delayed_work, work); + struct bat_priv *bat_priv = + container_of(delayed_work, struct bat_priv, bla_work); + struct hlist_node *node; + struct hlist_head *head; + struct backbone_gw *backbone_gw; + struct hashtable_t *hash; + struct hard_iface *primary_if; + int i; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + bla_purge_claims(bat_priv, primary_if, 0); + bla_purge_backbone_gw(bat_priv, 0); + + if (!atomic_read(&bat_priv->bridge_loop_avoidance)) + goto out; + + hash = bat_priv->backbone_hash; + if (!hash) + goto out; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { + if (!compare_eth(backbone_gw->orig, + primary_if->net_dev->dev_addr)) + continue; + + backbone_gw->lasttime = jiffies; + + bla_send_announce(bat_priv, backbone_gw); + } + rcu_read_unlock(); + } +out: + if (primary_if) + hardif_free_ref(primary_if); + + bla_start_timer(bat_priv); +} + +/* initialize all bla structures */ +int bla_init(struct bat_priv *bat_priv) +{ + int i; + uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00}; + struct hard_iface *primary_if; + + bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n"); + + /* setting claim destination address */ + memcpy(&bat_priv->claim_dest.magic, claim_dest, 3); + bat_priv->claim_dest.type = 0; + primary_if = primary_if_get_selected(bat_priv); + if (primary_if) { + bat_priv->claim_dest.group = + htons(crc16(0, primary_if->net_dev->dev_addr, + ETH_ALEN)); + hardif_free_ref(primary_if); + } else { + bat_priv->claim_dest.group = 0; /* will be set later */ + } + + /* initialize the duplicate list */ + for (i = 0; i < DUPLIST_SIZE; i++) + bat_priv->bcast_duplist[i].entrytime = + jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT); + bat_priv->bcast_duplist_curr = 0; + + if (bat_priv->claim_hash) + return 1; + + bat_priv->claim_hash = hash_new(128); + bat_priv->backbone_hash = hash_new(32); + + if (!bat_priv->claim_hash || !bat_priv->backbone_hash) + return -1; + + bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n"); + + bla_start_timer(bat_priv); + return 1; +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @bcast_packet: originator mac address + * @hdr_size: maximum length of the frame + * + * check if it is on our broadcast list. Another gateway might + * have sent the same packet because it is connected to the same backbone, + * so we have to remove this duplicate. + * + * This is performed by checking the CRC, which will tell us + * with a good chance that it is the same packet. If it is furthermore + * sent by another host, drop it. 
We allow equal packets from + * the same host however as this might be intended. + * + **/ + +int bla_check_bcast_duplist(struct bat_priv *bat_priv, + struct bcast_packet *bcast_packet, + int hdr_size) +{ + int i, length, curr; + uint8_t *content; + uint16_t crc; + struct bcast_duplist_entry *entry; + + length = hdr_size - sizeof(*bcast_packet); + content = (uint8_t *)bcast_packet; + content += sizeof(*bcast_packet); + + /* calculate the crc ... */ + crc = crc16(0, content, length); + + for (i = 0 ; i < DUPLIST_SIZE; i++) { + curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE; + entry = &bat_priv->bcast_duplist[curr]; + + /* we can stop searching if the entry is too old ; + * later entries will be even older + */ + if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT)) + break; + + if (entry->crc != crc) + continue; + + if (compare_eth(entry->orig, bcast_packet->orig)) + continue; + + /* this entry seems to match: same crc, not too old, + * and from another gw. therefore return 1 to forbid it. + */ + return 1; + } + /* not found, add a new entry (overwrite the oldest entry) */ + curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE; + entry = &bat_priv->bcast_duplist[curr]; + entry->crc = crc; + entry->entrytime = jiffies; + memcpy(entry->orig, bcast_packet->orig, ETH_ALEN); + bat_priv->bcast_duplist_curr = curr; + + /* allow it, its the first occurence. */ + return 0; +} + + + +/** + * @bat_priv: the bat priv with all the soft interface information + * @orig: originator mac address + * + * check if the originator is a gateway for any VLAN ID. + * + * returns 1 if it is found, 0 otherwise + * + */ + +int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig) +{ + struct hashtable_t *hash = bat_priv->backbone_hash; + struct hlist_head *head; + struct hlist_node *node; + struct backbone_gw *backbone_gw; + int i; + + if (!atomic_read(&bat_priv->bridge_loop_avoidance)) + return 0; + + if (!hash) + return 0; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { + if (compare_eth(backbone_gw->orig, orig)) { + rcu_read_unlock(); + return 1; + } + } + rcu_read_unlock(); + } + + return 0; +} + + +/** + * @skb: the frame to be checked + * @orig_node: the orig_node of the frame + * @hdr_size: maximum length of the frame + * + * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1 + * if the orig_node is also a gateway on the soft interface, otherwise it + * returns 0. + * + */ +int bla_is_backbone_gw(struct sk_buff *skb, + struct orig_node *orig_node, int hdr_size) +{ + struct ethhdr *ethhdr; + struct vlan_ethhdr *vhdr; + struct backbone_gw *backbone_gw; + short vid = -1; + + if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance)) + return 0; + + /* first, find out the vid. 
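
bla_check_bcast_duplist() above keeps a short ring of recently seen broadcast payload CRCs so that the same broadcast re-injected by a second gateway on the shared backbone is recognised and dropped, while a repeat from the same originator is still allowed. Example (illustrative userspace C, not part of the patch; the slot count and the time source are placeholders, where the patch uses DUPLIST_SIZE 16 and jiffies):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN    6
    #define DUP_SLOTS   16
    #define DUP_TIMEOUT 500  /* "ms"; any monotonic tick works for the model */

    struct dup_entry {
        uint16_t crc;
        uint8_t  orig[ETH_ALEN];
        long     time;
    };

    static struct dup_entry duplist[DUP_SLOTS];
    static int duplist_curr;

    /* returns 1 if this broadcast was already seen from a *different* originator */
    static int is_duplicate(uint16_t crc, const uint8_t orig[ETH_ALEN], long now)
    {
        int i, curr;

        for (i = 0; i < DUP_SLOTS; i++) {
            curr = (duplist_curr + i) % DUP_SLOTS;
            if (now - duplist[curr].time > DUP_TIMEOUT)
                break;                /* older entries only get older */
            if (duplist[curr].crc != crc)
                continue;
            if (memcmp(duplist[curr].orig, orig, ETH_ALEN) == 0)
                continue;             /* same host, allow the repeat */
            return 1;
        }

        /* not found: overwrite the oldest slot and remember it */
        curr = (duplist_curr + DUP_SLOTS - 1) % DUP_SLOTS;
        duplist[curr].crc = crc;
        duplist[curr].time = now;
        memcpy(duplist[curr].orig, orig, ETH_ALEN);
        duplist_curr = curr;
        return 0;
    }

    int main(void)
    {
        uint8_t gw_a[ETH_ALEN] = { 1, 2, 3, 4, 5, 6 };
        uint8_t gw_b[ETH_ALEN] = { 6, 5, 4, 3, 2, 1 };

        printf("%d ", is_duplicate(0xabcd, gw_a, 100));   /* 0: first sighting  */
        printf("%d ", is_duplicate(0xabcd, gw_a, 110));   /* 0: same originator */
        printf("%d\n", is_duplicate(0xabcd, gw_b, 120));  /* 1: other gateway   */
        return 0;
    }
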
*/ + if (!pskb_may_pull(skb, hdr_size + ETH_HLEN)) + return 0; + + ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size); + + if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) { + if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr))) + return 0; + + vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) + + hdr_size); + vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; + } + + /* see if this originator is a backbone gw for this VLAN */ + + backbone_gw = backbone_hash_find(orig_node->bat_priv, + orig_node->orig, vid); + if (!backbone_gw) + return 0; + + backbone_gw_free_ref(backbone_gw); + return 1; +} + +/* free all bla structures (for softinterface free or module unload) */ +void bla_free(struct bat_priv *bat_priv) +{ + struct hard_iface *primary_if; + + cancel_delayed_work_sync(&bat_priv->bla_work); + primary_if = primary_if_get_selected(bat_priv); + + if (bat_priv->claim_hash) { + bla_purge_claims(bat_priv, primary_if, 1); + hash_destroy(bat_priv->claim_hash); + bat_priv->claim_hash = NULL; + } + if (bat_priv->backbone_hash) { + bla_purge_backbone_gw(bat_priv, 1); + hash_destroy(bat_priv->backbone_hash); + bat_priv->backbone_hash = NULL; + } + if (primary_if) + hardif_free_ref(primary_if); +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @skb: the frame to be checked + * @vid: the VLAN ID of the frame + * + * bla_rx avoidance checks if: + * * we have to race for a claim + * * if the frame is allowed on the LAN + * + * in these cases, the skb is further handled by this function and + * returns 1, otherwise it returns 0 and the caller shall further + * process the skb. + * + */ +int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid) +{ + struct ethhdr *ethhdr; + struct claim search_claim, *claim = NULL; + struct hard_iface *primary_if; + int ret; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto handled; + + if (!atomic_read(&bat_priv->bridge_loop_avoidance)) + goto allow; + + + if (unlikely(atomic_read(&bat_priv->bla_num_requests))) + /* don't allow broadcasts while requests are in flight */ + if (is_multicast_ether_addr(ethhdr->h_dest)) + goto handled; + + memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN); + search_claim.vid = vid; + claim = claim_hash_find(bat_priv, &search_claim); + + if (!claim) { + /* possible optimization: race for a claim */ + /* No claim exists yet, claim it for us! + */ + handle_claim(bat_priv, primary_if, + primary_if->net_dev->dev_addr, + ethhdr->h_source, vid); + goto allow; + } + + /* if it is our own claim ... */ + if (compare_eth(claim->backbone_gw->orig, + primary_if->net_dev->dev_addr)) { + /* ... allow it in any case */ + claim->lasttime = jiffies; + goto allow; + } + + /* if it is a broadcast ... */ + if (is_multicast_ether_addr(ethhdr->h_dest)) { + /* ... drop it. the responsible gateway is in charge. */ + goto handled; + } else { + /* seems the client considers us as its best gateway. + * send a claim and update the claim table + * immediately. 
+ */ + handle_claim(bat_priv, primary_if, + primary_if->net_dev->dev_addr, + ethhdr->h_source, vid); + goto allow; + } +allow: + bla_update_own_backbone_gw(bat_priv, primary_if, vid); + ret = 0; + goto out; + +handled: + kfree_skb(skb); + ret = 1; + +out: + if (primary_if) + hardif_free_ref(primary_if); + if (claim) + claim_free_ref(claim); + return ret; +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @skb: the frame to be checked + * @vid: the VLAN ID of the frame + * + * bla_tx checks if: + * * a claim was received which has to be processed + * * the frame is allowed on the mesh + * + * in these cases, the skb is further handled by this function and + * returns 1, otherwise it returns 0 and the caller shall further + * process the skb. + * + */ +int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid) +{ + struct ethhdr *ethhdr; + struct claim search_claim, *claim = NULL; + struct hard_iface *primary_if; + int ret = 0; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + if (!atomic_read(&bat_priv->bridge_loop_avoidance)) + goto allow; + + /* in VLAN case, the mac header might not be set. */ + skb_reset_mac_header(skb); + + if (bla_process_claim(bat_priv, primary_if, skb)) + goto handled; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + if (unlikely(atomic_read(&bat_priv->bla_num_requests))) + /* don't allow broadcasts while requests are in flight */ + if (is_multicast_ether_addr(ethhdr->h_dest)) + goto handled; + + memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN); + search_claim.vid = vid; + + claim = claim_hash_find(bat_priv, &search_claim); + + /* if no claim exists, allow it. */ + if (!claim) + goto allow; + + /* check if we are responsible. */ + if (compare_eth(claim->backbone_gw->orig, + primary_if->net_dev->dev_addr)) { + /* if yes, the client has roamed and we have + * to unclaim it. + */ + handle_unclaim(bat_priv, primary_if, + primary_if->net_dev->dev_addr, + ethhdr->h_source, vid); + goto allow; + } + + /* check if it is a multicast/broadcast frame */ + if (is_multicast_ether_addr(ethhdr->h_dest)) { + /* drop it. the responsible gateway has forwarded it into + * the backbone network. + */ + goto handled; + } else { + /* we must allow it. at least if we are + * responsible for the DESTINATION. 
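
Reduced to its core, the bla_rx() path above makes a three-way decision per source MAC and VLAN: claim the client if nobody owns it yet, forward if the claim is ours, and otherwise drop broadcasts (the owning gateway delivers them) while re-claiming unicast sources that apparently chose us as their gateway. Example (illustrative userspace C, not part of the patch; the enum and helper are invented, and the real code additionally freezes broadcasts while claim requests are in flight):

    #include <stdbool.h>
    #include <stdio.h>

    enum rx_verdict { RX_ALLOW, RX_DROP, RX_CLAIM_AND_ALLOW };

    /* have_claim: someone claimed this client; claim_is_ours: that someone is us */
    static enum rx_verdict bla_rx_decide(bool have_claim, bool claim_is_ours,
                                         bool is_broadcast)
    {
        if (!have_claim)
            return RX_CLAIM_AND_ALLOW;  /* race for the claim, then forward */
        if (claim_is_ours)
            return RX_ALLOW;            /* our client, always forward */
        if (is_broadcast)
            return RX_DROP;             /* the owning gateway handles it */
        return RX_CLAIM_AND_ALLOW;      /* client roamed to us, re-claim it */
    }

    int main(void)
    {
        printf("%d %d %d %d\n",
               bla_rx_decide(false, false, false),   /* 2: unclaimed client  */
               bla_rx_decide(true,  true,  true),    /* 0: our own claim     */
               bla_rx_decide(true,  false, true),    /* 1: foreign broadcast */
               bla_rx_decide(true,  false, false));  /* 2: roamed unicast    */
        return 0;
    }
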
+ */ + goto allow; + } +allow: + bla_update_own_backbone_gw(bat_priv, primary_if, vid); + ret = 0; + goto out; +handled: + ret = 1; +out: + if (primary_if) + hardif_free_ref(primary_if); + if (claim) + claim_free_ref(claim); + return ret; +} + +int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) +{ + struct net_device *net_dev = (struct net_device *)seq->private; + struct bat_priv *bat_priv = netdev_priv(net_dev); + struct hashtable_t *hash = bat_priv->claim_hash; + struct claim *claim; + struct hard_iface *primary_if; + struct hlist_node *node; + struct hlist_head *head; + uint32_t i; + bool is_own; + int ret = 0; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) { + ret = seq_printf(seq, + "BATMAN mesh %s disabled - please specify interfaces to enable it\n", + net_dev->name); + goto out; + } + + if (primary_if->if_status != IF_ACTIVE) { + ret = seq_printf(seq, + "BATMAN mesh %s disabled - primary interface not active\n", + net_dev->name); + goto out; + } + + seq_printf(seq, + "Claims announced for the mesh %s (orig %pM, group id %04x)\n", + net_dev->name, primary_if->net_dev->dev_addr, + ntohs(bat_priv->claim_dest.group)); + seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n", + "Client", "VID", "Originator", "CRC"); + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(claim, node, head, hash_entry) { + is_own = compare_eth(claim->backbone_gw->orig, + primary_if->net_dev->dev_addr); + seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n", + claim->addr, claim->vid, + claim->backbone_gw->orig, + (is_own ? 'x' : ' '), + claim->backbone_gw->crc); + } + rcu_read_unlock(); + } +out: + if (primary_if) + hardif_free_ref(primary_if); + return ret; +} diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h new file mode 100644 index 00000000000..e39f93acc28 --- /dev/null +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors: + * + * Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_BLA_H_ +#define _NET_BATMAN_ADV_BLA_H_ + +#ifdef CONFIG_BATMAN_ADV_BLA +int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); +int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); +int bla_is_backbone_gw(struct sk_buff *skb, + struct orig_node *orig_node, int hdr_size); +int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset); +int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig); +int bla_check_bcast_duplist(struct bat_priv *bat_priv, + struct bcast_packet *bcast_packet, int hdr_size); +void bla_update_orig_address(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + struct hard_iface *oldif); +int bla_init(struct bat_priv *bat_priv); +void bla_free(struct bat_priv *bat_priv); + +#define BLA_CRC_INIT 0 +#else /* ifdef CONFIG_BATMAN_ADV_BLA */ + +static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, + short vid) +{ + return 0; +} + +static inline int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, + short vid) +{ + return 0; +} + +static inline int bla_is_backbone_gw(struct sk_buff *skb, + struct orig_node *orig_node, + int hdr_size) +{ + return 0; +} + +static inline int bla_claim_table_seq_print_text(struct seq_file *seq, + void *offset) +{ + return 0; +} + +static inline int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, + uint8_t *orig) +{ + return 0; +} + +static inline int bla_check_bcast_duplist(struct bat_priv *bat_priv, + struct bcast_packet *bcast_packet, + int hdr_size) +{ + return 0; +} + +static inline void bla_update_orig_address(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + struct hard_iface *oldif) +{ +} + +static inline int bla_init(struct bat_priv *bat_priv) +{ + return 1; +} + +static inline void bla_free(struct bat_priv *bat_priv) +{ +} + +#endif /* ifdef CONFIG_BATMAN_ADV_BLA */ + +#endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */ diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 6f9b9b78f77..47f7186dcef 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -558,10 +558,10 @@ static bool is_type_dhcprequest(struct sk_buff *skb, int header_len) p++; /* ...and then we jump over the data */ - if (pkt_len < *p) + if (pkt_len < 1 + (*p)) goto out; - pkt_len -= *p; - p += (*p); + pkt_len -= 1 + (*p); + p += 1 + (*p); } } out: diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 377897701a8..dc334fa8984 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -28,15 +28,10 @@ #include "bat_sysfs.h" #include "originator.h" #include "hash.h" +#include "bridge_loop_avoidance.h" #include <linux/if_arp.h> - -static int batman_skb_recv(struct sk_buff *skb, - struct net_device *dev, - struct packet_type *ptype, - struct net_device *orig_dev); - void hardif_free_rcu(struct rcu_head *rcu) { struct hard_iface *hard_iface; @@ -107,7 +102,8 @@ out: return hard_iface; } -static void primary_if_update_addr(struct bat_priv *bat_priv) +static void primary_if_update_addr(struct bat_priv *bat_priv, + struct hard_iface *oldif) { struct vis_packet *vis_packet; struct hard_iface *primary_if; @@ -122,6 +118,7 @@ static void primary_if_update_addr(struct bat_priv *bat_priv) memcpy(vis_packet->sender_orig, 
primary_if->net_dev->dev_addr, ETH_ALEN); + bla_update_orig_address(bat_priv, primary_if, oldif); out: if (primary_if) hardif_free_ref(primary_if); @@ -140,14 +137,15 @@ static void primary_if_select(struct bat_priv *bat_priv, curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1); rcu_assign_pointer(bat_priv->primary_if, new_hard_iface); - if (curr_hard_iface) - hardif_free_ref(curr_hard_iface); - if (!new_hard_iface) - return; + goto out; + + bat_priv->bat_algo_ops->bat_primary_iface_set(new_hard_iface); + primary_if_update_addr(bat_priv, curr_hard_iface); - bat_priv->bat_algo_ops->bat_ogm_init_primary(new_hard_iface); - primary_if_update_addr(bat_priv); +out: + if (curr_hard_iface) + hardif_free_ref(curr_hard_iface); } static bool hardif_is_iface_up(const struct hard_iface *hard_iface) @@ -175,9 +173,9 @@ static void check_known_mac_addr(const struct net_device *net_dev) net_dev->dev_addr)) continue; - pr_warning("The newly added mac address (%pM) already exists on: %s\n", - net_dev->dev_addr, hard_iface->net_dev->name); - pr_warning("It is strongly recommended to keep mac addresses unique to avoid problems!\n"); + pr_warn("The newly added mac address (%pM) already exists on: %s\n", + net_dev->dev_addr, hard_iface->net_dev->name); + pr_warn("It is strongly recommended to keep mac addresses unique to avoid problems!\n"); } rcu_read_unlock(); } @@ -230,7 +228,7 @@ static void hardif_activate_interface(struct hard_iface *hard_iface) bat_priv = netdev_priv(hard_iface->soft_iface); - bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface); + bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface); hard_iface->if_status = IF_TO_BE_ACTIVATED; /** @@ -300,22 +298,17 @@ int hardif_enable_interface(struct hard_iface *hard_iface, if (!softif_is_valid(soft_iface)) { pr_err("Can't create batman mesh interface %s: already exists as regular interface\n", soft_iface->name); - dev_put(soft_iface); ret = -EINVAL; - goto err; + goto err_dev; } hard_iface->soft_iface = soft_iface; bat_priv = netdev_priv(hard_iface->soft_iface); - bat_priv->bat_algo_ops->bat_ogm_init(hard_iface); - - if (!hard_iface->packet_buff) { - bat_err(hard_iface->soft_iface, - "Can't add interface packet (%s): out of memory\n", - hard_iface->net_dev->name); + ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface); + if (ret < 0) { ret = -ENOMEM; - goto err; + goto err_dev; } hard_iface->if_num = bat_priv->num_ifaces; @@ -328,7 +321,6 @@ int hardif_enable_interface(struct hard_iface *hard_iface, hard_iface->batman_adv_ptype.dev = hard_iface->net_dev; dev_add_pack(&hard_iface->batman_adv_ptype); - atomic_set(&hard_iface->seqno, 1); atomic_set(&hard_iface->frag_seqno, 1); bat_info(hard_iface->soft_iface, "Adding interface: %s\n", hard_iface->net_dev->name); @@ -360,6 +352,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface, out: return 0; +err_dev: + dev_put(soft_iface); err: hardif_free_ref(hard_iface); return ret; @@ -394,8 +388,7 @@ void hardif_disable_interface(struct hard_iface *hard_iface) hardif_free_ref(new_if); } - kfree(hard_iface->packet_buff); - hard_iface->packet_buff = NULL; + bat_priv->bat_algo_ops->bat_iface_disable(hard_iface); hard_iface->if_status = IF_NOT_IN_USE; /* delete all references to this hard_iface */ @@ -447,6 +440,13 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev) check_known_mac_addr(hard_iface->net_dev); list_add_tail_rcu(&hard_iface->list, &hardif_list); + /** + * This can't be called via a bat_priv callback because + * we have no bat_priv yet. 
+ */ + atomic_set(&hard_iface->seqno, 1); + hard_iface->packet_buff = NULL; + return hard_iface; free_if: @@ -524,14 +524,14 @@ static int hard_if_event(struct notifier_block *this, check_known_mac_addr(hard_iface->net_dev); bat_priv = netdev_priv(hard_iface->soft_iface); - bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface); + bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface); primary_if = primary_if_get_selected(bat_priv); if (!primary_if) goto hardif_put; if (hard_iface == primary_if) - primary_if_update_addr(bat_priv); + primary_if_update_addr(bat_priv, NULL); break; default: break; @@ -545,114 +545,6 @@ out: return NOTIFY_DONE; } -/* incoming packets with the batman ethertype received on any active hard - * interface */ -static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *ptype, - struct net_device *orig_dev) -{ - struct bat_priv *bat_priv; - struct batman_ogm_packet *batman_ogm_packet; - struct hard_iface *hard_iface; - int ret; - - hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype); - skb = skb_share_check(skb, GFP_ATOMIC); - - /* skb was released by skb_share_check() */ - if (!skb) - goto err_out; - - /* packet should hold at least type and version */ - if (unlikely(!pskb_may_pull(skb, 2))) - goto err_free; - - /* expect a valid ethernet header here. */ - if (unlikely(skb->mac_len != sizeof(struct ethhdr) || - !skb_mac_header(skb))) - goto err_free; - - if (!hard_iface->soft_iface) - goto err_free; - - bat_priv = netdev_priv(hard_iface->soft_iface); - - if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) - goto err_free; - - /* discard frames on not active interfaces */ - if (hard_iface->if_status != IF_ACTIVE) - goto err_free; - - batman_ogm_packet = (struct batman_ogm_packet *)skb->data; - - if (batman_ogm_packet->header.version != COMPAT_VERSION) { - bat_dbg(DBG_BATMAN, bat_priv, - "Drop packet: incompatible batman version (%i)\n", - batman_ogm_packet->header.version); - goto err_free; - } - - /* all receive handlers return whether they received or reused - * the supplied skb. if not, we have to free the skb. */ - - switch (batman_ogm_packet->header.packet_type) { - /* batman originator packet */ - case BAT_OGM: - ret = recv_bat_ogm_packet(skb, hard_iface); - break; - - /* batman icmp packet */ - case BAT_ICMP: - ret = recv_icmp_packet(skb, hard_iface); - break; - - /* unicast packet */ - case BAT_UNICAST: - ret = recv_unicast_packet(skb, hard_iface); - break; - - /* fragmented unicast packet */ - case BAT_UNICAST_FRAG: - ret = recv_ucast_frag_packet(skb, hard_iface); - break; - - /* broadcast packet */ - case BAT_BCAST: - ret = recv_bcast_packet(skb, hard_iface); - break; - - /* vis packet */ - case BAT_VIS: - ret = recv_vis_packet(skb, hard_iface); - break; - /* Translation table query (request or response) */ - case BAT_TT_QUERY: - ret = recv_tt_query(skb, hard_iface); - break; - /* Roaming advertisement */ - case BAT_ROAM_ADV: - ret = recv_roam_adv(skb, hard_iface); - break; - default: - ret = NET_RX_DROP; - } - - if (ret == NET_RX_DROP) - kfree_skb(skb); - - /* return NET_RX_SUCCESS in any case as we - * most probably dropped the packet for - * routing-logical reasons. 
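
The switch statement removed from hard-interface.c here reappears in the main.c hunk below as a 256-entry table of handler functions indexed by the packet type byte, plus recv_handler_register()/recv_handler_unregister() so routing algorithms can hook their own packet types. Example (illustrative userspace C, not part of the patch; constants and handler bodies are stand-ins):

    #include <stdio.h>

    #define NET_RX_SUCCESS 0
    #define NET_RX_DROP    1

    struct sk_buff;  /* opaque in this sketch */

    typedef int (*recv_handler_t)(struct sk_buff *skb);

    static int recv_unhandled(struct sk_buff *skb)
    {
        (void)skb;
        return NET_RX_DROP;
    }

    /* one slot per possible packet-type byte, everything defaults to "drop" */
    static recv_handler_t recv_handler[256];

    static void recv_handler_init(void)
    {
        int i;

        for (i = 0; i < 256; i++)
            recv_handler[i] = recv_unhandled;
    }

    static int recv_handler_register(unsigned char type, recv_handler_t handler)
    {
        if (recv_handler[type] != recv_unhandled)
            return -1;  /* slot already taken */
        recv_handler[type] = handler;
        return 0;
    }

    static int recv_icmp(struct sk_buff *skb)
    {
        (void)skb;
        return NET_RX_SUCCESS;
    }

    int main(void)
    {
        recv_handler_init();
        recv_handler_register(0x02, recv_icmp);  /* 0x02 is BAT_ICMP in packet.h */

        /* dispatch replaces the old switch: one indexed call per frame */
        printf("icmp: %d, unknown: %d\n",
               recv_handler[0x02](NULL), recv_handler[0xff](NULL));
        return 0;
    }
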
*/ - - return NET_RX_SUCCESS; - -err_free: - kfree_skb(skb); -err_out: - return NET_RX_DROP; -} - /* This function returns true if the interface represented by ifindex is a * 802.11 wireless device */ bool is_wifi_iface(int ifindex) diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c index b87518edcef..2e98a57f340 100644 --- a/net/batman-adv/icmp_socket.c +++ b/net/batman-adv/icmp_socket.c @@ -175,13 +175,13 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff, if (len >= sizeof(struct icmp_packet_rr)) packet_len = sizeof(struct icmp_packet_rr); - skb = dev_alloc_skb(packet_len + sizeof(struct ethhdr)); + skb = dev_alloc_skb(packet_len + ETH_HLEN); if (!skb) { len = -ENOMEM; goto out; } - skb_reserve(skb, sizeof(struct ethhdr)); + skb_reserve(skb, ETH_HLEN); icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len); if (copy_from_user(icmp_packet, buff, packet_len)) { diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 6d51caaf8ce..083a2993efe 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -30,6 +30,7 @@ #include "translation-table.h" #include "hard-interface.h" #include "gateway_client.h" +#include "bridge_loop_avoidance.h" #include "vis.h" #include "hash.h" #include "bat_algo.h" @@ -38,6 +39,7 @@ /* List manipulations on hardif_list have to be rtnl_lock()'ed, * list traversals just rcu-locked */ struct list_head hardif_list; +static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *); char bat_routing_algo[20] = "BATMAN IV"; static struct hlist_head bat_algo_list; @@ -45,11 +47,15 @@ unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct workqueue_struct *bat_event_workqueue; +static void recv_handler_init(void); + static int __init batman_init(void) { INIT_LIST_HEAD(&hardif_list); INIT_HLIST_HEAD(&bat_algo_list); + recv_handler_init(); + bat_iv_init(); /* the name should not be longer than 10 chars - see @@ -96,13 +102,10 @@ int mesh_init(struct net_device *soft_iface) spin_lock_init(&bat_priv->gw_list_lock); spin_lock_init(&bat_priv->vis_hash_lock); spin_lock_init(&bat_priv->vis_list_lock); - spin_lock_init(&bat_priv->softif_neigh_lock); - spin_lock_init(&bat_priv->softif_neigh_vid_lock); INIT_HLIST_HEAD(&bat_priv->forw_bat_list); INIT_HLIST_HEAD(&bat_priv->forw_bcast_list); INIT_HLIST_HEAD(&bat_priv->gw_list); - INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids); INIT_LIST_HEAD(&bat_priv->tt_changes_list); INIT_LIST_HEAD(&bat_priv->tt_req_list); INIT_LIST_HEAD(&bat_priv->tt_roam_list); @@ -118,6 +121,9 @@ int mesh_init(struct net_device *soft_iface) if (vis_init(bat_priv) < 1) goto err; + if (bla_init(bat_priv) < 1) + goto err; + atomic_set(&bat_priv->gw_reselect, 0); atomic_set(&bat_priv->mesh_state, MESH_ACTIVE); goto end; @@ -145,7 +151,7 @@ void mesh_free(struct net_device *soft_iface) tt_free(bat_priv); - softif_neigh_purge(bat_priv); + bla_free(bat_priv); atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); } @@ -178,6 +184,120 @@ int is_my_mac(const uint8_t *addr) return 0; } +static int recv_unhandled_packet(struct sk_buff *skb, + struct hard_iface *recv_if) +{ + return NET_RX_DROP; +} + +/* incoming packets with the batman ethertype received on any active hard + * interface + */ +int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *orig_dev) +{ + struct bat_priv *bat_priv; + struct batman_ogm_packet *batman_ogm_packet; + struct hard_iface *hard_iface; + uint8_t idx; + int ret; + + hard_iface = 
container_of(ptype, struct hard_iface, batman_adv_ptype); + skb = skb_share_check(skb, GFP_ATOMIC); + + /* skb was released by skb_share_check() */ + if (!skb) + goto err_out; + + /* packet should hold at least type and version */ + if (unlikely(!pskb_may_pull(skb, 2))) + goto err_free; + + /* expect a valid ethernet header here. */ + if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb))) + goto err_free; + + if (!hard_iface->soft_iface) + goto err_free; + + bat_priv = netdev_priv(hard_iface->soft_iface); + + if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) + goto err_free; + + /* discard frames on not active interfaces */ + if (hard_iface->if_status != IF_ACTIVE) + goto err_free; + + batman_ogm_packet = (struct batman_ogm_packet *)skb->data; + + if (batman_ogm_packet->header.version != COMPAT_VERSION) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: incompatible batman version (%i)\n", + batman_ogm_packet->header.version); + goto err_free; + } + + /* all receive handlers return whether they received or reused + * the supplied skb. if not, we have to free the skb. + */ + idx = batman_ogm_packet->header.packet_type; + ret = (*recv_packet_handler[idx])(skb, hard_iface); + + if (ret == NET_RX_DROP) + kfree_skb(skb); + + /* return NET_RX_SUCCESS in any case as we + * most probably dropped the packet for + * routing-logical reasons. + */ + return NET_RX_SUCCESS; + +err_free: + kfree_skb(skb); +err_out: + return NET_RX_DROP; +} + +static void recv_handler_init(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(recv_packet_handler); i++) + recv_packet_handler[i] = recv_unhandled_packet; + + /* batman icmp packet */ + recv_packet_handler[BAT_ICMP] = recv_icmp_packet; + /* unicast packet */ + recv_packet_handler[BAT_UNICAST] = recv_unicast_packet; + /* fragmented unicast packet */ + recv_packet_handler[BAT_UNICAST_FRAG] = recv_ucast_frag_packet; + /* broadcast packet */ + recv_packet_handler[BAT_BCAST] = recv_bcast_packet; + /* vis packet */ + recv_packet_handler[BAT_VIS] = recv_vis_packet; + /* Translation table query (request or response) */ + recv_packet_handler[BAT_TT_QUERY] = recv_tt_query; + /* Roaming advertisement */ + recv_packet_handler[BAT_ROAM_ADV] = recv_roam_adv; +} + +int recv_handler_register(uint8_t packet_type, + int (*recv_handler)(struct sk_buff *, + struct hard_iface *)) +{ + if (recv_packet_handler[packet_type] != &recv_unhandled_packet) + return -EBUSY; + + recv_packet_handler[packet_type] = recv_handler; + return 0; +} + +void recv_handler_unregister(uint8_t packet_type) +{ + recv_packet_handler[packet_type] = recv_unhandled_packet; +} + static struct bat_algo_ops *bat_algo_get(char *name) { struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp; @@ -207,12 +327,12 @@ int bat_algo_register(struct bat_algo_ops *bat_algo_ops) } /* all algorithms must implement all ops (for now) */ - if (!bat_algo_ops->bat_ogm_init || - !bat_algo_ops->bat_ogm_init_primary || - !bat_algo_ops->bat_ogm_update_mac || + if (!bat_algo_ops->bat_iface_enable || + !bat_algo_ops->bat_iface_disable || + !bat_algo_ops->bat_iface_update_mac || + !bat_algo_ops->bat_primary_iface_set || !bat_algo_ops->bat_ogm_schedule || - !bat_algo_ops->bat_ogm_emit || - !bat_algo_ops->bat_ogm_receive) { + !bat_algo_ops->bat_ogm_emit) { pr_info("Routing algo '%s' does not implement required ops\n", bat_algo_ops->name); goto out; diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 94fa1c2393a..f4a3ec00347 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -28,7 +28,7 @@ #define 
DRIVER_DEVICE "batman-adv" #ifndef SOURCE_VERSION -#define SOURCE_VERSION "2012.1.0" +#define SOURCE_VERSION "2012.2.0" #endif /* B.A.T.M.A.N. parameters */ @@ -65,7 +65,7 @@ #define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */ -#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE) +#define NUM_WORDS BITS_TO_LONGS(TQ_LOCAL_WINDOW_SIZE) #define LOG_BUF_LEN 8192 /* has to be a power of 2 */ @@ -80,8 +80,12 @@ #define MAX_AGGREGATION_BYTES 512 #define MAX_AGGREGATION_MS 100 -#define SOFTIF_NEIGH_TIMEOUT 180000 /* 3 minutes */ +#define BLA_PERIOD_LENGTH 10000 /* 10 seconds */ +#define BLA_BACKBONE_TIMEOUT (BLA_PERIOD_LENGTH * 3) +#define BLA_CLAIM_TIMEOUT (BLA_PERIOD_LENGTH * 10) +#define DUPLIST_SIZE 16 +#define DUPLIST_TIMEOUT 500 /* 500 ms */ /* don't reset again within 30 seconds */ #define RESET_PROTECTION_MS 30000 #define EXPECTED_SEQNO_RANGE 65536 @@ -119,7 +123,8 @@ enum dbg_level { DBG_BATMAN = 1 << 0, DBG_ROUTES = 1 << 1, /* route added / changed / deleted */ DBG_TT = 1 << 2, /* translation table operations */ - DBG_ALL = 7 + DBG_BLA = 1 << 3, /* bridge loop avoidance */ + DBG_ALL = 15 }; /* Kernel headers */ @@ -150,6 +155,12 @@ void mesh_free(struct net_device *soft_iface); void inc_module_count(void); void dec_module_count(void); int is_my_mac(const uint8_t *addr); +int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *orig_dev); +int recv_handler_register(uint8_t packet_type, + int (*recv_handler)(struct sk_buff *, + struct hard_iface *)); +void recv_handler_unregister(uint8_t packet_type); int bat_algo_register(struct bat_algo_ops *bat_algo_ops); int bat_algo_select(struct bat_priv *bat_priv, char *name); int bat_algo_seq_print_text(struct seq_file *seq, void *offset); diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 43c0a4f1399..41147942ba5 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -28,13 +28,15 @@ #include "hard-interface.h" #include "unicast.h" #include "soft-interface.h" +#include "bridge_loop_avoidance.h" static void purge_orig(struct work_struct *work); static void start_purge_timer(struct bat_priv *bat_priv) { INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig); - queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ); + queue_delayed_work(bat_event_workqueue, + &bat_priv->orig_work, msecs_to_jiffies(1000)); } /* returns 1 if they are the same originator */ @@ -83,35 +85,30 @@ struct neigh_node *orig_node_get_router(struct orig_node *orig_node) return router; } -struct neigh_node *create_neighbor(struct orig_node *orig_node, - struct orig_node *orig_neigh_node, - const uint8_t *neigh, - struct hard_iface *if_incoming) +struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface, + const uint8_t *neigh_addr, + uint32_t seqno) { - struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); struct neigh_node *neigh_node; - bat_dbg(DBG_BATMAN, bat_priv, - "Creating new last-hop neighbor of originator\n"); - neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC); if (!neigh_node) - return NULL; + goto out; INIT_HLIST_NODE(&neigh_node->list); - INIT_LIST_HEAD(&neigh_node->bonding_list); - spin_lock_init(&neigh_node->tq_lock); - memcpy(neigh_node->addr, neigh, ETH_ALEN); - neigh_node->orig_node = orig_neigh_node; - neigh_node->if_incoming = if_incoming; + memcpy(neigh_node->addr, neigh_addr, ETH_ALEN); + 
spin_lock_init(&neigh_node->lq_update_lock); /* extra reference for return */ atomic_set(&neigh_node->refcount, 2); - spin_lock_bh(&orig_node->neigh_list_lock); - hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); - spin_unlock_bh(&orig_node->neigh_list_lock); + bat_dbg(DBG_BATMAN, bat_priv, + "Creating new neighbor %pM, initial seqno %d\n", + neigh_addr, seqno); + +out: return neigh_node; } @@ -273,6 +270,7 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv, struct hlist_node *node, *node_tmp; struct neigh_node *neigh_node; bool neigh_purged = false; + unsigned long last_seen; *best_neigh_node = NULL; @@ -282,11 +280,13 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv, hlist_for_each_entry_safe(neigh_node, node, node_tmp, &orig_node->neigh_list, list) { - if ((has_timed_out(neigh_node->last_valid, PURGE_TIMEOUT)) || + if ((has_timed_out(neigh_node->last_seen, PURGE_TIMEOUT)) || (neigh_node->if_incoming->if_status == IF_INACTIVE) || (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) || (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) { + last_seen = neigh_node->last_seen; + if ((neigh_node->if_incoming->if_status == IF_INACTIVE) || (neigh_node->if_incoming->if_status == @@ -299,9 +299,9 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv, neigh_node->if_incoming->net_dev->name); else bat_dbg(DBG_BATMAN, bat_priv, - "neighbor timeout: originator %pM, neighbor: %pM, last_valid: %lu\n", + "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n", orig_node->orig, neigh_node->addr, - (neigh_node->last_valid / HZ)); + jiffies_to_msecs(last_seen)); neigh_purged = true; @@ -324,10 +324,11 @@ static bool purge_orig_node(struct bat_priv *bat_priv, { struct neigh_node *best_neigh_node; - if (has_timed_out(orig_node->last_valid, 2 * PURGE_TIMEOUT)) { + if (has_timed_out(orig_node->last_seen, 2 * PURGE_TIMEOUT)) { bat_dbg(DBG_BATMAN, bat_priv, - "Originator timeout: originator %pM, last_valid %lu\n", - orig_node->orig, (orig_node->last_valid / HZ)); + "Originator timeout: originator %pM, last_seen %u\n", + orig_node->orig, + jiffies_to_msecs(orig_node->last_seen)); return true; } else { if (purge_orig_neighbors(bat_priv, orig_node, @@ -375,8 +376,6 @@ static void _purge_orig(struct bat_priv *bat_priv) gw_node_purge(bat_priv); gw_election(bat_priv); - - softif_neigh_purge(bat_priv); } static void purge_orig(struct work_struct *work) @@ -447,9 +446,9 @@ int orig_seq_print_text(struct seq_file *seq, void *offset) goto next; last_seen_secs = jiffies_to_msecs(jiffies - - orig_node->last_valid) / 1000; + orig_node->last_seen) / 1000; last_seen_msecs = jiffies_to_msecs(jiffies - - orig_node->last_valid) % 1000; + orig_node->last_seen) % 1000; seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:", orig_node->orig, last_seen_secs, diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index 3fe2eda8565..f74d0d69335 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h @@ -29,10 +29,9 @@ void originator_free(struct bat_priv *bat_priv); void purge_orig_ref(struct bat_priv *bat_priv); void orig_node_free_ref(struct orig_node *orig_node); struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr); -struct neigh_node *create_neighbor(struct orig_node *orig_node, - struct orig_node *orig_neigh_node, - const uint8_t *neigh, - struct hard_iface *if_incoming); +struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface, + const uint8_t *neigh_addr, + uint32_t seqno); void 
neigh_node_free_ref(struct neigh_node *neigh_node); struct neigh_node *orig_node_get_router(struct orig_node *orig_node); int orig_seq_print_text(struct seq_file *seq, void *offset); diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h index 441f3db1bd9..0ee1af77079 100644 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h @@ -25,7 +25,7 @@ #define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */ enum bat_packettype { - BAT_OGM = 0x01, + BAT_IV_OGM = 0x01, BAT_ICMP = 0x02, BAT_UNICAST = 0x03, BAT_BCAST = 0x04, @@ -38,7 +38,8 @@ enum bat_packettype { /* this file is included by batctl which needs these defines */ #define COMPAT_VERSION 14 -enum batman_flags { +enum batman_iv_flags { + NOT_BEST_NEXT_HOP = 1 << 3, PRIMARIES_FIRST_HOP = 1 << 4, VIS_SERVER = 1 << 5, DIRECTLINK = 1 << 6 @@ -90,6 +91,23 @@ enum tt_client_flags { TT_CLIENT_PENDING = 1 << 10 }; +/* claim frame types for the bridge loop avoidance */ +enum bla_claimframe { + CLAIM_TYPE_ADD = 0x00, + CLAIM_TYPE_DEL = 0x01, + CLAIM_TYPE_ANNOUNCE = 0x02, + CLAIM_TYPE_REQUEST = 0x03 +}; + +/* the destination hardware field in the ARP frame is used to + * transport the claim type and the group id + */ +struct bla_claim_dst { + uint8_t magic[3]; /* FF:43:05 */ + uint8_t type; /* bla_claimframe */ + uint16_t group; /* group id */ +} __packed; + struct batman_header { uint8_t packet_type; uint8_t version; /* batman version field */ @@ -100,8 +118,8 @@ struct batman_ogm_packet { struct batman_header header; uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */ uint32_t seqno; - uint8_t orig[6]; - uint8_t prev_sender[6]; + uint8_t orig[ETH_ALEN]; + uint8_t prev_sender[ETH_ALEN]; uint8_t gw_flags; /* flags related to gateway class */ uint8_t tq; uint8_t tt_num_changes; @@ -109,13 +127,13 @@ struct batman_ogm_packet { uint16_t tt_crc; } __packed; -#define BATMAN_OGM_LEN sizeof(struct batman_ogm_packet) +#define BATMAN_OGM_HLEN sizeof(struct batman_ogm_packet) struct icmp_packet { struct batman_header header; uint8_t msg_type; /* see ICMP message types above */ - uint8_t dst[6]; - uint8_t orig[6]; + uint8_t dst[ETH_ALEN]; + uint8_t orig[ETH_ALEN]; uint16_t seqno; uint8_t uid; uint8_t reserved; @@ -128,8 +146,8 @@ struct icmp_packet { struct icmp_packet_rr { struct batman_header header; uint8_t msg_type; /* see ICMP message types above */ - uint8_t dst[6]; - uint8_t orig[6]; + uint8_t dst[ETH_ALEN]; + uint8_t orig[ETH_ALEN]; uint16_t seqno; uint8_t uid; uint8_t rr_cur; @@ -139,16 +157,16 @@ struct icmp_packet_rr { struct unicast_packet { struct batman_header header; uint8_t ttvn; /* destination translation table version number */ - uint8_t dest[6]; + uint8_t dest[ETH_ALEN]; } __packed; struct unicast_frag_packet { struct batman_header header; uint8_t ttvn; /* destination translation table version number */ - uint8_t dest[6]; + uint8_t dest[ETH_ALEN]; uint8_t flags; uint8_t align; - uint8_t orig[6]; + uint8_t orig[ETH_ALEN]; uint16_t seqno; } __packed; @@ -156,7 +174,7 @@ struct bcast_packet { struct batman_header header; uint8_t reserved; uint32_t seqno; - uint8_t orig[6]; + uint8_t orig[ETH_ALEN]; } __packed; struct vis_packet { @@ -165,9 +183,9 @@ struct vis_packet { uint32_t seqno; /* sequence number */ uint8_t entries; /* number of entries behind this struct */ uint8_t reserved; - uint8_t vis_orig[6]; /* originator that announces its neighbors */ - uint8_t target_orig[6]; /* who should receive this packet */ - uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */ + uint8_t 
vis_orig[ETH_ALEN]; /* originator reporting its neighbors */ + uint8_t target_orig[ETH_ALEN]; /* who should receive this packet */ + uint8_t sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */ } __packed; struct tt_query_packet { diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 7f8e1589941..840e2c64a30 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -29,6 +29,10 @@ #include "originator.h" #include "vis.h" #include "unicast.h" +#include "bridge_loop_avoidance.h" + +static int route_unicast_packet(struct sk_buff *skb, + struct hard_iface *recv_if); void slide_own_bcast_window(struct hard_iface *hard_iface) { @@ -52,7 +56,7 @@ void slide_own_bcast_window(struct hard_iface *hard_iface) bit_get_packet(bat_priv, word, 1, 0); orig_node->bcast_own_sum[hard_iface->if_num] = - bit_packet_count(word); + bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE); spin_unlock_bh(&orig_node->ogm_cnt_lock); } rcu_read_unlock(); @@ -230,51 +234,46 @@ int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff, { if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { - if (has_timed_out(*last_reset, RESET_PROTECTION_MS)) { - - *last_reset = jiffies; - bat_dbg(DBG_BATMAN, bat_priv, - "old packet received, start protection\n"); - - return 0; - } else { + if (!has_timed_out(*last_reset, RESET_PROTECTION_MS)) return 1; - } + + *last_reset = jiffies; + bat_dbg(DBG_BATMAN, bat_priv, + "old packet received, start protection\n"); } + return 0; } -int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface) +bool check_management_packet(struct sk_buff *skb, + struct hard_iface *hard_iface, + int header_len) { - struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); struct ethhdr *ethhdr; /* drop packet if it has not necessary minimum size */ - if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_LEN))) - return NET_RX_DROP; + if (unlikely(!pskb_may_pull(skb, header_len))) + return false; ethhdr = (struct ethhdr *)skb_mac_header(skb); /* packet with broadcast indication but unicast recipient */ if (!is_broadcast_ether_addr(ethhdr->h_dest)) - return NET_RX_DROP; + return false; /* packet with broadcast sender address */ if (is_broadcast_ether_addr(ethhdr->h_source)) - return NET_RX_DROP; + return false; /* create a copy of the skb, if needed, to modify it. */ if (skb_cow(skb, 0) < 0) - return NET_RX_DROP; + return false; /* keep skb linear */ if (skb_linearize(skb) < 0) - return NET_RX_DROP; - - bat_priv->bat_algo_ops->bat_ogm_receive(hard_iface, skb); + return false; - kfree_skb(skb); - return NET_RX_SUCCESS; + return true; } static int recv_my_icmp_packet(struct bat_priv *bat_priv, @@ -309,7 +308,7 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv, goto out; /* create a copy of the skb, if needed, to modify it. */ - if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + if (skb_cow(skb, ETH_HLEN) < 0) goto out; icmp_packet = (struct icmp_packet_rr *)skb->data; @@ -364,7 +363,7 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, goto out; /* create a copy of the skb, if needed, to modify it. */ - if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + if (skb_cow(skb, ETH_HLEN) < 0) goto out; icmp_packet = (struct icmp_packet *)skb->data; @@ -450,7 +449,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if) goto out; /* create a copy of the skb, if needed, to modify it. 
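
The slide_own_bcast_window() hunk above replaces the driver's hand-rolled bit_packet_count() with the generic bitmap_weight() helper. The following is a minimal userspace sketch of what that call computes: a popcount over the unsigned long words backing the sequence-number window. BITS_PER_LONG, WINDOW_BITS and __builtin_popcountl() are choices of this sketch, not batman-adv symbols.

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define WINDOW_BITS	64		/* TQ_LOCAL_WINDOW_SIZE in the driver */

/* count the set bits across the words of a bitmap, like bitmap_weight()
 * (this stub assumes the bitmap length is a multiple of BITS_PER_LONG) */
static unsigned int bitmap_weight(const unsigned long *map, unsigned int bits)
{
	unsigned int i, words = (bits + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned int weight = 0;

	for (i = 0; i < words; i++)
		weight += __builtin_popcountl(map[i]);	/* GCC/Clang builtin */
	return weight;
}

int main(void)
{
	unsigned long window[(WINDOW_BITS + BITS_PER_LONG - 1) / BITS_PER_LONG] = { 0 };

	/* three own OGMs fell inside the current window */
	window[0] |= 1UL << 0;
	window[0] |= 1UL << 5;
	window[0] |= 1UL << 9;

	printf("packets counted in window: %u\n",
	       bitmap_weight(window, WINDOW_BITS));
	return 0;
}
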
*/ - if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + if (skb_cow(skb, ETH_HLEN) < 0) goto out; icmp_packet = (struct icmp_packet_rr *)skb->data; @@ -669,6 +668,13 @@ int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if) if (!is_my_mac(roam_adv_packet->dst)) return route_unicast_packet(skb, recv_if); + /* check if it is a backbone gateway. we don't accept + * roaming advertisement from it, as it has the same + * entries as we have. + */ + if (bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src)) + goto out; + orig_node = orig_hash_find(bat_priv, roam_adv_packet->src); if (!orig_node) goto out; @@ -798,7 +804,7 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size) return 0; } -int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) +static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct orig_node *orig_node = NULL; @@ -830,7 +836,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) goto out; /* create a copy of the skb, if needed, to modify it. */ - if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + if (skb_cow(skb, ETH_HLEN) < 0) goto out; unicast_packet = (struct unicast_packet *)skb->data; @@ -907,12 +913,20 @@ static int check_unicast_ttvn(struct bat_priv *bat_priv, /* Check whether I have to reroute the packet */ if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) { - /* Linearize the skb before accessing it */ - if (skb_linearize(skb) < 0) + /* check if there is enough data before accessing it */ + if (pskb_may_pull(skb, sizeof(struct unicast_packet) + + ETH_HLEN) < 0) return 0; ethhdr = (struct ethhdr *)(skb->data + sizeof(struct unicast_packet)); + + /* we don't have an updated route for this client, so we should + * not try to reroute the packet!! + */ + if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest)) + return 1; + orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest); if (!orig_node) { @@ -1047,8 +1061,8 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if) spin_lock_bh(&orig_node->bcast_seqno_lock); /* check whether the packet is a duplicate */ - if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno, - ntohl(bcast_packet->seqno))) + if (bat_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno, + ntohl(bcast_packet->seqno))) goto spin_unlock; seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno; @@ -1065,9 +1079,19 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if) spin_unlock_bh(&orig_node->bcast_seqno_lock); + /* check whether this has been sent by another originator before */ + if (bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size)) + goto out; + /* rebroadcast packet */ add_bcast_packet_to_list(bat_priv, skb, 1); + /* don't hand the broadcast up if it is from an originator + * from the same backbone. 
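
recv_bcast_packet() above now checks for replays with bat_test_bit() on the originator's bcast_bits bitmap (declared with DECLARE_BITMAP() further down in types.h). Below is a simplified userspace model of such a window check; it maps a sequence number straight into the bitmap with a modulo instead of the driver's real sliding-window bookkeeping, so treat it only as an illustration of the test/set pattern.

#include <stdio.h>
#include <limits.h>
#include <stdbool.h>

#define BITS_PER_LONG		(sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

#define WINDOW_SIZE 64		/* TQ_LOCAL_WINDOW_SIZE in the driver */

static bool test_bit(const unsigned long *map, unsigned int nr)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1UL;
}

static void set_bit(unsigned long *map, unsigned int nr)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
	DECLARE_BITMAP(bcast_bits, WINDOW_SIZE) = { 0 };
	unsigned int seqnos[] = { 10, 11, 10 };		/* the third is a replay */

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int slot = seqnos[i] % WINDOW_SIZE;	/* naive mapping */

		if (test_bit(bcast_bits, slot)) {
			printf("seqno %u: duplicate, dropped\n", seqnos[i]);
			continue;
		}
		set_bit(bcast_bits, slot);
		printf("seqno %u: new, rebroadcast\n", seqnos[i]);
	}
	return 0;
}
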
+ */ + if (bla_is_backbone_gw(skb, orig_node, hdr_size)) + goto out; + /* broadcast for me */ interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); ret = NET_RX_SUCCESS; diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h index 92ac100d83d..d6bbbebb656 100644 --- a/net/batman-adv/routing.h +++ b/net/batman-adv/routing.h @@ -23,15 +23,16 @@ #define _NET_BATMAN_ADV_ROUTING_H_ void slide_own_bcast_window(struct hard_iface *hard_iface); +bool check_management_packet(struct sk_buff *skb, + struct hard_iface *hard_iface, + int header_len); void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, struct neigh_node *neigh_node); -int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if); -int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if); int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if); struct neigh_node *find_router(struct bat_priv *bat_priv, diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index af7a6741a68..f47299f22c6 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -45,13 +45,13 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, goto send_skb_err; if (!(hard_iface->net_dev->flags & IFF_UP)) { - pr_warning("Interface %s is not up - can't send packet via that interface!\n", - hard_iface->net_dev->name); + pr_warn("Interface %s is not up - can't send packet via that interface!\n", + hard_iface->net_dev->name); goto send_skb_err; } /* push to the ethernet header. 
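
Several hunks in this patch (start_purge_timer() earlier, send_outstanding_bcast_packet() just below) replace open-coded (msec * HZ) / 1000 arithmetic with msecs_to_jiffies(). The reason is easy to demonstrate: with a low HZ the integer division truncates to zero jiffies, while the helper rounds up. The rounding below is a simplified model of the helper, not the kernel implementation.

#include <stdio.h>

/* round up, as msecs_to_jiffies() effectively does for small values */
static unsigned long to_jiffies(unsigned int msecs, unsigned int hz)
{
	return ((unsigned long)msecs * hz + 999) / 1000;
}

int main(void)
{
	unsigned int hz_values[] = { 100, 250, 1000 };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int hz = hz_values[i];

		printf("HZ=%-4u  (5 * HZ) / 1000 = %u   rounded helper = %lu\n",
		       hz, (5 * hz) / 1000, to_jiffies(5, hz));
	}
	return 0;
}
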
*/ - if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0) + if (my_skb_head_push(skb, ETH_HLEN) < 0) goto send_skb_err; skb_reset_mac_header(skb); @@ -87,7 +87,7 @@ static void realloc_packet_buffer(struct hard_iface *hard_iface, /* keep old buffer if kmalloc should fail */ if (new_buff) { memcpy(new_buff, hard_iface->packet_buff, - BATMAN_OGM_LEN); + BATMAN_OGM_HLEN); kfree(hard_iface->packet_buff); hard_iface->packet_buff = new_buff; @@ -101,13 +101,13 @@ static int prepare_packet_buffer(struct bat_priv *bat_priv, { int new_len; - new_len = BATMAN_OGM_LEN + + new_len = BATMAN_OGM_HLEN + tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes)); /* if we have too many changes for one packet don't send any * and wait for the tt table request which will be fragmented */ if (new_len > hard_iface->soft_iface->mtu) - new_len = BATMAN_OGM_LEN; + new_len = BATMAN_OGM_HLEN; realloc_packet_buffer(hard_iface, new_len); @@ -117,14 +117,14 @@ static int prepare_packet_buffer(struct bat_priv *bat_priv, atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX); return tt_changes_fill_buffer(bat_priv, - hard_iface->packet_buff + BATMAN_OGM_LEN, - hard_iface->packet_len - BATMAN_OGM_LEN); + hard_iface->packet_buff + BATMAN_OGM_HLEN, + hard_iface->packet_len - BATMAN_OGM_HLEN); } static int reset_packet_buffer(struct bat_priv *bat_priv, struct hard_iface *hard_iface) { - realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN); + realloc_packet_buffer(hard_iface, BATMAN_OGM_HLEN); return 0; } @@ -292,7 +292,7 @@ static void send_outstanding_bcast_packet(struct work_struct *work) /* if we still have some more bcasts to send */ if (forw_packet->num_packets < 3) { _add_bcast_packet_to_list(bat_priv, forw_packet, - ((5 * HZ) / 1000)); + msecs_to_jiffies(5)); return; } diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index a5590f4193f..6e2530b0204 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -36,6 +36,7 @@ #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include "unicast.h" +#include "bridge_loop_avoidance.h" static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); @@ -73,439 +74,6 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len) return 0; } -static void softif_neigh_free_ref(struct softif_neigh *softif_neigh) -{ - if (atomic_dec_and_test(&softif_neigh->refcount)) - kfree_rcu(softif_neigh, rcu); -} - -static void softif_neigh_vid_free_rcu(struct rcu_head *rcu) -{ - struct softif_neigh_vid *softif_neigh_vid; - struct softif_neigh *softif_neigh; - struct hlist_node *node, *node_tmp; - struct bat_priv *bat_priv; - - softif_neigh_vid = container_of(rcu, struct softif_neigh_vid, rcu); - bat_priv = softif_neigh_vid->bat_priv; - - spin_lock_bh(&bat_priv->softif_neigh_lock); - hlist_for_each_entry_safe(softif_neigh, node, node_tmp, - &softif_neigh_vid->softif_neigh_list, list) { - hlist_del_rcu(&softif_neigh->list); - softif_neigh_free_ref(softif_neigh); - } - spin_unlock_bh(&bat_priv->softif_neigh_lock); - - kfree(softif_neigh_vid); -} - -static void softif_neigh_vid_free_ref(struct softif_neigh_vid *softif_neigh_vid) -{ - if (atomic_dec_and_test(&softif_neigh_vid->refcount)) - call_rcu(&softif_neigh_vid->rcu, softif_neigh_vid_free_rcu); -} - -static struct softif_neigh_vid *softif_neigh_vid_get(struct bat_priv *bat_priv, - short vid) -{ - struct softif_neigh_vid *softif_neigh_vid; - struct hlist_node *node; - - rcu_read_lock(); - hlist_for_each_entry_rcu(softif_neigh_vid, node, - &bat_priv->softif_neigh_vids, 
list) { - if (softif_neigh_vid->vid != vid) - continue; - - if (!atomic_inc_not_zero(&softif_neigh_vid->refcount)) - continue; - - goto out; - } - - softif_neigh_vid = kzalloc(sizeof(*softif_neigh_vid), GFP_ATOMIC); - if (!softif_neigh_vid) - goto out; - - softif_neigh_vid->vid = vid; - softif_neigh_vid->bat_priv = bat_priv; - - /* initialize with 2 - caller decrements counter by one */ - atomic_set(&softif_neigh_vid->refcount, 2); - INIT_HLIST_HEAD(&softif_neigh_vid->softif_neigh_list); - INIT_HLIST_NODE(&softif_neigh_vid->list); - spin_lock_bh(&bat_priv->softif_neigh_vid_lock); - hlist_add_head_rcu(&softif_neigh_vid->list, - &bat_priv->softif_neigh_vids); - spin_unlock_bh(&bat_priv->softif_neigh_vid_lock); - -out: - rcu_read_unlock(); - return softif_neigh_vid; -} - -static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv, - const uint8_t *addr, short vid) -{ - struct softif_neigh_vid *softif_neigh_vid; - struct softif_neigh *softif_neigh = NULL; - struct hlist_node *node; - - softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid); - if (!softif_neigh_vid) - goto out; - - rcu_read_lock(); - hlist_for_each_entry_rcu(softif_neigh, node, - &softif_neigh_vid->softif_neigh_list, - list) { - if (!compare_eth(softif_neigh->addr, addr)) - continue; - - if (!atomic_inc_not_zero(&softif_neigh->refcount)) - continue; - - softif_neigh->last_seen = jiffies; - goto unlock; - } - - softif_neigh = kzalloc(sizeof(*softif_neigh), GFP_ATOMIC); - if (!softif_neigh) - goto unlock; - - memcpy(softif_neigh->addr, addr, ETH_ALEN); - softif_neigh->last_seen = jiffies; - /* initialize with 2 - caller decrements counter by one */ - atomic_set(&softif_neigh->refcount, 2); - - INIT_HLIST_NODE(&softif_neigh->list); - spin_lock_bh(&bat_priv->softif_neigh_lock); - hlist_add_head_rcu(&softif_neigh->list, - &softif_neigh_vid->softif_neigh_list); - spin_unlock_bh(&bat_priv->softif_neigh_lock); - -unlock: - rcu_read_unlock(); -out: - if (softif_neigh_vid) - softif_neigh_vid_free_ref(softif_neigh_vid); - return softif_neigh; -} - -static struct softif_neigh *softif_neigh_get_selected( - struct softif_neigh_vid *softif_neigh_vid) -{ - struct softif_neigh *softif_neigh; - - rcu_read_lock(); - softif_neigh = rcu_dereference(softif_neigh_vid->softif_neigh); - - if (softif_neigh && !atomic_inc_not_zero(&softif_neigh->refcount)) - softif_neigh = NULL; - - rcu_read_unlock(); - return softif_neigh; -} - -static struct softif_neigh *softif_neigh_vid_get_selected( - struct bat_priv *bat_priv, - short vid) -{ - struct softif_neigh_vid *softif_neigh_vid; - struct softif_neigh *softif_neigh = NULL; - - softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid); - if (!softif_neigh_vid) - goto out; - - softif_neigh = softif_neigh_get_selected(softif_neigh_vid); -out: - if (softif_neigh_vid) - softif_neigh_vid_free_ref(softif_neigh_vid); - return softif_neigh; -} - -static void softif_neigh_vid_select(struct bat_priv *bat_priv, - struct softif_neigh *new_neigh, - short vid) -{ - struct softif_neigh_vid *softif_neigh_vid; - struct softif_neigh *curr_neigh; - - softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid); - if (!softif_neigh_vid) - goto out; - - spin_lock_bh(&bat_priv->softif_neigh_lock); - - if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount)) - new_neigh = NULL; - - curr_neigh = rcu_dereference_protected(softif_neigh_vid->softif_neigh, - 1); - rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh); - - if ((curr_neigh) && (!new_neigh)) - bat_dbg(DBG_ROUTES, bat_priv, - "Removing mesh exit point on vid: 
%d (prev: %pM).\n", - vid, curr_neigh->addr); - else if ((curr_neigh) && (new_neigh)) - bat_dbg(DBG_ROUTES, bat_priv, - "Changing mesh exit point on vid: %d from %pM to %pM.\n", - vid, curr_neigh->addr, new_neigh->addr); - else if ((!curr_neigh) && (new_neigh)) - bat_dbg(DBG_ROUTES, bat_priv, - "Setting mesh exit point on vid: %d to %pM.\n", - vid, new_neigh->addr); - - if (curr_neigh) - softif_neigh_free_ref(curr_neigh); - - spin_unlock_bh(&bat_priv->softif_neigh_lock); - -out: - if (softif_neigh_vid) - softif_neigh_vid_free_ref(softif_neigh_vid); -} - -static void softif_neigh_vid_deselect(struct bat_priv *bat_priv, - struct softif_neigh_vid *softif_neigh_vid) -{ - struct softif_neigh *curr_neigh; - struct softif_neigh *softif_neigh = NULL, *softif_neigh_tmp; - struct hard_iface *primary_if = NULL; - struct hlist_node *node; - - primary_if = primary_if_get_selected(bat_priv); - if (!primary_if) - goto out; - - /* find new softif_neigh immediately to avoid temporary loops */ - rcu_read_lock(); - curr_neigh = rcu_dereference(softif_neigh_vid->softif_neigh); - - hlist_for_each_entry_rcu(softif_neigh_tmp, node, - &softif_neigh_vid->softif_neigh_list, - list) { - if (softif_neigh_tmp == curr_neigh) - continue; - - /* we got a neighbor but its mac is 'bigger' than ours */ - if (memcmp(primary_if->net_dev->dev_addr, - softif_neigh_tmp->addr, ETH_ALEN) < 0) - continue; - - if (!atomic_inc_not_zero(&softif_neigh_tmp->refcount)) - continue; - - softif_neigh = softif_neigh_tmp; - goto unlock; - } - -unlock: - rcu_read_unlock(); -out: - softif_neigh_vid_select(bat_priv, softif_neigh, softif_neigh_vid->vid); - - if (primary_if) - hardif_free_ref(primary_if); - if (softif_neigh) - softif_neigh_free_ref(softif_neigh); -} - -int softif_neigh_seq_print_text(struct seq_file *seq, void *offset) -{ - struct net_device *net_dev = (struct net_device *)seq->private; - struct bat_priv *bat_priv = netdev_priv(net_dev); - struct softif_neigh_vid *softif_neigh_vid; - struct softif_neigh *softif_neigh; - struct hard_iface *primary_if; - struct hlist_node *node, *node_tmp; - struct softif_neigh *curr_softif_neigh; - int ret = 0, last_seen_secs, last_seen_msecs; - - primary_if = primary_if_get_selected(bat_priv); - if (!primary_if) { - ret = seq_printf(seq, - "BATMAN mesh %s disabled - please specify interfaces to enable it\n", - net_dev->name); - goto out; - } - - if (primary_if->if_status != IF_ACTIVE) { - ret = seq_printf(seq, - "BATMAN mesh %s disabled - primary interface not active\n", - net_dev->name); - goto out; - } - - seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name); - - rcu_read_lock(); - hlist_for_each_entry_rcu(softif_neigh_vid, node, - &bat_priv->softif_neigh_vids, list) { - seq_printf(seq, " %-15s %s on vid: %d\n", - "Originator", "last-seen", softif_neigh_vid->vid); - - curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid); - - hlist_for_each_entry_rcu(softif_neigh, node_tmp, - &softif_neigh_vid->softif_neigh_list, - list) { - last_seen_secs = jiffies_to_msecs(jiffies - - softif_neigh->last_seen) / 1000; - last_seen_msecs = jiffies_to_msecs(jiffies - - softif_neigh->last_seen) % 1000; - seq_printf(seq, "%s %pM %3i.%03is\n", - curr_softif_neigh == softif_neigh - ? 
"=>" : " ", softif_neigh->addr, - last_seen_secs, last_seen_msecs); - } - - if (curr_softif_neigh) - softif_neigh_free_ref(curr_softif_neigh); - - seq_printf(seq, "\n"); - } - rcu_read_unlock(); - -out: - if (primary_if) - hardif_free_ref(primary_if); - return ret; -} - -void softif_neigh_purge(struct bat_priv *bat_priv) -{ - struct softif_neigh *softif_neigh, *curr_softif_neigh; - struct softif_neigh_vid *softif_neigh_vid; - struct hlist_node *node, *node_tmp, *node_tmp2; - int do_deselect; - - rcu_read_lock(); - hlist_for_each_entry_rcu(softif_neigh_vid, node, - &bat_priv->softif_neigh_vids, list) { - if (!atomic_inc_not_zero(&softif_neigh_vid->refcount)) - continue; - - curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid); - do_deselect = 0; - - spin_lock_bh(&bat_priv->softif_neigh_lock); - hlist_for_each_entry_safe(softif_neigh, node_tmp, node_tmp2, - &softif_neigh_vid->softif_neigh_list, - list) { - if ((!has_timed_out(softif_neigh->last_seen, - SOFTIF_NEIGH_TIMEOUT)) && - (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)) - continue; - - if (curr_softif_neigh == softif_neigh) { - bat_dbg(DBG_ROUTES, bat_priv, - "Current mesh exit point on vid: %d '%pM' vanished.\n", - softif_neigh_vid->vid, - softif_neigh->addr); - do_deselect = 1; - } - - hlist_del_rcu(&softif_neigh->list); - softif_neigh_free_ref(softif_neigh); - } - spin_unlock_bh(&bat_priv->softif_neigh_lock); - - /* soft_neigh_vid_deselect() needs to acquire the - * softif_neigh_lock */ - if (do_deselect) - softif_neigh_vid_deselect(bat_priv, softif_neigh_vid); - - if (curr_softif_neigh) - softif_neigh_free_ref(curr_softif_neigh); - - softif_neigh_vid_free_ref(softif_neigh_vid); - } - rcu_read_unlock(); - - spin_lock_bh(&bat_priv->softif_neigh_vid_lock); - hlist_for_each_entry_safe(softif_neigh_vid, node, node_tmp, - &bat_priv->softif_neigh_vids, list) { - if (!hlist_empty(&softif_neigh_vid->softif_neigh_list)) - continue; - - hlist_del_rcu(&softif_neigh_vid->list); - softif_neigh_vid_free_ref(softif_neigh_vid); - } - spin_unlock_bh(&bat_priv->softif_neigh_vid_lock); - -} - -static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev, - short vid) -{ - struct bat_priv *bat_priv = netdev_priv(dev); - struct ethhdr *ethhdr = (struct ethhdr *)skb->data; - struct batman_ogm_packet *batman_ogm_packet; - struct softif_neigh *softif_neigh = NULL; - struct hard_iface *primary_if = NULL; - struct softif_neigh *curr_softif_neigh = NULL; - - if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) - batman_ogm_packet = (struct batman_ogm_packet *) - (skb->data + ETH_HLEN + VLAN_HLEN); - else - batman_ogm_packet = (struct batman_ogm_packet *) - (skb->data + ETH_HLEN); - - if (batman_ogm_packet->header.version != COMPAT_VERSION) - goto out; - - if (batman_ogm_packet->header.packet_type != BAT_OGM) - goto out; - - if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP)) - goto out; - - if (is_my_mac(batman_ogm_packet->orig)) - goto out; - - softif_neigh = softif_neigh_get(bat_priv, batman_ogm_packet->orig, vid); - if (!softif_neigh) - goto out; - - curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid); - if (curr_softif_neigh == softif_neigh) - goto out; - - primary_if = primary_if_get_selected(bat_priv); - if (!primary_if) - goto out; - - /* we got a neighbor but its mac is 'bigger' than ours */ - if (memcmp(primary_if->net_dev->dev_addr, - softif_neigh->addr, ETH_ALEN) < 0) - goto out; - - /* close own batX device and use softif_neigh as exit node */ - if (!curr_softif_neigh) { - softif_neigh_vid_select(bat_priv, 
softif_neigh, vid); - goto out; - } - - /* switch to new 'smallest neighbor' */ - if (memcmp(softif_neigh->addr, curr_softif_neigh->addr, ETH_ALEN) < 0) - softif_neigh_vid_select(bat_priv, softif_neigh, vid); - -out: - kfree_skb(skb); - if (softif_neigh) - softif_neigh_free_ref(softif_neigh); - if (curr_softif_neigh) - softif_neigh_free_ref(curr_softif_neigh); - if (primary_if) - hardif_free_ref(primary_if); - return; -} - static int interface_open(struct net_device *dev) { netif_start_queue(dev); @@ -562,10 +130,11 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) struct hard_iface *primary_if = NULL; struct bcast_packet *bcast_packet; struct vlan_ethhdr *vhdr; - struct softif_neigh *curr_softif_neigh = NULL; + static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, + 0x00}; unsigned int header_len = 0; int data_len = skb->len, ret; - short vid = -1; + short vid __maybe_unused = -1; bool do_bcast = false; if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) @@ -583,21 +152,21 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) /* fall through */ case ETH_P_BATMAN: - softif_batman_recv(skb, soft_iface, vid); - goto end; + goto dropped; } - /** - * if we have a another chosen mesh exit node in range - * it will transport the packets to the mesh - */ - curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid); - if (curr_softif_neigh) + if (bla_tx(bat_priv, skb, vid)) goto dropped; /* Register the client MAC in the transtable */ tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); + /* don't accept stp packets. STP does not help in meshes. + * better use the bridge loop avoidance ... + */ + if (compare_eth(ethhdr->h_dest, stp_addr)) + goto dropped; + if (is_multicast_ether_addr(ethhdr->h_dest)) { do_bcast = true; @@ -675,8 +244,6 @@ dropped: dropped_freed: bat_priv->stats.tx_dropped++; end: - if (curr_softif_neigh) - softif_neigh_free_ref(curr_softif_neigh); if (primary_if) hardif_free_ref(primary_if); return NETDEV_TX_OK; @@ -687,12 +254,9 @@ void interface_rx(struct net_device *soft_iface, int hdr_size) { struct bat_priv *bat_priv = netdev_priv(soft_iface); - struct unicast_packet *unicast_packet; struct ethhdr *ethhdr; struct vlan_ethhdr *vhdr; - struct softif_neigh *curr_softif_neigh = NULL; - short vid = -1; - int ret; + short vid __maybe_unused = -1; /* check if enough space is available for pulling, and pull */ if (!pskb_may_pull(skb, hdr_size)) @@ -716,30 +280,6 @@ void interface_rx(struct net_device *soft_iface, goto dropped; } - /** - * if we have a another chosen mesh exit node in range - * it will transport the packets to the non-mesh network - */ - curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid); - if (curr_softif_neigh) { - skb_push(skb, hdr_size); - unicast_packet = (struct unicast_packet *)skb->data; - - if ((unicast_packet->header.packet_type != BAT_UNICAST) && - (unicast_packet->header.packet_type != BAT_UNICAST_FRAG)) - goto dropped; - - skb_reset_mac_header(skb); - - memcpy(unicast_packet->dest, - curr_softif_neigh->addr, ETH_ALEN); - ret = route_unicast_packet(skb, recv_if); - if (ret == NET_RX_DROP) - goto dropped; - - goto out; - } - /* skb->dev & skb->pkt_type are set here */ if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) goto dropped; @@ -752,21 +292,25 @@ void interface_rx(struct net_device *soft_iface, /* skb->ip_summed = CHECKSUM_UNNECESSARY;*/ bat_priv->stats.rx_packets++; - bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr); + bat_priv->stats.rx_bytes 
+= skb->len + ETH_HLEN; soft_iface->last_rx = jiffies; if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest)) goto dropped; + /* Let the bridge loop avoidance check the packet. If will + * not handle it, we can safely push it up. + */ + if (bla_rx(bat_priv, skb, vid)) + goto out; + netif_rx(skb); goto out; dropped: kfree_skb(skb); out: - if (curr_softif_neigh) - softif_neigh_free_ref(curr_softif_neigh); return; } @@ -828,13 +372,14 @@ struct net_device *softif_create(const char *name) atomic_set(&bat_priv->aggregated_ogms, 1); atomic_set(&bat_priv->bonding, 0); + atomic_set(&bat_priv->bridge_loop_avoidance, 0); atomic_set(&bat_priv->ap_isolation, 0); atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE); atomic_set(&bat_priv->gw_mode, GW_MODE_OFF); atomic_set(&bat_priv->gw_sel_class, 20); atomic_set(&bat_priv->gw_bandwidth, 41); atomic_set(&bat_priv->orig_interval, 1000); - atomic_set(&bat_priv->hop_penalty, 10); + atomic_set(&bat_priv->hop_penalty, 30); atomic_set(&bat_priv->log_level, 0); atomic_set(&bat_priv->fragmentation, 1); atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN); @@ -845,6 +390,7 @@ struct net_device *softif_create(const char *name) atomic_set(&bat_priv->ttvn, 0); atomic_set(&bat_priv->tt_local_changes, 0); atomic_set(&bat_priv->tt_ogm_append_cnt, 0); + atomic_set(&bat_priv->bla_num_requests, 0); bat_priv->tt_buff = NULL; bat_priv->tt_buff_len = 0; diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h index 756eab5b8dd..02030067388 100644 --- a/net/batman-adv/soft-interface.h +++ b/net/batman-adv/soft-interface.h @@ -23,8 +23,6 @@ #define _NET_BATMAN_ADV_SOFT_INTERFACE_H_ int my_skb_head_push(struct sk_buff *skb, unsigned int len); -int softif_neigh_seq_print_text(struct seq_file *seq, void *offset); -void softif_neigh_purge(struct bat_priv *bat_priv); void interface_rx(struct net_device *soft_iface, struct sk_buff *skb, struct hard_iface *recv_if, int hdr_size); diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 1f869212784..a66c2dcd108 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -1,7 +1,7 @@ /* * Copyright (C) 2007-2012 B.A.T.M.A.N. 
contributors: * - * Marek Lindner, Simon Wunderlich + * Marek Lindner, Simon Wunderlich, Antonio Quartulli * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -27,13 +27,14 @@ #include "hash.h" #include "originator.h" #include "routing.h" +#include "bridge_loop_avoidance.h" #include <linux/crc16.h> -static void _tt_global_del(struct bat_priv *bat_priv, - struct tt_global_entry *tt_global_entry, - const char *message); +static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, + struct orig_node *orig_node); static void tt_purge(struct work_struct *work); +static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry); /* returns 1 if they are the same mac addr */ static int compare_tt(const struct hlist_node *node, const void *data2) @@ -123,17 +124,31 @@ static void tt_global_entry_free_rcu(struct rcu_head *rcu) tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, common); - if (tt_global_entry->orig_node) - orig_node_free_ref(tt_global_entry->orig_node); - kfree(tt_global_entry); } static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry) { - if (atomic_dec_and_test(&tt_global_entry->common.refcount)) + if (atomic_dec_and_test(&tt_global_entry->common.refcount)) { + tt_global_del_orig_list(tt_global_entry); call_rcu(&tt_global_entry->common.rcu, tt_global_entry_free_rcu); + } +} + +static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu) +{ + struct tt_orig_list_entry *orig_entry; + + orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu); + atomic_dec(&orig_entry->orig_node->tt_size); + orig_node_free_ref(orig_entry->orig_node); + kfree(orig_entry); +} + +static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry) +{ + call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu); } static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr, @@ -182,12 +197,17 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, struct bat_priv *bat_priv = netdev_priv(soft_iface); struct tt_local_entry *tt_local_entry = NULL; struct tt_global_entry *tt_global_entry = NULL; + struct hlist_head *head; + struct hlist_node *node; + struct tt_orig_list_entry *orig_entry; int hash_added; tt_local_entry = tt_local_hash_find(bat_priv, addr); if (tt_local_entry) { tt_local_entry->last_seen = jiffies; + /* possibly unset the TT_CLIENT_PENDING flag */ + tt_local_entry->common.flags &= ~TT_CLIENT_PENDING; goto out; } @@ -232,14 +252,21 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, /* Check whether it is a roaming! 
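
The new tt_orig_list_entry is released via call_rcu(); the callback only receives the embedded rcu_head and recovers the containing entry with container_of() before dropping the originator reference. A self-contained userspace illustration of that recovery step follows; the rcu_head stand-in and the immediate callback invocation are simplifications, there is no real grace period here.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void *next; };	/* stand-in for the kernel type */

struct orig_list_entry {
	int ttvn;
	struct rcu_head rcu;		/* embedded, as in tt_orig_list_entry */
};

static void entry_free_rcu(struct rcu_head *head)
{
	/* recover the full entry from the embedded member */
	struct orig_list_entry *entry =
		container_of(head, struct orig_list_entry, rcu);

	printf("freeing entry announced with ttvn %d\n", entry->ttvn);
	free(entry);
}

int main(void)
{
	struct orig_list_entry *entry = malloc(sizeof(*entry));

	if (!entry)
		return 1;
	entry->ttvn = 7;

	/* in the kernel, call_rcu() defers this until after a grace period */
	entry_free_rcu(&entry->rcu);
	return 0;
}
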
*/ if (tt_global_entry) { - /* This node is probably going to update its tt table */ - tt_global_entry->orig_node->tt_poss_change = true; - /* The global entry has to be marked as ROAMING and has to be - * kept for consistency purpose */ + /* These node are probably going to update their tt table */ + head = &tt_global_entry->orig_list; + rcu_read_lock(); + hlist_for_each_entry_rcu(orig_entry, node, head, list) { + orig_entry->orig_node->tt_poss_change = true; + + send_roam_adv(bat_priv, tt_global_entry->common.addr, + orig_entry->orig_node); + } + rcu_read_unlock(); + /* The global entry has to be marked as ROAMING and + * has to be kept for consistency purpose + */ tt_global_entry->common.flags |= TT_CLIENT_ROAM; tt_global_entry->roam_at = jiffies; - send_roam_adv(bat_priv, tt_global_entry->common.addr, - tt_global_entry->orig_node); } out: if (tt_local_entry) @@ -490,33 +517,76 @@ static void tt_changes_list_free(struct bat_priv *bat_priv) spin_unlock_bh(&bat_priv->tt_changes_list_lock); } +/* find out if an orig_node is already in the list of a tt_global_entry. + * returns 1 if found, 0 otherwise + */ +static bool tt_global_entry_has_orig(const struct tt_global_entry *entry, + const struct orig_node *orig_node) +{ + struct tt_orig_list_entry *tmp_orig_entry; + const struct hlist_head *head; + struct hlist_node *node; + bool found = false; + + rcu_read_lock(); + head = &entry->orig_list; + hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) { + if (tmp_orig_entry->orig_node == orig_node) { + found = true; + break; + } + } + rcu_read_unlock(); + return found; +} + +static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry, + struct orig_node *orig_node, + int ttvn) +{ + struct tt_orig_list_entry *orig_entry; + + orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC); + if (!orig_entry) + return; + + INIT_HLIST_NODE(&orig_entry->list); + atomic_inc(&orig_node->refcount); + atomic_inc(&orig_node->tt_size); + orig_entry->orig_node = orig_node; + orig_entry->ttvn = ttvn; + + spin_lock_bh(&tt_global_entry->list_lock); + hlist_add_head_rcu(&orig_entry->list, + &tt_global_entry->orig_list); + spin_unlock_bh(&tt_global_entry->list_lock); +} + /* caller must hold orig_node refcount */ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, const unsigned char *tt_addr, uint8_t ttvn, bool roaming, bool wifi) { - struct tt_global_entry *tt_global_entry; - struct orig_node *orig_node_tmp; + struct tt_global_entry *tt_global_entry = NULL; int ret = 0; int hash_added; tt_global_entry = tt_global_hash_find(bat_priv, tt_addr); if (!tt_global_entry) { - tt_global_entry = - kmalloc(sizeof(*tt_global_entry), - GFP_ATOMIC); + tt_global_entry = kzalloc(sizeof(*tt_global_entry), + GFP_ATOMIC); if (!tt_global_entry) goto out; memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN); + tt_global_entry->common.flags = NO_FLAGS; - atomic_set(&tt_global_entry->common.refcount, 2); - /* Assign the new orig_node */ - atomic_inc(&orig_node->refcount); - tt_global_entry->orig_node = orig_node; - tt_global_entry->ttvn = ttvn; tt_global_entry->roam_at = 0; + atomic_set(&tt_global_entry->common.refcount, 2); + + INIT_HLIST_HEAD(&tt_global_entry->orig_list); + spin_lock_init(&tt_global_entry->list_lock); hash_added = hash_add(bat_priv->tt_global_hash, compare_tt, choose_orig, &tt_global_entry->common, @@ -527,19 +597,27 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, tt_global_entry_free_ref(tt_global_entry); goto out_remove; } - 
atomic_inc(&orig_node->tt_size); + + tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn); } else { - if (tt_global_entry->orig_node != orig_node) { - atomic_dec(&tt_global_entry->orig_node->tt_size); - orig_node_tmp = tt_global_entry->orig_node; - atomic_inc(&orig_node->refcount); - tt_global_entry->orig_node = orig_node; - orig_node_free_ref(orig_node_tmp); - atomic_inc(&orig_node->tt_size); + /* there is already a global entry, use this one. */ + + /* If there is the TT_CLIENT_ROAM flag set, there is only one + * originator left in the list and we previously received a + * delete + roaming change for this originator. + * + * We should first delete the old originator before adding the + * new one. + */ + if (tt_global_entry->common.flags & TT_CLIENT_ROAM) { + tt_global_del_orig_list(tt_global_entry); + tt_global_entry->common.flags &= ~TT_CLIENT_ROAM; + tt_global_entry->roam_at = 0; } - tt_global_entry->common.flags = NO_FLAGS; - tt_global_entry->ttvn = ttvn; - tt_global_entry->roam_at = 0; + + if (!tt_global_entry_has_orig(tt_global_entry, orig_node)) + tt_global_add_orig_entry(tt_global_entry, orig_node, + ttvn); } if (wifi) @@ -560,6 +638,34 @@ out: return ret; } +/* print all orig nodes who announce the address for this global entry. + * it is assumed that the caller holds rcu_read_lock(); + */ +static void tt_global_print_entry(struct tt_global_entry *tt_global_entry, + struct seq_file *seq) +{ + struct hlist_head *head; + struct hlist_node *node; + struct tt_orig_list_entry *orig_entry; + struct tt_common_entry *tt_common_entry; + uint16_t flags; + uint8_t last_ttvn; + + tt_common_entry = &tt_global_entry->common; + + head = &tt_global_entry->orig_list; + + hlist_for_each_entry_rcu(orig_entry, node, head, list) { + flags = tt_common_entry->flags; + last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn); + seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c]\n", + tt_global_entry->common.addr, orig_entry->ttvn, + orig_entry->orig_node->orig, last_ttvn, + (flags & TT_CLIENT_ROAM ? 'R' : '.'), + (flags & TT_CLIENT_WIFI ? 'W' : '.')); + } +} + int tt_global_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; @@ -603,18 +709,7 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset) tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, common); - seq_printf(seq, - " * %pM (%3u) via %pM (%3u) [%c%c]\n", - tt_global_entry->common.addr, - tt_global_entry->ttvn, - tt_global_entry->orig_node->orig, - (uint8_t) atomic_read( - &tt_global_entry->orig_node-> - last_ttvn), - (tt_global_entry->common.flags & - TT_CLIENT_ROAM ? 'R' : '.'), - (tt_global_entry->common.flags & - TT_CLIENT_WIFI ? 
'W' : '.')); + tt_global_print_entry(tt_global_entry, seq); } rcu_read_unlock(); } @@ -624,59 +719,150 @@ out: return ret; } -static void _tt_global_del(struct bat_priv *bat_priv, - struct tt_global_entry *tt_global_entry, - const char *message) +/* deletes the orig list of a tt_global_entry */ +static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry) { - if (!tt_global_entry) - goto out; + struct hlist_head *head; + struct hlist_node *node, *safe; + struct tt_orig_list_entry *orig_entry; - bat_dbg(DBG_TT, bat_priv, - "Deleting global tt entry %pM (via %pM): %s\n", - tt_global_entry->common.addr, tt_global_entry->orig_node->orig, - message); + spin_lock_bh(&tt_global_entry->list_lock); + head = &tt_global_entry->orig_list; + hlist_for_each_entry_safe(orig_entry, node, safe, head, list) { + hlist_del_rcu(node); + tt_orig_list_entry_free_ref(orig_entry); + } + spin_unlock_bh(&tt_global_entry->list_lock); - atomic_dec(&tt_global_entry->orig_node->tt_size); +} + +static void tt_global_del_orig_entry(struct bat_priv *bat_priv, + struct tt_global_entry *tt_global_entry, + struct orig_node *orig_node, + const char *message) +{ + struct hlist_head *head; + struct hlist_node *node, *safe; + struct tt_orig_list_entry *orig_entry; + + spin_lock_bh(&tt_global_entry->list_lock); + head = &tt_global_entry->orig_list; + hlist_for_each_entry_safe(orig_entry, node, safe, head, list) { + if (orig_entry->orig_node == orig_node) { + bat_dbg(DBG_TT, bat_priv, + "Deleting %pM from global tt entry %pM: %s\n", + orig_node->orig, tt_global_entry->common.addr, + message); + hlist_del_rcu(node); + tt_orig_list_entry_free_ref(orig_entry); + } + } + spin_unlock_bh(&tt_global_entry->list_lock); +} + +static void tt_global_del_struct(struct bat_priv *bat_priv, + struct tt_global_entry *tt_global_entry, + const char *message) +{ + bat_dbg(DBG_TT, bat_priv, + "Deleting global tt entry %pM: %s\n", + tt_global_entry->common.addr, message); hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig, tt_global_entry->common.addr); -out: - if (tt_global_entry) - tt_global_entry_free_ref(tt_global_entry); + tt_global_entry_free_ref(tt_global_entry); + } -void tt_global_del(struct bat_priv *bat_priv, - struct orig_node *orig_node, const unsigned char *addr, - const char *message, bool roaming) +/* If the client is to be deleted, we check if it is the last origantor entry + * within tt_global entry. If yes, we set the TT_CLIENT_ROAM flag and the timer, + * otherwise we simply remove the originator scheduled for deletion. + */ +static void tt_global_del_roaming(struct bat_priv *bat_priv, + struct tt_global_entry *tt_global_entry, + struct orig_node *orig_node, + const char *message) +{ + bool last_entry = true; + struct hlist_head *head; + struct hlist_node *node; + struct tt_orig_list_entry *orig_entry; + + /* no local entry exists, case 1: + * Check if this is the last one or if other entries exist. + */ + + rcu_read_lock(); + head = &tt_global_entry->orig_list; + hlist_for_each_entry_rcu(orig_entry, node, head, list) { + if (orig_entry->orig_node != orig_node) { + last_entry = false; + break; + } + } + rcu_read_unlock(); + + if (last_entry) { + /* its the last one, mark for roaming. */ + tt_global_entry->common.flags |= TT_CLIENT_ROAM; + tt_global_entry->roam_at = jiffies; + } else + /* there is another entry, we can simply delete this + * one and can still use the other one. 
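
The deletion helpers above operate on the per-client originator list that this patch introduces: a global translation-table entry no longer references a single orig_node but keeps every originator currently announcing the client, and an originator is only appended when it is not yet on the list. The sketch below models that list in plain userspace C (no RCU, no locking, hypothetical field names), matching the has-orig / add-orig logic in spirit only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

struct orig_entry {
	char name[16];
	int ttvn;
	struct orig_entry *next;
};

struct global_entry {
	char client[18];
	struct orig_entry *orig_list;
};

static bool has_orig(const struct global_entry *e, const char *name)
{
	for (const struct orig_entry *o = e->orig_list; o; o = o->next)
		if (strcmp(o->name, name) == 0)
			return true;
	return false;
}

static void add_orig(struct global_entry *e, const char *name, int ttvn)
{
	struct orig_entry *o;

	if (has_orig(e, name))
		return;			/* already announced by this originator */

	o = calloc(1, sizeof(*o));
	if (!o)
		return;
	snprintf(o->name, sizeof(o->name), "%s", name);
	o->ttvn = ttvn;
	o->next = e->orig_list;
	e->orig_list = o;
}

int main(void)
{
	struct global_entry entry = { .client = "02:aa:bb:cc:dd:ee" };

	add_orig(&entry, "node-A", 3);
	add_orig(&entry, "node-B", 5);
	add_orig(&entry, "node-A", 3);	/* duplicate, ignored */

	for (struct orig_entry *o = entry.orig_list; o; o = o->next)
		printf("%s announced by %s (ttvn %d)\n",
		       entry.client, o->name, o->ttvn);
	return 0;
}
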
+ */ + tt_global_del_orig_entry(bat_priv, tt_global_entry, + orig_node, message); +} + + + +static void tt_global_del(struct bat_priv *bat_priv, + struct orig_node *orig_node, + const unsigned char *addr, + const char *message, bool roaming) { struct tt_global_entry *tt_global_entry = NULL; struct tt_local_entry *tt_local_entry = NULL; tt_global_entry = tt_global_hash_find(bat_priv, addr); - if (!tt_global_entry || tt_global_entry->orig_node != orig_node) + if (!tt_global_entry) goto out; - if (!roaming) - goto out_del; + if (!roaming) { + tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node, + message); + + if (hlist_empty(&tt_global_entry->orig_list)) + tt_global_del_struct(bat_priv, tt_global_entry, + message); + + goto out; + } /* if we are deleting a global entry due to a roam * event, there are two possibilities: - * 1) the client roamed from node A to node B => we mark + * 1) the client roamed from node A to node B => if there + * is only one originator left for this client, we mark * it with TT_CLIENT_ROAM, we start a timer and we * wait for node B to claim it. In case of timeout * the entry is purged. + * + * If there are other originators left, we directly delete + * the originator. * 2) the client roamed to us => we can directly delete * the global entry, since it is useless now. */ + tt_local_entry = tt_local_hash_find(bat_priv, tt_global_entry->common.addr); - if (!tt_local_entry) { - tt_global_entry->common.flags |= TT_CLIENT_ROAM; - tt_global_entry->roam_at = jiffies; - goto out; - } + if (tt_local_entry) { + /* local entry exists, case 2: client roamed to us. */ + tt_global_del_orig_list(tt_global_entry); + tt_global_del_struct(bat_priv, tt_global_entry, message); + } else + /* no local entry exists, case 1: check for roaming */ + tt_global_del_roaming(bat_priv, tt_global_entry, orig_node, + message); -out_del: - _tt_global_del(bat_priv, tt_global_entry, message); out: if (tt_global_entry) @@ -709,11 +895,14 @@ void tt_global_del_orig(struct bat_priv *bat_priv, tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, common); - if (tt_global_entry->orig_node == orig_node) { + + tt_global_del_orig_entry(bat_priv, tt_global_entry, + orig_node, message); + + if (hlist_empty(&tt_global_entry->orig_list)) { bat_dbg(DBG_TT, bat_priv, - "Deleting global tt entry %pM (via %pM): %s\n", + "Deleting global tt entry %pM: %s\n", tt_global_entry->common.addr, - tt_global_entry->orig_node->orig, message); hlist_del_rcu(node); tt_global_entry_free_ref(tt_global_entry); @@ -754,7 +943,7 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv) bat_dbg(DBG_TT, bat_priv, "Deleting global tt entry (%pM): Roaming timeout\n", tt_global_entry->common.addr); - atomic_dec(&tt_global_entry->orig_node->tt_size); + hlist_del_rcu(node); tt_global_entry_free_ref(tt_global_entry); } @@ -817,6 +1006,11 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv, struct tt_local_entry *tt_local_entry = NULL; struct tt_global_entry *tt_global_entry = NULL; struct orig_node *orig_node = NULL; + struct neigh_node *router = NULL; + struct hlist_head *head; + struct hlist_node *node; + struct tt_orig_list_entry *orig_entry; + int best_tq; if (src && atomic_read(&bat_priv->ap_isolation)) { tt_local_entry = tt_local_hash_find(bat_priv, src); @@ -833,11 +1027,25 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv, if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry)) goto out; - if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount)) - goto 
out; + best_tq = 0; - orig_node = tt_global_entry->orig_node; + rcu_read_lock(); + head = &tt_global_entry->orig_list; + hlist_for_each_entry_rcu(orig_entry, node, head, list) { + router = orig_node_get_router(orig_entry->orig_node); + if (!router) + continue; + if (router->tq_avg > best_tq) { + orig_node = orig_entry->orig_node; + best_tq = router->tq_avg; + } + neigh_node_free_ref(router); + } + /* found anything? */ + if (orig_node && !atomic_inc_not_zero(&orig_node->refcount)) + orig_node = NULL; + rcu_read_unlock(); out: if (tt_global_entry) tt_global_entry_free_ref(tt_global_entry); @@ -848,7 +1056,8 @@ out: } /* Calculates the checksum of the local table of a given orig_node */ -uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node) +static uint16_t tt_global_crc(struct bat_priv *bat_priv, + struct orig_node *orig_node) { uint16_t total = 0, total_one; struct hashtable_t *hash = bat_priv->tt_global_hash; @@ -868,20 +1077,26 @@ uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node) tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, common); - if (compare_eth(tt_global_entry->orig_node, - orig_node)) { - /* Roaming clients are in the global table for - * consistency only. They don't have to be - * taken into account while computing the - * global crc */ - if (tt_common_entry->flags & TT_CLIENT_ROAM) - continue; - total_one = 0; - for (j = 0; j < ETH_ALEN; j++) - total_one = crc16_byte(total_one, - tt_common_entry->addr[j]); - total ^= total_one; - } + /* Roaming clients are in the global table for + * consistency only. They don't have to be + * taken into account while computing the + * global crc + */ + if (tt_global_entry->common.flags & TT_CLIENT_ROAM) + continue; + + /* find out if this global entry is announced by this + * originator + */ + if (!tt_global_entry_has_orig(tt_global_entry, + orig_node)) + continue; + + total_one = 0; + for (j = 0; j < ETH_ALEN; j++) + total_one = crc16_byte(total_one, + tt_global_entry->common.addr[j]); + total ^= total_one; } rcu_read_unlock(); } @@ -936,8 +1151,10 @@ static void tt_req_list_free(struct bat_priv *bat_priv) spin_unlock_bh(&bat_priv->tt_req_list_lock); } -void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node, - const unsigned char *tt_buff, uint8_t tt_num_changes) +static void tt_save_orig_buffer(struct bat_priv *bat_priv, + struct orig_node *orig_node, + const unsigned char *tt_buff, + uint8_t tt_num_changes) { uint16_t tt_buff_len = tt_len(tt_num_changes); @@ -1020,7 +1237,7 @@ static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr) tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, common); - return (tt_global_entry->orig_node == orig_node); + return tt_global_entry_has_orig(tt_global_entry, orig_node); } static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, @@ -1124,7 +1341,7 @@ static int send_tt_request(struct bat_priv *bat_priv, memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN); tt_request->header.ttl = TTL; tt_request->ttvn = ttvn; - tt_request->tt_data = tt_crc; + tt_request->tt_data = htons(tt_crc); tt_request->flags = TT_REQUEST; if (full_table) @@ -1401,10 +1618,15 @@ out: bool send_tt_response(struct bat_priv *bat_priv, struct tt_query_packet *tt_request) { - if (is_my_mac(tt_request->dst)) + if (is_my_mac(tt_request->dst)) { + /* don't answer backbone gws! 
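
With several originators potentially announcing the same client, transtable_search() above has to choose one; the new loop keeps the candidate whose router currently reports the best TQ. A stripped-down model of that selection, with made-up candidate data:

#include <stdio.h>

struct candidate {
	const char *orig;
	int tq_avg;	/* 0..255, TQ of the router towards this originator */
};

int main(void)
{
	struct candidate candidates[] = {
		{ "node-A", 178 },
		{ "node-B", 243 },
		{ "node-C",  90 },
	};
	const struct candidate *best = NULL;
	int best_tq = 0;

	for (unsigned int i = 0; i < 3; i++) {
		if (candidates[i].tq_avg > best_tq) {
			best = &candidates[i];
			best_tq = candidates[i].tq_avg;
		}
	}

	if (best)
		printf("routing towards %s (tq %d)\n", best->orig, best_tq);
	return 0;
}
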
*/ + if (bla_is_backbone_gw_orig(bat_priv, tt_request->src)) + return true; + return send_my_tt_response(bat_priv, tt_request); - else + } else { return send_other_tt_response(bat_priv, tt_request); + } } static void _tt_update_changes(struct bat_priv *bat_priv, @@ -1508,6 +1730,10 @@ void handle_tt_response(struct bat_priv *bat_priv, tt_response->src, tt_response->ttvn, tt_response->tt_data, (tt_response->flags & TT_FULL_TABLE ? 'F' : '.')); + /* we should have never asked a backbone gw */ + if (bla_is_backbone_gw_orig(bat_priv, tt_response->src)) + goto out; + orig_node = orig_hash_find(bat_priv, tt_response->src); if (!orig_node) goto out; @@ -1627,8 +1853,8 @@ unlock: return ret; } -void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, - struct orig_node *orig_node) +static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, + struct orig_node *orig_node) { struct neigh_node *neigh_node = NULL; struct sk_buff *skb = NULL; @@ -1796,6 +2022,8 @@ void tt_commit_changes(struct bat_priv *bat_priv) /* Increment the TTVN only once per OGM interval */ atomic_inc(&bat_priv->ttvn); + bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n", + (uint8_t)atomic_read(&bat_priv->ttvn)); bat_priv->tt_poss_change = false; } @@ -1836,6 +2064,10 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); bool full_table = true; + /* don't care about a backbone gateways updates. */ + if (bla_is_backbone_gw_orig(bat_priv, orig_node->orig)) + return; + /* orig table not initialised AND first diff is in the OGM OR the ttvn * increased by one -> we can apply the attached changes */ if ((!orig_node->tt_initialised && ttvn == 1) || @@ -1873,6 +2105,7 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, } else { /* if we missed more than one change or our tables are not * in sync anymore -> request fresh tt data */ + if (!orig_node->tt_initialised || ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) { request_table: @@ -1886,3 +2119,22 @@ request_table: } } } + +/* returns true whether we know that the client has moved from its old + * originator to another one. This entry is kept is still kept for consistency + * purposes + */ +bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr) +{ + struct tt_global_entry *tt_global_entry; + bool ret = false; + + tt_global_entry = tt_global_hash_find(bat_priv, addr); + if (!tt_global_entry) + goto out; + + ret = tt_global_entry->common.flags & TT_CLIENT_ROAM; + tt_global_entry_free_ref(tt_global_entry); +out: + return ret; +} diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h index c753633b1da..c43374dc364 100644 --- a/net/batman-adv/translation-table.h +++ b/net/batman-adv/translation-table.h @@ -1,7 +1,7 @@ /* * Copyright (C) 2007-2012 B.A.T.M.A.N. 
contributors: * - * Marek Lindner, Simon Wunderlich + * Marek Lindner, Simon Wunderlich, Antonio Quartulli * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -39,27 +39,21 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, int tt_global_seq_print_text(struct seq_file *seq, void *offset); void tt_global_del_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, const char *message); -void tt_global_del(struct bat_priv *bat_priv, - struct orig_node *orig_node, const unsigned char *addr, - const char *message, bool roaming); struct orig_node *transtable_search(struct bat_priv *bat_priv, const uint8_t *src, const uint8_t *addr); -void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node, - const unsigned char *tt_buff, uint8_t tt_num_changes); uint16_t tt_local_crc(struct bat_priv *bat_priv); -uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node); void tt_free(struct bat_priv *bat_priv); bool send_tt_response(struct bat_priv *bat_priv, struct tt_query_packet *tt_request); bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr); void handle_tt_response(struct bat_priv *bat_priv, struct tt_query_packet *tt_response); -void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, - struct orig_node *orig_node); void tt_commit_changes(struct bat_priv *bat_priv); bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst); void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, const unsigned char *tt_buff, uint8_t tt_num_changes, uint8_t ttvn, uint16_t tt_crc); +bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr); + #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 302efb52347..61308e8016f 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -27,7 +27,7 @@ #include "packet.h" #include "bitarray.h" -#define BAT_HEADER_LEN (sizeof(struct ethhdr) + \ +#define BAT_HEADER_LEN (ETH_HLEN + \ ((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? 
\ sizeof(struct unicast_packet) : \ sizeof(struct bcast_packet)))) @@ -52,7 +52,7 @@ struct hard_iface { /** * orig_node - structure for orig_list maintaining nodes of mesh * @primary_addr: hosts primary interface address - * @last_valid: when last packet from this node was received + * @last_seen: when last packet from this node was received * @bcast_seqno_reset: time when the broadcast seqno window was reset * @batman_seqno_reset: time when the batman seqno window was reset * @gw_flags: flags related to gateway class @@ -70,7 +70,7 @@ struct orig_node { struct neigh_node __rcu *router; /* rcu protected pointer */ unsigned long *bcast_own; uint8_t *bcast_own_sum; - unsigned long last_valid; + unsigned long last_seen; unsigned long bcast_seqno_reset; unsigned long batman_seqno_reset; uint8_t gw_flags; @@ -90,7 +90,7 @@ struct orig_node { bool tt_poss_change; uint32_t last_real_seqno; uint8_t last_ttl; - unsigned long bcast_bits[NUM_WORDS]; + DECLARE_BITMAP(bcast_bits, TQ_LOCAL_WINDOW_SIZE); uint32_t last_bcast_seqno; struct hlist_head neigh_list; struct list_head frag_list; @@ -120,7 +120,7 @@ struct gw_node { /** * neigh_node - * @last_valid: when last packet via this neighbor was received + * @last_seen: when last packet via this neighbor was received */ struct neigh_node { struct hlist_node list; @@ -131,15 +131,22 @@ struct neigh_node { uint8_t tq_avg; uint8_t last_ttl; struct list_head bonding_list; - unsigned long last_valid; - unsigned long real_bits[NUM_WORDS]; + unsigned long last_seen; + DECLARE_BITMAP(real_bits, TQ_LOCAL_WINDOW_SIZE); atomic_t refcount; struct rcu_head rcu; struct orig_node *orig_node; struct hard_iface *if_incoming; - spinlock_t tq_lock; /* protects: tq_recv, tq_index */ + spinlock_t lq_update_lock; /* protects: tq_recv, tq_index */ }; +#ifdef CONFIG_BATMAN_ADV_BLA +struct bcast_duplist_entry { + uint8_t orig[ETH_ALEN]; + uint16_t crc; + unsigned long entrytime; +}; +#endif struct bat_priv { atomic_t mesh_state; @@ -148,6 +155,7 @@ struct bat_priv { atomic_t bonding; /* boolean */ atomic_t fragmentation; /* boolean */ atomic_t ap_isolation; /* boolean */ + atomic_t bridge_loop_avoidance; /* boolean */ atomic_t vis_mode; /* VIS_TYPE_* */ atomic_t gw_mode; /* GW_MODE_* */ atomic_t gw_sel_class; /* uint */ @@ -161,6 +169,7 @@ struct bat_priv { atomic_t ttvn; /* translation table version number */ atomic_t tt_ogm_append_cnt; atomic_t tt_local_changes; /* changes registered in a OGM interval */ + atomic_t bla_num_requests; /* number of bla requests in flight */ /* The tt_poss_change flag is used to detect an ongoing roaming phase. 
* If true, then I received a Roaming_adv and I have to inspect every * packet directed to me to check whether I am still the true @@ -174,15 +183,23 @@ struct bat_priv { struct hlist_head forw_bat_list; struct hlist_head forw_bcast_list; struct hlist_head gw_list; - struct hlist_head softif_neigh_vids; struct list_head tt_changes_list; /* tracks changes in a OGM int */ struct list_head vis_send_list; struct hashtable_t *orig_hash; struct hashtable_t *tt_local_hash; struct hashtable_t *tt_global_hash; +#ifdef CONFIG_BATMAN_ADV_BLA + struct hashtable_t *claim_hash; + struct hashtable_t *backbone_hash; +#endif struct list_head tt_req_list; /* list of pending tt_requests */ struct list_head tt_roam_list; struct hashtable_t *vis_hash; +#ifdef CONFIG_BATMAN_ADV_BLA + struct bcast_duplist_entry bcast_duplist[DUPLIST_SIZE]; + int bcast_duplist_curr; + struct bla_claim_dst claim_dest; +#endif spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ spinlock_t forw_bcast_list_lock; /* protects */ spinlock_t tt_changes_list_lock; /* protects tt_changes */ @@ -191,8 +208,6 @@ struct bat_priv { spinlock_t gw_list_lock; /* protects gw_list and curr_gw */ spinlock_t vis_hash_lock; /* protects vis_hash */ spinlock_t vis_list_lock; /* protects vis_info::recv_list */ - spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */ - spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */ atomic_t num_local_tt; /* Checksum of the local table, recomputed before sending a new OGM */ atomic_t tt_crc; @@ -202,6 +217,7 @@ struct bat_priv { struct delayed_work tt_work; struct delayed_work orig_work; struct delayed_work vis_work; + struct delayed_work bla_work; struct gw_node __rcu *curr_gw; /* rcu protected pointer */ atomic_t gw_reselect; struct hard_iface __rcu *primary_if; /* rcu protected pointer */ @@ -239,10 +255,41 @@ struct tt_local_entry { struct tt_global_entry { struct tt_common_entry common; + struct hlist_head orig_list; + spinlock_t list_lock; /* protects the list */ + unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */ +}; + +struct tt_orig_list_entry { struct orig_node *orig_node; uint8_t ttvn; - unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */ + struct rcu_head rcu; + struct hlist_node list; +}; + +#ifdef CONFIG_BATMAN_ADV_BLA +struct backbone_gw { + uint8_t orig[ETH_ALEN]; + short vid; /* used VLAN ID */ + struct hlist_node hash_entry; + struct bat_priv *bat_priv; + unsigned long lasttime; /* last time we heard of this backbone gw */ + atomic_t request_sent; + atomic_t refcount; + struct rcu_head rcu; + uint16_t crc; /* crc checksum over all claims */ +}; + +struct claim { + uint8_t addr[ETH_ALEN]; + short vid; + struct backbone_gw *backbone_gw; + unsigned long lasttime; /* last time we heard of claim (locals only) */ + struct rcu_head rcu; + atomic_t refcount; + struct hlist_node hash_entry; }; +#endif struct tt_change_node { struct list_head list; @@ -327,41 +374,24 @@ struct recvlist_node { uint8_t mac[ETH_ALEN]; }; -struct softif_neigh_vid { - struct hlist_node list; - struct bat_priv *bat_priv; - short vid; - atomic_t refcount; - struct softif_neigh __rcu *softif_neigh; - struct rcu_head rcu; - struct hlist_head softif_neigh_list; -}; - -struct softif_neigh { - struct hlist_node list; - uint8_t addr[ETH_ALEN]; - unsigned long last_seen; - atomic_t refcount; - struct rcu_head rcu; -}; - struct bat_algo_ops { struct hlist_node list; char *name; - /* init OGM when hard-interface is enabled */ - void (*bat_ogm_init)(struct hard_iface 
*hard_iface); - /* init primary OGM when primary interface is selected */ - void (*bat_ogm_init_primary)(struct hard_iface *hard_iface); - /* init mac addresses of the OGM belonging to this hard-interface */ - void (*bat_ogm_update_mac)(struct hard_iface *hard_iface); + /* init routing info when hard-interface is enabled */ + int (*bat_iface_enable)(struct hard_iface *hard_iface); + /* de-init routing info when hard-interface is disabled */ + void (*bat_iface_disable)(struct hard_iface *hard_iface); + /* (re-)init mac addresses of the protocol information + * belonging to this hard-interface + */ + void (*bat_iface_update_mac)(struct hard_iface *hard_iface); + /* called when primary interface is selected / changed */ + void (*bat_primary_iface_set)(struct hard_iface *hard_iface); /* prepare a new outgoing OGM for the send queue */ void (*bat_ogm_schedule)(struct hard_iface *hard_iface, int tt_num_changes); /* send scheduled OGM */ void (*bat_ogm_emit)(struct forw_packet *forw_packet); - /* receive incoming OGM */ - void (*bat_ogm_receive)(struct hard_iface *if_incoming, - struct sk_buff *skb); }; #endif /* _NET_BATMAN_ADV_TYPES_H_ */ diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c index 676f6a626b2..74175c21085 100644 --- a/net/batman-adv/unicast.c +++ b/net/batman-adv/unicast.c @@ -331,6 +331,14 @@ find_router: unicast_packet->ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); + /* inform the destination node that we are still missing a correct route + * for this client. The destination will receive this packet and will + * try to reroute it because the ttvn contained in the header is less + * than the current one + */ + if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest)) + unicast_packet->ttvn = unicast_packet->ttvn - 1; + if (atomic_read(&bat_priv->fragmentation) && data_len + sizeof(*unicast_packet) > neigh_node->if_incoming->net_dev->mtu) { diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c index c4a5b8cafad..cec216fb77c 100644 --- a/net/batman-adv/vis.c +++ b/net/batman-adv/vis.c @@ -434,12 +434,12 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv, return NULL; info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len + - sizeof(struct ethhdr)); + ETH_HLEN); if (!info->skb_packet) { kfree(info); return NULL; } - skb_reserve(info->skb_packet, sizeof(struct ethhdr)); + skb_reserve(info->skb_packet, ETH_HLEN); packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet) + vis_info_len); @@ -894,11 +894,11 @@ int vis_init(struct bat_priv *bat_priv) bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) + MAX_VIS_PACKET_SIZE + - sizeof(struct ethhdr)); + ETH_HLEN); if (!bat_priv->my_vis_info->skb_packet) goto free_info; - skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr)); + skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN); packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet, sizeof(*packet)); diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index a779ec70332..88884d1d95f 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c @@ -69,7 +69,7 @@ static struct bnep_session *__bnep_get_session(u8 *dst) BT_DBG(""); list_for_each_entry(s, &bnep_session_list, list) - if (!compare_ether_addr(dst, s->eh.h_source)) + if (ether_addr_equal(dst, s->eh.h_source)) return s; return NULL; @@ -422,10 +422,10 @@ static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb) iv[il++] = (struct kvec) { &type, 1 }; len++; - if (compress_src && 
!compare_ether_addr(eh->h_dest, s->eh.h_source)) + if (compress_src && ether_addr_equal(eh->h_dest, s->eh.h_source)) type |= 0x01; - if (compress_dst && !compare_ether_addr(eh->h_source, s->eh.h_dest)) + if (compress_dst && ether_addr_equal(eh->h_source, s->eh.h_dest)) type |= 0x02; if (type) diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index ba829de8442..929e48aed44 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -170,7 +170,7 @@ static int br_set_mac_address(struct net_device *dev, void *p) return -EADDRNOTAVAIL; spin_lock_bh(&br->lock); - if (compare_ether_addr(dev->dev_addr, addr->sa_data)) { + if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) { dev->addr_assign_type &= ~NET_ADDR_RANDOM; memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); br_fdb_change_mac_address(br, addr->sa_data); @@ -317,6 +317,9 @@ static const struct net_device_ops br_netdev_ops = { .ndo_add_slave = br_add_slave, .ndo_del_slave = br_del_slave, .ndo_fix_features = br_fix_features, + .ndo_fdb_add = br_fdb_add, + .ndo_fdb_del = br_fdb_delete, + .ndo_fdb_dump = br_fdb_dump, }; static void br_dev_free(struct net_device *dev) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 5ba0c844d50..d21f3238351 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -107,8 +107,8 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr) struct net_bridge_port *op; list_for_each_entry(op, &br->port_list, list) { if (op != p && - !compare_ether_addr(op->dev->dev_addr, - f->addr.addr)) { + ether_addr_equal(op->dev->dev_addr, + f->addr.addr)) { f->dst = op; goto insert; } @@ -214,8 +214,8 @@ void br_fdb_delete_by_port(struct net_bridge *br, struct net_bridge_port *op; list_for_each_entry(op, &br->port_list, list) { if (op != p && - !compare_ether_addr(op->dev->dev_addr, - f->addr.addr)) { + ether_addr_equal(op->dev->dev_addr, + f->addr.addr)) { f->dst = op; goto skip_delete; } @@ -237,7 +237,7 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, struct net_bridge_fdb_entry *fdb; hlist_for_each_entry_rcu(fdb, h, &br->hash[br_mac_hash(addr)], hlist) { - if (!compare_ether_addr(fdb->addr.addr, addr)) { + if (ether_addr_equal(fdb->addr.addr, addr)) { if (unlikely(has_expired(br, fdb))) break; return fdb; @@ -331,7 +331,7 @@ static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head, struct net_bridge_fdb_entry *fdb; hlist_for_each_entry(fdb, h, head, hlist) { - if (!compare_ether_addr(fdb->addr.addr, addr)) + if (ether_addr_equal(fdb->addr.addr, addr)) return fdb; } return NULL; @@ -344,7 +344,7 @@ static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head, struct net_bridge_fdb_entry *fdb; hlist_for_each_entry_rcu(fdb, h, head, hlist) { - if (!compare_ether_addr(fdb->addr.addr, addr)) + if (ether_addr_equal(fdb->addr.addr, addr)) return fdb; } return NULL; @@ -487,14 +487,14 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br, ndm->ndm_ifindex = fdb->dst ? 
fdb->dst->dev->ifindex : br->dev->ifindex; ndm->ndm_state = fdb_to_nud(fdb); - NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr); - + if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr)) + goto nla_put_failure; ci.ndm_used = jiffies_to_clock_t(now - fdb->used); ci.ndm_confirmed = 0; ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated); ci.ndm_refcnt = 0; - NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci); - + if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) + goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: @@ -535,44 +535,38 @@ errout: } /* Dump information about entries, in response to GETNEIGH */ -int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) +int br_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + int idx) { - struct net *net = sock_net(skb->sk); - struct net_device *dev; - int idx = 0; - - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { - struct net_bridge *br = netdev_priv(dev); - int i; + struct net_bridge *br = netdev_priv(dev); + int i; - if (!(dev->priv_flags & IFF_EBRIDGE)) - continue; + if (!(dev->priv_flags & IFF_EBRIDGE)) + goto out; - for (i = 0; i < BR_HASH_SIZE; i++) { - struct hlist_node *h; - struct net_bridge_fdb_entry *f; - - hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) { - if (idx < cb->args[0]) - goto skip; + for (i = 0; i < BR_HASH_SIZE; i++) { + struct hlist_node *h; + struct net_bridge_fdb_entry *f; - if (fdb_fill_info(skb, br, f, - NETLINK_CB(cb->skb).pid, - cb->nlh->nlmsg_seq, - RTM_NEWNEIGH, - NLM_F_MULTI) < 0) - break; + hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) { + if (idx < cb->args[0]) + goto skip; + + if (fdb_fill_info(skb, br, f, + NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, + RTM_NEWNEIGH, + NLM_F_MULTI) < 0) + break; skip: - ++idx; - } + ++idx; } } - rcu_read_unlock(); - cb->args[0] = idx; - - return skb->len; +out: + return idx; } /* Update (create or replace) forwarding database entry */ @@ -614,43 +608,11 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, } /* Add new permanent fdb entry with RTM_NEWNEIGH */ -int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) +int br_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 nlh_flags) { - struct net *net = sock_net(skb->sk); - struct ndmsg *ndm; - struct nlattr *tb[NDA_MAX+1]; - struct net_device *dev; struct net_bridge_port *p; - const __u8 *addr; - int err; - - ASSERT_RTNL(); - err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); - if (err < 0) - return err; - - ndm = nlmsg_data(nlh); - if (ndm->ndm_ifindex == 0) { - pr_info("bridge: RTM_NEWNEIGH with invalid ifindex\n"); - return -EINVAL; - } - - dev = __dev_get_by_index(net, ndm->ndm_ifindex); - if (dev == NULL) { - pr_info("bridge: RTM_NEWNEIGH with unknown ifindex\n"); - return -ENODEV; - } - - if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { - pr_info("bridge: RTM_NEWNEIGH with invalid address\n"); - return -EINVAL; - } - - addr = nla_data(tb[NDA_LLADDR]); - if (!is_valid_ether_addr(addr)) { - pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n"); - return -EINVAL; - } + int err = 0; if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) { pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state); @@ -670,14 +632,14 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) rcu_read_unlock(); } else { spin_lock_bh(&p->br->hash_lock); - err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags); + err = fdb_add_entry(p, 
addr, ndm->ndm_state, nlh_flags); spin_unlock_bh(&p->br->hash_lock); } return err; } -static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr) +static int fdb_delete_by_addr(struct net_bridge_port *p, u8 *addr) { struct net_bridge *br = p->br; struct hlist_head *head = &br->hash[br_mac_hash(addr)]; @@ -692,40 +654,12 @@ static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr) } /* Remove neighbor entry with RTM_DELNEIGH */ -int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) +int br_fdb_delete(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr) { - struct net *net = sock_net(skb->sk); - struct ndmsg *ndm; struct net_bridge_port *p; - struct nlattr *llattr; - const __u8 *addr; - struct net_device *dev; int err; - ASSERT_RTNL(); - if (nlmsg_len(nlh) < sizeof(*ndm)) - return -EINVAL; - - ndm = nlmsg_data(nlh); - if (ndm->ndm_ifindex == 0) { - pr_info("bridge: RTM_DELNEIGH with invalid ifindex\n"); - return -EINVAL; - } - - dev = __dev_get_by_index(net, ndm->ndm_ifindex); - if (dev == NULL) { - pr_info("bridge: RTM_DELNEIGH with unknown ifindex\n"); - return -ENODEV; - } - - llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR); - if (llattr == NULL || nla_len(llattr) != ETH_ALEN) { - pr_info("bridge: RTM_DELNEIGH with invalid address\n"); - return -EINVAL; - } - - addr = nla_data(llattr); - p = br_port_get_rtnl(dev); if (p == NULL) { pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n", diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index a2098e3de50..e9466d41270 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -34,7 +34,7 @@ static inline int should_deliver(const struct net_bridge_port *p, p->state == BR_STATE_FORWARDING); } -static inline unsigned packet_length(const struct sk_buff *skb) +static inline unsigned int packet_length(const struct sk_buff *skb) { return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? 
VLAN_HLEN : 0); } diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 5a31731be4d..76f15fda021 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -216,7 +216,7 @@ forward: } /* fall through */ case BR_STATE_LEARNING: - if (!compare_ether_addr(p->br->dev->dev_addr, dest)) + if (ether_addr_equal(p->br->dev->dev_addr, dest)) skb->pkt_type = PACKET_HOST; NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 27ca25ed702..b66581208cb 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -36,6 +36,8 @@ #define mlock_dereference(X, br) \ rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock)) +static void br_multicast_start_querier(struct net_bridge *br); + #if IS_ENABLED(CONFIG_IPV6) static inline int ipv6_is_transient_multicast(const struct in6_addr *addr) { @@ -458,8 +460,8 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, hopopt[3] = 2; /* Length of RA Option */ hopopt[4] = 0; /* Type = 0x0000 (MLD) */ hopopt[5] = 0; - hopopt[6] = IPV6_TLV_PAD0; /* Pad0 */ - hopopt[7] = IPV6_TLV_PAD0; /* Pad0 */ + hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */ + hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */ skb_put(skb, sizeof(*ip6h) + 8); @@ -512,8 +514,8 @@ static struct net_bridge_mdb_entry *br_multicast_get_group( struct net_bridge_mdb_htable *mdb; struct net_bridge_mdb_entry *mp; struct hlist_node *p; - unsigned count = 0; - unsigned max; + unsigned int count = 0; + unsigned int max; int elasticity; int err; @@ -740,6 +742,20 @@ static void br_multicast_local_router_expired(unsigned long data) { } +static void br_multicast_querier_expired(unsigned long data) +{ + struct net_bridge *br = (void *)data; + + spin_lock(&br->multicast_lock); + if (!netif_running(br->dev) || br->multicast_disabled) + goto out; + + br_multicast_start_querier(br); + +out: + spin_unlock(&br->multicast_lock); +} + static void __br_multicast_send_query(struct net_bridge *br, struct net_bridge_port *port, struct br_ip *ip) @@ -766,6 +782,7 @@ static void br_multicast_send_query(struct net_bridge *br, struct br_ip br_group; if (!netif_running(br->dev) || br->multicast_disabled || + !br->multicast_querier || timer_pending(&br->multicast_querier_timer)) return; @@ -1281,8 +1298,8 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, struct sk_buff *skb2 = skb; const struct iphdr *iph; struct igmphdr *ih; - unsigned len; - unsigned offset; + unsigned int len; + unsigned int offset; int err; /* We treat OOM as packet loss for now. 
*/ @@ -1382,7 +1399,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, u8 icmp6_type; u8 nexthdr; __be16 frag_off; - unsigned len; + unsigned int len; int offset; int err; @@ -1548,6 +1565,7 @@ void br_multicast_init(struct net_bridge *br) br->hash_max = 512; br->multicast_router = 1; + br->multicast_querier = 0; br->multicast_last_member_count = 2; br->multicast_startup_query_count = 2; @@ -1562,7 +1580,7 @@ void br_multicast_init(struct net_bridge *br) setup_timer(&br->multicast_router_timer, br_multicast_local_router_expired, 0); setup_timer(&br->multicast_querier_timer, - br_multicast_local_router_expired, 0); + br_multicast_querier_expired, (unsigned long)br); setup_timer(&br->multicast_query_timer, br_multicast_query_expired, (unsigned long)br); } @@ -1689,9 +1707,23 @@ unlock: return err; } -int br_multicast_toggle(struct net_bridge *br, unsigned long val) +static void br_multicast_start_querier(struct net_bridge *br) { struct net_bridge_port *port; + + br_multicast_open(br); + + list_for_each_entry(port, &br->port_list, list) { + if (port->state == BR_STATE_DISABLED || + port->state == BR_STATE_BLOCKING) + continue; + + __br_multicast_enable_port(port); + } +} + +int br_multicast_toggle(struct net_bridge *br, unsigned long val) +{ int err = 0; struct net_bridge_mdb_htable *mdb; @@ -1721,14 +1753,7 @@ rollback: goto rollback; } - br_multicast_open(br); - list_for_each_entry(port, &br->port_list, list) { - if (port->state == BR_STATE_DISABLED || - port->state == BR_STATE_BLOCKING) - continue; - - __br_multicast_enable_port(port); - } + br_multicast_start_querier(br); unlock: spin_unlock_bh(&br->multicast_lock); @@ -1736,6 +1761,24 @@ unlock: return err; } +int br_multicast_set_querier(struct net_bridge *br, unsigned long val) +{ + val = !!val; + + spin_lock_bh(&br->multicast_lock); + if (br->multicast_querier == val) + goto unlock; + + br->multicast_querier = val; + if (val) + br_multicast_start_querier(br); + +unlock: + spin_unlock_bh(&br->multicast_lock); + + return 0; +} + int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val) { int err = -ENOENT; diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index d7f49b63ab0..e41456bd3cc 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -54,12 +54,14 @@ static int brnf_call_ip6tables __read_mostly = 1; static int brnf_call_arptables __read_mostly = 1; static int brnf_filter_vlan_tagged __read_mostly = 0; static int brnf_filter_pppoe_tagged __read_mostly = 0; +static int brnf_pass_vlan_indev __read_mostly = 0; #else #define brnf_call_iptables 1 #define brnf_call_ip6tables 1 #define brnf_call_arptables 1 #define brnf_filter_vlan_tagged 0 #define brnf_filter_pppoe_tagged 0 +#define brnf_pass_vlan_indev 0 #endif #define IS_IP(skb) \ @@ -503,6 +505,19 @@ bridged_dnat: return 0; } +static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct net_device *dev) +{ + struct net_device *vlan, *br; + + br = bridge_parent(dev); + if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb)) + return br; + + vlan = __vlan_find_dev_deep(br, vlan_tx_tag_get(skb) & VLAN_VID_MASK); + + return vlan ? 
vlan : br; +} + /* Some common code for IPv4/IPv6 */ static struct net_device *setup_pre_routing(struct sk_buff *skb) { @@ -515,7 +530,7 @@ static struct net_device *setup_pre_routing(struct sk_buff *skb) nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING; nf_bridge->physindev = skb->dev; - skb->dev = bridge_parent(skb->dev); + skb->dev = brnf_get_logical_dev(skb, skb->dev); if (skb->protocol == htons(ETH_P_8021Q)) nf_bridge->mask |= BRNF_8021Q; else if (skb->protocol == htons(ETH_P_PPP_SES)) @@ -543,7 +558,7 @@ static int check_hbh_len(struct sk_buff *skb) int optlen = nh[off + 1] + 2; switch (nh[off]) { - case IPV6_TLV_PAD0: + case IPV6_TLV_PAD1: optlen = 1; break; @@ -774,7 +789,7 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb, else skb->protocol = htons(ETH_P_IPV6); - NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent, + NF_HOOK(pf, NF_INET_FORWARD, skb, brnf_get_logical_dev(skb, in), parent, br_nf_forward_finish); return NF_STOLEN; @@ -1002,12 +1017,13 @@ static ctl_table brnf_table[] = { .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, - { } -}; - -static struct ctl_path brnf_path[] = { - { .procname = "net", }, - { .procname = "bridge", }, + { + .procname = "bridge-nf-pass-vlan-input-dev", + .data = &brnf_pass_vlan_indev, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = brnf_sysctl_call_tables, + }, { } }; #endif @@ -1026,7 +1042,7 @@ int __init br_netfilter_init(void) return ret; } #ifdef CONFIG_SYSCTL - brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table); + brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge", brnf_table); if (brnf_sysctl_header == NULL) { printk(KERN_WARNING "br_netfilter: can't register to sysctl.\n"); @@ -1043,7 +1059,7 @@ void br_netfilter_fini(void) { nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); #ifdef CONFIG_SYSCTL - unregister_sysctl_table(brnf_sysctl_header); + unregister_net_sysctl_table(brnf_sysctl_header); #endif dst_entries_destroy(&fake_dst_ops); } diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index a1daf8227ed..2080485515f 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -60,20 +60,17 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por hdr->ifi_flags = dev_get_flags(dev); hdr->ifi_change = 0; - NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); - NLA_PUT_U32(skb, IFLA_MASTER, br->dev->ifindex); - NLA_PUT_U32(skb, IFLA_MTU, dev->mtu); - NLA_PUT_U8(skb, IFLA_OPERSTATE, operstate); - - if (dev->addr_len) - NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); - - if (dev->ifindex != dev->iflink) - NLA_PUT_U32(skb, IFLA_LINK, dev->iflink); - - if (event == RTM_NEWLINK) - NLA_PUT_U8(skb, IFLA_PROTINFO, port->state); - + if (nla_put_string(skb, IFLA_IFNAME, dev->name) || + nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) || + nla_put_u32(skb, IFLA_MTU, dev->mtu) || + nla_put_u8(skb, IFLA_OPERSTATE, operstate) || + (dev->addr_len && + nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || + (dev->ifindex != dev->iflink && + nla_put_u32(skb, IFLA_LINK, dev->iflink)) || + (event == RTM_NEWLINK && + nla_put_u8(skb, IFLA_PROTINFO, port->state))) + goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: @@ -91,7 +88,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port) int err = -ENOBUFS; br_debug(port->br, "port %u(%s) event %d\n", - (unsigned)port->port_no, port->dev->name, event); + (unsigned int)port->port_no, port->dev->name, event); skb = nlmsg_new(br_nlmsg_size(), 
GFP_ATOMIC); if (skb == NULL) @@ -235,18 +232,6 @@ int __init br_netlink_init(void) br_rtm_setlink, NULL, NULL); if (err) goto err3; - err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, - br_fdb_add, NULL, NULL); - if (err) - goto err3; - err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH, - br_fdb_delete, NULL, NULL); - if (err) - goto err3; - err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH, - NULL, br_fdb_dump, NULL); - if (err) - goto err3; return 0; diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index e1d88225787..1a8ad4fb9a6 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -224,6 +224,7 @@ struct net_bridge unsigned char multicast_router; u8 multicast_disabled:1; + u8 multicast_querier:1; u32 hash_elasticity; u32 hash_max; @@ -359,9 +360,18 @@ extern int br_fdb_insert(struct net_bridge *br, extern void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, const unsigned char *addr); -extern int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb); -extern int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); -extern int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); + +extern int br_fdb_delete(struct ndmsg *ndm, + struct net_device *dev, + unsigned char *addr); +extern int br_fdb_add(struct ndmsg *nlh, + struct net_device *dev, + unsigned char *addr, + u16 nlh_flags); +extern int br_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + int idx); /* br_forward.c */ extern void br_deliver(const struct net_bridge_port *to, @@ -417,6 +427,7 @@ extern int br_multicast_set_router(struct net_bridge *br, unsigned long val); extern int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val); extern int br_multicast_toggle(struct net_bridge *br, unsigned long val); +extern int br_multicast_set_querier(struct net_bridge *br, unsigned long val); extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val); static inline bool br_multicast_is_router(struct net_bridge *br) diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h index 05ed9bc7e42..0c0fe36e7aa 100644 --- a/net/bridge/br_private_stp.h +++ b/net/bridge/br_private_stp.h @@ -29,10 +29,9 @@ #define BR_MIN_PATH_COST 1 #define BR_MAX_PATH_COST 65535 -struct br_config_bpdu -{ - unsigned topology_change:1; - unsigned topology_change_ack:1; +struct br_config_bpdu { + unsigned int topology_change:1; + unsigned int topology_change_ack:1; bridge_id root; int root_path_cost; bridge_id bridge_id; diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index 8c836d96ba7..af9a12099ba 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c @@ -32,7 +32,7 @@ static const char *const br_port_state_names[] = { void br_log_state(const struct net_bridge_port *p) { br_info(p->br, "port %u(%s) entered %s state\n", - (unsigned) p->port_no, p->dev->name, + (unsigned int) p->port_no, p->dev->name, br_port_state_names[p->state]); } @@ -478,7 +478,7 @@ void br_received_tcn_bpdu(struct net_bridge_port *p) { if (br_is_designated_port(p)) { br_info(p->br, "port %u(%s) received tcn bpdu\n", - (unsigned) p->port_no, p->dev->name); + (unsigned int) p->port_no, p->dev->name); br_topology_change_detection(p->br); br_topology_change_acknowledge(p); diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c index e16aade51ae..fd30a6022de 100644 --- a/net/bridge/br_stp_bpdu.c +++ b/net/bridge/br_stp_bpdu.c @@ -167,7 +167,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb, if 
(p->state == BR_STATE_DISABLED) goto out; - if (compare_ether_addr(dest, br->group_addr) != 0) + if (!ether_addr_equal(dest, br->group_addr)) goto out; buf = skb_pull(skb, 3); diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index f494496373d..9d5a414a394 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c @@ -178,7 +178,7 @@ void br_stp_set_enabled(struct net_bridge *br, unsigned long val) /* called under bridge lock */ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr) { - /* should be aligned on 2 bytes for compare_ether_addr() */ + /* should be aligned on 2 bytes for ether_addr_equal() */ unsigned short oldaddr_aligned[ETH_ALEN >> 1]; unsigned char *oldaddr = (unsigned char *)oldaddr_aligned; struct net_bridge_port *p; @@ -191,12 +191,11 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr) memcpy(br->dev->dev_addr, addr, ETH_ALEN); list_for_each_entry(p, &br->port_list, list) { - if (!compare_ether_addr(p->designated_bridge.addr, oldaddr)) + if (ether_addr_equal(p->designated_bridge.addr, oldaddr)) memcpy(p->designated_bridge.addr, addr, ETH_ALEN); - if (!compare_ether_addr(p->designated_root.addr, oldaddr)) + if (ether_addr_equal(p->designated_root.addr, oldaddr)) memcpy(p->designated_root.addr, addr, ETH_ALEN); - } br_configuration_update(br); @@ -205,7 +204,7 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr) br_become_root_bridge(br); } -/* should be aligned on 2 bytes for compare_ether_addr() */ +/* should be aligned on 2 bytes for ether_addr_equal() */ static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1]; /* called under bridge lock */ @@ -227,7 +226,7 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br) } - if (compare_ether_addr(br->bridge_id.addr, addr) == 0) + if (ether_addr_equal(br->bridge_id.addr, addr)) return false; /* no change */ br_stp_change_bridge_id(br, addr); diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c index 58de2a0f997..a6747e67342 100644 --- a/net/bridge/br_stp_timer.c +++ b/net/bridge/br_stp_timer.c @@ -56,7 +56,7 @@ static void br_message_age_timer_expired(unsigned long arg) return; br_info(br, "port %u(%s) neighbor %.2x%.2x.%pM lost\n", - (unsigned) p->port_no, p->dev->name, + (unsigned int) p->port_no, p->dev->name, id->prio[0], id->prio[1], &id->addr); /* @@ -84,7 +84,7 @@ static void br_forward_delay_timer_expired(unsigned long arg) struct net_bridge *br = p->br; br_debug(br, "port %u(%s) forward delay timer\n", - (unsigned) p->port_no, p->dev->name); + (unsigned int) p->port_no, p->dev->name); spin_lock(&br->lock); if (p->state == BR_STATE_LISTENING) { p->state = BR_STATE_LEARNING; @@ -131,7 +131,7 @@ static void br_hold_timer_expired(unsigned long arg) struct net_bridge_port *p = (struct net_bridge_port *) arg; br_debug(p->br, "port %u(%s) hold timer expired\n", - (unsigned) p->port_no, p->dev->name); + (unsigned int) p->port_no, p->dev->name); spin_lock(&p->br->lock); if (p->config_pending) diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index c236c0e4398..c5c059333ea 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -297,7 +297,7 @@ static ssize_t store_group_addr(struct device *d, const char *buf, size_t len) { struct net_bridge *br = to_bridge(d); - unsigned new_addr[6]; + unsigned int new_addr[6]; int i; if (!capable(CAP_NET_ADMIN)) @@ -379,6 +379,23 @@ static ssize_t store_multicast_snooping(struct device *d, static DEVICE_ATTR(multicast_snooping, S_IRUGO | 
S_IWUSR, show_multicast_snooping, store_multicast_snooping); +static ssize_t show_multicast_querier(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct net_bridge *br = to_bridge(d); + return sprintf(buf, "%d\n", br->multicast_querier); +} + +static ssize_t store_multicast_querier(struct device *d, + struct device_attribute *attr, + const char *buf, size_t len) +{ + return store_bridge_parm(d, buf, len, br_multicast_set_querier); +} +static DEVICE_ATTR(multicast_querier, S_IRUGO | S_IWUSR, + show_multicast_querier, store_multicast_querier); + static ssize_t show_hash_elasticity(struct device *d, struct device_attribute *attr, char *buf) { @@ -702,6 +719,7 @@ static struct attribute *bridge_attrs[] = { #ifdef CONFIG_BRIDGE_IGMP_SNOOPING &dev_attr_multicast_router.attr, &dev_attr_multicast_snooping.attr, + &dev_attr_multicast_querier.attr, &dev_attr_hash_elasticity.attr, &dev_attr_hash_max.attr, &dev_attr_multicast_last_member_count.attr, diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c index 5b33a2e634a..071d87214dd 100644 --- a/net/bridge/netfilter/ebt_stp.c +++ b/net/bridge/netfilter/ebt_stp.c @@ -164,8 +164,8 @@ static int ebt_stp_mt_check(const struct xt_mtchk_param *par) !(info->bitmask & EBT_STP_MASK)) return -EINVAL; /* Make sure the match only receives stp frames */ - if (compare_ether_addr(e->destmac, bridge_ula) || - compare_ether_addr(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC)) + if (!ether_addr_equal(e->destmac, bridge_ula) || + !ether_addr_equal(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC)) return -EINVAL; return 0; diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 5016fa57b62..fb894435526 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -19,7 +19,7 @@ #include <linux/uaccess.h> #include <linux/debugfs.h> #include <linux/caif/caif_socket.h> -#include <linux/atomic.h> +#include <linux/pkt_sched.h> #include <net/sock.h> #include <net/tcp_states.h> #include <net/caif/caif_layer.h> @@ -130,11 +130,10 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= - (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { - if (net_ratelimit()) - pr_debug("sending flow OFF (queue len = %d %d)\n", - atomic_read(&cf_sk->sk.sk_rmem_alloc), - sk_rcvbuf_lowwater(cf_sk)); + (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { + net_dbg_ratelimited("sending flow OFF (queue len = %d %d)\n", + atomic_read(&cf_sk->sk.sk_rmem_alloc), + sk_rcvbuf_lowwater(cf_sk)); set_rx_flow_off(cf_sk); caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); } @@ -144,8 +143,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) return err; if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) { set_rx_flow_off(cf_sk); - if (net_ratelimit()) - pr_debug("sending flow OFF due to rmem_schedule\n"); + net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n"); caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); } skb->dev = NULL; @@ -505,6 +503,7 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk, pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb); memset(skb->cb, 0, sizeof(struct caif_payload_info)); + cfpkt_set_prio(pkt, cf_sk->sk.sk_priority); if (cf_sk->layer.dn == NULL) { kfree_skb(skb); @@ -1062,6 +1061,18 @@ static int caif_create(struct net *net, struct socket *sock, int protocol, /* Store the protocol */ sk->sk_protocol = (unsigned char) protocol; + /* 
Initialize default priority for well-known cases */ + switch (protocol) { + case CAIFPROTO_AT: + sk->sk_priority = TC_PRIO_CONTROL; + break; + case CAIFPROTO_RFM: + sk->sk_priority = TC_PRIO_INTERACTIVE_BULK; + break; + default: + sk->sk_priority = TC_PRIO_BESTEFFORT; + } + /* * Lock in order to try to stop someone from opening the socket * too early. @@ -1081,7 +1092,6 @@ static int caif_create(struct net *net, struct socket *sock, int protocol, set_rx_flow_on(cf_sk); /* Set default options on configuration */ - cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL; cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; cf_sk->conn_req.protocol = protocol; release_sock(&cf_sk->sk); diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c index 5cf52225692..047cd0eec02 100644 --- a/net/caif/cfctrl.c +++ b/net/caif/cfctrl.c @@ -9,6 +9,7 @@ #include <linux/stddef.h> #include <linux/spinlock.h> #include <linux/slab.h> +#include <linux/pkt_sched.h> #include <net/caif/caif_layer.h> #include <net/caif/cfpkt.h> #include <net/caif/cfctrl.h> @@ -189,6 +190,7 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid) cfctrl->serv.dev_info.id = physlinkid; cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM); cfpkt_addbdy(pkt, physlinkid); + cfpkt_set_prio(pkt, TC_PRIO_CONTROL); dn->transmit(dn, pkt); } @@ -281,6 +283,7 @@ int cfctrl_linkup_request(struct cflayer *layer, * might arrive with the newly allocated channel ID. */ cfpkt_info(pkt)->dev_info->id = param->phyid; + cfpkt_set_prio(pkt, TC_PRIO_CONTROL); ret = dn->transmit(dn, pkt); if (ret < 0) { @@ -314,6 +317,7 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid, cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY); cfpkt_addbdy(pkt, channelid); init_info(cfpkt_info(pkt), cfctrl); + cfpkt_set_prio(pkt, TC_PRIO_CONTROL); ret = dn->transmit(dn, pkt); #ifndef CAIF_NO_LOOP diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c index e335ba859b9..863dedd91bb 100644 --- a/net/caif/cfpkt_skbuff.c +++ b/net/caif/cfpkt_skbuff.c @@ -381,6 +381,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos) memcpy(skb2->data, split, len2nd); skb2->tail += len2nd; skb2->len += len2nd; + skb2->priority = skb->priority; return skb_to_pkt(skb2); } @@ -394,3 +395,9 @@ struct caif_payload_info *cfpkt_info(struct cfpkt *pkt) return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb; } EXPORT_SYMBOL(cfpkt_info); + +void cfpkt_set_prio(struct cfpkt *pkt, int prio) +{ + pkt_to_skb(pkt)->priority = prio; +} +EXPORT_SYMBOL(cfpkt_set_prio); diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c index 4aa33d4496b..dd485f6128e 100644 --- a/net/caif/cfsrvl.c +++ b/net/caif/cfsrvl.c @@ -11,6 +11,7 @@ #include <linux/errno.h> #include <linux/slab.h> #include <linux/module.h> +#include <linux/pkt_sched.h> #include <net/caif/caif_layer.h> #include <net/caif/cfsrvl.h> #include <net/caif/cfpkt.h> @@ -120,6 +121,7 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) info->channel_id = service->layer.id; info->hdr_len = 1; info->dev_info = &service->dev_info; + cfpkt_set_prio(pkt, TC_PRIO_CONTROL); return layr->dn->transmit(layr->dn, pkt); } case CAIF_MODEMCMD_FLOW_OFF_REQ: @@ -140,6 +142,7 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) info->channel_id = service->layer.id; info->hdr_len = 1; info->dev_info = &service->dev_info; + cfpkt_set_prio(pkt, TC_PRIO_CONTROL); return layr->dn->transmit(layr->dn, pkt); } default: diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index d09340e1523..69771c04ba8 100644 --- a/net/caif/chnl_net.c +++ 
b/net/caif/chnl_net.c @@ -424,14 +424,14 @@ static int ipcaif_fill_info(struct sk_buff *skb, const struct net_device *dev) struct chnl_net *priv; u8 loop; priv = netdev_priv(dev); - NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID, - priv->conn_req.sockaddr.u.dgm.connection_id); - NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID, - priv->conn_req.sockaddr.u.dgm.connection_id); + if (nla_put_u32(skb, IFLA_CAIF_IPV4_CONNID, + priv->conn_req.sockaddr.u.dgm.connection_id) || + nla_put_u32(skb, IFLA_CAIF_IPV6_CONNID, + priv->conn_req.sockaddr.u.dgm.connection_id)) + goto nla_put_failure; loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP; - NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop); - - + if (nla_put_u8(skb, IFLA_CAIF_LOOPBACK, loop)) + goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; diff --git a/net/can/gw.c b/net/can/gw.c index 3d79b127881..b41acf25668 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -66,7 +66,7 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>"); MODULE_ALIAS("can-gw"); -HLIST_HEAD(cgw_list); +static HLIST_HEAD(cgw_list); static struct notifier_block notifier; static struct kmem_cache *cgw_cache __read_mostly; diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h index e02da7a5c5a..f459e93b774 100644 --- a/net/ceph/auth_x.h +++ b/net/ceph/auth_x.h @@ -13,7 +13,7 @@ */ struct ceph_x_ticket_handler { struct rb_node node; - unsigned service; + unsigned int service; struct ceph_crypto_key session_key; struct ceph_timespec validity; @@ -27,7 +27,7 @@ struct ceph_x_ticket_handler { struct ceph_x_authorizer { struct ceph_buffer *buf; - unsigned service; + unsigned int service; u64 nonce; char reply_buf[128]; /* big enough for encrypted blob */ }; @@ -38,7 +38,7 @@ struct ceph_x_info { bool starting; u64 server_challenge; - unsigned have_keys; + unsigned int have_keys; struct rb_root ticket_handlers; struct ceph_x_authorizer auth_authorizer; diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index cc913193d99..a776f751edb 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -441,8 +441,8 @@ EXPORT_SYMBOL(ceph_client_id); * create a fresh client instance */ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private, - unsigned supported_features, - unsigned required_features) + unsigned int supported_features, + unsigned int required_features) { struct ceph_client *client; struct ceph_entity_addr *myaddr = NULL; diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c index 0a1b53bce76..67bb1f11e61 100644 --- a/net/ceph/ceph_hash.c +++ b/net/ceph/ceph_hash.c @@ -20,7 +20,7 @@ c = c - a; c = c - b; c = c ^ (b >> 15); \ } while (0) -unsigned ceph_str_hash_rjenkins(const char *str, unsigned length) +unsigned int ceph_str_hash_rjenkins(const char *str, unsigned int length) { const unsigned char *k = (const unsigned char *)str; __u32 a, b, c; /* the internal state */ @@ -81,7 +81,7 @@ unsigned ceph_str_hash_rjenkins(const char *str, unsigned length) /* * linux dcache hash */ -unsigned ceph_str_hash_linux(const char *str, unsigned length) +unsigned int ceph_str_hash_linux(const char *str, unsigned int length) { unsigned long hash = 0; unsigned char c; @@ -94,7 +94,7 @@ unsigned ceph_str_hash_linux(const char *str, unsigned length) } -unsigned ceph_str_hash(int type, const char *s, unsigned len) +unsigned int ceph_str_hash(int type, const char *s, unsigned int len) { switch (type) { case CEPH_STR_HASH_LINUX: diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c index 
b79747c4b64..363f8f7e6c3 100644 --- a/net/ceph/crush/mapper.c +++ b/net/ceph/crush/mapper.c @@ -20,6 +20,7 @@ #include <linux/crush/crush.h> #include <linux/crush/hash.h> +#include <linux/crush/mapper.h> /* * Implement the core CRUSH mapping algorithm. @@ -68,8 +69,8 @@ int crush_find_rule(struct crush_map *map, int ruleset, int type, int size) static int bucket_perm_choose(struct crush_bucket *bucket, int x, int r) { - unsigned pr = r % bucket->size; - unsigned i, s; + unsigned int pr = r % bucket->size; + unsigned int i, s; /* start a new permutation if @x has changed */ if (bucket->perm_x != x || bucket->perm_n == 0) { @@ -100,13 +101,13 @@ static int bucket_perm_choose(struct crush_bucket *bucket, for (i = 0; i < bucket->perm_n; i++) dprintk(" perm_choose have %d: %d\n", i, bucket->perm[i]); while (bucket->perm_n <= pr) { - unsigned p = bucket->perm_n; + unsigned int p = bucket->perm_n; /* no point in swapping the final entry */ if (p < bucket->size - 1) { i = crush_hash32_3(bucket->hash, x, bucket->id, p) % (bucket->size - p); if (i) { - unsigned t = bucket->perm[p + i]; + unsigned int t = bucket->perm[p + i]; bucket->perm[p + i] = bucket->perm[p]; bucket->perm[p] = t; } diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c index 27d4ea315d1..54b531a0112 100644 --- a/net/ceph/debugfs.c +++ b/net/ceph/debugfs.c @@ -94,9 +94,9 @@ static int monc_show(struct seq_file *s, void *p) mutex_lock(&monc->mutex); if (monc->have_mdsmap) - seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap); + seq_printf(s, "have mdsmap %u\n", (unsigned int)monc->have_mdsmap); if (monc->have_osdmap) - seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap); + seq_printf(s, "have osdmap %u\n", (unsigned int)monc->have_osdmap); if (monc->want_next_osdmap) seq_printf(s, "want next osdmap\n"); @@ -146,7 +146,7 @@ static int osdc_show(struct seq_file *s, void *pp) if (req->r_reassert_version.epoch) seq_printf(s, "\t%u'%llu", - (unsigned)le32_to_cpu(req->r_reassert_version.epoch), + (unsigned int)le32_to_cpu(req->r_reassert_version.epoch), le64_to_cpu(req->r_reassert_version.version)); else seq_printf(s, "\t"); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index f0993af2ae4..36fa6bf6849 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -699,7 +699,7 @@ static int prepare_write_connect(struct ceph_messenger *msgr, struct ceph_connection *con, int include_banner) { - unsigned global_seq = get_global_seq(con->msgr, 0); + unsigned int global_seq = get_global_seq(con->msgr, 0); int proto; switch (con->peer_name.type) { @@ -816,7 +816,7 @@ static void iter_bio_next(struct bio **bio_iter, int *seg) static int write_partial_msg_pages(struct ceph_connection *con) { struct ceph_msg *msg = con->out_msg; - unsigned data_len = le32_to_cpu(msg->hdr.data_len); + unsigned int data_len = le32_to_cpu(msg->hdr.data_len); size_t len; bool do_datacrc = !con->msgr->nocrc; int ret; @@ -1554,7 +1554,7 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, static int read_partial_message_pages(struct ceph_connection *con, struct page **pages, - unsigned data_len, bool do_datacrc) + unsigned int data_len, bool do_datacrc) { void *p; int ret; @@ -1587,7 +1587,7 @@ static int read_partial_message_pages(struct ceph_connection *con, #ifdef CONFIG_BLOCK static int read_partial_message_bio(struct ceph_connection *con, struct bio **bio_iter, int *bio_seg, - unsigned data_len, bool do_datacrc) + unsigned int data_len, bool do_datacrc) { struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg); 
void *p; @@ -1629,7 +1629,7 @@ static int read_partial_message(struct ceph_connection *con) struct ceph_msg *m = con->in_msg; int ret; int to, left; - unsigned front_len, middle_len, data_len; + unsigned int front_len, middle_len, data_len; bool do_datacrc = !con->msgr->nocrc; int skip; u64 seq; @@ -2345,9 +2345,9 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg) { mutex_lock(&con->mutex); if (con->in_msg && con->in_msg == msg) { - unsigned front_len = le32_to_cpu(con->in_hdr.front_len); - unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len); - unsigned data_len = le32_to_cpu(con->in_hdr.data_len); + unsigned int front_len = le32_to_cpu(con->in_hdr.front_len); + unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len); + unsigned int data_len = le32_to_cpu(con->in_hdr.data_len); /* skip rest of message */ dout("con_revoke_pages %p msg %p revoked\n", con, msg); diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index 1845cde2622..10d6008d31f 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -168,7 +168,7 @@ static bool __sub_expired(struct ceph_mon_client *monc) */ static void __schedule_delayed(struct ceph_mon_client *monc) { - unsigned delay; + unsigned int delay; if (monc->cur_mon < 0 || __sub_expired(monc)) delay = 10 * HZ; @@ -184,7 +184,7 @@ static void __schedule_delayed(struct ceph_mon_client *monc) static void __send_subscribe(struct ceph_mon_client *monc) { dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n", - (unsigned)monc->sub_sent, __sub_expired(monc), + (unsigned int)monc->sub_sent, __sub_expired(monc), monc->want_next_osdmap); if ((__sub_expired(monc) && !monc->sub_sent) || monc->want_next_osdmap == 1) { @@ -201,7 +201,7 @@ static void __send_subscribe(struct ceph_mon_client *monc) if (monc->want_next_osdmap) { dout("__send_subscribe to 'osdmap' %u\n", - (unsigned)monc->have_osdmap); + (unsigned int)monc->have_osdmap); ceph_encode_string(&p, end, "osdmap", 6); i = p; i->have = cpu_to_le64(monc->have_osdmap); @@ -211,7 +211,7 @@ static void __send_subscribe(struct ceph_mon_client *monc) } if (monc->want_mdsmap) { dout("__send_subscribe to 'mdsmap' %u+\n", - (unsigned)monc->have_mdsmap); + (unsigned int)monc->have_mdsmap); ceph_encode_string(&p, end, "mdsmap", 6); i = p; i->have = cpu_to_le64(monc->have_mdsmap); @@ -236,7 +236,7 @@ static void __send_subscribe(struct ceph_mon_client *monc) static void handle_subscribe_ack(struct ceph_mon_client *monc, struct ceph_msg *msg) { - unsigned seconds; + unsigned int seconds; struct ceph_mon_subscribe_ack *h = msg->front.iov_base; if (msg->front.iov_len < sizeof(*h)) diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 5e254055c91..1b0ef3c4d39 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1214,7 +1214,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, } if (!req->r_got_reply) { - unsigned bytes; + unsigned int bytes; req->r_result = le32_to_cpu(rhead->result); bytes = le32_to_cpu(msg->hdr.data_len); diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index 29ad46ec9dc..56e561a6900 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -38,7 +38,7 @@ done: /* maps */ -static int calc_bits_of(unsigned t) +static int calc_bits_of(unsigned int t) { int b = 0; while (t) { @@ -154,7 +154,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end) magic = ceph_decode_32(p); if (magic != CRUSH_MAGIC) { pr_err("crush_decode magic %x != current %x\n", - (unsigned)magic, (unsigned)CRUSH_MAGIC); + 
(unsigned int)magic, (unsigned int)CRUSH_MAGIC); goto bad; } c->max_buckets = ceph_decode_32(p); @@ -460,7 +460,7 @@ static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi) static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) { - unsigned n, m; + unsigned int n, m; ceph_decode_copy(p, &pi->v, sizeof(pi->v)); calc_pg_masks(pi); @@ -970,7 +970,7 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, objsetno = stripeno / su_per_object; *ono = objsetno * sc + stripepos; - dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono); + dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono); /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */ t = off; @@ -998,12 +998,12 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol, struct ceph_file_layout *fl, struct ceph_osdmap *osdmap) { - unsigned num, num_mask; + unsigned int num, num_mask; struct ceph_pg pgid; s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred); int poolid = le32_to_cpu(fl->fl_pg_pool); struct ceph_pg_pool_info *pool; - unsigned ps; + unsigned int ps; BUG_ON(!osdmap); @@ -1045,7 +1045,7 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, struct ceph_pg_mapping *pg; struct ceph_pg_pool_info *pool; int ruleno; - unsigned poolid, ps, pps, t; + unsigned int poolid, ps, pps, t; int preferred; poolid = le32_to_cpu(pgid.pool); diff --git a/net/compat.c b/net/compat.c index e055708b8ec..e240441a231 100644 --- a/net/compat.c +++ b/net/compat.c @@ -741,13 +741,13 @@ static unsigned char nas[21] = { }; #undef AL -asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags) +asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags) { return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); } asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, - unsigned vlen, unsigned int flags) + unsigned int vlen, unsigned int flags) { return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT); @@ -758,20 +758,20 @@ asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, uns return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); } -asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned flags) +asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned int flags) { return sys_recv(fd, buf, len, flags | MSG_CMSG_COMPAT); } asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len, - unsigned flags, struct sockaddr __user *addr, + unsigned int flags, struct sockaddr __user *addr, int __user *addrlen) { return sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr, addrlen); } asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, - unsigned vlen, unsigned int flags, + unsigned int vlen, unsigned int flags, struct compat_timespec __user *timeout) { int datagrams; diff --git a/net/core/datagram.c b/net/core/datagram.c index e4fbfd6e2bd..ae6acf6a3de 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -65,7 +65,7 @@ static inline int connection_based(struct sock *sk) return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM; } -static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync, +static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync, void *key) { unsigned long bits = (unsigned long)key; @@ 
-158,7 +158,7 @@ out_noerr: * quite explicitly by POSIX 1003.1g, don't change them without having * the standard around please. */ -struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, +struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, int *peeked, int *off, int *err) { struct sk_buff *skb; @@ -216,7 +216,7 @@ no_packet: } EXPORT_SYMBOL(__skb_recv_datagram); -struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, +struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, int noblock, int *err) { int peeked, off = 0; diff --git a/net/core/dev.c b/net/core/dev.c index 99e1d759f41..cd0981977f5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -208,7 +208,8 @@ static inline void dev_base_seq_inc(struct net *net) static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) { - unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); + unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); + return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; } @@ -299,10 +300,9 @@ static const unsigned short netdev_lock_type[] = ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, - ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211, - ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, - ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, - ARPHRD_VOID, ARPHRD_NONE}; + ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM, + ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE, + ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE}; static const char *const netdev_lock_name[] = {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", @@ -317,10 +317,9 @@ static const char *const netdev_lock_name[] = "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", - "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", - "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", - "_xmit_PHONET_PIPE", "_xmit_IEEE802154", - "_xmit_VOID", "_xmit_NONE"}; + "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", + "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", + "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; @@ -1676,10 +1675,9 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) if (skb_network_header(skb2) < skb2->data || skb2->network_header > skb2->tail) { - if (net_ratelimit()) - pr_crit("protocol %04x is buggy, dev %s\n", - ntohs(skb2->protocol), - dev->name); + net_crit_ratelimited("protocol %04x is buggy, dev %s\n", + ntohs(skb2->protocol), + dev->name); skb_reset_network_header(skb2); } @@ -2316,11 +2314,9 @@ EXPORT_SYMBOL(__skb_tx_hash); static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) { if (unlikely(queue_index >= dev->real_num_tx_queues)) { - if (net_ratelimit()) { - pr_warn("%s selects TX queue %d, but real number of TX queues is %d\n", - dev->name, queue_index, - dev->real_num_tx_queues); - } + net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", + dev->name, queue_index, + dev->real_num_tx_queues); return 0; } return queue_index; @@ -2562,17 +2558,15 @@ int dev_queue_xmit(struct sk_buff 
*skb) } } HARD_TX_UNLOCK(dev, txq); - if (net_ratelimit()) - pr_crit("Virtual device %s asks to queue packet!\n", - dev->name); + net_crit_ratelimited("Virtual device %s asks to queue packet!\n", + dev->name); } else { /* Recursion is detected! It is possible, * unfortunately */ recursion_alert: - if (net_ratelimit()) - pr_crit("Dead loop on virtual device %s, fix it urgently!\n", - dev->name); + net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", + dev->name); } } @@ -3053,9 +3047,8 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq) struct Qdisc *q; if (unlikely(MAX_RED_LOOP < ttl++)) { - if (net_ratelimit()) - pr_warn("Redir loop detected Dropping packet (%d->%d)\n", - skb->skb_iif, dev->ifindex); + net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n", + skb->skb_iif, dev->ifindex); return TC_ACT_SHOT; } @@ -3515,10 +3508,16 @@ gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) break; case GRO_DROP: - case GRO_MERGED_FREE: kfree_skb(skb); break; + case GRO_MERGED_FREE: + if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) + kmem_cache_free(skbuff_head_cache, skb); + else + __kfree_skb(skb); + break; + case GRO_HELD: case GRO_MERGED: break; @@ -3603,7 +3602,7 @@ gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, } EXPORT_SYMBOL(napi_frags_finish); -struct sk_buff *napi_frags_skb(struct napi_struct *napi) +static struct sk_buff *napi_frags_skb(struct napi_struct *napi) { struct sk_buff *skb = napi->skb; struct ethhdr *eth; @@ -3638,7 +3637,6 @@ struct sk_buff *napi_frags_skb(struct napi_struct *napi) out: return skb; } -EXPORT_SYMBOL(napi_frags_skb); gro_result_t napi_gro_frags(struct napi_struct *napi) { @@ -4592,9 +4590,9 @@ void dev_set_rx_mode(struct net_device *dev) * * Get the combination of flag bits exported through APIs to userspace. 
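 *
 * An illustrative caller (a sketch, not taken from this patch; any code
 * that inspects interface state looks much the same), unaffected by the
 * unsigned -> unsigned int spelling change since both name the same type:
 *
 *	unsigned int flags = dev_get_flags(dev);
 *
 *	if ((flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
 *		netdev_info(dev, "interface is up and running\n");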
*/ -unsigned dev_get_flags(const struct net_device *dev) +unsigned int dev_get_flags(const struct net_device *dev) { - unsigned flags; + unsigned int flags; flags = (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI | diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 626698f0db8..c4cc2bc49f0 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -21,12 +21,35 @@ * General list handling functions */ +static int __hw_addr_create_ex(struct netdev_hw_addr_list *list, + unsigned char *addr, int addr_len, + unsigned char addr_type, bool global) +{ + struct netdev_hw_addr *ha; + int alloc_size; + + alloc_size = sizeof(*ha); + if (alloc_size < L1_CACHE_BYTES) + alloc_size = L1_CACHE_BYTES; + ha = kmalloc(alloc_size, GFP_ATOMIC); + if (!ha) + return -ENOMEM; + memcpy(ha->addr, addr, addr_len); + ha->type = addr_type; + ha->refcount = 1; + ha->global_use = global; + ha->synced = false; + list_add_tail_rcu(&ha->list, &list->list); + list->count++; + + return 0; +} + static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, unsigned char *addr, int addr_len, unsigned char addr_type, bool global) { struct netdev_hw_addr *ha; - int alloc_size; if (addr_len > MAX_ADDR_LEN) return -EINVAL; @@ -46,21 +69,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, } } - - alloc_size = sizeof(*ha); - if (alloc_size < L1_CACHE_BYTES) - alloc_size = L1_CACHE_BYTES; - ha = kmalloc(alloc_size, GFP_ATOMIC); - if (!ha) - return -ENOMEM; - memcpy(ha->addr, addr, addr_len); - ha->type = addr_type; - ha->refcount = 1; - ha->global_use = global; - ha->synced = false; - list_add_tail_rcu(&ha->list, &list->list); - list->count++; - return 0; + return __hw_addr_create_ex(list, addr, addr_len, addr_type, global); } static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, @@ -377,6 +386,34 @@ EXPORT_SYMBOL(dev_addr_del_multiple); */ /** + * dev_uc_add_excl - Add a global secondary unicast address + * @dev: device + * @addr: address to add + */ +int dev_uc_add_excl(struct net_device *dev, unsigned char *addr) +{ + struct netdev_hw_addr *ha; + int err; + + netif_addr_lock_bh(dev); + list_for_each_entry(ha, &dev->uc.list, list) { + if (!memcmp(ha->addr, addr, dev->addr_len) && + ha->type == NETDEV_HW_ADDR_T_UNICAST) { + err = -EEXIST; + goto out; + } + } + err = __hw_addr_create_ex(&dev->uc, addr, dev->addr_len, + NETDEV_HW_ADDR_T_UNICAST, true); + if (!err) + __dev_set_rx_mode(dev); +out: + netif_addr_unlock_bh(dev); + return err; +} +EXPORT_SYMBOL(dev_uc_add_excl); + +/** * dev_uc_add - Add a secondary unicast address * @dev: device * @addr: address to add @@ -501,6 +538,34 @@ EXPORT_SYMBOL(dev_uc_init); * Multicast list handling functions */ +/** + * dev_mc_add_excl - Add a global secondary multicast address + * @dev: device + * @addr: address to add + */ +int dev_mc_add_excl(struct net_device *dev, unsigned char *addr) +{ + struct netdev_hw_addr *ha; + int err; + + netif_addr_lock_bh(dev); + list_for_each_entry(ha, &dev->mc.list, list) { + if (!memcmp(ha->addr, addr, dev->addr_len) && + ha->type == NETDEV_HW_ADDR_T_MULTICAST) { + err = -EEXIST; + goto out; + } + } + err = __hw_addr_create_ex(&dev->mc, addr, dev->addr_len, + NETDEV_HW_ADDR_T_MULTICAST, true); + if (!err) + __dev_set_rx_mode(dev); +out: + netif_addr_unlock_bh(dev); + return err; +} +EXPORT_SYMBOL(dev_mc_add_excl); + static int __dev_mc_add(struct net_device *dev, unsigned char *addr, bool global) { diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index a7cad741df0..3252e7e0a00 
100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -4,6 +4,8 @@ * Copyright (C) 2009 Neil Horman <nhorman@tuxdriver.com> */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/string.h> @@ -22,6 +24,7 @@ #include <linux/timer.h> #include <linux/bitops.h> #include <linux/slab.h> +#include <linux/module.h> #include <net/genetlink.h> #include <net/netevent.h> @@ -261,9 +264,15 @@ static int set_all_monitor_traces(int state) switch (state) { case TRACE_ON: + if (!try_module_get(THIS_MODULE)) { + rc = -ENODEV; + break; + } + rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL); rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL); break; + case TRACE_OFF: rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL); rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL); @@ -279,6 +288,9 @@ static int set_all_monitor_traces(int state) kfree_rcu(new_stat, rcu); } } + + module_put(THIS_MODULE); + break; default: rc = 1; @@ -381,10 +393,10 @@ static int __init init_net_drop_monitor(void) struct per_cpu_dm_data *data; int cpu, rc; - printk(KERN_INFO "Initializing network drop monitor service\n"); + pr_info("Initializing network drop monitor service\n"); if (sizeof(void *) > 8) { - printk(KERN_ERR "Unable to store program counters on this arch, Drop monitor failed\n"); + pr_err("Unable to store program counters on this arch, Drop monitor failed\n"); return -ENOSPC; } @@ -392,19 +404,19 @@ static int __init init_net_drop_monitor(void) dropmon_ops, ARRAY_SIZE(dropmon_ops)); if (rc) { - printk(KERN_ERR "Could not create drop monitor netlink family\n"); + pr_err("Could not create drop monitor netlink family\n"); return rc; } rc = register_netdevice_notifier(&dropmon_net_notifier); if (rc < 0) { - printk(KERN_CRIT "Failed to register netdevice notifier\n"); + pr_crit("Failed to register netdevice notifier\n"); goto out_unreg; } rc = 0; - for_each_present_cpu(cpu) { + for_each_possible_cpu(cpu) { data = &per_cpu(dm_cpu_data, cpu); data->cpu = cpu; INIT_WORK(&data->dm_alert_work, send_dm_alert); @@ -423,4 +435,36 @@ out: return rc; } -late_initcall(init_net_drop_monitor); +static void exit_net_drop_monitor(void) +{ + struct per_cpu_dm_data *data; + int cpu; + + BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier)); + + /* + * Because of the module_get/put we do in the trace state change path + * we are guarnateed not to have any current users when we get here + * all we need to do is make sure that we don't have any running timers + * or pending schedule calls + */ + + for_each_possible_cpu(cpu) { + data = &per_cpu(dm_cpu_data, cpu); + del_timer_sync(&data->send_timer); + cancel_work_sync(&data->dm_alert_work); + /* + * At this point, we should have exclusive access + * to this struct and can free the skb inside it + */ + kfree_skb(data->skb); + } + + BUG_ON(genl_unregister_family(&net_drop_monitor_family)); +} + +module_init(init_net_drop_monitor); +module_exit(exit_net_drop_monitor); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>"); diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 6d6d7d25caa..9c2afb48027 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -17,6 +17,8 @@ #include <linux/errno.h> #include <linux/ethtool.h> #include <linux/netdevice.h> +#include <linux/net_tstamp.h> +#include <linux/phy.h> #include <linux/bitops.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> @@ -36,6 +38,17 @@ u32 ethtool_op_get_link(struct net_device 
*dev) } EXPORT_SYMBOL(ethtool_op_get_link); +int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) +{ + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + return 0; +} +EXPORT_SYMBOL(ethtool_op_get_ts_info); + /* Handlers for each ethtool command */ #define ETHTOOL_DEV_FEATURE_WORDS ((NETDEV_FEATURE_COUNT + 31) / 32) @@ -738,18 +751,17 @@ static int ethtool_get_link(struct net_device *dev, char __user *useraddr) return 0; } -static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) +static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr, + int (*getter)(struct net_device *, + struct ethtool_eeprom *, u8 *), + u32 total_len) { struct ethtool_eeprom eeprom; - const struct ethtool_ops *ops = dev->ethtool_ops; void __user *userbuf = useraddr + sizeof(eeprom); u32 bytes_remaining; u8 *data; int ret = 0; - if (!ops->get_eeprom || !ops->get_eeprom_len) - return -EOPNOTSUPP; - if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) return -EFAULT; @@ -758,7 +770,7 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) return -EINVAL; /* Check for exceeding total eeprom len */ - if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) + if (eeprom.offset + eeprom.len > total_len) return -EINVAL; data = kmalloc(PAGE_SIZE, GFP_USER); @@ -769,7 +781,7 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) while (bytes_remaining > 0) { eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE); - ret = ops->get_eeprom(dev, &eeprom, data); + ret = getter(dev, &eeprom, data); if (ret) break; if (copy_to_user(userbuf, data, eeprom.len)) { @@ -790,6 +802,17 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) return ret; } +static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + + if (!ops->get_eeprom || !ops->get_eeprom_len) + return -EOPNOTSUPP; + + return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom, + ops->get_eeprom_len(dev)); +} + static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr) { struct ethtool_eeprom eeprom; @@ -1278,6 +1301,81 @@ out: return ret; } +static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr) +{ + int err = 0; + struct ethtool_ts_info info; + const struct ethtool_ops *ops = dev->ethtool_ops; + struct phy_device *phydev = dev->phydev; + + memset(&info, 0, sizeof(info)); + info.cmd = ETHTOOL_GET_TS_INFO; + + if (phydev && phydev->drv && phydev->drv->ts_info) { + + err = phydev->drv->ts_info(phydev, &info); + + } else if (dev->ethtool_ops && dev->ethtool_ops->get_ts_info) { + + err = ops->get_ts_info(dev, &info); + + } else { + info.so_timestamping = + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info.phc_index = -1; + } + + if (err) + return err; + + if (copy_to_user(useraddr, &info, sizeof(info))) + err = -EFAULT; + + return err; +} + +static int ethtool_get_module_info(struct net_device *dev, + void __user *useraddr) +{ + int ret; + struct ethtool_modinfo modinfo; + const struct ethtool_ops *ops = dev->ethtool_ops; + + if (!ops->get_module_info) + return -EOPNOTSUPP; + + if (copy_from_user(&modinfo, useraddr, sizeof(modinfo))) + return -EFAULT; + + ret = ops->get_module_info(dev, &modinfo); + if (ret) + return ret; + + if (copy_to_user(useraddr, &modinfo, sizeof(modinfo))) + return -EFAULT; + + return 0; 
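	/*
	 * Driver-side sketch for the new module EEPROM pair (hypothetical
	 * "foo" driver, not part of this patch; the SFF-8079 constants are
	 * from the ethtool.h additions that accompany ETHTOOL_GMODULEINFO):
	 * the driver first describes the EEPROM layout via get_module_info()
	 * and then serves reads through get_module_eeprom(), both new
	 * members of struct ethtool_ops.
	 *
	 *	static int foo_get_module_info(struct net_device *dev,
	 *				       struct ethtool_modinfo *modinfo)
	 *	{
	 *		modinfo->type = ETH_MODULE_SFF_8079;
	 *		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	 *		return 0;
	 *	}
	 */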
+} + +static int ethtool_get_module_eeprom(struct net_device *dev, + void __user *useraddr) +{ + int ret; + struct ethtool_modinfo modinfo; + const struct ethtool_ops *ops = dev->ethtool_ops; + + if (!ops->get_module_info || !ops->get_module_eeprom) + return -EOPNOTSUPP; + + ret = ops->get_module_info(dev, &modinfo); + if (ret) + return ret; + + return ethtool_get_any_eeprom(dev, useraddr, ops->get_module_eeprom, + modinfo.eeprom_len); +} + /* The main entry point in this file. Called from net/core/dev.c */ int dev_ethtool(struct net *net, struct ifreq *ifr) @@ -1295,11 +1393,13 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) return -EFAULT; if (!dev->ethtool_ops) { - /* ETHTOOL_GDRVINFO does not require any driver support. - * It is also unprivileged and does not change anything, - * so we can take a shortcut to it. */ + /* A few commands do not require any driver support, + * are unprivileged, and do not change anything, so we + * can take a shortcut to them. */ if (ethcmd == ETHTOOL_GDRVINFO) return ethtool_get_drvinfo(dev, useraddr); + else if (ethcmd == ETHTOOL_GET_TS_INFO) + return ethtool_get_ts_info(dev, useraddr); else return -EOPNOTSUPP; } @@ -1330,6 +1430,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GRXCLSRULE: case ETHTOOL_GRXCLSRLALL: case ETHTOOL_GFEATURES: + case ETHTOOL_GET_TS_INFO: break; default: if (!capable(CAP_NET_ADMIN)) @@ -1496,6 +1597,15 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GET_DUMP_DATA: rc = ethtool_get_dump_data(dev, useraddr); break; + case ETHTOOL_GET_TS_INFO: + rc = ethtool_get_ts_info(dev, useraddr); + break; + case ETHTOOL_GMODULEINFO: + rc = ethtool_get_module_info(dev, useraddr); + break; + case ETHTOOL_GMODULEEEPROM: + rc = ethtool_get_module_eeprom(dev, useraddr); + break; default: rc = -EOPNOTSUPP; } diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index c02e63c908d..72cceb79d0d 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -542,7 +542,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, frh = nlmsg_data(nlh); frh->family = ops->family; frh->table = rule->table; - NLA_PUT_U32(skb, FRA_TABLE, rule->table); + if (nla_put_u32(skb, FRA_TABLE, rule->table)) + goto nla_put_failure; frh->res1 = 0; frh->res2 = 0; frh->action = rule->action; @@ -553,31 +554,28 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, frh->flags |= FIB_RULE_UNRESOLVED; if (rule->iifname[0]) { - NLA_PUT_STRING(skb, FRA_IIFNAME, rule->iifname); - + if (nla_put_string(skb, FRA_IIFNAME, rule->iifname)) + goto nla_put_failure; if (rule->iifindex == -1) frh->flags |= FIB_RULE_IIF_DETACHED; } if (rule->oifname[0]) { - NLA_PUT_STRING(skb, FRA_OIFNAME, rule->oifname); - + if (nla_put_string(skb, FRA_OIFNAME, rule->oifname)) + goto nla_put_failure; if (rule->oifindex == -1) frh->flags |= FIB_RULE_OIF_DETACHED; } - if (rule->pref) - NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref); - - if (rule->mark) - NLA_PUT_U32(skb, FRA_FWMARK, rule->mark); - - if (rule->mark_mask || rule->mark) - NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask); - - if (rule->target) - NLA_PUT_U32(skb, FRA_GOTO, rule->target); - + if ((rule->pref && + nla_put_u32(skb, FRA_PRIORITY, rule->pref)) || + (rule->mark && + nla_put_u32(skb, FRA_FWMARK, rule->mark)) || + ((rule->mark_mask || rule->mark) && + nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) || + (rule->target && + nla_put_u32(skb, FRA_GOTO, rule->target))) + goto nla_put_failure; if (ops->fill(rule, skb, frh) < 0) goto nla_put_failure; diff 
--git a/net/core/filter.c b/net/core/filter.c index 6f755cca452..47a5f055e7f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -317,6 +317,9 @@ load_b: case BPF_S_ANC_CPU: A = raw_smp_processor_id(); continue; + case BPF_S_ANC_ALU_XOR_X: + A ^= X; + continue; case BPF_S_ANC_NLATTR: { struct nlattr *nla; @@ -528,7 +531,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen) * Compare this with conditional jumps below, * where offsets are limited. --ANK (981016) */ - if (ftest->k >= (unsigned)(flen-pc-1)) + if (ftest->k >= (unsigned int)(flen-pc-1)) return -EINVAL; break; case BPF_S_JMP_JEQ_K: @@ -561,6 +564,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen) ANCILLARY(HATYPE); ANCILLARY(RXHASH); ANCILLARY(CPU); + ANCILLARY(ALU_XOR_X); } } ftest->code = code; @@ -589,6 +593,67 @@ void sk_filter_release_rcu(struct rcu_head *rcu) } EXPORT_SYMBOL(sk_filter_release_rcu); +static int __sk_prepare_filter(struct sk_filter *fp) +{ + int err; + + fp->bpf_func = sk_run_filter; + + err = sk_chk_filter(fp->insns, fp->len); + if (err) + return err; + + bpf_jit_compile(fp); + return 0; +} + +/** + * sk_unattached_filter_create - create an unattached filter + * @fprog: the filter program + * @sk: the socket to use + * + * Create a filter independent ofr any socket. We first run some + * sanity checks on it to make sure it does not explode on us later. + * If an error occurs or there is insufficient memory for the filter + * a negative errno code is returned. On success the return is zero. + */ +int sk_unattached_filter_create(struct sk_filter **pfp, + struct sock_fprog *fprog) +{ + struct sk_filter *fp; + unsigned int fsize = sizeof(struct sock_filter) * fprog->len; + int err; + + /* Make sure new filter is there and in the right amounts. 
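 *
 * Caller's view, a sketch not taken from this patch: "fprog" is a
 * struct sock_fprog the caller built itself and "skb" the packet to
 * classify; the resulting filter is run with SK_RUN_FILTER() and freed
 * with sk_unattached_filter_destroy().
 *
 *	struct sk_filter *filter;
 *	int err;
 *
 *	err = sk_unattached_filter_create(&filter, &fprog);
 *	if (err)
 *		return err;
 *	drop = !SK_RUN_FILTER(filter, skb);
 *	sk_unattached_filter_destroy(filter);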
*/ + if (fprog->filter == NULL) + return -EINVAL; + + fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL); + if (!fp) + return -ENOMEM; + memcpy(fp->insns, fprog->filter, fsize); + + atomic_set(&fp->refcnt, 1); + fp->len = fprog->len; + + err = __sk_prepare_filter(fp); + if (err) + goto free_mem; + + *pfp = fp; + return 0; +free_mem: + kfree(fp); + return err; +} +EXPORT_SYMBOL_GPL(sk_unattached_filter_create); + +void sk_unattached_filter_destroy(struct sk_filter *fp) +{ + sk_filter_release(fp); +} +EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy); + /** * sk_attach_filter - attach a socket filter * @fprog: the filter program @@ -619,16 +684,13 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) atomic_set(&fp->refcnt, 1); fp->len = fprog->len; - fp->bpf_func = sk_run_filter; - err = sk_chk_filter(fp->insns, fp->len); + err = __sk_prepare_filter(fp); if (err) { sk_filter_uncharge(sk, fp); return err; } - bpf_jit_compile(fp); - old_fp = rcu_dereference_protected(sk->sk_filter, sock_owned_by_user(sk)); rcu_assign_pointer(sk->sk_filter, fp); diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index 0452eb27a27..ddedf211e58 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -27,7 +27,8 @@ static inline int gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size) { - NLA_PUT(d->skb, type, size, buf); + if (nla_put(d->skb, type, size, buf)) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/core/kmap_skb.h b/net/core/kmap_skb.h deleted file mode 100644 index 52d0a445904..00000000000 --- a/net/core/kmap_skb.h +++ /dev/null @@ -1,19 +0,0 @@ -#include <linux/highmem.h> - -static inline void *kmap_skb_frag(const skb_frag_t *frag) -{ -#ifdef CONFIG_HIGHMEM - BUG_ON(in_irq()); - - local_bh_disable(); -#endif - return kmap_atomic(skb_frag_page(frag)); -} - -static inline void kunmap_skb_frag(void *vaddr) -{ - kunmap_atomic(vaddr); -#ifdef CONFIG_HIGHMEM - local_bh_enable(); -#endif -} diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 0a68045782d..eb09f8bbbf0 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -15,6 +15,8 @@ * Harald Welte Add neighbour cache statistics like rtstat */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/slab.h> #include <linux/types.h> #include <linux/kernel.h> @@ -712,14 +714,13 @@ void neigh_destroy(struct neighbour *neigh) NEIGH_CACHE_STAT_INC(neigh->tbl, destroys); if (!neigh->dead) { - printk(KERN_WARNING - "Destroying alive neighbour %p\n", neigh); + pr_warn("Destroying alive neighbour %p\n", neigh); dump_stack(); return; } if (neigh_del_timer(neigh)) - printk(KERN_WARNING "Impossible event.\n"); + pr_warn("Impossible event\n"); skb_queue_purge(&neigh->arp_queue); neigh->arp_queue_len_bytes = 0; @@ -890,7 +891,7 @@ static void neigh_timer_handler(unsigned long arg) { unsigned long now, next; struct neighbour *neigh = (struct neighbour *)arg; - unsigned state; + unsigned int state; int notify = 0; write_lock(&neigh->lock); @@ -1500,7 +1501,7 @@ static void neigh_parms_destroy(struct neigh_parms *parms) static struct lock_class_key neigh_table_proxy_queue_class; -void neigh_table_init_no_netlink(struct neigh_table *tbl) +static void neigh_table_init_no_netlink(struct neigh_table *tbl) { unsigned long now = jiffies; unsigned long phsize; @@ -1538,7 +1539,6 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl) tbl->last_flush = now; tbl->last_rand = now + tbl->parms.reachable_time * 20; } -EXPORT_SYMBOL(neigh_table_init_no_netlink); void neigh_table_init(struct neigh_table 
*tbl) { @@ -1555,8 +1555,8 @@ void neigh_table_init(struct neigh_table *tbl) write_unlock(&neigh_tbl_lock); if (unlikely(tmp)) { - printk(KERN_ERR "NEIGH: Registering multiple tables for " - "family %d\n", tbl->family); + pr_err("Registering multiple tables for family %d\n", + tbl->family); dump_stack(); } } @@ -1572,7 +1572,7 @@ int neigh_table_clear(struct neigh_table *tbl) pneigh_queue_purge(&tbl->proxy_queue); neigh_ifdown(tbl, NULL); if (atomic_read(&tbl->entries)) - printk(KERN_CRIT "neighbour leakage\n"); + pr_crit("neighbour leakage\n"); write_lock(&neigh_tbl_lock); for (tp = &neigh_tables; *tp; tp = &(*tp)->next) { if (*tp == tbl) { @@ -1768,29 +1768,29 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms) if (nest == NULL) return -ENOBUFS; - if (parms->dev) - NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex); - - NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)); - NLA_PUT_U32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes); - /* approximative value for deprecated QUEUE_LEN (in packets) */ - NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, - DIV_ROUND_UP(parms->queue_len_bytes, - SKB_TRUESIZE(ETH_FRAME_LEN))); - NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen); - NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes); - NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes); - NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes); - NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time); - NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME, - parms->base_reachable_time); - NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime); - NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time); - NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time); - NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay); - NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay); - NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime); - + if ((parms->dev && + nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) || + nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) || + nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) || + /* approximative value for deprecated QUEUE_LEN (in packets) */ + nla_put_u32(skb, NDTPA_QUEUE_LEN, + DIV_ROUND_UP(parms->queue_len_bytes, + SKB_TRUESIZE(ETH_FRAME_LEN))) || + nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) || + nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) || + nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) || + nla_put_u32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes) || + nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) || + nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME, + parms->base_reachable_time) || + nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime) || + nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME, + parms->delay_probe_time) || + nla_put_msecs(skb, NDTPA_RETRANS_TIME, parms->retrans_time) || + nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay) || + nla_put_msecs(skb, NDTPA_PROXY_DELAY, parms->proxy_delay) || + nla_put_msecs(skb, NDTPA_LOCKTIME, parms->locktime)) + goto nla_put_failure; return nla_nest_end(skb, nest); nla_put_failure: @@ -1815,12 +1815,12 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, ndtmsg->ndtm_pad1 = 0; ndtmsg->ndtm_pad2 = 0; - NLA_PUT_STRING(skb, NDTA_NAME, tbl->id); - NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval); - NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1); - NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2); - NLA_PUT_U32(skb, 
NDTA_THRESH3, tbl->gc_thresh3); - + if (nla_put_string(skb, NDTA_NAME, tbl->id) || + nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) || + nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) || + nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) || + nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3)) + goto nla_put_failure; { unsigned long now = jiffies; unsigned int flush_delta = now - tbl->last_flush; @@ -1841,7 +1841,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1); rcu_read_unlock_bh(); - NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc); + if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc)) + goto nla_put_failure; } { @@ -1866,7 +1867,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, ndst.ndts_forced_gc_runs += st->forced_gc_runs; } - NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst); + if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst)) + goto nla_put_failure; } BUG_ON(tbl->parms.dev); @@ -2137,7 +2139,8 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh, ndm->ndm_type = neigh->type; ndm->ndm_ifindex = neigh->dev->ifindex; - NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key); + if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key)) + goto nla_put_failure; read_lock_bh(&neigh->lock); ndm->ndm_state = neigh->nud_state; @@ -2157,8 +2160,9 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh, ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1; read_unlock_bh(&neigh->lock); - NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes)); - NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci); + if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) || + nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) + goto nla_put_failure; return nlmsg_end(skb, nlh); @@ -2187,7 +2191,8 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn, ndm->ndm_ifindex = pn->dev->ifindex; ndm->ndm_state = NUD_NONE; - NLA_PUT(skb, NDA_DST, tbl->key_len, pn->key); + if (nla_put(skb, NDA_DST, tbl->key_len, pn->key)) + goto nla_put_failure; return nlmsg_end(skb, nlh); @@ -2795,7 +2800,6 @@ enum { static struct neigh_sysctl_table { struct ctl_table_header *sysctl_header; struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1]; - char *dev_name; } neigh_sysctl_template __read_mostly = { .neigh_vars = { [NEIGH_VAR_MCAST_PROBE] = { @@ -2921,19 +2925,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, { struct neigh_sysctl_table *t; const char *dev_name_source = NULL; - -#define NEIGH_CTL_PATH_ROOT 0 -#define NEIGH_CTL_PATH_PROTO 1 -#define NEIGH_CTL_PATH_NEIGH 2 -#define NEIGH_CTL_PATH_DEV 3 - - struct ctl_path neigh_path[] = { - { .procname = "net", }, - { .procname = "proto", }, - { .procname = "neigh", }, - { .procname = "default", }, - { }, - }; + char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ]; t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL); if (!t) @@ -2961,7 +2953,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0, sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL])); } else { - dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname; + dev_name_source = "default"; t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1); t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1; t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2; @@ -2984,23 +2976,16 @@ int neigh_sysctl_register(struct net_device *dev, struct 
neigh_parms *p, t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev; } - t->dev_name = kstrdup(dev_name_source, GFP_KERNEL); - if (!t->dev_name) - goto free; - - neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name; - neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name; - + snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s", + p_name, dev_name_source); t->sysctl_header = - register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars); + register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars); if (!t->sysctl_header) - goto free_procname; + goto free; p->sysctl_table = t; return 0; -free_procname: - kfree(t->dev_name); free: kfree(t); err: @@ -3013,8 +2998,7 @@ void neigh_sysctl_unregister(struct neigh_parms *p) if (p->sysctl_table) { struct neigh_sysctl_table *t = p->sysctl_table; p->sysctl_table = NULL; - unregister_sysctl_table(t->sysctl_header); - kfree(t->dev_name); + unregister_net_sysctl_table(t->sysctl_header); kfree(t); } } diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 495586232aa..fdf9e61d065 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -74,15 +74,14 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr, int (*set)(struct net_device *, unsigned long)) { struct net_device *net = to_net_dev(dev); - char *endp; unsigned long new; int ret = -EINVAL; if (!capable(CAP_NET_ADMIN)) return -EPERM; - new = simple_strtoul(buf, &endp, 0); - if (endp == buf) + ret = kstrtoul(buf, 0, &new); + if (ret) goto err; if (!rtnl_trylock()) @@ -232,7 +231,7 @@ NETDEVICE_SHOW(flags, fmt_hex); static int change_flags(struct net_device *net, unsigned long new_flags) { - return dev_change_flags(net, (unsigned) new_flags); + return dev_change_flags(net, (unsigned int) new_flags); } static ssize_t store_flags(struct device *dev, struct device_attribute *attr, @@ -582,7 +581,7 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue, return err; } - map = kzalloc(max_t(unsigned, + map = kzalloc(max_t(unsigned int, RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), GFP_KERNEL); if (!map) { @@ -903,7 +902,7 @@ static ssize_t bql_set_hold_time(struct netdev_queue *queue, const char *buf, size_t len) { struct dql *dql = &queue->dql; - unsigned value; + unsigned int value; int err; err = kstrtouint(buf, 10, &value); @@ -1107,7 +1106,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue, return err; } - new_dev_maps = kzalloc(max_t(unsigned, + new_dev_maps = kzalloc(max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL); if (!new_dev_maps) { free_cpumask_var(mask); diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 31a5ae51a45..dddbacb8f28 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -1,3 +1,5 @@ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/workqueue.h> #include <linux/rtnetlink.h> #include <linux/cache.h> @@ -212,8 +214,8 @@ static void net_free(struct net *net) { #ifdef NETNS_REFCNT_DEBUG if (unlikely(atomic_read(&net->use_count) != 0)) { - printk(KERN_EMERG "network namespace not free! Usage: %d\n", - atomic_read(&net->use_count)); + pr_emerg("network namespace not free! 
Usage: %d\n", + atomic_read(&net->use_count)); return; } #endif diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index ba6900f7390..09eda68b676 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -9,6 +9,8 @@ * Authors: Neil Horman <nhorman@tuxdriver.com> */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> @@ -88,7 +90,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len) old_priomap = rtnl_dereference(dev->priomap); if (!new_priomap) { - printk(KERN_WARNING "Unable to alloc new priomap!\n"); + pr_warn("Unable to alloc new priomap!\n"); return; } @@ -136,7 +138,7 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp) ret = get_prioidx(&cs->prioidx); if (ret != 0) { - printk(KERN_WARNING "No space in priority index array\n"); + pr_warn("No space in priority index array\n"); kfree(cs); return ERR_PTR(ret); } diff --git a/net/core/pktgen.c b/net/core/pktgen.c index b81369b6ddc..cce9e53528b 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -320,7 +320,7 @@ struct pktgen_dev { (see RFC 3260, sec. 4) */ /* MPLS */ - unsigned nr_labels; /* Depth of stack, 0 = no MPLS */ + unsigned int nr_labels; /* Depth of stack, 0 = no MPLS */ __be32 labels[MAX_MPLS_LABELS]; /* VLAN/SVLAN (802.1Q/Q-in-Q) */ @@ -373,10 +373,10 @@ struct pktgen_dev { */ char odevname[32]; struct flow_state *flows; - unsigned cflows; /* Concurrent flows (config) */ - unsigned lflow; /* Flow length (config) */ - unsigned nflows; /* accumulated flows (stats) */ - unsigned curfl; /* current sequenced flow (state)*/ + unsigned int cflows; /* Concurrent flows (config) */ + unsigned int lflow; /* Flow length (config) */ + unsigned int nflows; /* accumulated flows (stats) */ + unsigned int curfl; /* current sequenced flow (state)*/ u16 queue_map_min; u16 queue_map_max; @@ -592,7 +592,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v) pkt_dev->src_mac_count, pkt_dev->dst_mac_count); if (pkt_dev->nr_labels) { - unsigned i; + unsigned int i; seq_printf(seq, " mpls: "); for (i = 0; i < pkt_dev->nr_labels; i++) seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]), @@ -812,7 +812,7 @@ done_str: static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) { - unsigned n = 0; + unsigned int n = 0; char c; ssize_t i = 0; int len; @@ -891,8 +891,8 @@ static ssize_t pktgen_if_write(struct file *file, if (copy_from_user(tb, user_buffer, copy)) return -EFAULT; tb[copy] = 0; - printk(KERN_DEBUG "pktgen: %s,%lu buffer -:%s:-\n", name, - (unsigned long)count, tb); + pr_debug("%s,%lu buffer -:%s:-\n", + name, (unsigned long)count, tb); } if (!strcmp(name, "min_pkt_size")) { @@ -1261,8 +1261,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->cur_daddr = pkt_dev->daddr_min; } if (debug) - printk(KERN_DEBUG "pktgen: dst_min set to: %s\n", - pkt_dev->dst_min); + pr_debug("dst_min set to: %s\n", pkt_dev->dst_min); i += len; sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min); return count; @@ -1284,8 +1283,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->cur_daddr = pkt_dev->daddr_max; } if (debug) - printk(KERN_DEBUG "pktgen: dst_max set to: %s\n", - pkt_dev->dst_max); + pr_debug("dst_max set to: %s\n", pkt_dev->dst_max); i += len; sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max); return count; @@ -1307,7 +1305,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr; if (debug) - 
printk(KERN_DEBUG "pktgen: dst6 set to: %s\n", buf); + pr_debug("dst6 set to: %s\n", buf); i += len; sprintf(pg_result, "OK: dst6=%s", buf); @@ -1329,7 +1327,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr; if (debug) - printk(KERN_DEBUG "pktgen: dst6_min set to: %s\n", buf); + pr_debug("dst6_min set to: %s\n", buf); i += len; sprintf(pg_result, "OK: dst6_min=%s", buf); @@ -1350,7 +1348,7 @@ static ssize_t pktgen_if_write(struct file *file, snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr); if (debug) - printk(KERN_DEBUG "pktgen: dst6_max set to: %s\n", buf); + pr_debug("dst6_max set to: %s\n", buf); i += len; sprintf(pg_result, "OK: dst6_max=%s", buf); @@ -1373,7 +1371,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr; if (debug) - printk(KERN_DEBUG "pktgen: src6 set to: %s\n", buf); + pr_debug("src6 set to: %s\n", buf); i += len; sprintf(pg_result, "OK: src6=%s", buf); @@ -1394,8 +1392,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->cur_saddr = pkt_dev->saddr_min; } if (debug) - printk(KERN_DEBUG "pktgen: src_min set to: %s\n", - pkt_dev->src_min); + pr_debug("src_min set to: %s\n", pkt_dev->src_min); i += len; sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min); return count; @@ -1415,8 +1412,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->cur_saddr = pkt_dev->saddr_max; } if (debug) - printk(KERN_DEBUG "pktgen: src_max set to: %s\n", - pkt_dev->src_max); + pr_debug("src_max set to: %s\n", pkt_dev->src_max); i += len; sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max); return count; @@ -1510,7 +1506,7 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "mpls")) { - unsigned n, cnt; + unsigned int n, cnt; len = get_labels(&user_buffer[i], pkt_dev); if (len < 0) @@ -1527,7 +1523,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->svlan_id = 0xffff; if (debug) - printk(KERN_DEBUG "pktgen: VLAN/SVLAN auto turned off\n"); + pr_debug("VLAN/SVLAN auto turned off\n"); } return count; } @@ -1542,10 +1538,10 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->vlan_id = value; /* turn on VLAN */ if (debug) - printk(KERN_DEBUG "pktgen: VLAN turned on\n"); + pr_debug("VLAN turned on\n"); if (debug && pkt_dev->nr_labels) - printk(KERN_DEBUG "pktgen: MPLS auto turned off\n"); + pr_debug("MPLS auto turned off\n"); pkt_dev->nr_labels = 0; /* turn off MPLS */ sprintf(pg_result, "OK: vlan_id=%u", pkt_dev->vlan_id); @@ -1554,7 +1550,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->svlan_id = 0xffff; if (debug) - printk(KERN_DEBUG "pktgen: VLAN/SVLAN turned off\n"); + pr_debug("VLAN/SVLAN turned off\n"); } return count; } @@ -1599,10 +1595,10 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->svlan_id = value; /* turn on SVLAN */ if (debug) - printk(KERN_DEBUG "pktgen: SVLAN turned on\n"); + pr_debug("SVLAN turned on\n"); if (debug && pkt_dev->nr_labels) - printk(KERN_DEBUG "pktgen: MPLS auto turned off\n"); + pr_debug("MPLS auto turned off\n"); pkt_dev->nr_labels = 0; /* turn off MPLS */ sprintf(pg_result, "OK: svlan_id=%u", pkt_dev->svlan_id); @@ -1611,7 +1607,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->svlan_id = 0xffff; if (debug) - printk(KERN_DEBUG "pktgen: VLAN/SVLAN turned off\n"); + pr_debug("VLAN/SVLAN turned off\n"); } return count; } @@ -1779,8 +1775,7 @@ static ssize_t pktgen_thread_write(struct file *file, i += len; if (debug) - printk(KERN_DEBUG "pktgen: 
t=%s, count=%lu\n", - name, (unsigned long)count); + pr_debug("t=%s, count=%lu\n", name, (unsigned long)count); if (!t) { pr_err("ERROR: No thread\n"); @@ -2324,7 +2319,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) } if (pkt_dev->flags & F_MPLS_RND) { - unsigned i; + unsigned int i; for (i = 0; i < pkt_dev->nr_labels; i++) if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) pkt_dev->labels[i] = MPLS_STACK_BOTTOM | @@ -2550,7 +2545,7 @@ err: static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev) { - unsigned i; + unsigned int i; for (i = 0; i < pkt_dev->nr_labels; i++) *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM; @@ -2934,8 +2929,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, if (datalen < sizeof(struct pktgen_hdr)) { datalen = sizeof(struct pktgen_hdr); - if (net_ratelimit()) - pr_info("increased datalen to %d\n", datalen); + net_info_ratelimited("increased datalen to %d\n", datalen); } udph->source = htons(pkt_dev->cur_udp_src); @@ -3365,8 +3359,8 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) pkt_dev->errors++; break; default: /* Drivers are not supposed to return other values! */ - if (net_ratelimit()) - pr_info("%s xmit error: %d\n", pkt_dev->odevname, ret); + net_info_ratelimited("%s xmit error: %d\n", + pkt_dev->odevname, ret); pkt_dev->errors++; /* fallthru */ case NETDEV_TX_LOCKED: diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 90430b776ec..21318d15bbc 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -35,7 +35,9 @@ #include <linux/security.h> #include <linux/mutex.h> #include <linux/if_addr.h> +#include <linux/if_bridge.h> #include <linux/pci.h> +#include <linux/etherdevice.h> #include <asm/uaccess.h> @@ -552,7 +554,7 @@ void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data } EXPORT_SYMBOL(__rta_fill); -int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo) +int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo) { struct sock *rtnl = net->rtnl; int err = 0; @@ -607,7 +609,8 @@ int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics) for (i = 0; i < RTAX_MAX; i++) { if (metrics[i]) { valid++; - NLA_PUT_U32(skb, i+1, metrics[i]); + if (nla_put_u32(skb, i+1, metrics[i])) + goto nla_put_failure; } } @@ -782,6 +785,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(4) /* IFLA_MTU */ + nla_total_size(4) /* IFLA_LINK */ + nla_total_size(4) /* IFLA_MASTER */ + + nla_total_size(4) /* IFLA_PROMISCUITY */ + nla_total_size(1) /* IFLA_OPERSTATE */ + nla_total_size(1) /* IFLA_LINKMODE */ + nla_total_size(ext_filter_mask @@ -807,7 +811,8 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev) vf_port = nla_nest_start(skb, IFLA_VF_PORT); if (!vf_port) goto nla_put_failure; - NLA_PUT_U32(skb, IFLA_PORT_VF, vf); + if (nla_put_u32(skb, IFLA_PORT_VF, vf)) + goto nla_put_failure; err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb); if (err == -EMSGSIZE) goto nla_put_failure; @@ -891,25 +896,23 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, ifm->ifi_flags = dev_get_flags(dev); ifm->ifi_change = change; - NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); - NLA_PUT_U32(skb, IFLA_TXQLEN, dev->tx_queue_len); - NLA_PUT_U8(skb, IFLA_OPERSTATE, - netif_running(dev) ? 
dev->operstate : IF_OPER_DOWN); - NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode); - NLA_PUT_U32(skb, IFLA_MTU, dev->mtu); - NLA_PUT_U32(skb, IFLA_GROUP, dev->group); - - if (dev->ifindex != dev->iflink) - NLA_PUT_U32(skb, IFLA_LINK, dev->iflink); - - if (dev->master) - NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex); - - if (dev->qdisc) - NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc->ops->id); - - if (dev->ifalias) - NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias); + if (nla_put_string(skb, IFLA_IFNAME, dev->name) || + nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) || + nla_put_u8(skb, IFLA_OPERSTATE, + netif_running(dev) ? dev->operstate : IF_OPER_DOWN) || + nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) || + nla_put_u32(skb, IFLA_MTU, dev->mtu) || + nla_put_u32(skb, IFLA_GROUP, dev->group) || + nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) || + (dev->ifindex != dev->iflink && + nla_put_u32(skb, IFLA_LINK, dev->iflink)) || + (dev->master && + nla_put_u32(skb, IFLA_MASTER, dev->master->ifindex)) || + (dev->qdisc && + nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) || + (dev->ifalias && + nla_put_string(skb, IFLA_IFALIAS, dev->ifalias))) + goto nla_put_failure; if (1) { struct rtnl_link_ifmap map = { @@ -920,12 +923,14 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, .dma = dev->dma, .port = dev->if_port, }; - NLA_PUT(skb, IFLA_MAP, sizeof(map), &map); + if (nla_put(skb, IFLA_MAP, sizeof(map), &map)) + goto nla_put_failure; } if (dev->addr_len) { - NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); - NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast); + if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) || + nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast)) + goto nla_put_failure; } attr = nla_reserve(skb, IFLA_STATS, @@ -942,8 +947,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, goto nla_put_failure; copy_rtnl_link_stats64(nla_data(attr), stats); - if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) - NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)); + if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) && + nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent))) + goto nla_put_failure; if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) { @@ -986,12 +992,13 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, nla_nest_cancel(skb, vfinfo); goto nla_put_failure; } - NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac); - NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan); - NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), - &vf_tx_rate); - NLA_PUT(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk), - &vf_spoofchk); + if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) || + nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) || + nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), + &vf_tx_rate) || + nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk), + &vf_spoofchk)) + goto nla_put_failure; nla_nest_end(skb, vf); } nla_nest_end(skb, vfinfo); @@ -1113,6 +1120,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_PORT_SELF] = { .type = NLA_NESTED }, [IFLA_AF_SPEC] = { .type = NLA_NESTED }, [IFLA_EXT_MASK] = { .type = NLA_U32 }, + [IFLA_PROMISCUITY] = { .type = NLA_U32 }, }; EXPORT_SYMBOL(ifla_policy); @@ -1516,11 +1524,9 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, err = 0; errout: - if (err < 0 && modified && net_ratelimit()) - printk(KERN_WARNING "A 
link change request failed with " - "some changes committed already. Interface %s may " - "have been left with an inconsistent configuration, " - "please check.\n", dev->name); + if (err < 0 && modified) + net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n", + dev->name); if (send_addr_notify) call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); @@ -1634,14 +1640,14 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net, int err; struct net_device *dev; unsigned int num_queues = 1; - unsigned int real_num_queues = 1; if (ops->get_tx_queues) { - err = ops->get_tx_queues(src_net, tb, &num_queues, - &real_num_queues); - if (err) + err = ops->get_tx_queues(src_net, tb); + if (err < 0) goto err; + num_queues = err; } + err = -ENOMEM; dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues); if (!dev) @@ -1947,7 +1953,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } -void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change) +void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change) { struct net *net = dev_net(dev); struct sk_buff *skb; @@ -1972,6 +1978,267 @@ errout: rtnl_set_sk_err(net, RTNLGRP_LINK, err); } +static int nlmsg_populate_fdb_fill(struct sk_buff *skb, + struct net_device *dev, + u8 *addr, u32 pid, u32 seq, + int type, unsigned int flags) +{ + struct nlmsghdr *nlh; + struct ndmsg *ndm; + + nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI); + if (!nlh) + return -EMSGSIZE; + + ndm = nlmsg_data(nlh); + ndm->ndm_family = AF_BRIDGE; + ndm->ndm_pad1 = 0; + ndm->ndm_pad2 = 0; + ndm->ndm_flags = flags; + ndm->ndm_type = 0; + ndm->ndm_ifindex = dev->ifindex; + ndm->ndm_state = NUD_PERMANENT; + + if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr)) + goto nla_put_failure; + + return nlmsg_end(skb, nlh); + +nla_put_failure: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; +} + +static inline size_t rtnl_fdb_nlmsg_size(void) +{ + return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN); +} + +static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type) +{ + struct net *net = dev_net(dev); + struct sk_buff *skb; + int err = -ENOBUFS; + + skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC); + if (!skb) + goto errout; + + err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF); + if (err < 0) { + kfree_skb(skb); + goto errout; + } + + rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); + return; +errout: + rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); +} + +static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) +{ + struct net *net = sock_net(skb->sk); + struct net_device *master = NULL; + struct ndmsg *ndm; + struct nlattr *tb[NDA_MAX+1]; + struct net_device *dev; + u8 *addr; + int err; + + err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); + if (err < 0) + return err; + + ndm = nlmsg_data(nlh); + if (ndm->ndm_ifindex == 0) { + pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n"); + return -EINVAL; + } + + dev = __dev_get_by_index(net, ndm->ndm_ifindex); + if (dev == NULL) { + pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n"); + return -ENODEV; + } + + if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { + pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n"); + return -EINVAL; + } + + addr = nla_data(tb[NDA_LLADDR]); + if (!is_valid_ether_addr(addr)) { + pr_info("PF_BRIDGE: 
RTM_NEWNEIGH with invalid ether address\n"); + return -EINVAL; + } + + err = -EOPNOTSUPP; + + /* Support fdb on master device the net/bridge default case */ + if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && + (dev->priv_flags & IFF_BRIDGE_PORT)) { + master = dev->master; + err = master->netdev_ops->ndo_fdb_add(ndm, dev, addr, + nlh->nlmsg_flags); + if (err) + goto out; + else + ndm->ndm_flags &= ~NTF_MASTER; + } + + /* Embedded bridge, macvlan, and any other device support */ + if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) { + err = dev->netdev_ops->ndo_fdb_add(ndm, dev, addr, + nlh->nlmsg_flags); + + if (!err) { + rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH); + ndm->ndm_flags &= ~NTF_SELF; + } + } +out: + return err; +} + +static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) +{ + struct net *net = sock_net(skb->sk); + struct ndmsg *ndm; + struct nlattr *llattr; + struct net_device *dev; + int err = -EINVAL; + __u8 *addr; + + if (nlmsg_len(nlh) < sizeof(*ndm)) + return -EINVAL; + + ndm = nlmsg_data(nlh); + if (ndm->ndm_ifindex == 0) { + pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n"); + return -EINVAL; + } + + dev = __dev_get_by_index(net, ndm->ndm_ifindex); + if (dev == NULL) { + pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n"); + return -ENODEV; + } + + llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR); + if (llattr == NULL || nla_len(llattr) != ETH_ALEN) { + pr_info("PF_BRIGDE: RTM_DELNEIGH with invalid address\n"); + return -EINVAL; + } + + addr = nla_data(llattr); + err = -EOPNOTSUPP; + + /* Support fdb on master device the net/bridge default case */ + if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && + (dev->priv_flags & IFF_BRIDGE_PORT)) { + struct net_device *master = dev->master; + + if (master->netdev_ops->ndo_fdb_del) + err = master->netdev_ops->ndo_fdb_del(ndm, dev, addr); + + if (err) + goto out; + else + ndm->ndm_flags &= ~NTF_MASTER; + } + + /* Embedded bridge, macvlan, and any other device support */ + if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) { + err = dev->netdev_ops->ndo_fdb_del(ndm, dev, addr); + + if (!err) { + rtnl_fdb_notify(dev, addr, RTM_DELNEIGH); + ndm->ndm_flags &= ~NTF_SELF; + } + } +out: + return err; +} + +static int nlmsg_populate_fdb(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + int *idx, + struct netdev_hw_addr_list *list) +{ + struct netdev_hw_addr *ha; + int err; + u32 pid, seq; + + pid = NETLINK_CB(cb->skb).pid; + seq = cb->nlh->nlmsg_seq; + + list_for_each_entry(ha, &list->list, list) { + if (*idx < cb->args[0]) + goto skip; + + err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, + pid, seq, 0, NTF_SELF); + if (err < 0) + return err; +skip: + *idx += 1; + } + return 0; +} + +/** + * ndo_dflt_fdb_dump: default netdevice operation to dump an FDB table. + * @nlh: netlink message header + * @dev: netdevice + * + * Default netdevice operation to dump the existing unicast address list. + * Returns zero on success. 
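 *
 * An illustrative hook-up (a sketch, not from this patch): a driver with
 * no private forwarding table can use this helper directly as its
 * ndo_fdb_dump callback, so the PF_BRIDGE RTM_GETNEIGH dump below simply
 * reports the device's unicast and multicast address lists.
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_fdb_dump	= ndo_dflt_fdb_dump,
 *	};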
+ */ +int ndo_dflt_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + int idx) +{ + int err; + + netif_addr_lock_bh(dev); + err = nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->uc); + if (err) + goto out; + nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->mc); +out: + netif_addr_unlock_bh(dev); + return idx; +} +EXPORT_SYMBOL(ndo_dflt_fdb_dump); + +static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + int idx = 0; + struct net *net = sock_net(skb->sk); + struct net_device *dev; + + rcu_read_lock(); + for_each_netdev_rcu(net, dev) { + if (dev->priv_flags & IFF_BRIDGE_PORT) { + struct net_device *master = dev->master; + const struct net_device_ops *ops = master->netdev_ops; + + if (ops->ndo_fdb_dump) + idx = ops->ndo_fdb_dump(skb, cb, dev, idx); + } + + if (dev->netdev_ops->ndo_fdb_dump) + idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx); + } + rcu_read_unlock(); + + cb->args[0] = idx; + return skb->len; +} + /* Protected by RTNL sempahore. */ static struct rtattr **rta_buf; static int rtattr_max; @@ -2042,7 +2309,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); while (RTA_OK(attr, attrlen)) { - unsigned flavor = attr->rta_type; + unsigned int flavor = attr->rta_type; if (flavor) { if (flavor > rta_max[sz_idx]) return -EINVAL; @@ -2144,5 +2411,9 @@ void __init rtnetlink_init(void) rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL); rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL); + + rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL); + rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL); + rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL); } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index e59840010d4..016694d6248 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -36,6 +36,8 @@ * The functions in this file will not compile correctly with gcc 2.4.x */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> @@ -67,10 +69,9 @@ #include <asm/uaccess.h> #include <trace/events/skb.h> +#include <linux/highmem.h> -#include "kmap_skb.h" - -static struct kmem_cache *skbuff_head_cache __read_mostly; +struct kmem_cache *skbuff_head_cache __read_mostly; static struct kmem_cache *skbuff_fclone_cache __read_mostly; static void sock_pipe_buf_release(struct pipe_inode_info *pipe, @@ -119,11 +120,10 @@ static const struct pipe_buf_operations sock_pipe_buf_ops = { */ static void skb_over_panic(struct sk_buff *skb, int sz, void *here) { - printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p " - "data:%p tail:%#lx end:%#lx dev:%s\n", - here, skb->len, sz, skb->head, skb->data, - (unsigned long)skb->tail, (unsigned long)skb->end, - skb->dev ? skb->dev->name : "<NULL>"); + pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n", + __func__, here, skb->len, sz, skb->head, skb->data, + (unsigned long)skb->tail, (unsigned long)skb->end, + skb->dev ? skb->dev->name : "<NULL>"); BUG(); } @@ -138,11 +138,10 @@ static void skb_over_panic(struct sk_buff *skb, int sz, void *here) static void skb_under_panic(struct sk_buff *skb, int sz, void *here) { - printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p " - "data:%p tail:%#lx end:%#lx dev:%s\n", - here, skb->len, sz, skb->head, skb->data, - (unsigned long)skb->tail, (unsigned long)skb->end, - skb->dev ? 
skb->dev->name : "<NULL>"); + pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n", + __func__, here, skb->len, sz, skb->head, skb->data, + (unsigned long)skb->tail, (unsigned long)skb->end, + skb->dev ? skb->dev->name : "<NULL>"); BUG(); } @@ -246,6 +245,7 @@ EXPORT_SYMBOL(__alloc_skb); /** * build_skb - build a network buffer * @data: data buffer provided by caller + * @frag_size: size of fragment, or 0 if head was kmalloced * * Allocate a new &sk_buff. Caller provides space holding head and * skb_shared_info. @data must have been allocated by kmalloc() @@ -259,20 +259,21 @@ EXPORT_SYMBOL(__alloc_skb); * before giving packet to stack. * RX rings only contains data buffers, not full skbs. */ -struct sk_buff *build_skb(void *data) +struct sk_buff *build_skb(void *data, unsigned int frag_size) { struct skb_shared_info *shinfo; struct sk_buff *skb; - unsigned int size; + unsigned int size = frag_size ? : ksize(data); skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); if (!skb) return NULL; - size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); memset(skb, 0, offsetof(struct sk_buff, tail)); skb->truesize = SKB_TRUESIZE(size); + skb->head_frag = frag_size != 0; atomic_set(&skb->users, 1); skb->head = data; skb->data = data; @@ -292,6 +293,46 @@ struct sk_buff *build_skb(void *data) } EXPORT_SYMBOL(build_skb); +struct netdev_alloc_cache { + struct page *page; + unsigned int offset; +}; +static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache); + +/** + * netdev_alloc_frag - allocate a page fragment + * @fragsz: fragment size + * + * Allocates a frag from a page for receive buffer. + * Uses GFP_ATOMIC allocations. + */ +void *netdev_alloc_frag(unsigned int fragsz) +{ + struct netdev_alloc_cache *nc; + void *data = NULL; + unsigned long flags; + + local_irq_save(flags); + nc = &__get_cpu_var(netdev_alloc_cache); + if (unlikely(!nc->page)) { +refill: + nc->page = alloc_page(GFP_ATOMIC | __GFP_COLD); + nc->offset = 0; + } + if (likely(nc->page)) { + if (nc->offset + fragsz > PAGE_SIZE) { + put_page(nc->page); + goto refill; + } + data = page_address(nc->page) + nc->offset; + nc->offset += fragsz; + get_page(nc->page); + } + local_irq_restore(flags); + return data; +} +EXPORT_SYMBOL(netdev_alloc_frag); + /** * __netdev_alloc_skb - allocate an skbuff for rx on a specific device * @dev: network device to receive on @@ -306,11 +347,23 @@ EXPORT_SYMBOL(build_skb); * %NULL is returned if there is no free memory. 
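 *
 * A rough sketch of the new fast path taken below (it mirrors the code;
 * sizes are illustrative): when the request fits in a page and the
 * caller cannot sleep, the head comes from a per-cpu page fragment
 * instead of kmalloc():
 *
 *	fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
 *		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	data   = netdev_alloc_frag(fragsz);
 *	skb    = data ? build_skb(data, fragsz) : NULL;
 *
 * Larger or sleeping (__GFP_WAIT) allocations still fall back to
 * __alloc_skb().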
*/ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, - unsigned int length, gfp_t gfp_mask) + unsigned int length, gfp_t gfp_mask) { - struct sk_buff *skb; + struct sk_buff *skb = NULL; + unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) { + void *data = netdev_alloc_frag(fragsz); - skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE); + if (likely(data)) { + skb = build_skb(data, fragsz); + if (unlikely(!skb)) + put_page(virt_to_head_page(data)); + } + } else { + skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE); + } if (likely(skb)) { skb_reserve(skb, NET_SKB_PAD); skb->dev = dev; @@ -329,28 +382,6 @@ void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, } EXPORT_SYMBOL(skb_add_rx_frag); -/** - * dev_alloc_skb - allocate an skbuff for receiving - * @length: length to allocate - * - * Allocate a new &sk_buff and assign it a usage count of one. The - * buffer has unspecified headroom built in. Users should allocate - * the headroom they think they need without accounting for the - * built in space. The built in space is used for optimisations. - * - * %NULL is returned if there is no free memory. Although this function - * allocates memory it can be called from an interrupt. - */ -struct sk_buff *dev_alloc_skb(unsigned int length) -{ - /* - * There is more code here than it seems: - * __dev_alloc_skb is an inline - */ - return __dev_alloc_skb(length, GFP_ATOMIC); -} -EXPORT_SYMBOL(dev_alloc_skb); - static void skb_drop_list(struct sk_buff **listp) { struct sk_buff *list = *listp; @@ -377,6 +408,14 @@ static void skb_clone_fraglist(struct sk_buff *skb) skb_get(list); } +static void skb_free_head(struct sk_buff *skb) +{ + if (skb->head_frag) + put_page(virt_to_head_page(skb->head)); + else + kfree(skb->head); +} + static void skb_release_data(struct sk_buff *skb) { if (!skb->cloned || @@ -403,7 +442,7 @@ static void skb_release_data(struct sk_buff *skb) if (skb_has_frag_list(skb)) skb_drop_fraglist(skb); - kfree(skb->head); + skb_free_head(skb); } } @@ -645,6 +684,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) C(tail); C(end); C(head); + C(head_frag); C(data); C(truesize); atomic_set(&n->users, 1); @@ -707,10 +747,10 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) } return -ENOMEM; } - vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); + vaddr = kmap_atomic(skb_frag_page(f)); memcpy(page_address(page), vaddr + f->page_offset, skb_frag_size(f)); - kunmap_skb_frag(vaddr); + kunmap_atomic(vaddr); page->private = (unsigned long)head; head = page; } @@ -819,7 +859,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) { int headerlen = skb_headroom(skb); - unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len; + unsigned int size = skb_end_offset(skb) + skb->data_len; struct sk_buff *n = alloc_skb(size, gfp_mask); if (!n) @@ -920,9 +960,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, { int i; u8 *data; - int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail; + int size = nhead + skb_end_offset(skb) + ntail; long off; - bool fastpath; BUG_ON(nhead < 0); @@ -931,27 +970,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, size = SKB_DATA_ALIGN(size); - /* Check if we can avoid taking references on fragments if we own - * the 
last reference on skb->head. (see skb_release_data()) - */ - if (!skb->cloned) - fastpath = true; - else { - int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1; - fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta; - } - - if (fastpath && - size + sizeof(struct skb_shared_info) <= ksize(skb->head)) { - memmove(skb->head + size, skb_shinfo(skb), - offsetof(struct skb_shared_info, - frags[skb_shinfo(skb)->nr_frags])); - memmove(skb->head + nhead, skb->head, - skb_tail_pointer(skb) - skb->head); - off = nhead; - goto adjust_others; - } - data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), gfp_mask); if (!data) @@ -967,9 +985,12 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, skb_shinfo(skb), offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); - if (fastpath) { - kfree(skb->head); - } else { + /* + * if shinfo is shared we must drop the old head gracefully, but if it + * is not we can just drop the old head and let the existing refcount + * be since all we did is relocate the values + */ + if (skb_cloned(skb)) { /* copy this zero copy skb frags */ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { if (skb_copy_ubufs(skb, gfp_mask)) @@ -982,11 +1003,13 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, skb_clone_fraglist(skb); skb_release_data(skb); + } else { + skb_free_head(skb); } off = (data + nhead) - skb->head; skb->head = data; -adjust_others: + skb->head_frag = 0; skb->data += off; #ifdef NET_SKBUFF_DATA_USES_OFFSET skb->end = size; @@ -1275,7 +1298,7 @@ drop_pages: return -ENOMEM; nfrag->next = frag->next; - kfree_skb(frag); + consume_skb(frag); frag = nfrag; *fragp = frag; } @@ -1487,21 +1510,22 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; WARN_ON(start > offset + len); - end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); + end = start + skb_frag_size(f); if ((copy = end - offset) > 0) { u8 *vaddr; if (copy > len) copy = len; - vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); + vaddr = kmap_atomic(skb_frag_page(f)); memcpy(to, - vaddr + skb_shinfo(skb)->frags[i].page_offset+ - offset - start, copy); - kunmap_skb_frag(vaddr); + vaddr + f->page_offset + offset - start, + copy); + kunmap_atomic(vaddr); if ((len -= copy) == 0) return 0; @@ -1547,9 +1571,9 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) put_page(spd->pages[i]); } -static inline struct page *linear_to_page(struct page *page, unsigned int *len, - unsigned int *offset, - struct sk_buff *skb, struct sock *sk) +static struct page *linear_to_page(struct page *page, unsigned int *len, + unsigned int *offset, + struct sk_buff *skb, struct sock *sk) { struct page *p = sk->sk_sndmsg_page; unsigned int off; @@ -1565,6 +1589,9 @@ new_page: } else { unsigned int mlen; + /* If we are the only user of the page, we can reset offset */ + if (page_count(p) == 1) + sk->sk_sndmsg_off = 0; off = sk->sk_sndmsg_off; mlen = PAGE_SIZE - off; if (mlen < 64 && mlen < *len) { @@ -1578,36 +1605,48 @@ new_page: memcpy(page_address(p) + off, page_address(page) + *offset, *len); sk->sk_sndmsg_off += *len; *offset = off; - get_page(p); return p; } +static bool spd_can_coalesce(const struct splice_pipe_desc *spd, + struct page *page, + unsigned int offset) +{ + return spd->nr_pages && + spd->pages[spd->nr_pages - 1] == page && + (spd->partial[spd->nr_pages - 1].offset + + 
spd->partial[spd->nr_pages - 1].len == offset); +} + /* * Fill page/offset/length into spd, if it can hold more pages. */ -static inline int spd_fill_page(struct splice_pipe_desc *spd, - struct pipe_inode_info *pipe, struct page *page, - unsigned int *len, unsigned int offset, - struct sk_buff *skb, int linear, - struct sock *sk) +static bool spd_fill_page(struct splice_pipe_desc *spd, + struct pipe_inode_info *pipe, struct page *page, + unsigned int *len, unsigned int offset, + struct sk_buff *skb, bool linear, + struct sock *sk) { - if (unlikely(spd->nr_pages == pipe->buffers)) - return 1; + if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) + return true; if (linear) { page = linear_to_page(page, len, &offset, skb, sk); if (!page) - return 1; - } else - get_page(page); - + return true; + } + if (spd_can_coalesce(spd, page, offset)) { + spd->partial[spd->nr_pages - 1].len += *len; + return false; + } + get_page(page); spd->pages[spd->nr_pages] = page; spd->partial[spd->nr_pages].len = *len; spd->partial[spd->nr_pages].offset = offset; spd->nr_pages++; - return 0; + return false; } static inline void __segment_seek(struct page **page, unsigned int *poff, @@ -1624,20 +1663,20 @@ static inline void __segment_seek(struct page **page, unsigned int *poff, *plen -= off; } -static inline int __splice_segment(struct page *page, unsigned int poff, - unsigned int plen, unsigned int *off, - unsigned int *len, struct sk_buff *skb, - struct splice_pipe_desc *spd, int linear, - struct sock *sk, - struct pipe_inode_info *pipe) +static bool __splice_segment(struct page *page, unsigned int poff, + unsigned int plen, unsigned int *off, + unsigned int *len, struct sk_buff *skb, + struct splice_pipe_desc *spd, bool linear, + struct sock *sk, + struct pipe_inode_info *pipe) { if (!*len) - return 1; + return true; /* skip this segment if already processed */ if (*off >= plen) { *off -= plen; - return 0; + return false; } /* ignore any bits we already processed */ @@ -1653,34 +1692,38 @@ static inline int __splice_segment(struct page *page, unsigned int poff, flen = min_t(unsigned int, flen, PAGE_SIZE - poff); if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk)) - return 1; + return true; __segment_seek(&page, &poff, &plen, flen); *len -= flen; } while (*len && plen); - return 0; + return false; } /* - * Map linear and fragment data from the skb to spd. It reports failure if the + * Map linear and fragment data from the skb to spd. It reports true if the * pipe is full or if we already spliced the requested length. */ -static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, - unsigned int *offset, unsigned int *len, - struct splice_pipe_desc *spd, struct sock *sk) +static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, + unsigned int *offset, unsigned int *len, + struct splice_pipe_desc *spd, struct sock *sk) { int seg; - /* - * map the linear part + /* map the linear part : + * If skb->head_frag is set, this 'linear' part is backed by a + * fragment, and if the head is not shared with any clones then + * we can avoid a copy since we own the head portion of this page. 
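 * In effect the "linear" flag passed down boils down to something like
 *	skb_head_is_locked(skb), i.e. !skb->head_frag || skb_cloned(skb)
 * (assumed definition, not shown in this hunk): a locked head is copied
 * via linear_to_page(), an unlocked frag-backed head is spliced by
 * reference.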
*/ if (__splice_segment(virt_to_page(skb->data), (unsigned long) skb->data & (PAGE_SIZE - 1), skb_headlen(skb), - offset, len, skb, spd, 1, sk, pipe)) - return 1; + offset, len, skb, spd, + skb_head_is_locked(skb), + sk, pipe)) + return true; /* * then map the fragments @@ -1690,11 +1733,11 @@ static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, if (__splice_segment(skb_frag_page(f), f->page_offset, skb_frag_size(f), - offset, len, skb, spd, 0, sk, pipe)) - return 1; + offset, len, skb, spd, false, sk, pipe)) + return true; } - return 0; + return false; } /* @@ -1707,8 +1750,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, struct pipe_inode_info *pipe, unsigned int tlen, unsigned int flags) { - struct partial_page partial[PIPE_DEF_BUFFERS]; - struct page *pages[PIPE_DEF_BUFFERS]; + struct partial_page partial[MAX_SKB_FRAGS]; + struct page *pages[MAX_SKB_FRAGS]; struct splice_pipe_desc spd = { .pages = pages, .partial = partial, @@ -1720,9 +1763,6 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, struct sock *sk = skb->sk; int ret = 0; - if (splice_grow_spd(pipe, &spd)) - return -ENOMEM; - /* * __skb_splice_bits() only fails if the output has no room left, * so no point in going over the frag_list for the error case. @@ -1758,7 +1798,6 @@ done: lock_sock(sk); } - splice_shrink_spd(pipe, &spd); return ret; } @@ -1806,10 +1845,10 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) if (copy > len) copy = len; - vaddr = kmap_skb_frag(frag); + vaddr = kmap_atomic(skb_frag_page(frag)); memcpy(vaddr + frag->page_offset + offset - start, from, copy); - kunmap_skb_frag(vaddr); + kunmap_atomic(vaddr); if ((len -= copy) == 0) return 0; @@ -1869,21 +1908,21 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; WARN_ON(start > offset + len); - end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); + end = start + skb_frag_size(frag); if ((copy = end - offset) > 0) { __wsum csum2; u8 *vaddr; - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; if (copy > len) copy = len; - vaddr = kmap_skb_frag(frag); + vaddr = kmap_atomic(skb_frag_page(frag)); csum2 = csum_partial(vaddr + frag->page_offset + offset - start, copy, 0); - kunmap_skb_frag(vaddr); + kunmap_atomic(vaddr); csum = csum_block_add(csum, csum2, pos); if (!(len -= copy)) return csum; @@ -1955,12 +1994,12 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, if (copy > len) copy = len; - vaddr = kmap_skb_frag(frag); + vaddr = kmap_atomic(skb_frag_page(frag)); csum2 = csum_partial_copy_nocheck(vaddr + frag->page_offset + offset - start, to, copy, 0); - kunmap_skb_frag(vaddr); + kunmap_atomic(vaddr); csum = csum_block_add(csum, csum2, pos); if (!(len -= copy)) return csum; @@ -2480,7 +2519,7 @@ next_skb: if (abs_offset < block_limit) { if (!st->frag_data) - st->frag_data = kmap_skb_frag(frag); + st->frag_data = kmap_atomic(skb_frag_page(frag)); *data = (u8 *) st->frag_data + frag->page_offset + (abs_offset - st->stepped_offset); @@ -2489,7 +2528,7 @@ next_skb: } if (st->frag_data) { - kunmap_skb_frag(st->frag_data); + kunmap_atomic(st->frag_data); st->frag_data = NULL; } @@ -2498,7 +2537,7 @@ next_skb: } if (st->frag_data) { - kunmap_skb_frag(st->frag_data); + kunmap_atomic(st->frag_data); st->frag_data = NULL; } @@ -2526,7 +2565,7 @@ EXPORT_SYMBOL(skb_seq_read); void skb_abort_seq_read(struct skb_seq_state *st) { if (st->frag_data) 
- kunmap_skb_frag(st->frag_data); + kunmap_atomic(st->frag_data); } EXPORT_SYMBOL(skb_abort_seq_read); @@ -2718,14 +2757,13 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) if (unlikely(!nskb)) goto err; - hsize = skb_end_pointer(nskb) - nskb->head; + hsize = skb_end_offset(nskb); if (skb_cow_head(nskb, doffset + headroom)) { kfree_skb(nskb); goto err; } - nskb->truesize += skb_end_pointer(nskb) - nskb->head - - hsize; + nskb->truesize += skb_end_offset(nskb) - hsize; skb_release_head_state(nskb); __skb_push(nskb, doffset); } else { @@ -2843,6 +2881,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) unsigned int len = skb_gro_len(skb); unsigned int offset = skb_gro_offset(skb); unsigned int headlen = skb_headlen(skb); + unsigned int delta_truesize; if (p->len + len >= 65536) return -E2BIG; @@ -2872,11 +2911,41 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) frag->page_offset += offset; skb_frag_size_sub(frag, offset); + /* all fragments truesize : remove (head size + sk_buff) */ + delta_truesize = skb->truesize - + SKB_TRUESIZE(skb_end_offset(skb)); + skb->truesize -= skb->data_len; skb->len -= skb->data_len; skb->data_len = 0; - NAPI_GRO_CB(skb)->free = 1; + NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; + goto done; + } else if (skb->head_frag) { + int nr_frags = pinfo->nr_frags; + skb_frag_t *frag = pinfo->frags + nr_frags; + struct page *page = virt_to_head_page(skb->head); + unsigned int first_size = headlen - offset; + unsigned int first_offset; + + if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) + return -E2BIG; + + first_offset = skb->data - + (unsigned char *)page_address(page) + + offset; + + pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; + + frag->page.p = page; + frag->page_offset = first_offset; + skb_frag_size_set(frag, first_size); + + memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); + /* We dont need to clear skbinfo->nr_frags here */ + + delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); + NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; goto done; } else if (skb_gro_len(p) != pinfo->gso_size) return -E2BIG; @@ -2918,7 +2987,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) p = nskb; merge: - p->truesize += skb->truesize - len; + delta_truesize = skb->truesize; if (offset > headlen) { unsigned int eat = offset - headlen; @@ -2938,7 +3007,7 @@ merge: done: NAPI_GRO_CB(p)->count++; p->data_len += len; - p->truesize += len; + p->truesize += delta_truesize; p->len += len; NAPI_GRO_CB(skb)->same_flow = 1; @@ -3166,7 +3235,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) int len = skb->len; if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= - (unsigned)sk->sk_rcvbuf) + (unsigned int)sk->sk_rcvbuf) return -ENOMEM; skb_orphan(skb); @@ -3260,10 +3329,8 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) { if (unlikely(start > skb_headlen(skb)) || unlikely((int)start + off > skb_headlen(skb) - 2)) { - if (net_ratelimit()) - printk(KERN_WARNING - "bad partial csum: csum=%u/%u len=%u\n", - start, off, skb_headlen(skb)); + net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", + start, off, skb_headlen(skb)); return false; } skb->ip_summed = CHECKSUM_PARTIAL; @@ -3275,8 +3342,93 @@ EXPORT_SYMBOL_GPL(skb_partial_csum_set); void __skb_warn_lro_forwarding(const struct sk_buff *skb) { - if (net_ratelimit()) - pr_warning("%s: received packets cannot be forwarded" - " while LRO is enabled\n", skb->dev->name); + 
net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", + skb->dev->name); } EXPORT_SYMBOL(__skb_warn_lro_forwarding); + +void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) +{ + if (head_stolen) + kmem_cache_free(skbuff_head_cache, skb); + else + __kfree_skb(skb); +} +EXPORT_SYMBOL(kfree_skb_partial); + +/** + * skb_try_coalesce - try to merge skb to prior one + * @to: prior buffer + * @from: buffer to add + * @fragstolen: pointer to boolean + * + */ +bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, + bool *fragstolen, int *delta_truesize) +{ + int i, delta, len = from->len; + + *fragstolen = false; + + if (skb_cloned(to)) + return false; + + if (len <= skb_tailroom(to)) { + BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); + *delta_truesize = 0; + return true; + } + + if (skb_has_frag_list(to) || skb_has_frag_list(from)) + return false; + + if (skb_headlen(from) != 0) { + struct page *page; + unsigned int offset; + + if (skb_shinfo(to)->nr_frags + + skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) + return false; + + if (skb_head_is_locked(from)) + return false; + + delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); + + page = virt_to_head_page(from->head); + offset = from->data - (unsigned char *)page_address(page); + + skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, + page, offset, skb_headlen(from)); + *fragstolen = true; + } else { + if (skb_shinfo(to)->nr_frags + + skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) + return false; + + delta = from->truesize - + SKB_TRUESIZE(skb_end_pointer(from) - from->head); + } + + WARN_ON_ONCE(delta < len); + + memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, + skb_shinfo(from)->frags, + skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); + skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; + + if (!skb_cloned(from)) + skb_shinfo(from)->nr_frags = 0; + + /* if the skb is cloned this does nothing since we set nr_frags to 0 */ + for (i = 0; i < skb_shinfo(from)->nr_frags; i++) + skb_frag_ref(from, i); + + to->truesize += delta; + to->len += len; + to->data_len += len; + + *delta_truesize = delta; + return true; +} +EXPORT_SYMBOL(skb_try_coalesce); diff --git a/net/core/sock.c b/net/core/sock.c index b2e14c07d92..5efcd6307fa 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -89,6 +89,8 @@ * 2 of the License, or (at your option) any later version. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/capability.h> #include <linux/errno.h> #include <linux/types.h> @@ -113,6 +115,7 @@ #include <linux/user_namespace.h> #include <linux/static_key.h> #include <linux/memcontrol.h> +#include <linux/prefetch.h> #include <asm/uaccess.h> @@ -258,7 +261,9 @@ static struct lock_class_key af_callback_keys[AF_MAX]; /* Run time adjustable parameters. 
*/ __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX; +EXPORT_SYMBOL(sysctl_wmem_max); __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX; +EXPORT_SYMBOL(sysctl_rmem_max); __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX; __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; @@ -294,9 +299,8 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) *timeo_p = 0; if (warned < 10 && net_ratelimit()) { warned++; - printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) " - "tries to set negative timeout\n", - current->comm, task_pid_nr(current)); + pr_info("%s: `%s' (pid %d) tries to set negative timeout\n", + __func__, current->comm, task_pid_nr(current)); } return 0; } @@ -314,8 +318,8 @@ static void sock_warn_obsolete_bsdism(const char *name) static char warncomm[TASK_COMM_LEN]; if (strcmp(warncomm, current->comm) && warned < 5) { strcpy(warncomm, current->comm); - printk(KERN_WARNING "process `%s' is using obsolete " - "%s SO_BSDCOMPAT\n", warncomm, name); + pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n", + warncomm, name); warned++; } } @@ -389,7 +393,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) skb->dev = NULL; - if (sk_rcvqueues_full(sk, skb)) { + if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) { atomic_inc(&sk->sk_drops); goto discard_and_relse; } @@ -406,7 +410,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) rc = sk_backlog_rcv(sk, skb); mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); - } else if (sk_add_backlog(sk, skb)) { + } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { bh_unlock_sock(sk); atomic_inc(&sk->sk_drops); goto discard_and_relse; @@ -561,7 +565,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, sock_valbool_flag(sk, SOCK_DBG, valbool); break; case SO_REUSEADDR: - sk->sk_reuse = valbool; + sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE); break; case SO_TYPE: case SO_PROTOCOL: @@ -577,23 +581,15 @@ int sock_setsockopt(struct socket *sock, int level, int optname, break; case SO_SNDBUF: /* Don't error on this BSD doesn't and if you think - about it this is right. Otherwise apps have to - play 'guess the biggest size' games. RCVBUF/SNDBUF - are treated in BSD as hints */ - - if (val > sysctl_wmem_max) - val = sysctl_wmem_max; + * about it this is right. Otherwise apps have to + * play 'guess the biggest size' games. RCVBUF/SNDBUF + * are treated in BSD as hints + */ + val = min_t(u32, val, sysctl_wmem_max); set_sndbuf: sk->sk_userlocks |= SOCK_SNDBUF_LOCK; - if ((val * 2) < SOCK_MIN_SNDBUF) - sk->sk_sndbuf = SOCK_MIN_SNDBUF; - else - sk->sk_sndbuf = val * 2; - - /* - * Wake up sending tasks if we - * upped the value. - */ + sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF); + /* Wake up sending tasks if we upped the value. */ sk->sk_write_space(sk); break; @@ -606,12 +602,11 @@ set_sndbuf: case SO_RCVBUF: /* Don't error on this BSD doesn't and if you think - about it this is right. Otherwise apps have to - play 'guess the biggest size' games. RCVBUF/SNDBUF - are treated in BSD as hints */ - - if (val > sysctl_rmem_max) - val = sysctl_rmem_max; + * about it this is right. Otherwise apps have to + * play 'guess the biggest size' games. RCVBUF/SNDBUF + * are treated in BSD as hints + */ + val = min_t(u32, val, sysctl_rmem_max); set_rcvbuf: sk->sk_userlocks |= SOCK_RCVBUF_LOCK; /* @@ -629,10 +624,7 @@ set_rcvbuf: * returning the value we actually used in getsockopt * is the most desirable behavior. 
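 *
 * Worked example of the clamping in this path (numbers purely
 * illustrative): a setsockopt(SO_RCVBUF, 128) request first becomes
 * val = min(128, sysctl_rmem_max), and the stored value is then
 * sk_rcvbuf = max(val * 2, SOCK_MIN_RCVBUF), so the doubled value can
 * never drop below SOCK_MIN_RCVBUF.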
*/ - if ((val * 2) < SOCK_MIN_RCVBUF) - sk->sk_rcvbuf = SOCK_MIN_RCVBUF; - else - sk->sk_rcvbuf = val * 2; + sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF); break; case SO_RCVBUFFORCE: @@ -858,7 +850,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_BROADCAST: - v.val = !!sock_flag(sk, SOCK_BROADCAST); + v.val = sock_flag(sk, SOCK_BROADCAST); break; case SO_SNDBUF: @@ -874,7 +866,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_KEEPALIVE: - v.val = !!sock_flag(sk, SOCK_KEEPOPEN); + v.val = sock_flag(sk, SOCK_KEEPOPEN); break; case SO_TYPE: @@ -896,7 +888,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_OOBINLINE: - v.val = !!sock_flag(sk, SOCK_URGINLINE); + v.val = sock_flag(sk, SOCK_URGINLINE); break; case SO_NO_CHECK: @@ -909,7 +901,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, case SO_LINGER: lv = sizeof(v.ling); - v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER); + v.ling.l_onoff = sock_flag(sk, SOCK_LINGER); v.ling.l_linger = sk->sk_lingertime / HZ; break; @@ -975,7 +967,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_PASSCRED: - v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0; + v.val = !!test_bit(SOCK_PASSCRED, &sock->flags); break; case SO_PEERCRED: @@ -1010,7 +1002,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_PASSSEC: - v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0; + v.val = !!test_bit(SOCK_PASSSEC, &sock->flags); break; case SO_PEERSEC: @@ -1021,11 +1013,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_RXQ_OVFL: - v.val = !!sock_flag(sk, SOCK_RXQ_OVFL); + v.val = sock_flag(sk, SOCK_RXQ_OVFL); break; case SO_WIFI_STATUS: - v.val = !!sock_flag(sk, SOCK_WIFI_STATUS); + v.val = sock_flag(sk, SOCK_WIFI_STATUS); break; case SO_PEEK_OFF: @@ -1035,7 +1027,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.val = sk->sk_peek_off; break; case SO_NOFCS: - v.val = !!sock_flag(sk, SOCK_NOFCS); + v.val = sock_flag(sk, SOCK_NOFCS); break; default: return -ENOPROTOOPT; @@ -1247,8 +1239,8 @@ static void __sk_free(struct sock *sk) sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); if (atomic_read(&sk->sk_omem_alloc)) - printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n", - __func__, atomic_read(&sk->sk_omem_alloc)); + pr_debug("%s: optmem leakage (%d bytes) detected\n", + __func__, atomic_read(&sk->sk_omem_alloc)); if (sk->sk_peer_cred) put_cred(sk->sk_peer_cred); @@ -1534,7 +1526,7 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, */ void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) { - if ((unsigned)size <= sysctl_optmem_max && + if ((unsigned int)size <= sysctl_optmem_max && atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { void *mem; /* First do the add, to avoid the race if kmalloc @@ -1712,6 +1704,7 @@ static void __release_sock(struct sock *sk) do { struct sk_buff *next = skb->next; + prefetch(next); WARN_ON_ONCE(skb_dst_is_noref(skb)); skb->next = NULL; sk_backlog_rcv(sk, skb); @@ -2432,7 +2425,7 @@ static void assign_proto_idx(struct proto *prot) prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR); if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) { - printk(KERN_ERR "PROTO_INUSE_NR exhausted\n"); + pr_err("PROTO_INUSE_NR exhausted\n"); return; } @@ -2462,8 +2455,8 @@ int proto_register(struct proto *prot, int alloc_slab) NULL); 
if (prot->slab == NULL) { - printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n", - prot->name); + pr_crit("%s: Can't create sock SLAB cache!\n", + prot->name); goto out; } @@ -2477,8 +2470,8 @@ int proto_register(struct proto *prot, int alloc_slab) SLAB_HWCACHE_ALIGN, NULL); if (prot->rsk_prot->slab == NULL) { - printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n", - prot->name); + pr_crit("%s: Can't create request sock SLAB cache!\n", + prot->name); goto out_free_request_sock_slab_name; } } @@ -2576,7 +2569,7 @@ static char proto_method_implemented(const void *method) } static long sock_prot_memory_allocated(struct proto *proto) { - return proto->memory_allocated != NULL ? proto_memory_allocated(proto): -1L; + return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L; } static char *sock_prot_memory_pressure(struct proto *proto) diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index b9868e1fd62..5fd146720f3 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c @@ -10,7 +10,7 @@ #include <linux/inet_diag.h> #include <linux/sock_diag.h> -static struct sock_diag_handler *sock_diag_handlers[AF_MAX]; +static const struct sock_diag_handler *sock_diag_handlers[AF_MAX]; static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh); static DEFINE_MUTEX(sock_diag_table_mutex); @@ -70,7 +70,7 @@ void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlms } EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat); -int sock_diag_register(struct sock_diag_handler *hndl) +int sock_diag_register(const struct sock_diag_handler *hndl) { int err = 0; @@ -88,7 +88,7 @@ int sock_diag_register(struct sock_diag_handler *hndl) } EXPORT_SYMBOL_GPL(sock_diag_register); -void sock_diag_unregister(struct sock_diag_handler *hnld) +void sock_diag_unregister(const struct sock_diag_handler *hnld) { int family = hnld->family; @@ -102,7 +102,7 @@ void sock_diag_unregister(struct sock_diag_handler *hnld) } EXPORT_SYMBOL_GPL(sock_diag_unregister); -static inline struct sock_diag_handler *sock_diag_lock_handler(int family) +static const inline struct sock_diag_handler *sock_diag_lock_handler(int family) { if (sock_diag_handlers[family] == NULL) request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, @@ -112,7 +112,7 @@ static inline struct sock_diag_handler *sock_diag_lock_handler(int family) return sock_diag_handlers[family]; } -static inline void sock_diag_unlock_handler(struct sock_diag_handler *h) +static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h) { mutex_unlock(&sock_diag_table_mutex); } @@ -121,7 +121,7 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { int err; struct sock_diag_req *req = NLMSG_DATA(nlh); - struct sock_diag_handler *hndl; + const struct sock_diag_handler *hndl; if (nlmsg_len(nlh) < sizeof(*req)) return -EINVAL; diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 0c285087425..a7c36845b12 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -14,6 +14,7 @@ #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/slab.h> +#include <linux/kmemleak.h> #include <net/ip.h> #include <net/sock.h> @@ -202,12 +203,6 @@ static struct ctl_table netns_core_table[] = { { } }; -__net_initdata struct ctl_path net_core_path[] = { - { .procname = "net", }, - { .procname = "core", }, - { }, -}; - static __net_init int sysctl_core_net_init(struct net *net) { struct ctl_table *tbl; @@ -223,8 +218,7 @@ static __net_init int 
sysctl_core_net_init(struct net *net) tbl[0].data = &net->core.sysctl_somaxconn; } - net->core.sysctl_hdr = register_net_sysctl_table(net, - net_core_path, tbl); + net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl); if (net->core.sysctl_hdr == NULL) goto err_reg; @@ -254,10 +248,7 @@ static __net_initdata struct pernet_operations sysctl_core_ops = { static __init int sysctl_core_init(void) { - static struct ctl_table empty[1]; - - register_sysctl_paths(net_core_path, empty); - register_net_sysctl_rotable(net_core_path, net_core_table); + register_net_sysctl(&init_net, "net/core", net_core_table); return register_pernet_subsys(&sysctl_core_ops); } diff --git a/net/core/utils.c b/net/core/utils.c index dc3c3faff2f..39895a65e54 100644 --- a/net/core/utils.c +++ b/net/core/utils.c @@ -58,14 +58,11 @@ __be32 in_aton(const char *str) int i; l = 0; - for (i = 0; i < 4; i++) - { + for (i = 0; i < 4; i++) { l <<= 8; - if (*str != '\0') - { + if (*str != '\0') { val = 0; - while (*str != '\0' && *str != '.' && *str != '\n') - { + while (*str != '\0' && *str != '.' && *str != '\n') { val *= 10; val += *str - '0'; str++; diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index d86053002c1..656c7c75b19 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -178,6 +178,7 @@ static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = { [DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)}, [DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)}, [DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED}, + [DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)}, }; static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = { @@ -703,6 +704,7 @@ static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb, ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP, pid, seq, flags); + dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0); out: return ret; } @@ -935,6 +937,7 @@ static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb, ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB, DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags); + dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0); return ret; } @@ -1205,13 +1208,15 @@ static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb, if (!app) goto nla_put_failure; - if (app_info_type) - NLA_PUT(skb, app_info_type, sizeof(info), &info); - - for (i = 0; i < app_count; i++) - NLA_PUT(skb, app_entry_type, sizeof(struct dcb_app), - &table[i]); + if (app_info_type && + nla_put(skb, app_info_type, sizeof(info), &info)) + goto nla_put_failure; + for (i = 0; i < app_count; i++) { + if (nla_put(skb, app_entry_type, sizeof(struct dcb_app), + &table[i])) + goto nla_put_failure; + } nla_nest_end(skb, app); } err = 0; @@ -1230,8 +1235,8 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) int dcbx; int err = -EMSGSIZE; - NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name); - + if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name)) + goto nla_put_failure; ieee = nla_nest_start(skb, DCB_ATTR_IEEE); if (!ieee) goto nla_put_failure; @@ -1239,15 +1244,28 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_getets) { struct ieee_ets ets; err = ops->ieee_getets(netdev, &ets); - if (!err) - NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets); + if (!err && + nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets)) + goto nla_put_failure; + } + + if (ops->ieee_getmaxrate) { + struct ieee_maxrate maxrate; + err 
= ops->ieee_getmaxrate(netdev, &maxrate); + if (!err) { + err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE, + sizeof(maxrate), &maxrate); + if (err) + goto nla_put_failure; + } } if (ops->ieee_getpfc) { struct ieee_pfc pfc; err = ops->ieee_getpfc(netdev, &pfc); - if (!err) - NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc); + if (!err && + nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc)) + goto nla_put_failure; } app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE); @@ -1278,15 +1296,17 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_peer_getets) { struct ieee_ets ets; err = ops->ieee_peer_getets(netdev, &ets); - if (!err) - NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets); + if (!err && + nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets)) + goto nla_put_failure; } if (ops->ieee_peer_getpfc) { struct ieee_pfc pfc; err = ops->ieee_peer_getpfc(netdev, &pfc); - if (!err) - NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc); + if (!err && + nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc)) + goto nla_put_failure; } if (ops->peer_getappinfo && ops->peer_getapptable) { @@ -1340,10 +1360,11 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev, ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); - NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_PGID, pgid); - NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map); - NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio); - NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct); + if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) || + nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) || + nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) || + nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct)) + goto nla_put_failure; nla_nest_end(skb, tc_nest); } @@ -1356,7 +1377,8 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev, else ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); - NLA_PUT_U8(skb, i, tc_pct); + if (nla_put_u8(skb, i, tc_pct)) + goto nla_put_failure; } nla_nest_end(skb, pg); return 0; @@ -1373,8 +1395,8 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) int dcbx, i, err = -EMSGSIZE; u8 value; - NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name); - + if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name)) + goto nla_put_failure; cee = nla_nest_start(skb, DCB_ATTR_CEE); if (!cee) goto nla_put_failure; @@ -1401,7 +1423,8 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value); - NLA_PUT_U8(skb, i, value); + if (nla_put_u8(skb, i, value)) + goto nla_put_failure; } nla_nest_end(skb, pfc_nest); } @@ -1454,8 +1477,9 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; i++) - if (!ops->getfeatcfg(netdev, i, &value)) - NLA_PUT_U8(skb, i, value); + if (!ops->getfeatcfg(netdev, i, &value) && + nla_put_u8(skb, i, value)) + goto nla_put_failure; nla_nest_end(skb, feat); } @@ -1464,15 +1488,17 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->cee_peer_getpg) { struct cee_pg pg; err = ops->cee_peer_getpg(netdev, &pg); - if (!err) - NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg); + if (!err && + nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg)) + goto nla_put_failure; } if (ops->cee_peer_getpfc) { struct cee_pfc pfc; err = 
ops->cee_peer_getpfc(netdev, &pfc); - if (!err) - NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc); + if (!err && + nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc)) + goto nla_put_failure; } if (ops->peer_getappinfo && ops->peer_getapptable) { @@ -1589,6 +1615,14 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb, goto err; } + if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) { + struct ieee_maxrate *maxrate = + nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]); + err = ops->ieee_setmaxrate(netdev, maxrate); + if (err) + goto err; + } + if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) { struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); err = ops->ieee_setpfc(netdev, pfc); diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index 70bfaf2d196..8c67bedf85b 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -100,7 +100,7 @@ static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc) DCCP_BUG_ON(hc->tx_t_ipi == 0); ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi, - hc->tx_s, (unsigned)(hc->tx_x >> 6)); + hc->tx_s, (unsigned int)(hc->tx_x >> 6)); } static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now) @@ -153,9 +153,9 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp) if (hc->tx_x != old_x) { ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, " - "X_recv=%u\n", (unsigned)(old_x >> 6), - (unsigned)(hc->tx_x >> 6), hc->tx_x_calc, - (unsigned)(hc->tx_x_recv >> 6)); + "X_recv=%u\n", (unsigned int)(old_x >> 6), + (unsigned int)(hc->tx_x >> 6), hc->tx_x_calc, + (unsigned int)(hc->tx_x_recv >> 6)); ccid3_update_send_interval(hc); } @@ -425,8 +425,8 @@ done_computing_x: "p=%u, X_calc=%u, X_recv=%u, X=%u\n", dccp_role(sk), sk, hc->tx_rtt, r_sample, hc->tx_s, hc->tx_p, hc->tx_x_calc, - (unsigned)(hc->tx_x_recv >> 6), - (unsigned)(hc->tx_x >> 6)); + (unsigned int)(hc->tx_x_recv >> 6), + (unsigned int)(hc->tx_x >> 6)); /* unschedule no feedback timer */ sk_stop_timer(sk, &hc->tx_no_feedback_timer); diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 29d6bb629a6..9040be049d8 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -75,7 +75,7 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo); * state, about 60 seconds */ /* RFC 1122, 4.2.3.1 initial RTO value */ -#define DCCP_TIMEOUT_INIT ((unsigned)(3 * HZ)) +#define DCCP_TIMEOUT_INIT ((unsigned int)(3 * HZ)) /* * The maximum back-off value for retransmissions. This is needed for @@ -84,7 +84,7 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo); * - feature-negotiation retransmission (sec. 6.6.3), * - Acks in client-PARTOPEN state (sec. 8.1.5). 
*/ -#define DCCP_RTO_MAX ((unsigned)(64 * HZ)) +#define DCCP_RTO_MAX ((unsigned int)(64 * HZ)) /* * RTT sampling: sanity bounds and fallback RTT value from RFC 4340, section 3.4 @@ -287,9 +287,9 @@ extern struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, extern int dccp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb); extern int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, - struct dccp_hdr *dh, unsigned len); + struct dccp_hdr *dh, unsigned int len); extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, - const struct dccp_hdr *dh, const unsigned len); + const struct dccp_hdr *dh, const unsigned int len); extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized); extern void dccp_destroy_sock(struct sock *sk); diff --git a/net/dccp/input.c b/net/dccp/input.c index 51d5fe5fffb..bc93a333931 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -285,7 +285,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb) } static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb, - const struct dccp_hdr *dh, const unsigned len) + const struct dccp_hdr *dh, const unsigned int len) { struct dccp_sock *dp = dccp_sk(sk); @@ -366,7 +366,7 @@ discard: } int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, - const struct dccp_hdr *dh, const unsigned len) + const struct dccp_hdr *dh, const unsigned int len) { if (dccp_check_seqno(sk, skb)) goto discard; @@ -388,7 +388,7 @@ EXPORT_SYMBOL_GPL(dccp_rcv_established); static int dccp_rcv_request_sent_state_process(struct sock *sk, struct sk_buff *skb, const struct dccp_hdr *dh, - const unsigned len) + const unsigned int len) { /* * Step 4: Prepare sequence numbers in REQUEST @@ -521,7 +521,7 @@ unable_to_proceed: static int dccp_rcv_respond_partopen_state_process(struct sock *sk, struct sk_buff *skb, const struct dccp_hdr *dh, - const unsigned len) + const unsigned int len) { struct dccp_sock *dp = dccp_sk(sk); u32 sample = dp->dccps_options_received.dccpor_timestamp_echo; @@ -572,7 +572,7 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk, } int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, - struct dccp_hdr *dh, unsigned len) + struct dccp_hdr *dh, unsigned int len) { struct dccp_sock *dp = dccp_sk(sk); struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index caf6e1734b6..07f5579ca75 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -574,6 +574,11 @@ static void dccp_v4_reqsk_destructor(struct request_sock *req) kfree(inet_rsk(req)->opt); } +void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req) +{ +} +EXPORT_SYMBOL(dccp_syn_ack_timeout); + static struct request_sock_ops dccp_request_sock_ops __read_mostly = { .family = PF_INET, .obj_size = sizeof(struct dccp_request_sock), @@ -581,6 +586,7 @@ static struct request_sock_ops dccp_request_sock_ops __read_mostly = { .send_ack = dccp_reqsk_send_ack, .destructor = dccp_v4_reqsk_destructor, .send_reset = dccp_v4_ctl_send_reset, + .syn_ack_timeout = dccp_syn_ack_timeout, }; int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 4dc588f520e..fa9512d86f3 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -343,6 +343,7 @@ static struct request_sock_ops dccp6_request_sock_ops = { .send_ack = dccp_reqsk_send_ack, .destructor = dccp_v6_reqsk_destructor, .send_reset = dccp_v6_ctl_send_reset, + .syn_ack_timeout = dccp_syn_ack_timeout, }; 
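/*
 * Illustrative note (assumption, the call site is not shown in this
 * diff): with .syn_ack_timeout present in both the IPv4 and IPv6
 * request_sock_ops, the shared request-sock timeout code can invoke the
 * hook unconditionally, roughly along the lines of
 *
 *	req->rsk_ops->syn_ack_timeout(parent, req);
 *
 * dccp_syn_ack_timeout() above is deliberately an empty stub.
 */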
static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) @@ -579,7 +580,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, newnp->pktoptions = NULL; if (ireq6->pktopts != NULL) { newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC); - kfree_skb(ireq6->pktopts); + consume_skb(ireq6->pktopts); ireq6->pktopts = NULL; if (newnp->pktoptions) skb_set_owner_r(newnp->pktoptions, newsk); diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 7065c0ae1e7..6c7c78b8394 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -848,7 +848,7 @@ int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, default: dccp_pr_debug("packet_type=%s\n", dccp_packet_name(dh->dccph_type)); - sk_eat_skb(sk, skb, 0); + sk_eat_skb(sk, skb, false); } verify_sock_status: if (sock_flag(sk, SOCK_DONE)) { @@ -905,7 +905,7 @@ verify_sock_status: len = skb->len; found_fin_ok: if (!(flags & MSG_PEEK)) - sk_eat_skb(sk, skb, 0); + sk_eat_skb(sk, skb, false); break; } while (1); out: diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c index 42348824ee3..607ab71b5a0 100644 --- a/net/dccp/sysctl.c +++ b/net/dccp/sysctl.c @@ -98,18 +98,11 @@ static struct ctl_table dccp_default_table[] = { { } }; -static struct ctl_path dccp_path[] = { - { .procname = "net", }, - { .procname = "dccp", }, - { .procname = "default", }, - { } -}; - static struct ctl_table_header *dccp_table_header; int __init dccp_sysctl_init(void) { - dccp_table_header = register_sysctl_paths(dccp_path, + dccp_table_header = register_net_sysctl(&init_net, "net/dccp/default", dccp_default_table); return dccp_table_header != NULL ? 0 : -ENOMEM; @@ -118,7 +111,7 @@ int __init dccp_sysctl_init(void) void dccp_sysctl_exit(void) { if (dccp_table_header != NULL) { - unregister_sysctl_table(dccp_table_header); + unregister_net_sysctl_table(dccp_table_header); dccp_table_header = NULL; } } diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 4136987d94d..2ba1a2814c2 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -250,7 +250,7 @@ static void dn_unhash_sock_bh(struct sock *sk) static struct hlist_head *listen_hash(struct sockaddr_dn *addr) { int i; - unsigned hash = addr->sdn_objnum; + unsigned int hash = addr->sdn_objnum; if (hash == 0) { hash = addr->sdn_objnamel; @@ -1844,9 +1844,9 @@ static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *que * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't * make much practical difference. 
*/ -unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu) +unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu) { - unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER; + unsigned int mss = 230 - DN_MAX_NSP_DATA_HEADER; if (dev) { struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); mtu -= LL_RESERVED_SPACE(dev); diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index c00e3077988..f3924ab1e01 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -209,15 +209,7 @@ static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms * struct dn_dev_sysctl_table *t; int i; -#define DN_CTL_PATH_DEV 3 - - struct ctl_path dn_ctl_path[] = { - { .procname = "net", }, - { .procname = "decnet", }, - { .procname = "conf", }, - { /* to be set */ }, - { }, - }; + char path[sizeof("net/decnet/conf/") + IFNAMSIZ]; t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL); if (t == NULL) @@ -228,15 +220,12 @@ static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms * t->dn_dev_vars[i].data = ((char *)parms) + offset; } - if (dev) { - dn_ctl_path[DN_CTL_PATH_DEV].procname = dev->name; - } else { - dn_ctl_path[DN_CTL_PATH_DEV].procname = parms->name; - } + snprintf(path, sizeof(path), "net/decnet/conf/%s", + dev? dev->name : parms->name); t->dn_dev_vars[0].extra1 = (void *)dev; - t->sysctl_header = register_sysctl_paths(dn_ctl_path, t->dn_dev_vars); + t->sysctl_header = register_net_sysctl(&init_net, path, t->dn_dev_vars); if (t->sysctl_header == NULL) kfree(t); else @@ -248,7 +237,7 @@ static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms) if (parms->sysctl) { struct dn_dev_sysctl_table *t = parms->sysctl; parms->sysctl = NULL; - unregister_sysctl_table(t->sysctl_header); + unregister_net_sysctl_table(t->sysctl_header); kfree(t); } } @@ -694,13 +683,13 @@ static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa, ifm->ifa_scope = ifa->ifa_scope; ifm->ifa_index = ifa->ifa_dev->dev->ifindex; - if (ifa->ifa_address) - NLA_PUT_LE16(skb, IFA_ADDRESS, ifa->ifa_address); - if (ifa->ifa_local) - NLA_PUT_LE16(skb, IFA_LOCAL, ifa->ifa_local); - if (ifa->ifa_label[0]) - NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label); - + if ((ifa->ifa_address && + nla_put_le16(skb, IFA_ADDRESS, ifa->ifa_address)) || + (ifa->ifa_local && + nla_put_le16(skb, IFA_LOCAL, ifa->ifa_local)) || + (ifa->ifa_label[0] && + nla_put_string(skb, IFA_LABEL, ifa->ifa_label))) + goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c index 9e885f180b6..7eaf9879972 100644 --- a/net/decnet/dn_fib.c +++ b/net/decnet/dn_fib.c @@ -302,11 +302,12 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct dn_kern_rta struct rtattr *attr = RTA_DATA(rta->rta_mx); while(RTA_OK(attr, attrlen)) { - unsigned flavour = attr->rta_type; + unsigned int flavour = attr->rta_type; + if (flavour) { if (flavour > RTAX_MAX) goto err_inval; - fi->fib_metrics[flavour-1] = *(unsigned*)RTA_DATA(attr); + fi->fib_metrics[flavour-1] = *(unsigned int *)RTA_DATA(attr); } attr = RTA_NEXT(attr, attrlen); } @@ -437,9 +438,8 @@ int dn_fib_semantic_match(int type, struct dn_fib_info *fi, const struct flowidn res->fi = NULL; return 1; default: - if (net_ratelimit()) - printk("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n", - type); + net_err_ratelimited("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n", + type); res->fi = NULL; return -EINVAL; } diff --git a/net/decnet/dn_neigh.c 
b/net/decnet/dn_neigh.c index ee7013f24fc..ac90f658586 100644 --- a/net/decnet/dn_neigh.c +++ b/net/decnet/dn_neigh.c @@ -162,8 +162,8 @@ static int dn_neigh_construct(struct neighbour *neigh) else if ((dev->type == ARPHRD_ETHER) || (dev->type == ARPHRD_LOOPBACK)) dn_dn2eth(neigh->ha, dn->addr); else { - if (net_ratelimit()) - printk(KERN_DEBUG "Trying to create neigh for hw %d\n", dev->type); + net_dbg_ratelimited("Trying to create neigh for hw %d\n", + dev->type); return -EINVAL; } @@ -236,15 +236,13 @@ static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb) if (skb_headroom(skb) < headroom) { struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom); if (skb2 == NULL) { - if (net_ratelimit()) - printk(KERN_CRIT "dn_long_output: no memory\n"); + net_crit_ratelimited("dn_long_output: no memory\n"); kfree_skb(skb); return -ENOBUFS; } kfree_skb(skb); skb = skb2; - if (net_ratelimit()) - printk(KERN_INFO "dn_long_output: Increasing headroom\n"); + net_info_ratelimited("dn_long_output: Increasing headroom\n"); } data = skb_push(skb, sizeof(struct dn_long_packet) + 3); @@ -281,15 +279,13 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb) if (skb_headroom(skb) < headroom) { struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom); if (skb2 == NULL) { - if (net_ratelimit()) - printk(KERN_CRIT "dn_short_output: no memory\n"); + net_crit_ratelimited("dn_short_output: no memory\n"); kfree_skb(skb); return -ENOBUFS; } kfree_skb(skb); skb = skb2; - if (net_ratelimit()) - printk(KERN_INFO "dn_short_output: Increasing headroom\n"); + net_info_ratelimited("dn_short_output: Increasing headroom\n"); } data = skb_push(skb, sizeof(struct dn_short_packet) + 2); @@ -322,15 +318,13 @@ static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb) if (skb_headroom(skb) < headroom) { struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom); if (skb2 == NULL) { - if (net_ratelimit()) - printk(KERN_CRIT "dn_phase3_output: no memory\n"); + net_crit_ratelimited("dn_phase3_output: no memory\n"); kfree_skb(skb); return -ENOBUFS; } kfree_skb(skb); skb = skb2; - if (net_ratelimit()) - printk(KERN_INFO "dn_phase3_output: Increasing headroom\n"); + net_info_ratelimited("dn_phase3_output: Increasing headroom\n"); } data = skb_push(skb, sizeof(struct dn_short_packet) + 2); diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c index f6544b2c91b..c344163e6ac 100644 --- a/net/decnet/dn_nsp_in.c +++ b/net/decnet/dn_nsp_in.c @@ -80,12 +80,15 @@ extern int decnet_log_martians; static void dn_log_martian(struct sk_buff *skb, const char *msg) { - if (decnet_log_martians && net_ratelimit()) { + if (decnet_log_martians) { char *devname = skb->dev ? 
skb->dev->name : "???"; struct dn_skb_cb *cb = DN_SKB_CB(skb); - printk(KERN_INFO "DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n", - msg, devname, le16_to_cpu(cb->src), le16_to_cpu(cb->dst), - le16_to_cpu(cb->src_port), le16_to_cpu(cb->dst_port)); + net_info_ratelimited("DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n", + msg, devname, + le16_to_cpu(cb->src), + le16_to_cpu(cb->dst), + le16_to_cpu(cb->src_port), + le16_to_cpu(cb->dst_port)); } } @@ -588,7 +591,7 @@ static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig number of warnings when compiling with -W --ANK */ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= - (unsigned)sk->sk_rcvbuf) { + (unsigned int)sk->sk_rcvbuf) { err = -ENOMEM; goto out; } diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c index e446e85e64a..564a6ad13ce 100644 --- a/net/decnet/dn_nsp_out.c +++ b/net/decnet/dn_nsp_out.c @@ -1,4 +1,3 @@ - /* * DECnet An implementation of the DECnet protocol suite for the LINUX * operating system. DECnet is implemented using the BSD Socket @@ -209,7 +208,7 @@ static void dn_nsp_rtt(struct sock *sk, long rtt) * * Returns: The number of times the packet has been sent previously */ -static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb, +static inline unsigned int dn_nsp_clone_and_send(struct sk_buff *skb, gfp_t gfp) { struct dn_skb_cb *cb = DN_SKB_CB(skb); @@ -240,7 +239,7 @@ void dn_nsp_output(struct sock *sk) { struct dn_scp *scp = DN_SK(sk); struct sk_buff *skb; - unsigned reduce_win = 0; + unsigned int reduce_win = 0; /* * First we check for otherdata/linkservice messages @@ -554,8 +553,8 @@ static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, unsigned char *msg; if ((dst == NULL) || (rem == 0)) { - if (net_ratelimit()) - printk(KERN_DEBUG "DECnet: dn_nsp_do_disc: BUG! Please report this to SteveW@ACM.org rem=%u dst=%p\n", le16_to_cpu(rem), dst); + net_dbg_ratelimited("DECnet: dn_nsp_do_disc: BUG! 
Please report this to SteveW@ACM.org rem=%u dst=%p\n", + le16_to_cpu(rem), dst); return; } diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 80a3de4906d..586302e557a 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -122,7 +122,7 @@ static int dn_route_input(struct sk_buff *); static void dn_run_flush(unsigned long dummy); static struct dn_rt_hash_bucket *dn_rt_hash_table; -static unsigned dn_rt_hash_mask; +static unsigned int dn_rt_hash_mask; static struct timer_list dn_route_timer; static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0); @@ -149,13 +149,13 @@ static void dn_dst_destroy(struct dst_entry *dst) dst_destroy_metrics_generic(dst); } -static __inline__ unsigned dn_hash(__le16 src, __le16 dst) +static __inline__ unsigned int dn_hash(__le16 src, __le16 dst) { __u16 tmp = (__u16 __force)(src ^ dst); tmp ^= (tmp >> 3); tmp ^= (tmp >> 5); tmp ^= (tmp >> 10); - return dn_rt_hash_mask & (unsigned)tmp; + return dn_rt_hash_mask & (unsigned int)tmp; } static inline void dnrt_free(struct dn_route *rt) @@ -297,7 +297,7 @@ static inline int compare_keys(struct flowidn *fl1, struct flowidn *fl2) (fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0; } -static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp) +static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp) { struct dn_route *rth; struct dn_route __rcu **rthp; @@ -748,8 +748,7 @@ static int dn_output(struct sk_buff *skb) dn_to_neigh_output); error: - if (net_ratelimit()) - printk(KERN_DEBUG "dn_output: This should not happen\n"); + net_dbg_ratelimited("dn_output: This should not happen\n"); kfree_skb(skb); @@ -807,12 +806,10 @@ drop: */ static int dn_rt_bug(struct sk_buff *skb) { - if (net_ratelimit()) { - struct dn_skb_cb *cb = DN_SKB_CB(skb); + struct dn_skb_cb *cb = DN_SKB_CB(skb); - printk(KERN_DEBUG "dn_rt_bug: skb from:%04x to:%04x\n", - le16_to_cpu(cb->src), le16_to_cpu(cb->dst)); - } + net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n", + le16_to_cpu(cb->src), le16_to_cpu(cb->dst)); kfree_skb(skb); @@ -934,8 +931,8 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o struct dn_route *rt = NULL; struct net_device *dev_out = NULL, *dev; struct neighbour *neigh = NULL; - unsigned hash; - unsigned flags = 0; + unsigned int hash; + unsigned int flags = 0; struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST }; int err; int free_res = 0; @@ -1209,7 +1206,7 @@ e_neighbour: */ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags) { - unsigned hash = dn_hash(flp->saddr, flp->daddr); + unsigned int hash = dn_hash(flp->saddr, flp->daddr); struct dn_route *rt = NULL; if (!(flags & MSG_TRYHARD)) { @@ -1275,7 +1272,7 @@ static int dn_route_input_slow(struct sk_buff *skb) struct net_device *out_dev = NULL; struct dn_dev *dn_db; struct neighbour *neigh = NULL; - unsigned hash; + unsigned int hash; int flags = 0; __le16 gateway = 0; __le16 local_src = 0; @@ -1327,9 +1324,7 @@ static int dn_route_input_slow(struct sk_buff *skb) out_dev = DN_FIB_RES_DEV(res); if (out_dev == NULL) { - if (net_ratelimit()) - printk(KERN_CRIT "Bug in dn_route_input_slow() " - "No output device\n"); + net_crit_ratelimited("Bug in dn_route_input_slow() No output device\n"); goto e_inval; } dev_hold(out_dev); @@ -1490,7 +1485,7 @@ static int dn_route_input(struct sk_buff *skb) { struct dn_route *rt; struct dn_skb_cb *cb = DN_SKB_CB(skb); - unsigned hash = dn_hash(cb->src, cb->dst); + unsigned int hash 
= dn_hash(cb->src, cb->dst); if (skb_dst(skb)) return 0; diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index f65c9ddaee4..e65f2c856e0 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c @@ -177,11 +177,11 @@ static int dn_fib_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, return 1; } -unsigned dnet_addr_type(__le16 addr) +unsigned int dnet_addr_type(__le16 addr) { struct flowidn fld = { .daddr = addr }; struct dn_fib_res res; - unsigned ret = RTN_UNICAST; + unsigned int ret = RTN_UNICAST; struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0); res.r = NULL; @@ -204,11 +204,11 @@ static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb, frh->src_len = r->src_len; frh->tos = 0; - if (r->dst_len) - NLA_PUT_LE16(skb, FRA_DST, r->dst); - if (r->src_len) - NLA_PUT_LE16(skb, FRA_SRC, r->src); - + if ((r->dst_len && + nla_put_le16(skb, FRA_DST, r->dst)) || + (r->src_len && + nla_put_le16(skb, FRA_SRC, r->src))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c index a9a62f225a6..650f3380c98 100644 --- a/net/decnet/dn_table.c +++ b/net/decnet/dn_table.c @@ -836,8 +836,8 @@ struct dn_fib_table *dn_fib_get_table(u32 n, int create) if (!create) return NULL; - if (in_interrupt() && net_ratelimit()) { - printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n"); + if (in_interrupt()) { + net_dbg_ratelimited("DECnet: BUG! Attempt to create routing table from interrupt\n"); return NULL; } diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index 1531135130d..44b890936fc 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -57,8 +57,7 @@ nlmsg_failure: if (skb) kfree_skb(skb); *errp = -ENOMEM; - if (net_ratelimit()) - printk(KERN_ERR "dn_rtmsg: error creating netlink message\n"); + net_err_ratelimited("dn_rtmsg: error creating netlink message\n"); return NULL; } diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c index 02e75d11cfb..a55eeccaa72 100644 --- a/net/decnet/sysctl_net_decnet.c +++ b/net/decnet/sysctl_net_decnet.c @@ -351,20 +351,14 @@ static ctl_table dn_table[] = { { } }; -static struct ctl_path dn_path[] = { - { .procname = "net", }, - { .procname = "decnet", }, - { } -}; - void dn_register_sysctl(void) { - dn_table_header = register_sysctl_paths(dn_path, dn_table); + dn_table_header = register_net_sysctl(&init_net, "net/decnet", dn_table); } void dn_unregister_sysctl(void) { - unregister_sysctl_table(dn_table_header); + unregister_net_sysctl_table(dn_table_header); } #else /* CONFIG_SYSCTL */ diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index c73bba326d7..6f70ea935b0 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c @@ -38,7 +38,7 @@ MODULE_DESCRIPTION("DNS Resolver"); MODULE_AUTHOR("Wang Lei"); MODULE_LICENSE("GPL"); -unsigned dns_resolver_debug; +unsigned int dns_resolver_debug; module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(debug, "DNS Resolver debugging mask"); diff --git a/net/dns_resolver/internal.h b/net/dns_resolver/internal.h index 189ca9e9b78..17c7886b5b3 100644 --- a/net/dns_resolver/internal.h +++ b/net/dns_resolver/internal.h @@ -31,7 +31,7 @@ extern const struct cred *dns_resolver_cache; /* * debug tracing */ -extern unsigned dns_resolver_debug; +extern unsigned int dns_resolver_debug; #define kdebug(FMT, ...) 
\ do { \ diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 56cf9b8e1c7..e32083d5d8f 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -66,7 +66,7 @@ static int dsa_slave_open(struct net_device *dev) if (!(master->flags & IFF_UP)) return -ENETDOWN; - if (compare_ether_addr(dev->dev_addr, master->dev_addr)) { + if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) { err = dev_uc_add(master, dev->dev_addr); if (err < 0) goto out; @@ -89,7 +89,7 @@ clear_allmulti: if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(master, -1); del_unicast: - if (compare_ether_addr(dev->dev_addr, master->dev_addr)) + if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) dev_uc_del(master, dev->dev_addr); out: return err; @@ -107,7 +107,7 @@ static int dsa_slave_close(struct net_device *dev) if (dev->flags & IFF_PROMISC) dev_set_promiscuity(master, -1); - if (compare_ether_addr(dev->dev_addr, master->dev_addr)) + if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) dev_uc_del(master, dev->dev_addr); return 0; @@ -146,13 +146,13 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a) if (!(dev->flags & IFF_UP)) goto out; - if (compare_ether_addr(addr->sa_data, master->dev_addr)) { + if (!ether_addr_equal(addr->sa_data, master->dev_addr)) { err = dev_uc_add(master, addr->sa_data); if (err < 0) return err; } - if (compare_ether_addr(dev->dev_addr, master->dev_addr)) + if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) dev_uc_del(master, dev->dev_addr); out: diff --git a/net/econet/Kconfig b/net/econet/Kconfig deleted file mode 100644 index 39a2d2975e0..00000000000 --- a/net/econet/Kconfig +++ /dev/null @@ -1,36 +0,0 @@ -# -# Acorn Econet/AUN protocols -# - -config ECONET - tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)" - depends on EXPERIMENTAL && INET - ---help--- - Econet is a fairly old and slow networking protocol mainly used by - Acorn computers to access file and print servers. It uses native - Econet network cards. AUN is an implementation of the higher level - parts of Econet that runs over ordinary Ethernet connections, on - top of the UDP packet protocol, which in turn runs on top of the - Internet protocol IP. - - If you say Y here, you can choose with the next two options whether - to send Econet/AUN traffic over a UDP Ethernet connection or over - a native Econet network card. - - To compile this driver as a module, choose M here: the module - will be called econet. - -config ECONET_AUNUDP - bool "AUN over UDP" - depends on ECONET - help - Say Y here if you want to send Econet/AUN traffic over a UDP - connection (UDP is a packet based protocol that runs on top of the - Internet protocol IP) using an ordinary Ethernet network card. - -config ECONET_NATIVE - bool "Native Econet" - depends on ECONET - help - Say Y here if you have a native Econet network card installed in - your computer. diff --git a/net/econet/Makefile b/net/econet/Makefile deleted file mode 100644 index 05fae8be2fe..00000000000 --- a/net/econet/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# -# Makefile for Econet support code. -# - -obj-$(CONFIG_ECONET) += econet.o - -econet-y := af_econet.o diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c deleted file mode 100644 index 71b5edcee40..00000000000 --- a/net/econet/af_econet.c +++ /dev/null @@ -1,1172 +0,0 @@ -/* - * An implementation of the Acorn Econet and AUN protocols. 
- * Philip Blundell <philb@gnu.org> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ - -#define pr_fmt(fmt) fmt - -#include <linux/module.h> - -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/socket.h> -#include <linux/sockios.h> -#include <linux/in.h> -#include <linux/errno.h> -#include <linux/interrupt.h> -#include <linux/if_ether.h> -#include <linux/netdevice.h> -#include <linux/inetdevice.h> -#include <linux/route.h> -#include <linux/inet.h> -#include <linux/etherdevice.h> -#include <linux/if_arp.h> -#include <linux/wireless.h> -#include <linux/skbuff.h> -#include <linux/udp.h> -#include <linux/slab.h> -#include <linux/vmalloc.h> -#include <net/sock.h> -#include <net/inet_common.h> -#include <linux/stat.h> -#include <linux/init.h> -#include <linux/if_ec.h> -#include <net/udp.h> -#include <net/ip.h> -#include <linux/spinlock.h> -#include <linux/rcupdate.h> -#include <linux/bitops.h> -#include <linux/mutex.h> - -#include <linux/uaccess.h> - -static const struct proto_ops econet_ops; -static struct hlist_head econet_sklist; -static DEFINE_SPINLOCK(econet_lock); -static DEFINE_MUTEX(econet_mutex); - -/* Since there are only 256 possible network numbers (or fewer, depends - how you count) it makes sense to use a simple lookup table. */ -static struct net_device *net2dev_map[256]; - -#define EC_PORT_IP 0xd2 - -#ifdef CONFIG_ECONET_AUNUDP -static DEFINE_SPINLOCK(aun_queue_lock); -static struct socket *udpsock; -#define AUN_PORT 0x8000 - -struct aunhdr { - unsigned char code; /* AUN magic protocol byte */ - unsigned char port; - unsigned char cb; - unsigned char pad; - unsigned long handle; -}; - -static unsigned long aun_seq; - -/* Queue of packets waiting to be transmitted. */ -static struct sk_buff_head aun_queue; -static struct timer_list ab_cleanup_timer; - -#endif /* CONFIG_ECONET_AUNUDP */ - -/* Per-packet information */ -struct ec_cb { - struct sockaddr_ec sec; - unsigned long cookie; /* Supplied by user. */ -#ifdef CONFIG_ECONET_AUNUDP - int done; - unsigned long seq; /* Sequencing */ - unsigned long timeout; /* Timeout */ - unsigned long start; /* jiffies */ -#endif -#ifdef CONFIG_ECONET_NATIVE - void (*sent)(struct sk_buff *, int result); -#endif -}; - -static void econet_remove_socket(struct hlist_head *list, struct sock *sk) -{ - spin_lock_bh(&econet_lock); - sk_del_node_init(sk); - spin_unlock_bh(&econet_lock); -} - -static void econet_insert_socket(struct hlist_head *list, struct sock *sk) -{ - spin_lock_bh(&econet_lock); - sk_add_node(sk, list); - spin_unlock_bh(&econet_lock); -} - -/* - * Pull a packet from our receive queue and hand it to the user. - * If necessary we block. - */ - -static int econet_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags) -{ - struct sock *sk = sock->sk; - struct sk_buff *skb; - size_t copied; - int err; - - msg->msg_namelen = sizeof(struct sockaddr_ec); - - mutex_lock(&econet_mutex); - - /* - * Call the generic datagram receiver. This handles all sorts - * of horrible races and re-entrancy so we can forget about it - * in the protocol layers. - * - * Now it will return ENETDOWN, if device have just gone down, - * but then it will block. 
- */ - - skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); - - /* - * An error occurred so return it. Because skb_recv_datagram() - * handles the blocking we don't see and worry about blocking - * retries. - */ - - if (skb == NULL) - goto out; - - /* - * You lose any data beyond the buffer you gave. If it worries a - * user program they can ask the device for its MTU anyway. - */ - - copied = skb->len; - if (copied > len) { - copied = len; - msg->msg_flags |= MSG_TRUNC; - } - - /* We can't use skb_copy_datagram here */ - err = memcpy_toiovec(msg->msg_iov, skb->data, copied); - if (err) - goto out_free; - sk->sk_stamp = skb->tstamp; - - if (msg->msg_name) - memcpy(msg->msg_name, skb->cb, msg->msg_namelen); - - /* - * Free or return the buffer as appropriate. Again this - * hides all the races and re-entrancy issues from us. - */ - err = copied; - -out_free: - skb_free_datagram(sk, skb); -out: - mutex_unlock(&econet_mutex); - return err; -} - -/* - * Bind an Econet socket. - */ - -static int econet_bind(struct socket *sock, struct sockaddr *uaddr, - int addr_len) -{ - struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr; - struct sock *sk; - struct econet_sock *eo; - - /* - * Check legality - */ - - if (addr_len < sizeof(struct sockaddr_ec) || - sec->sec_family != AF_ECONET) - return -EINVAL; - - mutex_lock(&econet_mutex); - - sk = sock->sk; - eo = ec_sk(sk); - - eo->cb = sec->cb; - eo->port = sec->port; - eo->station = sec->addr.station; - eo->net = sec->addr.net; - - mutex_unlock(&econet_mutex); - - return 0; -} - -#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE) -/* - * Queue a transmit result for the user to be told about. - */ - -static void tx_result(struct sock *sk, unsigned long cookie, int result) -{ - struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC); - struct ec_cb *eb; - struct sockaddr_ec *sec; - - if (skb == NULL) { - pr_debug("econet: memory squeeze, transmit result dropped\n"); - return; - } - - eb = (struct ec_cb *)&skb->cb; - sec = (struct sockaddr_ec *)&eb->sec; - memset(sec, 0, sizeof(struct sockaddr_ec)); - sec->cookie = cookie; - sec->type = ECTYPE_TRANSMIT_STATUS | result; - sec->sec_family = AF_ECONET; - - if (sock_queue_rcv_skb(sk, skb) < 0) - kfree_skb(skb); -} -#endif - -#ifdef CONFIG_ECONET_NATIVE -/* - * Called by the Econet hardware driver when a packet transmit - * has completed. Tell the user. - */ - -static void ec_tx_done(struct sk_buff *skb, int result) -{ - struct ec_cb *eb = (struct ec_cb *)&skb->cb; - tx_result(skb->sk, eb->cookie, result); -} -#endif - -/* - * Send a packet. We have to work out which device it's going out on - * and hence whether to use real Econet or the UDP emulation. - */ - -static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) -{ - struct sockaddr_ec *saddr = (struct sockaddr_ec *)msg->msg_name; - struct net_device *dev; - struct ec_addr addr; - int err; - unsigned char port, cb; -#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE) - struct sock *sk = sock->sk; - struct sk_buff *skb; - struct ec_cb *eb; -#endif -#ifdef CONFIG_ECONET_AUNUDP - struct msghdr udpmsg; - struct iovec iov[2]; - struct aunhdr ah; - struct sockaddr_in udpdest; - __kernel_size_t size; - mm_segment_t oldfs; - char *userbuf; -#endif - - /* - * Check the flags. - */ - - if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) - return -EINVAL; - - /* - * Get and verify the address. 
- */ - - mutex_lock(&econet_mutex); - - if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) { - mutex_unlock(&econet_mutex); - return -EINVAL; - } - addr.station = saddr->addr.station; - addr.net = saddr->addr.net; - port = saddr->port; - cb = saddr->cb; - - /* Look for a device with the right network number. */ - dev = net2dev_map[addr.net]; - - /* If not directly reachable, use some default */ - if (dev == NULL) { - dev = net2dev_map[0]; - /* No interfaces at all? */ - if (dev == NULL) { - mutex_unlock(&econet_mutex); - return -ENETDOWN; - } - } - - if (dev->type == ARPHRD_ECONET) { - /* Real hardware Econet. We're not worthy etc. */ -#ifdef CONFIG_ECONET_NATIVE - unsigned short proto = 0; - int hlen, tlen; - int res; - - if (len + 15 > dev->mtu) { - mutex_unlock(&econet_mutex); - return -EMSGSIZE; - } - - dev_hold(dev); - - hlen = LL_RESERVED_SPACE(dev); - tlen = dev->needed_tailroom; - skb = sock_alloc_send_skb(sk, len + hlen + tlen, - msg->msg_flags & MSG_DONTWAIT, &err); - if (skb == NULL) - goto out_unlock; - - skb_reserve(skb, hlen); - skb_reset_network_header(skb); - - eb = (struct ec_cb *)&skb->cb; - - eb->cookie = saddr->cookie; - eb->sec = *saddr; - eb->sent = ec_tx_done; - - err = -EINVAL; - res = dev_hard_header(skb, dev, ntohs(proto), &addr, NULL, len); - if (res < 0) - goto out_free; - if (res > 0) { - struct ec_framehdr *fh; - /* Poke in our control byte and - port number. Hack, hack. */ - fh = (struct ec_framehdr *)skb->data; - fh->cb = cb; - fh->port = port; - if (sock->type != SOCK_DGRAM) { - skb_reset_tail_pointer(skb); - skb->len = 0; - } - } - - /* Copy the data. Returns -EFAULT on error */ - err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); - skb->protocol = proto; - skb->dev = dev; - skb->priority = sk->sk_priority; - if (err) - goto out_free; - - err = -ENETDOWN; - if (!(dev->flags & IFF_UP)) - goto out_free; - - /* - * Now send it - */ - - dev_queue_xmit(skb); - dev_put(dev); - mutex_unlock(&econet_mutex); - return len; - -out_free: - kfree_skb(skb); -out_unlock: - if (dev) - dev_put(dev); -#else - err = -EPROTOTYPE; -#endif - mutex_unlock(&econet_mutex); - - return err; - } - -#ifdef CONFIG_ECONET_AUNUDP - /* AUN virtual Econet. */ - - if (udpsock == NULL) { - mutex_unlock(&econet_mutex); - return -ENETDOWN; /* No socket - can't send */ - } - - if (len > 32768) { - err = -E2BIG; - goto error; - } - - /* Make up a UDP datagram and hand it off to some higher intellect. */ - - memset(&udpdest, 0, sizeof(udpdest)); - udpdest.sin_family = AF_INET; - udpdest.sin_port = htons(AUN_PORT); - - /* At the moment we use the stupid Acorn scheme of Econet address - y.x maps to IP a.b.c.x. This should be replaced with something - more flexible and more aware of subnet masks. */ - { - struct in_device *idev; - unsigned long network = 0; - - rcu_read_lock(); - idev = __in_dev_get_rcu(dev); - if (idev) { - if (idev->ifa_list) - network = ntohl(idev->ifa_list->ifa_address) & - 0xffffff00; /* !!! 
*/ - } - rcu_read_unlock(); - udpdest.sin_addr.s_addr = htonl(network | addr.station); - } - - memset(&ah, 0, sizeof(ah)); - ah.port = port; - ah.cb = cb & 0x7f; - ah.code = 2; /* magic */ - - /* tack our header on the front of the iovec */ - size = sizeof(struct aunhdr); - iov[0].iov_base = (void *)&ah; - iov[0].iov_len = size; - - userbuf = vmalloc(len); - if (userbuf == NULL) { - err = -ENOMEM; - goto error; - } - - iov[1].iov_base = userbuf; - iov[1].iov_len = len; - err = memcpy_fromiovec(userbuf, msg->msg_iov, len); - if (err) - goto error_free_buf; - - /* Get a skbuff (no data, just holds our cb information) */ - skb = sock_alloc_send_skb(sk, 0, msg->msg_flags & MSG_DONTWAIT, &err); - if (skb == NULL) - goto error_free_buf; - - eb = (struct ec_cb *)&skb->cb; - - eb->cookie = saddr->cookie; - eb->timeout = 5 * HZ; - eb->start = jiffies; - ah.handle = aun_seq; - eb->seq = (aun_seq++); - eb->sec = *saddr; - - skb_queue_tail(&aun_queue, skb); - - udpmsg.msg_name = (void *)&udpdest; - udpmsg.msg_namelen = sizeof(udpdest); - udpmsg.msg_iov = &iov[0]; - udpmsg.msg_iovlen = 2; - udpmsg.msg_control = NULL; - udpmsg.msg_controllen = 0; - udpmsg.msg_flags = 0; - - oldfs = get_fs(); - set_fs(KERNEL_DS); /* More privs :-) */ - err = sock_sendmsg(udpsock, &udpmsg, size); - set_fs(oldfs); - -error_free_buf: - vfree(userbuf); -error: -#else - err = -EPROTOTYPE; -#endif - mutex_unlock(&econet_mutex); - - return err; -} - -/* - * Look up the address of a socket. - */ - -static int econet_getname(struct socket *sock, struct sockaddr *uaddr, - int *uaddr_len, int peer) -{ - struct sock *sk; - struct econet_sock *eo; - struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr; - - if (peer) - return -EOPNOTSUPP; - - memset(sec, 0, sizeof(*sec)); - mutex_lock(&econet_mutex); - - sk = sock->sk; - eo = ec_sk(sk); - - sec->sec_family = AF_ECONET; - sec->port = eo->port; - sec->addr.station = eo->station; - sec->addr.net = eo->net; - - mutex_unlock(&econet_mutex); - - *uaddr_len = sizeof(*sec); - return 0; -} - -static void econet_destroy_timer(unsigned long data) -{ - struct sock *sk = (struct sock *)data; - - if (!sk_has_allocations(sk)) { - sk_free(sk); - return; - } - - sk->sk_timer.expires = jiffies + 10 * HZ; - add_timer(&sk->sk_timer); - pr_debug("econet: socket destroy delayed\n"); -} - -/* - * Close an econet socket. - */ - -static int econet_release(struct socket *sock) -{ - struct sock *sk; - - mutex_lock(&econet_mutex); - - sk = sock->sk; - if (!sk) - goto out_unlock; - - econet_remove_socket(&econet_sklist, sk); - - /* - * Now the socket is dead. No more input will appear. - */ - - sk->sk_state_change(sk); /* It is useless. Just for sanity. */ - - sock_orphan(sk); - - /* Purge queues */ - - skb_queue_purge(&sk->sk_receive_queue); - - if (sk_has_allocations(sk)) { - sk->sk_timer.data = (unsigned long)sk; - sk->sk_timer.expires = jiffies + HZ; - sk->sk_timer.function = econet_destroy_timer; - add_timer(&sk->sk_timer); - - goto out_unlock; - } - - sk_free(sk); - -out_unlock: - mutex_unlock(&econet_mutex); - return 0; -} - -static struct proto econet_proto = { - .name = "ECONET", - .owner = THIS_MODULE, - .obj_size = sizeof(struct econet_sock), -}; - -/* - * Create an Econet socket - */ - -static int econet_create(struct net *net, struct socket *sock, int protocol, - int kern) -{ - struct sock *sk; - struct econet_sock *eo; - int err; - - if (!net_eq(net, &init_net)) - return -EAFNOSUPPORT; - - /* Econet only provides datagram services. 
*/ - if (sock->type != SOCK_DGRAM) - return -ESOCKTNOSUPPORT; - - sock->state = SS_UNCONNECTED; - - err = -ENOBUFS; - sk = sk_alloc(net, PF_ECONET, GFP_KERNEL, &econet_proto); - if (sk == NULL) - goto out; - - sk->sk_reuse = 1; - sock->ops = &econet_ops; - sock_init_data(sock, sk); - - eo = ec_sk(sk); - sock_reset_flag(sk, SOCK_ZAPPED); - sk->sk_family = PF_ECONET; - eo->num = protocol; - - econet_insert_socket(&econet_sklist, sk); - return 0; -out: - return err; -} - -/* - * Handle Econet specific ioctls - */ - -static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg) -{ - struct ifreq ifr; - struct ec_device *edev; - struct net_device *dev; - struct sockaddr_ec *sec; - int err; - - /* - * Fetch the caller's info block into kernel space - */ - - if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) - return -EFAULT; - - dev = dev_get_by_name(&init_net, ifr.ifr_name); - if (dev == NULL) - return -ENODEV; - - sec = (struct sockaddr_ec *)&ifr.ifr_addr; - - mutex_lock(&econet_mutex); - - err = 0; - switch (cmd) { - case SIOCSIFADDR: - if (!capable(CAP_NET_ADMIN)) { - err = -EPERM; - break; - } - - edev = dev->ec_ptr; - if (edev == NULL) { - /* Magic up a new one. */ - edev = kzalloc(sizeof(struct ec_device), GFP_KERNEL); - if (edev == NULL) { - err = -ENOMEM; - break; - } - dev->ec_ptr = edev; - } else - net2dev_map[edev->net] = NULL; - edev->station = sec->addr.station; - edev->net = sec->addr.net; - net2dev_map[sec->addr.net] = dev; - if (!net2dev_map[0]) - net2dev_map[0] = dev; - break; - - case SIOCGIFADDR: - edev = dev->ec_ptr; - if (edev == NULL) { - err = -ENODEV; - break; - } - memset(sec, 0, sizeof(struct sockaddr_ec)); - sec->addr.station = edev->station; - sec->addr.net = edev->net; - sec->sec_family = AF_ECONET; - dev_put(dev); - if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) - err = -EFAULT; - break; - - default: - err = -EINVAL; - break; - } - - mutex_unlock(&econet_mutex); - - dev_put(dev); - - return err; -} - -/* - * Handle generic ioctls - */ - -static int econet_ioctl(struct socket *sock, unsigned int cmd, - unsigned long arg) -{ - struct sock *sk = sock->sk; - void __user *argp = (void __user *)arg; - - switch (cmd) { - case SIOCGSTAMP: - return sock_get_timestamp(sk, argp); - - case SIOCGSTAMPNS: - return sock_get_timestampns(sk, argp); - - case SIOCSIFADDR: - case SIOCGIFADDR: - return ec_dev_ioctl(sock, cmd, argp); - - } - - return -ENOIOCTLCMD; -} - -static const struct net_proto_family econet_family_ops = { - .family = PF_ECONET, - .create = econet_create, - .owner = THIS_MODULE, -}; - -static const struct proto_ops econet_ops = { - .family = PF_ECONET, - .owner = THIS_MODULE, - .release = econet_release, - .bind = econet_bind, - .connect = sock_no_connect, - .socketpair = sock_no_socketpair, - .accept = sock_no_accept, - .getname = econet_getname, - .poll = datagram_poll, - .ioctl = econet_ioctl, - .listen = sock_no_listen, - .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, - .sendmsg = econet_sendmsg, - .recvmsg = econet_recvmsg, - .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, -}; - -#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE) -/* - * Find the listening socket, if any, for the given data. 
- */ - -static struct sock *ec_listening_socket(unsigned char port, unsigned char - station, unsigned char net) -{ - struct sock *sk; - struct hlist_node *node; - - spin_lock(&econet_lock); - sk_for_each(sk, node, &econet_sklist) { - struct econet_sock *opt = ec_sk(sk); - if ((opt->port == port || opt->port == 0) && - (opt->station == station || opt->station == 0) && - (opt->net == net || opt->net == 0)) { - sock_hold(sk); - goto found; - } - } - sk = NULL; -found: - spin_unlock(&econet_lock); - return sk; -} - -/* - * Queue a received packet for a socket. - */ - -static int ec_queue_packet(struct sock *sk, struct sk_buff *skb, - unsigned char stn, unsigned char net, - unsigned char cb, unsigned char port) -{ - struct ec_cb *eb = (struct ec_cb *)&skb->cb; - struct sockaddr_ec *sec = (struct sockaddr_ec *)&eb->sec; - - memset(sec, 0, sizeof(struct sockaddr_ec)); - sec->sec_family = AF_ECONET; - sec->type = ECTYPE_PACKET_RECEIVED; - sec->port = port; - sec->cb = cb; - sec->addr.net = net; - sec->addr.station = stn; - - return sock_queue_rcv_skb(sk, skb); -} -#endif - -#ifdef CONFIG_ECONET_AUNUDP -/* - * Send an AUN protocol response. - */ - -static void aun_send_response(__u32 addr, unsigned long seq, int code, int cb) -{ - struct sockaddr_in sin = { - .sin_family = AF_INET, - .sin_port = htons(AUN_PORT), - .sin_addr = {.s_addr = addr} - }; - struct aunhdr ah = {.code = code, .cb = cb, .handle = seq}; - struct kvec iov = {.iov_base = (void *)&ah, .iov_len = sizeof(ah)}; - struct msghdr udpmsg; - - udpmsg.msg_name = (void *)&sin; - udpmsg.msg_namelen = sizeof(sin); - udpmsg.msg_control = NULL; - udpmsg.msg_controllen = 0; - udpmsg.msg_flags = 0; - - kernel_sendmsg(udpsock, &udpmsg, &iov, 1, sizeof(ah)); -} - - -/* - * Handle incoming AUN packets. Work out if anybody wants them, - * and send positive or negative acknowledgements as appropriate. - */ - -static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len) -{ - struct iphdr *ip = ip_hdr(skb); - unsigned char stn = ntohl(ip->saddr) & 0xff; - struct dst_entry *dst = skb_dst(skb); - struct ec_device *edev = NULL; - struct sock *sk = NULL; - struct sk_buff *newskb; - - if (dst) - edev = dst->dev->ec_ptr; - - if (!edev) - goto bad; - - sk = ec_listening_socket(ah->port, stn, edev->net); - if (sk == NULL) - goto bad; /* Nobody wants it */ - - newskb = alloc_skb((len - sizeof(struct aunhdr) + 15) & ~15, - GFP_ATOMIC); - if (newskb == NULL) { - pr_debug("AUN: memory squeeze, dropping packet\n"); - /* Send nack and hope sender tries again */ - goto bad; - } - - memcpy(skb_put(newskb, len - sizeof(struct aunhdr)), (void *)(ah + 1), - len - sizeof(struct aunhdr)); - - if (ec_queue_packet(sk, newskb, stn, edev->net, ah->cb, ah->port)) { - /* Socket is bankrupt. */ - kfree_skb(newskb); - goto bad; - } - - aun_send_response(ip->saddr, ah->handle, 3, 0); - sock_put(sk); - return; - -bad: - aun_send_response(ip->saddr, ah->handle, 4, 0); - if (sk) - sock_put(sk); -} - -/* - * Handle incoming AUN transmit acknowledgements. If the sequence - * number matches something in our backlog then kill it and tell - * the user. If the remote took too long to reply then we may have - * dropped the packet already. 
- */ - -static void aun_tx_ack(unsigned long seq, int result) -{ - struct sk_buff *skb; - unsigned long flags; - struct ec_cb *eb; - - spin_lock_irqsave(&aun_queue_lock, flags); - skb_queue_walk(&aun_queue, skb) { - eb = (struct ec_cb *)&skb->cb; - if (eb->seq == seq) - goto foundit; - } - spin_unlock_irqrestore(&aun_queue_lock, flags); - pr_debug("AUN: unknown sequence %ld\n", seq); - return; - -foundit: - tx_result(skb->sk, eb->cookie, result); - skb_unlink(skb, &aun_queue); - spin_unlock_irqrestore(&aun_queue_lock, flags); - kfree_skb(skb); -} - -/* - * Deal with received AUN frames - sort out what type of thing it is - * and hand it to the right function. - */ - -static void aun_data_available(struct sock *sk, int slen) -{ - int err; - struct sk_buff *skb; - unsigned char *data; - struct aunhdr *ah; - size_t len; - - while ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) { - if (err == -EAGAIN) { - pr_err("AUN: no data available?!\n"); - return; - } - pr_debug("AUN: recvfrom() error %d\n", -err); - } - - data = skb_transport_header(skb) + sizeof(struct udphdr); - ah = (struct aunhdr *)data; - len = skb->len - sizeof(struct udphdr); - - switch (ah->code) { - case 2: - aun_incoming(skb, ah, len); - break; - case 3: - aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_OK); - break; - case 4: - aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_NOT_LISTENING); - break; - default: - pr_debug("AUN: unknown packet type: %d\n", data[0]); - } - - skb_free_datagram(sk, skb); -} - -/* - * Called by the timer to manage the AUN transmit queue. If a packet - * was sent to a dead or nonexistent host then we will never get an - * acknowledgement back. After a few seconds we need to spot this and - * drop the packet. - */ - -static void ab_cleanup(unsigned long h) -{ - struct sk_buff *skb, *n; - unsigned long flags; - - spin_lock_irqsave(&aun_queue_lock, flags); - skb_queue_walk_safe(&aun_queue, skb, n) { - struct ec_cb *eb = (struct ec_cb *)&skb->cb; - if ((jiffies - eb->start) > eb->timeout) { - tx_result(skb->sk, eb->cookie, - ECTYPE_TRANSMIT_NOT_PRESENT); - skb_unlink(skb, &aun_queue); - kfree_skb(skb); - } - } - spin_unlock_irqrestore(&aun_queue_lock, flags); - - mod_timer(&ab_cleanup_timer, jiffies + (HZ * 2)); -} - -static int __init aun_udp_initialise(void) -{ - int error; - struct sockaddr_in sin; - - skb_queue_head_init(&aun_queue); - setup_timer(&ab_cleanup_timer, ab_cleanup, 0); - ab_cleanup_timer.expires = jiffies + (HZ * 2); - add_timer(&ab_cleanup_timer); - - memset(&sin, 0, sizeof(sin)); - sin.sin_port = htons(AUN_PORT); - - /* We can count ourselves lucky Acorn machines are too dim to - speak IPv6. :-) */ - error = sock_create_kern(PF_INET, SOCK_DGRAM, 0, &udpsock); - if (error < 0) { - pr_err("AUN: socket error %d\n", -error); - return error; - } - - udpsock->sk->sk_reuse = 1; - udpsock->sk->sk_allocation = GFP_ATOMIC; /* we're going to call it - from interrupts */ - - error = udpsock->ops->bind(udpsock, (struct sockaddr *)&sin, - sizeof(sin)); - if (error < 0) { - pr_err("AUN: bind error %d\n", -error); - goto release; - } - - udpsock->sk->sk_data_ready = aun_data_available; - - return 0; - -release: - sock_release(udpsock); - udpsock = NULL; - return error; -} -#endif - -#ifdef CONFIG_ECONET_NATIVE - -/* - * Receive an Econet frame from a device. 
- */ - -static int econet_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, struct net_device *orig_dev) -{ - struct ec_framehdr *hdr; - struct sock *sk = NULL; - struct ec_device *edev = dev->ec_ptr; - - if (!net_eq(dev_net(dev), &init_net)) - goto drop; - - if (skb->pkt_type == PACKET_OTHERHOST) - goto drop; - - if (!edev) - goto drop; - - skb = skb_share_check(skb, GFP_ATOMIC); - if (skb == NULL) - return NET_RX_DROP; - - if (!pskb_may_pull(skb, sizeof(struct ec_framehdr))) - goto drop; - - hdr = (struct ec_framehdr *)skb->data; - - /* First check for encapsulated IP */ - if (hdr->port == EC_PORT_IP) { - skb->protocol = htons(ETH_P_IP); - skb_pull(skb, sizeof(struct ec_framehdr)); - netif_rx(skb); - return NET_RX_SUCCESS; - } - - sk = ec_listening_socket(hdr->port, hdr->src_stn, hdr->src_net); - if (!sk) - goto drop; - - if (ec_queue_packet(sk, skb, edev->net, hdr->src_stn, hdr->cb, - hdr->port)) - goto drop; - sock_put(sk); - return NET_RX_SUCCESS; - -drop: - if (sk) - sock_put(sk); - kfree_skb(skb); - return NET_RX_DROP; -} - -static struct packet_type econet_packet_type __read_mostly = { - .type = cpu_to_be16(ETH_P_ECONET), - .func = econet_rcv, -}; - -static void econet_hw_initialise(void) -{ - dev_add_pack(&econet_packet_type); -} - -#endif - -static int econet_notifier(struct notifier_block *this, unsigned long msg, - void *data) -{ - struct net_device *dev = data; - struct ec_device *edev; - - if (!net_eq(dev_net(dev), &init_net)) - return NOTIFY_DONE; - - switch (msg) { - case NETDEV_UNREGISTER: - /* A device has gone down - kill any data we hold for it. */ - edev = dev->ec_ptr; - if (edev) { - if (net2dev_map[0] == dev) - net2dev_map[0] = NULL; - net2dev_map[edev->net] = NULL; - kfree(edev); - dev->ec_ptr = NULL; - } - break; - } - - return NOTIFY_DONE; -} - -static struct notifier_block econet_netdev_notifier = { - .notifier_call = econet_notifier, -}; - -static void __exit econet_proto_exit(void) -{ -#ifdef CONFIG_ECONET_AUNUDP - del_timer(&ab_cleanup_timer); - if (udpsock) - sock_release(udpsock); -#endif - unregister_netdevice_notifier(&econet_netdev_notifier); -#ifdef CONFIG_ECONET_NATIVE - dev_remove_pack(&econet_packet_type); -#endif - sock_unregister(econet_family_ops.family); - proto_unregister(&econet_proto); -} - -static int __init econet_proto_init(void) -{ - int err = proto_register(&econet_proto, 0); - - if (err != 0) - goto out; - sock_register(&econet_family_ops); -#ifdef CONFIG_ECONET_AUNUDP - aun_udp_initialise(); -#endif -#ifdef CONFIG_ECONET_NATIVE - econet_hw_initialise(); -#endif - register_netdevice_notifier(&econet_netdev_notifier); -out: - return err; -} - -module_init(econet_proto_init); -module_exit(econet_proto_exit); - -MODULE_LICENSE("GPL"); -MODULE_ALIAS_NETPROTO(PF_ECONET); diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index bf10a311cf1..36e58800a9e 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -77,7 +77,7 @@ __setup("ether=", netdev_boot_setup); */ int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - const void *daddr, const void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned int len) { struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN); @@ -164,7 +164,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) eth = eth_hdr(skb); if (unlikely(is_multicast_ether_addr(eth->h_dest))) { - if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast)) + if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast)) skb->pkt_type = 
PACKET_BROADCAST; else skb->pkt_type = PACKET_MULTICAST; @@ -179,7 +179,8 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) */ else if (1 /*dev->flags&IFF_PROMISC */ ) { - if (unlikely(compare_ether_addr_64bits(eth->h_dest, dev->dev_addr))) + if (unlikely(!ether_addr_equal_64bits(eth->h_dest, + dev->dev_addr))) skb->pkt_type = PACKET_OTHERHOST; } diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index 840821b90bc..32eb4179e8f 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c @@ -196,7 +196,7 @@ lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr, static void lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr) { - memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ALEN); + memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ADDR_LEN); /* second bit-flip (Universe/Local) is done according RFC2464 */ ipaddr->s6_addr[8] ^= 0x02; } @@ -221,7 +221,7 @@ lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr, if (lladdr) lowpan_raw_dump_inline(__func__, "linklocal address", - lladdr, IEEE802154_ALEN); + lladdr, IEEE802154_ADDR_LEN); if (prefcount > 0) memcpy(ipaddr, prefix, prefcount); @@ -371,7 +371,7 @@ err: static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *_daddr, - const void *_saddr, unsigned len) + const void *_saddr, unsigned int len) { u8 tmp, iphc0, iphc1, *hc06_ptr; struct ipv6hdr *hdr; @@ -650,6 +650,53 @@ static void lowpan_fragment_timer_expired(unsigned long entry_addr) kfree(entry); } +static struct lowpan_fragment * +lowpan_alloc_new_frame(struct sk_buff *skb, u8 iphc0, u8 len, u8 tag) +{ + struct lowpan_fragment *frame; + + frame = kzalloc(sizeof(struct lowpan_fragment), + GFP_ATOMIC); + if (!frame) + goto frame_err; + + INIT_LIST_HEAD(&frame->list); + + frame->length = (iphc0 & 7) | (len << 3); + frame->tag = tag; + + /* allocate buffer for frame assembling */ + frame->skb = alloc_skb(frame->length + + sizeof(struct ipv6hdr), GFP_ATOMIC); + + if (!frame->skb) + goto skb_err; + + frame->skb->priority = skb->priority; + frame->skb->dev = skb->dev; + + /* reserve headroom for uncompressed ipv6 header */ + skb_reserve(frame->skb, sizeof(struct ipv6hdr)); + skb_put(frame->skb, frame->length); + + init_timer(&frame->timer); + /* time out is the same as for ipv6 - 60 sec */ + frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT; + frame->timer.data = (unsigned long)frame; + frame->timer.function = lowpan_fragment_timer_expired; + + add_timer(&frame->timer); + + list_add_tail(&frame->list, &lowpan_fragments); + + return frame; + +skb_err: + kfree(frame); +frame_err: + return NULL; +} + static int lowpan_process_data(struct sk_buff *skb) { @@ -692,41 +739,9 @@ lowpan_process_data(struct sk_buff *skb) /* alloc new frame structure */ if (!found) { - frame = kzalloc(sizeof(struct lowpan_fragment), - GFP_ATOMIC); + frame = lowpan_alloc_new_frame(skb, iphc0, len, tag); if (!frame) goto unlock_and_drop; - - INIT_LIST_HEAD(&frame->list); - - frame->length = (iphc0 & 7) | (len << 3); - frame->tag = tag; - - /* allocate buffer for frame assembling */ - frame->skb = alloc_skb(frame->length + - sizeof(struct ipv6hdr), GFP_ATOMIC); - - if (!frame->skb) { - kfree(frame); - goto unlock_and_drop; - } - - frame->skb->priority = skb->priority; - frame->skb->dev = skb->dev; - - /* reserve headroom for uncompressed ipv6 header */ - skb_reserve(frame->skb, sizeof(struct ipv6hdr)); - skb_put(frame->skb, frame->length); - - 
init_timer(&frame->timer); - /* time out is the same as for ipv6 - 60 sec */ - frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT; - frame->timer.data = (unsigned long)frame; - frame->timer.function = lowpan_fragment_timer_expired; - - add_timer(&frame->timer); - - list_add_tail(&frame->list, &lowpan_fragments); } if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h index aeff3f31048..8c2251fb0a3 100644 --- a/net/ieee802154/6lowpan.h +++ b/net/ieee802154/6lowpan.h @@ -53,9 +53,6 @@ #ifndef __6LOWPAN_H__ #define __6LOWPAN_H__ -/* need to know address length to manipulate with it */ -#define IEEE802154_ALEN 8 - #define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */ #define UIP_IPH_LEN 40 /* ipv6 fixed header size */ #define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */ diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c index 1b09eaabaac..6fbb2ad7bb6 100644 --- a/net/ieee802154/dgram.c +++ b/net/ieee802154/dgram.c @@ -44,8 +44,8 @@ struct dgram_sock { struct ieee802154_addr src_addr; struct ieee802154_addr dst_addr; - unsigned bound:1; - unsigned want_ack:1; + unsigned int bound:1; + unsigned int want_ack:1; }; static inline struct dgram_sock *dgram_sk(const struct sock *sk) @@ -206,7 +206,7 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t size) { struct net_device *dev; - unsigned mtu; + unsigned int mtu; struct sk_buff *skb; struct dgram_sock *ro = dgram_sk(sk); int hlen, tlen; diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c index adaf4621490..ca92587720f 100644 --- a/net/ieee802154/nl-mac.c +++ b/net/ieee802154/nl-mac.c @@ -63,15 +63,14 @@ int ieee802154_nl_assoc_indic(struct net_device *dev, if (!msg) return -ENOBUFS; - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); - NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); - NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr); - - NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, - addr->hwaddr); - - NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap); + if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || + nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || + nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, + dev->dev_addr) || + nla_put(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, + addr->hwaddr) || + nla_put_u8(msg, IEEE802154_ATTR_CAPABILITY, cap)) + goto nla_put_failure; return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); @@ -92,14 +91,13 @@ int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr, if (!msg) return -ENOBUFS; - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); - NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); - NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr); - - NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr); - NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); - + if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || + nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || + nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, + dev->dev_addr) || + nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) || + nla_put_u8(msg, IEEE802154_ATTR_STATUS, status)) + goto nla_put_failure; return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: @@ -119,20 +117,22 @@ int ieee802154_nl_disassoc_indic(struct net_device *dev, if (!msg) return -ENOBUFS; - 
NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); - NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); - NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr); - - if (addr->addr_type == IEEE802154_ADDR_LONG) - NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, - addr->hwaddr); - else - NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR, - addr->short_addr); - - NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason); - + if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || + nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || + nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, + dev->dev_addr)) + goto nla_put_failure; + if (addr->addr_type == IEEE802154_ADDR_LONG) { + if (nla_put(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, + addr->hwaddr)) + goto nla_put_failure; + } else { + if (nla_put_u16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR, + addr->short_addr)) + goto nla_put_failure; + } + if (nla_put_u8(msg, IEEE802154_ATTR_REASON, reason)) + goto nla_put_failure; return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: @@ -151,13 +151,12 @@ int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status) if (!msg) return -ENOBUFS; - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); - NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); - NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr); - - NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); - + if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || + nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || + nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, + dev->dev_addr) || + nla_put_u8(msg, IEEE802154_ATTR_STATUS, status)) + goto nla_put_failure; return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: @@ -177,13 +176,13 @@ int ieee802154_nl_beacon_indic(struct net_device *dev, if (!msg) return -ENOBUFS; - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); - NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); - NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr); - NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr); - NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid); - + if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || + nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || + nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, + dev->dev_addr) || + nla_put_u16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr) || + nla_put_u16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid)) + goto nla_put_failure; return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: @@ -204,19 +203,17 @@ int ieee802154_nl_scan_confirm(struct net_device *dev, if (!msg) return -ENOBUFS; - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); - NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); - NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr); - - NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); - NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type); - NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned); - NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page); - - if (edl) - NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl); - + if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || + nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || + nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, + dev->dev_addr) || + nla_put_u8(msg, 
IEEE802154_ATTR_STATUS, status) || + nla_put_u8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type) || + nla_put_u32(msg, IEEE802154_ATTR_CHANNELS, unscanned) || + nla_put_u8(msg, IEEE802154_ATTR_PAGE, page) || + (edl && + nla_put(msg, IEEE802154_ATTR_ED_LIST, 27, edl))) + goto nla_put_failure; return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: @@ -235,13 +232,12 @@ int ieee802154_nl_start_confirm(struct net_device *dev, u8 status) if (!msg) return -ENOBUFS; - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); - NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); - NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr); - - NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); - + if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || + nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || + nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, + dev->dev_addr) || + nla_put_u8(msg, IEEE802154_ATTR_STATUS, status)) + goto nla_put_failure; return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: @@ -266,16 +262,16 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid, phy = ieee802154_mlme_ops(dev)->get_phy(dev); BUG_ON(!phy); - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); - NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); - NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); - - NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr); - NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, - ieee802154_mlme_ops(dev)->get_short_addr(dev)); - NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID, - ieee802154_mlme_ops(dev)->get_pan_id(dev)); + if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || + nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || + nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || + nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, + dev->dev_addr) || + nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR, + ieee802154_mlme_ops(dev)->get_short_addr(dev)) || + nla_put_u16(msg, IEEE802154_ATTR_PAN_ID, + ieee802154_mlme_ops(dev)->get_pan_id(dev))) + goto nla_put_failure; wpan_phy_put(phy); return genlmsg_end(msg, hdr); diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c index c64a38d57aa..eed291626da 100644 --- a/net/ieee802154/nl-phy.c +++ b/net/ieee802154/nl-phy.c @@ -53,18 +53,18 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid, goto out; mutex_lock(&phy->pib_lock); - NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); - - NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, phy->current_page); - NLA_PUT_U8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel); + if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || + nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) || + nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel)) + goto nla_put_failure; for (i = 0; i < 32; i++) { if (phy->channels_supported[i]) buf[pages++] = phy->channels_supported[i] | (i << 27); } - if (pages) - NLA_PUT(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST, - pages * sizeof(uint32_t), buf); - + if (pages && + nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST, + pages * sizeof(uint32_t), buf)) + goto nla_put_failure; mutex_unlock(&phy->pib_lock); kfree(buf); return genlmsg_end(msg, hdr); @@ -179,6 +179,7 @@ static int ieee802154_add_iface(struct sk_buff *skb, const char *devname; int rc = -ENOBUFS; struct net_device *dev; + int type = __IEEE802154_DEV_INVALID; pr_debug("%s\n", 
__func__); @@ -221,7 +222,13 @@ static int ieee802154_add_iface(struct sk_buff *skb, goto nla_put_failure; } - dev = phy->add_iface(phy, devname); + if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) { + type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]); + if (type >= __IEEE802154_DEV_MAX) + return -EINVAL; + } + + dev = phy->add_iface(phy, devname, type); if (IS_ERR(dev)) { rc = PTR_ERR(dev); goto nla_put_failure; @@ -245,9 +252,9 @@ static int ieee802154_add_iface(struct sk_buff *skb, goto dev_unregister; } - NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); - + if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || + nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) + goto nla_put_failure; dev_put(dev); wpan_phy_put(phy); @@ -333,10 +340,9 @@ static int ieee802154_del_iface(struct sk_buff *skb, rtnl_unlock(); - - NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, name); - + if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || + nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, name)) + goto nla_put_failure; wpan_phy_put(phy); return ieee802154_nl_reply(msg, info); diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c index f96bae8fd33..50e823927d4 100644 --- a/net/ieee802154/raw.c +++ b/net/ieee802154/raw.c @@ -106,7 +106,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t size) { struct net_device *dev; - unsigned mtu; + unsigned int mtu; struct sk_buff *skb; int hlen, tlen; int err; diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index d183262943d..20f1cb5c8ab 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -262,8 +262,8 @@ config ARPD bool "IP: ARP daemon support" ---help--- The kernel maintains an internal cache which maps IP addresses to - hardware addresses on the local network, so that Ethernet/Token Ring/ - etc. frames are sent to the proper address on the physical networking + hardware addresses on the local network, so that Ethernet + frames are sent to the proper address on the physical networking layer. Normally, kernel uses the ARP protocol to resolve these mappings. 
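
As an illustrative aside (not taken from any file in this diff), the two conversions that recur throughout the hunks above can be sketched as follows. First, open-coded "if (net_ratelimit()) printk(KERN_<LEVEL> ...)" pairs are replaced by the net_<level>_ratelimited() helpers. Second, the NLA_PUT_*() macros, which jumped to a local nla_put_failure label implicitly, are replaced by nla_put_*() calls whose return values are checked explicitly. The function names and attribute numbers below are hypothetical placeholders; only the helper APIs themselves (net_dbg_ratelimited(), nla_put_string(), nla_put_u32()) come from the patch.

#include <linux/errno.h>
#include <linux/net.h>		/* net_dbg_ratelimited() and friends */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/netlink.h>	/* nla_put_string(), nla_put_u32() */

/* Sketch 1: ratelimited logging after the conversion. This stands in for
 * the old "if (net_ratelimit()) printk(KERN_DEBUG ...)" pattern; the
 * helper hides both the ratelimit test and the log level.
 */
static int example_check_headroom(struct sk_buff *skb)
{
	if (skb_headroom(skb) < 16) {
		net_dbg_ratelimited("example: short headroom %u\n",
				    skb_headroom(skb));
		return -ENOBUFS;
	}
	return 0;
}

/* Sketch 2: netlink attribute fill after the conversion. nla_put_*()
 * returns a nonzero error (-EMSGSIZE) on failure, so the calls are
 * chained with || and a single explicit goto replaces the jump that
 * the old NLA_PUT_*() macros performed behind the scenes. The
 * attribute types (1 and 2) are placeholders, not real attributes.
 */
static int example_fill_info(struct sk_buff *msg, const struct net_device *dev)
{
	if (nla_put_string(msg, 1, dev->name) ||
	    nla_put_u32(msg, 2, dev->ifindex))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
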
@@ -312,7 +312,7 @@ config SYN_COOKIES config INET_AH tristate "IP: AH transformation" - select XFRM + select XFRM_ALGO select CRYPTO select CRYPTO_HMAC select CRYPTO_MD5 @@ -324,7 +324,7 @@ config INET_AH config INET_ESP tristate "IP: ESP transformation" - select XFRM + select XFRM_ALGO select CRYPTO select CRYPTO_AUTHENC select CRYPTO_HMAC diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 10e3751466b..c8f7aee587d 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -350,7 +350,7 @@ lookup_protocol: err = 0; sk->sk_no_check = answer_no_check; if (INET_PROTOSW_REUSE & answer_flags) - sk->sk_reuse = 1; + sk->sk_reuse = SK_CAN_REUSE; inet = inet_sk(sk); inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; @@ -541,7 +541,7 @@ out: } EXPORT_SYMBOL(inet_bind); -int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr, +int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index fd508b52601..e8f2617ecd4 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -77,7 +77,7 @@ static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash, static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr) { - unsigned char * optptr = (unsigned char*)(iph+1); + unsigned char *optptr = (unsigned char *)(iph+1); int l = iph->ihl*4 - sizeof(struct iphdr); int optlen; @@ -406,8 +406,8 @@ static void ah4_err(struct sk_buff *skb, u32 info) ah->spi, IPPROTO_AH, AF_INET); if (!x) return; - printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n", - ntohl(ah->spi), ntohl(iph->daddr)); + pr_debug("pmtu discovery on SA AH/%08x/%08x\n", + ntohl(ah->spi), ntohl(iph->daddr)); xfrm_state_put(x); } diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 18d9b81ecb1..cda37be02f8 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -73,6 +73,8 @@ * Jesper D. Brouer: Proxy ARP PVLAN RFC 3069 support. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> @@ -89,7 +91,6 @@ #include <linux/etherdevice.h> #include <linux/fddidevice.h> #include <linux/if_arp.h> -#include <linux/trdevice.h> #include <linux/skbuff.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> @@ -193,9 +194,6 @@ int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir) case ARPHRD_IEEE802: ip_eth_mc_map(addr, haddr); return 0; - case ARPHRD_IEEE802_TR: - ip_tr_mc_map(addr, haddr); - return 0; case ARPHRD_INFINIBAND: ip_ib_mc_map(addr, dev->broadcast, haddr); return 0; @@ -364,8 +362,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb) probes -= neigh->parms->ucast_probes; if (probes < 0) { if (!(neigh->nud_state & NUD_VALID)) - printk(KERN_DEBUG - "trying to ucast probe in NUD_INVALID\n"); + pr_debug("trying to ucast probe in NUD_INVALID\n"); dst_ha = neigh->ha; read_lock_bh(&neigh->lock); } else { @@ -452,7 +449,7 @@ static int arp_set_predefined(int addr_hint, unsigned char *haddr, { switch (addr_hint) { case RTN_LOCAL: - printk(KERN_DEBUG "ARP: arp called for own IP address\n"); + pr_debug("arp called for own IP address\n"); memcpy(haddr, dev->dev_addr, dev->addr_len); return 1; case RTN_MULTICAST: @@ -473,7 +470,7 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb) struct neighbour *n; if (!skb_dst(skb)) { - printk(KERN_DEBUG "arp_find is called with dst==NULL\n"); + pr_debug("arp_find is called with dst==NULL\n"); kfree_skb(skb); return 1; } @@ -648,12 +645,6 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, arp->ar_pro = htons(ETH_P_IP); break; #endif -#if IS_ENABLED(CONFIG_TR) - case ARPHRD_IEEE802_TR: - arp->ar_hrd = htons(ARPHRD_IEEE802); - arp->ar_pro = htons(ETH_P_IP); - break; -#endif } arp->ar_hln = dev->addr_len; @@ -751,11 +742,10 @@ static int arp_process(struct sk_buff *skb) goto out; break; case ARPHRD_ETHER: - case ARPHRD_IEEE802_TR: case ARPHRD_FDDI: case ARPHRD_IEEE802: /* - * ETHERNET, Token Ring and Fibre Channel (which are IEEE 802 + * ETHERNET, and Fibre Channel (which are IEEE 802 * devices, according to RFC 2625) devices will accept ARP * hardware types of either 1 (Ethernet) or 6 (IEEE 802.2). * This is the case also of FDDI, where the RFC 1390 says that @@ -1059,7 +1049,7 @@ static int arp_req_set(struct net *net, struct arpreq *r, neigh = __neigh_lookup_errno(&arp_tbl, &ip, dev); err = PTR_ERR(neigh); if (!IS_ERR(neigh)) { - unsigned state = NUD_STALE; + unsigned int state = NUD_STALE; if (r->arp_flags & ATF_PERM) state = NUD_PERMANENT; err = neigh_update(neigh, (r->arp_flags & ATF_COM) ? @@ -1071,7 +1061,7 @@ static int arp_req_set(struct net *net, struct arpreq *r, return err; } -static unsigned arp_state_to_flags(struct neighbour *neigh) +static unsigned int arp_state_to_flags(struct neighbour *neigh) { if (neigh->nud_state&NUD_PERMANENT) return ATF_PERM | ATF_COM; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 6e447ff94df..10e15a144e9 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -217,8 +217,7 @@ void in_dev_finish_destroy(struct in_device *idev) WARN_ON(idev->ifa_list); WARN_ON(idev->mc_list); #ifdef NET_REFCNT_DEBUG - printk(KERN_DEBUG "in_dev_finish_destroy: %p=%s\n", - idev, dev ? dev->name : "NIL"); + pr_debug("%s: %p=%s\n", __func__, idev, dev ? 
dev->name : "NIL"); #endif dev_put(dev); if (!idev->dead) @@ -1125,7 +1124,7 @@ skip: } } -static inline bool inetdev_valid_mtu(unsigned mtu) +static inline bool inetdev_valid_mtu(unsigned int mtu) { return mtu >= 68; } @@ -1174,7 +1173,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, switch (event) { case NETDEV_REGISTER: - printk(KERN_DEBUG "inetdev_event: bug\n"); + pr_debug("%s: bug\n", __func__); RCU_INIT_POINTER(dev->ip_ptr, NULL); break; case NETDEV_UP: @@ -1266,17 +1265,15 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa, ifm->ifa_scope = ifa->ifa_scope; ifm->ifa_index = ifa->ifa_dev->dev->ifindex; - if (ifa->ifa_address) - NLA_PUT_BE32(skb, IFA_ADDRESS, ifa->ifa_address); - - if (ifa->ifa_local) - NLA_PUT_BE32(skb, IFA_LOCAL, ifa->ifa_local); - - if (ifa->ifa_broadcast) - NLA_PUT_BE32(skb, IFA_BROADCAST, ifa->ifa_broadcast); - - if (ifa->ifa_label[0]) - NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label); + if ((ifa->ifa_address && + nla_put_be32(skb, IFA_ADDRESS, ifa->ifa_address)) || + (ifa->ifa_local && + nla_put_be32(skb, IFA_LOCAL, ifa->ifa_local)) || + (ifa->ifa_broadcast && + nla_put_be32(skb, IFA_BROADCAST, ifa->ifa_broadcast)) || + (ifa->ifa_label[0] && + nla_put_string(skb, IFA_LABEL, ifa->ifa_label))) + goto nla_put_failure; return nlmsg_end(skb, nlh); @@ -1587,7 +1584,6 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write, static struct devinet_sysctl_table { struct ctl_table_header *sysctl_header; struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX]; - char *dev_name; } devinet_sysctl = { .devinet_vars = { DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding", @@ -1629,16 +1625,7 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name, { int i; struct devinet_sysctl_table *t; - -#define DEVINET_CTL_PATH_DEV 3 - - struct ctl_path devinet_ctl_path[] = { - { .procname = "net", }, - { .procname = "ipv4", }, - { .procname = "conf", }, - { /* to be set */ }, - { }, - }; + char path[sizeof("net/ipv4/conf/") + IFNAMSIZ]; t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL); if (!t) @@ -1650,27 +1637,15 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name, t->devinet_vars[i].extra2 = net; } - /* - * Make a copy of dev_name, because '.procname' is regarded as const - * by sysctl and we wouldn't want anyone to change it under our feet - * (see SIOCSIFNAME). 
- */ - t->dev_name = kstrdup(dev_name, GFP_KERNEL); - if (!t->dev_name) - goto free; - - devinet_ctl_path[DEVINET_CTL_PATH_DEV].procname = t->dev_name; + snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name); - t->sysctl_header = register_net_sysctl_table(net, devinet_ctl_path, - t->devinet_vars); + t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars); if (!t->sysctl_header) - goto free_procname; + goto free; p->sysctl = t; return 0; -free_procname: - kfree(t->dev_name); free: kfree(t); out: @@ -1686,7 +1661,6 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf) cnf->sysctl = NULL; unregister_net_sysctl_table(t->sysctl_header); - kfree(t->dev_name); kfree(t); } @@ -1716,12 +1690,6 @@ static struct ctl_table ctl_forward_entry[] = { }, { }, }; - -static __net_initdata struct ctl_path net_ipv4_path[] = { - { .procname = "net", }, - { .procname = "ipv4", }, - { }, -}; #endif static __net_init int devinet_init_net(struct net *net) @@ -1767,7 +1735,7 @@ static __net_init int devinet_init_net(struct net *net) goto err_reg_dflt; err = -ENOMEM; - forw_hdr = register_net_sysctl_table(net, net_ipv4_path, tbl); + forw_hdr = register_net_sysctl(net, "net/ipv4", tbl); if (forw_hdr == NULL) goto err_reg_ctl; net->ipv4.forw_hdr = forw_hdr; diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index cbe3a68507c..3854411fa37 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -136,13 +136,13 @@ static void fib_flush(struct net *net) * Find address type as if only "dev" was present in the system. If * on_dev is NULL then all interfaces are taken into consideration. */ -static inline unsigned __inet_dev_addr_type(struct net *net, - const struct net_device *dev, - __be32 addr) +static inline unsigned int __inet_dev_addr_type(struct net *net, + const struct net_device *dev, + __be32 addr) { struct flowi4 fl4 = { .daddr = addr }; struct fib_result res; - unsigned ret = RTN_BROADCAST; + unsigned int ret = RTN_BROADCAST; struct fib_table *local_table; if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr)) @@ -740,7 +740,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim) #define BRD_OK 2 #define BRD0_OK 4 #define BRD1_OK 8 - unsigned ok = 0; + unsigned int ok = 0; int subnet = 0; /* Primary network */ int gone = 1; /* Address is missing */ int same_prefsrc = 0; /* Another primary with same IP */ diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 799fc790b3c..2d043f71ef7 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -221,15 +221,15 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb, frh->src_len = rule4->src_len; frh->tos = rule4->tos; - if (rule4->dst_len) - NLA_PUT_BE32(skb, FRA_DST, rule4->dst); - - if (rule4->src_len) - NLA_PUT_BE32(skb, FRA_SRC, rule4->src); - + if ((rule4->dst_len && + nla_put_be32(skb, FRA_DST, rule4->dst)) || + (rule4->src_len && + nla_put_be32(skb, FRA_SRC, rule4->src))) + goto nla_put_failure; #ifdef CONFIG_IP_ROUTE_CLASSID - if (rule4->tclassid) - NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid); + if (rule4->tclassid && + nla_put_u32(skb, FRA_FLOW, rule4->tclassid)) + goto nla_put_failure; #endif return 0; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 5063fa38ac7..a8bdf740543 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -931,33 +931,36 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, rtm->rtm_table = tb_id; else rtm->rtm_table = RT_TABLE_COMPAT; - NLA_PUT_U32(skb, RTA_TABLE, tb_id); + if 
(nla_put_u32(skb, RTA_TABLE, tb_id)) + goto nla_put_failure; rtm->rtm_type = type; rtm->rtm_flags = fi->fib_flags; rtm->rtm_scope = fi->fib_scope; rtm->rtm_protocol = fi->fib_protocol; - if (rtm->rtm_dst_len) - NLA_PUT_BE32(skb, RTA_DST, dst); - - if (fi->fib_priority) - NLA_PUT_U32(skb, RTA_PRIORITY, fi->fib_priority); - + if (rtm->rtm_dst_len && + nla_put_be32(skb, RTA_DST, dst)) + goto nla_put_failure; + if (fi->fib_priority && + nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority)) + goto nla_put_failure; if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0) goto nla_put_failure; - if (fi->fib_prefsrc) - NLA_PUT_BE32(skb, RTA_PREFSRC, fi->fib_prefsrc); - + if (fi->fib_prefsrc && + nla_put_be32(skb, RTA_PREFSRC, fi->fib_prefsrc)) + goto nla_put_failure; if (fi->fib_nhs == 1) { - if (fi->fib_nh->nh_gw) - NLA_PUT_BE32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw); - - if (fi->fib_nh->nh_oif) - NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif); + if (fi->fib_nh->nh_gw && + nla_put_be32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw)) + goto nla_put_failure; + if (fi->fib_nh->nh_oif && + nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif)) + goto nla_put_failure; #ifdef CONFIG_IP_ROUTE_CLASSID - if (fi->fib_nh[0].nh_tclassid) - NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid); + if (fi->fib_nh[0].nh_tclassid && + nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid)) + goto nla_put_failure; #endif } #ifdef CONFIG_IP_ROUTE_MULTIPATH @@ -978,11 +981,13 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, rtnh->rtnh_hops = nh->nh_weight - 1; rtnh->rtnh_ifindex = nh->nh_oif; - if (nh->nh_gw) - NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw); + if (nh->nh_gw && + nla_put_be32(skb, RTA_GATEWAY, nh->nh_gw)) + goto nla_put_failure; #ifdef CONFIG_IP_ROUTE_CLASSID - if (nh->nh_tclassid) - NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid); + if (nh->nh_tclassid && + nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid)) + goto nla_put_failure; #endif /* length of rtnetlink header + attributes */ rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 2cb2bf84564..c75efbdc71c 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -713,11 +713,10 @@ static void icmp_unreach(struct sk_buff *skb) if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses && inet_addr_type(net, iph->daddr) == RTN_BROADCAST) { - if (net_ratelimit()) - pr_warn("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n", - &ip_hdr(skb)->saddr, - icmph->type, icmph->code, - &iph->daddr, skb->dev->name); + net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n", + &ip_hdr(skb)->saddr, + icmph->type, icmph->code, + &iph->daddr, skb->dev->name); goto out; } @@ -906,8 +905,7 @@ out_err: static void icmp_address(struct sk_buff *skb) { #if 0 - if (net_ratelimit()) - printk(KERN_DEBUG "a guy asks for address mask. Who is it?\n"); + net_dbg_ratelimited("a guy asks for address mask. 
Who is it?\n"); #endif } @@ -943,10 +941,10 @@ static void icmp_address_reply(struct sk_buff *skb) inet_ifa_match(ip_hdr(skb)->saddr, ifa)) break; } - if (!ifa && net_ratelimit()) { - pr_info("Wrong address mask %pI4 from %s/%pI4\n", - mp, dev->name, &ip_hdr(skb)->saddr); - } + if (!ifa) + net_info_ratelimited("Wrong address mask %pI4 from %s/%pI4\n", + mp, + dev->name, &ip_hdr(skb)->saddr); } } diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 5dfecfd7d5e..6699f23e6f5 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -344,10 +344,10 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) pip->protocol = IPPROTO_IGMP; pip->tot_len = 0; /* filled in later */ ip_select_ident(pip, &rt->dst, NULL); - ((u8*)&pip[1])[0] = IPOPT_RA; - ((u8*)&pip[1])[1] = 4; - ((u8*)&pip[1])[2] = 0; - ((u8*)&pip[1])[3] = 0; + ((u8 *)&pip[1])[0] = IPOPT_RA; + ((u8 *)&pip[1])[1] = 4; + ((u8 *)&pip[1])[2] = 0; + ((u8 *)&pip[1])[3] = 0; skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4; skb_put(skb, sizeof(*pig)); @@ -688,10 +688,10 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, iph->saddr = fl4.saddr; iph->protocol = IPPROTO_IGMP; ip_select_ident(iph, &rt->dst, NULL); - ((u8*)&iph[1])[0] = IPOPT_RA; - ((u8*)&iph[1])[1] = 4; - ((u8*)&iph[1])[2] = 0; - ((u8*)&iph[1])[3] = 0; + ((u8 *)&iph[1])[0] = IPOPT_RA; + ((u8 *)&iph[1])[1] = 4; + ((u8 *)&iph[1])[2] = 0; + ((u8 *)&iph[1])[3] = 0; ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr)); ih->type = type; @@ -774,7 +774,7 @@ static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs) if (psf->sf_count[MCAST_INCLUDE] || pmc->sfcount[MCAST_EXCLUDE] != psf->sf_count[MCAST_EXCLUDE]) - continue; + break; if (srcs[i] == psf->sf_inaddr) { scount++; break; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 19d66cefd7d..95e61596e60 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -42,7 +42,8 @@ EXPORT_SYMBOL(sysctl_local_reserved_ports); void inet_get_local_port_range(int *low, int *high) { - unsigned seq; + unsigned int seq; + do { seq = read_seqbegin(&sysctl_local_ports.lock); @@ -53,7 +54,7 @@ void inet_get_local_port_range(int *low, int *high) EXPORT_SYMBOL(inet_get_local_port_range); int inet_csk_bind_conflict(const struct sock *sk, - const struct inet_bind_bucket *tb) + const struct inet_bind_bucket *tb, bool relax) { struct sock *sk2; struct hlist_node *node; @@ -79,6 +80,14 @@ int inet_csk_bind_conflict(const struct sock *sk, sk2_rcv_saddr == sk_rcv_saddr(sk)) break; } + if (!relax && reuse && sk2->sk_reuse && + sk2->sk_state != TCP_LISTEN) { + const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); + + if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || + sk2_rcv_saddr == sk_rcv_saddr(sk)) + break; + } } } return node != NULL; @@ -122,12 +131,13 @@ again: (tb->num_owners < smallest_size || smallest_size == -1)) { smallest_size = tb->num_owners; smallest_rover = rover; - if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { + if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 && + !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) { snum = smallest_rover; goto tb_found; } } - if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { + if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) { snum = rover; goto tb_found; } @@ -172,18 +182,22 @@ have_snum: goto tb_not_found; tb_found: if (!hlist_empty(&tb->owners)) { + if (sk->sk_reuse == SK_FORCE_REUSE) + goto success; + if (tb->fastreuse > 0 
&& sk->sk_reuse && sk->sk_state != TCP_LISTEN && smallest_size == -1) { goto success; } else { ret = 1; - if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { + if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) { if (sk->sk_reuse && sk->sk_state != TCP_LISTEN && smallest_size != -1 && --attempts >= 0) { spin_unlock(&head->lock); goto again; } + goto fail_unlock; } } @@ -514,7 +528,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent, /* Normally all the openreqs are young and become mature * (i.e. converted to established socket) for first timeout. - * If synack was not acknowledged for 3 seconds, it means + * If synack was not acknowledged for 1 second, it means * one of the following things: synack was lost, ack was lost, * rtt is high or nobody planned to ack (i.e. synflood). * When server is a bit loaded, queue is populated with old @@ -555,8 +569,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent, syn_ack_recalc(req, thresh, max_retries, queue->rskq_defer_accept, &expire, &resend); - if (req->rsk_ops->syn_ack_timeout) - req->rsk_ops->syn_ack_timeout(parent, req); + req->rsk_ops->syn_ack_timeout(parent, req); if (!expire && (!resend || !req->rsk_ops->rtx_syn_ack(parent, req, NULL) || diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 8f8db724bfa..46d1e7199a8 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -999,12 +999,12 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h)); } -static struct sock_diag_handler inet_diag_handler = { +static const struct sock_diag_handler inet_diag_handler = { .family = AF_INET, .dump = inet_diag_handler_dump, }; -static struct sock_diag_handler inet6_diag_handler = { +static const struct sock_diag_handler inet6_diag_handler = { .family = AF_INET6, .dump = inet_diag_handler_dump, }; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 984ec656b03..7880af97020 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -217,7 +217,7 @@ begin: } EXPORT_SYMBOL_GPL(__inet_lookup_listener); -struct sock * __inet_lookup_established(struct net *net, +struct sock *__inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo, const __be32 saddr, const __be16 sport, const __be32 daddr, const u16 hnum, diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 89168c6351f..2784db3155f 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -89,8 +89,8 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw, #ifdef SOCK_REFCNT_DEBUG if (atomic_read(&tw->tw_refcnt) != 1) { - printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n", - tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt)); + pr_debug("%s timewait_sock %p refcnt=%d\n", + tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt)); } #endif while (refcnt) { @@ -263,7 +263,7 @@ rescan: void inet_twdr_hangman(unsigned long data) { struct inet_timewait_death_row *twdr; - int unsigned need_timer; + unsigned int need_timer; twdr = (struct inet_timewait_death_row *)data; spin_lock(&twdr->death_lock); diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 29a07b6c716..e5c44fc586a 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -41,7 +41,7 @@ static int ip_forward_finish(struct sk_buff *skb) { - struct ip_options * opt = &(IPCB(skb)->opt); + struct ip_options *opt = &(IPCB(skb)->opt); IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), 
IPSTATS_MIB_OUTFORWDATAGRAMS); @@ -55,7 +55,7 @@ int ip_forward(struct sk_buff *skb) { struct iphdr *iph; /* Our header */ struct rtable *rt; /* Route we use */ - struct ip_options * opt = &(IPCB(skb)->opt); + struct ip_options *opt = &(IPCB(skb)->opt); if (skb_warn_if_lro(skb)) goto drop; diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 3727e234c88..9dbd3dd6022 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -148,17 +148,17 @@ static unsigned int ip4_hashfn(struct inet_frag_queue *q) return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol); } -static int ip4_frag_match(struct inet_frag_queue *q, void *a) +static bool ip4_frag_match(struct inet_frag_queue *q, void *a) { struct ipq *qp; struct ip4_create_arg *arg = a; qp = container_of(q, struct ipq, q); return qp->id == arg->iph->id && - qp->saddr == arg->iph->saddr && - qp->daddr == arg->iph->daddr && - qp->protocol == arg->iph->protocol && - qp->user == arg->user; + qp->saddr == arg->iph->saddr && + qp->daddr == arg->iph->daddr && + qp->protocol == arg->iph->protocol && + qp->user == arg->user; } /* Memory Tracking Functions. */ @@ -545,6 +545,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, int len; int ihlen; int err; + int sum_truesize; u8 ecn; ipq_kill(qp); @@ -569,7 +570,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, skb_morph(head, qp->q.fragments); head->next = qp->q.fragments->next; - kfree_skb(qp->q.fragments); + consume_skb(qp->q.fragments); qp->q.fragments = head; } @@ -611,19 +612,32 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, atomic_add(clone->truesize, &qp->q.net->mem); } - skb_shinfo(head)->frag_list = head->next; skb_push(head, head->data - skb_network_header(head)); - for (fp=head->next; fp; fp = fp->next) { - head->data_len += fp->len; - head->len += fp->len; + sum_truesize = head->truesize; + for (fp = head->next; fp;) { + bool headstolen; + int delta; + struct sk_buff *next = fp->next; + + sum_truesize += fp->truesize; if (head->ip_summed != fp->ip_summed) head->ip_summed = CHECKSUM_NONE; else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); - head->truesize += fp->truesize; + + if (skb_try_coalesce(head, fp, &headstolen, &delta)) { + kfree_skb_partial(fp, headstolen); + } else { + if (!skb_shinfo(head)->frag_list) + skb_shinfo(head)->frag_list = fp; + head->data_len += fp->len; + head->len += fp->len; + head->truesize += fp->truesize; + } + fp = next; } - atomic_sub(head->truesize, &qp->q.net->mem); + atomic_sub(sum_truesize, &qp->q.net->mem); head->next = NULL; head->dev = dev; @@ -644,8 +658,7 @@ out_nomem: err = -ENOMEM; goto out_fail; out_oversize: - if (net_ratelimit()) - pr_info("Oversized IP packet from %pI4\n", &qp->saddr); + net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr); out_fail: IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); return err; @@ -782,7 +795,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net) table[2].data = &net->ipv4.frags.timeout; } - hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table); + hdr = register_net_sysctl(net, "net/ipv4", table); if (hdr == NULL) goto err_reg; @@ -807,7 +820,7 @@ static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net) static void ip4_frags_ctl_register(void) { - register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table); + register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table); } #else static inline int ip4_frags_ns_ctl_register(struct net *net) 
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index b57532d4742..f49047b7960 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -169,37 +169,56 @@ struct ipgre_net { /* often modified stats are per cpu, other are shared (netdev->stats) */ struct pcpu_tstats { - unsigned long rx_packets; - unsigned long rx_bytes; - unsigned long tx_packets; - unsigned long tx_bytes; -} __attribute__((aligned(4*sizeof(unsigned long)))); + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; +}; -static struct net_device_stats *ipgre_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *tot) { - struct pcpu_tstats sum = { 0 }; int i; for_each_possible_cpu(i) { const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); - - sum.rx_packets += tstats->rx_packets; - sum.rx_bytes += tstats->rx_bytes; - sum.tx_packets += tstats->tx_packets; - sum.tx_bytes += tstats->tx_bytes; + u64 rx_packets, rx_bytes, tx_packets, tx_bytes; + unsigned int start; + + do { + start = u64_stats_fetch_begin_bh(&tstats->syncp); + rx_packets = tstats->rx_packets; + tx_packets = tstats->tx_packets; + rx_bytes = tstats->rx_bytes; + tx_bytes = tstats->tx_bytes; + } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); + + tot->rx_packets += rx_packets; + tot->tx_packets += tx_packets; + tot->rx_bytes += rx_bytes; + tot->tx_bytes += tx_bytes; } - dev->stats.rx_packets = sum.rx_packets; - dev->stats.rx_bytes = sum.rx_bytes; - dev->stats.tx_packets = sum.tx_packets; - dev->stats.tx_bytes = sum.tx_bytes; - return &dev->stats; + + tot->multicast = dev->stats.multicast; + tot->rx_crc_errors = dev->stats.rx_crc_errors; + tot->rx_fifo_errors = dev->stats.rx_fifo_errors; + tot->rx_length_errors = dev->stats.rx_length_errors; + tot->rx_errors = dev->stats.rx_errors; + tot->tx_fifo_errors = dev->stats.tx_fifo_errors; + tot->tx_carrier_errors = dev->stats.tx_carrier_errors; + tot->tx_dropped = dev->stats.tx_dropped; + tot->tx_aborted_errors = dev->stats.tx_aborted_errors; + tot->tx_errors = dev->stats.tx_errors; + + return tot; } /* Given src, dst and key, find appropriate for input tunnel. */ -static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, - __be32 remote, __be32 local, - __be32 key, __be16 gre_proto) +static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev, + __be32 remote, __be32 local, + __be32 key, __be16 gre_proto) { struct net *net = dev_net(dev); int link = dev->ifindex; @@ -464,7 +483,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info) */ const struct iphdr *iph = (const struct iphdr *)skb->data; - __be16 *p = (__be16*)(skb->data+(iph->ihl<<2)); + __be16 *p = (__be16 *)(skb->data+(iph->ihl<<2)); int grehlen = (iph->ihl<<2) + 4; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; @@ -574,7 +593,7 @@ static int ipgre_rcv(struct sk_buff *skb) iph = ip_hdr(skb); h = skb->data; - flags = *(__be16*)h; + flags = *(__be16 *)h; if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) { /* - Version must be 0. 
@@ -598,11 +617,11 @@ static int ipgre_rcv(struct sk_buff *skb) offset += 4; } if (flags&GRE_KEY) { - key = *(__be32*)(h + offset); + key = *(__be32 *)(h + offset); offset += 4; } if (flags&GRE_SEQ) { - seqno = ntohl(*(__be32*)(h + offset)); + seqno = ntohl(*(__be32 *)(h + offset)); offset += 4; } } @@ -672,8 +691,10 @@ static int ipgre_rcv(struct sk_buff *skb) } tstats = this_cpu_ptr(tunnel->dev->tstats); + u64_stats_update_begin(&tstats->syncp); tstats->rx_packets++; tstats->rx_bytes += skb->len; + u64_stats_update_end(&tstats->syncp); __skb_tunnel_rx(skb, tunnel->dev); @@ -900,7 +921,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev htons(ETH_P_TEB) : skb->protocol; if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) { - __be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4); + __be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4); if (tunnel->parms.o_flags&GRE_SEQ) { ++tunnel->o_seqno; @@ -913,7 +934,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev } if (tunnel->parms.o_flags&GRE_CSUM) { *ptr = 0; - *(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr)); + *(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr)); } } @@ -1169,7 +1190,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, { struct ip_tunnel *t = netdev_priv(dev); struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen); - __be16 *p = (__be16*)(iph+1); + __be16 *p = (__be16 *)(iph+1); memcpy(iph, &t->parms.iph, sizeof(struct iphdr)); p[0] = t->parms.o_flags; @@ -1253,7 +1274,7 @@ static const struct net_device_ops ipgre_netdev_ops = { .ndo_start_xmit = ipgre_tunnel_xmit, .ndo_do_ioctl = ipgre_tunnel_ioctl, .ndo_change_mtu = ipgre_tunnel_change_mtu, - .ndo_get_stats = ipgre_get_stats, + .ndo_get_stats64 = ipgre_get_stats64, }; static void ipgre_dev_free(struct net_device *dev) @@ -1507,7 +1528,7 @@ static const struct net_device_ops ipgre_tap_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = ipgre_tunnel_change_mtu, - .ndo_get_stats = ipgre_get_stats, + .ndo_get_stats64 = ipgre_get_stats64, }; static void ipgre_tap_setup(struct net_device *dev) @@ -1654,17 +1675,18 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) struct ip_tunnel *t = netdev_priv(dev); struct ip_tunnel_parm *p = &t->parms; - NLA_PUT_U32(skb, IFLA_GRE_LINK, p->link); - NLA_PUT_BE16(skb, IFLA_GRE_IFLAGS, p->i_flags); - NLA_PUT_BE16(skb, IFLA_GRE_OFLAGS, p->o_flags); - NLA_PUT_BE32(skb, IFLA_GRE_IKEY, p->i_key); - NLA_PUT_BE32(skb, IFLA_GRE_OKEY, p->o_key); - NLA_PUT_BE32(skb, IFLA_GRE_LOCAL, p->iph.saddr); - NLA_PUT_BE32(skb, IFLA_GRE_REMOTE, p->iph.daddr); - NLA_PUT_U8(skb, IFLA_GRE_TTL, p->iph.ttl); - NLA_PUT_U8(skb, IFLA_GRE_TOS, p->iph.tos); - NLA_PUT_U8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF))); - + if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || + nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) || + nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) || + nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || + nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || + nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) || + nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) || + nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) || + nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) || + nla_put_u8(skb, IFLA_GRE_PMTUDISC, + !!(p->iph.frag_off & htons(IP_DF)))) + goto nla_put_failure; return 0; nla_put_failure: diff --git 
a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 26eccc5bab1..8590144ca33 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -210,9 +210,8 @@ static int ip_local_deliver_finish(struct sk_buff *skb) int ret; if (!net_eq(net, &init_net) && !ipprot->netns_ok) { - if (net_ratelimit()) - printk("%s: proto %d isn't netns-ready\n", - __func__, protocol); + net_info_ratelimited("%s: proto %d isn't netns-ready\n", + __func__, protocol); kfree_skb(skb); goto out; } @@ -298,10 +297,10 @@ static inline bool ip_rcv_options(struct sk_buff *skb) if (in_dev) { if (!IN_DEV_SOURCE_ROUTE(in_dev)) { - if (IN_DEV_LOG_MARTIANS(in_dev) && - net_ratelimit()) - pr_info("source route option %pI4 -> %pI4\n", - &iph->saddr, &iph->daddr); + if (IN_DEV_LOG_MARTIANS(in_dev)) + net_info_ratelimited("source route option %pI4 -> %pI4\n", + &iph->saddr, + &iph->daddr); goto drop; } } diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index a0d0d9d9b87..708b99494e2 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -210,10 +210,10 @@ int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb) * Simple and stupid 8), but the most efficient way. */ -void ip_options_fragment(struct sk_buff * skb) +void ip_options_fragment(struct sk_buff *skb) { unsigned char *optptr = skb_network_header(skb) + sizeof(struct iphdr); - struct ip_options * opt = &(IPCB(skb)->opt); + struct ip_options *opt = &(IPCB(skb)->opt); int l = opt->optlen; int optlen; @@ -248,13 +248,13 @@ void ip_options_fragment(struct sk_buff * skb) */ int ip_options_compile(struct net *net, - struct ip_options * opt, struct sk_buff * skb) + struct ip_options *opt, struct sk_buff *skb) { int l; - unsigned char * iph; - unsigned char * optptr; + unsigned char *iph; + unsigned char *optptr; int optlen; - unsigned char * pp_ptr = NULL; + unsigned char *pp_ptr = NULL; struct rtable *rt = NULL; if (skb != NULL) { @@ -413,7 +413,7 @@ int ip_options_compile(struct net *net, opt->is_changed = 1; } } else { - unsigned overflow = optptr[3]>>4; + unsigned int overflow = optptr[3]>>4; if (overflow == 15) { pp_ptr = optptr + 3; goto error; @@ -473,20 +473,20 @@ EXPORT_SYMBOL(ip_options_compile); * Undo all the changes done by ip_options_compile(). 
*/ -void ip_options_undo(struct ip_options * opt) +void ip_options_undo(struct ip_options *opt) { if (opt->srr) { - unsigned char * optptr = opt->__data+opt->srr-sizeof(struct iphdr); + unsigned char *optptr = opt->__data+opt->srr-sizeof(struct iphdr); memmove(optptr+7, optptr+3, optptr[1]-7); memcpy(optptr+3, &opt->faddr, 4); } if (opt->rr_needaddr) { - unsigned char * optptr = opt->__data+opt->rr-sizeof(struct iphdr); + unsigned char *optptr = opt->__data+opt->rr-sizeof(struct iphdr); optptr[2] -= 4; memset(&optptr[optptr[2]-1], 0, 4); } if (opt->ts) { - unsigned char * optptr = opt->__data+opt->ts-sizeof(struct iphdr); + unsigned char *optptr = opt->__data+opt->ts-sizeof(struct iphdr); if (opt->ts_needtime) { optptr[2] -= 4; memset(&optptr[optptr[2]-1], 0, 4); @@ -549,8 +549,8 @@ int ip_options_get(struct net *net, struct ip_options_rcu **optp, void ip_forward_options(struct sk_buff *skb) { - struct ip_options * opt = &(IPCB(skb)->opt); - unsigned char * optptr; + struct ip_options *opt = &(IPCB(skb)->opt); + unsigned char *optptr; struct rtable *rt = skb_rtable(skb); unsigned char *raw = skb_network_header(skb); @@ -578,8 +578,10 @@ void ip_forward_options(struct sk_buff *skb) ip_hdr(skb)->daddr = opt->nexthop; ip_rt_get_source(&optptr[srrptr-1], skb, rt); optptr[2] = srrptr+4; - } else if (net_ratelimit()) - pr_crit("%s(): Argh! Destination lost!\n", __func__); + } else { + net_crit_ratelimited("%s(): Argh! Destination lost!\n", + __func__); + } if (opt->ts_needaddr) { optptr = raw + opt->ts; ip_rt_get_source(&optptr[optptr[2]-9], skb, rt); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 4910176d24e..451f97c42eb 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -214,8 +214,8 @@ static inline int ip_finish_output2(struct sk_buff *skb) } rcu_read_unlock(); - if (net_ratelimit()) - printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n"); + net_dbg_ratelimited("%s: No header cache and no neighbour!\n", + __func__); kfree_skb(skb); return -EINVAL; } diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 2fd0fba7712..0d11f234d61 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -90,7 +90,7 @@ static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb) static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb) { unsigned char optbuf[sizeof(struct ip_options) + 40]; - struct ip_options * opt = (struct ip_options *)optbuf; + struct ip_options *opt = (struct ip_options *)optbuf; if (IPCB(skb)->opt.optlen == 0) return; @@ -147,7 +147,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb) { struct inet_sock *inet = inet_sk(skb->sk); - unsigned flags = inet->cmsg_flags; + unsigned int flags = inet->cmsg_flags; /* Ordered by supposed usage frequency */ if (flags & 1) @@ -673,10 +673,15 @@ static int do_ip_setsockopt(struct sock *sk, int level, break; } else { memset(&mreq, 0, sizeof(mreq)); - if (optlen >= sizeof(struct in_addr) && - copy_from_user(&mreq.imr_address, optval, - sizeof(struct in_addr))) - break; + if (optlen >= sizeof(struct ip_mreq)) { + if (copy_from_user(&mreq, optval, + sizeof(struct ip_mreq))) + break; + } else if (optlen >= sizeof(struct in_addr)) { + if (copy_from_user(&mreq.imr_address, optval, + sizeof(struct in_addr))) + break; + } } if (!mreq.imr_ifindex) { @@ -1094,7 +1099,7 @@ EXPORT_SYMBOL(compat_ip_setsockopt); */ static int do_ip_getsockopt(struct sock *sk, int level, int optname, - 
char __user *optval, int __user *optlen, unsigned flags) + char __user *optval, int __user *optlen, unsigned int flags) { struct inet_sock *inet = inet_sk(sk); int val; diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 92ac7e7363a..67e8a6b086e 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -808,8 +808,6 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d b->op = BOOTP_REQUEST; if (dev->type < 256) /* check for false types */ b->htype = dev->type; - else if (dev->type == ARPHRD_IEEE802_TR) /* fix for token ring */ - b->htype = ARPHRD_IEEE802; else if (dev->type == ARPHRD_FDDI) b->htype = ARPHRD_ETHER; else { @@ -955,8 +953,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str /* Fragments are not supported */ if (ip_is_fragment(h)) { - if (net_ratelimit()) - pr_err("DHCP/BOOTP: Ignoring fragmented reply\n"); + net_err_ratelimited("DHCP/BOOTP: Ignoring fragmented reply\n"); goto drop; } @@ -1004,16 +1001,14 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str /* Is it a reply to our BOOTP request? */ if (b->op != BOOTP_REPLY || b->xid != d->xid) { - if (net_ratelimit()) - pr_err("DHCP/BOOTP: Reply not for us, op[%x] xid[%x]\n", - b->op, b->xid); + net_err_ratelimited("DHCP/BOOTP: Reply not for us, op[%x] xid[%x]\n", + b->op, b->xid); goto drop_unlock; } /* Is it a reply for the device we are configuring? */ if (b->xid != ic_dev_xid) { - if (net_ratelimit()) - pr_err("DHCP/BOOTP: Ignoring delayed packet\n"); + net_err_ratelimited("DHCP/BOOTP: Ignoring delayed packet\n"); goto drop_unlock; } @@ -1198,7 +1193,7 @@ static int __init ic_dynamic(void) d = ic_first_dev; retries = CONF_SEND_RETRIES; get_random_bytes(&timeout, sizeof(timeout)); - timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned) CONF_TIMEOUT_RANDOM); + timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned int) CONF_TIMEOUT_RANDOM); for (;;) { /* Track the device we are configuring */ ic_dev_xid = d->xid; @@ -1626,11 +1621,13 @@ static int __init ip_auto_config_setup(char *addrs) return 1; } +__setup("ip=", ip_auto_config_setup); static int __init nfsaddrs_config_setup(char *addrs) { return ip_auto_config_setup(addrs); } +__setup("nfsaddrs=", nfsaddrs_config_setup); static int __init vendor_class_identifier_setup(char *addrs) { @@ -1641,7 +1638,4 @@ static int __init vendor_class_identifier_setup(char *addrs) vendor_class_identifier); return 1; } - -__setup("ip=", ip_auto_config_setup); -__setup("nfsaddrs=", nfsaddrs_config_setup); __setup("dhcpclass=", vendor_class_identifier_setup); diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index ae1413e3f2f..2d0f99bf61b 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -144,33 +144,48 @@ static void ipip_dev_free(struct net_device *dev); /* often modified stats are per cpu, other are shared (netdev->stats) */ struct pcpu_tstats { - unsigned long rx_packets; - unsigned long rx_bytes; - unsigned long tx_packets; - unsigned long tx_bytes; -} __attribute__((aligned(4*sizeof(unsigned long)))); + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; +}; -static struct net_device_stats *ipip_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *tot) { - struct pcpu_tstats sum = { 0 }; int i; for_each_possible_cpu(i) { const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); - - sum.rx_packets += tstats->rx_packets; - sum.rx_bytes += 
tstats->rx_bytes; - sum.tx_packets += tstats->tx_packets; - sum.tx_bytes += tstats->tx_bytes; + u64 rx_packets, rx_bytes, tx_packets, tx_bytes; + unsigned int start; + + do { + start = u64_stats_fetch_begin_bh(&tstats->syncp); + rx_packets = tstats->rx_packets; + tx_packets = tstats->tx_packets; + rx_bytes = tstats->rx_bytes; + tx_bytes = tstats->tx_bytes; + } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); + + tot->rx_packets += rx_packets; + tot->tx_packets += tx_packets; + tot->rx_bytes += rx_bytes; + tot->tx_bytes += tx_bytes; } - dev->stats.rx_packets = sum.rx_packets; - dev->stats.rx_bytes = sum.rx_bytes; - dev->stats.tx_packets = sum.tx_packets; - dev->stats.tx_bytes = sum.tx_bytes; - return &dev->stats; + + tot->tx_fifo_errors = dev->stats.tx_fifo_errors; + tot->tx_carrier_errors = dev->stats.tx_carrier_errors; + tot->tx_dropped = dev->stats.tx_dropped; + tot->tx_aborted_errors = dev->stats.tx_aborted_errors; + tot->tx_errors = dev->stats.tx_errors; + tot->collisions = dev->stats.collisions; + + return tot; } -static struct ip_tunnel * ipip_tunnel_lookup(struct net *net, +static struct ip_tunnel *ipip_tunnel_lookup(struct net *net, __be32 remote, __be32 local) { unsigned int h0 = HASH(remote); @@ -245,7 +260,7 @@ static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t) rcu_assign_pointer(*tp, t); } -static struct ip_tunnel * ipip_tunnel_locate(struct net *net, +static struct ip_tunnel *ipip_tunnel_locate(struct net *net, struct ip_tunnel_parm *parms, int create) { __be32 remote = parms->iph.daddr; @@ -404,8 +419,10 @@ static int ipip_rcv(struct sk_buff *skb) skb->pkt_type = PACKET_HOST; tstats = this_cpu_ptr(tunnel->dev->tstats); + u64_stats_update_begin(&tstats->syncp); tstats->rx_packets++; tstats->rx_bytes += skb->len; + u64_stats_update_end(&tstats->syncp); __skb_tunnel_rx(skb, tunnel->dev); @@ -730,7 +747,7 @@ static const struct net_device_ops ipip_netdev_ops = { .ndo_start_xmit = ipip_tunnel_xmit, .ndo_do_ioctl = ipip_tunnel_ioctl, .ndo_change_mtu = ipip_tunnel_change_mtu, - .ndo_get_stats = ipip_get_stats, + .ndo_get_stats64 = ipip_get_stats64, }; static void ipip_dev_free(struct net_device *dev) diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 960fbfc3e97..a9e519ad6db 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -949,8 +949,7 @@ static int ipmr_cache_report(struct mr_table *mrt, ret = sock_queue_rcv_skb(mroute_sk, skb); rcu_read_unlock(); if (ret < 0) { - if (net_ratelimit()) - pr_warn("mroute: pending queue full, dropping entries\n"); + net_warn_ratelimited("mroute: pending queue full, dropping entries\n"); kfree_skb(skb); } @@ -2119,15 +2118,16 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, rtm->rtm_src_len = 32; rtm->rtm_tos = 0; rtm->rtm_table = mrt->id; - NLA_PUT_U32(skb, RTA_TABLE, mrt->id); + if (nla_put_u32(skb, RTA_TABLE, mrt->id)) + goto nla_put_failure; rtm->rtm_type = RTN_MULTICAST; rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_protocol = RTPROT_UNSPEC; rtm->rtm_flags = 0; - NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin); - NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp); - + if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) || + nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp)) + goto nla_put_failure; if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0) goto nla_put_failure; diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index 4f47e064e26..ed1b3678319 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -12,7 +12,7 @@ #include <net/netfilter/nf_queue.h> /* route_me_harder function, used by iptable_nat, 
iptable_mangle + ip_queue */ -int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) +int ip_route_me_harder(struct sk_buff *skb, unsigned int addr_type) { struct net *net = dev_net(skb_dst(skb)->dev); const struct iphdr *iph = ip_hdr(skb); @@ -237,13 +237,3 @@ static void ipv4_netfilter_fini(void) module_init(ipv4_netfilter_init); module_exit(ipv4_netfilter_fini); - -#ifdef CONFIG_SYSCTL -struct ctl_path nf_net_ipv4_netfilter_sysctl_path[] = { - { .procname = "net", }, - { .procname = "ipv4", }, - { .procname = "netfilter", }, - { } -}; -EXPORT_SYMBOL_GPL(nf_net_ipv4_netfilter_sysctl_path); -#endif /* CONFIG_SYSCTL */ diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index 240b68469a7..c20674dc945 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile @@ -66,6 +66,3 @@ obj-$(CONFIG_IP_NF_ARP_MANGLE) += arpt_mangle.o # just filtering instance of ARP tables for now obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o - -obj-$(CONFIG_IP_NF_QUEUE) += ip_queue.o - diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index fd7a3f68917..97e61eadf58 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -221,9 +221,8 @@ static inline int arp_checkentry(const struct arpt_arp *arp) static unsigned int arpt_error(struct sk_buff *skb, const struct xt_action_param *par) { - if (net_ratelimit()) - pr_err("arp_tables: error: '%s'\n", - (const char *)par->targinfo); + net_err_ratelimited("arp_tables: error: '%s'\n", + (const char *)par->targinfo); return NF_DROP; } @@ -303,7 +302,7 @@ unsigned int arpt_do_table(struct sk_buff *skb, if (v < 0) { /* Pop from stack? */ if (v != XT_RETURN) { - verdict = (unsigned)(-v) - 1; + verdict = (unsigned int)(-v) - 1; break; } e = back; diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c deleted file mode 100644 index 94d45e1f888..00000000000 --- a/net/ipv4/netfilter/ip_queue.c +++ /dev/null @@ -1,639 +0,0 @@ -/* - * This is a module which is used for queueing IPv4 packets and - * communicating with userspace via netlink. - * - * (C) 2000-2002 James Morris <jmorris@intercode.com.au> - * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#include <linux/module.h> -#include <linux/skbuff.h> -#include <linux/init.h> -#include <linux/ip.h> -#include <linux/notifier.h> -#include <linux/netdevice.h> -#include <linux/netfilter.h> -#include <linux/netfilter_ipv4/ip_queue.h> -#include <linux/netfilter_ipv4/ip_tables.h> -#include <linux/netlink.h> -#include <linux/spinlock.h> -#include <linux/sysctl.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/security.h> -#include <linux/net.h> -#include <linux/mutex.h> -#include <linux/slab.h> -#include <net/net_namespace.h> -#include <net/sock.h> -#include <net/route.h> -#include <net/netfilter/nf_queue.h> -#include <net/ip.h> - -#define IPQ_QMAX_DEFAULT 1024 -#define IPQ_PROC_FS_NAME "ip_queue" -#define NET_IPQ_QMAX 2088 -#define NET_IPQ_QMAX_NAME "ip_queue_maxlen" - -typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long); - -static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE; -static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT; -static DEFINE_SPINLOCK(queue_lock); -static int peer_pid __read_mostly; -static unsigned int copy_range __read_mostly; -static unsigned int queue_total; -static unsigned int queue_dropped = 0; -static unsigned int queue_user_dropped = 0; -static struct sock *ipqnl __read_mostly; -static LIST_HEAD(queue_list); -static DEFINE_MUTEX(ipqnl_mutex); - -static inline void -__ipq_enqueue_entry(struct nf_queue_entry *entry) -{ - list_add_tail(&entry->list, &queue_list); - queue_total++; -} - -static inline int -__ipq_set_mode(unsigned char mode, unsigned int range) -{ - int status = 0; - - switch(mode) { - case IPQ_COPY_NONE: - case IPQ_COPY_META: - copy_mode = mode; - copy_range = 0; - break; - - case IPQ_COPY_PACKET: - if (range > 0xFFFF) - range = 0xFFFF; - copy_range = range; - copy_mode = mode; - break; - - default: - status = -EINVAL; - - } - return status; -} - -static void __ipq_flush(ipq_cmpfn cmpfn, unsigned long data); - -static inline void -__ipq_reset(void) -{ - peer_pid = 0; - net_disable_timestamp(); - __ipq_set_mode(IPQ_COPY_NONE, 0); - __ipq_flush(NULL, 0); -} - -static struct nf_queue_entry * -ipq_find_dequeue_entry(unsigned long id) -{ - struct nf_queue_entry *entry = NULL, *i; - - spin_lock_bh(&queue_lock); - - list_for_each_entry(i, &queue_list, list) { - if ((unsigned long)i == id) { - entry = i; - break; - } - } - - if (entry) { - list_del(&entry->list); - queue_total--; - } - - spin_unlock_bh(&queue_lock); - return entry; -} - -static void -__ipq_flush(ipq_cmpfn cmpfn, unsigned long data) -{ - struct nf_queue_entry *entry, *next; - - list_for_each_entry_safe(entry, next, &queue_list, list) { - if (!cmpfn || cmpfn(entry, data)) { - list_del(&entry->list); - queue_total--; - nf_reinject(entry, NF_DROP); - } - } -} - -static void -ipq_flush(ipq_cmpfn cmpfn, unsigned long data) -{ - spin_lock_bh(&queue_lock); - __ipq_flush(cmpfn, data); - spin_unlock_bh(&queue_lock); -} - -static struct sk_buff * -ipq_build_packet_message(struct nf_queue_entry *entry, int *errp) -{ - sk_buff_data_t old_tail; - size_t size = 0; - size_t data_len = 0; - struct sk_buff *skb; - struct ipq_packet_msg *pmsg; - struct nlmsghdr *nlh; - struct timeval tv; - - switch (ACCESS_ONCE(copy_mode)) { - case IPQ_COPY_META: - case IPQ_COPY_NONE: - size = NLMSG_SPACE(sizeof(*pmsg)); - break; - - case IPQ_COPY_PACKET: - if (entry->skb->ip_summed == CHECKSUM_PARTIAL && - (*errp = skb_checksum_help(entry->skb))) - return NULL; - - data_len = ACCESS_ONCE(copy_range); - if (data_len == 0 || data_len > entry->skb->len) - 
data_len = entry->skb->len; - - size = NLMSG_SPACE(sizeof(*pmsg) + data_len); - break; - - default: - *errp = -EINVAL; - return NULL; - } - - skb = alloc_skb(size, GFP_ATOMIC); - if (!skb) - goto nlmsg_failure; - - old_tail = skb->tail; - nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh)); - pmsg = NLMSG_DATA(nlh); - memset(pmsg, 0, sizeof(*pmsg)); - - pmsg->packet_id = (unsigned long )entry; - pmsg->data_len = data_len; - tv = ktime_to_timeval(entry->skb->tstamp); - pmsg->timestamp_sec = tv.tv_sec; - pmsg->timestamp_usec = tv.tv_usec; - pmsg->mark = entry->skb->mark; - pmsg->hook = entry->hook; - pmsg->hw_protocol = entry->skb->protocol; - - if (entry->indev) - strcpy(pmsg->indev_name, entry->indev->name); - else - pmsg->indev_name[0] = '\0'; - - if (entry->outdev) - strcpy(pmsg->outdev_name, entry->outdev->name); - else - pmsg->outdev_name[0] = '\0'; - - if (entry->indev && entry->skb->dev && - entry->skb->mac_header != entry->skb->network_header) { - pmsg->hw_type = entry->skb->dev->type; - pmsg->hw_addrlen = dev_parse_header(entry->skb, - pmsg->hw_addr); - } - - if (data_len) - if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len)) - BUG(); - - nlh->nlmsg_len = skb->tail - old_tail; - return skb; - -nlmsg_failure: - kfree_skb(skb); - *errp = -EINVAL; - printk(KERN_ERR "ip_queue: error creating packet message\n"); - return NULL; -} - -static int -ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) -{ - int status = -EINVAL; - struct sk_buff *nskb; - - if (copy_mode == IPQ_COPY_NONE) - return -EAGAIN; - - nskb = ipq_build_packet_message(entry, &status); - if (nskb == NULL) - return status; - - spin_lock_bh(&queue_lock); - - if (!peer_pid) - goto err_out_free_nskb; - - if (queue_total >= queue_maxlen) { - queue_dropped++; - status = -ENOSPC; - if (net_ratelimit()) - printk (KERN_WARNING "ip_queue: full at %d entries, " - "dropping packets(s). 
Dropped: %d\n", queue_total, - queue_dropped); - goto err_out_free_nskb; - } - - /* netlink_unicast will either free the nskb or attach it to a socket */ - status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT); - if (status < 0) { - queue_user_dropped++; - goto err_out_unlock; - } - - __ipq_enqueue_entry(entry); - - spin_unlock_bh(&queue_lock); - return status; - -err_out_free_nskb: - kfree_skb(nskb); - -err_out_unlock: - spin_unlock_bh(&queue_lock); - return status; -} - -static int -ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e) -{ - int diff; - struct iphdr *user_iph = (struct iphdr *)v->payload; - struct sk_buff *nskb; - - if (v->data_len < sizeof(*user_iph)) - return 0; - diff = v->data_len - e->skb->len; - if (diff < 0) { - if (pskb_trim(e->skb, v->data_len)) - return -ENOMEM; - } else if (diff > 0) { - if (v->data_len > 0xFFFF) - return -EINVAL; - if (diff > skb_tailroom(e->skb)) { - nskb = skb_copy_expand(e->skb, skb_headroom(e->skb), - diff, GFP_ATOMIC); - if (!nskb) { - printk(KERN_WARNING "ip_queue: error " - "in mangle, dropping packet\n"); - return -ENOMEM; - } - kfree_skb(e->skb); - e->skb = nskb; - } - skb_put(e->skb, diff); - } - if (!skb_make_writable(e->skb, v->data_len)) - return -ENOMEM; - skb_copy_to_linear_data(e->skb, v->payload, v->data_len); - e->skb->ip_summed = CHECKSUM_NONE; - - return 0; -} - -static int -ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len) -{ - struct nf_queue_entry *entry; - - if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN) - return -EINVAL; - - entry = ipq_find_dequeue_entry(vmsg->id); - if (entry == NULL) - return -ENOENT; - else { - int verdict = vmsg->value; - - if (vmsg->data_len && vmsg->data_len == len) - if (ipq_mangle_ipv4(vmsg, entry) < 0) - verdict = NF_DROP; - - nf_reinject(entry, verdict); - return 0; - } -} - -static int -ipq_set_mode(unsigned char mode, unsigned int range) -{ - int status; - - spin_lock_bh(&queue_lock); - status = __ipq_set_mode(mode, range); - spin_unlock_bh(&queue_lock); - return status; -} - -static int -ipq_receive_peer(struct ipq_peer_msg *pmsg, - unsigned char type, unsigned int len) -{ - int status = 0; - - if (len < sizeof(*pmsg)) - return -EINVAL; - - switch (type) { - case IPQM_MODE: - status = ipq_set_mode(pmsg->msg.mode.value, - pmsg->msg.mode.range); - break; - - case IPQM_VERDICT: - status = ipq_set_verdict(&pmsg->msg.verdict, - len - sizeof(*pmsg)); - break; - default: - status = -EINVAL; - } - return status; -} - -static int -dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex) -{ - if (entry->indev) - if (entry->indev->ifindex == ifindex) - return 1; - if (entry->outdev) - if (entry->outdev->ifindex == ifindex) - return 1; -#ifdef CONFIG_BRIDGE_NETFILTER - if (entry->skb->nf_bridge) { - if (entry->skb->nf_bridge->physindev && - entry->skb->nf_bridge->physindev->ifindex == ifindex) - return 1; - if (entry->skb->nf_bridge->physoutdev && - entry->skb->nf_bridge->physoutdev->ifindex == ifindex) - return 1; - } -#endif - return 0; -} - -static void -ipq_dev_drop(int ifindex) -{ - ipq_flush(dev_cmp, ifindex); -} - -#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) - -static inline void -__ipq_rcv_skb(struct sk_buff *skb) -{ - int status, type, pid, flags; - unsigned int nlmsglen, skblen; - struct nlmsghdr *nlh; - bool enable_timestamp = false; - - skblen = skb->len; - if (skblen < sizeof(*nlh)) - return; - - nlh = nlmsg_hdr(skb); - nlmsglen = nlh->nlmsg_len; - if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen) - 
return; - - pid = nlh->nlmsg_pid; - flags = nlh->nlmsg_flags; - - if(pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI) - RCV_SKB_FAIL(-EINVAL); - - if (flags & MSG_TRUNC) - RCV_SKB_FAIL(-ECOMM); - - type = nlh->nlmsg_type; - if (type < NLMSG_NOOP || type >= IPQM_MAX) - RCV_SKB_FAIL(-EINVAL); - - if (type <= IPQM_BASE) - return; - - if (!capable(CAP_NET_ADMIN)) - RCV_SKB_FAIL(-EPERM); - - spin_lock_bh(&queue_lock); - - if (peer_pid) { - if (peer_pid != pid) { - spin_unlock_bh(&queue_lock); - RCV_SKB_FAIL(-EBUSY); - } - } else { - enable_timestamp = true; - peer_pid = pid; - } - - spin_unlock_bh(&queue_lock); - if (enable_timestamp) - net_enable_timestamp(); - status = ipq_receive_peer(NLMSG_DATA(nlh), type, - nlmsglen - NLMSG_LENGTH(0)); - if (status < 0) - RCV_SKB_FAIL(status); - - if (flags & NLM_F_ACK) - netlink_ack(skb, nlh, 0); -} - -static void -ipq_rcv_skb(struct sk_buff *skb) -{ - mutex_lock(&ipqnl_mutex); - __ipq_rcv_skb(skb); - mutex_unlock(&ipqnl_mutex); -} - -static int -ipq_rcv_dev_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct net_device *dev = ptr; - - if (!net_eq(dev_net(dev), &init_net)) - return NOTIFY_DONE; - - /* Drop any packets associated with the downed device */ - if (event == NETDEV_DOWN) - ipq_dev_drop(dev->ifindex); - return NOTIFY_DONE; -} - -static struct notifier_block ipq_dev_notifier = { - .notifier_call = ipq_rcv_dev_event, -}; - -static int -ipq_rcv_nl_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct netlink_notify *n = ptr; - - if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) { - spin_lock_bh(&queue_lock); - if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid)) - __ipq_reset(); - spin_unlock_bh(&queue_lock); - } - return NOTIFY_DONE; -} - -static struct notifier_block ipq_nl_notifier = { - .notifier_call = ipq_rcv_nl_event, -}; - -#ifdef CONFIG_SYSCTL -static struct ctl_table_header *ipq_sysctl_header; - -static ctl_table ipq_table[] = { - { - .procname = NET_IPQ_QMAX_NAME, - .data = &queue_maxlen, - .maxlen = sizeof(queue_maxlen), - .mode = 0644, - .proc_handler = proc_dointvec - }, - { } -}; -#endif - -#ifdef CONFIG_PROC_FS -static int ip_queue_show(struct seq_file *m, void *v) -{ - spin_lock_bh(&queue_lock); - - seq_printf(m, - "Peer PID : %d\n" - "Copy mode : %hu\n" - "Copy range : %u\n" - "Queue length : %u\n" - "Queue max. 
length : %u\n" - "Queue dropped : %u\n" - "Netlink dropped : %u\n", - peer_pid, - copy_mode, - copy_range, - queue_total, - queue_maxlen, - queue_dropped, - queue_user_dropped); - - spin_unlock_bh(&queue_lock); - return 0; -} - -static int ip_queue_open(struct inode *inode, struct file *file) -{ - return single_open(file, ip_queue_show, NULL); -} - -static const struct file_operations ip_queue_proc_fops = { - .open = ip_queue_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; -#endif - -static const struct nf_queue_handler nfqh = { - .name = "ip_queue", - .outfn = &ipq_enqueue_packet, -}; - -static int __init ip_queue_init(void) -{ - int status = -ENOMEM; - struct proc_dir_entry *proc __maybe_unused; - - netlink_register_notifier(&ipq_nl_notifier); - ipqnl = netlink_kernel_create(&init_net, NETLINK_FIREWALL, 0, - ipq_rcv_skb, NULL, THIS_MODULE); - if (ipqnl == NULL) { - printk(KERN_ERR "ip_queue: failed to create netlink socket\n"); - goto cleanup_netlink_notifier; - } - -#ifdef CONFIG_PROC_FS - proc = proc_create(IPQ_PROC_FS_NAME, 0, init_net.proc_net, - &ip_queue_proc_fops); - if (!proc) { - printk(KERN_ERR "ip_queue: failed to create proc entry\n"); - goto cleanup_ipqnl; - } -#endif - register_netdevice_notifier(&ipq_dev_notifier); -#ifdef CONFIG_SYSCTL - ipq_sysctl_header = register_sysctl_paths(net_ipv4_ctl_path, ipq_table); -#endif - status = nf_register_queue_handler(NFPROTO_IPV4, &nfqh); - if (status < 0) { - printk(KERN_ERR "ip_queue: failed to register queue handler\n"); - goto cleanup_sysctl; - } - return status; - -cleanup_sysctl: -#ifdef CONFIG_SYSCTL - unregister_sysctl_table(ipq_sysctl_header); -#endif - unregister_netdevice_notifier(&ipq_dev_notifier); - proc_net_remove(&init_net, IPQ_PROC_FS_NAME); -cleanup_ipqnl: __maybe_unused - netlink_kernel_release(ipqnl); - mutex_lock(&ipqnl_mutex); - mutex_unlock(&ipqnl_mutex); - -cleanup_netlink_notifier: - netlink_unregister_notifier(&ipq_nl_notifier); - return status; -} - -static void __exit ip_queue_fini(void) -{ - nf_unregister_queue_handlers(&nfqh); - - ipq_flush(NULL, 0); - -#ifdef CONFIG_SYSCTL - unregister_sysctl_table(ipq_sysctl_header); -#endif - unregister_netdevice_notifier(&ipq_dev_notifier); - proc_net_remove(&init_net, IPQ_PROC_FS_NAME); - - netlink_kernel_release(ipqnl); - mutex_lock(&ipqnl_mutex); - mutex_unlock(&ipqnl_mutex); - - netlink_unregister_notifier(&ipq_nl_notifier); -} - -MODULE_DESCRIPTION("IPv4 packet queue handler"); -MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_FIREWALL); - -module_init(ip_queue_init); -module_exit(ip_queue_fini); diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 24e556e83a3..170b1fdd6b7 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -153,8 +153,7 @@ ip_checkentry(const struct ipt_ip *ip) static unsigned int ipt_error(struct sk_buff *skb, const struct xt_action_param *par) { - if (net_ratelimit()) - pr_info("error: `%s'\n", (const char *)par->targinfo); + net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo); return NF_DROP; } @@ -377,7 +376,7 @@ ipt_do_table(struct sk_buff *skb, if (v < 0) { /* Pop from stack? 
*/ if (v != XT_RETURN) { - verdict = (unsigned)(-v) - 1; + verdict = (unsigned int)(-v) - 1; break; } if (*stackptr <= origptr) { diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index a639967eb72..fe5daea5214 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -246,8 +246,7 @@ clusterip_hashfn(const struct sk_buff *skb, dport = ports[1]; } } else { - if (net_ratelimit()) - pr_info("unknown protocol %u\n", iph->protocol); + net_info_ratelimited("unknown protocol %u\n", iph->protocol); } switch (config->hash_mode) { diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index cf73cc70ed2..91747d4ebc2 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -311,8 +311,9 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len) static int ipv4_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple) { - NLA_PUT_BE32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip); - NLA_PUT_BE32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip); + if (nla_put_be32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) || + nla_put_be32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip)) + goto nla_put_failure; return 0; nla_put_failure: @@ -364,7 +365,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = { .nla_policy = ipv4_nla_policy, #endif #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) - .ctl_table_path = nf_net_ipv4_netfilter_sysctl_path, + .ctl_table_path = "net/ipv4/netfilter", .ctl_table = ip_ct_sysctl_table, #endif .me = THIS_MODULE, diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index 7cbe9cb261c..0847e373d33 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c @@ -228,10 +228,10 @@ icmp_error(struct net *net, struct nf_conn *tmpl, static int icmp_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *t) { - NLA_PUT_BE16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id); - NLA_PUT_U8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type); - NLA_PUT_U8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code); - + if (nla_put_be16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id) || + nla_put_u8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type) || + nla_put_u8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code)) + goto nla_put_failure; return 0; nla_put_failure: @@ -293,8 +293,8 @@ icmp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeout = data; - NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ)); - + if (nla_put_be32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c index 82536701e3a..cad29c12131 100644 --- a/net/ipv4/netfilter/nf_nat_h323.c +++ b/net/ipv4/netfilter/nf_nat_h323.c @@ -42,9 +42,7 @@ static int set_addr(struct sk_buff *skb, if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, addroff, sizeof(buf), (char *) &buf, sizeof(buf))) { - if (net_ratelimit()) - pr_notice("nf_nat_h323: nf_nat_mangle_tcp_packet" - " error\n"); + net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_tcp_packet error\n"); return -1; } @@ -58,9 +56,7 @@ static int set_addr(struct sk_buff *skb, if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, addroff, sizeof(buf), (char *) &buf, sizeof(buf))) { - if (net_ratelimit()) - pr_notice("nf_nat_h323: 
nf_nat_mangle_udp_packet" - " error\n"); + net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_udp_packet error\n"); return -1; } /* nf_nat_mangle_udp_packet uses skb_make_writable() to copy @@ -214,8 +210,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, /* Run out of expectations */ if (i >= H323_RTP_CHANNEL_MAX) { - if (net_ratelimit()) - pr_notice("nf_nat_h323: out of expectations\n"); + net_notice_ratelimited("nf_nat_h323: out of expectations\n"); return 0; } @@ -244,8 +239,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, } if (nated_port == 0) { /* No port available */ - if (net_ratelimit()) - pr_notice("nf_nat_h323: out of RTP ports\n"); + net_notice_ratelimited("nf_nat_h323: out of RTP ports\n"); return 0; } @@ -308,8 +302,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct, } if (nated_port == 0) { /* No port available */ - if (net_ratelimit()) - pr_notice("nf_nat_h323: out of TCP ports\n"); + net_notice_ratelimited("nf_nat_h323: out of TCP ports\n"); return 0; } @@ -365,8 +358,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct, } if (nated_port == 0) { /* No port available */ - if (net_ratelimit()) - pr_notice("nf_nat_q931: out of TCP ports\n"); + net_notice_ratelimited("nf_nat_q931: out of TCP ports\n"); return 0; } @@ -456,8 +448,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct, } if (nated_port == 0) { /* No port available */ - if (net_ratelimit()) - pr_notice("nf_nat_ras: out of TCP ports\n"); + net_notice_ratelimited("nf_nat_ras: out of TCP ports\n"); return 0; } @@ -545,8 +536,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct, } if (nated_port == 0) { /* No port available */ - if (net_ratelimit()) - pr_notice("nf_nat_q931: out of TCP ports\n"); + net_notice_ratelimited("nf_nat_q931: out of TCP ports\n"); return 0; } diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index 57932c43960..ea4a23813d2 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c @@ -283,7 +283,7 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff, __be32 newip; u_int16_t port; char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; - unsigned buflen; + unsigned int buflen; /* Connection will come from reply */ if (ct->tuplehash[dir].tuple.src.u3.ip == ct->tuplehash[!dir].tuple.dst.u3.ip) diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index 2133c30a4a5..746edec8b86 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c @@ -1206,8 +1206,7 @@ static int snmp_translate(struct nf_conn *ct, if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr), paylen, &map, &udph->check)) { - if (net_ratelimit()) - printk(KERN_WARNING "bsalg: parser failed\n"); + net_warn_ratelimited("bsalg: parser failed\n"); return NF_DROP; } return NF_ACCEPT; @@ -1241,9 +1240,8 @@ static int help(struct sk_buff *skb, unsigned int protoff, * can mess around with the payload. 
*/ if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) { - if (net_ratelimit()) - printk(KERN_WARNING "SNMP: dropping malformed packet src=%pI4 dst=%pI4\n", - &iph->saddr, &iph->daddr); + net_warn_ratelimited("SNMP: dropping malformed packet src=%pI4 dst=%pI4\n", + &iph->saddr, &iph->daddr); return NF_DROP; } diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 50009c787bc..6e930c7174d 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -51,15 +51,16 @@ static struct ping_table ping_table; static u16 ping_port_rover; -static inline int ping_hashfn(struct net *net, unsigned num, unsigned mask) +static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int mask) { int res = (num + net_hash_mix(net)) & mask; + pr_debug("hash(%d) = %d\n", num, res); return res; } static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table, - struct net *net, unsigned num) + struct net *net, unsigned int num) { return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)]; } @@ -188,7 +189,8 @@ static void inet_get_ping_group_range_net(struct net *net, gid_t *low, gid_t *high) { gid_t *data = net->ipv4.sysctl_ping_group_range; - unsigned seq; + unsigned int seq; + do { seq = read_seqbegin(&sysctl_local_ports.lock); @@ -410,7 +412,7 @@ struct pingfakehdr { __wsum wcheck; }; -static int ping_getfrag(void *from, char * to, +static int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd, struct sk_buff *skb) { struct pingfakehdr *pfh = (struct pingfakehdr *)from; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index bbd604c68e6..4032b818f3e 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -288,7 +288,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info) read_unlock(&raw_v4_hashinfo.lock); } -static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb) +static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) { /* Charge it to the socket. */ diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 167ea10b521..ffcb3b01684 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -109,6 +109,7 @@ #include <net/rtnetlink.h> #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> +#include <linux/kmemleak.h> #endif #include <net/secure_seq.h> @@ -229,7 +230,7 @@ const __u8 ip_tos2prio[16] = { TC_PRIO_INTERACTIVE_BULK, ECN_OR_COST(INTERACTIVE_BULK) }; - +EXPORT_SYMBOL(ip_tos2prio); /* * Route cache. 
@@ -296,7 +297,7 @@ static inline void rt_hash_lock_init(void) #endif static struct rt_hash_bucket *rt_hash_table __read_mostly; -static unsigned rt_hash_mask __read_mostly; +static unsigned int rt_hash_mask __read_mostly; static unsigned int rt_hash_log __read_mostly; static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); @@ -959,8 +960,7 @@ void rt_cache_flush_batch(struct net *net) static void rt_emergency_hash_rebuild(struct net *net) { - if (net_ratelimit()) - pr_warn("Route hash chain too long!\n"); + net_warn_ratelimited("Route hash chain too long!\n"); rt_cache_invalidate(net); } @@ -1083,8 +1083,7 @@ static int rt_garbage_collect(struct dst_ops *ops) goto out; if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size) goto out; - if (net_ratelimit()) - pr_warn("dst cache overflow\n"); + net_warn_ratelimited("dst cache overflow\n"); RT_CACHE_STAT_INC(gc_dst_overflow); return 1; @@ -1143,7 +1142,7 @@ static int rt_bind_neighbour(struct rtable *rt) return 0; } -static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt, +static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt, struct sk_buff *skb, int ifindex) { struct rtable *rth, *cand; @@ -1181,8 +1180,7 @@ restart: if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) { int err = rt_bind_neighbour(rt); if (err) { - if (net_ratelimit()) - pr_warn("Neighbour table failure & not caching routes\n"); + net_warn_ratelimited("Neighbour table failure & not caching routes\n"); ip_rt_put(rt); return ERR_PTR(err); } @@ -1298,8 +1296,7 @@ restart: goto restart; } - if (net_ratelimit()) - pr_warn("Neighbour table overflow\n"); + net_warn_ratelimited("Neighbour table overflow\n"); rt_drop(rt); return ERR_PTR(-ENOBUFS); } @@ -1377,14 +1374,13 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more) return; } } else if (!rt) - printk(KERN_DEBUG "rt_bind_peer(0) @%p\n", - __builtin_return_address(0)); + pr_debug("rt_bind_peer(0) @%p\n", __builtin_return_address(0)); ip_select_fb_ident(iph); } EXPORT_SYMBOL(__ip_select_ident); -static void rt_del(unsigned hash, struct rtable *rt) +static void rt_del(unsigned int hash, struct rtable *rt) { struct rtable __rcu **rthp; struct rtable *aux; @@ -1502,11 +1498,11 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, reject_redirect: #ifdef CONFIG_IP_ROUTE_VERBOSE - if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) - pr_info("Redirect from %pI4 on %s about %pI4 ignored\n" - " Advised path = %pI4 -> %pI4\n", - &old_gw, dev->name, &new_gw, - &saddr, &daddr); + if (IN_DEV_LOG_MARTIANS(in_dev)) + net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n" + " Advised path = %pI4 -> %pI4\n", + &old_gw, dev->name, &new_gw, + &saddr, &daddr); #endif ; } @@ -1538,7 +1534,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) ip_rt_put(rt); ret = NULL; } else if (rt->rt_flags & RTCF_REDIRECTED) { - unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src, + unsigned int hash = rt_hash(rt->rt_key_dst, rt->rt_key_src, rt->rt_oif, rt_genid(dev_net(dst->dev))); rt_del(hash, rt); @@ -1616,11 +1612,10 @@ void ip_rt_send_redirect(struct sk_buff *skb) ++peer->rate_tokens; #ifdef CONFIG_IP_ROUTE_VERBOSE if (log_martians && - peer->rate_tokens == ip_rt_redirect_number && - net_ratelimit()) - pr_warn("host %pI4/if%d ignores redirects for %pI4 to %pI4\n", - &ip_hdr(skb)->saddr, rt->rt_iif, - &rt->rt_dst, &rt->rt_gateway); + peer->rate_tokens == ip_rt_redirect_number) + net_warn_ratelimited("host %pI4/if%d ignores redirects for 
%pI4 to %pI4\n", + &ip_hdr(skb)->saddr, rt->rt_iif, + &rt->rt_dst, &rt->rt_gateway); #endif } } @@ -1843,9 +1838,9 @@ static void ipv4_link_failure(struct sk_buff *skb) static int ip_rt_bug(struct sk_buff *skb) { - printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n", - &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, - skb->dev ? skb->dev->name : "?"); + pr_debug("%s: %pI4 -> %pI4, %s\n", + __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, + skb->dev ? skb->dev->name : "?"); kfree_skb(skb); WARN_ON(1); return 0; @@ -2134,8 +2129,7 @@ static int __mkroute_input(struct sk_buff *skb, /* get a working reference to the output device */ out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); if (out_dev == NULL) { - if (net_ratelimit()) - pr_crit("Bug in ip_route_input_slow(). Please report.\n"); + net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n"); return -EINVAL; } @@ -2215,9 +2209,9 @@ static int ip_mkroute_input(struct sk_buff *skb, struct in_device *in_dev, __be32 daddr, __be32 saddr, u32 tos) { - struct rtable* rth = NULL; + struct rtable *rth = NULL; int err; - unsigned hash; + unsigned int hash; #ifdef CONFIG_IP_ROUTE_MULTIPATH if (res->fi && res->fi->fib_nhs > 1) @@ -2255,13 +2249,13 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, struct fib_result res; struct in_device *in_dev = __in_dev_get_rcu(dev); struct flowi4 fl4; - unsigned flags = 0; + unsigned int flags = 0; u32 itag = 0; - struct rtable * rth; - unsigned hash; + struct rtable *rth; + unsigned int hash; __be32 spec_dst; int err = -EINVAL; - struct net * net = dev_net(dev); + struct net *net = dev_net(dev); /* IP on this device is disabled. */ @@ -2406,9 +2400,9 @@ no_route: martian_destination: RT_CACHE_STAT_INC(in_martian_dst); #ifdef CONFIG_IP_ROUTE_VERBOSE - if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) - pr_warn("martian destination %pI4 from %pI4, dev %s\n", - &daddr, &saddr, dev->name); + if (IN_DEV_LOG_MARTIANS(in_dev)) + net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n", + &daddr, &saddr, dev->name); #endif e_hostunreach: @@ -2433,8 +2427,8 @@ martian_source_keep_err: int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, u8 tos, struct net_device *dev, bool noref) { - struct rtable * rth; - unsigned hash; + struct rtable *rth; + unsigned int hash; int iif = dev->ifindex; struct net *net; int res; @@ -2972,7 +2966,8 @@ static int rt_fill_info(struct net *net, r->rtm_src_len = 0; r->rtm_tos = rt->rt_key_tos; r->rtm_table = RT_TABLE_MAIN; - NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN); + if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN)) + goto nla_put_failure; r->rtm_type = rt->rt_type; r->rtm_scope = RT_SCOPE_UNIVERSE; r->rtm_protocol = RTPROT_UNSPEC; @@ -2980,31 +2975,38 @@ static int rt_fill_info(struct net *net, if (rt->rt_flags & RTCF_NOTIFY) r->rtm_flags |= RTM_F_NOTIFY; - NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst); - + if (nla_put_be32(skb, RTA_DST, rt->rt_dst)) + goto nla_put_failure; if (rt->rt_key_src) { r->rtm_src_len = 32; - NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src); + if (nla_put_be32(skb, RTA_SRC, rt->rt_key_src)) + goto nla_put_failure; } - if (rt->dst.dev) - NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex); + if (rt->dst.dev && + nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) + goto nla_put_failure; #ifdef CONFIG_IP_ROUTE_CLASSID - if (rt->dst.tclassid) - NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid); + if (rt->dst.tclassid && + nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid)) + goto nla_put_failure; #endif - if 
(rt_is_input_route(rt)) - NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst); - else if (rt->rt_src != rt->rt_key_src) - NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src); - - if (rt->rt_dst != rt->rt_gateway) - NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway); + if (rt_is_input_route(rt)) { + if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_spec_dst)) + goto nla_put_failure; + } else if (rt->rt_src != rt->rt_key_src) { + if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_src)) + goto nla_put_failure; + } + if (rt->rt_dst != rt->rt_gateway && + nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway)) + goto nla_put_failure; if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) goto nla_put_failure; - if (rt->rt_mark) - NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark); + if (rt->rt_mark && + nla_put_be32(skb, RTA_MARK, rt->rt_mark)) + goto nla_put_failure; error = rt->dst.error; if (peer) { @@ -3045,7 +3047,8 @@ static int rt_fill_info(struct net *net, } } else #endif - NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif); + if (nla_put_u32(skb, RTA_IIF, rt->rt_iif)) + goto nla_put_failure; } if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage, @@ -3059,7 +3062,7 @@ nla_put_failure: return -EMSGSIZE; } -static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) +static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(in_skb->sk); struct rtmsg *rtm; @@ -3334,23 +3337,6 @@ static ctl_table ipv4_route_table[] = { { } }; -static struct ctl_table empty[1]; - -static struct ctl_table ipv4_skeleton[] = -{ - { .procname = "route", - .mode = 0555, .child = ipv4_route_table}, - { .procname = "neigh", - .mode = 0555, .child = empty}, - { } -}; - -static __net_initdata struct ctl_path ipv4_path[] = { - { .procname = "net", }, - { .procname = "ipv4", }, - { }, -}; - static struct ctl_table ipv4_route_flush_table[] = { { .procname = "flush", @@ -3361,13 +3347,6 @@ static struct ctl_table ipv4_route_flush_table[] = { { }, }; -static __net_initdata struct ctl_path ipv4_route_path[] = { - { .procname = "net", }, - { .procname = "ipv4", }, - { .procname = "route", }, - { }, -}; - static __net_init int sysctl_route_net_init(struct net *net) { struct ctl_table *tbl; @@ -3380,8 +3359,7 @@ static __net_init int sysctl_route_net_init(struct net *net) } tbl[0].extra1 = net; - net->ipv4.route_hdr = - register_net_sysctl_table(net, ipv4_route_path, tbl); + net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl); if (net->ipv4.route_hdr == NULL) goto err_reg; return 0; @@ -3430,9 +3408,15 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly; static __initdata unsigned long rhash_entries; static int __init set_rhash_entries(char *str) { + ssize_t ret; + if (!str) return 0; - rhash_entries = simple_strtoul(str, &str, 0); + + ret = kstrtoul(str, 0, &rhash_entries); + if (ret) + return 0; + return 1; } __setup("rhash_entries=", set_rhash_entries); @@ -3505,6 +3489,6 @@ int __init ip_rt_init(void) */ void __init ip_static_sysctl_init(void) { - register_sysctl_paths(ipv4_path, ipv4_skeleton); + register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table); } #endif diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 7a7724da9bf..ef32956ed65 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -27,6 +27,7 @@ #include <net/tcp_memcontrol.h> static int zero; +static int two = 2; static int tcp_retr1_max = 255; static int ip_local_port_range_min[] = { 1, 1 }; static int ip_local_port_range_max[] = { 65535, 65535 }; @@ -78,7 
+79,7 @@ static int ipv4_local_port_range(ctl_table *table, int write, static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high) { gid_t *data = table->data; - unsigned seq; + unsigned int seq; do { seq = read_seqbegin(&sysctl_local_ports.lock); @@ -677,6 +678,15 @@ static struct ctl_table ipv4_table[] = { .proc_handler = proc_dointvec }, { + .procname = "tcp_early_retrans", + .data = &sysctl_tcp_early_retrans, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &two, + }, + { .procname = "udp_mem", .data = &sysctl_udp_mem, .maxlen = sizeof(sysctl_udp_mem), @@ -768,13 +778,6 @@ static struct ctl_table ipv4_net_table[] = { { } }; -struct ctl_path net_ipv4_ctl_path[] = { - { .procname = "net", }, - { .procname = "ipv4", }, - { }, -}; -EXPORT_SYMBOL_GPL(net_ipv4_ctl_path); - static __net_init int ipv4_sysctl_init_net(struct net *net) { struct ctl_table *table; @@ -815,8 +818,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net) tcp_init_mem(net); - net->ipv4.ipv4_hdr = register_net_sysctl_table(net, - net_ipv4_ctl_path, table); + net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table); if (net->ipv4.ipv4_hdr == NULL) goto err_reg; @@ -857,12 +859,12 @@ static __init int sysctl_ipv4_init(void) if (!i->procname) return -EINVAL; - hdr = register_sysctl_paths(net_ipv4_ctl_path, ipv4_table); + hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table); if (hdr == NULL) return -ENOMEM; if (register_pernet_subsys(&ipv4_sysctl_ops)) { - unregister_sysctl_table(hdr); + unregister_net_sysctl_table(hdr); return -ENOMEM; } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 6589e11d57b..bb485fcb077 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -363,6 +363,71 @@ static int retrans_to_secs(u8 retrans, int timeout, int rto_max) return period; } +/* Address-family independent initialization for a tcp_sock. + * + * NOTE: A lot of things set to zero explicitly by call to + * sk_alloc() so need not be done here. + */ +void tcp_init_sock(struct sock *sk) +{ + struct inet_connection_sock *icsk = inet_csk(sk); + struct tcp_sock *tp = tcp_sk(sk); + + skb_queue_head_init(&tp->out_of_order_queue); + tcp_init_xmit_timers(sk); + tcp_prequeue_init(tp); + + icsk->icsk_rto = TCP_TIMEOUT_INIT; + tp->mdev = TCP_TIMEOUT_INIT; + + /* So many TCP implementations out there (incorrectly) count the + * initial SYN frame in their delayed-ACK and congestion control + * algorithms that we must have the following bandaid to talk + * efficiently to them. -DaveM + */ + tp->snd_cwnd = TCP_INIT_CWND; + + /* See draft-stevens-tcpca-spec-01 for discussion of the + * initialization of these values. + */ + tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; + tp->snd_cwnd_clamp = ~0; + tp->mss_cache = TCP_MSS_DEFAULT; + + tp->reordering = sysctl_tcp_reordering; + tcp_enable_early_retrans(tp); + icsk->icsk_ca_ops = &tcp_init_congestion_ops; + + sk->sk_state = TCP_CLOSE; + + sk->sk_write_space = sk_stream_write_space; + sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); + + icsk->icsk_sync_mss = tcp_sync_mss; + + /* TCP Cookie Transactions */ + if (sysctl_tcp_cookie_size > 0) { + /* Default, cookies without s_data_payload. 
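The tcp_init_sock() helper being added here collects the address-family independent parts of TCP socket initialization so the IPv4 and IPv6 init paths stop duplicating them. A rough sketch of what an AF-specific init is expected to reduce to once it calls this helper (abbreviated; not the literal hunk from tcp_ipv4.c):

    static int tcp_v4_init_sock(struct sock *sk)
    {
            struct inet_connection_sock *icsk = inet_csk(sk);

            tcp_init_sock(sk);                      /* shared, AF-independent setup */

            icsk->icsk_af_ops = &ipv4_specific;     /* AF-specific hooks only */
    #ifdef CONFIG_TCP_MD5SIG
            tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
    #endif
            return 0;
    }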
*/ + tp->cookie_values = + kzalloc(sizeof(*tp->cookie_values), + sk->sk_allocation); + if (tp->cookie_values != NULL) + kref_init(&tp->cookie_values->kref); + } + /* Presumed zeroed, in order of appearance: + * cookie_in_always, cookie_out_never, + * s_data_constant, s_data_in, s_data_out + */ + sk->sk_sndbuf = sysctl_tcp_wmem[1]; + sk->sk_rcvbuf = sysctl_tcp_rmem[1]; + + local_bh_disable(); + sock_update_memcg(sk); + sk_sockets_allocated_inc(sk); + local_bh_enable(); +} +EXPORT_SYMBOL(tcp_init_sock); + /* * Wait for a TCP event. * @@ -528,7 +593,7 @@ static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) tp->pushed_seq = tp->write_seq; } -static inline int forced_push(const struct tcp_sock *tp) +static inline bool forced_push(const struct tcp_sock *tp) { return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); } @@ -784,9 +849,10 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse while (psize > 0) { struct sk_buff *skb = tcp_write_queue_tail(sk); struct page *page = pages[poffset / PAGE_SIZE]; - int copy, i, can_coalesce; + int copy, i; int offset = poffset % PAGE_SIZE; int size = min_t(size_t, psize, PAGE_SIZE - offset); + bool can_coalesce; if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) { new_segment: @@ -918,7 +984,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; int iovlen, flags, err, copied; - int mss_now, size_goal; + int mss_now = 0, size_goal; bool sg; long timeo; @@ -932,6 +998,19 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) goto out_err; + if (unlikely(tp->repair)) { + if (tp->repair_queue == TCP_RECV_QUEUE) { + copied = tcp_send_rcvq(sk, msg, size); + goto out; + } + + err = -EINVAL; + if (tp->repair_queue == TCP_NO_QUEUE) + goto out_err; + + /* 'common' sending to sendq */ + } + /* This should be in poll */ clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); @@ -1002,7 +1081,7 @@ new_segment: if (err) goto do_fault; } else { - int merge = 0; + bool merge = false; int i = skb_shinfo(skb)->nr_frags; struct page *page = sk->sk_sndmsg_page; int off; @@ -1016,7 +1095,7 @@ new_segment: off != PAGE_SIZE) { /* We can extend the last page * fragment. 
*/ - merge = 1; + merge = true; } else if (i == MAX_SKB_FRAGS || !sg) { /* Need to add new fragment and cannot * do this because interface is non-SG, @@ -1088,7 +1167,7 @@ new_segment: if ((seglen -= copy) == 0 && iovlen == 0) goto out; - if (skb->len < max || (flags & MSG_OOB)) + if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair)) continue; if (forced_push(tp)) { @@ -1101,7 +1180,7 @@ new_segment: wait_for_sndbuf: set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); wait_for_memory: - if (copied) + if (copied && likely(!tp->repair)) tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) @@ -1112,7 +1191,7 @@ wait_for_memory: } out: - if (copied) + if (copied && likely(!tp->repair)) tcp_push(sk, flags, mss_now, tp->nonagle); release_sock(sk); return copied; @@ -1186,6 +1265,24 @@ static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) return -EAGAIN; } +static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) +{ + struct sk_buff *skb; + int copied = 0, err = 0; + + /* XXX -- need to support SO_PEEK_OFF */ + + skb_queue_walk(&sk->sk_write_queue, skb) { + err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len); + if (err) + break; + + copied += skb->len; + } + + return err ?: copied; +} + /* Clean up the receive buffer for full frames taken by the user, * then send an ACK if necessary. COPIED is the number of bytes * tcp_recvmsg has given to the user so far, it speeds up the @@ -1195,7 +1292,7 @@ static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) void tcp_cleanup_rbuf(struct sock *sk, int copied) { struct tcp_sock *tp = tcp_sk(sk); - int time_to_ack = 0; + bool time_to_ack = false; struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); @@ -1221,7 +1318,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied) ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && !icsk->icsk_ack.pingpong)) && !atomic_read(&sk->sk_rmem_alloc))) - time_to_ack = 1; + time_to_ack = true; } /* We send an ACK if we can now advertise a non-zero window @@ -1243,7 +1340,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied) * "Lots" means "at least twice" here. 
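A worked example of the "at least twice" rule checked just below, with made-up numbers: if the window most recently advertised to the peer (rcv_window_now) was 16 KB and, now that the application has drained the receive queue, __tcp_select_window() can offer 40 KB, then 40 KB >= 2 * 16 KB and a window-update ACK is sent immediately; a rise to only 24 KB would not, on its own, trigger one, and the update waits for the next regular ACK.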
*/ if (new_window && new_window >= 2 * rcv_window_now) - time_to_ack = 1; + time_to_ack = true; } } if (time_to_ack) @@ -1375,11 +1472,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, break; } if (tcp_hdr(skb)->fin) { - sk_eat_skb(sk, skb, 0); + sk_eat_skb(sk, skb, false); ++seq; break; } - sk_eat_skb(sk, skb, 0); + sk_eat_skb(sk, skb, false); if (!desc->count) break; tp->copied_seq = seq; @@ -1415,7 +1512,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, int target; /* Read at least this many bytes */ long timeo; struct task_struct *user_recv = NULL; - int copied_early = 0; + bool copied_early = false; struct sk_buff *skb; u32 urg_hole = 0; @@ -1431,6 +1528,21 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (flags & MSG_OOB) goto recv_urg; + if (unlikely(tp->repair)) { + err = -EPERM; + if (!(flags & MSG_PEEK)) + goto out; + + if (tp->repair_queue == TCP_SEND_QUEUE) + goto recv_sndq; + + err = -EINVAL; + if (tp->repair_queue == TCP_NO_QUEUE) + goto out; + + /* 'common' recv queue MSG_PEEK-ing */ + } + seq = &tp->copied_seq; if (flags & MSG_PEEK) { peek_seq = tp->copied_seq; @@ -1632,9 +1744,9 @@ do_prequeue: } if ((flags & MSG_PEEK) && (peek_seq - copied - urg_hole != tp->copied_seq)) { - if (net_ratelimit()) - printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n", - current->comm, task_pid_nr(current)); + net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", + current->comm, + task_pid_nr(current)); peek_seq = tp->copied_seq; } continue; @@ -1688,7 +1800,7 @@ do_prequeue: dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); if ((offset + used) == skb->len) - copied_early = 1; + copied_early = true; } else #endif @@ -1722,7 +1834,7 @@ skip_copy: goto found_fin_ok; if (!(flags & MSG_PEEK)) { sk_eat_skb(sk, skb, copied_early); - copied_early = 0; + copied_early = false; } continue; @@ -1731,7 +1843,7 @@ skip_copy: ++*seq; if (!(flags & MSG_PEEK)) { sk_eat_skb(sk, skb, copied_early); - copied_early = 0; + copied_early = false; } break; } while (len > 0); @@ -1782,6 +1894,10 @@ out: recv_urg: err = tcp_recv_urg(sk, msg, len, flags); goto out; + +recv_sndq: + err = tcp_peek_sndq(sk, msg, len); + goto out; } EXPORT_SYMBOL(tcp_recvmsg); @@ -1885,10 +2001,10 @@ bool tcp_check_oom(struct sock *sk, int shift) too_many_orphans = tcp_too_many_orphans(sk, shift); out_of_socket_memory = tcp_out_of_memory(sk); - if (too_many_orphans && net_ratelimit()) - pr_info("too many orphaned sockets\n"); - if (out_of_socket_memory && net_ratelimit()) - pr_info("out of memory -- consider tuning tcp_mem\n"); + if (too_many_orphans) + net_info_ratelimited("too many orphaned sockets\n"); + if (out_of_socket_memory) + net_info_ratelimited("out of memory -- consider tuning tcp_mem\n"); return too_many_orphans || out_of_socket_memory; } @@ -1934,7 +2050,9 @@ void tcp_close(struct sock *sk, long timeout) * advertise a zero window, then kill -9 the FTP client, wheee... * Note: timeout is always zero in such a case. */ - if (data_was_unread) { + if (unlikely(tcp_sk(sk)->repair)) { + sk->sk_prot->disconnect(sk, 0); + } else if (data_was_unread) { /* Unread data was tossed, zap the connection. 
*/ NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); tcp_set_state(sk, TCP_CLOSE); @@ -2052,7 +2170,7 @@ EXPORT_SYMBOL(tcp_close); /* These states need RST on ABORT according to RFC793 */ -static inline int tcp_need_reset(int state) +static inline bool tcp_need_reset(int state) { return (1 << state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | @@ -2073,6 +2191,8 @@ int tcp_disconnect(struct sock *sk, int flags) /* ABORT function of RFC793 */ if (old_state == TCP_LISTEN) { inet_csk_listen_stop(sk); + } else if (unlikely(tp->repair)) { + sk->sk_err = ECONNABORTED; } else if (tcp_need_reset(old_state) || (tp->snd_nxt != tp->write_seq && (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { @@ -2124,6 +2244,54 @@ int tcp_disconnect(struct sock *sk, int flags) } EXPORT_SYMBOL(tcp_disconnect); +static inline bool tcp_can_repair_sock(const struct sock *sk) +{ + return capable(CAP_NET_ADMIN) && + ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED)); +} + +static int tcp_repair_options_est(struct tcp_sock *tp, + struct tcp_repair_opt __user *optbuf, unsigned int len) +{ + struct tcp_repair_opt opt; + + while (len >= sizeof(opt)) { + if (copy_from_user(&opt, optbuf, sizeof(opt))) + return -EFAULT; + + optbuf++; + len -= sizeof(opt); + + switch (opt.opt_code) { + case TCPOPT_MSS: + tp->rx_opt.mss_clamp = opt.opt_val; + break; + case TCPOPT_WINDOW: + if (opt.opt_val > 14) + return -EFBIG; + + tp->rx_opt.snd_wscale = opt.opt_val; + break; + case TCPOPT_SACK_PERM: + if (opt.opt_val != 0) + return -EINVAL; + + tp->rx_opt.sack_ok |= TCP_SACK_SEEN; + if (sysctl_tcp_fack) + tcp_enable_fack(tp); + break; + case TCPOPT_TIMESTAMP: + if (opt.opt_val != 0) + return -EINVAL; + + tp->rx_opt.tstamp_ok = 1; + break; + } + } + + return 0; +} + /* * Socket option code for TCP. 
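The TCP_REPAIR, TCP_REPAIR_QUEUE and TCP_QUEUE_SEQ socket options handled below are aimed at checkpoint/restore tools. A rough userspace sketch of the restore-side sequence they enable, assuming the snd_seq/rcv_seq values were captured earlier and that the TCP_REPAIR* constants are available from the updated uapi headers (error handling omitted; this is an illustration, not code from the patch):

    #include <stdint.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/tcp.h>   /* TCP_REPAIR, TCP_REPAIR_QUEUE, TCP_QUEUE_SEQ */

    static void restore_seqs(int sk, uint32_t snd_seq, uint32_t rcv_seq)
    {
            int on = 1, off = 0, q;

            /* TCP_QUEUE_SEQ is only accepted while the socket is still TCP_CLOSE. */
            setsockopt(sk, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on));

            q = TCP_SEND_QUEUE;
            setsockopt(sk, IPPROTO_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q));
            setsockopt(sk, IPPROTO_TCP, TCP_QUEUE_SEQ, &snd_seq, sizeof(snd_seq));

            q = TCP_RECV_QUEUE;
            setsockopt(sk, IPPROTO_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q));
            setsockopt(sk, IPPROTO_TCP, TCP_QUEUE_SEQ, &rcv_seq, sizeof(rcv_seq));

            /* ... bind(), connect(), refill the queues, then leave repair mode ... */

            setsockopt(sk, IPPROTO_TCP, TCP_REPAIR, &off, sizeof(off));
    }

Note that TCP_REPAIR itself requires CAP_NET_ADMIN and a socket in CLOSE or ESTABLISHED state, matching tcp_can_repair_sock() above.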
*/ @@ -2294,6 +2462,55 @@ static int do_tcp_setsockopt(struct sock *sk, int level, err = -EINVAL; else tp->thin_dupack = val; + if (tp->thin_dupack) + tcp_disable_early_retrans(tp); + break; + + case TCP_REPAIR: + if (!tcp_can_repair_sock(sk)) + err = -EPERM; + else if (val == 1) { + tp->repair = 1; + sk->sk_reuse = SK_FORCE_REUSE; + tp->repair_queue = TCP_NO_QUEUE; + } else if (val == 0) { + tp->repair = 0; + sk->sk_reuse = SK_NO_REUSE; + tcp_send_window_probe(sk); + } else + err = -EINVAL; + + break; + + case TCP_REPAIR_QUEUE: + if (!tp->repair) + err = -EPERM; + else if (val < TCP_QUEUES_NR) + tp->repair_queue = val; + else + err = -EINVAL; + break; + + case TCP_QUEUE_SEQ: + if (sk->sk_state != TCP_CLOSE) + err = -EPERM; + else if (tp->repair_queue == TCP_SEND_QUEUE) + tp->write_seq = val; + else if (tp->repair_queue == TCP_RECV_QUEUE) + tp->rcv_nxt = val; + else + err = -EINVAL; + break; + + case TCP_REPAIR_OPTIONS: + if (!tp->repair) + err = -EINVAL; + else if (sk->sk_state == TCP_ESTABLISHED) + err = tcp_repair_options_est(tp, + (struct tcp_repair_opt __user *)optval, + optlen); + else + err = -EPERM; break; case TCP_CORK: @@ -2529,6 +2746,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level, val = tp->mss_cache; if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) val = tp->rx_opt.user_mss; + if (tp->repair) + val = tp->rx_opt.mss_clamp; break; case TCP_NODELAY: val = !!(tp->nonagle&TCP_NAGLE_OFF); @@ -2631,6 +2850,26 @@ static int do_tcp_getsockopt(struct sock *sk, int level, val = tp->thin_dupack; break; + case TCP_REPAIR: + val = tp->repair; + break; + + case TCP_REPAIR_QUEUE: + if (tp->repair) + val = tp->repair_queue; + else + return -EINVAL; + break; + + case TCP_QUEUE_SEQ: + if (tp->repair_queue == TCP_SEND_QUEUE) + val = tp->write_seq; + else if (tp->repair_queue == TCP_RECV_QUEUE) + val = tp->rcv_nxt; + else + return -EINVAL; + break; + case TCP_USER_TIMEOUT: val = jiffies_to_msecs(icsk->icsk_user_timeout); break; @@ -2674,7 +2913,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, { struct sk_buff *segs = ERR_PTR(-EINVAL); struct tcphdr *th; - unsigned thlen; + unsigned int thlen; unsigned int seq; __be32 delta; unsigned int oldlen; @@ -2932,13 +3171,13 @@ out_free: struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk) { struct tcp_md5sig_pool __percpu *pool; - int alloc = 0; + bool alloc = false; retry: spin_lock_bh(&tcp_md5sig_pool_lock); pool = tcp_md5sig_pool; if (tcp_md5sig_users++ == 0) { - alloc = 1; + alloc = true; spin_unlock_bh(&tcp_md5sig_pool_lock); } else if (!pool) { tcp_md5sig_users--; @@ -3032,9 +3271,9 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, struct scatterlist sg; const struct tcphdr *tp = tcp_hdr(skb); struct hash_desc *desc = &hp->md5_desc; - unsigned i; - const unsigned head_data_len = skb_headlen(skb) > header_len ? - skb_headlen(skb) - header_len : 0; + unsigned int i; + const unsigned int head_data_len = skb_headlen(skb) > header_len ? 
+ skb_headlen(skb) - header_len : 0; const struct skb_shared_info *shi = skb_shinfo(skb); struct sk_buff *frag_iter; @@ -3222,9 +3461,15 @@ extern struct tcp_congestion_ops tcp_reno; static __initdata unsigned long thash_entries; static int __init set_thash_entries(char *str) { + ssize_t ret; + if (!str) return 0; - thash_entries = simple_strtoul(str, &str, 0); + + ret = kstrtoul(str, 0, &thash_entries); + if (ret) + return 0; + return 1; } __setup("thash_entries=", set_thash_entries); diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 272a84593c8..04dbd7ae7c6 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -280,19 +280,19 @@ int tcp_set_congestion_control(struct sock *sk, const char *name) /* RFC2861 Check whether we are limited by application or congestion window * This is the inverse of cwnd check in tcp_tso_should_defer */ -int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) +bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) { const struct tcp_sock *tp = tcp_sk(sk); u32 left; if (in_flight >= tp->snd_cwnd) - return 1; + return true; left = tp->snd_cwnd - in_flight; if (sk_can_gso(sk) && left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd && left * tp->mss_cache < sk->sk_gso_max_size) - return 1; + return true; return left <= tcp_max_tso_deferred_mss(tp); } EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited); diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c index fe3ecf484b4..57bdd17dff4 100644 --- a/net/ipv4/tcp_hybla.c +++ b/net/ipv4/tcp_hybla.c @@ -15,7 +15,7 @@ /* Tcp Hybla structure. */ struct hybla { - u8 hybla_en; + bool hybla_en; u32 snd_cwnd_cents; /* Keeps increment values when it is <1, <<7 */ u32 rho; /* Rho parameter, integer part */ u32 rho2; /* Rho * Rho, integer part */ @@ -24,8 +24,7 @@ struct hybla { u32 minrtt; /* Minimum smoothed round trip time value seen */ }; -/* Hybla reference round trip time (default= 1/40 sec = 25 ms), - expressed in jiffies */ +/* Hybla reference round trip time (default= 1/40 sec = 25 ms), in ms */ static int rtt0 = 25; module_param(rtt0, int, 0644); MODULE_PARM_DESC(rtt0, "reference rout trip time (ms)"); @@ -39,7 +38,7 @@ static inline void hybla_recalc_param (struct sock *sk) ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt / msecs_to_jiffies(rtt0), 8); ca->rho = ca->rho_3ls >> 3; ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1; - ca->rho2 = ca->rho2_7ls >>7; + ca->rho2 = ca->rho2_7ls >> 7; } static void hybla_init(struct sock *sk) @@ -52,7 +51,7 @@ static void hybla_init(struct sock *sk) ca->rho_3ls = 0; ca->rho2_7ls = 0; ca->snd_cwnd_cents = 0; - ca->hybla_en = 1; + ca->hybla_en = true; tp->snd_cwnd = 2; tp->snd_cwnd_clamp = 65535; @@ -67,6 +66,7 @@ static void hybla_init(struct sock *sk) static void hybla_state(struct sock *sk, u8 ca_state) { struct hybla *ca = inet_csk_ca(sk); + ca->hybla_en = (ca_state == TCP_CA_Open); } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 257b61789ee..cfa2aa12834 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -99,6 +99,7 @@ int sysctl_tcp_thin_dupack __read_mostly; int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; int sysctl_tcp_abc __read_mostly; +int sysctl_tcp_early_retrans __read_mostly = 2; #define FLAG_DATA 0x01 /* Incoming frame contained data. */ #define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. 
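The sysctl_tcp_early_retrans variable introduced just above (default 2; the matching net.ipv4.tcp_early_retrans entry earlier in sysctl_net_ipv4.c clamps it to 0..2) gates the RFC 5827 early-retransmit support added to this file: 0 disables it, and a value below 2 skips the short pause in tcp_pause_early_retransmit() further down. The tcp_enable_early_retrans()/tcp_disable_early_retrans() helpers called from tcp_init_sock() and the reordering/thin-dupack paths are defined in tcp.h and not visible in these hunks; a plausible sketch, with the exact guard condition being an assumption:

    static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
    {
            /* Assumed guard: only the sysctl is consulted here. */
            tp->do_early_retrans = sysctl_tcp_early_retrans > 0;
    }

    static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
    {
            tp->do_early_retrans = 0;
    }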
*/ @@ -175,7 +176,7 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) static void tcp_incr_quickack(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); - unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); + unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); if (quickacks == 0) quickacks = 2; @@ -195,9 +196,10 @@ static void tcp_enter_quickack_mode(struct sock *sk) * and the session is not interactive. */ -static inline int tcp_in_quickack_mode(const struct sock *sk) +static inline bool tcp_in_quickack_mode(const struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); + return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong; } @@ -252,11 +254,11 @@ static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) tp->ecn_flags &= ~TCP_ECN_OK; } -static inline int TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) +static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) { if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) - return 1; - return 0; + return true; + return false; } /* Buffer size and advertised window tuning. @@ -906,6 +908,7 @@ static void tcp_init_metrics(struct sock *sk) if (dst_metric(dst, RTAX_REORDERING) && tp->reordering != dst_metric(dst, RTAX_REORDERING)) { tcp_disable_fack(tp); + tcp_disable_early_retrans(tp); tp->reordering = dst_metric(dst, RTAX_REORDERING); } @@ -937,7 +940,7 @@ static void tcp_init_metrics(struct sock *sk) tcp_set_rto(sk); reset: if (tp->srtt == 0) { - /* RFC2988bis: We've failed to get a valid RTT sample from + /* RFC6298: 5.7 We've failed to get a valid RTT sample from * 3WHS. This is most likely due to retransmission, * including spurious one. Reset the RTO back to 3secs * from the more aggressive 1sec to avoid more spurious @@ -947,7 +950,7 @@ reset: inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK; } /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been - * retransmitted. In light of RFC2988bis' more aggressive 1sec + * retransmitted. In light of RFC6298 more aggressive 1sec * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK * retransmission has occurred. */ @@ -979,15 +982,18 @@ static void tcp_update_reordering(struct sock *sk, const int metric, NET_INC_STATS_BH(sock_net(sk), mib_idx); #if FASTRETRANS_DEBUG > 1 - printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n", - tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, - tp->reordering, - tp->fackets_out, - tp->sacked_out, - tp->undo_marker ? tp->undo_retrans : 0); + pr_debug("Disorder%d %d %u f%u s%u rr%d\n", + tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, + tp->reordering, + tp->fackets_out, + tp->sacked_out, + tp->undo_marker ? tp->undo_retrans : 0); #endif tcp_disable_fack(tp); } + + if (metric > 0) + tcp_disable_early_retrans(tp); } /* This must be called before lost_out is incremented */ @@ -1118,36 +1124,36 @@ static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, * the exact amount is rather hard to quantify. However, tp->max_window can * be used as an exaggerated estimate. 
*/ -static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack, - u32 start_seq, u32 end_seq) +static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack, + u32 start_seq, u32 end_seq) { /* Too far in future, or reversed (interpretation is ambiguous) */ if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) - return 0; + return false; /* Nasty start_seq wrap-around check (see comments above) */ if (!before(start_seq, tp->snd_nxt)) - return 0; + return false; /* In outstanding window? ...This is valid exit for D-SACKs too. * start_seq == snd_una is non-sensical (see comments above) */ if (after(start_seq, tp->snd_una)) - return 1; + return true; if (!is_dsack || !tp->undo_marker) - return 0; + return false; /* ...Then it's D-SACK, and must reside below snd_una completely */ if (after(end_seq, tp->snd_una)) - return 0; + return false; if (!before(start_seq, tp->undo_marker)) - return 1; + return true; /* Too old */ if (!after(end_seq, tp->undo_marker)) - return 0; + return false; /* Undo_marker boundary crossing (overestimates a lot). Known already: * start_seq < undo_marker and end_seq >= undo_marker. @@ -1219,17 +1225,17 @@ static void tcp_mark_lost_retrans(struct sock *sk) tp->lost_retrans_low = new_low_seq; } -static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, - struct tcp_sack_block_wire *sp, int num_sacks, - u32 prior_snd_una) +static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, + struct tcp_sack_block_wire *sp, int num_sacks, + u32 prior_snd_una) { struct tcp_sock *tp = tcp_sk(sk); u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq); u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq); - int dup_sack = 0; + bool dup_sack = false; if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) { - dup_sack = 1; + dup_sack = true; tcp_dsack_seen(tp); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV); } else if (num_sacks > 1) { @@ -1238,7 +1244,7 @@ static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, if (!after(end_seq_0, end_seq_1) && !before(start_seq_0, start_seq_1)) { - dup_sack = 1; + dup_sack = true; tcp_dsack_seen(tp); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKOFORECV); @@ -1269,9 +1275,10 @@ struct tcp_sacktag_state { * FIXME: this could be merged to shift decision code */ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, - u32 start_seq, u32 end_seq) + u32 start_seq, u32 end_seq) { - int in_sack, err; + int err; + bool in_sack; unsigned int pkt_len; unsigned int mss; @@ -1317,7 +1324,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, static u8 tcp_sacktag_one(struct sock *sk, struct tcp_sacktag_state *state, u8 sacked, u32 start_seq, u32 end_seq, - int dup_sack, int pcount) + bool dup_sack, int pcount) { struct tcp_sock *tp = tcp_sk(sk); int fack_count = state->fack_count; @@ -1397,10 +1404,10 @@ static u8 tcp_sacktag_one(struct sock *sk, /* Shift newly-SACKed bytes from this skb to the immediately previous * already-SACKed sk_buff. Mark the newly-SACKed bytes as such. 
*/ -static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, - struct tcp_sacktag_state *state, - unsigned int pcount, int shifted, int mss, - int dup_sack) +static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, + struct tcp_sacktag_state *state, + unsigned int pcount, int shifted, int mss, + bool dup_sack) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *prev = tcp_write_queue_prev(sk, skb); @@ -1450,7 +1457,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, if (skb->len > 0) { BUG_ON(!tcp_skb_pcount(skb)); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED); - return 0; + return false; } /* Whole SKB was eaten :-) */ @@ -1473,7 +1480,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED); - return 1; + return true; } /* I wish gso_size would have a bit more sane initialization than @@ -1496,7 +1503,7 @@ static int skb_can_shift(const struct sk_buff *skb) static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, struct tcp_sacktag_state *state, u32 start_seq, u32 end_seq, - int dup_sack) + bool dup_sack) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *prev; @@ -1635,14 +1642,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, struct tcp_sack_block *next_dup, struct tcp_sacktag_state *state, u32 start_seq, u32 end_seq, - int dup_sack_in) + bool dup_sack_in) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *tmp; tcp_for_write_queue_from(skb, sk) { int in_sack = 0; - int dup_sack = dup_sack_in; + bool dup_sack = dup_sack_in; if (skb == tcp_send_head(sk)) break; @@ -1657,7 +1664,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, next_dup->start_seq, next_dup->end_seq); if (in_sack > 0) - dup_sack = 1; + dup_sack = true; } /* skb reference here is a bit tricky to get right, since @@ -1762,7 +1769,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, struct sk_buff *skb; int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3); int used_sacks; - int found_dup_sack = 0; + bool found_dup_sack = false; int i, j; int first_sack_index; @@ -1793,7 +1800,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, used_sacks = 0; first_sack_index = 0; for (i = 0; i < num_sacks; i++) { - int dup_sack = !i && found_dup_sack; + bool dup_sack = !i && found_dup_sack; sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq); sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq); @@ -1860,7 +1867,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, while (i < used_sacks) { u32 start_seq = sp[i].start_seq; u32 end_seq = sp[i].end_seq; - int dup_sack = (found_dup_sack && (i == first_sack_index)); + bool dup_sack = (found_dup_sack && (i == first_sack_index)); struct tcp_sack_block *next_dup = NULL; if (found_dup_sack && ((i + 1) == first_sack_index)) @@ -1962,9 +1969,9 @@ out: } /* Limits sacked_out so that sum with lost_out isn't ever larger than - * packets_out. Returns zero if sacked_out adjustement wasn't necessary. + * packets_out. Returns false if sacked_out adjustement wasn't necessary. 
*/ -static int tcp_limit_reno_sacked(struct tcp_sock *tp) +static bool tcp_limit_reno_sacked(struct tcp_sock *tp) { u32 holes; @@ -1973,9 +1980,9 @@ static int tcp_limit_reno_sacked(struct tcp_sock *tp) if ((tp->sacked_out + holes) > tp->packets_out) { tp->sacked_out = tp->packets_out - holes; - return 1; + return true; } - return 0; + return false; } /* If we receive more dupacks than we expected counting segments @@ -2029,40 +2036,40 @@ static int tcp_is_sackfrto(const struct tcp_sock *tp) /* F-RTO can only be used if TCP has never retransmitted anything other than * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here) */ -int tcp_use_frto(struct sock *sk) +bool tcp_use_frto(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); struct sk_buff *skb; if (!sysctl_tcp_frto) - return 0; + return false; /* MTU probe and F-RTO won't really play nicely along currently */ if (icsk->icsk_mtup.probe_size) - return 0; + return false; if (tcp_is_sackfrto(tp)) - return 1; + return true; /* Avoid expensive walking of rexmit queue if possible */ if (tp->retrans_out > 1) - return 0; + return false; skb = tcp_write_queue_head(sk); if (tcp_skb_is_last(sk, skb)) - return 1; + return true; skb = tcp_write_queue_next(sk, skb); /* Skips head */ tcp_for_write_queue_from(skb, sk) { if (skb == tcp_send_head(sk)) break; if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) - return 0; + return false; /* Short-circuit when first non-SACKed skb has been checked */ if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) break; } - return 1; + return true; } /* RTO occurred, but do not yet enter Loss state. Instead, defer RTO @@ -2298,7 +2305,7 @@ void tcp_enter_loss(struct sock *sk, int how) * * Do processing similar to RTO timeout. */ -static int tcp_check_sack_reneging(struct sock *sk, int flag) +static bool tcp_check_sack_reneging(struct sock *sk, int flag) { if (flag & FLAG_SACK_RENEGING) { struct inet_connection_sock *icsk = inet_csk(sk); @@ -2309,9 +2316,9 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag) tcp_retransmit_skb(sk, tcp_write_queue_head(sk)); inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); - return 1; + return true; } - return 0; + return false; } static inline int tcp_fackets_out(const struct tcp_sock *tp) @@ -2339,6 +2346,27 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; } +static bool tcp_pause_early_retransmit(struct sock *sk, int flag) +{ + struct tcp_sock *tp = tcp_sk(sk); + unsigned long delay; + + /* Delay early retransmit and entering fast recovery for + * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples + * available, or RTO is scheduled to fire first. + */ + if (sysctl_tcp_early_retrans < 2 || (flag & FLAG_ECE) || !tp->srtt) + return false; + + delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2)); + if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay))) + return false; + + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, TCP_RTO_MAX); + tp->early_retrans_delayed = 1; + return true; +} + static inline int tcp_skb_timedout(const struct sock *sk, const struct sk_buff *skb) { @@ -2446,28 +2474,28 @@ static inline int tcp_head_timedout(const struct sock *sk) * Main question: may we further continue forward transmission * with the same cwnd? 
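A note on the arithmetic in tcp_pause_early_retransmit() above: tp->srtt holds the smoothed RTT left-shifted by 3 (units of RTT/8 in jiffies), so `tp->srtt >> 5` is RTT/4 and the pause is max(RTT/4, 2 ms) — for example, a 40 ms smoothed RTT gives a 10 ms pause before fast recovery is entered. The Trick#6 test added to tcp_time_to_recover() just below only reaches this pause in the RFC 5827 early-retransmit situation: fewer than four segments in flight, all but one of them already SACKed, nothing retransmitted yet, and no new data eligible to be sent.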
*/ -static int tcp_time_to_recover(struct sock *sk) +static bool tcp_time_to_recover(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); __u32 packets_out; /* Do not perform any recovery during F-RTO algorithm */ if (tp->frto_counter) - return 0; + return false; /* Trick#1: The loss is proven. */ if (tp->lost_out) - return 1; + return true; /* Not-A-Trick#2 : Classic rule... */ if (tcp_dupack_heuristics(tp) > tp->reordering) - return 1; + return true; /* Trick#3 : when we use RFC2988 timer restart, fast * retransmit can be triggered by timeout of queue head. */ if (tcp_is_fack(tp) && tcp_head_timedout(sk)) - return 1; + return true; /* Trick#4: It is still not OK... But will it be useful to delay * recovery more? @@ -2479,7 +2507,7 @@ static int tcp_time_to_recover(struct sock *sk) /* We have nothing to send. This connection is limited * either by receiver window or by application. */ - return 1; + return true; } /* If a thin stream is detected, retransmit after first @@ -2490,9 +2518,19 @@ static int tcp_time_to_recover(struct sock *sk) if ((tp->thin_dupack || sysctl_tcp_thin_dupack) && tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 && tcp_is_sack(tp) && !tcp_send_head(sk)) - return 1; + return true; - return 0; + /* Trick#6: TCP early retransmit, per RFC5827. To avoid spurious + * retransmissions due to small network reorderings, we implement + * Mitigation A.3 in the RFC and delay the retransmission for a short + * interval if appropriate. + */ + if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out && + (tp->packets_out == (tp->sacked_out + 1) && tp->packets_out < 4) && + !tcp_may_send_now(sk)) + return !tcp_pause_early_retransmit(sk, flag); + + return false; } /* New heuristics: it is possible only after we switched to restart timer @@ -2680,22 +2718,22 @@ static void DBGUNDO(struct sock *sk, const char *msg) struct inet_sock *inet = inet_sk(sk); if (sk->sk_family == AF_INET) { - printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", - msg, - &inet->inet_daddr, ntohs(inet->inet_dport), - tp->snd_cwnd, tcp_left_out(tp), - tp->snd_ssthresh, tp->prior_ssthresh, - tp->packets_out); + pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", + msg, + &inet->inet_daddr, ntohs(inet->inet_dport), + tp->snd_cwnd, tcp_left_out(tp), + tp->snd_ssthresh, tp->prior_ssthresh, + tp->packets_out); } #if IS_ENABLED(CONFIG_IPV6) else if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); - printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", - msg, - &np->daddr, ntohs(inet->inet_dport), - tp->snd_cwnd, tcp_left_out(tp), - tp->snd_ssthresh, tp->prior_ssthresh, - tp->packets_out); + pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", + msg, + &np->daddr, ntohs(inet->inet_dport), + tp->snd_cwnd, tcp_left_out(tp), + tp->snd_ssthresh, tp->prior_ssthresh, + tp->packets_out); } #endif } @@ -2731,7 +2769,7 @@ static inline int tcp_may_undo(const struct tcp_sock *tp) } /* People celebrate: "We love our President!" */ -static int tcp_try_undo_recovery(struct sock *sk) +static bool tcp_try_undo_recovery(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); @@ -2756,10 +2794,10 @@ static int tcp_try_undo_recovery(struct sock *sk) * is ACKed. For Reno it is MUST to prevent false * fast retransmits (RFC2582). SACK TCP is safe. 
*/ tcp_moderate_cwnd(tp); - return 1; + return true; } tcp_set_ca_state(sk, TCP_CA_Open); - return 0; + return false; } /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ @@ -2789,19 +2827,19 @@ static void tcp_try_undo_dsack(struct sock *sk) * that successive retransmissions of a segment must not advance * retrans_stamp under any conditions. */ -static int tcp_any_retrans_done(const struct sock *sk) +static bool tcp_any_retrans_done(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; if (tp->retrans_out) - return 1; + return true; skb = tcp_write_queue_head(sk); if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) - return 1; + return true; - return 0; + return false; } /* Undo during fast recovery after partial ACK. */ @@ -2835,7 +2873,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked) } /* Undo during loss recovery after partial ACK. */ -static int tcp_try_undo_loss(struct sock *sk) +static bool tcp_try_undo_loss(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); @@ -2857,9 +2895,9 @@ static int tcp_try_undo_loss(struct sock *sk) tp->undo_marker = 0; if (tcp_is_sack(tp)) tcp_set_ca_state(sk, TCP_CA_Open); - return 1; + return true; } - return 0; + return false; } static inline void tcp_complete_cwr(struct sock *sk) @@ -3025,6 +3063,38 @@ static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked, tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; } +static void tcp_enter_recovery(struct sock *sk, bool ece_ack) +{ + struct tcp_sock *tp = tcp_sk(sk); + int mib_idx; + + if (tcp_is_reno(tp)) + mib_idx = LINUX_MIB_TCPRENORECOVERY; + else + mib_idx = LINUX_MIB_TCPSACKRECOVERY; + + NET_INC_STATS_BH(sock_net(sk), mib_idx); + + tp->high_seq = tp->snd_nxt; + tp->prior_ssthresh = 0; + tp->undo_marker = tp->snd_una; + tp->undo_retrans = tp->retrans_out; + + if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { + if (!ece_ack) + tp->prior_ssthresh = tcp_current_ssthresh(sk); + tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); + TCP_ECN_queue_cwr(tp); + } + + tp->bytes_acked = 0; + tp->snd_cwnd_cnt = 0; + tp->prior_cwnd = tp->snd_cwnd; + tp->prr_delivered = 0; + tp->prr_out = 0; + tcp_set_ca_state(sk, TCP_CA_Recovery); +} + /* Process an event, which can update packets-in-flight not trivially. 
* Main goal of this function is to calculate new estimate for left_out, * taking into account both packets sitting in receiver's buffer and @@ -3044,7 +3114,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, struct tcp_sock *tp = tcp_sk(sk); int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && (tcp_fackets_out(tp) > tp->reordering)); - int fast_rexmit = 0, mib_idx; + int fast_rexmit = 0; if (WARN_ON(!tp->packets_out && tp->sacked_out)) tp->sacked_out = 0; @@ -3128,7 +3198,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, if (icsk->icsk_ca_state <= TCP_CA_Disorder) tcp_try_undo_dsack(sk); - if (!tcp_time_to_recover(sk)) { + if (!tcp_time_to_recover(sk, flag)) { tcp_try_to_open(sk, flag); return; } @@ -3145,32 +3215,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, } /* Otherwise enter Recovery state */ - - if (tcp_is_reno(tp)) - mib_idx = LINUX_MIB_TCPRENORECOVERY; - else - mib_idx = LINUX_MIB_TCPSACKRECOVERY; - - NET_INC_STATS_BH(sock_net(sk), mib_idx); - - tp->high_seq = tp->snd_nxt; - tp->prior_ssthresh = 0; - tp->undo_marker = tp->snd_una; - tp->undo_retrans = tp->retrans_out; - - if (icsk->icsk_ca_state < TCP_CA_CWR) { - if (!(flag & FLAG_ECE)) - tp->prior_ssthresh = tcp_current_ssthresh(sk); - tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); - TCP_ECN_queue_cwr(tp); - } - - tp->bytes_acked = 0; - tp->snd_cwnd_cnt = 0; - tp->prior_cwnd = tp->snd_cwnd; - tp->prr_delivered = 0; - tp->prr_out = 0; - tcp_set_ca_state(sk, TCP_CA_Recovery); + tcp_enter_recovery(sk, (flag & FLAG_ECE)); fast_rexmit = 1; } @@ -3252,16 +3297,47 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) /* Restart timer after forward progress on connection. * RFC2988 recommends to restart timer to now+rto. */ -static void tcp_rearm_rto(struct sock *sk) +void tcp_rearm_rto(struct sock *sk) { - const struct tcp_sock *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); if (!tp->packets_out) { inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); } else { - inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, - inet_csk(sk)->icsk_rto, TCP_RTO_MAX); + u32 rto = inet_csk(sk)->icsk_rto; + /* Offset the time elapsed after installing regular RTO */ + if (tp->early_retrans_delayed) { + struct sk_buff *skb = tcp_write_queue_head(sk); + const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto; + s32 delta = (s32)(rto_time_stamp - tcp_time_stamp); + /* delta may not be positive if the socket is locked + * when the delayed ER timer fires and is rescheduled. + */ + if (delta > 0) + rto = delta; + } + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, + TCP_RTO_MAX); } + tp->early_retrans_delayed = 0; +} + +/* This function is called when the delayed ER timer fires. TCP enters + * fast recovery and performs fast-retransmit. + */ +void tcp_resume_early_retransmit(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + + tcp_rearm_rto(sk); + + /* Stop if ER is disabled after the delayed ER timer is scheduled */ + if (!tp->do_early_retrans) + return; + + tcp_enter_recovery(sk, false); + tcp_update_scoreboard(sk, 1); + tcp_xmit_retransmit_queue(sk); } /* If we get here, the whole TSO packet has not been acked. 
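A worked example of the tcp_rearm_rto() offset above, with made-up jiffies values: if the head of the write queue was timestamped at when = 1000, icsk_rto is 200 and the delayed early-retransmit timer fires at tcp_time_stamp = 1050, then delta = (1000 + 200) - 1050 = 150 and the retransmit timer is re-armed 150 jiffies out, so the original RTO deadline at t = 1200 is preserved rather than pushed back to t = 1250. When delta is not positive (the locked-socket/rescheduled case called out in the comment), the full RTO is used instead.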
*/ @@ -3296,7 +3372,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, const struct inet_connection_sock *icsk = inet_csk(sk); struct sk_buff *skb; u32 now = tcp_time_stamp; - int fully_acked = 1; + int fully_acked = true; int flag = 0; u32 pkts_acked = 0; u32 reord = tp->packets_out; @@ -3320,7 +3396,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, if (!acked_pcount) break; - fully_acked = 0; + fully_acked = false; } else { acked_pcount = tcp_skb_pcount(skb); } @@ -3437,18 +3513,18 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, if (!tp->packets_out && tcp_is_sack(tp)) { icsk = inet_csk(sk); if (tp->lost_out) { - printk(KERN_DEBUG "Leak l=%u %d\n", - tp->lost_out, icsk->icsk_ca_state); + pr_debug("Leak l=%u %d\n", + tp->lost_out, icsk->icsk_ca_state); tp->lost_out = 0; } if (tp->sacked_out) { - printk(KERN_DEBUG "Leak s=%u %d\n", - tp->sacked_out, icsk->icsk_ca_state); + pr_debug("Leak s=%u %d\n", + tp->sacked_out, icsk->icsk_ca_state); tp->sacked_out = 0; } if (tp->retrans_out) { - printk(KERN_DEBUG "Leak r=%u %d\n", - tp->retrans_out, icsk->icsk_ca_state); + pr_debug("Leak r=%u %d\n", + tp->retrans_out, icsk->icsk_ca_state); tp->retrans_out = 0; } } @@ -3599,7 +3675,7 @@ static void tcp_undo_spur_to_response(struct sock *sk, int flag) * to prove that the RTO is indeed spurious. It transfers the control * from F-RTO to the conventional RTO recovery */ -static int tcp_process_frto(struct sock *sk, int flag) +static bool tcp_process_frto(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); @@ -3615,7 +3691,7 @@ static int tcp_process_frto(struct sock *sk, int flag) if (!before(tp->snd_una, tp->frto_highmark)) { tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag); - return 1; + return true; } if (!tcp_is_sackfrto(tp)) { @@ -3624,19 +3700,19 @@ static int tcp_process_frto(struct sock *sk, int flag) * data, winupdate */ if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP)) - return 1; + return true; if (!(flag & FLAG_DATA_ACKED)) { tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3), flag); - return 1; + return true; } } else { if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { /* Prevent sending of new data. */ tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)); - return 1; + return true; } if ((tp->frto_counter >= 2) && @@ -3646,10 +3722,10 @@ static int tcp_process_frto(struct sock *sk, int flag) /* RFC4138 shortcoming (see comment above) */ if (!(flag & FLAG_FORWARD_PROGRESS) && (flag & FLAG_NOT_DUP)) - return 1; + return true; tcp_enter_frto_loss(sk, 3, flag); - return 1; + return true; } } @@ -3661,7 +3737,7 @@ static int tcp_process_frto(struct sock *sk, int flag) if (!tcp_may_send_now(sk)) tcp_enter_frto_loss(sk, 2, flag); - return 1; + return true; } else { switch (sysctl_tcp_frto_response) { case 2: @@ -3678,7 +3754,7 @@ static int tcp_process_frto(struct sock *sk, int flag) tp->undo_marker = 0; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS); } - return 0; + return false; } /* This routine deals with incoming acks, but not outgoing ones. */ @@ -3696,7 +3772,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) int prior_sacked = tp->sacked_out; int pkts_acked = 0; int newly_acked_sacked = 0; - int frto_cwnd = 0; + bool frto_cwnd = false; /* If the ack is older than previous acks * then we can probably ignore it. 
@@ -3710,6 +3786,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) if (after(ack, tp->snd_nxt)) goto invalid_ack; + if (tp->early_retrans_delayed) + tcp_rearm_rto(sk); + if (after(ack, prior_snd_una)) flag |= FLAG_SND_UNA_ADVANCED; @@ -3875,10 +3954,9 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o __u8 snd_wscale = *(__u8 *)ptr; opt_rx->wscale_ok = 1; if (snd_wscale > 14) { - if (net_ratelimit()) - pr_info("%s: Illegal window scaling value %d >14 received\n", - __func__, - snd_wscale); + net_info_ratelimited("%s: Illegal window scaling value %d >14 received\n", + __func__, + snd_wscale); snd_wscale = 14; } opt_rx->snd_wscale = snd_wscale; @@ -3949,7 +4027,7 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o } EXPORT_SYMBOL(tcp_parse_options); -static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) +static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) { const __be32 *ptr = (const __be32 *)(th + 1); @@ -3960,31 +4038,31 @@ static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr tp->rx_opt.rcv_tsval = ntohl(*ptr); ++ptr; tp->rx_opt.rcv_tsecr = ntohl(*ptr); - return 1; + return true; } - return 0; + return false; } /* Fast parse options. This hopes to only see timestamps. * If it is wrong it falls back on tcp_parse_options(). */ -static int tcp_fast_parse_options(const struct sk_buff *skb, - const struct tcphdr *th, - struct tcp_sock *tp, const u8 **hvpp) +static bool tcp_fast_parse_options(const struct sk_buff *skb, + const struct tcphdr *th, + struct tcp_sock *tp, const u8 **hvpp) { /* In the spirit of fast parsing, compare doff directly to constant * values. Because equality is used, short doff can be ignored here. 
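The tcp_parse_options() hunk above only changes how the warning is printed (net_info_ratelimited() instead of an open-coded net_ratelimit() check); the clamp of the advertised window-scale shift at 14 is unchanged. The limit comes from RFC 1323: the 16-bit window field is shifted left by the scale factor, and capping the shift at 14 keeps the scaled window below 2^30. A small illustrative computation, not kernel code:

    #include <stdint.h>

    static uint32_t scaled_window(uint16_t raw_window, uint8_t wscale)
    {
            if (wscale > 14)        /* same limit tcp_parse_options() enforces */
                    wscale = 14;
            /* 65535 << 14 = 1,073,725,440 bytes, just under 1 GiB */
            return (uint32_t)raw_window << wscale;
    }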
*/ if (th->doff == (sizeof(*th) / 4)) { tp->rx_opt.saw_tstamp = 0; - return 0; + return false; } else if (tp->rx_opt.tstamp_ok && th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) { if (tcp_parse_aligned_timestamp(tp, th)) - return 1; + return true; } tcp_parse_options(skb, &tp->rx_opt, hvpp, 1); - return 1; + return true; } #ifdef CONFIG_TCP_MD5SIG @@ -4225,7 +4303,7 @@ static void tcp_fin(struct sock *sk) } } -static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, +static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq) { if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) { @@ -4233,9 +4311,9 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, sp->start_seq = seq; if (after(end_seq, sp->end_seq)) sp->end_seq = end_seq; - return 1; + return true; } - return 0; + return false; } static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) @@ -4431,10 +4509,10 @@ static void tcp_ofo_queue(struct sock *sk) } } -static int tcp_prune_ofo_queue(struct sock *sk); +static bool tcp_prune_ofo_queue(struct sock *sk); static int tcp_prune_queue(struct sock *sk); -static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) +static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) { if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || !sk_rmem_schedule(sk, size)) { @@ -4453,6 +4531,41 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) return 0; } +/** + * tcp_try_coalesce - try to merge skb to prior one + * @sk: socket + * @to: prior buffer + * @from: buffer to add in queue + * @fragstolen: pointer to boolean + * + * Before queueing skb @from after @to, try to merge them + * to reduce overall memory use and queue lengths, if cost is small. + * Packets in ofo or receive queues can stay a long time. + * Better try to coalesce them right now to avoid future collapses. + * Returns true if caller should free @from instead of queueing it + */ +static bool tcp_try_coalesce(struct sock *sk, + struct sk_buff *to, + struct sk_buff *from, + bool *fragstolen) +{ + int delta; + + *fragstolen = false; + + if (tcp_hdr(from)->fin) + return false; + if (!skb_try_coalesce(to, from, fragstolen, &delta)) + return false; + + atomic_add(delta, &sk->sk_rmem_alloc); + sk_mem_charge(sk, delta); + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); + TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; + TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; + return true; +} + static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); @@ -4491,23 +4604,13 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) end_seq = TCP_SKB_CB(skb)->end_seq; if (seq == TCP_SKB_CB(skb1)->end_seq) { - /* Packets in ofo can stay in queue a long time. - * Better try to coalesce them right now - * to avoid future tcp_collapse_ofo_queue(), - * probably the most expensive function in tcp stack. 
- */ - if (skb->len <= skb_tailroom(skb1) && !tcp_hdr(skb)->fin) { - NET_INC_STATS_BH(sock_net(sk), - LINUX_MIB_TCPRCVCOALESCE); - BUG_ON(skb_copy_bits(skb, 0, - skb_put(skb1, skb->len), - skb->len)); - TCP_SKB_CB(skb1)->end_seq = end_seq; - TCP_SKB_CB(skb1)->ack_seq = TCP_SKB_CB(skb)->ack_seq; - __kfree_skb(skb); - skb = NULL; - } else { + bool fragstolen; + + if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) { __skb_queue_after(&tp->out_of_order_queue, skb1, skb); + } else { + kfree_skb_partial(skb, fragstolen); + skb = NULL; } if (!tp->rx_opt.num_sacks || @@ -4583,12 +4686,65 @@ end: skb_set_owner_r(skb, sk); } +static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, + bool *fragstolen) +{ + int eaten; + struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); + + __skb_pull(skb, hdrlen); + eaten = (tail && + tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0; + tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq; + if (!eaten) { + __skb_queue_tail(&sk->sk_receive_queue, skb); + skb_set_owner_r(skb, sk); + } + return eaten; +} + +int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) +{ + struct sk_buff *skb; + struct tcphdr *th; + bool fragstolen; + + if (tcp_try_rmem_schedule(sk, size + sizeof(*th))) + goto err; + + skb = alloc_skb(size + sizeof(*th), sk->sk_allocation); + if (!skb) + goto err; + + th = (struct tcphdr *)skb_put(skb, sizeof(*th)); + skb_reset_transport_header(skb); + memset(th, 0, sizeof(*th)); + + if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size)) + goto err_free; + + TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; + TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; + TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; + + if (tcp_queue_rcv(sk, skb, sizeof(*th), &fragstolen)) { + WARN_ON_ONCE(fragstolen); /* should not happen */ + __kfree_skb(skb); + } + return size; + +err_free: + kfree_skb(skb); +err: + return -ENOMEM; +} static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) { const struct tcphdr *th = tcp_hdr(skb); struct tcp_sock *tp = tcp_sk(sk); int eaten = -1; + bool fragstolen = false; if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) goto drop; @@ -4633,8 +4789,7 @@ queue_and_out: tcp_try_rmem_schedule(sk, skb->truesize)) goto drop; - skb_set_owner_r(skb, sk); - __skb_queue_tail(&sk->sk_receive_queue, skb); + eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); } tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; if (skb->len) @@ -4658,7 +4813,7 @@ queue_and_out: tcp_fast_path_check(sk); if (eaten > 0) - __kfree_skb(skb); + kfree_skb_partial(skb, fragstolen); else if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, 0); return; @@ -4878,10 +5033,10 @@ static void tcp_collapse_ofo_queue(struct sock *sk) * Purge the out-of-order queue. * Return true if queue was pruned. 
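The receive-path changes above replace the old open-coded copy in the out-of-order queue with tcp_try_coalesce(): before linking a new skb behind an existing one, skb_try_coalesce() is asked to merge the payload into the previous buffer, and on success the caller releases the source with kfree_skb_partial(); tcp_queue_rcv() applies the same trick to the tail of sk_receive_queue, and tcp_send_rcvq() reuses it to inject data for repair mode. A toy, self-contained model of the coalescing decision; the chunk structure and its size limit are invented for illustration only:

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    struct chunk {
            unsigned char data[2048];
            size_t len;
            unsigned int end_seq;
    };

    /* Append the new payload to the queue tail if it fits; otherwise the
     * caller must queue a fresh chunk, as tcp_data_queue_ofo() does when
     * tcp_try_coalesce() returns false.
     */
    static bool try_coalesce(struct chunk *tail, const unsigned char *payload,
                             size_t len, unsigned int end_seq)
    {
            if (!tail || tail->len + len > sizeof(tail->data))
                    return false;

            memcpy(tail->data + tail->len, payload, len);
            tail->len += len;
            tail->end_seq = end_seq;   /* like updating TCP_SKB_CB(to)->end_seq */
            return true;
    }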
*/ -static int tcp_prune_ofo_queue(struct sock *sk) +static bool tcp_prune_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); - int res = 0; + bool res = false; if (!skb_queue_empty(&tp->out_of_order_queue)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); @@ -4895,7 +5050,7 @@ static int tcp_prune_ofo_queue(struct sock *sk) if (tp->rx_opt.sack_ok) tcp_sack_reset(&tp->rx_opt); sk_mem_reclaim(sk); - res = 1; + res = true; } return res; } @@ -4972,7 +5127,7 @@ void tcp_cwnd_application_limited(struct sock *sk) tp->snd_cwnd_stamp = tcp_time_stamp; } -static int tcp_should_expand_sndbuf(const struct sock *sk) +static bool tcp_should_expand_sndbuf(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); @@ -4980,21 +5135,21 @@ static int tcp_should_expand_sndbuf(const struct sock *sk) * not modify it. */ if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) - return 0; + return false; /* If we are under global TCP memory pressure, do not expand. */ if (sk_under_memory_pressure(sk)) - return 0; + return false; /* If we are under soft global TCP memory pressure, do not expand. */ if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) - return 0; + return false; /* If we filled the congestion window, do not expand. */ if (tp->packets_out >= tp->snd_cwnd) - return 0; + return false; - return 1; + return true; } /* When incoming ACK allowed to free some skb from write_queue, @@ -5220,16 +5375,16 @@ static inline int tcp_checksum_complete_user(struct sock *sk, } #ifdef CONFIG_NET_DMA -static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, +static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen) { struct tcp_sock *tp = tcp_sk(sk); int chunk = skb->len - hlen; int dma_cookie; - int copied_early = 0; + bool copied_early = false; if (tp->ucopy.wakeup) - return 0; + return false; if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) tp->ucopy.dma_chan = net_dma_find_channel(); @@ -5245,7 +5400,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, goto out; tp->ucopy.dma_cookie = dma_cookie; - copied_early = 1; + copied_early = true; tp->ucopy.len -= chunk; tp->copied_seq += chunk; @@ -5437,6 +5592,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, } else { int eaten = 0; int copied_early = 0; + bool fragstolen = false; if (tp->copied_seq == tp->rcv_nxt && len - tcp_header_len <= tp->ucopy.len) { @@ -5494,10 +5650,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); /* Bulk data transfer: receiver */ - __skb_pull(skb, tcp_header_len); - __skb_queue_tail(&sk->sk_receive_queue, skb); - skb_set_owner_r(skb, sk); - tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; + eaten = tcp_queue_rcv(sk, skb, tcp_header_len, + &fragstolen); } tcp_event_data_recv(sk, skb); @@ -5519,7 +5673,7 @@ no_ack: else #endif if (eaten) - __kfree_skb(skb); + kfree_skb_partial(skb, fragstolen); else sk->sk_data_ready(sk, 0); return 0; @@ -5563,6 +5717,44 @@ discard: } EXPORT_SYMBOL(tcp_rcv_established); +void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) +{ + struct tcp_sock *tp = tcp_sk(sk); + struct inet_connection_sock *icsk = inet_csk(sk); + + tcp_set_state(sk, TCP_ESTABLISHED); + + if (skb != NULL) + security_inet_conn_established(sk, skb); + + /* Make sure socket is routed, for correct metrics. */ + icsk->icsk_af_ops->rebuild_header(sk); + + tcp_init_metrics(sk); + + tcp_init_congestion_control(sk); + + /* Prevent spurious tcp_cwnd_restart() on first data + * packet. 
+ */ + tp->lsndtime = tcp_time_stamp; + + tcp_init_buffer_space(sk); + + if (sock_flag(sk, SOCK_KEEPOPEN)) + inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); + + if (!tp->rx_opt.snd_wscale) + __tcp_fast_path_on(tp, tp->snd_wnd); + else + tp->pred_flags = 0; + + if (!sock_flag(sk, SOCK_DEAD)) { + sk->sk_state_change(sk); + sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); + } +} + static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len) { @@ -5695,36 +5887,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, } smp_mb(); - tcp_set_state(sk, TCP_ESTABLISHED); - - security_inet_conn_established(sk, skb); - - /* Make sure socket is routed, for correct metrics. */ - icsk->icsk_af_ops->rebuild_header(sk); - - tcp_init_metrics(sk); - tcp_init_congestion_control(sk); - - /* Prevent spurious tcp_cwnd_restart() on first data - * packet. - */ - tp->lsndtime = tcp_time_stamp; - - tcp_init_buffer_space(sk); - - if (sock_flag(sk, SOCK_KEEPOPEN)) - inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); - - if (!tp->rx_opt.snd_wscale) - __tcp_fast_path_on(tp, tp->snd_wnd); - else - tp->pred_flags = 0; - - if (!sock_flag(sk, SOCK_DEAD)) { - sk->sk_state_change(sk); - sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); - } + tcp_finish_connect(sk, skb); if (sk->sk_write_pending || icsk->icsk_accept_queue.rskq_defer_accept || @@ -5738,8 +5902,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, */ inet_csk_schedule_ack(sk); icsk->icsk_ack.lrcvtime = tcp_time_stamp; - icsk->icsk_ack.ato = TCP_ATO_MIN; - tcp_incr_quickack(sk); tcp_enter_quickack_mode(sk); inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX, TCP_RTO_MAX); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 0cb86ceb652..a43b87dfe80 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -138,6 +138,14 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) } EXPORT_SYMBOL_GPL(tcp_twsk_unique); +static int tcp_repair_connect(struct sock *sk) +{ + tcp_connect_init(sk); + tcp_finish_connect(sk, NULL); + + return 0; +} + /* This will initiate an outgoing connection. 
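tcp_repair_connect() above gives a socket in repair mode a connect() path with no handshake: tcp_connect_init() seeds the state and tcp_finish_connect() moves it straight to ESTABLISHED, while the tp->repair checks in the tcp_v4_connect() hunk that follows keep a pre-loaded write_seq from being reset. A hedged userspace sketch of how a checkpoint/restore tool might drive this; TCP_REPAIR is the socket option introduced alongside this series, and everything else here is illustrative:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Re-create an established connection without emitting a SYN. Assumes a
     * kernel with this patch series and a libc that exposes TCP_REPAIR; the
     * saved sequence numbers and queue contents would be restored through the
     * related repair options before connect() is called.
     */
    static int repair_connect(const struct sockaddr_in *peer)
    {
            int on = 1;
            int fd = socket(AF_INET, SOCK_STREAM, 0);

            if (fd < 0)
                    return -1;

            if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on)) < 0 ||
                connect(fd, (const struct sockaddr *)peer, sizeof(*peer)) < 0) {
                    close(fd);
                    return -1;
            }
            /* With tp->repair set, connect() lands in tcp_repair_connect() and
             * the socket is ESTABLISHED without any packets on the wire.
             */
            return fd;
    }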
*/ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { @@ -196,7 +204,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) /* Reset inherited state */ tp->rx_opt.ts_recent = 0; tp->rx_opt.ts_recent_stamp = 0; - tp->write_seq = 0; + if (likely(!tp->repair)) + tp->write_seq = 0; } if (tcp_death_row.sysctl_tw_recycle && @@ -247,7 +256,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) sk->sk_gso_type = SKB_GSO_TCPV4; sk_setup_caps(sk, &rt->dst); - if (!tp->write_seq) + if (!tp->write_seq && likely(!tp->repair)) tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr, inet->inet_daddr, inet->inet_sport, @@ -255,7 +264,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_id = tp->write_seq ^ jiffies; - err = tcp_connect(sk); + if (likely(!tp->repair)) + err = tcp_connect(sk); + else + err = tcp_repair_connect(sk); + rt = NULL; if (err) goto failure; @@ -853,14 +866,14 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req) } /* - * Return 1 if a syncookie should be sent + * Return true if a syncookie should be sent */ -int tcp_syn_flood_action(struct sock *sk, +bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb, const char *proto) { const char *msg = "Dropping request"; - int want_cookie = 0; + bool want_cookie = false; struct listen_sock *lopt; @@ -868,7 +881,7 @@ int tcp_syn_flood_action(struct sock *sk, #ifdef CONFIG_SYN_COOKIES if (sysctl_tcp_syncookies) { msg = "Sending cookies"; - want_cookie = 1; + want_cookie = true; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); } else #endif @@ -1183,7 +1196,7 @@ clear_hash_noput: } EXPORT_SYMBOL(tcp_v4_md5_hash_skb); -static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) +static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) { /* * This gets called for each TCP segment that arrives @@ -1206,16 +1219,16 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) /* We've parsed the options - do we have a hash? */ if (!hash_expected && !hash_location) - return 0; + return false; if (hash_expected && !hash_location) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); - return 1; + return true; } if (!hash_expected && hash_location) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); - return 1; + return true; } /* Okay, so this is hash_expected and hash_location - @@ -1226,15 +1239,14 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) NULL, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) { - if (net_ratelimit()) { - pr_info("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n", - &iph->saddr, ntohs(th->source), - &iph->daddr, ntohs(th->dest), - genhash ? " tcp_v4_calc_md5_hash failed" : ""); - } - return 1; + net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n", + &iph->saddr, ntohs(th->source), + &iph->daddr, ntohs(th->dest), + genhash ? 
" tcp_v4_calc_md5_hash failed" + : ""); + return true; } - return 0; + return false; } #endif @@ -1268,7 +1280,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) __be32 saddr = ip_hdr(skb)->saddr; __be32 daddr = ip_hdr(skb)->daddr; __u32 isn = TCP_SKB_CB(skb)->when; - int want_cookie = 0; + bool want_cookie = false; /* Never answer to SYNs send to broadcast or multicast */ if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) @@ -1327,7 +1339,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) while (l-- > 0) *c++ ^= *hash_location++; - want_cookie = 0; /* not our kind of cookie */ + want_cookie = false; /* not our kind of cookie */ tmp_ext.cookie_out_never = 0; /* false */ tmp_ext.cookie_plus = tmp_opt.cookie_plus; } else if (!tp->rx_opt.cookie_in_always) { @@ -1355,7 +1367,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) goto drop_and_free; if (!want_cookie || tmp_opt.tstamp_ok) - TCP_ECN_create_request(req, tcp_hdr(skb)); + TCP_ECN_create_request(req, skb); if (want_cookie) { isn = cookie_v4_init_sequence(sk, skb, &req->mss); @@ -1739,7 +1751,8 @@ process: if (!tcp_prequeue(sk, skb)) ret = tcp_v4_do_rcv(sk, skb); } - } else if (unlikely(sk_add_backlog(sk, skb))) { + } else if (unlikely(sk_add_backlog(sk, skb, + sk->sk_rcvbuf + sk->sk_sndbuf))) { bh_unlock_sock(sk); NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP); goto discard_and_relse; @@ -1875,64 +1888,15 @@ static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = { static int tcp_v4_init_sock(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); - struct tcp_sock *tp = tcp_sk(sk); - skb_queue_head_init(&tp->out_of_order_queue); - tcp_init_xmit_timers(sk); - tcp_prequeue_init(tp); - - icsk->icsk_rto = TCP_TIMEOUT_INIT; - tp->mdev = TCP_TIMEOUT_INIT; - - /* So many TCP implementations out there (incorrectly) count the - * initial SYN frame in their delayed-ACK and congestion control - * algorithms that we must have the following bandaid to talk - * efficiently to them. -DaveM - */ - tp->snd_cwnd = TCP_INIT_CWND; - - /* See draft-stevens-tcpca-spec-01 for discussion of the - * initialization of these values. - */ - tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; - tp->snd_cwnd_clamp = ~0; - tp->mss_cache = TCP_MSS_DEFAULT; - - tp->reordering = sysctl_tcp_reordering; - icsk->icsk_ca_ops = &tcp_init_congestion_ops; - - sk->sk_state = TCP_CLOSE; - - sk->sk_write_space = sk_stream_write_space; - sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); + tcp_init_sock(sk); icsk->icsk_af_ops = &ipv4_specific; - icsk->icsk_sync_mss = tcp_sync_mss; + #ifdef CONFIG_TCP_MD5SIG - tp->af_specific = &tcp_sock_ipv4_specific; + tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific; #endif - /* TCP Cookie Transactions */ - if (sysctl_tcp_cookie_size > 0) { - /* Default, cookies without s_data_payload. 
*/ - tp->cookie_values = - kzalloc(sizeof(*tp->cookie_values), - sk->sk_allocation); - if (tp->cookie_values != NULL) - kref_init(&tp->cookie_values->kref); - } - /* Presumed zeroed, in order of appearance: - * cookie_in_always, cookie_out_never, - * s_data_constant, s_data_in, s_data_out - */ - sk->sk_sndbuf = sysctl_tcp_wmem[1]; - sk->sk_rcvbuf = sysctl_tcp_rmem[1]; - - local_bh_disable(); - sock_update_memcg(sk); - sk_sockets_allocated_inc(sk); - local_bh_enable(); - return 0; } @@ -2109,7 +2073,7 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos) return rc; } -static inline int empty_bucket(struct tcp_iter_state *st) +static inline bool empty_bucket(struct tcp_iter_state *st) { return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) && hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 3cabafb5cdd..b85d9fe7d66 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -55,7 +55,7 @@ EXPORT_SYMBOL_GPL(tcp_death_row); * state. */ -static int tcp_remember_stamp(struct sock *sk) +static bool tcp_remember_stamp(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); @@ -72,13 +72,13 @@ static int tcp_remember_stamp(struct sock *sk) } if (release_it) inet_putpeer(peer); - return 1; + return true; } - return 0; + return false; } -static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw) +static bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw) { struct sock *sk = (struct sock *) tw; struct inet_peer *peer; @@ -94,17 +94,17 @@ static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw) peer->tcp_ts = tcptw->tw_ts_recent; } inet_putpeer(peer); - return 1; + return true; } - return 0; + return false; } -static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) +static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) { if (seq == s_win) - return 1; + return true; if (after(end_seq, s_win) && before(seq, e_win)) - return 1; + return true; return seq == e_win && seq == end_seq; } @@ -143,7 +143,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, struct tcp_options_received tmp_opt; const u8 *hash_location; struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); - int paws_reject = 0; + bool paws_reject = false; tmp_opt.saw_tstamp = 0; if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { @@ -316,7 +316,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) struct inet_timewait_sock *tw = NULL; const struct inet_connection_sock *icsk = inet_csk(sk); const struct tcp_sock *tp = tcp_sk(sk); - int recycle_ok = 0; + bool recycle_ok = false; if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp) recycle_ok = tcp_remember_stamp(sk); @@ -482,6 +482,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, newtp->sacked_out = 0; newtp->fackets_out = 0; newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH; + tcp_enable_early_retrans(newtp); /* So many TCP implementations out there (incorrectly) count the * initial SYN frame in their delayed-ACK and congestion control @@ -574,7 +575,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, struct sock *child; const struct tcphdr *th = tcp_hdr(skb); __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); - int paws_reject = 0; + bool paws_reject = false; tmp_opt.saw_tstamp = 0; if (th->doff > (sizeof(struct tcphdr)>>2)) 
{ diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 7ac6423117a..803cbfe82fb 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -34,6 +34,8 @@ * */ +#define pr_fmt(fmt) "TCP: " fmt + #include <net/tcp.h> #include <linux/compiler.h> @@ -78,9 +80,8 @@ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) tp->frto_counter = 3; tp->packets_out += tcp_skb_pcount(skb); - if (!prior_packets) - inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, - inet_csk(sk)->icsk_rto, TCP_RTO_MAX); + if (!prior_packets || tp->early_retrans_delayed) + tcp_rearm_rto(sk); } /* SND.NXT, if window was not shrunk. @@ -369,7 +370,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) TCP_SKB_CB(skb)->end_seq = seq; } -static inline int tcp_urg_mode(const struct tcp_sock *tp) +static inline bool tcp_urg_mode(const struct tcp_sock *tp) { return tp->snd_una != tp->snd_up; } @@ -563,13 +564,13 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, /* Compute TCP options for SYN packets. This is not the final * network wire format yet. */ -static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, +static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, struct tcp_out_options *opts, struct tcp_md5sig_key **md5) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_cookie_values *cvp = tp->cookie_values; - unsigned remaining = MAX_TCP_OPTION_SPACE; + unsigned int remaining = MAX_TCP_OPTION_SPACE; u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ? tcp_cookie_size_check(cvp->cookie_desired) : 0; @@ -663,15 +664,15 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, } /* Set up TCP options for SYN-ACKs. */ -static unsigned tcp_synack_options(struct sock *sk, +static unsigned int tcp_synack_options(struct sock *sk, struct request_sock *req, - unsigned mss, struct sk_buff *skb, + unsigned int mss, struct sk_buff *skb, struct tcp_out_options *opts, struct tcp_md5sig_key **md5, struct tcp_extend_values *xvp) { struct inet_request_sock *ireq = inet_rsk(req); - unsigned remaining = MAX_TCP_OPTION_SPACE; + unsigned int remaining = MAX_TCP_OPTION_SPACE; u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ? xvp->cookie_plus : 0; @@ -742,13 +743,13 @@ static unsigned tcp_synack_options(struct sock *sk, /* Compute TCP options for ESTABLISHED sockets. This is not the * final wire format yet. */ -static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb, +static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb, struct tcp_out_options *opts, struct tcp_md5sig_key **md5) { struct tcp_skb_cb *tcb = skb ? 
TCP_SKB_CB(skb) : NULL; struct tcp_sock *tp = tcp_sk(sk); - unsigned size = 0; + unsigned int size = 0; unsigned int eff_sacks; #ifdef CONFIG_TCP_MD5SIG @@ -770,9 +771,9 @@ static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb, eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; if (unlikely(eff_sacks)) { - const unsigned remaining = MAX_TCP_OPTION_SPACE - size; + const unsigned int remaining = MAX_TCP_OPTION_SPACE - size; opts->num_sack_blocks = - min_t(unsigned, eff_sacks, + min_t(unsigned int, eff_sacks, (remaining - TCPOLEN_SACK_BASE_ALIGNED) / TCPOLEN_SACK_PERBLOCK); size += TCPOLEN_SACK_BASE_ALIGNED + @@ -801,7 +802,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, struct tcp_sock *tp; struct tcp_skb_cb *tcb; struct tcp_out_options opts; - unsigned tcp_options_size, tcp_header_size; + unsigned int tcp_options_size, tcp_header_size; struct tcp_md5sig_key *md5; struct tcphdr *th; int err; @@ -1150,7 +1151,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) } /* Calculate MSS. Not accounting for SACKs here. */ -int tcp_mtu_to_mss(const struct sock *sk, int pmtu) +int tcp_mtu_to_mss(struct sock *sk, int pmtu) { const struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); @@ -1161,6 +1162,14 @@ int tcp_mtu_to_mss(const struct sock *sk, int pmtu) */ mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); + /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ + if (icsk->icsk_af_ops->net_frag_header_len) { + const struct dst_entry *dst = __sk_dst_get(sk); + + if (dst && dst_allfrag(dst)) + mss_now -= icsk->icsk_af_ops->net_frag_header_len; + } + /* Clamp it (mss_clamp does not include tcp options) */ if (mss_now > tp->rx_opt.mss_clamp) mss_now = tp->rx_opt.mss_clamp; @@ -1179,7 +1188,7 @@ int tcp_mtu_to_mss(const struct sock *sk, int pmtu) } /* Inverse of above */ -int tcp_mss_to_mtu(const struct sock *sk, int mss) +int tcp_mss_to_mtu(struct sock *sk, int mss) { const struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); @@ -1190,6 +1199,13 @@ int tcp_mss_to_mtu(const struct sock *sk, int mss) icsk->icsk_ext_hdr_len + icsk->icsk_af_ops->net_header_len; + /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ + if (icsk->icsk_af_ops->net_frag_header_len) { + const struct dst_entry *dst = __sk_dst_get(sk); + + if (dst && dst_allfrag(dst)) + mtu += icsk->icsk_af_ops->net_frag_header_len; + } return mtu; } @@ -1259,7 +1275,7 @@ unsigned int tcp_current_mss(struct sock *sk) const struct tcp_sock *tp = tcp_sk(sk); const struct dst_entry *dst = __sk_dst_get(sk); u32 mss_now; - unsigned header_len; + unsigned int header_len; struct tcp_out_options opts; struct tcp_md5sig_key *md5; @@ -1375,33 +1391,33 @@ static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb, } /* Minshall's variant of the Nagle send check. */ -static inline int tcp_minshall_check(const struct tcp_sock *tp) +static inline bool tcp_minshall_check(const struct tcp_sock *tp) { return after(tp->snd_sml, tp->snd_una) && !after(tp->snd_sml, tp->snd_nxt); } -/* Return 0, if packet can be sent now without violation Nagle's rules: +/* Return false, if packet can be sent now without violation Nagle's rules: * 1. It is full sized. * 2. Or it contains FIN. (already checked by caller) * 3. Or TCP_CORK is not set, and TCP_NODELAY is set. * 4. Or TCP_CORK is not set, and all sent packets are ACKed. 
* With Minshall's modification: all sent small packets are ACKed. */ -static inline int tcp_nagle_check(const struct tcp_sock *tp, +static inline bool tcp_nagle_check(const struct tcp_sock *tp, const struct sk_buff *skb, - unsigned mss_now, int nonagle) + unsigned int mss_now, int nonagle) { return skb->len < mss_now && ((nonagle & TCP_NAGLE_CORK) || (!nonagle && tp->packets_out && tcp_minshall_check(tp))); } -/* Return non-zero if the Nagle test allows this packet to be +/* Return true if the Nagle test allows this packet to be * sent now. */ -static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, - unsigned int cur_mss, int nonagle) +static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, + unsigned int cur_mss, int nonagle) { /* Nagle rule does not apply to frames, which sit in the middle of the * write_queue (they have no chances to get new data). @@ -1410,24 +1426,25 @@ static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff * argument based upon the location of SKB in the send queue. */ if (nonagle & TCP_NAGLE_PUSH) - return 1; + return true; /* Don't use the nagle rule for urgent data (or for the final FIN). * Nagle can be ignored during F-RTO too (see RFC4138). */ if (tcp_urg_mode(tp) || (tp->frto_counter == 2) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) - return 1; + return true; if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) - return 1; + return true; - return 0; + return false; } /* Does at least the first segment of SKB fit into the send window? */ -static inline int tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb, - unsigned int cur_mss) +static bool tcp_snd_wnd_test(const struct tcp_sock *tp, + const struct sk_buff *skb, + unsigned int cur_mss) { u32 end_seq = TCP_SKB_CB(skb)->end_seq; @@ -1460,7 +1477,7 @@ static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb, } /* Test if sending is allowed right now. */ -int tcp_may_send_now(struct sock *sk) +bool tcp_may_send_now(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb = tcp_send_head(sk); @@ -1530,7 +1547,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, * * This algorithm is from John Heffner. */ -static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) +static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); @@ -1590,11 +1607,11 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) /* Ok, it looks like it is advisable to defer. */ tp->tso_deferred = 1 | (jiffies << 1); - return 1; + return true; send_now: tp->tso_deferred = 0; - return 0; + return false; } /* Create a new MTU probe if we are ready. @@ -1736,11 +1753,11 @@ static int tcp_mtu_probe(struct sock *sk) * snd_up-64k-mss .. snd_up cannot be large. However, taking into * account rare use of URG, this is not a big flaw. * - * Returns 1, if no segments are in flight and we have queued segments, but - * cannot send anything now because of SWS or another problem. + * Returns true, if no segments are in flight and we have queued segments, + * but cannot send anything now because of SWS or another problem. 
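The tcp_mtu_to_mss()/tcp_mss_to_mtu() hunks earlier in this tcp_output.c diff subtract, and add back, icsk_af_ops->net_frag_header_len when the route has RTAX_FEATURE_ALLFRAG set, i.e. when every IPv6 packet will carry a fragment header. A worked example with the standard header sizes; the 1500-byte MTU is only illustrative:

    /* 40-byte IPv6 header, 8-byte fragment header, 20-byte bare TCP header:
     *   without ALLFRAG: 1500 - 40 - 20     = 1440-byte MSS
     *   with    ALLFRAG: 1500 - 40 - 20 - 8 = 1432-byte MSS
     * tcp_mss_to_mtu() adds the same 8 bytes back so the two stay inverses.
     */
    static int example_ipv6_mss(int mtu, int allfrag)
    {
            return mtu - 40 - 20 - (allfrag ? 8 : 0);
    }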
*/ -static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, - int push_one, gfp_t gfp) +static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, + int push_one, gfp_t gfp) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; @@ -1754,7 +1771,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, /* Do MTU probing. */ result = tcp_mtu_probe(sk); if (!result) { - return 0; + return false; } else if (result > 0) { sent_pkts = 1; } @@ -1813,7 +1830,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, if (likely(sent_pkts)) { tcp_cwnd_validate(sk); - return 0; + return false; } return !tp->packets_out && tcp_send_head(sk); } @@ -2012,22 +2029,22 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) } /* Check if coalescing SKBs is legal. */ -static int tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) +static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) { if (tcp_skb_pcount(skb) > 1) - return 0; + return false; /* TODO: SACK collapsing could be used to remove this condition */ if (skb_shinfo(skb)->nr_frags != 0) - return 0; + return false; if (skb_cloned(skb)) - return 0; + return false; if (skb == tcp_send_head(sk)) - return 0; + return false; /* Some heurestics for collapsing over SACK'd could be invented */ if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) - return 0; + return false; - return 1; + return true; } /* Collapse packets in the retransmit queue to make to create @@ -2038,7 +2055,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb = to, *tmp; - int first = 1; + bool first = true; if (!sysctl_tcp_retrans_collapse) return; @@ -2052,7 +2069,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, space -= skb->len; if (first) { - first = 0; + first = false; continue; } @@ -2167,8 +2184,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) #if FASTRETRANS_DEBUG > 0 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { - if (net_ratelimit()) - printk(KERN_DEBUG "retrans_out leaked.\n"); + net_dbg_ratelimited("retrans_out leaked\n"); } #endif if (!tp->retrans_out) @@ -2193,18 +2209,18 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) /* Check if we forward retransmits are possible in the current * window/congestion state. */ -static int tcp_can_forward_retransmit(struct sock *sk) +static bool tcp_can_forward_retransmit(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); const struct tcp_sock *tp = tcp_sk(sk); /* Forward retransmissions are possible only during Recovery. */ if (icsk->icsk_ca_state != TCP_CA_Recovery) - return 0; + return false; /* No forward retransmissions in Reno are possible. */ if (tcp_is_reno(tp)) - return 0; + return false; /* Yeah, we have to make difficult choice between forward transmission * and retransmission... Both ways have their merits... 
@@ -2215,9 +2231,9 @@ static int tcp_can_forward_retransmit(struct sock *sk) */ if (tcp_may_send_now(sk)) - return 0; + return false; - return 1; + return true; } /* This gets called after a retransmit timeout, and the initially @@ -2402,7 +2418,7 @@ int tcp_send_synack(struct sock *sk) skb = tcp_write_queue_head(sk); if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { - printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); + pr_debug("%s: wrong queue state\n", __func__); return -EFAULT; } if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { @@ -2562,7 +2578,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, EXPORT_SYMBOL(tcp_make_synack); /* Do all connect socket setups that can be done AF independent. */ -static void tcp_connect_init(struct sock *sk) +void tcp_connect_init(struct sock *sk) { const struct dst_entry *dst = __sk_dst_get(sk); struct tcp_sock *tp = tcp_sk(sk); @@ -2617,9 +2633,12 @@ static void tcp_connect_init(struct sock *sk) tp->snd_una = tp->write_seq; tp->snd_sml = tp->write_seq; tp->snd_up = tp->write_seq; - tp->rcv_nxt = 0; - tp->rcv_wup = 0; - tp->copied_seq = 0; + tp->snd_nxt = tp->write_seq; + + if (likely(!tp->repair)) + tp->rcv_nxt = 0; + tp->rcv_wup = tp->rcv_nxt; + tp->copied_seq = tp->rcv_nxt; inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; inet_csk(sk)->icsk_retransmits = 0; @@ -2642,7 +2661,6 @@ int tcp_connect(struct sock *sk) /* Reserve space for headers. */ skb_reserve(buff, MAX_TCP_HEADER); - tp->snd_nxt = tp->write_seq; tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); TCP_ECN_send_syn(sk, buff); @@ -2791,6 +2809,15 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent) return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); } +void tcp_send_window_probe(struct sock *sk) +{ + if (sk->sk_state == TCP_ESTABLISHED) { + tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; + tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq; + tcp_xmit_probe_skb(sk, 0); + } +} + /* Initiate keepalive or window probe from timer. */ int tcp_write_wakeup(struct sock *sk) { diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index a981cdc0a6e..4526fe68e60 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -91,7 +91,7 @@ static inline int tcp_probe_avail(void) * Note: arguments must match tcp_rcv_established()! 
*/ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, - struct tcphdr *th, unsigned len) + struct tcphdr *th, unsigned int len) { const struct tcp_sock *tp = tcp_sk(sk); const struct inet_sock *inet = inet_sk(sk); @@ -138,7 +138,7 @@ static struct jprobe tcp_jprobe = { .entry = jtcp_rcv_established, }; -static int tcpprobe_open(struct inode * inode, struct file * file) +static int tcpprobe_open(struct inode *inode, struct file *file) { /* Reset (empty) log */ spin_lock_bh(&tcp_probe.lock); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 34d4a02c2f1..e911e6c523e 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -319,6 +319,11 @@ void tcp_retransmit_timer(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); + if (tp->early_retrans_delayed) { + tcp_resume_early_retransmit(sk); + return; + } + if (!tp->packets_out) goto out; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index fe141052a1b..609397ee78f 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -107,6 +107,7 @@ #include <net/checksum.h> #include <net/xfrm.h> #include <trace/events/udp.h> +#include <linux/static_key.h> #include "udp_impl.h" struct udp_table udp_table __read_mostly; @@ -206,7 +207,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, if (!snum) { int low, high, remaining; - unsigned rand; + unsigned int rand; unsigned short first, last; DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN); @@ -846,7 +847,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, * Get and verify the address. */ if (msg->msg_name) { - struct sockaddr_in * usin = (struct sockaddr_in *)msg->msg_name; + struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name; if (msg->msg_namelen < sizeof(*usin)) return -EINVAL; if (usin->sin_family != AF_INET) { @@ -1379,6 +1380,14 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) } +static struct static_key udp_encap_needed __read_mostly; +void udp_encap_enable(void) +{ + if (!static_key_enabled(&udp_encap_needed)) + static_key_slow_inc(&udp_encap_needed); +} +EXPORT_SYMBOL(udp_encap_enable); + /* returns: * -1: error * 0: success @@ -1400,7 +1409,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) goto drop; nf_reset(skb); - if (up->encap_type) { + if (static_key_false(&udp_encap_needed) && up->encap_type) { int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); /* @@ -1470,7 +1479,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) goto drop; - if (sk_rcvqueues_full(sk, skb)) + if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) goto drop; rc = 0; @@ -1479,7 +1488,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) bh_lock_sock(sk); if (!sock_owned_by_user(sk)) rc = __udp_queue_rcv_skb(sk, skb); - else if (sk_add_backlog(sk, skb)) { + else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { bh_unlock_sock(sk); goto drop; } @@ -1760,6 +1769,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, /* FALLTHROUGH */ case UDP_ENCAP_L2TPINUDP: up->encap_type = val; + udp_encap_enable(); break; default: err = -ENOPROTOOPT; @@ -2163,9 +2173,15 @@ void udp4_proc_exit(void) static __initdata unsigned long uhash_entries; static int __init set_uhash_entries(char *str) { + ssize_t ret; + if (!str) return 0; - uhash_entries = simple_strtoul(str, &str, 0); + + ret = kstrtoul(str, 0, &uhash_entries); + if (ret) + return 0; + if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN) uhash_entries = UDP_HTABLE_SIZE_MIN; return 1; diff --git 
a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index aaad650d47d..5a681e298b9 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h @@ -25,7 +25,7 @@ extern int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len); extern int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); -extern int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); +extern int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); extern void udp_destroy_sock(struct sock *sk); #ifdef CONFIG_PROC_FS diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index a0b4c5da8d4..0d3426cb5c4 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -152,7 +152,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) case IPPROTO_AH: if (pskb_may_pull(skb, xprth + 8 - skb->data)) { - __be32 *ah_hdr = (__be32*)xprth; + __be32 *ah_hdr = (__be32 *)xprth; fl4->fl4_ipsec_spi = ah_hdr[1]; } @@ -298,8 +298,8 @@ void __init xfrm4_init(int rt_max_size) xfrm4_state_init(); xfrm4_policy_init(); #ifdef CONFIG_SYSCTL - sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path, - xfrm4_policy_table); + sysctl_hdr = register_net_sysctl(&init_net, "net/ipv4", + xfrm4_policy_table); #endif } diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 36d7437ac05..5728695b544 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -69,7 +69,7 @@ config IPV6_OPTIMISTIC_DAD config INET6_AH tristate "IPv6: AH transformation" - select XFRM + select XFRM_ALGO select CRYPTO select CRYPTO_HMAC select CRYPTO_MD5 @@ -81,7 +81,7 @@ config INET6_AH config INET6_ESP tristate "IPv6: ESP transformation" - select XFRM + select XFRM_ALGO select CRYPTO select CRYPTO_AUTHENC select CRYPTO_HMAC diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 7d5cb975cc6..8f6411c9718 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -38,6 +38,8 @@ * status etc. */ +#define pr_fmt(fmt) "IPv6: " fmt + #include <linux/errno.h> #include <linux/types.h> #include <linux/kernel.h> @@ -66,6 +68,7 @@ #include <net/sock.h> #include <net/snmp.h> +#include <net/af_ieee802154.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/ndisc.h> @@ -149,7 +152,7 @@ static void addrconf_type_change(struct net_device *dev, unsigned long event); static int addrconf_ifdown(struct net_device *dev, int how); -static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags); +static void addrconf_dad_start(struct inet6_ifaddr *ifp); static void addrconf_dad_timer(unsigned long data); static void addrconf_dad_completed(struct inet6_ifaddr *ifp); static void addrconf_dad_run(struct inet6_dev *idev); @@ -326,20 +329,19 @@ void in6_dev_finish_destroy(struct inet6_dev *idev) WARN_ON(idev->mc_list != NULL); #ifdef NET_REFCNT_DEBUG - printk(KERN_DEBUG "in6_dev_finish_destroy: %s\n", dev ? dev->name : "NIL"); + pr_debug("%s: %s\n", __func__, dev ? 
dev->name : "NIL"); #endif dev_put(dev); if (!idev->dead) { - pr_warning("Freeing alive inet6 device %p\n", idev); + pr_warn("Freeing alive inet6 device %p\n", idev); return; } snmp6_free_dev(idev); kfree_rcu(idev, rcu); } - EXPORT_SYMBOL(in6_dev_finish_destroy); -static struct inet6_dev * ipv6_add_dev(struct net_device *dev) +static struct inet6_dev *ipv6_add_dev(struct net_device *dev) { struct inet6_dev *ndev; @@ -372,7 +374,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) if (snmp6_alloc_dev(ndev) < 0) { ADBG((KERN_WARNING - "%s(): cannot allocate memory for statistics; dev=%s.\n", + "%s: cannot allocate memory for statistics; dev=%s.\n", __func__, dev->name)); neigh_parms_release(&nd_tbl, ndev->nd_parms); dev_put(dev); @@ -382,7 +384,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) if (snmp6_register_dev(ndev) < 0) { ADBG((KERN_WARNING - "%s(): cannot create /proc/net/dev_snmp6/%s\n", + "%s: cannot create /proc/net/dev_snmp6/%s\n", __func__, dev->name)); neigh_parms_release(&nd_tbl, ndev->nd_parms); ndev->dead = 1; @@ -400,9 +402,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) { - printk(KERN_INFO - "%s: Disabled Multicast RS\n", - dev->name); + pr_info("%s: Disabled Multicast RS\n", dev->name); ndev->cnf.rtr_solicits = 0; } #endif @@ -441,7 +441,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) return ndev; } -static struct inet6_dev * ipv6_find_idev(struct net_device *dev) +static struct inet6_dev *ipv6_find_idev(struct net_device *dev) { struct inet6_dev *idev; @@ -542,7 +542,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) WARN_ON(!hlist_unhashed(&ifp->addr_lst)); #ifdef NET_REFCNT_DEBUG - printk(KERN_DEBUG "inet6_ifa_finish_destroy\n"); + pr_debug("%s\n", __func__); #endif in6_dev_put(ifp->idev); @@ -551,7 +551,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) pr_notice("Timer is still running, when freeing ifa=%p\n", ifp); if (ifp->state != INET6_IFADDR_STATE_DEAD) { - pr_warning("Freeing alive inet6 address %p\n", ifp); + pr_warn("Freeing alive inet6 address %p\n", ifp); return; } dst_release(&ifp->rt->dst); @@ -841,8 +841,7 @@ retry: in6_dev_hold(idev); if (idev->cnf.use_tempaddr <= 0) { write_unlock(&idev->lock); - printk(KERN_INFO - "ipv6_create_tempaddr(): use_tempaddr is disabled.\n"); + pr_info("%s: use_tempaddr is disabled\n", __func__); in6_dev_put(idev); ret = -1; goto out; @@ -852,8 +851,8 @@ retry: idev->cnf.use_tempaddr = -1; /*XXX*/ spin_unlock_bh(&ifp->lock); write_unlock(&idev->lock); - printk(KERN_WARNING - "ipv6_create_tempaddr(): regeneration time exceeded. 
disabled temporary address support.\n"); + pr_warn("%s: regeneration time exceeded - disabled temporary address support\n", + __func__); in6_dev_put(idev); ret = -1; goto out; @@ -863,8 +862,8 @@ retry: if (__ipv6_try_regen_rndid(idev, tmpaddr) < 0) { spin_unlock_bh(&ifp->lock); write_unlock(&idev->lock); - printk(KERN_WARNING - "ipv6_create_tempaddr(): regeneration of randomized interface id failed.\n"); + pr_warn("%s: regeneration of randomized interface id failed\n", + __func__); in6_ifa_put(ifp); in6_dev_put(idev); ret = -1; @@ -914,8 +913,7 @@ retry: if (!ift || IS_ERR(ift)) { in6_ifa_put(ifp); in6_dev_put(idev); - printk(KERN_INFO - "ipv6_create_tempaddr(): retry temporary address regeneration.\n"); + pr_info("%s: retry temporary address regeneration\n", __func__); tmpaddr = &addr; write_lock(&idev->lock); goto retry; @@ -929,7 +927,7 @@ retry: ift->tstamp = tmp_tstamp; spin_unlock_bh(&ift->lock); - addrconf_dad_start(ift, 0); + addrconf_dad_start(ift); in6_ifa_put(ift); in6_dev_put(idev); out: @@ -1332,7 +1330,6 @@ int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev) rcu_read_unlock(); return onlink; } - EXPORT_SYMBOL(ipv6_chk_prefix); struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, @@ -1416,9 +1413,8 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp) return; } - if (net_ratelimit()) - printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n", - ifp->idev->dev->name, &ifp->addr); + net_info_ratelimited("%s: IPv6 duplicate address %pI6c detected!\n", + ifp->idev->dev->name, &ifp->addr); if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) { struct in6_addr addr; @@ -1431,7 +1427,7 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp) /* DAD failed for link-local based on MAC address */ idev->cnf.disable_ipv6 = 1; - printk(KERN_INFO "%s: IPv6 being disabled!\n", + pr_info("%s: IPv6 being disabled!\n", ifp->idev->dev->name); } } @@ -1516,13 +1512,21 @@ static int addrconf_ifid_eui48(u8 *eui, struct net_device *dev) return 0; } +static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev) +{ + if (dev->addr_len != IEEE802154_ADDR_LEN) + return -1; + memcpy(eui, dev->dev_addr, 8); + return 0; +} + static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev) { /* XXX: inherit EUI-64 from other interface -- yoshfuji */ if (dev->addr_len != ARCNET_ALEN) return -1; memset(eui, 0, 7); - eui[7] = *(u8*)dev->dev_addr; + eui[7] = *(u8 *)dev->dev_addr; return 0; } @@ -1569,7 +1573,6 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev) switch (dev->type) { case ARPHRD_ETHER: case ARPHRD_FDDI: - case ARPHRD_IEEE802_TR: return addrconf_ifid_eui48(eui, dev); case ARPHRD_ARCNET: return addrconf_ifid_arcnet(eui, dev); @@ -1579,6 +1582,8 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev) return addrconf_ifid_sit(eui, dev); case ARPHRD_IPGRE: return addrconf_ifid_gre(eui, dev); + case ARPHRD_IEEE802154: + return addrconf_ifid_eui64(eui, dev); } return -1; } @@ -1652,9 +1657,8 @@ static void ipv6_regen_rndid(unsigned long data) idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time - idev->cnf.max_desync_factor * HZ; if (time_before(expires, jiffies)) { - printk(KERN_WARNING - "ipv6_regen_rndid(): too short regeneration interval; timer disabled for %s.\n", - idev->dev->name); + pr_warn("%s: too short regeneration interval; timer disabled for %s\n", + __func__, idev->dev->name); goto out; } @@ -1667,7 +1671,8 @@ out: in6_dev_put(idev); } -static int 
__ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr) { +static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr) +{ int ret = 0; if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0) @@ -1837,16 +1842,15 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) prefered_lft = ntohl(pinfo->prefered); if (prefered_lft > valid_lft) { - if (net_ratelimit()) - printk(KERN_WARNING "addrconf: prefix option has invalid lifetime\n"); + net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n"); return; } in6_dev = in6_dev_get(dev); if (in6_dev == NULL) { - if (net_ratelimit()) - printk(KERN_DEBUG "addrconf: device %s not configured\n", dev->name); + net_dbg_ratelimited("addrconf: device %s not configured\n", + dev->name); return; } @@ -1908,7 +1912,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) /* Try to figure out our local address for this prefix */ if (pinfo->autoconf && in6_dev->cnf.autoconf) { - struct inet6_ifaddr * ifp; + struct inet6_ifaddr *ifp; struct in6_addr addr; int create = 0, update_lft = 0; @@ -1921,9 +1925,8 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) } goto ok; } - if (net_ratelimit()) - printk(KERN_DEBUG "IPv6 addrconf: prefix with wrong length %d\n", - pinfo->prefix_len); + net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n", + pinfo->prefix_len); in6_dev_put(in6_dev); return; @@ -1957,7 +1960,7 @@ ok: update_lft = create = 1; ifp->cstamp = jiffies; - addrconf_dad_start(ifp, RTF_ADDRCONF|RTF_PREFIX_RT); + addrconf_dad_start(ifp); } if (ifp) { @@ -2236,7 +2239,7 @@ static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *p * that the Optimistic flag should not be set for * manually configured addresses */ - addrconf_dad_start(ifp, 0); + addrconf_dad_start(ifp); in6_ifa_put(ifp); addrconf_verify(0); return 0; @@ -2362,9 +2365,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) } for_each_netdev(net, dev) { - struct in_device * in_dev = __in_dev_get_rtnl(dev); + struct in_device *in_dev = __in_dev_get_rtnl(dev); if (in_dev && (dev->flags & IFF_UP)) { - struct in_ifaddr * ifa; + struct in_ifaddr *ifa; int flag = scope; @@ -2401,7 +2404,7 @@ static void init_loopback(struct net_device *dev) ASSERT_RTNL(); if ((idev = ipv6_find_idev(dev)) == NULL) { - printk(KERN_DEBUG "init loopback: add_dev failed\n"); + pr_debug("%s: add_dev failed\n", __func__); return; } @@ -2410,7 +2413,7 @@ static void init_loopback(struct net_device *dev) static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr) { - struct inet6_ifaddr * ifp; + struct inet6_ifaddr *ifp; u32 addr_flags = IFA_F_PERMANENT; #ifdef CONFIG_IPV6_OPTIMISTIC_DAD @@ -2423,7 +2426,7 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, addr_flags); if (!IS_ERR(ifp)) { addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0); - addrconf_dad_start(ifp, 0); + addrconf_dad_start(ifp); in6_ifa_put(ifp); } } @@ -2431,15 +2434,15 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr static void addrconf_dev_config(struct net_device *dev) { struct in6_addr addr; - struct inet6_dev * idev; + struct inet6_dev *idev; ASSERT_RTNL(); if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_FDDI) && - (dev->type != ARPHRD_IEEE802_TR) && (dev->type != ARPHRD_ARCNET) && - (dev->type != 
ARPHRD_INFINIBAND)) { + (dev->type != ARPHRD_INFINIBAND) && + (dev->type != ARPHRD_IEEE802154)) { /* Alas, we support only Ethernet autoconfiguration. */ return; } @@ -2469,7 +2472,7 @@ static void addrconf_sit_config(struct net_device *dev) */ if ((idev = ipv6_find_idev(dev)) == NULL) { - printk(KERN_DEBUG "init sit: add_dev failed\n"); + pr_debug("%s: add_dev failed\n", __func__); return; } @@ -2499,12 +2502,12 @@ static void addrconf_gre_config(struct net_device *dev) struct inet6_dev *idev; struct in6_addr addr; - pr_info("ipv6: addrconf_gre_config(%s)\n", dev->name); + pr_info("%s(%s)\n", __func__, dev->name); ASSERT_RTNL(); if ((idev = ipv6_find_idev(dev)) == NULL) { - printk(KERN_DEBUG "init gre: add_dev failed\n"); + pr_debug("%s: add_dev failed\n", __func__); return; } @@ -2544,7 +2547,7 @@ static void ip6_tnl_add_linklocal(struct inet6_dev *idev) if (!ipv6_inherit_linklocal(idev, link_dev)) return; } - printk(KERN_DEBUG "init ip6-ip6: add_linklocal failed\n"); + pr_debug("init ip6-ip6: add_linklocal failed\n"); } /* @@ -2560,14 +2563,14 @@ static void addrconf_ip6_tnl_config(struct net_device *dev) idev = addrconf_add_dev(dev); if (IS_ERR(idev)) { - printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n"); + pr_debug("init ip6-ip6: add_dev failed\n"); return; } ip6_tnl_add_linklocal(idev); } static int addrconf_notify(struct notifier_block *this, unsigned long event, - void * data) + void *data) { struct net_device *dev = (struct net_device *) data; struct inet6_dev *idev = __in6_dev_get(dev); @@ -2591,9 +2594,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, if (event == NETDEV_UP) { if (!addrconf_qdisc_ok(dev)) { /* device is not ready yet. */ - printk(KERN_INFO - "ADDRCONF(NETDEV_UP): %s: " - "link is not ready\n", + pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n", dev->name); break; } @@ -2618,10 +2619,8 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, idev->if_flags |= IF_READY; } - printk(KERN_INFO - "ADDRCONF(NETDEV_CHANGE): %s: " - "link becomes ready\n", - dev->name); + pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n", + dev->name); run_pending = 1; } @@ -2892,8 +2891,7 @@ static void addrconf_rs_timer(unsigned long data) * Note: we do not support deprecated "all on-link" * assumption any longer. 
*/ - printk(KERN_DEBUG "%s: no IPv6 routers present\n", - idev->dev->name); + pr_debug("%s: no IPv6 routers present\n", idev->dev->name); } out: @@ -2918,7 +2916,7 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp) addrconf_mod_timer(ifp, AC_DAD, rand_num); } -static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags) +static void addrconf_dad_start(struct inet6_ifaddr *ifp) { struct inet6_dev *idev = ifp->idev; struct net_device *dev = idev->dev; @@ -3791,7 +3789,7 @@ static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb) return inet6_dump_addr(skb, cb, type); } -static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh, +static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(in_skb->sk); @@ -3986,14 +3984,14 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev) struct nlattr *nla; struct ifla_cacheinfo ci; - NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags); - + if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags)) + goto nla_put_failure; ci.max_reasm_len = IPV6_MAXPLEN; ci.tstamp = cstamp_delta(idev->tstamp); ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time); ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time); - NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci); - + if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci)) + goto nla_put_failure; nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32)); if (nla == NULL) goto nla_put_failure; @@ -4058,15 +4056,13 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, hdr->ifi_flags = dev_get_flags(dev); hdr->ifi_change = 0; - NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); - - if (dev->addr_len) - NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); - - NLA_PUT_U32(skb, IFLA_MTU, dev->mtu); - if (dev->ifindex != dev->iflink) - NLA_PUT_U32(skb, IFLA_LINK, dev->iflink); - + if (nla_put_string(skb, IFLA_IFNAME, dev->name) || + (dev->addr_len && + nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || + nla_put_u32(skb, IFLA_MTU, dev->mtu) || + (dev->ifindex != dev->iflink && + nla_put_u32(skb, IFLA_LINK, dev->iflink))) + goto nla_put_failure; protoinfo = nla_nest_start(skb, IFLA_PROTINFO); if (protoinfo == NULL) goto nla_put_failure; @@ -4179,12 +4175,12 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev, if (pinfo->autoconf) pmsg->prefix_flags |= IF_PREFIX_AUTOCONF; - NLA_PUT(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix); - + if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix)) + goto nla_put_failure; ci.preferred_time = ntohl(pinfo->prefered); ci.valid_time = ntohl(pinfo->valid); - NLA_PUT(skb, PREFIX_CACHEINFO, sizeof(ci), &ci); - + if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci)) + goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: @@ -4368,7 +4364,6 @@ static struct addrconf_sysctl_table { struct ctl_table_header *sysctl_header; ctl_table addrconf_vars[DEVCONF_MAX+1]; - char *dev_name; } addrconf_sysctl __read_mostly = { .sysctl_header = NULL, .addrconf_vars = { @@ -4597,17 +4592,7 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name, { int i; struct addrconf_sysctl_table *t; - -#define ADDRCONF_CTL_PATH_DEV 3 - - struct ctl_path addrconf_ctl_path[] = { - { .procname = "net", }, - { .procname = "ipv6", }, - { .procname = "conf", }, - { /* to be set */ }, - { }, - }; - + char path[sizeof("net/ipv6/conf/") + IFNAMSIZ]; t 
= kmemdup(&addrconf_sysctl, sizeof(*t), GFP_KERNEL); if (t == NULL) @@ -4619,27 +4604,15 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name, t->addrconf_vars[i].extra2 = net; } - /* - * Make a copy of dev_name, because '.procname' is regarded as const - * by sysctl and we wouldn't want anyone to change it under our feet - * (see SIOCSIFNAME). - */ - t->dev_name = kstrdup(dev_name, GFP_KERNEL); - if (!t->dev_name) - goto free; - - addrconf_ctl_path[ADDRCONF_CTL_PATH_DEV].procname = t->dev_name; + snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name); - t->sysctl_header = register_net_sysctl_table(net, addrconf_ctl_path, - t->addrconf_vars); + t->sysctl_header = register_net_sysctl(net, path, t->addrconf_vars); if (t->sysctl_header == NULL) - goto free_procname; + goto free; p->sysctl = t; return 0; -free_procname: - kfree(t->dev_name); free: kfree(t); out: @@ -4656,7 +4629,6 @@ static void __addrconf_sysctl_unregister(struct ipv6_devconf *p) t = p->sysctl; p->sysctl = NULL; unregister_net_sysctl_table(t->sysctl_header); - kfree(t->dev_name); kfree(t); } @@ -4775,8 +4747,8 @@ int __init addrconf_init(void) err = ipv6_addr_label_init(); if (err < 0) { - printk(KERN_CRIT "IPv6 Addrconf:" - " cannot initialize default policy table: %d.\n", err); + pr_crit("%s: cannot initialize default policy table: %d\n", + __func__, err); goto out; } diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index 399287e595d..d051e5f4bf3 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c @@ -8,9 +8,9 @@ #define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16) -static inline unsigned ipv6_addr_scope2type(unsigned scope) +static inline unsigned int ipv6_addr_scope2type(unsigned int scope) { - switch(scope) { + switch (scope) { case IPV6_ADDR_SCOPE_NODELOCAL: return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) | IPV6_ADDR_LOOPBACK); diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index 2d8ddba9ee5..eb6a63632d3 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c @@ -129,7 +129,7 @@ static void ip6addrlbl_free_rcu(struct rcu_head *h) ip6addrlbl_free(container_of(h, struct ip6addrlbl_entry, rcu)); } -static inline int ip6addrlbl_hold(struct ip6addrlbl_entry *p) +static bool ip6addrlbl_hold(struct ip6addrlbl_entry *p) { return atomic_inc_not_zero(&p->refcnt); } @@ -141,20 +141,20 @@ static inline void ip6addrlbl_put(struct ip6addrlbl_entry *p) } /* Find label */ -static int __ip6addrlbl_match(struct net *net, - struct ip6addrlbl_entry *p, - const struct in6_addr *addr, - int addrtype, int ifindex) +static bool __ip6addrlbl_match(struct net *net, + const struct ip6addrlbl_entry *p, + const struct in6_addr *addr, + int addrtype, int ifindex) { if (!net_eq(ip6addrlbl_net(p), net)) - return 0; + return false; if (p->ifindex && p->ifindex != ifindex) - return 0; + return false; if (p->addrtype && p->addrtype != addrtype) - return 0; + return false; if (!ipv6_prefix_equal(addr, &p->prefix, p->prefixlen)) - return 0; - return 1; + return false; + return true; } static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net, @@ -350,7 +350,7 @@ static int __net_init ip6addrlbl_net_init(struct net *net) int err = 0; int i; - ADDRLABEL(KERN_DEBUG "%s()\n", __func__); + ADDRLABEL(KERN_DEBUG "%s\n", __func__); for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) { int ret = ip6addrlbl_add(net, @@ -456,8 +456,8 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh, return err; } -static inline void ip6addrlbl_putmsg(struct nlmsghdr 
*nlh, - int prefixlen, int ifindex, u32 lseq) +static void ip6addrlbl_putmsg(struct nlmsghdr *nlh, + int prefixlen, int ifindex, u32 lseq) { struct ifaddrlblmsg *ifal = nlmsg_data(nlh); ifal->ifal_family = AF_INET6; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 8ed1b930e75..e22e6d88bac 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -18,6 +18,7 @@ * 2 of the License, or (at your option) any later version. */ +#define pr_fmt(fmt) "IPv6: " fmt #include <linux/module.h> #include <linux/capability.h> @@ -77,7 +78,7 @@ struct ipv6_params ipv6_defaults = { .autoconf = 1, }; -static int disable_ipv6_mod = 0; +static int disable_ipv6_mod; module_param_named(disable, disable_ipv6_mod, int, 0444); MODULE_PARM_DESC(disable, "Disable IPv6 module such that it is non-functional"); @@ -180,7 +181,7 @@ lookup_protocol: err = 0; sk->sk_no_check = answer_no_check; if (INET_PROTOSW_REUSE & answer_flags) - sk->sk_reuse = 1; + sk->sk_reuse = SK_CAN_REUSE; inet = inet_sk(sk); inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; @@ -256,7 +257,7 @@ out_rcu_unlock: /* bind for INET6 API */ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { - struct sockaddr_in6 *addr=(struct sockaddr_in6 *)uaddr; + struct sockaddr_in6 *addr = (struct sockaddr_in6 *)uaddr; struct sock *sk = sock->sk; struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); @@ -390,7 +391,6 @@ out_unlock: rcu_read_unlock(); goto out; } - EXPORT_SYMBOL(inet6_bind); int inet6_release(struct socket *sock) @@ -408,7 +408,6 @@ int inet6_release(struct socket *sock) return inet_release(sock); } - EXPORT_SYMBOL(inet6_release); void inet6_destroy_sock(struct sock *sk) @@ -419,10 +418,12 @@ void inet6_destroy_sock(struct sock *sk) /* Release rx options */ - if ((skb = xchg(&np->pktoptions, NULL)) != NULL) + skb = xchg(&np->pktoptions, NULL); + if (skb != NULL) kfree_skb(skb); - if ((skb = xchg(&np->rxpmtu, NULL)) != NULL) + skb = xchg(&np->rxpmtu, NULL); + if (skb != NULL) kfree_skb(skb); /* Free flowlabels */ @@ -430,10 +431,10 @@ void inet6_destroy_sock(struct sock *sk) /* Free tx options */ - if ((opt = xchg(&np->opt, NULL)) != NULL) + opt = xchg(&np->opt, NULL); + if (opt != NULL) sock_kfree_s(sk, opt, opt->tot_len); } - EXPORT_SYMBOL_GPL(inet6_destroy_sock); /* @@ -443,7 +444,7 @@ EXPORT_SYMBOL_GPL(inet6_destroy_sock); int inet6_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { - struct sockaddr_in6 *sin=(struct sockaddr_in6 *)uaddr; + struct sockaddr_in6 *sin = (struct sockaddr_in6 *)uaddr; struct sock *sk = sock->sk; struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); @@ -474,7 +475,6 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr, *uaddr_len = sizeof(*sin); return 0; } - EXPORT_SYMBOL(inet6_getname); int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) @@ -482,8 +482,7 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) struct sock *sk = sock->sk; struct net *net = sock_net(sk); - switch(cmd) - { + switch (cmd) { case SIOCGSTAMP: return sock_get_timestamp(sk, (struct timeval __user *)arg); @@ -509,7 +508,6 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) /*NOTREACHED*/ return 0; } - EXPORT_SYMBOL(inet6_ioctl); const struct proto_ops inet6_stream_ops = { @@ -615,25 +613,21 @@ out: return ret; out_permanent: - printk(KERN_ERR "Attempt to override permanent protocol %d.\n", - protocol); + pr_err("Attempt to override permanent 
protocol %d\n", protocol); goto out; out_illegal: - printk(KERN_ERR - "Ignoring attempt to register invalid socket type %d.\n", + pr_err("Ignoring attempt to register invalid socket type %d\n", p->type); goto out; } - EXPORT_SYMBOL(inet6_register_protosw); void inet6_unregister_protosw(struct inet_protosw *p) { if (INET_PROTOSW_PERMANENT & p->flags) { - printk(KERN_ERR - "Attempt to unregister permanent protocol %d.\n", + pr_err("Attempt to unregister permanent protocol %d\n", p->protocol); } else { spin_lock_bh(&inetsw6_lock); @@ -643,7 +637,6 @@ inet6_unregister_protosw(struct inet_protosw *p) synchronize_net(); } } - EXPORT_SYMBOL(inet6_unregister_protosw); int inet6_sk_rebuild_header(struct sock *sk) @@ -683,13 +676,12 @@ int inet6_sk_rebuild_header(struct sock *sk) return 0; } - EXPORT_SYMBOL_GPL(inet6_sk_rebuild_header); -int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb) +bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb) { - struct ipv6_pinfo *np = inet6_sk(sk); - struct inet6_skb_parm *opt = IP6CB(skb); + const struct ipv6_pinfo *np = inet6_sk(sk); + const struct inet6_skb_parm *opt = IP6CB(skb); if (np->rxopt.all) { if ((opt->hop && (np->rxopt.bits.hopopts || @@ -701,11 +693,10 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb) np->rxopt.bits.osrcrt)) || ((opt->dst1 || opt->dst0) && (np->rxopt.bits.dstopts || np->rxopt.bits.odstopts))) - return 1; + return true; } - return 0; + return false; } - EXPORT_SYMBOL_GPL(ipv6_opt_accepted); static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto) @@ -1070,13 +1061,11 @@ static int __init inet6_init(void) BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb)); /* Register the socket-side information for inet6_create. */ - for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) + for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) INIT_LIST_HEAD(r); if (disable_ipv6_mod) { - printk(KERN_INFO - "IPv6: Loaded, but administratively disabled, " - "reboot required to enable\n"); + pr_info("Loaded, but administratively disabled, reboot required to enable\n"); goto out; } @@ -1111,11 +1100,6 @@ static int __init inet6_init(void) if (err) goto out_sock_register_fail; -#ifdef CONFIG_SYSCTL - err = ipv6_static_sysctl_register(); - if (err) - goto static_sysctl_fail; -#endif tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem; /* @@ -1242,10 +1226,6 @@ ipmr_fail: icmp_fail: unregister_pernet_subsys(&inet6_net_ops); register_pernet_fail: -#ifdef CONFIG_SYSCTL - ipv6_static_sysctl_unregister(); -static_sysctl_fail: -#endif sock_unregister(PF_INET6); rtnl_unregister_all(PF_INET6); out_sock_register_fail: @@ -1272,9 +1252,6 @@ static void __exit inet6_exit(void) /* Disallow any further netlink messages */ rtnl_unregister_all(PF_INET6); -#ifdef CONFIG_SYSCTL - ipv6_sysctl_unregister(); -#endif udpv6_exit(); udplitev6_exit(); tcpv6_exit(); @@ -1302,9 +1279,6 @@ static void __exit inet6_exit(void) rawv6_exit(); unregister_pernet_subsys(&inet6_net_ops); -#ifdef CONFIG_SYSCTL - ipv6_static_sysctl_unregister(); -#endif proto_unregister(&rawv6_prot); proto_unregister(&udplitev6_prot); proto_unregister(&udpv6_prot); diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 2ae79dbeec2..f1a4a2c28ed 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -24,6 +24,8 @@ * This file is derived from net/ipv4/ah.c. 
*/ +#define pr_fmt(fmt) "IPv6: " fmt + #include <crypto/hash.h> #include <linux/module.h> #include <linux/slab.h> @@ -111,7 +113,7 @@ static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash, __alignof__(struct scatterlist)); } -static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr) +static bool zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr) { u8 *opt = (u8 *)opthdr; int len = ipv6_optlen(opthdr); @@ -125,7 +127,7 @@ static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr) switch (opt[off]) { - case IPV6_TLV_PAD0: + case IPV6_TLV_PAD1: optlen = 1; break; default: @@ -143,10 +145,10 @@ static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr) len -= optlen; } if (len == 0) - return 1; + return true; bad: - return 0; + return false; } #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) @@ -169,7 +171,7 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des switch (opt[off]) { - case IPV6_TLV_PAD0: + case IPV6_TLV_PAD1: optlen = 1; break; default: @@ -189,8 +191,8 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des hao = (struct ipv6_destopt_hao *)&opt[off]; if (hao->length != sizeof(hao->addr)) { - if (net_ratelimit()) - printk(KERN_WARNING "destopt hao: invalid header length: %u\n", hao->length); + net_warn_ratelimited("destopt hao: invalid header length: %u\n", + hao->length); goto bad; } final_addr = hao->addr; @@ -659,9 +661,9 @@ static int ah6_init_state(struct xfrm_state *x) if (aalg_desc->uinfo.auth.icv_fullbits/8 != crypto_ahash_digestsize(ahash)) { - printk(KERN_INFO "AH: %s digestsize %u != %hu\n", - x->aalg->alg_name, crypto_ahash_digestsize(ahash), - aalg_desc->uinfo.auth.icv_fullbits/8); + pr_info("AH: %s digestsize %u != %hu\n", + x->aalg->alg_name, crypto_ahash_digestsize(ahash), + aalg_desc->uinfo.auth.icv_fullbits/8); goto error; } @@ -727,12 +729,12 @@ static const struct inet6_protocol ah6_protocol = { static int __init ah6_init(void) { if (xfrm_register_type(&ah6_type, AF_INET6) < 0) { - printk(KERN_INFO "ipv6 ah init: can't add xfrm type\n"); + pr_info("%s: can't add xfrm type\n", __func__); return -EAGAIN; } if (inet6_add_protocol(&ah6_protocol, IPPROTO_AH) < 0) { - printk(KERN_INFO "ipv6 ah init: can't add protocol\n"); + pr_info("%s: can't add protocol\n", __func__); xfrm_unregister_type(&ah6_type, AF_INET6); return -EAGAIN; } @@ -743,10 +745,10 @@ static int __init ah6_init(void) static void __exit ah6_fini(void) { if (inet6_del_protocol(&ah6_protocol, IPPROTO_AH) < 0) - printk(KERN_INFO "ipv6 ah close: can't remove protocol\n"); + pr_info("%s: can't remove protocol\n", __func__); if (xfrm_unregister_type(&ah6_type, AF_INET6) < 0) - printk(KERN_INFO "ipv6 ah close: can't remove xfrm type\n"); + pr_info("%s: can't remove xfrm type\n", __func__); } diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index db00d27ffb1..cdf02be5f19 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -342,7 +342,7 @@ static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr) * check if the interface has this anycast address * called with rcu_read_lock() */ -static int ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *addr) +static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *addr) { struct inet6_dev *idev; struct ifacaddr6 *aca; @@ -356,16 +356,16 @@ static int ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *add read_unlock_bh(&idev->lock); return aca != NULL; } - return 0; + return false; } 
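Several of the files touched in this series (ah6.c, esp6.c, ipcomp6.c, icmp.c, ip6_fib.c, af_inet6.c; ip6_tunnel.c and mip6.c use KBUILD_MODNAME instead of a literal) gain a pr_fmt() definition so the converted pr_info()/pr_warn()/pr_err() calls no longer need hand-written "ipv6 ah init:"-style prefixes. The pr_* helpers expand pr_fmt() at each call site, so defining it at the top of the file, before the printk helpers are pulled in, prefixes every message from that file. A minimal sketch of the pattern; the demo function and module names are illustrative and not part of the patch:

#define pr_fmt(fmt) "IPv6: " fmt	/* must be defined before the printk helpers are included */

#include <linux/kernel.h>
#include <linux/module.h>

static int __init pr_fmt_demo_init(void)
{
	/* Expands to printk(KERN_INFO "IPv6: " "%s: can't add protocol\n", __func__),
	 * so the log line reads "IPv6: pr_fmt_demo_init: can't add protocol".
	 */
	pr_info("%s: can't add protocol\n", __func__);
	return 0;
}
module_init(pr_fmt_demo_init);
MODULE_LICENSE("GPL");

The netlink changes in this series follow an equally mechanical pattern: the old NLA_PUT*() macros jumped to a local nla_put_failure label behind the caller's back, and the patch replaces them with explicit checks on the nla_put*() return value. A sketch of the resulting shape, assuming a trimmed-down helper (the function name here is hypothetical; the attribute is the real IFLA_INET6_FLAGS used in the inet6_fill_ifla6_attrs() hunk above):

#include <linux/errno.h>
#include <linux/if_link.h>
#include <net/netlink.h>

static int put_inet6_flags(struct sk_buff *skb, u32 if_flags)
{
	/* Before: NLA_PUT_U32(skb, IFLA_INET6_FLAGS, if_flags);
	 *         where the macro hid a "goto nla_put_failure" when the skb was full.
	 * After:  test the nla_put_u32() return value explicitly, as the hunks above do.
	 */
	if (nla_put_u32(skb, IFLA_INET6_FLAGS, if_flags))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}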
/* * check if given interface (or any, if dev==0) has this anycast address */ -int ipv6_chk_acast_addr(struct net *net, struct net_device *dev, - const struct in6_addr *addr) +bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev, + const struct in6_addr *addr) { - int found = 0; + bool found = false; rcu_read_lock(); if (dev) @@ -373,7 +373,7 @@ int ipv6_chk_acast_addr(struct net *net, struct net_device *dev, else for_each_netdev_rcu(net, dev) if (ipv6_chk_acast_dev(dev, addr)) { - found = 1; + found = true; break; } rcu_read_unlock(); diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 76832c8dc89..be2b67d631e 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -22,6 +22,7 @@ #include <linux/ipv6.h> #include <linux/route.h> #include <linux/slab.h> +#include <linux/export.h> #include <net/ipv6.h> #include <net/ndisc.h> @@ -33,9 +34,9 @@ #include <linux/errqueue.h> #include <asm/uaccess.h> -static inline int ipv6_mapped_addr_any(const struct in6_addr *a) +static bool ipv6_mapped_addr_any(const struct in6_addr *a) { - return (ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0)); + return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); } int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) @@ -98,7 +99,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) sin.sin_port = usin->sin6_port; err = ip4_datagram_connect(sk, - (struct sockaddr*) &sin, + (struct sockaddr *) &sin, sizeof(sin)); ipv4_connected: @@ -202,6 +203,7 @@ out: fl6_sock_release(flowlabel); return err; } +EXPORT_SYMBOL_GPL(ip6_datagram_connect); void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload) @@ -414,6 +416,7 @@ out_free_skb: out: return err; } +EXPORT_SYMBOL_GPL(ipv6_recv_error); /* * Handle IPV6_RECVPATHMTU @@ -515,10 +518,10 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) u8 nexthdr = ipv6_hdr(skb)->nexthdr; while (off <= opt->lastopt) { - unsigned len; + unsigned int len; u8 *ptr = nh + off; - switch(nexthdr) { + switch (nexthdr) { case IPPROTO_DSTOPTS: nexthdr = ptr[0]; len = (ptr[1] + 1) << 3; @@ -827,9 +830,8 @@ int datagram_send_ctl(struct net *net, struct sock *sk, int tc; err = -EINVAL; - if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) { + if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) goto exit_f; - } tc = *(int *)CMSG_DATA(cmsg); if (tc < -1 || tc > 0xff) @@ -846,9 +848,8 @@ int datagram_send_ctl(struct net *net, struct sock *sk, int df; err = -EINVAL; - if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) { + if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) goto exit_f; - } df = *(int *)CMSG_DATA(cmsg); if (df < 0 || df > 1) @@ -870,3 +871,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk, exit_f: return err; } +EXPORT_SYMBOL_GPL(datagram_send_ctl); diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 1ac7938dd9e..1e62b7557b0 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -24,6 +24,8 @@ * This file is derived from net/ipv4/esp.c */ +#define pr_fmt(fmt) "IPv6: " fmt + #include <crypto/aead.h> #include <crypto/authenc.h> #include <linux/err.h> @@ -442,8 +444,8 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, esph->spi, IPPROTO_ESP, AF_INET6); if (!x) return; - printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n", - ntohl(esph->spi), &iph->daddr); + pr_debug("pmtu discovery on SA ESP/%08x/%pI6\n", + ntohl(esph->spi), &iph->daddr); xfrm_state_put(x); } @@ -651,11 +653,11 @@ static const struct inet6_protocol 
esp6_protocol = { static int __init esp6_init(void) { if (xfrm_register_type(&esp6_type, AF_INET6) < 0) { - printk(KERN_INFO "ipv6 esp init: can't add xfrm type\n"); + pr_info("%s: can't add xfrm type\n", __func__); return -EAGAIN; } if (inet6_add_protocol(&esp6_protocol, IPPROTO_ESP) < 0) { - printk(KERN_INFO "ipv6 esp init: can't add protocol\n"); + pr_info("%s: can't add protocol\n", __func__); xfrm_unregister_type(&esp6_type, AF_INET6); return -EAGAIN; } @@ -666,9 +668,9 @@ static int __init esp6_init(void) static void __exit esp6_fini(void) { if (inet6_del_protocol(&esp6_protocol, IPPROTO_ESP) < 0) - printk(KERN_INFO "ipv6 esp close: can't remove protocol\n"); + pr_info("%s: can't remove protocol\n", __func__); if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0) - printk(KERN_INFO "ipv6 esp close: can't remove xfrm type\n"); + pr_info("%s: can't remove xfrm type\n", __func__); } module_init(esp6_init); diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 3d641b6e9b0..6447dc49429 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -75,7 +75,7 @@ int ipv6_find_tlv(struct sk_buff *skb, int offset, int type) return offset; switch (opttype) { - case IPV6_TLV_PAD0: + case IPV6_TLV_PAD1: optlen = 1; break; default: @@ -96,14 +96,14 @@ EXPORT_SYMBOL_GPL(ipv6_find_tlv); /* * Parsing tlv encoded headers. * - * Parsing function "func" returns 1, if parsing succeed - * and 0, if it failed. + * Parsing function "func" returns true, if parsing succeed + * and false, if it failed. * It MUST NOT touch skb->h. */ struct tlvtype_proc { int type; - int (*func)(struct sk_buff *skb, int offset); + bool (*func)(struct sk_buff *skb, int offset); }; /********************* @@ -112,11 +112,11 @@ struct tlvtype_proc { /* An unknown option is detected, decide what to do */ -static int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff) +static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff) { switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) { case 0: /* ignore */ - return 1; + return true; case 1: /* drop packet */ break; @@ -129,21 +129,22 @@ static int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff) break; case 2: /* send ICMP PARM PROB regardless and drop packet */ icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff); - return 0; + return false; } kfree_skb(skb); - return 0; + return false; } /* Parse tlv encoded option header (hop-by-hop or destination) */ -static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb) +static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb) { - struct tlvtype_proc *curr; + const struct tlvtype_proc *curr; const unsigned char *nh = skb_network_header(skb); int off = skb_network_header_len(skb); int len = (skb_transport_header(skb)[1] + 1) << 3; + int padlen = 0; if (skb_transport_offset(skb) + len > skb_headlen(skb)) goto bad; @@ -153,13 +154,33 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb) while (len > 0) { int optlen = nh[off + 1] + 2; + int i; switch (nh[off]) { - case IPV6_TLV_PAD0: + case IPV6_TLV_PAD1: optlen = 1; + padlen++; + if (padlen > 7) + goto bad; break; case IPV6_TLV_PADN: + /* RFC 2460 states that the purpose of PadN is + * to align the containing header to multiples + * of 8. 7 is therefore the highest valid value. + * See also RFC 4942, Section 2.1.9.5. + */ + padlen += optlen; + if (padlen > 7) + goto bad; + /* RFC 4942 recommends receiving hosts to + * actively check PadN payload to contain + * only zeroes. 
+ */ + for (i = 2; i < optlen; i++) { + if (nh[off + i] != 0) + goto bad; + } break; default: /* Other TLV code so scan list */ @@ -170,25 +191,33 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb) /* type specific length/alignment checks will be performed in the func(). */ - if (curr->func(skb, off) == 0) - return 0; + if (curr->func(skb, off) == false) + return false; break; } } if (curr->type < 0) { if (ip6_tlvopt_unknown(skb, off) == 0) - return 0; + return false; } + padlen = 0; break; } off += optlen; len -= optlen; } + /* This case will not be caught by above check since its padding + * length is smaller than 7: + * 1 byte NH + 1 byte Length + 6 bytes Padding + */ + if ((padlen == 6) && ((off - skb_network_header_len(skb)) == 8)) + goto bad; + if (len == 0) - return 1; + return true; bad: kfree_skb(skb); - return 0; + return false; } /***************************** @@ -196,7 +225,7 @@ bad: *****************************/ #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) -static int ipv6_dest_hao(struct sk_buff *skb, int optoff) +static bool ipv6_dest_hao(struct sk_buff *skb, int optoff) { struct ipv6_destopt_hao *hao; struct inet6_skb_parm *opt = IP6CB(skb); @@ -250,15 +279,15 @@ static int ipv6_dest_hao(struct sk_buff *skb, int optoff) if (skb->tstamp.tv64 == 0) __net_timestamp(skb); - return 1; + return true; discard: kfree_skb(skb); - return 0; + return false; } #endif -static struct tlvtype_proc tlvprocdestopt_lst[] = { +static const struct tlvtype_proc tlvprocdestopt_lst[] = { #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) { .type = IPV6_TLV_HAO, @@ -563,23 +592,23 @@ static inline struct net *ipv6_skb_net(struct sk_buff *skb) /* Router Alert as of RFC 2711 */ -static int ipv6_hop_ra(struct sk_buff *skb, int optoff) +static bool ipv6_hop_ra(struct sk_buff *skb, int optoff) { const unsigned char *nh = skb_network_header(skb); if (nh[optoff + 1] == 2) { IP6CB(skb)->ra = optoff; - return 1; + return true; } LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n", nh[optoff + 1]); kfree_skb(skb); - return 0; + return false; } /* Jumbo payload */ -static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff) +static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff) { const unsigned char *nh = skb_network_header(skb); struct net *net = ipv6_skb_net(skb); @@ -598,13 +627,13 @@ static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff) IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2); - return 0; + return false; } if (ipv6_hdr(skb)->payload_len) { IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff); - return 0; + return false; } if (pkt_len > skb->len - sizeof(struct ipv6hdr)) { @@ -616,14 +645,14 @@ static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff) if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) goto drop; - return 1; + return true; drop: kfree_skb(skb); - return 0; + return false; } -static struct tlvtype_proc tlvprochopopt_lst[] = { +static const struct tlvtype_proc tlvprochopopt_lst[] = { { .type = IPV6_TLV_ROUTERALERT, .func = ipv6_hop_ra, @@ -722,7 +751,6 @@ void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, if (opt->hopopt) ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt); } - EXPORT_SYMBOL(ipv6_push_nfrag_opts); void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto) @@ -738,20 +766,19 @@ 
ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt) opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC); if (opt2) { - long dif = (char*)opt2 - (char*)opt; + long dif = (char *)opt2 - (char *)opt; memcpy(opt2, opt, opt->tot_len); if (opt2->hopopt) - *((char**)&opt2->hopopt) += dif; + *((char **)&opt2->hopopt) += dif; if (opt2->dst0opt) - *((char**)&opt2->dst0opt) += dif; + *((char **)&opt2->dst0opt) += dif; if (opt2->dst1opt) - *((char**)&opt2->dst1opt) += dif; + *((char **)&opt2->dst1opt) += dif; if (opt2->srcrt) - *((char**)&opt2->srcrt) += dif; + *((char **)&opt2->srcrt) += dif; } return opt2; } - EXPORT_SYMBOL_GPL(ipv6_dup_options); static int ipv6_renew_option(void *ohdr, @@ -869,6 +896,7 @@ struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, return opt; } +EXPORT_SYMBOL_GPL(ipv6_fixup_options); /** * fl6_update_dst - update flowi destination address with info given @@ -892,5 +920,4 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6, fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr; return orig; } - EXPORT_SYMBOL_GPL(fl6_update_dst); diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c index 72957f4a7c6..f73d59a1413 100644 --- a/net/ipv6/exthdrs_core.c +++ b/net/ipv6/exthdrs_core.c @@ -9,7 +9,7 @@ * find out if nexthdr is a well-known extension header or a protocol */ -int ipv6_ext_hdr(u8 nexthdr) +bool ipv6_ext_hdr(u8 nexthdr) { /* * find out if nexthdr is an extension header or a protocol @@ -21,6 +21,7 @@ int ipv6_ext_hdr(u8 nexthdr) (nexthdr == NEXTHDR_NONE) || (nexthdr == NEXTHDR_DEST); } +EXPORT_SYMBOL(ipv6_ext_hdr); /* * Skip any extension headers. This is used by the ICMP module. @@ -109,6 +110,4 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp, *nexthdrp = nexthdr; return start; } - -EXPORT_SYMBOL(ipv6_ext_hdr); EXPORT_SYMBOL(ipv6_skip_exthdr); diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index b6c57315206..0ff1cfd55bc 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -22,8 +22,7 @@ #include <net/ip6_route.h> #include <net/netlink.h> -struct fib6_rule -{ +struct fib6_rule { struct fib_rule common; struct rt6key src; struct rt6key dst; @@ -215,14 +214,13 @@ static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb, frh->src_len = rule6->src.plen; frh->tos = rule6->tclass; - if (rule6->dst.plen) - NLA_PUT(skb, FRA_DST, sizeof(struct in6_addr), - &rule6->dst.addr); - - if (rule6->src.plen) - NLA_PUT(skb, FRA_SRC, sizeof(struct in6_addr), - &rule6->src.addr); - + if ((rule6->dst.plen && + nla_put(skb, FRA_DST, sizeof(struct in6_addr), + &rule6->dst.addr)) || + (rule6->src.plen && + nla_put(skb, FRA_SRC, sizeof(struct in6_addr), + &rule6->src.addr))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 27ac95a6342..091a2971c7b 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -29,6 +29,8 @@ * Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data */ +#define pr_fmt(fmt) "IPv6: " fmt + #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> @@ -129,7 +131,7 @@ void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) * --ANK (980726) */ -static int is_ineligible(struct sk_buff *skb) +static bool is_ineligible(const struct sk_buff *skb) { int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data; int len = skb->len - ptr; @@ -137,11 +139,11 @@ static int is_ineligible(struct sk_buff *skb) __be16 frag_off; if (len < 0) - return 1; + return true; ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, 
&frag_off); if (ptr < 0) - return 0; + return false; if (nexthdr == IPPROTO_ICMPV6) { u8 _type, *tp; tp = skb_header_pointer(skb, @@ -149,9 +151,9 @@ static int is_ineligible(struct sk_buff *skb) sizeof(_type), &_type); if (tp == NULL || !(*tp & ICMPV6_INFOMSG_MASK)) - return 1; + return true; } - return 0; + return false; } /* @@ -206,14 +208,14 @@ static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type, * highest-order two bits set to 10 */ -static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset) +static bool opt_unrec(struct sk_buff *skb, __u32 offset) { u8 _optval, *op; offset += skb_network_offset(skb); op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval); if (op == NULL) - return 1; + return true; return (*op & 0xC0) == 0x80; } @@ -498,7 +500,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) err = ip6_append_data(sk, icmpv6_getfrag, &msg, len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), hlimit, - np->tclass, NULL, &fl6, (struct rt6_info*)dst, + np->tclass, NULL, &fl6, (struct rt6_info *)dst, MSG_DONTWAIT, np->dontfrag); if (err) { ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); @@ -579,7 +581,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6, - (struct rt6_info*)dst, MSG_DONTWAIT, + (struct rt6_info *)dst, MSG_DONTWAIT, np->dontfrag); if (err) { @@ -820,9 +822,7 @@ static int __net_init icmpv6_sk_init(struct net *net) err = inet_ctl_sock_create(&sk, PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, net); if (err < 0) { - printk(KERN_ERR - "Failed to initialize the ICMP6 control socket " - "(err %d).\n", + pr_err("Failed to initialize the ICMP6 control socket (err %d)\n", err); goto fail; } @@ -881,7 +881,7 @@ int __init icmpv6_init(void) return 0; fail: - printk(KERN_ERR "Failed to register ICMP6 protocol\n"); + pr_err("Failed to register ICMP6 protocol\n"); unregister_pernet_subsys(&icmpv6_sk_ops); return err; } @@ -950,7 +950,6 @@ int icmpv6_err_convert(u8 type, u8 code, int *err) return fatal; } - EXPORT_SYMBOL(icmpv6_err_convert); #ifdef CONFIG_SYSCTL diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 02dd203d9ea..e6cee5292a0 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -28,7 +28,7 @@ #include <net/inet6_connection_sock.h> int inet6_csk_bind_conflict(const struct sock *sk, - const struct inet_bind_bucket *tb) + const struct inet_bind_bucket *tb, bool relax) { const struct sock *sk2; const struct hlist_node *node; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 93717435013..0c220a41662 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -18,6 +18,9 @@ * routing table. * Ville Nuorvala: Fixed routing subtrees. */ + +#define pr_fmt(fmt) "IPv6: " fmt + #include <linux/errno.h> #include <linux/types.h> #include <linux/net.h> @@ -38,7 +41,7 @@ #define RT6_DEBUG 2 #if RT6_DEBUG >= 3 -#define RT6_TRACE(x...) printk(KERN_DEBUG x) +#define RT6_TRACE(x...) pr_debug(x) #else #define RT6_TRACE(x...) 
do { ; } while (0) #endif @@ -451,12 +454,10 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr, !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) { if (!allow_create) { if (replace_required) { - pr_warn("IPv6: Can't replace route, " - "no match found\n"); + pr_warn("Can't replace route, no match found\n"); return ERR_PTR(-ENOENT); } - pr_warn("IPv6: NLM_F_CREATE should be set " - "when creating new route\n"); + pr_warn("NLM_F_CREATE should be set when creating new route\n"); } goto insert_above; } @@ -499,11 +500,10 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr, * That would keep IPv6 consistent with IPv4 */ if (replace_required) { - pr_warn("IPv6: Can't replace route, no match found\n"); + pr_warn("Can't replace route, no match found\n"); return ERR_PTR(-ENOENT); } - pr_warn("IPv6: NLM_F_CREATE should be set " - "when creating new route\n"); + pr_warn("NLM_F_CREATE should be set when creating new route\n"); } /* * We walked to the bottom of tree. @@ -696,7 +696,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, */ if (!replace) { if (!add) - pr_warn("IPv6: NLM_F_CREATE should be set when creating new route\n"); + pr_warn("NLM_F_CREATE should be set when creating new route\n"); add: rt->dst.rt6_next = iter; @@ -715,7 +715,7 @@ add: if (!found) { if (add) goto add; - pr_warn("IPv6: NLM_F_REPLACE set, but no existing node found!\n"); + pr_warn("NLM_F_REPLACE set, but no existing node found!\n"); return -ENOENT; } *ins = rt; @@ -768,7 +768,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) replace_required = 1; } if (!allow_create && !replace_required) - pr_warn("IPv6: RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n"); + pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n"); fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr), rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst), @@ -1420,7 +1420,8 @@ static int fib6_clean_node(struct fib6_walker_t *w) res = fib6_del(rt, &info); if (res) { #if RT6_DEBUG >= 2 - printk(KERN_DEBUG "fib6_clean_node: del failed: rt=%p@%p err=%d\n", rt, rt->rt6i_node, res); + pr_debug("%s: del failed: rt=%p@%p err=%d\n", + __func__, rt, rt->rt6i_node, res); #endif continue; } diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index b7867a1215b..9772fbd8a3f 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -294,6 +294,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space, opt_space->opt_flen = fopt->opt_flen; return opt_space; } +EXPORT_SYMBOL_GPL(fl6_merge_options); static unsigned long check_linger(unsigned long ttl) { @@ -432,32 +433,32 @@ static int mem_check(struct sock *sk) return 0; } -static int ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2) +static bool ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2) { if (h1 == h2) - return 0; + return false; if (h1 == NULL || h2 == NULL) - return 1; + return true; if (h1->hdrlen != h2->hdrlen) - return 1; + return true; return memcmp(h1+1, h2+1, ((h1->hdrlen+1)<<3) - sizeof(*h1)); } -static int ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2) +static bool ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2) { if (o1 == o2) - return 0; + return false; if (o1 == NULL || o2 == NULL) - return 1; + return true; if (o1->opt_nflen != o2->opt_nflen) - return 1; + return true; if (ipv6_hdr_cmp(o1->hopopt, o2->hopopt)) - return 1; + return true; if 
(ipv6_hdr_cmp(o1->dst0opt, o2->dst0opt)) - return 1; + return true; if (ipv6_hdr_cmp((struct ipv6_opt_hdr *)o1->srcrt, (struct ipv6_opt_hdr *)o2->srcrt)) - return 1; - return 0; + return true; + return false; } static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl, @@ -705,9 +706,9 @@ static int ip6fl_seq_show(struct seq_file *seq, void *v) struct ip6_flowlabel *fl = v; seq_printf(seq, "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n", - (unsigned)ntohl(fl->label), + (unsigned int)ntohl(fl->label), fl->share, - (unsigned)fl->owner, + (int)fl->owner, atomic_read(&fl->users), fl->linger/HZ, (long)(fl->expires - jiffies)/HZ, diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 1ca5d45a12e..21a15dfe4a9 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -170,7 +170,8 @@ static int ip6_input_finish(struct sk_buff *skb) { const struct inet6_protocol *ipprot; unsigned int nhoff; - int nexthdr, raw; + int nexthdr; + bool raw; u8 hash; struct inet6_dev *idev; struct net *net = dev_net(skb_dst(skb)->dev); @@ -251,7 +252,7 @@ int ip6_input(struct sk_buff *skb) int ip6_mc_input(struct sk_buff *skb) { const struct ipv6hdr *hdr; - int deliver; + bool deliver; IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST, @@ -287,7 +288,7 @@ int ip6_mc_input(struct sk_buff *skb) * is for MLD (0x0000). */ if ((ptr[2] | ptr[3]) == 0) { - deliver = 0; + deliver = false; if (!ipv6_ext_hdr(nexthdr)) { /* BUG */ @@ -312,7 +313,7 @@ int ip6_mc_input(struct sk_buff *skb) case ICMPV6_MGM_REPORT: case ICMPV6_MGM_REDUCTION: case ICMPV6_MLD2_REPORT: - deliver = 1; + deliver = true; break; } goto out; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index b7ca46161cb..d99fdc69962 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -210,7 +210,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, kfree_skb(skb); return -ENOBUFS; } - kfree_skb(skb); + consume_skb(skb); skb = skb2; skb_set_owner_w(skb, sk); } @@ -252,8 +252,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, dst->dev, dst_output); } - if (net_ratelimit()) - printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n"); + net_dbg_ratelimited("IPv6: sending pkt_too_big to self\n"); skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); @@ -644,7 +643,10 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) /* We must not fragment if the socket is set to force MTU discovery * or if the skb it not generated by a local socket. 
*/ - if (!skb->local_df && skb->len > mtu) { + if (unlikely(!skb->local_df && skb->len > mtu)) { + if (skb->sk && dst_allfrag(skb_dst(skb))) + sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK); + skb->dev = skb_dst(skb)->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), @@ -789,6 +791,10 @@ slow_path_clean: } slow_path: + if ((skb->ip_summed == CHECKSUM_PARTIAL) && + skb_checksum_help(skb)) + goto fail; + left = skb->len - hlen; /* Space per frame */ ptr = hlen; /* Where to start from */ @@ -889,7 +895,7 @@ slow_path: } IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGOKS); - kfree_skb(skb); + consume_skb(skb); return err; fail: @@ -1199,7 +1205,6 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int copy; int err; int offset = 0; - int csummode = CHECKSUM_NONE; __u8 tx_flags = 0; if (flags&MSG_PROBE) @@ -1412,7 +1417,7 @@ alloc_new_skb: /* * Fill in the control structures */ - skb->ip_summed = csummode; + skb->ip_summed = CHECKSUM_NONE; skb->csum = 0; /* reserve for fragmentation and ipsec header */ skb_reserve(skb, hh_len + sizeof(struct frag_hdr) + @@ -1455,7 +1460,6 @@ alloc_new_skb: transhdrlen = 0; exthdrlen = 0; dst_exthdrlen = 0; - csummode = CHECKSUM_NONE; /* * Put the packet on the pending queue @@ -1535,6 +1539,7 @@ error: IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); return err; } +EXPORT_SYMBOL_GPL(ip6_append_data); static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np) { @@ -1638,6 +1643,7 @@ error: IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); goto out; } +EXPORT_SYMBOL_GPL(ip6_push_pending_frames); void ip6_flush_pending_frames(struct sock *sk) { @@ -1652,3 +1658,4 @@ void ip6_flush_pending_frames(struct sock *sk) ip6_cork_release(inet_sk(sk), inet6_sk(sk)); } +EXPORT_SYMBOL_GPL(ip6_flush_pending_frames); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index aa21da6a09c..c9015fad8d6 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -18,6 +18,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/capability.h> #include <linux/errno.h> @@ -60,7 +62,7 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS_NETDEV("ip6tnl0"); #ifdef IP6_TNL_DEBUG -#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__) +#define IP6_TNL_TRACE(x...) pr_debug("%s:" x "\n", __func__) #else #define IP6_TNL_TRACE(x...) 
do {;} while(0) #endif @@ -198,7 +200,7 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p) { const struct in6_addr *remote = &p->raddr; const struct in6_addr *local = &p->laddr; - unsigned h = 0; + unsigned int h = 0; int prio = 0; if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) { @@ -460,19 +462,14 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, struct ipv6_tlv_tnl_enc_lim *tel; __u32 mtu; case ICMPV6_DEST_UNREACH: - if (net_ratelimit()) - printk(KERN_WARNING - "%s: Path to destination invalid " - "or inactive!\n", t->parms.name); + net_warn_ratelimited("%s: Path to destination invalid or inactive!\n", + t->parms.name); rel_msg = 1; break; case ICMPV6_TIME_EXCEED: if ((*code) == ICMPV6_EXC_HOPLIMIT) { - if (net_ratelimit()) - printk(KERN_WARNING - "%s: Too small hop limit or " - "routing loop in tunnel!\n", - t->parms.name); + net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", + t->parms.name); rel_msg = 1; } break; @@ -484,17 +481,13 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, if (teli && teli == *info - 2) { tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; if (tel->encap_limit == 0) { - if (net_ratelimit()) - printk(KERN_WARNING - "%s: Too small encapsulation " - "limit or routing loop in " - "tunnel!\n", t->parms.name); + net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", + t->parms.name); rel_msg = 1; } - } else if (net_ratelimit()) { - printk(KERN_WARNING - "%s: Recipient unable to parse tunneled " - "packet!\n ", t->parms.name); + } else { + net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n", + t->parms.name); } break; case ICMPV6_PKT_TOOBIG: @@ -825,7 +818,7 @@ static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit) * 0 else **/ -static inline int +static inline bool ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr) { return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr); @@ -845,15 +838,12 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t) ldev = dev_get_by_index_rcu(net, p->link); if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0))) - printk(KERN_WARNING - "%s xmit: Local address not yet configured!\n", - p->name); + pr_warn("%s xmit: Local address not yet configured!\n", + p->name); else if (!ipv6_addr_is_multicast(&p->raddr) && unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0))) - printk(KERN_WARNING - "%s xmit: Routing loop! " - "Remote address found on this node!\n", - p->name); + pr_warn("%s xmit: Routing loop! 
Remote address found on this node!\n", + p->name); else ret = 1; rcu_read_unlock(); @@ -919,10 +909,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, if (tdev == dev) { stats->collisions++; - if (net_ratelimit()) - printk(KERN_WARNING - "%s: Local routing loop detected!\n", - t->parms.name); + net_warn_ratelimited("%s: Local routing loop detected!\n", + t->parms.name); goto tx_err_dst_release; } mtu = dst_mtu(dst) - sizeof (*ipv6h); @@ -954,7 +942,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, if (skb->sk) skb_set_owner_w(new_skb, skb->sk); - kfree_skb(skb); + consume_skb(skb); skb = new_skb; } skb_dst_drop(skb); @@ -1553,13 +1541,13 @@ static int __init ip6_tunnel_init(void) err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET); if (err < 0) { - printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n"); + pr_err("%s: can't register ip4ip6\n", __func__); goto out_ip4ip6; } err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6); if (err < 0) { - printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n"); + pr_err("%s: can't register ip6ip6\n", __func__); goto out_ip6ip6; } @@ -1580,10 +1568,10 @@ out_pernet: static void __exit ip6_tunnel_cleanup(void) { if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET)) - printk(KERN_INFO "ip6_tunnel close: can't deregister ip4ip6\n"); + pr_info("%s: can't deregister ip4ip6\n", __func__); if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6)) - printk(KERN_INFO "ip6_tunnel close: can't deregister ip6ip6\n"); + pr_info("%s: can't deregister ip6ip6\n", __func__); unregister_pernet_device(&ip6_tnl_net_ops); } diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 8110362e0af..b15dc08643a 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1147,8 +1147,7 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt, */ ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb); if (ret < 0) { - if (net_ratelimit()) - printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n"); + net_warn_ratelimited("mroute6: pending queue full, dropping entries\n"); kfree_skb(skb); } @@ -1351,7 +1350,7 @@ int __init ip6_mr_init(void) goto reg_notif_fail; #ifdef CONFIG_IPV6_PIMSM_V2 if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) { - printk(KERN_ERR "ip6_mr_init: can't add PIM protocol\n"); + pr_err("%s: can't add PIM protocol\n", __func__); err = -EAGAIN; goto add_proto_fail; } @@ -2215,14 +2214,15 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, rtm->rtm_src_len = 128; rtm->rtm_tos = 0; rtm->rtm_table = mrt->id; - NLA_PUT_U32(skb, RTA_TABLE, mrt->id); + if (nla_put_u32(skb, RTA_TABLE, mrt->id)) + goto nla_put_failure; rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_protocol = RTPROT_UNSPEC; rtm->rtm_flags = 0; - NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin); - NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp); - + if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) || + nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp)) + goto nla_put_failure; if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0) goto nla_put_failure; diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index bba658d9a03..5cb75bfe45b 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -30,6 +30,9 @@ * The decompression of IP datagram MUST be done after the reassembly, * AH/ESP processing. 
*/ + +#define pr_fmt(fmt) "IPv6: " fmt + #include <linux/module.h> #include <net/ip.h> #include <net/xfrm.h> @@ -69,8 +72,8 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (!x) return; - printk(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%pI6\n", - spi, &iph->daddr); + pr_debug("pmtu discovery on SA IPCOMP/%08x/%pI6\n", + spi, &iph->daddr); xfrm_state_put(x); } @@ -190,11 +193,11 @@ static const struct inet6_protocol ipcomp6_protocol = static int __init ipcomp6_init(void) { if (xfrm_register_type(&ipcomp6_type, AF_INET6) < 0) { - printk(KERN_INFO "ipcomp6 init: can't add xfrm type\n"); + pr_info("%s: can't add xfrm type\n", __func__); return -EAGAIN; } if (inet6_add_protocol(&ipcomp6_protocol, IPPROTO_COMP) < 0) { - printk(KERN_INFO "ipcomp6 init: can't add protocol\n"); + pr_info("%s: can't add protocol\n", __func__); xfrm_unregister_type(&ipcomp6_type, AF_INET6); return -EAGAIN; } @@ -204,9 +207,9 @@ static int __init ipcomp6_init(void) static void __exit ipcomp6_fini(void) { if (inet6_del_protocol(&ipcomp6_protocol, IPPROTO_COMP) < 0) - printk(KERN_INFO "ipv6 ipcomp close: can't remove protocol\n"); + pr_info("%s: can't remove protocol\n", __func__); if (xfrm_unregister_type(&ipcomp6_type, AF_INET6) < 0) - printk(KERN_INFO "ipv6 ipcomp close: can't remove xfrm type\n"); + pr_info("%s: can't remove xfrm type\n", __func__); } module_init(ipcomp6_init); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 63dd1f89ed7..ba6d13d1f1e 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -678,7 +678,6 @@ done: } case MCAST_MSFILTER: { - extern int sysctl_mld_max_msf; struct group_filter *gsf; if (optlen < GROUP_FILTER_SIZE(0)) @@ -943,7 +942,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt, } static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen, unsigned flags) + char __user *optval, int __user *optlen, unsigned int flags) { struct ipv6_pinfo *np = inet6_sk(sk); int len; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index b2869cab209..6d0f5dc8e3a 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -606,13 +606,13 @@ done: return err; } -int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, - const struct in6_addr *src_addr) +bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, + const struct in6_addr *src_addr) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_mc_socklist *mc; struct ip6_sf_socklist *psl; - int rv = 1; + bool rv = true; rcu_read_lock(); for_each_pmc_rcu(np, mc) { @@ -621,7 +621,7 @@ int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, } if (!mc) { rcu_read_unlock(); - return 1; + return true; } read_lock(&mc->sflock); psl = mc->sflist; @@ -635,9 +635,9 @@ int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, break; } if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count) - rv = 0; + rv = false; if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count) - rv = 0; + rv = false; } read_unlock(&mc->sflock); rcu_read_unlock(); @@ -931,15 +931,15 @@ int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr) /* * identify MLD packets for MLD filter exceptions */ -int ipv6_is_mld(struct sk_buff *skb, int nexthdr) +bool ipv6_is_mld(struct sk_buff *skb, int nexthdr) { struct icmp6hdr *pic; if (nexthdr != IPPROTO_ICMPV6) - return 0; + return false; if (!pskb_may_pull(skb, sizeof(struct icmp6hdr))) - return 0; + return false; pic = icmp6_hdr(skb); @@ 
-948,22 +948,22 @@ int ipv6_is_mld(struct sk_buff *skb, int nexthdr) case ICMPV6_MGM_REPORT: case ICMPV6_MGM_REDUCTION: case ICMPV6_MLD2_REPORT: - return 1; + return true; default: break; } - return 0; + return false; } /* * check if the interface/address pair is valid */ -int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, - const struct in6_addr *src_addr) +bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, + const struct in6_addr *src_addr) { struct inet6_dev *idev; struct ifmcaddr6 *mc; - int rv = 0; + bool rv = false; rcu_read_lock(); idev = __in6_dev_get(dev); @@ -990,7 +990,7 @@ int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, rv = mc->mca_sfcount[MCAST_EXCLUDE] !=0; spin_unlock_bh(&mc->mca_lock); } else - rv = 1; /* don't filter unspecified source */ + rv = true; /* don't filter unspecified source */ } read_unlock_bh(&idev->lock); } @@ -1046,8 +1046,8 @@ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime) } /* mark EXCLUDE-mode sources */ -static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs, - const struct in6_addr *srcs) +static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs, + const struct in6_addr *srcs) { struct ip6_sf_list *psf; int i, scount; @@ -1061,7 +1061,7 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs, if (psf->sf_count[MCAST_INCLUDE] || pmc->mca_sfcount[MCAST_EXCLUDE] != psf->sf_count[MCAST_EXCLUDE]) - continue; + break; if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) { scount++; break; @@ -1070,12 +1070,12 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs, } pmc->mca_flags &= ~MAF_GSQUERY; if (scount == nsrcs) /* all sources excluded */ - return 0; - return 1; + return false; + return true; } -static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs, - const struct in6_addr *srcs) +static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs, + const struct in6_addr *srcs) { struct ip6_sf_list *psf; int i, scount; @@ -1099,10 +1099,10 @@ static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs, } if (!scount) { pmc->mca_flags &= ~MAF_GSQUERY; - return 0; + return false; } pmc->mca_flags |= MAF_GSQUERY; - return 1; + return true; } /* called with rcu_read_lock() */ @@ -1276,17 +1276,17 @@ int igmp6_event_report(struct sk_buff *skb) return 0; } -static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type, - int gdeleted, int sdeleted) +static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type, + int gdeleted, int sdeleted) { switch (type) { case MLD2_MODE_IS_INCLUDE: case MLD2_MODE_IS_EXCLUDE: if (gdeleted || sdeleted) - return 0; + return false; if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) { if (pmc->mca_sfmode == MCAST_INCLUDE) - return 1; + return true; /* don't include if this source is excluded * in all filters */ @@ -1295,29 +1295,29 @@ static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type, return pmc->mca_sfcount[MCAST_EXCLUDE] == psf->sf_count[MCAST_EXCLUDE]; } - return 0; + return false; case MLD2_CHANGE_TO_INCLUDE: if (gdeleted || sdeleted) - return 0; + return false; return psf->sf_count[MCAST_INCLUDE] != 0; case MLD2_CHANGE_TO_EXCLUDE: if (gdeleted || sdeleted) - return 0; + return false; if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 || psf->sf_count[MCAST_INCLUDE]) - return 0; + return false; return pmc->mca_sfcount[MCAST_EXCLUDE] == psf->sf_count[MCAST_EXCLUDE]; case MLD2_ALLOW_NEW_SOURCES: if (gdeleted || !psf->sf_crcount) - return 0; + 
return false; return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted; case MLD2_BLOCK_OLD_SOURCES: if (pmc->mca_sfmode == MCAST_INCLUDE) return gdeleted || (psf->sf_crcount && sdeleted); return psf->sf_crcount && !gdeleted && !sdeleted; } - return 0; + return false; } static int @@ -2627,8 +2627,7 @@ static int __net_init igmp6_net_init(struct net *net) err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, net); if (err < 0) { - printk(KERN_ERR - "Failed to initialize the IGMP6 control socket (err %d).\n", + pr_err("Failed to initialize the IGMP6 control socket (err %d)\n", err); goto out; } diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index 7e1e0fbfef2..5b087c31d87 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c @@ -22,6 +22,8 @@ * Masahide NAKAMURA @USAGI */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/skbuff.h> #include <linux/time.h> @@ -44,7 +46,7 @@ static inline void *mip6_padn(__u8 *data, __u8 padlen) if (!data) return NULL; if (padlen == 1) { - data[0] = IPV6_TLV_PAD0; + data[0] = IPV6_TLV_PAD1; } else if (padlen > 1) { data[0] = IPV6_TLV_PADN; data[1] = padlen - 2; @@ -307,13 +309,12 @@ static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb, static int mip6_destopt_init_state(struct xfrm_state *x) { if (x->id.spi) { - printk(KERN_INFO "%s: spi is not 0: %u\n", __func__, - x->id.spi); + pr_info("%s: spi is not 0: %u\n", __func__, x->id.spi); return -EINVAL; } if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) { - printk(KERN_INFO "%s: state's mode is not %u: %u\n", - __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode); + pr_info("%s: state's mode is not %u: %u\n", + __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode); return -EINVAL; } @@ -443,13 +444,12 @@ static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb, static int mip6_rthdr_init_state(struct xfrm_state *x) { if (x->id.spi) { - printk(KERN_INFO "%s: spi is not 0: %u\n", __func__, - x->id.spi); + pr_info("%s: spi is not 0: %u\n", __func__, x->id.spi); return -EINVAL; } if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) { - printk(KERN_INFO "%s: state's mode is not %u: %u\n", - __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode); + pr_info("%s: state's mode is not %u: %u\n", + __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode); return -EINVAL; } @@ -481,18 +481,18 @@ static const struct xfrm_type mip6_rthdr_type = static int __init mip6_init(void) { - printk(KERN_INFO "Mobile IPv6\n"); + pr_info("Mobile IPv6\n"); if (xfrm_register_type(&mip6_destopt_type, AF_INET6) < 0) { - printk(KERN_INFO "%s: can't add xfrm type(destopt)\n", __func__); + pr_info("%s: can't add xfrm type(destopt)\n", __func__); goto mip6_destopt_xfrm_fail; } if (xfrm_register_type(&mip6_rthdr_type, AF_INET6) < 0) { - printk(KERN_INFO "%s: can't add xfrm type(rthdr)\n", __func__); + pr_info("%s: can't add xfrm type(rthdr)\n", __func__); goto mip6_rthdr_xfrm_fail; } if (rawv6_mh_filter_register(mip6_mh_filter) < 0) { - printk(KERN_INFO "%s: can't add rawv6 mh filter\n", __func__); + pr_info("%s: can't add rawv6 mh filter\n", __func__); goto mip6_rawv6_mh_fail; } @@ -510,11 +510,11 @@ static int __init mip6_init(void) static void __exit mip6_fini(void) { if (rawv6_mh_filter_unregister(mip6_mh_filter) < 0) - printk(KERN_INFO "%s: can't remove rawv6 mh filter\n", __func__); + pr_info("%s: can't remove rawv6 mh filter\n", __func__); if (xfrm_unregister_type(&mip6_rthdr_type, AF_INET6) < 0) - printk(KERN_INFO "%s: can't remove xfrm type(rthdr)\n", 
__func__); + pr_info("%s: can't remove xfrm type(rthdr)\n", __func__); if (xfrm_unregister_type(&mip6_destopt_type, AF_INET6) < 0) - printk(KERN_INFO "%s: can't remove xfrm type(destopt)\n", __func__); + pr_info("%s: can't remove xfrm type(destopt)\n", __func__); } module_init(mip6_init); diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 176b469322a..54f62d3b8dd 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -15,6 +15,7 @@ /* * Changes: * + * Alexey I. Froloff : RFC6106 (DNSSL) support * Pierre Ynard : export userland ND options * through netlink (RDNSS support) * Lars Fenneberg : fixed MTU setting on receipt @@ -26,27 +27,7 @@ * YOSHIFUJI Hideaki @USAGI : Verify ND options properly */ -/* Set to 3 to get tracing... */ -#define ND_DEBUG 1 - -#define ND_PRINTK(fmt, args...) do { if (net_ratelimit()) { printk(fmt, ## args); } } while(0) -#define ND_NOPRINTK(x...) do { ; } while(0) -#define ND_PRINTK0 ND_PRINTK -#define ND_PRINTK1 ND_NOPRINTK -#define ND_PRINTK2 ND_NOPRINTK -#define ND_PRINTK3 ND_NOPRINTK -#if ND_DEBUG >= 1 -#undef ND_PRINTK1 -#define ND_PRINTK1 ND_PRINTK -#endif -#if ND_DEBUG >= 2 -#undef ND_PRINTK2 -#define ND_PRINTK2 ND_PRINTK -#endif -#if ND_DEBUG >= 3 -#undef ND_PRINTK3 -#define ND_PRINTK3 ND_PRINTK -#endif +#define pr_fmt(fmt) "ICMPv6: " fmt #include <linux/module.h> #include <linux/errno.h> @@ -91,6 +72,15 @@ #include <linux/netfilter.h> #include <linux/netfilter_ipv6.h> +/* Set to 3 to get tracing... */ +#define ND_DEBUG 1 + +#define ND_PRINTK(val, level, fmt, ...) \ +do { \ + if (val <= ND_DEBUG) \ + net_##level##_ratelimited(fmt, ##__VA_ARGS__); \ +} while (0) + static u32 ndisc_hash(const void *pkey, const struct net_device *dev, __u32 *hash_rnd); @@ -228,7 +218,8 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur, static inline int ndisc_is_useropt(struct nd_opt_hdr *opt) { - return opt->nd_opt_type == ND_OPT_RDNSS; + return opt->nd_opt_type == ND_OPT_RDNSS || + opt->nd_opt_type == ND_OPT_DNSSL; } static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur, @@ -263,10 +254,9 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, case ND_OPT_MTU: case ND_OPT_REDIRECT_HDR: if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) { - ND_PRINTK2(KERN_WARNING - "%s(): duplicated ND6 option found: type=%d\n", - __func__, - nd_opt->nd_opt_type); + ND_PRINTK(2, warn, + "%s: duplicated ND6 option found: type=%d\n", + __func__, nd_opt->nd_opt_type); } else { ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt; } @@ -294,10 +284,11 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, * to accommodate future extension to the * protocol. 
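/*
 * Illustrative sketch, not part of the patch: the old ND_PRINTK0..ND_PRINTK3
 * macros in ndisc.c are replaced by a single ND_PRINTK(val, level, fmt, ...)
 * that pastes "level" into one of the net_<level>_ratelimited() helpers and
 * drops the call when val exceeds the compile-time ND_DEBUG threshold.  The
 * stand-alone mock-up below shows the same pattern with printf standing in
 * for the kernel helpers (GNU C, because of ##__VA_ARGS__).
 */
#include <stdio.h>

#define ND_DEBUG 1

/* Hypothetical userspace stand-ins for net_warn_ratelimited() and friends. */
#define net_warn_ratelimited(fmt, ...) printf("warn: " fmt, ##__VA_ARGS__)
#define net_dbg_ratelimited(fmt, ...)  printf("dbg:  " fmt, ##__VA_ARGS__)

#define ND_PRINTK(val, level, fmt, ...) \
do { \
        if (val <= ND_DEBUG) \
                net_##level##_ratelimited(fmt, ##__VA_ARGS__); \
} while (0)

int main(void)
{
        ND_PRINTK(1, warn, "NA: someone advertises our address on %s!\n", "eth0");
        /* Suppressed: 3 > ND_DEBUG, so the branch is dead code. */
        ND_PRINTK(3, dbg, "RA: adding default router\n");
        return 0;
}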
*/ - ND_PRINTK2(KERN_NOTICE - "%s(): ignored unsupported option; type=%d, len=%d\n", - __func__, - nd_opt->nd_opt_type, nd_opt->nd_opt_len); + ND_PRINTK(2, notice, + "%s: ignored unsupported option; type=%d, len=%d\n", + __func__, + nd_opt->nd_opt_type, + nd_opt->nd_opt_len); } } opt_len -= l; @@ -325,9 +316,6 @@ int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev, case ARPHRD_FDDI: ipv6_eth_mc_map(addr, buf); return 0; - case ARPHRD_IEEE802_TR: - ipv6_tr_mc_map(addr,buf); - return 0; case ARPHRD_ARCNET: ipv6_arcnet_mc_map(addr, buf); return 0; @@ -360,7 +348,7 @@ static int ndisc_constructor(struct neighbour *neigh) struct net_device *dev = neigh->dev; struct inet6_dev *in6_dev; struct neigh_parms *parms; - int is_multicast = ipv6_addr_is_multicast(addr); + bool is_multicast = ipv6_addr_is_multicast(addr); in6_dev = in6_dev_get(dev); if (in6_dev == NULL) { @@ -456,9 +444,8 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev, len + hlen + tlen), 1, &err); if (!skb) { - ND_PRINTK0(KERN_ERR - "ICMPv6 ND: %s() failed to allocate an skb, err=%d.\n", - __func__, err); + ND_PRINTK(0, err, "ND: %s failed to allocate an skb, err=%d\n", + __func__, err); return NULL; } @@ -694,8 +681,9 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb) if ((probes -= neigh->parms->ucast_probes) < 0) { if (!(neigh->nud_state & NUD_VALID)) { - ND_PRINTK1(KERN_DEBUG "%s(): trying to ucast probe in NUD_INVALID: %pI6\n", - __func__, target); + ND_PRINTK(1, dbg, + "%s: trying to ucast probe in NUD_INVALID: %pI6\n", + __func__, target); } ndisc_send_ns(dev, neigh, target, target, saddr); } else if ((probes -= neigh->parms->app_probes) < 0) { @@ -737,12 +725,11 @@ static void ndisc_recv_ns(struct sk_buff *skb) struct inet6_dev *idev = NULL; struct neighbour *neigh; int dad = ipv6_addr_any(saddr); - int inc; + bool inc; int is_router = -1; if (ipv6_addr_is_multicast(&msg->target)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NS: multicast target address"); + ND_PRINTK(2, warn, "NS: multicast target address\n"); return; } @@ -755,22 +742,20 @@ static void ndisc_recv_ns(struct sk_buff *skb) daddr->s6_addr32[1] == htonl(0x00000000) && daddr->s6_addr32[2] == htonl(0x00000001) && daddr->s6_addr [12] == 0xff )) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NS: bad DAD packet (wrong destination)\n"); + ND_PRINTK(2, warn, "NS: bad DAD packet (wrong destination)\n"); return; } if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NS: invalid ND options\n"); + ND_PRINTK(2, warn, "NS: invalid ND options\n"); return; } if (ndopts.nd_opts_src_lladdr) { lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr, dev); if (!lladdr) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NS: invalid link-layer address length\n"); + ND_PRINTK(2, warn, + "NS: invalid link-layer address length\n"); return; } @@ -780,8 +765,8 @@ static void ndisc_recv_ns(struct sk_buff *skb) * in the message. 
*/ if (dad) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NS: bad DAD packet (link-layer address option)\n"); + ND_PRINTK(2, warn, + "NS: bad DAD packet (link-layer address option)\n"); return; } } @@ -793,20 +778,6 @@ static void ndisc_recv_ns(struct sk_buff *skb) if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) { if (dad) { - if (dev->type == ARPHRD_IEEE802_TR) { - const unsigned char *sadr; - sadr = skb_mac_header(skb); - if (((sadr[8] ^ dev->dev_addr[0]) & 0x7f) == 0 && - sadr[9] == dev->dev_addr[1] && - sadr[10] == dev->dev_addr[2] && - sadr[11] == dev->dev_addr[3] && - sadr[12] == dev->dev_addr[4] && - sadr[13] == dev->dev_addr[5]) { - /* looped-back to us */ - goto out; - } - } - /* * We are colliding with another node * who is doing DAD @@ -913,34 +884,30 @@ static void ndisc_recv_na(struct sk_buff *skb) struct neighbour *neigh; if (skb->len < sizeof(struct nd_msg)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NA: packet too short\n"); + ND_PRINTK(2, warn, "NA: packet too short\n"); return; } if (ipv6_addr_is_multicast(&msg->target)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NA: target address is multicast.\n"); + ND_PRINTK(2, warn, "NA: target address is multicast\n"); return; } if (ipv6_addr_is_multicast(daddr) && msg->icmph.icmp6_solicited) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NA: solicited NA is multicasted.\n"); + ND_PRINTK(2, warn, "NA: solicited NA is multicasted\n"); return; } if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NS: invalid ND option\n"); + ND_PRINTK(2, warn, "NS: invalid ND option\n"); return; } if (ndopts.nd_opts_tgt_lladdr) { lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr, dev); if (!lladdr) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NA: invalid link-layer address length\n"); + ND_PRINTK(2, warn, + "NA: invalid link-layer address length\n"); return; } } @@ -961,9 +928,9 @@ static void ndisc_recv_na(struct sk_buff *skb) unsolicited advertisement. 
*/ if (skb->pkt_type != PACKET_LOOPBACK) - ND_PRINTK1(KERN_WARNING - "ICMPv6 NA: someone advertises our address %pI6 on %s!\n", - &ifp->addr, ifp->idev->dev->name); + ND_PRINTK(1, warn, + "NA: someone advertises our address %pI6 on %s!\n", + &ifp->addr, ifp->idev->dev->name); in6_ifa_put(ifp); return; } @@ -1025,8 +992,7 @@ static void ndisc_recv_rs(struct sk_buff *skb) idev = __in6_dev_get(skb->dev); if (!idev) { - if (net_ratelimit()) - ND_PRINTK1("ICMP6 RS: can't find in6 device\n"); + ND_PRINTK(1, err, "RS: can't find in6 device\n"); return; } @@ -1043,8 +1009,7 @@ static void ndisc_recv_rs(struct sk_buff *skb) /* Parse ND options */ if (!ndisc_parse_options(rs_msg->opt, ndoptlen, &ndopts)) { - if (net_ratelimit()) - ND_PRINTK2("ICMP6 NS: invalid ND option, ignored\n"); + ND_PRINTK(2, notice, "NS: invalid ND option, ignored\n"); goto out; } @@ -1099,8 +1064,9 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt) memcpy(ndmsg + 1, opt, opt->nd_opt_len << 3); - NLA_PUT(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr), - &ipv6_hdr(ra)->saddr); + if (nla_put(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr), + &ipv6_hdr(ra)->saddr)) + goto nla_put_failure; nlmsg_end(skb, nlh); rtnl_notify(skb, net, 0, RTNLGRP_ND_USEROPT, NULL, GFP_ATOMIC); @@ -1141,20 +1107,17 @@ static void ndisc_router_discovery(struct sk_buff *skb) optlen = (skb->tail - skb->transport_header) - sizeof(struct ra_msg); if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 RA: source address is not link-local.\n"); + ND_PRINTK(2, warn, "RA: source address is not link-local\n"); return; } if (optlen < 0) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 RA: packet too short\n"); + ND_PRINTK(2, warn, "RA: packet too short\n"); return; } #ifdef CONFIG_IPV6_NDISC_NODETYPE if (skb->ndisc_nodetype == NDISC_NODETYPE_HOST) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 RA: from host or unauthorized router\n"); + ND_PRINTK(2, warn, "RA: from host or unauthorized router\n"); return; } #endif @@ -1165,15 +1128,13 @@ static void ndisc_router_discovery(struct sk_buff *skb) in6_dev = __in6_dev_get(skb->dev); if (in6_dev == NULL) { - ND_PRINTK0(KERN_ERR - "ICMPv6 RA: can't find inet6 device for %s.\n", - skb->dev->name); + ND_PRINTK(0, err, "RA: can't find inet6 device for %s\n", + skb->dev->name); return; } if (!ndisc_parse_options(opt, optlen, &ndopts)) { - ND_PRINTK2(KERN_WARNING - "ICMP6 RA: invalid ND options\n"); + ND_PRINTK(2, warn, "RA: invalid ND options\n"); return; } @@ -1226,9 +1187,9 @@ static void ndisc_router_discovery(struct sk_buff *skb) if (rt) { neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr); if (!neigh) { - ND_PRINTK0(KERN_ERR - "ICMPv6 RA: %s() got default router without neighbour.\n", - __func__); + ND_PRINTK(0, err, + "RA: %s got default router without neighbour\n", + __func__); dst_release(&rt->dst); return; } @@ -1239,22 +1200,21 @@ static void ndisc_router_discovery(struct sk_buff *skb) } if (rt == NULL && lifetime) { - ND_PRINTK3(KERN_DEBUG - "ICMPv6 RA: adding default router.\n"); + ND_PRINTK(3, dbg, "RA: adding default router\n"); rt = rt6_add_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev, pref); if (rt == NULL) { - ND_PRINTK0(KERN_ERR - "ICMPv6 RA: %s() failed to add default route.\n", - __func__); + ND_PRINTK(0, err, + "RA: %s failed to add default route\n", + __func__); return; } neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr); if (neigh == NULL) { - ND_PRINTK0(KERN_ERR - "ICMPv6 RA: %s() got default router without neighbour.\n", - 
__func__); + ND_PRINTK(0, err, + "RA: %s got default router without neighbour\n", + __func__); dst_release(&rt->dst); return; } @@ -1322,8 +1282,8 @@ skip_linkparms: lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr, skb->dev); if (!lladdr) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 RA: invalid link-layer address length\n"); + ND_PRINTK(2, warn, + "RA: invalid link-layer address length\n"); goto out; } } @@ -1387,9 +1347,7 @@ skip_routeinfo: mtu = ntohl(n); if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 RA: invalid mtu: %d\n", - mtu); + ND_PRINTK(2, warn, "RA: invalid mtu: %d\n", mtu); } else if (in6_dev->cnf.mtu6 != mtu) { in6_dev->cnf.mtu6 = mtu; @@ -1410,8 +1368,7 @@ skip_routeinfo: } if (ndopts.nd_opts_tgt_lladdr || ndopts.nd_opts_rh) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 RA: invalid RA options"); + ND_PRINTK(2, warn, "RA: invalid RA options\n"); } out: if (rt) @@ -1436,15 +1393,15 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) switch (skb->ndisc_nodetype) { case NDISC_NODETYPE_HOST: case NDISC_NODETYPE_NODEFAULT: - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: from host or unauthorized router\n"); + ND_PRINTK(2, warn, + "Redirect: from host or unauthorized router\n"); return; } #endif if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: source address is not link-local.\n"); + ND_PRINTK(2, warn, + "Redirect: source address is not link-local\n"); return; } @@ -1452,8 +1409,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr); if (optlen < 0) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: packet too short\n"); + ND_PRINTK(2, warn, "Redirect: packet too short\n"); return; } @@ -1462,8 +1418,8 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) dest = target + 1; if (ipv6_addr_is_multicast(dest)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: destination address is multicast.\n"); + ND_PRINTK(2, warn, + "Redirect: destination address is multicast\n"); return; } @@ -1471,8 +1427,8 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) on_link = 1; } else if (ipv6_addr_type(target) != (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: target address is not link-local unicast.\n"); + ND_PRINTK(2, warn, + "Redirect: target address is not link-local unicast\n"); return; } @@ -1488,16 +1444,15 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) */ if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: invalid ND options\n"); + ND_PRINTK(2, warn, "Redirect: invalid ND options\n"); return; } if (ndopts.nd_opts_tgt_lladdr) { lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr, skb->dev); if (!lladdr) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: invalid link-layer address length\n"); + ND_PRINTK(2, warn, + "Redirect: invalid link-layer address length\n"); return; } } @@ -1532,16 +1487,15 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) u8 ha_buf[MAX_ADDR_LEN], *ha = NULL; if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: no link-local address on %s\n", - dev->name); + ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n", + dev->name); return; } if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) && ipv6_addr_type(target) != (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: target address is not 
link-local unicast.\n"); + ND_PRINTK(2, warn, + "Redirect: target address is not link-local unicast\n"); return; } @@ -1560,8 +1514,8 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) rt = (struct rt6_info *) dst; if (rt->rt6i_flags & RTF_GATEWAY) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: destination is not a neighbour.\n"); + ND_PRINTK(2, warn, + "Redirect: destination is not a neighbour\n"); goto release; } if (!rt->rt6i_peer) @@ -1572,8 +1526,8 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) if (dev->addr_len) { struct neighbour *neigh = dst_neigh_lookup(skb_dst(skb), target); if (!neigh) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: no neigh for target address\n"); + ND_PRINTK(2, warn, + "Redirect: no neigh for target address\n"); goto release; } @@ -1601,9 +1555,9 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) len + hlen + tlen), 1, &err); if (buff == NULL) { - ND_PRINTK0(KERN_ERR - "ICMPv6 Redirect: %s() failed to allocate an skb, err=%d.\n", - __func__, err); + ND_PRINTK(0, err, + "Redirect: %s failed to allocate an skb, err=%d\n", + __func__, err); goto release; } @@ -1688,16 +1642,14 @@ int ndisc_rcv(struct sk_buff *skb) __skb_push(skb, skb->data - skb_transport_header(skb)); if (ipv6_hdr(skb)->hop_limit != 255) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NDISC: invalid hop-limit: %d\n", - ipv6_hdr(skb)->hop_limit); + ND_PRINTK(2, warn, "NDISC: invalid hop-limit: %d\n", + ipv6_hdr(skb)->hop_limit); return 0; } if (msg->icmph.icmp6_code != 0) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NDISC: invalid ICMPv6 code: %d\n", - msg->icmph.icmp6_code); + ND_PRINTK(2, warn, "NDISC: invalid ICMPv6 code: %d\n", + msg->icmph.icmp6_code); return 0; } @@ -1764,11 +1716,7 @@ static void ndisc_warn_deprecated_sysctl(struct ctl_table *ctl, static int warned; if (strcmp(warncomm, current->comm) && warned < 5) { strcpy(warncomm, current->comm); - printk(KERN_WARNING - "process `%s' is using deprecated sysctl (%s) " - "net.ipv6.neigh.%s.%s; " - "Use net.ipv6.neigh.%s.%s_ms " - "instead.\n", + pr_warn("process `%s' is using deprecated sysctl (%s) net.ipv6.neigh.%s.%s - use net.ipv6.neigh.%s.%s_ms instead\n", warncomm, func, dev_name, ctl->procname, dev_name, ctl->procname); @@ -1822,9 +1770,9 @@ static int __net_init ndisc_net_init(struct net *net) err = inet_ctl_sock_create(&sk, PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, net); if (err < 0) { - ND_PRINTK0(KERN_ERR - "ICMPv6 NDISC: Failed to initialize the control socket (err %d).\n", - err); + ND_PRINTK(0, err, + "NDISC: Failed to initialize the control socket (err %d)\n", + err); return err; } diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index d33cddd16fb..10135342799 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig @@ -25,28 +25,6 @@ config NF_CONNTRACK_IPV6 To compile it as a module, choose M here. If unsure, say N. -config IP6_NF_QUEUE - tristate "IP6 Userspace queueing via NETLINK (OBSOLETE)" - depends on INET && IPV6 && NETFILTER - depends on NETFILTER_ADVANCED - ---help--- - - This option adds a queue handler to the kernel for IPv6 - packets which enables users to receive the filtered packets - with QUEUE target using libipq. - - This option enables the old IPv6-only "ip6_queue" implementation - which has been obsoleted by the new "nfnetlink_queue" code (see - CONFIG_NETFILTER_NETLINK_QUEUE). - - (C) Fernando Anton 2001 - IPv64 Project - Work based in IPv64 draft by Arturo Azcorra. 
- Universidad Carlos III de Madrid - Universidad Politecnica de Alcala de Henares - email: <fanton@it.uc3m.es>. - - To compile it as a module, choose M here. If unsure, say N. - config IP6_NF_IPTABLES tristate "IP6 tables support (required for filtering)" depends on INET && IPV6 diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile index d4dfd0a2109..534d3f216f7 100644 --- a/net/ipv6/netfilter/Makefile +++ b/net/ipv6/netfilter/Makefile @@ -6,7 +6,6 @@ obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o -obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c deleted file mode 100644 index a34c9e4c792..00000000000 --- a/net/ipv6/netfilter/ip6_queue.c +++ /dev/null @@ -1,641 +0,0 @@ -/* - * This is a module which is used for queueing IPv6 packets and - * communicating with userspace via netlink. - * - * (C) 2001 Fernando Anton, this code is GPL. - * IPv64 Project - Work based in IPv64 draft by Arturo Azcorra. - * Universidad Carlos III de Madrid - Leganes (Madrid) - Spain - * Universidad Politecnica de Alcala de Henares - Alcala de H. (Madrid) - Spain - * email: fanton@it.uc3m.es - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include <linux/module.h> -#include <linux/skbuff.h> -#include <linux/init.h> -#include <linux/ipv6.h> -#include <linux/notifier.h> -#include <linux/netdevice.h> -#include <linux/netfilter.h> -#include <linux/netlink.h> -#include <linux/spinlock.h> -#include <linux/sysctl.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/mutex.h> -#include <linux/slab.h> -#include <net/net_namespace.h> -#include <net/sock.h> -#include <net/ipv6.h> -#include <net/ip6_route.h> -#include <net/netfilter/nf_queue.h> -#include <linux/netfilter_ipv4/ip_queue.h> -#include <linux/netfilter_ipv4/ip_tables.h> -#include <linux/netfilter_ipv6/ip6_tables.h> - -#define IPQ_QMAX_DEFAULT 1024 -#define IPQ_PROC_FS_NAME "ip6_queue" -#define NET_IPQ_QMAX_NAME "ip6_queue_maxlen" - -typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long); - -static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE; -static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT; -static DEFINE_SPINLOCK(queue_lock); -static int peer_pid __read_mostly; -static unsigned int copy_range __read_mostly; -static unsigned int queue_total; -static unsigned int queue_dropped = 0; -static unsigned int queue_user_dropped = 0; -static struct sock *ipqnl __read_mostly; -static LIST_HEAD(queue_list); -static DEFINE_MUTEX(ipqnl_mutex); - -static inline void -__ipq_enqueue_entry(struct nf_queue_entry *entry) -{ - list_add_tail(&entry->list, &queue_list); - queue_total++; -} - -static inline int -__ipq_set_mode(unsigned char mode, unsigned int range) -{ - int status = 0; - - switch(mode) { - case IPQ_COPY_NONE: - case IPQ_COPY_META: - copy_mode = mode; - copy_range = 0; - break; - - case IPQ_COPY_PACKET: - if (range > 0xFFFF) - range = 0xFFFF; - copy_range = range; - copy_mode = mode; - break; - - default: - status = -EINVAL; - - } - return status; -} - -static void __ipq_flush(ipq_cmpfn cmpfn, unsigned long data); - -static inline void -__ipq_reset(void) -{ - peer_pid = 0; - 
net_disable_timestamp(); - __ipq_set_mode(IPQ_COPY_NONE, 0); - __ipq_flush(NULL, 0); -} - -static struct nf_queue_entry * -ipq_find_dequeue_entry(unsigned long id) -{ - struct nf_queue_entry *entry = NULL, *i; - - spin_lock_bh(&queue_lock); - - list_for_each_entry(i, &queue_list, list) { - if ((unsigned long)i == id) { - entry = i; - break; - } - } - - if (entry) { - list_del(&entry->list); - queue_total--; - } - - spin_unlock_bh(&queue_lock); - return entry; -} - -static void -__ipq_flush(ipq_cmpfn cmpfn, unsigned long data) -{ - struct nf_queue_entry *entry, *next; - - list_for_each_entry_safe(entry, next, &queue_list, list) { - if (!cmpfn || cmpfn(entry, data)) { - list_del(&entry->list); - queue_total--; - nf_reinject(entry, NF_DROP); - } - } -} - -static void -ipq_flush(ipq_cmpfn cmpfn, unsigned long data) -{ - spin_lock_bh(&queue_lock); - __ipq_flush(cmpfn, data); - spin_unlock_bh(&queue_lock); -} - -static struct sk_buff * -ipq_build_packet_message(struct nf_queue_entry *entry, int *errp) -{ - sk_buff_data_t old_tail; - size_t size = 0; - size_t data_len = 0; - struct sk_buff *skb; - struct ipq_packet_msg *pmsg; - struct nlmsghdr *nlh; - struct timeval tv; - - switch (ACCESS_ONCE(copy_mode)) { - case IPQ_COPY_META: - case IPQ_COPY_NONE: - size = NLMSG_SPACE(sizeof(*pmsg)); - break; - - case IPQ_COPY_PACKET: - if (entry->skb->ip_summed == CHECKSUM_PARTIAL && - (*errp = skb_checksum_help(entry->skb))) - return NULL; - - data_len = ACCESS_ONCE(copy_range); - if (data_len == 0 || data_len > entry->skb->len) - data_len = entry->skb->len; - - size = NLMSG_SPACE(sizeof(*pmsg) + data_len); - break; - - default: - *errp = -EINVAL; - return NULL; - } - - skb = alloc_skb(size, GFP_ATOMIC); - if (!skb) - goto nlmsg_failure; - - old_tail = skb->tail; - nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh)); - pmsg = NLMSG_DATA(nlh); - memset(pmsg, 0, sizeof(*pmsg)); - - pmsg->packet_id = (unsigned long )entry; - pmsg->data_len = data_len; - tv = ktime_to_timeval(entry->skb->tstamp); - pmsg->timestamp_sec = tv.tv_sec; - pmsg->timestamp_usec = tv.tv_usec; - pmsg->mark = entry->skb->mark; - pmsg->hook = entry->hook; - pmsg->hw_protocol = entry->skb->protocol; - - if (entry->indev) - strcpy(pmsg->indev_name, entry->indev->name); - else - pmsg->indev_name[0] = '\0'; - - if (entry->outdev) - strcpy(pmsg->outdev_name, entry->outdev->name); - else - pmsg->outdev_name[0] = '\0'; - - if (entry->indev && entry->skb->dev && - entry->skb->mac_header != entry->skb->network_header) { - pmsg->hw_type = entry->skb->dev->type; - pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr); - } - - if (data_len) - if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len)) - BUG(); - - nlh->nlmsg_len = skb->tail - old_tail; - return skb; - -nlmsg_failure: - kfree_skb(skb); - *errp = -EINVAL; - printk(KERN_ERR "ip6_queue: error creating packet message\n"); - return NULL; -} - -static int -ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) -{ - int status = -EINVAL; - struct sk_buff *nskb; - - if (copy_mode == IPQ_COPY_NONE) - return -EAGAIN; - - nskb = ipq_build_packet_message(entry, &status); - if (nskb == NULL) - return status; - - spin_lock_bh(&queue_lock); - - if (!peer_pid) - goto err_out_free_nskb; - - if (queue_total >= queue_maxlen) { - queue_dropped++; - status = -ENOSPC; - if (net_ratelimit()) - printk (KERN_WARNING "ip6_queue: fill at %d entries, " - "dropping packet(s). 
Dropped: %d\n", queue_total, - queue_dropped); - goto err_out_free_nskb; - } - - /* netlink_unicast will either free the nskb or attach it to a socket */ - status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT); - if (status < 0) { - queue_user_dropped++; - goto err_out_unlock; - } - - __ipq_enqueue_entry(entry); - - spin_unlock_bh(&queue_lock); - return status; - -err_out_free_nskb: - kfree_skb(nskb); - -err_out_unlock: - spin_unlock_bh(&queue_lock); - return status; -} - -static int -ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e) -{ - int diff; - struct ipv6hdr *user_iph = (struct ipv6hdr *)v->payload; - struct sk_buff *nskb; - - if (v->data_len < sizeof(*user_iph)) - return 0; - diff = v->data_len - e->skb->len; - if (diff < 0) { - if (pskb_trim(e->skb, v->data_len)) - return -ENOMEM; - } else if (diff > 0) { - if (v->data_len > 0xFFFF) - return -EINVAL; - if (diff > skb_tailroom(e->skb)) { - nskb = skb_copy_expand(e->skb, skb_headroom(e->skb), - diff, GFP_ATOMIC); - if (!nskb) { - printk(KERN_WARNING "ip6_queue: OOM " - "in mangle, dropping packet\n"); - return -ENOMEM; - } - kfree_skb(e->skb); - e->skb = nskb; - } - skb_put(e->skb, diff); - } - if (!skb_make_writable(e->skb, v->data_len)) - return -ENOMEM; - skb_copy_to_linear_data(e->skb, v->payload, v->data_len); - e->skb->ip_summed = CHECKSUM_NONE; - - return 0; -} - -static int -ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len) -{ - struct nf_queue_entry *entry; - - if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN) - return -EINVAL; - - entry = ipq_find_dequeue_entry(vmsg->id); - if (entry == NULL) - return -ENOENT; - else { - int verdict = vmsg->value; - - if (vmsg->data_len && vmsg->data_len == len) - if (ipq_mangle_ipv6(vmsg, entry) < 0) - verdict = NF_DROP; - - nf_reinject(entry, verdict); - return 0; - } -} - -static int -ipq_set_mode(unsigned char mode, unsigned int range) -{ - int status; - - spin_lock_bh(&queue_lock); - status = __ipq_set_mode(mode, range); - spin_unlock_bh(&queue_lock); - return status; -} - -static int -ipq_receive_peer(struct ipq_peer_msg *pmsg, - unsigned char type, unsigned int len) -{ - int status = 0; - - if (len < sizeof(*pmsg)) - return -EINVAL; - - switch (type) { - case IPQM_MODE: - status = ipq_set_mode(pmsg->msg.mode.value, - pmsg->msg.mode.range); - break; - - case IPQM_VERDICT: - status = ipq_set_verdict(&pmsg->msg.verdict, - len - sizeof(*pmsg)); - break; - default: - status = -EINVAL; - } - return status; -} - -static int -dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex) -{ - if (entry->indev) - if (entry->indev->ifindex == ifindex) - return 1; - - if (entry->outdev) - if (entry->outdev->ifindex == ifindex) - return 1; -#ifdef CONFIG_BRIDGE_NETFILTER - if (entry->skb->nf_bridge) { - if (entry->skb->nf_bridge->physindev && - entry->skb->nf_bridge->physindev->ifindex == ifindex) - return 1; - if (entry->skb->nf_bridge->physoutdev && - entry->skb->nf_bridge->physoutdev->ifindex == ifindex) - return 1; - } -#endif - return 0; -} - -static void -ipq_dev_drop(int ifindex) -{ - ipq_flush(dev_cmp, ifindex); -} - -#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) - -static inline void -__ipq_rcv_skb(struct sk_buff *skb) -{ - int status, type, pid, flags; - unsigned int nlmsglen, skblen; - struct nlmsghdr *nlh; - bool enable_timestamp = false; - - skblen = skb->len; - if (skblen < sizeof(*nlh)) - return; - - nlh = nlmsg_hdr(skb); - nlmsglen = nlh->nlmsg_len; - if (nlmsglen < sizeof(*nlh) || skblen < 
nlmsglen) - return; - - pid = nlh->nlmsg_pid; - flags = nlh->nlmsg_flags; - - if(pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI) - RCV_SKB_FAIL(-EINVAL); - - if (flags & MSG_TRUNC) - RCV_SKB_FAIL(-ECOMM); - - type = nlh->nlmsg_type; - if (type < NLMSG_NOOP || type >= IPQM_MAX) - RCV_SKB_FAIL(-EINVAL); - - if (type <= IPQM_BASE) - return; - - if (!capable(CAP_NET_ADMIN)) - RCV_SKB_FAIL(-EPERM); - - spin_lock_bh(&queue_lock); - - if (peer_pid) { - if (peer_pid != pid) { - spin_unlock_bh(&queue_lock); - RCV_SKB_FAIL(-EBUSY); - } - } else { - enable_timestamp = true; - peer_pid = pid; - } - - spin_unlock_bh(&queue_lock); - if (enable_timestamp) - net_enable_timestamp(); - - status = ipq_receive_peer(NLMSG_DATA(nlh), type, - nlmsglen - NLMSG_LENGTH(0)); - if (status < 0) - RCV_SKB_FAIL(status); - - if (flags & NLM_F_ACK) - netlink_ack(skb, nlh, 0); -} - -static void -ipq_rcv_skb(struct sk_buff *skb) -{ - mutex_lock(&ipqnl_mutex); - __ipq_rcv_skb(skb); - mutex_unlock(&ipqnl_mutex); -} - -static int -ipq_rcv_dev_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct net_device *dev = ptr; - - if (!net_eq(dev_net(dev), &init_net)) - return NOTIFY_DONE; - - /* Drop any packets associated with the downed device */ - if (event == NETDEV_DOWN) - ipq_dev_drop(dev->ifindex); - return NOTIFY_DONE; -} - -static struct notifier_block ipq_dev_notifier = { - .notifier_call = ipq_rcv_dev_event, -}; - -static int -ipq_rcv_nl_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct netlink_notify *n = ptr; - - if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW) { - spin_lock_bh(&queue_lock); - if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid)) - __ipq_reset(); - spin_unlock_bh(&queue_lock); - } - return NOTIFY_DONE; -} - -static struct notifier_block ipq_nl_notifier = { - .notifier_call = ipq_rcv_nl_event, -}; - -#ifdef CONFIG_SYSCTL -static struct ctl_table_header *ipq_sysctl_header; - -static ctl_table ipq_table[] = { - { - .procname = NET_IPQ_QMAX_NAME, - .data = &queue_maxlen, - .maxlen = sizeof(queue_maxlen), - .mode = 0644, - .proc_handler = proc_dointvec - }, - { } -}; -#endif - -#ifdef CONFIG_PROC_FS -static int ip6_queue_show(struct seq_file *m, void *v) -{ - spin_lock_bh(&queue_lock); - - seq_printf(m, - "Peer PID : %d\n" - "Copy mode : %hu\n" - "Copy range : %u\n" - "Queue length : %u\n" - "Queue max. 
length : %u\n" - "Queue dropped : %u\n" - "Netfilter dropped : %u\n", - peer_pid, - copy_mode, - copy_range, - queue_total, - queue_maxlen, - queue_dropped, - queue_user_dropped); - - spin_unlock_bh(&queue_lock); - return 0; -} - -static int ip6_queue_open(struct inode *inode, struct file *file) -{ - return single_open(file, ip6_queue_show, NULL); -} - -static const struct file_operations ip6_queue_proc_fops = { - .open = ip6_queue_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; -#endif - -static const struct nf_queue_handler nfqh = { - .name = "ip6_queue", - .outfn = &ipq_enqueue_packet, -}; - -static int __init ip6_queue_init(void) -{ - int status = -ENOMEM; - struct proc_dir_entry *proc __maybe_unused; - - netlink_register_notifier(&ipq_nl_notifier); - ipqnl = netlink_kernel_create(&init_net, NETLINK_IP6_FW, 0, - ipq_rcv_skb, NULL, THIS_MODULE); - if (ipqnl == NULL) { - printk(KERN_ERR "ip6_queue: failed to create netlink socket\n"); - goto cleanup_netlink_notifier; - } - -#ifdef CONFIG_PROC_FS - proc = proc_create(IPQ_PROC_FS_NAME, 0, init_net.proc_net, - &ip6_queue_proc_fops); - if (!proc) { - printk(KERN_ERR "ip6_queue: failed to create proc entry\n"); - goto cleanup_ipqnl; - } -#endif - register_netdevice_notifier(&ipq_dev_notifier); -#ifdef CONFIG_SYSCTL - ipq_sysctl_header = register_sysctl_paths(net_ipv6_ctl_path, ipq_table); -#endif - status = nf_register_queue_handler(NFPROTO_IPV6, &nfqh); - if (status < 0) { - printk(KERN_ERR "ip6_queue: failed to register queue handler\n"); - goto cleanup_sysctl; - } - return status; - -cleanup_sysctl: -#ifdef CONFIG_SYSCTL - unregister_sysctl_table(ipq_sysctl_header); -#endif - unregister_netdevice_notifier(&ipq_dev_notifier); - proc_net_remove(&init_net, IPQ_PROC_FS_NAME); - -cleanup_ipqnl: __maybe_unused - netlink_kernel_release(ipqnl); - mutex_lock(&ipqnl_mutex); - mutex_unlock(&ipqnl_mutex); - -cleanup_netlink_notifier: - netlink_unregister_notifier(&ipq_nl_notifier); - return status; -} - -static void __exit ip6_queue_fini(void) -{ - nf_unregister_queue_handlers(&nfqh); - - ipq_flush(NULL, 0); - -#ifdef CONFIG_SYSCTL - unregister_sysctl_table(ipq_sysctl_header); -#endif - unregister_netdevice_notifier(&ipq_dev_notifier); - proc_net_remove(&init_net, IPQ_PROC_FS_NAME); - - netlink_kernel_release(ipqnl); - mutex_lock(&ipqnl_mutex); - mutex_unlock(&ipqnl_mutex); - - netlink_unregister_notifier(&ipq_nl_notifier); -} - -MODULE_DESCRIPTION("IPv6 packet queue handler"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_IP6_FW); - -module_init(ip6_queue_init); -module_exit(ip6_queue_fini); diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 9d4e1555931..d7cb04506c3 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -133,7 +133,7 @@ ip6_packet_match(const struct sk_buff *skb, int protohdr; unsigned short _frag_off; - protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off); + protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL); if (protohdr < 0) { if (_frag_off == 0) *hotdrop = true; @@ -181,8 +181,7 @@ ip6_checkentry(const struct ip6t_ip6 *ipv6) static unsigned int ip6t_error(struct sk_buff *skb, const struct xt_action_param *par) { - if (net_ratelimit()) - pr_info("error: `%s'\n", (const char *)par->targinfo); + net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo); return NF_DROP; } @@ -362,6 +361,7 @@ ip6t_do_table(struct sk_buff *skb, const struct xt_entry_match *ematch; 
IP_NF_ASSERT(e); + acpar.thoff = 0; if (!ip6_packet_match(skb, indev, outdev, &e->ipv6, &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) { no_match: @@ -396,7 +396,7 @@ ip6t_do_table(struct sk_buff *skb, if (v < 0) { /* Pop from stack? */ if (v != XT_RETURN) { - verdict = (unsigned)(-v) - 1; + verdict = (unsigned int)(-v) - 1; break; } if (*stackptr <= origptr) @@ -2278,6 +2278,10 @@ static void __exit ip6_tables_fini(void) * if target < 0. "last header" is transport protocol header, ESP, or * "No next header". * + * Note that *offset is used as an input/output parameter, and if it is not zero, + * then it must be a valid offset to an inner IPv6 header. This can be used + * to explore an inner IPv6 header, e.g. in ICMPv6 error messages. + * * If target header is found, its offset is set in *offset and return protocol * number. Otherwise, return -1. * @@ -2289,17 +2293,33 @@ static void __exit ip6_tables_fini(void) * *offset is meaningless and fragment offset is stored in *fragoff if fragoff * isn't NULL. * + * If flags is not NULL and it's a fragment, then the frag flag IP6T_FH_F_FRAG + * will be set. If it's an AH header, the IP6T_FH_F_AUTH flag is set in *flags, and + * target < 0, then this function will stop at the AH header. */ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, - int target, unsigned short *fragoff) + int target, unsigned short *fragoff, int *flags) { unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); u8 nexthdr = ipv6_hdr(skb)->nexthdr; - unsigned int len = skb->len - start; + unsigned int len; if (fragoff) *fragoff = 0; + if (*offset) { + struct ipv6hdr _ip6, *ip6; + + ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); + if (!ip6 || (ip6->version != 6)) { + printk(KERN_ERR "IPv6 header not found\n"); + return -EBADMSG; + } + start = *offset + sizeof(struct ipv6hdr); + nexthdr = ip6->nexthdr; + } + len = skb->len - start; + while (nexthdr != target) { struct ipv6_opt_hdr _hdr, *hp; unsigned int hdrlen; @@ -2316,6 +2336,9 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, if (nexthdr == NEXTHDR_FRAGMENT) { unsigned short _frag_off; __be16 *fp; + + if (flags) /* Indicate that this is a fragment */ + *flags |= IP6T_FH_F_FRAG; fp = skb_header_pointer(skb, start+offsetof(struct frag_hdr, frag_off), @@ -2336,9 +2359,11 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, return -ENOENT; } hdrlen = 8; - } else if (nexthdr == NEXTHDR_AUTH) + } else if (nexthdr == NEXTHDR_AUTH) { + if (flags && (*flags & IP6T_FH_F_AUTH) && (target < 0)) + break; hdrlen = (hp->hdrlen + 2) << 2; - else + } else hdrlen = ipv6_optlen(hp); nexthdr = hp->nexthdr; diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index aad2fa41cf4..fd4fb34c51c 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c @@ -114,8 +114,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) GFP_ATOMIC); if (!nskb) { - if (net_ratelimit()) - pr_debug("cannot alloc skb\n"); + net_dbg_ratelimited("cannot alloc skb\n"); dst_release(dst); return; } @@ -210,8 +209,7 @@ reject_tg6(struct sk_buff *skb, const struct xt_action_param *par) send_reset(net, skb); break; default: - if (net_ratelimit()) - pr_info("case %u not handled yet\n", reject->with); + net_info_ratelimited("case %u not handled yet\n", reject->with); break; } diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c index 89cccc5a9c9..04099ab7d2e 100644 --- a/net/ipv6/netfilter/ip6t_ah.c +++ b/net/ipv6/netfilter/ip6t_ah.c 
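/*
 * Illustrative sketch, not part of the patch: how a caller might use the
 * extended ipv6_find_hdr() documented above.  *offset is now input/output
 * (0 means "start at the outer IPv6 header") and the new fifth argument
 * reports IP6T_FH_F_FRAG when a fragment header was crossed.  The helper
 * below is hypothetical, and the include locations of the declaration and
 * flag bits are assumptions.
 */
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/netfilter_ipv6/ip6_tables.h>

static int example_locate_tcp_header(const struct sk_buff *skb)
{
        unsigned int thoff = 0;         /* 0: walk from the outer IPv6 header */
        unsigned short fragoff = 0;
        int flags = 0;
        int nexthdr;

        nexthdr = ipv6_find_hdr(skb, &thoff, IPPROTO_TCP, &fragoff, &flags);
        if (nexthdr < 0)
                return nexthdr;         /* -ENOENT, -EBADMSG, ... */

        if (flags & IP6T_FH_F_FRAG)
                pr_debug("TCP header at offset %u behind a fragment header\n",
                         thoff);
        return thoff;
}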
@@ -41,11 +41,11 @@ static bool ah_mt6(const struct sk_buff *skb, struct xt_action_param *par) struct ip_auth_hdr _ah; const struct ip_auth_hdr *ah; const struct ip6t_ah *ahinfo = par->matchinfo; - unsigned int ptr; + unsigned int ptr = 0; unsigned int hdrlen = 0; int err; - err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL); + err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL, NULL); if (err < 0) { if (err != -ENOENT) par->hotdrop = true; diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c index eda898fda6c..3b5735e56bf 100644 --- a/net/ipv6/netfilter/ip6t_frag.c +++ b/net/ipv6/netfilter/ip6t_frag.c @@ -40,10 +40,10 @@ frag_mt6(const struct sk_buff *skb, struct xt_action_param *par) struct frag_hdr _frag; const struct frag_hdr *fh; const struct ip6t_frag *fraginfo = par->matchinfo; - unsigned int ptr; + unsigned int ptr = 0; int err; - err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL); + err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL); if (err < 0) { if (err != -ENOENT) par->hotdrop = true; diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c index 59df051eaef..01df142bb02 100644 --- a/net/ipv6/netfilter/ip6t_hbh.c +++ b/net/ipv6/netfilter/ip6t_hbh.c @@ -50,7 +50,7 @@ hbh_mt6(const struct sk_buff *skb, struct xt_action_param *par) const struct ipv6_opt_hdr *oh; const struct ip6t_opts *optinfo = par->matchinfo; unsigned int temp; - unsigned int ptr; + unsigned int ptr = 0; unsigned int hdrlen = 0; bool ret = false; u8 _opttype; @@ -62,7 +62,7 @@ hbh_mt6(const struct sk_buff *skb, struct xt_action_param *par) err = ipv6_find_hdr(skb, &ptr, (par->match == &hbh_mt6_reg[0]) ? - NEXTHDR_HOP : NEXTHDR_DEST, NULL); + NEXTHDR_HOP : NEXTHDR_DEST, NULL, NULL); if (err < 0) { if (err != -ENOENT) par->hotdrop = true; diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c index d8488c50a8e..2c99b94eeca 100644 --- a/net/ipv6/netfilter/ip6t_rt.c +++ b/net/ipv6/netfilter/ip6t_rt.c @@ -42,14 +42,14 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par) const struct ipv6_rt_hdr *rh; const struct ip6t_rt *rtinfo = par->matchinfo; unsigned int temp; - unsigned int ptr; + unsigned int ptr = 0; unsigned int hdrlen = 0; bool ret = false; struct in6_addr _addr; const struct in6_addr *ap; int err; - err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL); + err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL, NULL); if (err < 0) { if (err != -ENOENT) par->hotdrop = true; diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c index 00d19173db7..4d782405f12 100644 --- a/net/ipv6/netfilter/ip6table_mangle.c +++ b/net/ipv6/netfilter/ip6table_mangle.c @@ -42,8 +42,7 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out) /* root is playing with raw sockets. */ if (skb->len < sizeof(struct iphdr) || ip_hdrlen(skb) < sizeof(struct iphdr)) { - if (net_ratelimit()) - pr_warning("ip6t_hook: happy cracking.\n"); + net_warn_ratelimited("ip6t_hook: happy cracking\n"); return NF_ACCEPT; } #endif diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index 4111050a9fc..3224ef90a21 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -232,8 +232,7 @@ static unsigned int ipv6_conntrack_local(unsigned int hooknum, { /* root is playing with raw sockets. 
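/*
 * Illustrative sketch, not part of the patch: the logging conversion used
 * throughout these hunks.  An open-coded net_ratelimit() check around a
 * printk()/pr_*() call is replaced by the net_<level>_ratelimited() helpers
 * from <linux/net.h>, which bundle the rate-limit test with the print.
 */
#include <linux/kernel.h>
#include <linux/net.h>

static void example_log_short_packet(unsigned int len)
{
        /* Old style, as removed by this series: */
        if (net_ratelimit())
                pr_notice("packet too short: %u\n", len);

        /* New style, as added by this series: */
        net_notice_ratelimited("packet too short: %u\n", len);
}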
*/ if (skb->len < sizeof(struct ipv6hdr)) { - if (net_ratelimit()) - pr_notice("ipv6_conntrack_local: packet too short\n"); + net_notice_ratelimited("ipv6_conntrack_local: packet too short\n"); return NF_ACCEPT; } return __ipv6_conntrack_in(dev_net(out), hooknum, skb, okfn); @@ -278,10 +277,11 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = { static int ipv6_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple) { - NLA_PUT(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4, - &tuple->src.u3.ip6); - NLA_PUT(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4, - &tuple->dst.u3.ip6); + if (nla_put(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4, + &tuple->src.u3.ip6) || + nla_put(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4, + &tuple->dst.u3.ip6)) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index 92cc9f2931a..3e81904fbbc 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c @@ -234,10 +234,10 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl, static int icmpv6_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *t) { - NLA_PUT_BE16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id); - NLA_PUT_U8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type); - NLA_PUT_U8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code); - + if (nla_put_be16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id) || + nla_put_u8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type) || + nla_put_u8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code)) + goto nla_put_failure; return 0; nla_put_failure: @@ -300,8 +300,8 @@ icmpv6_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeout = data; - NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ)); - + if (nla_put_be32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 38f00b0298d..c9c78c2e666 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -444,12 +444,11 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) return head; out_oversize: - if (net_ratelimit()) - printk(KERN_DEBUG "nf_ct_frag6_reasm: payload len = %d\n", payload_len); + net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n", + payload_len); goto out_fail; out_oom: - if (net_ratelimit()) - printk(KERN_DEBUG "nf_ct_frag6_reasm: no memory for reassembly\n"); + net_dbg_ratelimited("nf_ct_frag6_reasm: no memory for reassembly\n"); out_fail: return NULL; } @@ -626,8 +625,8 @@ int nf_ct_frag6_init(void) inet_frags_init(&nf_frags); #ifdef CONFIG_SYSCTL - nf_ct_frag6_sysctl_header = register_sysctl_paths(nf_net_netfilter_sysctl_path, - nf_ct_frag6_sysctl_table); + nf_ct_frag6_sysctl_header = register_net_sysctl(&init_net, "net/netfilter", + nf_ct_frag6_sysctl_table); if (!nf_ct_frag6_sysctl_header) { inet_frags_fini(&nf_frags); return -ENOMEM; @@ -640,7 +639,7 @@ int nf_ct_frag6_init(void) void nf_ct_frag6_cleanup(void) { #ifdef CONFIG_SYSCTL - unregister_sysctl_table(nf_ct_frag6_sysctl_header); + unregister_net_sysctl_table(nf_ct_frag6_sysctl_header); nf_ct_frag6_sysctl_header = NULL; #endif inet_frags_fini(&nf_frags); diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 5bddea77884..93d69836fde 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -72,7 +72,7 @@ static 
struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, const struct in6_addr *rmt_addr, int dif) { struct hlist_node *node; - int is_multicast = ipv6_addr_is_multicast(loc_addr); + bool is_multicast = ipv6_addr_is_multicast(loc_addr); sk_for_each_from(sk, node) if (inet_sk(sk)->inet_num == num) { @@ -153,12 +153,12 @@ EXPORT_SYMBOL(rawv6_mh_filter_unregister); * * Caller owns SKB so we must make clones. */ -static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) +static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) { const struct in6_addr *saddr; const struct in6_addr *daddr; struct sock *sk; - int delivered = 0; + bool delivered = false; __u8 hash; struct net *net; @@ -179,7 +179,7 @@ static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) while (sk) { int filtered; - delivered = 1; + delivered = true; switch (nexthdr) { case IPPROTO_ICMPV6: filtered = icmpv6_filter(sk, skb); @@ -225,7 +225,7 @@ out: return delivered; } -int raw6_local_deliver(struct sk_buff *skb, int nexthdr) +bool raw6_local_deliver(struct sk_buff *skb, int nexthdr) { struct sock *raw_sk; diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 9447bd69873..4ff9af628e7 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -134,15 +134,16 @@ static unsigned int ip6_hashfn(struct inet_frag_queue *q) return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd); } -int ip6_frag_match(struct inet_frag_queue *q, void *a) +bool ip6_frag_match(struct inet_frag_queue *q, void *a) { struct frag_queue *fq; struct ip6_create_arg *arg = a; fq = container_of(q, struct frag_queue, q); - return (fq->id == arg->id && fq->user == arg->user && - ipv6_addr_equal(&fq->saddr, arg->src) && - ipv6_addr_equal(&fq->daddr, arg->dst)); + return fq->id == arg->id && + fq->user == arg->user && + ipv6_addr_equal(&fq->saddr, arg->src) && + ipv6_addr_equal(&fq->daddr, arg->dst); } EXPORT_SYMBOL(ip6_frag_match); @@ -414,6 +415,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, struct sk_buff *fp, *head = fq->q.fragments; int payload_len; unsigned int nhoff; + int sum_truesize; fq_kill(fq); @@ -433,7 +435,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, skb_morph(head, fq->q.fragments); head->next = fq->q.fragments->next; - kfree_skb(fq->q.fragments); + consume_skb(fq->q.fragments); fq->q.fragments = head; } @@ -483,20 +485,33 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, head->mac_header += sizeof(struct frag_hdr); head->network_header += sizeof(struct frag_hdr); - skb_shinfo(head)->frag_list = head->next; skb_reset_transport_header(head); skb_push(head, head->data - skb_network_header(head)); - for (fp=head->next; fp; fp = fp->next) { - head->data_len += fp->len; - head->len += fp->len; + sum_truesize = head->truesize; + for (fp = head->next; fp;) { + bool headstolen; + int delta; + struct sk_buff *next = fp->next; + + sum_truesize += fp->truesize; if (head->ip_summed != fp->ip_summed) head->ip_summed = CHECKSUM_NONE; else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); - head->truesize += fp->truesize; + + if (skb_try_coalesce(head, fp, &headstolen, &delta)) { + kfree_skb_partial(fp, headstolen); + } else { + if (!skb_shinfo(head)->frag_list) + skb_shinfo(head)->frag_list = fp; + head->data_len += fp->len; + head->len += fp->len; + head->truesize += fp->truesize; + } + fp = next; } - atomic_sub(head->truesize, &fq->q.net->mem); + atomic_sub(sum_truesize, &fq->q.net->mem); 
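/*
 * Illustrative sketch, not part of the patch: the NLA_PUT*() to nla_put*()
 * conversion pattern used in the ndisc, conntrack and route hunks of this
 * series.  The old macros hid a "goto nla_put_failure"; the new calls return
 * an error that is checked explicitly.  The attribute type (1) is just a
 * placeholder.
 */
#include <linux/errno.h>
#include <net/netlink.h>

static int example_fill_attr(struct sk_buff *skb, u32 value)
{
        /* Old style:  NLA_PUT_U32(skb, 1, value);  (jumped on failure) */

        /* New style: */
        if (nla_put_u32(skb, 1, value))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}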
head->next = NULL; head->dev = dev; @@ -518,12 +533,10 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, return 1; out_oversize: - if (net_ratelimit()) - printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len); + net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len); goto out_fail; out_oom: - if (net_ratelimit()) - printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n"); + net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n"); out_fail: rcu_read_lock(); IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); @@ -646,7 +659,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net) table[2].data = &net->ipv6.frags.timeout; } - hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table); + hdr = register_net_sysctl(net, "net/ipv6", table); if (hdr == NULL) goto err_reg; @@ -674,7 +687,7 @@ static struct ctl_table_header *ip6_ctl_header; static int ip6_frags_sysctl_register(void) { - ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path, + ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6", ip6_frags_ctl_table); return ip6_ctl_header == NULL ? -ENOMEM : 0; } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index bc4888d902b..999a982ad3f 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -24,6 +24,8 @@ * Fixed routing subtrees. */ +#define pr_fmt(fmt) "IPv6: " fmt + #include <linux/capability.h> #include <linux/errno.h> #include <linux/export.h> @@ -82,7 +84,7 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu); static struct rt6_info *rt6_add_route_info(struct net *net, const struct in6_addr *prefix, int prefixlen, const struct in6_addr *gwaddr, int ifindex, - unsigned pref); + unsigned int pref); static struct rt6_info *rt6_get_route_info(struct net *net, const struct in6_addr *prefix, int prefixlen, const struct in6_addr *gwaddr, int ifindex); @@ -331,22 +333,22 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, } } -static __inline__ int rt6_check_expired(const struct rt6_info *rt) +static bool rt6_check_expired(const struct rt6_info *rt) { struct rt6_info *ort = NULL; if (rt->rt6i_flags & RTF_EXPIRES) { if (time_after(jiffies, rt->dst.expires)) - return 1; + return true; } else if (rt->dst.from) { ort = (struct rt6_info *) rt->dst.from; return (ort->rt6i_flags & RTF_EXPIRES) && time_after(jiffies, ort->dst.expires); } - return 0; + return false; } -static inline int rt6_need_strict(const struct in6_addr *daddr) +static bool rt6_need_strict(const struct in6_addr *daddr) { return ipv6_addr_type(daddr) & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); @@ -794,9 +796,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, goto retry; } - if (net_ratelimit()) - printk(KERN_WARNING - "ipv6: Neighbour table overflow.\n"); + net_warn_ratelimited("Neighbour table overflow\n"); dst_free(&rt->dst); return NULL; } @@ -1282,7 +1282,7 @@ int ip6_route_add(struct fib6_config *cfg) !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) { table = fib6_get_table(net, cfg->fc_table); if (!table) { - printk(KERN_WARNING "IPv6: NLM_F_CREATE should be specified when creating new route\n"); + pr_warn("NLM_F_CREATE should be specified when creating new route\n"); table = fib6_new_table(net, cfg->fc_table); } } else { @@ -1643,9 +1643,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src, rt = ip6_route_redirect(dest, src, saddr, neigh->dev); if (rt == net->ipv6.ip6_null_entry) { - if (net_ratelimit()) - 
printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop " - "for redirect target\n"); + net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n"); goto out; } @@ -1887,7 +1885,7 @@ out: static struct rt6_info *rt6_add_route_info(struct net *net, const struct in6_addr *prefix, int prefixlen, const struct in6_addr *gwaddr, int ifindex, - unsigned pref) + unsigned int pref) { struct fib6_config cfg = { .fc_table = RT6_TABLE_INFO, @@ -2106,9 +2104,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, int err; if (!rt) { - if (net_ratelimit()) - pr_warning("IPv6: Maximum number of routes reached," - " consider increasing route/max_size.\n"); + net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n"); return ERR_PTR(-ENOMEM); } @@ -2217,10 +2213,9 @@ void rt6_ifdown(struct net *net, struct net_device *dev) icmp6_clean_all(fib6_ifdown, &adn); } -struct rt6_mtu_change_arg -{ +struct rt6_mtu_change_arg { struct net_device *dev; - unsigned mtu; + unsigned int mtu; }; static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) @@ -2262,7 +2257,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) return 0; } -void rt6_mtu_change(struct net_device *dev, unsigned mtu) +void rt6_mtu_change(struct net_device *dev, unsigned int mtu) { struct rt6_mtu_change_arg arg = { .dev = dev, @@ -2430,7 +2425,8 @@ static int rt6_fill_node(struct net *net, else table = RT6_TABLE_UNSPEC; rtm->rtm_table = table; - NLA_PUT_U32(skb, RTA_TABLE, table); + if (nla_put_u32(skb, RTA_TABLE, table)) + goto nla_put_failure; if (rt->rt6i_flags & RTF_REJECT) rtm->rtm_type = RTN_UNREACHABLE; else if (rt->rt6i_flags & RTF_LOCAL) @@ -2453,16 +2449,20 @@ static int rt6_fill_node(struct net *net, rtm->rtm_flags |= RTM_F_CLONED; if (dst) { - NLA_PUT(skb, RTA_DST, 16, dst); + if (nla_put(skb, RTA_DST, 16, dst)) + goto nla_put_failure; rtm->rtm_dst_len = 128; } else if (rtm->rtm_dst_len) - NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr); + if (nla_put(skb, RTA_DST, 16, &rt->rt6i_dst.addr)) + goto nla_put_failure; #ifdef CONFIG_IPV6_SUBTREES if (src) { - NLA_PUT(skb, RTA_SRC, 16, src); + if (nla_put(skb, RTA_SRC, 16, src)) + goto nla_put_failure; rtm->rtm_src_len = 128; - } else if (rtm->rtm_src_len) - NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr); + } else if (rtm->rtm_src_len && + nla_put(skb, RTA_SRC, 16, &rt->rt6i_src.addr)) + goto nla_put_failure; #endif if (iif) { #ifdef CONFIG_IPV6_MROUTE @@ -2480,17 +2480,20 @@ static int rt6_fill_node(struct net *net, } } else #endif - NLA_PUT_U32(skb, RTA_IIF, iif); + if (nla_put_u32(skb, RTA_IIF, iif)) + goto nla_put_failure; } else if (dst) { struct in6_addr saddr_buf; - if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0) - NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); + if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 && + nla_put(skb, RTA_PREFSRC, 16, &saddr_buf)) + goto nla_put_failure; } if (rt->rt6i_prefsrc.plen) { struct in6_addr saddr_buf; saddr_buf = rt->rt6i_prefsrc.addr; - NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); + if (nla_put(skb, RTA_PREFSRC, 16, &saddr_buf)) + goto nla_put_failure; } if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) @@ -2506,11 +2509,11 @@ static int rt6_fill_node(struct net *net, } rcu_read_unlock(); - if (rt->dst.dev) - NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex); - - NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric); - + if (rt->dst.dev && + nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) + goto nla_put_failure; + if 
(nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric)) + goto nla_put_failure; if (!(rt->rt6i_flags & RTF_EXPIRES)) expires = 0; else if (rt->dst.expires - jiffies < INT_MAX) @@ -2615,6 +2618,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) { + dst_release(&rt->dst); err = -ENOBUFS; goto errout; } diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index c4ffd174352..60415711563 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -17,6 +17,8 @@ * Fred Templin <fred.l.templin@boeing.com>: isatap support */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/capability.h> #include <linux/errno.h> @@ -87,35 +89,51 @@ struct sit_net { /* often modified stats are per cpu, other are shared (netdev->stats) */ struct pcpu_tstats { - unsigned long rx_packets; - unsigned long rx_bytes; - unsigned long tx_packets; - unsigned long tx_bytes; -} __attribute__((aligned(4*sizeof(unsigned long)))); + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; +}; -static struct net_device_stats *ipip6_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *tot) { - struct pcpu_tstats sum = { 0 }; int i; for_each_possible_cpu(i) { const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); - - sum.rx_packets += tstats->rx_packets; - sum.rx_bytes += tstats->rx_bytes; - sum.tx_packets += tstats->tx_packets; - sum.tx_bytes += tstats->tx_bytes; + u64 rx_packets, rx_bytes, tx_packets, tx_bytes; + unsigned int start; + + do { + start = u64_stats_fetch_begin_bh(&tstats->syncp); + rx_packets = tstats->rx_packets; + tx_packets = tstats->tx_packets; + rx_bytes = tstats->rx_bytes; + tx_bytes = tstats->tx_bytes; + } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); + + tot->rx_packets += rx_packets; + tot->tx_packets += tx_packets; + tot->rx_bytes += rx_bytes; + tot->tx_bytes += tx_bytes; } - dev->stats.rx_packets = sum.rx_packets; - dev->stats.rx_bytes = sum.rx_bytes; - dev->stats.tx_packets = sum.tx_packets; - dev->stats.tx_bytes = sum.tx_bytes; - return &dev->stats; + + tot->rx_errors = dev->stats.rx_errors; + tot->tx_fifo_errors = dev->stats.tx_fifo_errors; + tot->tx_carrier_errors = dev->stats.tx_carrier_errors; + tot->tx_dropped = dev->stats.tx_dropped; + tot->tx_aborted_errors = dev->stats.tx_aborted_errors; + tot->tx_errors = dev->stats.tx_errors; + + return tot; } + /* * Must be invoked with rcu_read_lock */ -static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net, +static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net, struct net_device *dev, __be32 remote, __be32 local) { unsigned int h0 = HASH(remote); @@ -686,12 +704,11 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr); if (neigh == NULL) { - if (net_ratelimit()) - printk(KERN_DEBUG "sit: nexthop == NULL\n"); + net_dbg_ratelimited("sit: nexthop == NULL\n"); goto tx_error; } - addr6 = (const struct in6_addr*)&neigh->primary_key; + addr6 = (const struct in6_addr *)&neigh->primary_key; addr_type = ipv6_addr_type(addr6); if ((addr_type & IPV6_ADDR_UNICAST) && @@ -716,12 +733,11 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr); if (neigh == NULL) { - if (net_ratelimit()) - printk(KERN_DEBUG "sit: nexthop == NULL\n"); + net_dbg_ratelimited("sit: nexthop == NULL\n"); goto tx_error; 
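/*
 * Illustrative sketch, not part of the patch: the per-CPU 64-bit counter
 * pattern that the new ipip6_get_stats64() above relies on.  The demo_*
 * names and the struct are invented for this example; only the u64_stats_*,
 * this_cpu_ptr() and per_cpu_ptr() calls are the real kernel API.
 */
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct demo_tstats {
        u64 rx_packets;
        u64 rx_bytes;
        struct u64_stats_sync syncp;
};

/* Writer side: runs in softirq context on the local CPU's copy. */
static void demo_stats_rx(struct demo_tstats __percpu *stats, unsigned int len)
{
        struct demo_tstats *tstats = this_cpu_ptr(stats);

        u64_stats_update_begin(&tstats->syncp);
        tstats->rx_packets++;
        tstats->rx_bytes += len;
        u64_stats_update_end(&tstats->syncp);
}

/* Reader side: retry until a consistent snapshot is seen (matters on 32-bit). */
static void demo_stats_fold(struct demo_tstats __percpu *stats,
                            u64 *packets, u64 *bytes)
{
        int cpu;

        *packets = 0;
        *bytes = 0;
        for_each_possible_cpu(cpu) {
                const struct demo_tstats *tstats = per_cpu_ptr(stats, cpu);
                unsigned int start;
                u64 p, b;

                do {
                        start = u64_stats_fetch_begin_bh(&tstats->syncp);
                        p = tstats->rx_packets;
                        b = tstats->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

                *packets += p;
                *bytes += b;
        }
}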
} - addr6 = (const struct in6_addr*)&neigh->primary_key; + addr6 = (const struct in6_addr *)&neigh->primary_key; addr_type = ipv6_addr_type(addr6); if (addr_type == IPV6_ADDR_ANY) { @@ -1126,7 +1142,7 @@ static const struct net_device_ops ipip6_netdev_ops = { .ndo_start_xmit = ipip6_tunnel_xmit, .ndo_do_ioctl = ipip6_tunnel_ioctl, .ndo_change_mtu = ipip6_tunnel_change_mtu, - .ndo_get_stats = ipip6_get_stats, + .ndo_get_stats64= ipip6_get_stats64, }; static void ipip6_dev_free(struct net_device *dev) @@ -1287,7 +1303,7 @@ static int __init sit_init(void) { int err; - printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n"); + pr_info("IPv6 over IPv4 tunneling driver\n"); err = register_pernet_device(&sit_net_ops); if (err < 0) @@ -1295,7 +1311,7 @@ static int __init sit_init(void) err = xfrm4_tunnel_register(&sit_handler, AF_INET6); if (err < 0) { unregister_pernet_device(&sit_net_ops); - printk(KERN_INFO "sit init: Can't add protocol\n"); + pr_info("%s: can't add protocol\n", __func__); } return err; } diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index 166a57c47d3..e85c48bd404 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c @@ -16,32 +16,8 @@ #include <net/addrconf.h> #include <net/inet_frag.h> -static struct ctl_table empty[1]; - -static ctl_table ipv6_static_skeleton[] = { - { - .procname = "neigh", - .maxlen = 0, - .mode = 0555, - .child = empty, - }, - { } -}; - static ctl_table ipv6_table_template[] = { { - .procname = "route", - .maxlen = 0, - .mode = 0555, - .child = ipv6_route_table_template - }, - { - .procname = "icmp", - .maxlen = 0, - .mode = 0555, - .child = ipv6_icmp_table_template - }, - { .procname = "bindv6only", .data = &init_net.ipv6.sysctl.bindv6only, .maxlen = sizeof(int), @@ -62,13 +38,6 @@ static ctl_table ipv6_rotable[] = { { } }; -struct ctl_path net_ipv6_ctl_path[] = { - { .procname = "net", }, - { .procname = "ipv6", }, - { }, -}; -EXPORT_SYMBOL_GPL(net_ipv6_ctl_path); - static int __net_init ipv6_sysctl_net_init(struct net *net) { struct ctl_table *ipv6_table; @@ -81,28 +50,37 @@ static int __net_init ipv6_sysctl_net_init(struct net *net) GFP_KERNEL); if (!ipv6_table) goto out; + ipv6_table[0].data = &net->ipv6.sysctl.bindv6only; ipv6_route_table = ipv6_route_sysctl_init(net); if (!ipv6_route_table) goto out_ipv6_table; - ipv6_table[0].child = ipv6_route_table; ipv6_icmp_table = ipv6_icmp_sysctl_init(net); if (!ipv6_icmp_table) goto out_ipv6_route_table; - ipv6_table[1].child = ipv6_icmp_table; - ipv6_table[2].data = &net->ipv6.sysctl.bindv6only; - - net->ipv6.sysctl.table = register_net_sysctl_table(net, net_ipv6_ctl_path, - ipv6_table); - if (!net->ipv6.sysctl.table) + net->ipv6.sysctl.hdr = register_net_sysctl(net, "net/ipv6", ipv6_table); + if (!net->ipv6.sysctl.hdr) goto out_ipv6_icmp_table; + net->ipv6.sysctl.route_hdr = + register_net_sysctl(net, "net/ipv6/route", ipv6_route_table); + if (!net->ipv6.sysctl.route_hdr) + goto out_unregister_ipv6_table; + + net->ipv6.sysctl.icmp_hdr = + register_net_sysctl(net, "net/ipv6/icmp", ipv6_icmp_table); + if (!net->ipv6.sysctl.icmp_hdr) + goto out_unregister_route_table; + err = 0; out: return err; - +out_unregister_route_table: + unregister_net_sysctl_table(net->ipv6.sysctl.route_hdr); +out_unregister_ipv6_table: + unregister_net_sysctl_table(net->ipv6.sysctl.hdr); out_ipv6_icmp_table: kfree(ipv6_icmp_table); out_ipv6_route_table: @@ -118,11 +96,13 @@ static void __net_exit ipv6_sysctl_net_exit(struct net *net) struct ctl_table *ipv6_route_table; struct ctl_table 
*ipv6_icmp_table; - ipv6_table = net->ipv6.sysctl.table->ctl_table_arg; - ipv6_route_table = ipv6_table[0].child; - ipv6_icmp_table = ipv6_table[1].child; + ipv6_table = net->ipv6.sysctl.hdr->ctl_table_arg; + ipv6_route_table = net->ipv6.sysctl.route_hdr->ctl_table_arg; + ipv6_icmp_table = net->ipv6.sysctl.icmp_hdr->ctl_table_arg; - unregister_net_sysctl_table(net->ipv6.sysctl.table); + unregister_net_sysctl_table(net->ipv6.sysctl.icmp_hdr); + unregister_net_sysctl_table(net->ipv6.sysctl.route_hdr); + unregister_net_sysctl_table(net->ipv6.sysctl.hdr); kfree(ipv6_table); kfree(ipv6_route_table); @@ -140,7 +120,7 @@ int ipv6_sysctl_register(void) { int err = -ENOMEM; - ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_rotable); + ip6_header = register_net_sysctl(&init_net, "net/ipv6", ipv6_rotable); if (ip6_header == NULL) goto out; @@ -160,18 +140,3 @@ void ipv6_sysctl_unregister(void) unregister_net_sysctl_table(ip6_header); unregister_pernet_subsys(&ipv6_sysctl_net_ops); } - -static struct ctl_table_header *ip6_base; - -int ipv6_static_sysctl_register(void) -{ - ip6_base = register_sysctl_paths(net_ipv6_ctl_path, ipv6_static_skeleton); - if (ip6_base == NULL) - return -ENOMEM; - return 0; -} - -void ipv6_static_sysctl_unregister(void) -{ - unregister_net_sysctl_table(ip6_base); -} diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 98256cf72f9..554d5999abc 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -723,12 +723,10 @@ static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) NULL, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) { - if (net_ratelimit()) { - printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n", - genhash ? "failed" : "mismatch", - &ip6h->saddr, ntohs(th->source), - &ip6h->daddr, ntohs(th->dest)); - } + net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n", + genhash ? 
"failed" : "mismatch", + &ip6h->saddr, ntohs(th->source), + &ip6h->daddr, ntohs(th->dest)); return 1; } return 0; @@ -1057,7 +1055,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) struct tcp_sock *tp = tcp_sk(sk); __u32 isn = TCP_SKB_CB(skb)->when; struct dst_entry *dst = NULL; - int want_cookie = 0; + bool want_cookie = false; if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_conn_request(sk, skb); @@ -1118,7 +1116,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) while (l-- > 0) *c++ ^= *hash_location++; - want_cookie = 0; /* not our kind of cookie */ + want_cookie = false; /* not our kind of cookie */ tmp_ext.cookie_out_never = 0; /* false */ tmp_ext.cookie_plus = tmp_opt.cookie_plus; } else if (!tp->rx_opt.cookie_in_always) { @@ -1140,7 +1138,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) treq->rmt_addr = ipv6_hdr(skb)->saddr; treq->loc_addr = ipv6_hdr(skb)->daddr; if (!want_cookie || tmp_opt.tstamp_ok) - TCP_ECN_create_request(req, tcp_hdr(skb)); + TCP_ECN_create_request(req, skb); treq->iif = sk->sk_bound_dev_if; @@ -1353,7 +1351,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, newnp->pktoptions = NULL; if (treq->pktopts != NULL) { newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC); - kfree_skb(treq->pktopts); + consume_skb(treq->pktopts); treq->pktopts = NULL; if (newnp->pktoptions) skb_set_owner_r(newnp->pktoptions, newsk); @@ -1658,7 +1656,8 @@ process: if (!tcp_prequeue(sk, skb)) ret = tcp_v6_do_rcv(sk, skb); } - } else if (unlikely(sk_add_backlog(sk, skb))) { + } else if (unlikely(sk_add_backlog(sk, skb, + sk->sk_rcvbuf + sk->sk_sndbuf))) { bh_unlock_sock(sk); NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP); goto discard_and_relse; @@ -1777,6 +1776,7 @@ static const struct inet_connection_sock_af_ops ipv6_specific = { .syn_recv_sock = tcp_v6_syn_recv_sock, .get_peer = tcp_v6_get_peer, .net_header_len = sizeof(struct ipv6hdr), + .net_frag_header_len = sizeof(struct frag_hdr), .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, @@ -1833,64 +1833,15 @@ static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = { static int tcp_v6_init_sock(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); - struct tcp_sock *tp = tcp_sk(sk); - - skb_queue_head_init(&tp->out_of_order_queue); - tcp_init_xmit_timers(sk); - tcp_prequeue_init(tp); - - icsk->icsk_rto = TCP_TIMEOUT_INIT; - tp->mdev = TCP_TIMEOUT_INIT; - /* So many TCP implementations out there (incorrectly) count the - * initial SYN frame in their delayed-ACK and congestion control - * algorithms that we must have the following bandaid to talk - * efficiently to them. -DaveM - */ - tp->snd_cwnd = 2; - - /* See draft-stevens-tcpca-spec-01 for discussion of the - * initialization of these values. - */ - tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; - tp->snd_cwnd_clamp = ~0; - tp->mss_cache = TCP_MSS_DEFAULT; - - tp->reordering = sysctl_tcp_reordering; - - sk->sk_state = TCP_CLOSE; + tcp_init_sock(sk); icsk->icsk_af_ops = &ipv6_specific; - icsk->icsk_ca_ops = &tcp_init_congestion_ops; - icsk->icsk_sync_mss = tcp_sync_mss; - sk->sk_write_space = sk_stream_write_space; - sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); #ifdef CONFIG_TCP_MD5SIG - tp->af_specific = &tcp_sock_ipv6_specific; + tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific; #endif - /* TCP Cookie Transactions */ - if (sysctl_tcp_cookie_size > 0) { - /* Default, cookies without s_data_payload. 
*/ - tp->cookie_values = - kzalloc(sizeof(*tp->cookie_values), - sk->sk_allocation); - if (tp->cookie_values != NULL) - kref_init(&tp->cookie_values->kref); - } - /* Presumed zeroed, in order of appearance: - * cookie_in_always, cookie_out_never, - * s_data_constant, s_data_in, s_data_out - */ - sk->sk_sndbuf = sysctl_tcp_wmem[1]; - sk->sk_rcvbuf = sysctl_tcp_rmem[1]; - - local_bh_disable(); - sock_update_memcg(sk); - sk_sockets_allocated_inc(sk); - local_bh_enable(); - return 0; } diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c index 4f3cec12aa8..4b0f50d9a96 100644 --- a/net/ipv6/tunnel6.c +++ b/net/ipv6/tunnel6.c @@ -19,6 +19,8 @@ * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> */ +#define pr_fmt(fmt) "IPv6: " fmt + #include <linux/icmpv6.h> #include <linux/init.h> #include <linux/module.h> @@ -160,11 +162,11 @@ static const struct inet6_protocol tunnel46_protocol = { static int __init tunnel6_init(void) { if (inet6_add_protocol(&tunnel6_protocol, IPPROTO_IPV6)) { - printk(KERN_ERR "tunnel6 init(): can't add protocol\n"); + pr_err("%s: can't add protocol\n", __func__); return -EAGAIN; } if (inet6_add_protocol(&tunnel46_protocol, IPPROTO_IPIP)) { - printk(KERN_ERR "tunnel6 init(): can't add protocol\n"); + pr_err("%s: can't add protocol\n", __func__); inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6); return -EAGAIN; } @@ -174,9 +176,9 @@ static int __init tunnel6_init(void) static void __exit tunnel6_fini(void) { if (inet6_del_protocol(&tunnel46_protocol, IPPROTO_IPIP)) - printk(KERN_ERR "tunnel6 close: can't remove protocol\n"); + pr_err("%s: can't remove protocol\n", __func__); if (inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6)) - printk(KERN_ERR "tunnel6 close: can't remove protocol\n"); + pr_err("%s: can't remove protocol\n", __func__); } module_init(tunnel6_init); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 37b0699e95e..f05099fc590 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -103,7 +103,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum) { unsigned int hash2_nulladdr = udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum); - unsigned int hash2_partial = + unsigned int hash2_partial = udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0); /* precompute partial secondary hash */ @@ -349,7 +349,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, bool slow; if (addr_len) - *addr_len=sizeof(struct sockaddr_in6); + *addr_len = sizeof(struct sockaddr_in6); if (flags & MSG_ERRQUEUE) return ipv6_recv_error(sk, msg, len); @@ -496,6 +496,28 @@ out: sock_put(sk); } +static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) +{ + int rc; + + if (!ipv6_addr_any(&inet6_sk(sk)->daddr)) + sock_rps_save_rxhash(sk, skb); + + rc = sock_queue_rcv_skb(sk, skb); + if (rc < 0) { + int is_udplite = IS_UDPLITE(sk); + + /* Note that an ENOMEM error is charged twice */ + if (rc == -ENOMEM) + UDP6_INC_STATS_BH(sock_net(sk), + UDP_MIB_RCVBUFERRORS, is_udplite); + UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); + kfree_skb(skb); + return -1; + } + return 0; +} + static __inline__ void udpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info ) @@ -503,18 +525,54 @@ static __inline__ void udpv6_err(struct sk_buff *skb, __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table); } -int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) +static struct static_key udpv6_encap_needed __read_mostly; +void udpv6_encap_enable(void) +{ + if (!static_key_enabled(&udpv6_encap_needed)) + 
static_key_slow_inc(&udpv6_encap_needed); +} +EXPORT_SYMBOL(udpv6_encap_enable); + +int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { struct udp_sock *up = udp_sk(sk); int rc; int is_udplite = IS_UDPLITE(sk); - if (!ipv6_addr_any(&inet6_sk(sk)->daddr)) - sock_rps_save_rxhash(sk, skb); - if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto drop; + if (static_key_false(&udpv6_encap_needed) && up->encap_type) { + int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); + + /* + * This is an encapsulation socket so pass the skb to + * the socket's udp_encap_rcv() hook. Otherwise, just + * fall through and pass this up the UDP socket. + * up->encap_rcv() returns the following value: + * =0 if skb was successfully passed to the encap + * handler or was discarded by it. + * >0 if skb should be passed on to UDP. + * <0 if skb should be resubmitted as proto -N + */ + + /* if we're overly short, let UDP handle it */ + encap_rcv = ACCESS_ONCE(up->encap_rcv); + if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) { + int ret; + + ret = encap_rcv(sk, skb); + if (ret <= 0) { + UDP_INC_STATS_BH(sock_net(sk), + UDP_MIB_INDATAGRAMS, + is_udplite); + return -ret; + } + } + + /* FALLTHROUGH -- it's a UDP Packet */ + } + /* * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c). */ @@ -539,21 +597,25 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) goto drop; } + if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) + goto drop; + skb_dst_drop(skb); - rc = sock_queue_rcv_skb(sk, skb); - if (rc < 0) { - /* Note that an ENOMEM error is charged twice */ - if (rc == -ENOMEM) - UDP6_INC_STATS_BH(sock_net(sk), - UDP_MIB_RCVBUFERRORS, is_udplite); - goto drop_no_sk_drops_inc; + + bh_lock_sock(sk); + rc = 0; + if (!sock_owned_by_user(sk)) + rc = __udpv6_queue_rcv_skb(sk, skb); + else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { + bh_unlock_sock(sk); + goto drop; } + bh_unlock_sock(sk); - return 0; + return rc; drop: - atomic_inc(&sk->sk_drops); -drop_no_sk_drops_inc: UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); + atomic_inc(&sk->sk_drops); kfree_skb(skb); return -1; } @@ -602,37 +664,27 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk, static void flush_stack(struct sock **stack, unsigned int count, struct sk_buff *skb, unsigned int final) { - unsigned int i; + struct sk_buff *skb1 = NULL; struct sock *sk; - struct sk_buff *skb1; + unsigned int i; for (i = 0; i < count; i++) { - skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); - sk = stack[i]; - if (skb1) { - if (sk_rcvqueues_full(sk, skb1)) { - kfree_skb(skb1); - goto drop; - } - bh_lock_sock(sk); - if (!sock_owned_by_user(sk)) - udpv6_queue_rcv_skb(sk, skb1); - else if (sk_add_backlog(sk, skb1)) { - kfree_skb(skb1); - bh_unlock_sock(sk); - goto drop; - } - bh_unlock_sock(sk); - continue; + if (likely(skb1 == NULL)) + skb1 = (i == final) ? 
skb : skb_clone(skb, GFP_ATOMIC); + if (!skb1) { + atomic_inc(&sk->sk_drops); + UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, + IS_UDPLITE(sk)); + UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, + IS_UDPLITE(sk)); } -drop: - atomic_inc(&sk->sk_drops); - UDP6_INC_STATS_BH(sock_net(sk), - UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); - UDP6_INC_STATS_BH(sock_net(sk), - UDP_MIB_INERRORS, IS_UDPLITE(sk)); + + if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0) + skb1 = NULL; } + if (unlikely(skb1)) + kfree_skb(skb1); } /* * Note: called only from the BH handler context, @@ -772,39 +824,29 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, * for sock caches... i'll skip this for now. */ sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); + if (sk != NULL) { + int ret = udpv6_queue_rcv_skb(sk, skb); + sock_put(sk); - if (sk == NULL) { - if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) - goto discard; - - if (udp_lib_checksum_complete(skb)) - goto discard; - UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, - proto == IPPROTO_UDPLITE); - - icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); + /* a return value > 0 means to resubmit the input, but + * it wants the return to be -protocol, or 0 + */ + if (ret > 0) + return -ret; - kfree_skb(skb); return 0; } - /* deliver */ - - if (sk_rcvqueues_full(sk, skb)) { - sock_put(sk); + if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard; - } - bh_lock_sock(sk); - if (!sock_owned_by_user(sk)) - udpv6_queue_rcv_skb(sk, skb); - else if (sk_add_backlog(sk, skb)) { - atomic_inc(&sk->sk_drops); - bh_unlock_sock(sk); - sock_put(sk); + + if (udp_lib_checksum_complete(skb)) goto discard; - } - bh_unlock_sock(sk); - sock_put(sk); + + UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); + icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); + + kfree_skb(skb); return 0; short_packet: @@ -1337,7 +1379,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, * do checksum of UDP packets sent as multiple IP fragments. 
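/*
 * Illustrative sketch, not part of the patch: how a tunnel protocol attaches
 * to the UDP encapsulation hook that udpv6_queue_rcv_skb() above now honours.
 * demo_encap_rcv() and demo_setup_encap() are invented names; the return
 * convention (0 = consumed, >0 = hand the skb back to UDP, <0 = resubmit as
 * protocol -N) is the one documented in the hunk above.
 */
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/udp.h>
#include <net/udp.h>

static int demo_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
        if (!pskb_may_pull(skb, sizeof(struct udphdr) + 4))
                return 1;               /* not ours: let plain UDP handle it */

        /* ... decapsulate and queue to the tunnel session here ... */
        kfree_skb(skb);
        return 0;                       /* consumed */
}

static void demo_setup_encap(struct sock *sk)
{
        udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
        udp_sk(sk)->encap_rcv = demo_encap_rcv;
#if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == PF_INET6)
                udpv6_encap_enable();   /* flips the static key added above */
        else
#endif
                udp_encap_enable();
}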
*/ offset = skb_checksum_start_offset(skb); - csum = skb_checksum(skb, offset, skb->len- offset, 0); + csum = skb_checksum(skb, offset, skb->len - offset, 0); offset += skb->csum_offset; *(__sum16 *)(skb->data + offset) = csum_fold(csum); skb->ip_summed = CHECKSUM_NONE; @@ -1471,7 +1513,7 @@ struct proto udpv6_prot = { .getsockopt = udpv6_getsockopt, .sendmsg = udpv6_sendmsg, .recvmsg = udpv6_recvmsg, - .backlog_rcv = udpv6_queue_rcv_skb, + .backlog_rcv = __udpv6_queue_rcv_skb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, .rehash = udp_v6_rehash, diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 8ea65e03273..8625fba96db 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -334,8 +334,8 @@ int __init xfrm6_init(void) goto out_policy; #ifdef CONFIG_SYSCTL - sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path, - xfrm6_policy_table); + sysctl_hdr = register_net_sysctl(&init_net, "net/ipv6", + xfrm6_policy_table); #endif out: return ret; diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index 4fe1db12d2a..ee5a7065aac 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c @@ -68,9 +68,9 @@ static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock); static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly; -static inline unsigned xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr) +static inline unsigned int xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr) { - unsigned h; + unsigned int h; h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]); h ^= h >> 16; @@ -80,7 +80,7 @@ static inline unsigned xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr) return h; } -static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi) +static inline unsigned int xfrm6_tunnel_spi_hash_byspi(u32 spi) { return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE; } diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 9680226640e..dfd6faaf0ea 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -983,10 +983,6 @@ static int ipxitf_create(struct ipx_interface_definition *idef) goto out; switch (idef->ipx_dlink_type) { - case IPX_FRAME_TR_8022: - printk(KERN_WARNING "IPX frame type 802.2TR is " - "obsolete Use 802.2 instead.\n"); - /* fall through */ case IPX_FRAME_8022: dlink_type = htons(ETH_P_802_2); datalink = p8022_datalink; @@ -996,10 +992,7 @@ static int ipxitf_create(struct ipx_interface_definition *idef) dlink_type = htons(ETH_P_IPX); datalink = pEII_datalink; break; - } else - printk(KERN_WARNING "IPX frame type EtherII over " - "token-ring is obsolete. 
Use SNAP " - "instead.\n"); + } /* fall through */ case IPX_FRAME_SNAP: dlink_type = htons(ETH_P_SNAP); @@ -1275,7 +1268,6 @@ const char *ipx_frame_name(__be16 frame) case ETH_P_802_2: rc = "802.2"; break; case ETH_P_SNAP: rc = "SNAP"; break; case ETH_P_802_3: rc = "802.3"; break; - case ETH_P_TR_802_2: rc = "802.2TR"; break; } return rc; @@ -1909,9 +1901,7 @@ static int ipx_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) (const unsigned short __user *)argp); break; case SIOCGSTAMP: - rc = -EINVAL; - if (sk) - rc = sock_get_timestamp(sk, argp); + rc = sock_get_timestamp(sk, argp); break; case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: diff --git a/net/ipx/sysctl_net_ipx.c b/net/ipx/sysctl_net_ipx.c index bd6dca00fb8..ad7c03dedaa 100644 --- a/net/ipx/sysctl_net_ipx.c +++ b/net/ipx/sysctl_net_ipx.c @@ -8,6 +8,7 @@ #include <linux/mm.h> #include <linux/sysctl.h> +#include <net/net_namespace.h> #ifndef CONFIG_SYSCTL #error This file should not be compiled without CONFIG_SYSCTL defined @@ -27,20 +28,14 @@ static struct ctl_table ipx_table[] = { { }, }; -static struct ctl_path ipx_path[] = { - { .procname = "net", }, - { .procname = "ipx", }, - { } -}; - static struct ctl_table_header *ipx_table_header; void ipx_register_sysctl(void) { - ipx_table_header = register_sysctl_paths(ipx_path, ipx_table); + ipx_table_header = register_net_sysctl(&init_net, "net/ipx", ipx_table); } void ipx_unregister_sysctl(void) { - unregister_sysctl_table(ipx_table_header); + unregister_net_sysctl_table(ipx_table_header); } diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/net/irda/ircomm/ircomm_tty_ioctl.c index 77c5e6499f8..d0667d68351 100644 --- a/net/irda/ircomm/ircomm_tty_ioctl.c +++ b/net/irda/ircomm/ircomm_tty_ioctl.c @@ -54,7 +54,7 @@ */ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self) { - unsigned cflag, cval; + unsigned int cflag, cval; int baud; IRDA_DEBUG(2, "%s()\n", __func__ ); diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c index 2615ffc8e78..de73f6496db 100644 --- a/net/irda/irsysctl.c +++ b/net/irda/irsysctl.c @@ -235,12 +235,6 @@ static ctl_table irda_table[] = { { } }; -static struct ctl_path irda_path[] = { - { .procname = "net", }, - { .procname = "irda", }, - { } -}; - static struct ctl_table_header *irda_table_header; /* @@ -251,7 +245,7 @@ static struct ctl_table_header *irda_table_header; */ int __init irda_sysctl_register(void) { - irda_table_header = register_sysctl_paths(irda_path, irda_table); + irda_table_header = register_net_sysctl(&init_net, "net/irda", irda_table); if (!irda_table_header) return -ENOMEM; @@ -266,7 +260,7 @@ int __init irda_sysctl_register(void) */ void irda_sysctl_unregister(void) { - unregister_sysctl_table(irda_table_header); + unregister_net_sysctl_table(irda_table_header); } diff --git a/net/key/af_key.c b/net/key/af_key.c index 7e5d927b576..34e418508a6 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1714,7 +1714,7 @@ static int key_notify_sa_flush(const struct km_event *c) static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) { struct net *net = sock_net(sk); - unsigned proto; + unsigned int proto; struct km_event c; struct xfrm_audit audit_info; int err, err2; @@ -3547,7 +3547,7 @@ static int pfkey_sendmsg(struct kiocb *kiocb, goto out; err = -EMSGSIZE; - if ((unsigned)len > sk->sk_sndbuf - 32) + if ((unsigned int)len > sk->sk_sndbuf - 32) goto out; err = -ENOBUFS; diff --git a/net/l2tp/Makefile b/net/l2tp/Makefile index 110e7bc2de5..2870f41ea44 100644 --- 
a/net/l2tp/Makefile +++ b/net/l2tp/Makefile @@ -10,3 +10,6 @@ obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip.o obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_V3)) += l2tp_netlink.o obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_ETH)) += l2tp_eth.o obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_DEBUGFS)) += l2tp_debugfs.o +ifneq ($(CONFIG_IPV6),) +obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip6.o +endif diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 89ff8c67943..32b2155e7ab 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -18,6 +18,8 @@ * published by the Free Software Foundation. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/string.h> #include <linux/list.h> @@ -53,6 +55,10 @@ #include <net/inet_common.h> #include <net/xfrm.h> #include <net/protocol.h> +#include <net/inet6_connection_sock.h> +#include <net/inet_ecn.h> +#include <net/ip6_route.h> +#include <net/ip6_checksum.h> #include <asm/byteorder.h> #include <linux/atomic.h> @@ -82,12 +88,6 @@ /* Default trace flags */ #define L2TP_DEFAULT_DEBUG_FLAGS 0 -#define PRINTK(_mask, _type, _lvl, _fmt, args...) \ - do { \ - if ((_mask) & (_type)) \ - printk(_lvl "L2TP: " _fmt, ##args); \ - } while (0) - /* Private data stored for received packets in the skb. */ struct l2tp_skb_cb { @@ -137,14 +137,20 @@ static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel) l2tp_tunnel_free(tunnel); } #ifdef L2TP_REFCNT_DEBUG -#define l2tp_tunnel_inc_refcount(_t) do { \ - printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \ - l2tp_tunnel_inc_refcount_1(_t); \ - } while (0) -#define l2tp_tunnel_dec_refcount(_t) do { \ - printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \ - l2tp_tunnel_dec_refcount_1(_t); \ - } while (0) +#define l2tp_tunnel_inc_refcount(_t) \ +do { \ + pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", \ + __func__, __LINE__, (_t)->name, \ + atomic_read(&_t->ref_count)); \ + l2tp_tunnel_inc_refcount_1(_t); \ +} while (0) +#define l2tp_tunnel_dec_refcount(_t) +do { \ + pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \ + __func__, __LINE__, (_t)->name, \ + atomic_read(&_t->ref_count)); \ + l2tp_tunnel_dec_refcount_1(_t); \ +} while (0) #else #define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t) #define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t) @@ -326,16 +332,20 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk struct sk_buff *skbp; struct sk_buff *tmp; u32 ns = L2TP_SKB_CB(skb)->ns; + struct l2tp_stats *sstats; spin_lock_bh(&session->reorder_q.lock); + sstats = &session->stats; skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { if (L2TP_SKB_CB(skbp)->ns > ns) { __skb_queue_before(&session->reorder_q, skbp, skb); - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", - session->name, ns, L2TP_SKB_CB(skbp)->ns, - skb_queue_len(&session->reorder_q)); - session->stats.rx_oos_packets++; + l2tp_dbg(session, L2TP_MSG_SEQ, + "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", + session->name, ns, L2TP_SKB_CB(skbp)->ns, + skb_queue_len(&session->reorder_q)); + u64_stats_update_begin(&sstats->syncp); + sstats->rx_oos_packets++; + u64_stats_update_end(&sstats->syncp); goto out; } } @@ -352,16 +362,23 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session 
*session, struct sk_buff * { struct l2tp_tunnel *tunnel = session->tunnel; int length = L2TP_SKB_CB(skb)->length; + struct l2tp_stats *tstats, *sstats; /* We're about to requeue the skb, so return resources * to its current owner (a socket receive buffer). */ skb_orphan(skb); - tunnel->stats.rx_packets++; - tunnel->stats.rx_bytes += length; - session->stats.rx_packets++; - session->stats.rx_bytes += length; + tstats = &tunnel->stats; + u64_stats_update_begin(&tstats->syncp); + sstats = &session->stats; + u64_stats_update_begin(&sstats->syncp); + tstats->rx_packets++; + tstats->rx_bytes += length; + sstats->rx_packets++; + sstats->rx_bytes += length; + u64_stats_update_end(&tstats->syncp); + u64_stats_update_end(&sstats->syncp); if (L2TP_SKB_CB(skb)->has_seq) { /* Bump our Nr */ @@ -371,8 +388,8 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff * else session->nr &= 0xffffff; - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: updated nr to %hu\n", session->name, session->nr); + l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %hu\n", + session->name, session->nr); } /* call private receive handler */ @@ -392,6 +409,7 @@ static void l2tp_recv_dequeue(struct l2tp_session *session) { struct sk_buff *skb; struct sk_buff *tmp; + struct l2tp_stats *sstats; /* If the pkt at the head of the queue has the nr that we * expect to send up next, dequeue it and any other @@ -399,16 +417,19 @@ static void l2tp_recv_dequeue(struct l2tp_session *session) */ start: spin_lock_bh(&session->reorder_q.lock); + sstats = &session->stats; skb_queue_walk_safe(&session->reorder_q, skb, tmp) { if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { - session->stats.rx_seq_discards++; - session->stats.rx_errors++; - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: oos pkt %u len %d discarded (too old), " - "waiting for %u, reorder_q_len=%d\n", - session->name, L2TP_SKB_CB(skb)->ns, - L2TP_SKB_CB(skb)->length, session->nr, - skb_queue_len(&session->reorder_q)); + u64_stats_update_begin(&sstats->syncp); + sstats->rx_seq_discards++; + sstats->rx_errors++; + u64_stats_update_end(&sstats->syncp); + l2tp_dbg(session, L2TP_MSG_SEQ, + "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n", + session->name, L2TP_SKB_CB(skb)->ns, + L2TP_SKB_CB(skb)->length, session->nr, + skb_queue_len(&session->reorder_q)); + session->reorder_skip = 1; __skb_unlink(skb, &session->reorder_q); kfree_skb(skb); if (session->deref) @@ -417,13 +438,20 @@ start: } if (L2TP_SKB_CB(skb)->has_seq) { + if (session->reorder_skip) { + l2tp_dbg(session, L2TP_MSG_SEQ, + "%s: advancing nr to next pkt: %u -> %u", + session->name, session->nr, + L2TP_SKB_CB(skb)->ns); + session->reorder_skip = 0; + session->nr = L2TP_SKB_CB(skb)->ns; + } if (L2TP_SKB_CB(skb)->ns != session->nr) { - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: holding oos pkt %u len %d, " - "waiting for %u, reorder_q_len=%d\n", - session->name, L2TP_SKB_CB(skb)->ns, - L2TP_SKB_CB(skb)->length, session->nr, - skb_queue_len(&session->reorder_q)); + l2tp_dbg(session, L2TP_MSG_SEQ, + "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n", + session->name, L2TP_SKB_CB(skb)->ns, + L2TP_SKB_CB(skb)->length, session->nr, + skb_queue_len(&session->reorder_q)); goto out; } } @@ -446,21 +474,43 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk, { struct udphdr *uh = udp_hdr(skb); u16 ulen = ntohs(uh->len); - struct inet_sock *inet; __wsum psum; - if (sk->sk_no_check || skb_csum_unnecessary(skb) || 
!uh->check) + if (sk->sk_no_check || skb_csum_unnecessary(skb)) return 0; - inet = inet_sk(sk); - psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen, - IPPROTO_UDP, 0); - - if ((skb->ip_summed == CHECKSUM_COMPLETE) && - !csum_fold(csum_add(psum, skb->csum))) - return 0; - - skb->csum = psum; +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == PF_INET6) { + if (!uh->check) { + LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n"); + return 1; + } + if ((skb->ip_summed == CHECKSUM_COMPLETE) && + !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, ulen, + IPPROTO_UDP, skb->csum)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + return 0; + } + skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + skb->len, IPPROTO_UDP, + 0)); + } else +#endif + { + struct inet_sock *inet; + if (!uh->check) + return 0; + inet = inet_sk(sk); + psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, + ulen, IPPROTO_UDP, 0); + + if ((skb->ip_summed == CHECKSUM_COMPLETE) && + !csum_fold(csum_add(psum, skb->csum))) + return 0; + skb->csum = psum; + } return __skb_checksum_complete(skb); } @@ -532,6 +582,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, struct l2tp_tunnel *tunnel = session->tunnel; int offset; u32 ns, nr; + struct l2tp_stats *sstats = &session->stats; /* The ref count is increased since we now hold a pointer to * the session. Take care to decrement the refcnt when exiting @@ -544,10 +595,13 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, /* Parse and check optional cookie */ if (session->peer_cookie_len > 0) { if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) { - PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, - "%s: cookie mismatch (%u/%u). Discarding.\n", - tunnel->name, tunnel->tunnel_id, session->session_id); - session->stats.rx_cookie_discards++; + l2tp_info(tunnel, L2TP_MSG_DATA, + "%s: cookie mismatch (%u/%u). Discarding.\n", + tunnel->name, tunnel->tunnel_id, + session->session_id); + u64_stats_update_begin(&sstats->syncp); + sstats->rx_cookie_discards++; + u64_stats_update_end(&sstats->syncp); goto discard; } ptr += session->peer_cookie_len; @@ -573,9 +627,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, L2TP_SKB_CB(skb)->ns = ns; L2TP_SKB_CB(skb)->has_seq = 1; - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: recv data ns=%u, nr=%u, session nr=%u\n", - session->name, ns, nr, session->nr); + l2tp_dbg(session, L2TP_MSG_SEQ, + "%s: recv data ns=%u, nr=%u, session nr=%u\n", + session->name, ns, nr, session->nr); } } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) { u32 l2h = ntohl(*(__be32 *) ptr); @@ -587,9 +641,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, L2TP_SKB_CB(skb)->ns = ns; L2TP_SKB_CB(skb)->has_seq = 1; - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: recv data ns=%u, session nr=%u\n", - session->name, ns, session->nr); + l2tp_dbg(session, L2TP_MSG_SEQ, + "%s: recv data ns=%u, session nr=%u\n", + session->name, ns, session->nr); } } @@ -602,9 +656,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, * configure it so. 
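/*
 * Illustrative sketch, not part of the patch: the checksum test used by the
 * new IPv6 branch of l2tp_verify_udp_checksum() above, for the case where
 * the device already summed the packet (CHECKSUM_COMPLETE).
 * demo_udp6_csum_ok() is an invented helper name.
 */
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <net/ip6_checksum.h>

static bool demo_udp6_csum_ok(const struct sk_buff *skb, u16 ulen)
{
        /* csum_ipv6_magic() folds the pseudo-header into the accumulated sum;
         * a result of zero means the packet checksum verifies. */
        return skb->ip_summed == CHECKSUM_COMPLETE &&
               !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
                                ulen, IPPROTO_UDP, skb->csum);
}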
*/ if ((!session->lns_mode) && (!session->send_seq)) { - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO, - "%s: requested to enable seq numbers by LNS\n", - session->name); + l2tp_info(session, L2TP_MSG_SEQ, + "%s: requested to enable seq numbers by LNS\n", + session->name); session->send_seq = -1; l2tp_session_set_header_len(session, tunnel->version); } @@ -613,10 +667,12 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, * If user has configured mandatory sequence numbers, discard. */ if (session->recv_seq) { - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING, - "%s: recv data has no seq numbers when required. " - "Discarding\n", session->name); - session->stats.rx_seq_discards++; + l2tp_warn(session, L2TP_MSG_SEQ, + "%s: recv data has no seq numbers when required. Discarding.\n", + session->name); + u64_stats_update_begin(&sstats->syncp); + sstats->rx_seq_discards++; + u64_stats_update_end(&sstats->syncp); goto discard; } @@ -626,16 +682,18 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, * LAC is broken. Discard the frame. */ if ((!session->lns_mode) && (session->send_seq)) { - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO, - "%s: requested to disable seq numbers by LNS\n", - session->name); + l2tp_info(session, L2TP_MSG_SEQ, + "%s: requested to disable seq numbers by LNS\n", + session->name); session->send_seq = 0; l2tp_session_set_header_len(session, tunnel->version); } else if (session->send_seq) { - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING, - "%s: recv data has no seq numbers when required. " - "Discarding\n", session->name); - session->stats.rx_seq_discards++; + l2tp_warn(session, L2TP_MSG_SEQ, + "%s: recv data has no seq numbers when required. Discarding.\n", + session->name); + u64_stats_update_begin(&sstats->syncp); + sstats->rx_seq_discards++; + u64_stats_update_end(&sstats->syncp); goto discard; } } @@ -689,13 +747,14 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, * packets */ if (L2TP_SKB_CB(skb)->ns != session->nr) { - session->stats.rx_seq_discards++; - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: oos pkt %u len %d discarded, " - "waiting for %u, reorder_q_len=%d\n", - session->name, L2TP_SKB_CB(skb)->ns, - L2TP_SKB_CB(skb)->length, session->nr, - skb_queue_len(&session->reorder_q)); + u64_stats_update_begin(&sstats->syncp); + sstats->rx_seq_discards++; + u64_stats_update_end(&sstats->syncp); + l2tp_dbg(session, L2TP_MSG_SEQ, + "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n", + session->name, L2TP_SKB_CB(skb)->ns, + L2TP_SKB_CB(skb)->length, session->nr, + skb_queue_len(&session->reorder_q)); goto discard; } skb_queue_tail(&session->reorder_q, skb); @@ -716,7 +775,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, return; discard: - session->stats.rx_errors++; + u64_stats_update_begin(&sstats->syncp); + sstats->rx_errors++; + u64_stats_update_end(&sstats->syncp); kfree_skb(skb); if (session->deref) @@ -739,9 +800,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, unsigned char *ptr, *optr; u16 hdrflags; u32 tunnel_id, session_id; - int offset; u16 version; int length; + struct l2tp_stats *tstats; if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) goto discard_bad_csum; @@ -751,8 +812,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, /* Short packet? 
*/ if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) { - PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, - "%s: recv short packet (len=%d)\n", tunnel->name, skb->len); + l2tp_info(tunnel, L2TP_MSG_DATA, + "%s: recv short packet (len=%d)\n", + tunnel->name, skb->len); goto error; } @@ -762,14 +824,8 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, if (!pskb_may_pull(skb, length)) goto error; - printk(KERN_DEBUG "%s: recv: ", tunnel->name); - - offset = 0; - do { - printk(" %02X", skb->data[offset]); - } while (++offset < length); - - printk("\n"); + pr_debug("%s: recv\n", tunnel->name); + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length); } /* Point to L2TP header */ @@ -781,9 +837,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, /* Check protocol version */ version = hdrflags & L2TP_HDR_VER_MASK; if (version != tunnel->version) { - PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, - "%s: recv protocol version mismatch: got %d expected %d\n", - tunnel->name, version, tunnel->version); + l2tp_info(tunnel, L2TP_MSG_DATA, + "%s: recv protocol version mismatch: got %d expected %d\n", + tunnel->name, version, tunnel->version); goto error; } @@ -792,8 +848,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, /* If type is control packet, it is handled by userspace. */ if (hdrflags & L2TP_HDRFLAG_T) { - PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG, - "%s: recv control packet, len=%d\n", tunnel->name, length); + l2tp_dbg(tunnel, L2TP_MSG_DATA, + "%s: recv control packet, len=%d\n", + tunnel->name, length); goto error; } @@ -821,9 +878,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id); if (!session || !session->recv_skb) { /* Not found? Pass to userspace to deal with */ - PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, - "%s: no session found (%u/%u). Passing up.\n", - tunnel->name, tunnel_id, session_id); + l2tp_info(tunnel, L2TP_MSG_DATA, + "%s: no session found (%u/%u). 
Passing up.\n", + tunnel->name, tunnel_id, session_id); goto error; } @@ -834,7 +891,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, discard_bad_csum: LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name); UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0); - tunnel->stats.rx_errors++; + tstats = &tunnel->stats; + u64_stats_update_begin(&tstats->syncp); + tstats->rx_errors++; + u64_stats_update_end(&tstats->syncp); kfree_skb(skb); return 0; @@ -860,8 +920,8 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (tunnel == NULL) goto pass_up; - PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG, - "%s: received %d bytes\n", tunnel->name, skb->len); + l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n", + tunnel->name, skb->len); if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook)) goto pass_up_put; @@ -903,8 +963,8 @@ static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf) *bufp++ = 0; session->ns++; session->ns &= 0xffff; - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: updated ns to %u\n", session->name, session->ns); + l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n", + session->name, session->ns); } return bufp - optr; @@ -940,8 +1000,9 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf) l2h = 0x40000000 | session->ns; session->ns++; session->ns &= 0xffffff; - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: updated ns to %u\n", session->name, session->ns); + l2tp_dbg(session, L2TP_MSG_SEQ, + "%s: updated ns to %u\n", + session->name, session->ns); } *((__be32 *) bufp) = htonl(l2h); @@ -960,46 +1021,50 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, struct l2tp_tunnel *tunnel = session->tunnel; unsigned int len = skb->len; int error; + struct l2tp_stats *tstats, *sstats; /* Debug */ if (session->send_seq) - PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG, - "%s: send %Zd bytes, ns=%u\n", session->name, - data_len, session->ns - 1); + l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes, ns=%u\n", + session->name, data_len, session->ns - 1); else - PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG, - "%s: send %Zd bytes\n", session->name, data_len); + l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes\n", + session->name, data_len); if (session->debug & L2TP_MSG_DATA) { - int i; int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? 
sizeof(struct udphdr) : 0; unsigned char *datap = skb->data + uhlen; - printk(KERN_DEBUG "%s: xmit:", session->name); - for (i = 0; i < (len - uhlen); i++) { - printk(" %02X", *datap++); - if (i == 31) { - printk(" ..."); - break; - } - } - printk("\n"); + pr_debug("%s: xmit\n", session->name); + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, + datap, min_t(size_t, 32, len - uhlen)); } /* Queue the packet to IP for output */ skb->local_df = 1; - error = ip_queue_xmit(skb, fl); +#if IS_ENABLED(CONFIG_IPV6) + if (skb->sk->sk_family == PF_INET6) + error = inet6_csk_xmit(skb, NULL); + else +#endif + error = ip_queue_xmit(skb, fl); /* Update stats */ + tstats = &tunnel->stats; + u64_stats_update_begin(&tstats->syncp); + sstats = &session->stats; + u64_stats_update_begin(&sstats->syncp); if (error >= 0) { - tunnel->stats.tx_packets++; - tunnel->stats.tx_bytes += len; - session->stats.tx_packets++; - session->stats.tx_bytes += len; + tstats->tx_packets++; + tstats->tx_bytes += len; + sstats->tx_packets++; + sstats->tx_bytes += len; } else { - tunnel->stats.tx_errors++; - session->stats.tx_errors++; + tstats->tx_errors++; + sstats->tx_errors++; } + u64_stats_update_end(&tstats->syncp); + u64_stats_update_end(&sstats->syncp); return 0; } @@ -1021,6 +1086,31 @@ static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk) skb->destructor = l2tp_sock_wfree; } +#if IS_ENABLED(CONFIG_IPV6) +static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb, + int udp_len) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + struct udphdr *uh = udp_hdr(skb); + + if (!skb_dst(skb) || !skb_dst(skb)->dev || + !(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) { + __wsum csum = skb_checksum(skb, 0, udp_len, 0); + skb->ip_summed = CHECKSUM_UNNECESSARY; + uh->check = csum_ipv6_magic(&np->saddr, &np->daddr, udp_len, + IPPROTO_UDP, csum); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + } else { + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + uh->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, + udp_len, IPPROTO_UDP, 0); + } +} +#endif + /* If caller requires the skb to have a ppp header, the header must be * inserted in the skb data before calling this function. 
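/*
 * Illustrative sketch, not part of the patch: print_hex_dump_bytes(), which
 * replaces the hand-rolled per-byte printk loops in the debug paths above.
 * demo_dump() is an invented name.
 */
#include <linux/kernel.h>
#include <linux/printk.h>

static void demo_dump(const void *buf, size_t len)
{
        /* Emits at KERN_DEBUG, offset-prefixed, 16 bytes per output line. */
        print_hex_dump_bytes("demo: ", DUMP_PREFIX_OFFSET, buf,
                             min_t(size_t, 32, len));
}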
*/ @@ -1089,6 +1179,11 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len uh->check = 0; /* Calculate UDP checksum if configured to do so */ +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == PF_INET6) + l2tp_xmit_ipv6_csum(sk, skb, udp_len); + else +#endif if (sk->sk_no_check == UDP_CSUM_NOXMIT) skb->ip_summed = CHECKSUM_NONE; else if ((skb_dst(skb) && skb_dst(skb)->dev) && @@ -1141,8 +1236,7 @@ static void l2tp_tunnel_destruct(struct sock *sk) if (tunnel == NULL) goto end; - PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO, - "%s: closing...\n", tunnel->name); + l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name); /* Close all sessions */ l2tp_tunnel_closeall(tunnel); @@ -1184,8 +1278,8 @@ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) BUG_ON(tunnel == NULL); - PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO, - "%s: closing all sessions...\n", tunnel->name); + l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n", + tunnel->name); write_lock_bh(&tunnel->hlist_lock); for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { @@ -1193,8 +1287,8 @@ again: hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { session = hlist_entry(walk, struct l2tp_session, hlist); - PRINTK(session->debug, L2TP_MSG_CONTROL, KERN_INFO, - "%s: closing session\n", session->name); + l2tp_info(session, L2TP_MSG_CONTROL, + "%s: closing session\n", session->name); hlist_del_init(&session->hlist); @@ -1247,8 +1341,7 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) BUG_ON(atomic_read(&tunnel->ref_count) != 0); BUG_ON(tunnel->sock != NULL); - PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO, - "%s: free...\n", tunnel->name); + l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name); /* Remove from tunnel list */ spin_lock_bh(&pn->l2tp_tunnel_list_lock); @@ -1268,31 +1361,69 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t { int err = -EINVAL; struct sockaddr_in udp_addr; +#if IS_ENABLED(CONFIG_IPV6) + struct sockaddr_in6 udp6_addr; + struct sockaddr_l2tpip6 ip6_addr; +#endif struct sockaddr_l2tpip ip_addr; struct socket *sock = NULL; switch (cfg->encap) { case L2TP_ENCAPTYPE_UDP: - err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp); - if (err < 0) - goto out; +#if IS_ENABLED(CONFIG_IPV6) + if (cfg->local_ip6 && cfg->peer_ip6) { + err = sock_create(AF_INET6, SOCK_DGRAM, 0, sockp); + if (err < 0) + goto out; - sock = *sockp; + sock = *sockp; - memset(&udp_addr, 0, sizeof(udp_addr)); - udp_addr.sin_family = AF_INET; - udp_addr.sin_addr = cfg->local_ip; - udp_addr.sin_port = htons(cfg->local_udp_port); - err = kernel_bind(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr)); - if (err < 0) - goto out; + memset(&udp6_addr, 0, sizeof(udp6_addr)); + udp6_addr.sin6_family = AF_INET6; + memcpy(&udp6_addr.sin6_addr, cfg->local_ip6, + sizeof(udp6_addr.sin6_addr)); + udp6_addr.sin6_port = htons(cfg->local_udp_port); + err = kernel_bind(sock, (struct sockaddr *) &udp6_addr, + sizeof(udp6_addr)); + if (err < 0) + goto out; - udp_addr.sin_family = AF_INET; - udp_addr.sin_addr = cfg->peer_ip; - udp_addr.sin_port = htons(cfg->peer_udp_port); - err = kernel_connect(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr), 0); - if (err < 0) - goto out; + udp6_addr.sin6_family = AF_INET6; + memcpy(&udp6_addr.sin6_addr, cfg->peer_ip6, + sizeof(udp6_addr.sin6_addr)); + udp6_addr.sin6_port = htons(cfg->peer_udp_port); + err = kernel_connect(sock, + (struct sockaddr *) &udp6_addr, + sizeof(udp6_addr), 0); + 
if (err < 0) + goto out; + } else +#endif + { + err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp); + if (err < 0) + goto out; + + sock = *sockp; + + memset(&udp_addr, 0, sizeof(udp_addr)); + udp_addr.sin_family = AF_INET; + udp_addr.sin_addr = cfg->local_ip; + udp_addr.sin_port = htons(cfg->local_udp_port); + err = kernel_bind(sock, (struct sockaddr *) &udp_addr, + sizeof(udp_addr)); + if (err < 0) + goto out; + + udp_addr.sin_family = AF_INET; + udp_addr.sin_addr = cfg->peer_ip; + udp_addr.sin_port = htons(cfg->peer_udp_port); + err = kernel_connect(sock, + (struct sockaddr *) &udp_addr, + sizeof(udp_addr), 0); + if (err < 0) + goto out; + } if (!cfg->use_udp_checksums) sock->sk->sk_no_check = UDP_CSUM_NOXMIT; @@ -1300,27 +1431,61 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t break; case L2TP_ENCAPTYPE_IP: - err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, sockp); - if (err < 0) - goto out; +#if IS_ENABLED(CONFIG_IPV6) + if (cfg->local_ip6 && cfg->peer_ip6) { + err = sock_create(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP, + sockp); + if (err < 0) + goto out; - sock = *sockp; + sock = *sockp; - memset(&ip_addr, 0, sizeof(ip_addr)); - ip_addr.l2tp_family = AF_INET; - ip_addr.l2tp_addr = cfg->local_ip; - ip_addr.l2tp_conn_id = tunnel_id; - err = kernel_bind(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr)); - if (err < 0) - goto out; + memset(&ip6_addr, 0, sizeof(ip6_addr)); + ip6_addr.l2tp_family = AF_INET6; + memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6, + sizeof(ip6_addr.l2tp_addr)); + ip6_addr.l2tp_conn_id = tunnel_id; + err = kernel_bind(sock, (struct sockaddr *) &ip6_addr, + sizeof(ip6_addr)); + if (err < 0) + goto out; - ip_addr.l2tp_family = AF_INET; - ip_addr.l2tp_addr = cfg->peer_ip; - ip_addr.l2tp_conn_id = peer_tunnel_id; - err = kernel_connect(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr), 0); - if (err < 0) - goto out; + ip6_addr.l2tp_family = AF_INET6; + memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6, + sizeof(ip6_addr.l2tp_addr)); + ip6_addr.l2tp_conn_id = peer_tunnel_id; + err = kernel_connect(sock, + (struct sockaddr *) &ip6_addr, + sizeof(ip6_addr), 0); + if (err < 0) + goto out; + } else +#endif + { + err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, + sockp); + if (err < 0) + goto out; + + sock = *sockp; + + memset(&ip_addr, 0, sizeof(ip_addr)); + ip_addr.l2tp_family = AF_INET; + ip_addr.l2tp_addr = cfg->local_ip; + ip_addr.l2tp_conn_id = tunnel_id; + err = kernel_bind(sock, (struct sockaddr *) &ip_addr, + sizeof(ip_addr)); + if (err < 0) + goto out; + ip_addr.l2tp_family = AF_INET; + ip_addr.l2tp_addr = cfg->peer_ip; + ip_addr.l2tp_conn_id = peer_tunnel_id; + err = kernel_connect(sock, (struct sockaddr *) &ip_addr, + sizeof(ip_addr), 0); + if (err < 0) + goto out; + } break; default: @@ -1357,7 +1522,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 err = -EBADF; sock = sockfd_lookup(fd, &err); if (!sock) { - printk(KERN_ERR "tunl %hu: sockfd_lookup(fd=%d) returned %d\n", + pr_err("tunl %hu: sockfd_lookup(fd=%d) returned %d\n", tunnel_id, fd, err); goto err; } @@ -1373,7 +1538,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 case L2TP_ENCAPTYPE_UDP: err = -EPROTONOSUPPORT; if (sk->sk_protocol != IPPROTO_UDP) { - printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n", + pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP); goto err; } @@ -1381,7 +1546,7 @@ int l2tp_tunnel_create(struct net 
*net, int fd, int version, u32 tunnel_id, u32 case L2TP_ENCAPTYPE_IP: err = -EPROTONOSUPPORT; if (sk->sk_protocol != IPPROTO_L2TP) { - printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n", + pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP); goto err; } @@ -1424,6 +1589,12 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == PF_INET6) + udpv6_encap_enable(); + else +#endif + udp_encap_enable(); } sk->sk_user_data = tunnel; @@ -1577,7 +1748,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn session->session_id = session_id; session->peer_session_id = peer_session_id; - session->nr = 1; + session->nr = 0; sprintf(&session->name[0], "sess %u/%u", tunnel->tunnel_id, session->session_id); @@ -1683,7 +1854,7 @@ static int __init l2tp_init(void) if (rc) goto out; - printk(KERN_INFO "L2TP core driver, %s\n", L2TP_DRV_VERSION); + pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION); out: return rc; diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index a16a48e79fa..a38ec6cdeee 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -45,6 +45,7 @@ struct l2tp_stats { u64 rx_oos_packets; u64 rx_errors; u64 rx_cookie_discards; + struct u64_stats_sync syncp; }; struct l2tp_tunnel; @@ -54,15 +55,15 @@ struct l2tp_tunnel; */ struct l2tp_session_cfg { enum l2tp_pwtype pw_type; - unsigned data_seq:2; /* data sequencing level + unsigned int data_seq:2; /* data sequencing level * 0 => none, 1 => IP only, * 2 => all */ - unsigned recv_seq:1; /* expect receive packets with + unsigned int recv_seq:1; /* expect receive packets with * sequence numbers? */ - unsigned send_seq:1; /* send packets with sequence + unsigned int send_seq:1; /* send packets with sequence * numbers? */ - unsigned lns_mode:1; /* behave as LNS? LAC enables + unsigned int lns_mode:1; /* behave as LNS? LAC enables * sequence numbers under * control of LNS. */ int debug; /* bitmask of debug message @@ -107,21 +108,22 @@ struct l2tp_session { char name[32]; /* for logging */ char ifname[IFNAMSIZ]; - unsigned data_seq:2; /* data sequencing level + unsigned int data_seq:2; /* data sequencing level * 0 => none, 1 => IP only, * 2 => all */ - unsigned recv_seq:1; /* expect receive packets with + unsigned int recv_seq:1; /* expect receive packets with * sequence numbers? */ - unsigned send_seq:1; /* send packets with sequence + unsigned int send_seq:1; /* send packets with sequence * numbers? */ - unsigned lns_mode:1; /* behave as LNS? LAC enables + unsigned int lns_mode:1; /* behave as LNS? LAC enables * sequence numbers under * control of LNS. 
*/ int debug; /* bitmask of debug message * categories */ int reorder_timeout; /* configured reorder timeout * (in jiffies) */ + int reorder_skip; /* set if skip to next nr */ int mtu; int mru; enum l2tp_pwtype pwtype; @@ -150,6 +152,10 @@ struct l2tp_tunnel_cfg { /* Used only for kernel-created sockets */ struct in_addr local_ip; struct in_addr peer_ip; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr *local_ip6; + struct in6_addr *peer_ip6; +#endif u16 local_udp_port; u16 peer_udp_port; unsigned int use_udp_checksums:1; @@ -255,17 +261,36 @@ static inline void l2tp_session_dec_refcount_1(struct l2tp_session *session) } #ifdef L2TP_REFCNT_DEBUG -#define l2tp_session_inc_refcount(_s) do { \ - printk(KERN_DEBUG "l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \ - l2tp_session_inc_refcount_1(_s); \ - } while (0) -#define l2tp_session_dec_refcount(_s) do { \ - printk(KERN_DEBUG "l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \ - l2tp_session_dec_refcount_1(_s); \ - } while (0) +#define l2tp_session_inc_refcount(_s) \ +do { \ + pr_debug("l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n", \ + __func__, __LINE__, (_s)->name, \ + atomic_read(&_s->ref_count)); \ + l2tp_session_inc_refcount_1(_s); \ +} while (0) +#define l2tp_session_dec_refcount(_s) \ +do { \ + pr_debug("l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", \ + __func__, __LINE__, (_s)->name, \ + atomic_read(&_s->ref_count)); \ + l2tp_session_dec_refcount_1(_s); \ +} while (0) #else #define l2tp_session_inc_refcount(s) l2tp_session_inc_refcount_1(s) #define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s) #endif +#define l2tp_printk(ptr, type, func, fmt, ...) \ +do { \ + if (((ptr)->debug) & (type)) \ + func(fmt, ##__VA_ARGS__); \ +} while (0) + +#define l2tp_warn(ptr, type, fmt, ...) \ + l2tp_printk(ptr, type, pr_warn, fmt, ##__VA_ARGS__) +#define l2tp_info(ptr, type, fmt, ...) \ + l2tp_printk(ptr, type, pr_info, fmt, ##__VA_ARGS__) +#define l2tp_dbg(ptr, type, fmt, ...) \ + l2tp_printk(ptr, type, pr_debug, fmt, ##__VA_ARGS__) + #endif /* _L2TP_CORE_H_ */ diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 76130134bfa..c3813bc8455 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -9,6 +9,8 @@ * 2 of the License, or (at your option) any later version. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/skbuff.h> #include <linux/socket.h> @@ -122,6 +124,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id); if (tunnel->sock) { struct inet_sock *inet = inet_sk(tunnel->sock); + +#if IS_ENABLED(CONFIG_IPV6) + if (tunnel->sock->sk_family == AF_INET6) { + struct ipv6_pinfo *np = inet6_sk(tunnel->sock); + seq_printf(m, " from %pI6c to %pI6c\n", + &np->saddr, &np->daddr); + } else +#endif seq_printf(m, " from %pI4 to %pI4\n", &inet->inet_saddr, &inet->inet_daddr); if (tunnel->encap == L2TP_ENCAPTYPE_UDP) @@ -317,11 +327,11 @@ static int __init l2tp_debugfs_init(void) if (tunnels == NULL) rc = -EIO; - printk(KERN_INFO "L2TP debugfs support\n"); + pr_info("L2TP debugfs support\n"); out: if (rc) - printk(KERN_WARNING "l2tp debugfs: unable to init\n"); + pr_warn("unable to init\n"); return rc; } diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 63fe5f353f0..443591d629c 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -9,6 +9,8 @@ * 2 of the License, or (at your option) any later version. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/skbuff.h> #include <linux/socket.h> @@ -115,21 +117,14 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, if (session->debug & L2TP_MSG_DATA) { unsigned int length; - int offset; u8 *ptr = skb->data; length = min(32u, skb->len); if (!pskb_may_pull(skb, length)) goto error; - printk(KERN_DEBUG "%s: eth recv: ", session->name); - - offset = 0; - do { - printk(" %02X", ptr[offset]); - } while (++offset < length); - - printk("\n"); + pr_debug("%s: eth recv\n", session->name); + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); } if (!pskb_may_pull(skb, sizeof(ETH_HLEN))) @@ -308,7 +303,7 @@ static int __init l2tp_eth_init(void) if (err) goto out_unreg; - printk(KERN_INFO "L2TP ethernet pseudowire support (L2TPv3)\n"); + pr_info("L2TP ethernet pseudowire support (L2TPv3)\n"); return 0; diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 6274f0be82b..889f5d13d7b 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -9,6 +9,8 @@ * 2 of the License, or (at your option) any later version. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/icmp.h> #include <linux/module.h> #include <linux/skbuff.h> @@ -32,15 +34,8 @@ struct l2tp_ip_sock { /* inet_sock has to be the first member of l2tp_ip_sock */ struct inet_sock inet; - __u32 conn_id; - __u32 peer_conn_id; - - __u64 tx_packets; - __u64 tx_bytes; - __u64 tx_errors; - __u64 rx_packets; - __u64 rx_bytes; - __u64 rx_errors; + u32 conn_id; + u32 peer_conn_id; }; static DEFINE_RWLOCK(l2tp_ip_lock); @@ -127,7 +122,6 @@ static int l2tp_ip_recv(struct sk_buff *skb) struct l2tp_session *session; struct l2tp_tunnel *tunnel = NULL; int length; - int offset; /* Point to L2TP header */ optr = ptr = skb->data; @@ -162,14 +156,8 @@ static int l2tp_ip_recv(struct sk_buff *skb) if (!pskb_may_pull(skb, length)) goto discard; - printk(KERN_DEBUG "%s: ip recv: ", tunnel->name); - - offset = 0; - do { - printk(" %02X", ptr[offset]); - } while (++offset < length); - - printk("\n"); + pr_debug("%s: ip recv\n", tunnel->name); + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); } l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); @@ -298,68 +286,27 @@ out_in_use: static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr; - struct inet_sock *inet = inet_sk(sk); - struct flowi4 *fl4; - struct rtable *rt; - __be32 saddr; - int oif, rc; + int rc; - rc = -EINVAL; if (addr_len < sizeof(*lsa)) - goto out; + return -EINVAL; - rc = -EAFNOSUPPORT; - if (lsa->l2tp_family != AF_INET) - goto out; - - lock_sock(sk); - - sk_dst_reset(sk); - - oif = sk->sk_bound_dev_if; - saddr = inet->inet_saddr; - - rc = -EINVAL; if (ipv4_is_multicast(lsa->l2tp_addr.s_addr)) - goto out; + return -EINVAL; - fl4 = &inet->cork.fl.u.ip4; - rt = ip_route_connect(fl4, lsa->l2tp_addr.s_addr, saddr, - RT_CONN_FLAGS(sk), oif, - IPPROTO_L2TP, - 0, 0, sk, true); - if (IS_ERR(rt)) { - rc = PTR_ERR(rt); - if (rc == -ENETUNREACH) - IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES); - goto out; - } + rc = ip4_datagram_connect(sk, uaddr, addr_len); + if (rc < 0) + return rc; - rc = -ENETUNREACH; - if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { - ip_rt_put(rt); - goto out; - } + lock_sock(sk); l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; - if (!inet->inet_saddr) - inet->inet_saddr = fl4->saddr; - if (!inet->inet_rcv_saddr) - inet->inet_rcv_saddr = fl4->saddr; - inet->inet_daddr = fl4->daddr; - sk->sk_state = TCP_ESTABLISHED; - inet->inet_id = jiffies; - - sk_dst_set(sk, &rt->dst); - write_lock_bh(&l2tp_ip_lock); hlist_del_init(&sk->sk_bind_node); sk_add_bind_node(sk, &l2tp_ip_bind_table); write_unlock_bh(&l2tp_ip_lock); - rc = 0; -out: release_sock(sk); return rc; } @@ -414,7 +361,6 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m { struct sk_buff *skb; int rc; - struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk); struct inet_sock *inet = inet_sk(sk); struct rtable *rt = NULL; struct flowi4 *fl4; @@ -514,14 +460,8 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m rcu_read_unlock(); error: - /* Update stats */ - if (rc >= 0) { - lsa->tx_packets++; - lsa->tx_bytes += len; + if (rc >= 0) rc = len; - } else { - lsa->tx_errors++; - } out: release_sock(sk); @@ -539,7 +479,6 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); - struct l2tp_ip_sock *lsk = 
l2tp_ip_sk(sk); size_t copied = 0; int err = -EOPNOTSUPP; struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; @@ -581,15 +520,7 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m done: skb_free_datagram(sk, skb); out: - if (err) { - lsk->rx_errors++; - return err; - } - - lsk->rx_packets++; - lsk->rx_bytes += copied; - - return copied; + return err ? err : copied; } static struct proto l2tp_ip_prot = { @@ -657,7 +588,7 @@ static int __init l2tp_ip_init(void) { int err; - printk(KERN_INFO "L2TP IP encapsulation support (L2TPv3)\n"); + pr_info("L2TP IP encapsulation support (L2TPv3)\n"); err = proto_register(&l2tp_ip_prot, 1); if (err != 0) diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c new file mode 100644 index 00000000000..0291d8d85f3 --- /dev/null +++ b/net/l2tp/l2tp_ip6.c @@ -0,0 +1,787 @@ +/* + * L2TPv3 IP encapsulation support for IPv6 + * + * Copyright (c) 2012 Katalix Systems Ltd + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/icmp.h> +#include <linux/module.h> +#include <linux/skbuff.h> +#include <linux/random.h> +#include <linux/socket.h> +#include <linux/l2tp.h> +#include <linux/in.h> +#include <linux/in6.h> +#include <net/sock.h> +#include <net/ip.h> +#include <net/icmp.h> +#include <net/udp.h> +#include <net/inet_common.h> +#include <net/inet_hashtables.h> +#include <net/tcp_states.h> +#include <net/protocol.h> +#include <net/xfrm.h> + +#include <net/transp_v6.h> +#include <net/addrconf.h> +#include <net/ip6_route.h> + +#include "l2tp_core.h" + +struct l2tp_ip6_sock { + /* inet_sock has to be the first member of l2tp_ip6_sock */ + struct inet_sock inet; + + u32 conn_id; + u32 peer_conn_id; + + /* ipv6_pinfo has to be the last member of l2tp_ip6_sock, see + inet6_sk_generic */ + struct ipv6_pinfo inet6; +}; + +static DEFINE_RWLOCK(l2tp_ip6_lock); +static struct hlist_head l2tp_ip6_table; +static struct hlist_head l2tp_ip6_bind_table; + +static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk) +{ + return (struct l2tp_ip6_sock *)sk; +} + +static struct sock *__l2tp_ip6_bind_lookup(struct net *net, + struct in6_addr *laddr, + int dif, u32 tunnel_id) +{ + struct hlist_node *node; + struct sock *sk; + + sk_for_each_bound(sk, node, &l2tp_ip6_bind_table) { + struct in6_addr *addr = inet6_rcv_saddr(sk); + struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk); + + if (l2tp == NULL) + continue; + + if ((l2tp->conn_id == tunnel_id) && + net_eq(sock_net(sk), net) && + !(addr && ipv6_addr_equal(addr, laddr)) && + !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) + goto found; + } + + sk = NULL; +found: + return sk; +} + +static inline struct sock *l2tp_ip6_bind_lookup(struct net *net, + struct in6_addr *laddr, + int dif, u32 tunnel_id) +{ + struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id); + if (sk) + sock_hold(sk); + + return sk; +} + +/* When processing receive frames, there are two cases to + * consider. Data frames consist of a non-zero session-id and an + * optional cookie. Control frames consist of a regular L2TP header + * preceded by 32-bits of zeros. 
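 *
 * (Editorial illustration, not part of the patch: as concrete bytes, a
 *  data packet for session 0x2a starts
 *
 *      00 00 00 2a  <optional cookie>  <payload>
 *
 *  while a control packet for control connection 0x2a starts
 *
 *      00 00 00 00   c8 03   00 14   00 00 00 2a   00 01 00 00
 *      zero sess-id  T/L/S   Length  Conn ID       Ns / Nr
 *
 *  The (data[0] & 0xc0) check in the pass_up path below corresponds to the
 *  mandatory T and L bits. 0x2a and the Length/Ns/Nr values are arbitrary
 *  example values chosen for this sketch; the field layouts are drawn out
 *  below.)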
+ * + * L2TPv3 Session Header Over IP + * + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Session ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Cookie (optional, maximum 64 bits)... + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * L2TPv3 Control Message Header Over IP + * + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | (32 bits of zeros) | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Control Connection ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Ns | Nr | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * All control frames are passed to userspace. + */ +static int l2tp_ip6_recv(struct sk_buff *skb) +{ + struct sock *sk; + u32 session_id; + u32 tunnel_id; + unsigned char *ptr, *optr; + struct l2tp_session *session; + struct l2tp_tunnel *tunnel = NULL; + int length; + + /* Point to L2TP header */ + optr = ptr = skb->data; + + if (!pskb_may_pull(skb, 4)) + goto discard; + + session_id = ntohl(*((__be32 *) ptr)); + ptr += 4; + + /* RFC3931: L2TP/IP packets have the first 4 bytes containing + * the session_id. If it is 0, the packet is a L2TP control + * frame and the session_id value can be discarded. + */ + if (session_id == 0) { + __skb_pull(skb, 4); + goto pass_up; + } + + /* Ok, this is a data packet. Lookup the session. */ + session = l2tp_session_find(&init_net, NULL, session_id); + if (session == NULL) + goto discard; + + tunnel = session->tunnel; + if (tunnel == NULL) + goto discard; + + /* Trace packet contents, if enabled */ + if (tunnel->debug & L2TP_MSG_DATA) { + length = min(32u, skb->len); + if (!pskb_may_pull(skb, length)) + goto discard; + + pr_debug("%s: ip recv\n", tunnel->name); + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); + } + + l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, + tunnel->recv_payload_hook); + return 0; + +pass_up: + /* Get the tunnel_id from the L2TP header */ + if (!pskb_may_pull(skb, 12)) + goto discard; + + if ((skb->data[0] & 0xc0) != 0xc0) + goto discard; + + tunnel_id = ntohl(*(__be32 *) &skb->data[4]); + tunnel = l2tp_tunnel_find(&init_net, tunnel_id); + if (tunnel != NULL) + sk = tunnel->sock; + else { + struct ipv6hdr *iph = ipv6_hdr(skb); + + read_lock_bh(&l2tp_ip6_lock); + sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr, + 0, tunnel_id); + read_unlock_bh(&l2tp_ip6_lock); + } + + if (sk == NULL) + goto discard; + + sock_hold(sk); + + if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) + goto discard_put; + + nf_reset(skb); + + return sk_receive_skb(sk, skb, 1); + +discard_put: + sock_put(sk); + +discard: + kfree_skb(skb); + return 0; +} + +static int l2tp_ip6_open(struct sock *sk) +{ + /* Prevent autobind. We don't have ports. 
*/ + inet_sk(sk)->inet_num = IPPROTO_L2TP; + + write_lock_bh(&l2tp_ip6_lock); + sk_add_node(sk, &l2tp_ip6_table); + write_unlock_bh(&l2tp_ip6_lock); + + return 0; +} + +static void l2tp_ip6_close(struct sock *sk, long timeout) +{ + write_lock_bh(&l2tp_ip6_lock); + hlist_del_init(&sk->sk_bind_node); + sk_del_node_init(sk); + write_unlock_bh(&l2tp_ip6_lock); + + sk_common_release(sk); +} + +static void l2tp_ip6_destroy_sock(struct sock *sk) +{ + lock_sock(sk); + ip6_flush_pending_frames(sk); + release_sock(sk); + + inet6_destroy_sock(sk); +} + +static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) +{ + struct inet_sock *inet = inet_sk(sk); + struct ipv6_pinfo *np = inet6_sk(sk); + struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr; + __be32 v4addr = 0; + int addr_type; + int err; + + if (addr_len < sizeof(*addr)) + return -EINVAL; + + addr_type = ipv6_addr_type(&addr->l2tp_addr); + + /* l2tp_ip6 sockets are IPv6 only */ + if (addr_type == IPV6_ADDR_MAPPED) + return -EADDRNOTAVAIL; + + /* L2TP is point-point, not multicast */ + if (addr_type & IPV6_ADDR_MULTICAST) + return -EADDRNOTAVAIL; + + err = -EADDRINUSE; + read_lock_bh(&l2tp_ip6_lock); + if (__l2tp_ip6_bind_lookup(&init_net, &addr->l2tp_addr, + sk->sk_bound_dev_if, addr->l2tp_conn_id)) + goto out_in_use; + read_unlock_bh(&l2tp_ip6_lock); + + lock_sock(sk); + + err = -EINVAL; + if (sk->sk_state != TCP_CLOSE) + goto out_unlock; + + /* Check if the address belongs to the host. */ + rcu_read_lock(); + if (addr_type != IPV6_ADDR_ANY) { + struct net_device *dev = NULL; + + if (addr_type & IPV6_ADDR_LINKLOCAL) { + if (addr_len >= sizeof(struct sockaddr_in6) && + addr->l2tp_scope_id) { + /* Override any existing binding, if another + * one is supplied by user. + */ + sk->sk_bound_dev_if = addr->l2tp_scope_id; + } + + /* Binding to link-local address requires an + interface */ + if (!sk->sk_bound_dev_if) + goto out_unlock_rcu; + + err = -ENODEV; + dev = dev_get_by_index_rcu(sock_net(sk), + sk->sk_bound_dev_if); + if (!dev) + goto out_unlock_rcu; + } + + /* ipv4 addr of the socket is invalid. Only the + * unspecified and mapped address have a v4 equivalent. 
+ */ + v4addr = LOOPBACK4_IPV6; + err = -EADDRNOTAVAIL; + if (!ipv6_chk_addr(sock_net(sk), &addr->l2tp_addr, dev, 0)) + goto out_unlock_rcu; + } + rcu_read_unlock(); + + inet->inet_rcv_saddr = inet->inet_saddr = v4addr; + np->rcv_saddr = addr->l2tp_addr; + np->saddr = addr->l2tp_addr; + + l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id; + + write_lock_bh(&l2tp_ip6_lock); + sk_add_bind_node(sk, &l2tp_ip6_bind_table); + sk_del_node_init(sk); + write_unlock_bh(&l2tp_ip6_lock); + + release_sock(sk); + return 0; + +out_unlock_rcu: + rcu_read_unlock(); +out_unlock: + release_sock(sk); + return err; + +out_in_use: + read_unlock_bh(&l2tp_ip6_lock); + return err; +} + +static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr, + int addr_len) +{ + struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *) uaddr; + struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; + struct in6_addr *daddr; + int addr_type; + int rc; + + if (addr_len < sizeof(*lsa)) + return -EINVAL; + + addr_type = ipv6_addr_type(&usin->sin6_addr); + if (addr_type & IPV6_ADDR_MULTICAST) + return -EINVAL; + + if (addr_type & IPV6_ADDR_MAPPED) { + daddr = &usin->sin6_addr; + if (ipv4_is_multicast(daddr->s6_addr32[3])) + return -EINVAL; + } + + rc = ip6_datagram_connect(sk, uaddr, addr_len); + + lock_sock(sk); + + l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; + + write_lock_bh(&l2tp_ip6_lock); + hlist_del_init(&sk->sk_bind_node); + sk_add_bind_node(sk, &l2tp_ip6_bind_table); + write_unlock_bh(&l2tp_ip6_lock); + + release_sock(sk); + + return rc; +} + +static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr, + int *uaddr_len, int peer) +{ + struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr; + struct sock *sk = sock->sk; + struct ipv6_pinfo *np = inet6_sk(sk); + struct l2tp_ip6_sock *lsk = l2tp_ip6_sk(sk); + + lsa->l2tp_family = AF_INET6; + lsa->l2tp_flowinfo = 0; + lsa->l2tp_scope_id = 0; + if (peer) { + if (!lsk->peer_conn_id) + return -ENOTCONN; + lsa->l2tp_conn_id = lsk->peer_conn_id; + lsa->l2tp_addr = np->daddr; + if (np->sndflow) + lsa->l2tp_flowinfo = np->flow_label; + } else { + if (ipv6_addr_any(&np->rcv_saddr)) + lsa->l2tp_addr = np->saddr; + else + lsa->l2tp_addr = np->rcv_saddr; + + lsa->l2tp_conn_id = lsk->conn_id; + } + if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) + lsa->l2tp_scope_id = sk->sk_bound_dev_if; + *uaddr_len = sizeof(*lsa); + return 0; +} + +static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb) +{ + int rc; + + /* Charge it to the socket, dropping if the queue is full. */ + rc = sock_queue_rcv_skb(sk, skb); + if (rc < 0) + goto drop; + + return 0; + +drop: + IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS); + kfree_skb(skb); + return -1; +} + +static int l2tp_ip6_push_pending_frames(struct sock *sk) +{ + struct sk_buff *skb; + __be32 *transhdr = NULL; + int err = 0; + + skb = skb_peek(&sk->sk_write_queue); + if (skb == NULL) + goto out; + + transhdr = (__be32 *)skb_transport_header(skb); + *transhdr = 0; + + err = ip6_push_pending_frames(sk); + +out: + return err; +} + +/* Userspace will call sendmsg() on the tunnel socket to send L2TP + * control frames. 
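
Editorial sketch, not part of the patch: a hypothetical userspace sequence for using the socket type implemented below. It assumes the sockaddr_l2tpip6 layout matches the fields referenced by l2tp_ip6_bind()/l2tp_ip6_connect() above and that IPPROTO_L2TP comes from linux/l2tp.h; the helper name and its arguments are invented for the example, and error handling is omitted.

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <linux/l2tp.h>

	int open_l2tp_ip6_ctrl(const struct in6_addr *peer,
			       __u32 local_id, __u32 peer_id)
	{
		struct sockaddr_l2tpip6 sa;
		int fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP);

		memset(&sa, 0, sizeof(sa));
		sa.l2tp_family  = AF_INET6;
		sa.l2tp_conn_id = local_id;	/* our control connection id */
		bind(fd, (struct sockaddr *)&sa, sizeof(sa));

		sa.l2tp_addr    = *peer;
		sa.l2tp_conn_id = peer_id;	/* peer's control connection id */
		connect(fd, (struct sockaddr *)&sa, sizeof(sa));

		/* send()/sendmsg() payloads are raw L2TPv3 control messages;
		 * the 4-byte zero session id (transhdrlen) is prepended by
		 * l2tp_ip6_sendmsg() below.
		 */
		return fd;
	}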
+ */ +static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk, + struct msghdr *msg, size_t len) +{ + struct ipv6_txoptions opt_space; + struct sockaddr_l2tpip6 *lsa = + (struct sockaddr_l2tpip6 *) msg->msg_name; + struct in6_addr *daddr, *final_p, final; + struct ipv6_pinfo *np = inet6_sk(sk); + struct ipv6_txoptions *opt = NULL; + struct ip6_flowlabel *flowlabel = NULL; + struct dst_entry *dst = NULL; + struct flowi6 fl6; + int addr_len = msg->msg_namelen; + int hlimit = -1; + int tclass = -1; + int dontfrag = -1; + int transhdrlen = 4; /* zero session-id */ + int ulen = len + transhdrlen; + int err; + + /* Rough check on arithmetic overflow, + better check is made in ip6_append_data(). + */ + if (len > INT_MAX) + return -EMSGSIZE; + + /* Mirror BSD error message compatibility */ + if (msg->msg_flags & MSG_OOB) + return -EOPNOTSUPP; + + /* + * Get and verify the address. + */ + memset(&fl6, 0, sizeof(fl6)); + + fl6.flowi6_mark = sk->sk_mark; + + if (lsa) { + if (addr_len < SIN6_LEN_RFC2133) + return -EINVAL; + + if (lsa->l2tp_family && lsa->l2tp_family != AF_INET6) + return -EAFNOSUPPORT; + + daddr = &lsa->l2tp_addr; + if (np->sndflow) { + fl6.flowlabel = lsa->l2tp_flowinfo & IPV6_FLOWINFO_MASK; + if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) { + flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); + if (flowlabel == NULL) + return -EINVAL; + daddr = &flowlabel->dst; + } + } + + /* + * Otherwise it will be difficult to maintain + * sk->sk_dst_cache. + */ + if (sk->sk_state == TCP_ESTABLISHED && + ipv6_addr_equal(daddr, &np->daddr)) + daddr = &np->daddr; + + if (addr_len >= sizeof(struct sockaddr_in6) && + lsa->l2tp_scope_id && + ipv6_addr_type(daddr) & IPV6_ADDR_LINKLOCAL) + fl6.flowi6_oif = lsa->l2tp_scope_id; + } else { + if (sk->sk_state != TCP_ESTABLISHED) + return -EDESTADDRREQ; + + daddr = &np->daddr; + fl6.flowlabel = np->flow_label; + } + + if (fl6.flowi6_oif == 0) + fl6.flowi6_oif = sk->sk_bound_dev_if; + + if (msg->msg_controllen) { + opt = &opt_space; + memset(opt, 0, sizeof(struct ipv6_txoptions)); + opt->tot_len = sizeof(struct ipv6_txoptions); + + err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, + &hlimit, &tclass, &dontfrag); + if (err < 0) { + fl6_sock_release(flowlabel); + return err; + } + if ((fl6.flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) { + flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); + if (flowlabel == NULL) + return -EINVAL; + } + if (!(opt->opt_nflen|opt->opt_flen)) + opt = NULL; + } + + if (opt == NULL) + opt = np->opt; + if (flowlabel) + opt = fl6_merge_options(&opt_space, flowlabel, opt); + opt = ipv6_fixup_options(&opt_space, opt); + + fl6.flowi6_proto = sk->sk_protocol; + if (!ipv6_addr_any(daddr)) + fl6.daddr = *daddr; + else + fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ + if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) + fl6.saddr = np->saddr; + + final_p = fl6_update_dst(&fl6, opt, &final); + + if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) + fl6.flowi6_oif = np->mcast_oif; + else if (!fl6.flowi6_oif) + fl6.flowi6_oif = np->ucast_oif; + + security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); + + dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + goto out; + } + + if (hlimit < 0) { + if (ipv6_addr_is_multicast(&fl6.daddr)) + hlimit = np->mcast_hops; + else + hlimit = np->hop_limit; + if (hlimit < 0) + hlimit = ip6_dst_hoplimit(dst); + } + + if (tclass < 0) + tclass = np->tclass; + + if (dontfrag < 0) + dontfrag = np->dontfrag; + + if (msg->msg_flags & 
MSG_CONFIRM) + goto do_confirm; + +back_from_confirm: + lock_sock(sk); + err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, + ulen, transhdrlen, hlimit, tclass, opt, + &fl6, (struct rt6_info *)dst, + msg->msg_flags, dontfrag); + if (err) + ip6_flush_pending_frames(sk); + else if (!(msg->msg_flags & MSG_MORE)) + err = l2tp_ip6_push_pending_frames(sk); + release_sock(sk); +done: + dst_release(dst); +out: + fl6_sock_release(flowlabel); + + return err < 0 ? err : len; + +do_confirm: + dst_confirm(dst); + if (!(msg->msg_flags & MSG_PROBE) || len) + goto back_from_confirm; + err = 0; + goto done; +} + +static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk, + struct msghdr *msg, size_t len, int noblock, + int flags, int *addr_len) +{ + struct inet_sock *inet = inet_sk(sk); + struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name; + size_t copied = 0; + int err = -EOPNOTSUPP; + struct sk_buff *skb; + + if (flags & MSG_OOB) + goto out; + + if (addr_len) + *addr_len = sizeof(*lsa); + + if (flags & MSG_ERRQUEUE) + return ipv6_recv_error(sk, msg, len); + + skb = skb_recv_datagram(sk, flags, noblock, &err); + if (!skb) + goto out; + + copied = skb->len; + if (len < copied) { + msg->msg_flags |= MSG_TRUNC; + copied = len; + } + + err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + if (err) + goto done; + + sock_recv_timestamp(msg, sk, skb); + + /* Copy the address. */ + if (lsa) { + lsa->l2tp_family = AF_INET6; + lsa->l2tp_unused = 0; + lsa->l2tp_addr = ipv6_hdr(skb)->saddr; + lsa->l2tp_flowinfo = 0; + lsa->l2tp_scope_id = 0; + if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) + lsa->l2tp_scope_id = IP6CB(skb)->iif; + } + + if (inet->cmsg_flags) + ip_cmsg_recv(msg, skb); + + if (flags & MSG_TRUNC) + copied = skb->len; +done: + skb_free_datagram(sk, skb); +out: + return err ? 
err : copied; +} + +static struct proto l2tp_ip6_prot = { + .name = "L2TP/IPv6", + .owner = THIS_MODULE, + .init = l2tp_ip6_open, + .close = l2tp_ip6_close, + .bind = l2tp_ip6_bind, + .connect = l2tp_ip6_connect, + .disconnect = udp_disconnect, + .ioctl = udp_ioctl, + .destroy = l2tp_ip6_destroy_sock, + .setsockopt = ipv6_setsockopt, + .getsockopt = ipv6_getsockopt, + .sendmsg = l2tp_ip6_sendmsg, + .recvmsg = l2tp_ip6_recvmsg, + .backlog_rcv = l2tp_ip6_backlog_recv, + .hash = inet_hash, + .unhash = inet_unhash, + .obj_size = sizeof(struct l2tp_ip6_sock), +#ifdef CONFIG_COMPAT + .compat_setsockopt = compat_ipv6_setsockopt, + .compat_getsockopt = compat_ipv6_getsockopt, +#endif +}; + +static const struct proto_ops l2tp_ip6_ops = { + .family = PF_INET6, + .owner = THIS_MODULE, + .release = inet6_release, + .bind = inet6_bind, + .connect = inet_dgram_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .getname = l2tp_ip6_getname, + .poll = datagram_poll, + .ioctl = inet6_ioctl, + .listen = sock_no_listen, + .shutdown = inet_shutdown, + .setsockopt = sock_common_setsockopt, + .getsockopt = sock_common_getsockopt, + .sendmsg = inet_sendmsg, + .recvmsg = sock_common_recvmsg, + .mmap = sock_no_mmap, + .sendpage = sock_no_sendpage, +#ifdef CONFIG_COMPAT + .compat_setsockopt = compat_sock_common_setsockopt, + .compat_getsockopt = compat_sock_common_getsockopt, +#endif +}; + +static struct inet_protosw l2tp_ip6_protosw = { + .type = SOCK_DGRAM, + .protocol = IPPROTO_L2TP, + .prot = &l2tp_ip6_prot, + .ops = &l2tp_ip6_ops, + .no_check = 0, +}; + +static struct inet6_protocol l2tp_ip6_protocol __read_mostly = { + .handler = l2tp_ip6_recv, +}; + +static int __init l2tp_ip6_init(void) +{ + int err; + + pr_info("L2TP IP encapsulation support for IPv6 (L2TPv3)\n"); + + err = proto_register(&l2tp_ip6_prot, 1); + if (err != 0) + goto out; + + err = inet6_add_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP); + if (err) + goto out1; + + inet6_register_protosw(&l2tp_ip6_protosw); + return 0; + +out1: + proto_unregister(&l2tp_ip6_prot); +out: + return err; +} + +static void __exit l2tp_ip6_exit(void) +{ + inet6_unregister_protosw(&l2tp_ip6_protosw); + inet6_del_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP); + proto_unregister(&l2tp_ip6_prot); +} + +module_init(l2tp_ip6_init); +module_exit(l2tp_ip6_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Chris Elston <celston@katalix.com>"); +MODULE_DESCRIPTION("L2TP IP encapsulation for IPv6"); +MODULE_VERSION("1.0"); + +/* Use the value of SOCK_DGRAM (2) directory, because __stringify doesn't like + * enums + */ +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 2, IPPROTO_L2TP); diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 93a41a09458..8577264378f 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -14,6 +14,8 @@ * published by the Free Software Foundation. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <net/sock.h> #include <net/genetlink.h> #include <net/udp.h> @@ -133,10 +135,25 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info if (info->attrs[L2TP_ATTR_FD]) { fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]); } else { - if (info->attrs[L2TP_ATTR_IP_SADDR]) - cfg.local_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_SADDR]); - if (info->attrs[L2TP_ATTR_IP_DADDR]) - cfg.peer_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_DADDR]); +#if IS_ENABLED(CONFIG_IPV6) + if (info->attrs[L2TP_ATTR_IP6_SADDR] && + info->attrs[L2TP_ATTR_IP6_DADDR]) { + cfg.local_ip6 = nla_data( + info->attrs[L2TP_ATTR_IP6_SADDR]); + cfg.peer_ip6 = nla_data( + info->attrs[L2TP_ATTR_IP6_DADDR]); + } else +#endif + if (info->attrs[L2TP_ATTR_IP_SADDR] && + info->attrs[L2TP_ATTR_IP_DADDR]) { + cfg.local_ip.s_addr = nla_get_be32( + info->attrs[L2TP_ATTR_IP_SADDR]); + cfg.peer_ip.s_addr = nla_get_be32( + info->attrs[L2TP_ATTR_IP_DADDR]); + } else { + ret = -EINVAL; + goto out; + } if (info->attrs[L2TP_ATTR_UDP_SPORT]) cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]); if (info->attrs[L2TP_ATTR_UDP_DPORT]) @@ -225,47 +242,85 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, struct nlattr *nest; struct sock *sk = NULL; struct inet_sock *inet; +#if IS_ENABLED(CONFIG_IPV6) + struct ipv6_pinfo *np = NULL; +#endif + struct l2tp_stats stats; + unsigned int start; hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_TUNNEL_GET); if (IS_ERR(hdr)) return PTR_ERR(hdr); - NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version); - NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id); - NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id); - NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, tunnel->debug); - NLA_PUT_U16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap); + if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) || + nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) || + nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) || + nla_put_u32(skb, L2TP_ATTR_DEBUG, tunnel->debug) || + nla_put_u16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap)) + goto nla_put_failure; nest = nla_nest_start(skb, L2TP_ATTR_STATS); if (nest == NULL) goto nla_put_failure; - NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets); - NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes); - NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors); - NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets); - NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes); - NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards); - NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets); - NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors); + do { + start = u64_stats_fetch_begin(&tunnel->stats.syncp); + stats.tx_packets = tunnel->stats.tx_packets; + stats.tx_bytes = tunnel->stats.tx_bytes; + stats.tx_errors = tunnel->stats.tx_errors; + stats.rx_packets = tunnel->stats.rx_packets; + stats.rx_bytes = tunnel->stats.rx_bytes; + stats.rx_errors = tunnel->stats.rx_errors; + stats.rx_seq_discards = tunnel->stats.rx_seq_discards; + stats.rx_oos_packets = tunnel->stats.rx_oos_packets; + } while (u64_stats_fetch_retry(&tunnel->stats.syncp, start)); + + if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) || + nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) || + nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, 
stats.tx_errors) || + nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) || + nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) || + nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, + stats.rx_seq_discards) || + nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, + stats.rx_oos_packets) || + nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) + goto nla_put_failure; nla_nest_end(skb, nest); sk = tunnel->sock; if (!sk) goto out; +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) + np = inet6_sk(sk); +#endif + inet = inet_sk(sk); switch (tunnel->encap) { case L2TP_ENCAPTYPE_UDP: - NLA_PUT_U16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)); - NLA_PUT_U16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)); - NLA_PUT_U8(skb, L2TP_ATTR_UDP_CSUM, (sk->sk_no_check != UDP_CSUM_NOXMIT)); + if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) || + nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) || + nla_put_u8(skb, L2TP_ATTR_UDP_CSUM, + (sk->sk_no_check != UDP_CSUM_NOXMIT))) + goto nla_put_failure; /* NOBREAK */ case L2TP_ENCAPTYPE_IP: - NLA_PUT_BE32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr); - NLA_PUT_BE32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr); +#if IS_ENABLED(CONFIG_IPV6) + if (np) { + if (nla_put(skb, L2TP_ATTR_IP6_SADDR, sizeof(np->saddr), + &np->saddr) || + nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(np->daddr), + &np->daddr)) + goto nla_put_failure; + } else +#endif + if (nla_put_be32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) || + nla_put_be32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr)) + goto nla_put_failure; break; } @@ -556,6 +611,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags struct nlattr *nest; struct l2tp_tunnel *tunnel = session->tunnel; struct sock *sk = NULL; + struct l2tp_stats stats; + unsigned int start; sk = tunnel->sock; @@ -563,43 +620,64 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags if (IS_ERR(hdr)) return PTR_ERR(hdr); - NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id); - NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id); - NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id); - NLA_PUT_U32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id); - NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, session->debug); - NLA_PUT_U16(skb, L2TP_ATTR_PW_TYPE, session->pwtype); - NLA_PUT_U16(skb, L2TP_ATTR_MTU, session->mtu); - if (session->mru) - NLA_PUT_U16(skb, L2TP_ATTR_MRU, session->mru); - - if (session->ifname && session->ifname[0]) - NLA_PUT_STRING(skb, L2TP_ATTR_IFNAME, session->ifname); - if (session->cookie_len) - NLA_PUT(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0]); - if (session->peer_cookie_len) - NLA_PUT(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, &session->peer_cookie[0]); - NLA_PUT_U8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq); - NLA_PUT_U8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq); - NLA_PUT_U8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode); + if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) || + nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) || + nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) || + nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID, + session->peer_session_id) || + nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) || + nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype) || + nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu) || + (session->mru && + nla_put_u16(skb, L2TP_ATTR_MRU, session->mru))) + goto nla_put_failure; + + if ((session->ifname && 
session->ifname[0] && + nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) || + (session->cookie_len && + nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len, + &session->cookie[0])) || + (session->peer_cookie_len && + nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, + &session->peer_cookie[0])) || + nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) || + nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) || + nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) || #ifdef CONFIG_XFRM - if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) - NLA_PUT_U8(skb, L2TP_ATTR_USING_IPSEC, 1); + (((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) && + nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) || #endif - if (session->reorder_timeout) - NLA_PUT_MSECS(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout); + (session->reorder_timeout && + nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout))) + goto nla_put_failure; nest = nla_nest_start(skb, L2TP_ATTR_STATS); if (nest == NULL) goto nla_put_failure; - NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets); - NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes); - NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors); - NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets); - NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes); - NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, session->stats.rx_seq_discards); - NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, session->stats.rx_oos_packets); - NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors); + + do { + start = u64_stats_fetch_begin(&session->stats.syncp); + stats.tx_packets = session->stats.tx_packets; + stats.tx_bytes = session->stats.tx_bytes; + stats.tx_errors = session->stats.tx_errors; + stats.rx_packets = session->stats.rx_packets; + stats.rx_bytes = session->stats.rx_bytes; + stats.rx_errors = session->stats.rx_errors; + stats.rx_seq_discards = session->stats.rx_seq_discards; + stats.rx_oos_packets = session->stats.rx_oos_packets; + } while (u64_stats_fetch_retry(&session->stats.syncp, start)); + + if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) || + nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) || + nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) || + nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) || + nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) || + nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, + stats.rx_seq_discards) || + nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, + stats.rx_oos_packets) || + nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) + goto nla_put_failure; nla_nest_end(skb, nest); return genlmsg_end(skb, hdr); @@ -708,6 +786,14 @@ static struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = { [L2TP_ATTR_MTU] = { .type = NLA_U16, }, [L2TP_ATTR_MRU] = { .type = NLA_U16, }, [L2TP_ATTR_STATS] = { .type = NLA_NESTED, }, + [L2TP_ATTR_IP6_SADDR] = { + .type = NLA_BINARY, + .len = sizeof(struct in6_addr), + }, + [L2TP_ATTR_IP6_DADDR] = { + .type = NLA_BINARY, + .len = sizeof(struct in6_addr), + }, [L2TP_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1, @@ -818,7 +904,7 @@ static int l2tp_nl_init(void) { int err; - printk(KERN_INFO "L2TP netlink interface\n"); + pr_info("L2TP netlink interface\n"); err = genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops, ARRAY_SIZE(l2tp_nl_ops)); diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 1addd9f3f40..8ef6b9416cb 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ 
-57,6 +57,8 @@ * http://openl2tp.sourceforge.net. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/string.h> #include <linux/list.h> @@ -106,12 +108,6 @@ /* Space for UDP, L2TP and PPP headers */ #define PPPOL2TP_HEADER_OVERHEAD 40 -#define PRINTK(_mask, _type, _lvl, _fmt, args...) \ - do { \ - if ((_mask) & (_type)) \ - printk(_lvl "PPPOL2TP: " _fmt, ##args); \ - } while (0) - /* Number of bytes to build transmit L2TP headers. * Unfortunately the size is different depending on whether sequence numbers * are enabled. @@ -236,9 +232,9 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int if (sk->sk_state & PPPOX_BOUND) { struct pppox_sock *po; - PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, - "%s: recv %d byte data frame, passing to ppp\n", - session->name, data_len); + l2tp_dbg(session, PPPOL2TP_MSG_DATA, + "%s: recv %d byte data frame, passing to ppp\n", + session->name, data_len); /* We need to forget all info related to the L2TP packet * gathered in the skb as we are going to reuse the same @@ -259,8 +255,8 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int po = pppox_sk(sk); ppp_input(&po->chan, skb); } else { - PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO, - "%s: socket not bound\n", session->name); + l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: socket not bound\n", + session->name); /* Not bound. Nothing we can do, so discard. */ session->stats.rx_errors++; @@ -270,8 +266,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int return; no_sock: - PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO, - "%s: no socket\n", session->name); + l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: no socket\n", session->name); kfree_skb(skb); } @@ -628,7 +623,6 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, { struct sock *sk = sock->sk; struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr; - struct sockaddr_pppol2tpv3 *sp3 = (struct sockaddr_pppol2tpv3 *) uservaddr; struct pppox_sock *po = pppox_sk(sk); struct l2tp_session *session = NULL; struct l2tp_tunnel *tunnel; @@ -657,7 +651,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, if (sk->sk_user_data) goto end; /* socket is already attached */ - /* Get params from socket address. Handle L2TPv2 and L2TPv3 */ + /* Get params from socket address. Handle L2TPv2 and L2TPv3. + * This is nasty because there are different sockaddr_pppol2tp + * structs for L2TPv2, L2TPv3, over IPv4 and IPv6. We use + * the sockaddr size to determine which structure the caller + * is using. 
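
Editorial sketch, not part of the patch: with the two IPv6 sockaddr variants accepted below, a hypothetical userspace daemon could attach a PPP channel to an L2TPv2-over-IPv6 session roughly as follows. The struct and constant names are assumed to come from the pppox/pppol2tp UAPI headers, tunnel_fd is assumed to be an already set-up tunnel socket, and the helper name and its arguments are invented for the example; error handling is omitted.

	#include <string.h>
	#include <sys/socket.h>
	#include <linux/if_pppox.h>

	int pppol2tp_attach_v2_in6(int tunnel_fd, __u16 tid, __u16 ptid,
				   __u16 sid, __u16 psid)
	{
		struct sockaddr_pppol2tpin6 sax;
		int fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);

		memset(&sax, 0, sizeof(sax));
		sax.sa_family          = AF_PPPOX;
		sax.sa_protocol        = PX_PROTO_OL2TP;
		sax.pppol2tp.fd        = tunnel_fd;
		sax.pppol2tp.s_tunnel  = tid;
		sax.pppol2tp.d_tunnel  = ptid;
		sax.pppol2tp.s_session = sid;
		sax.pppol2tp.d_session = psid;

		/* sizeof(sax) is what tells pppol2tp_connect() this is the
		 * L2TPv2-over-IPv6 variant rather than one of the other three.
		 */
		return connect(fd, (struct sockaddr *)&sax, sizeof(sax)) ? -1 : fd;
	}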
+ */ + peer_tunnel_id = 0; if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) { fd = sp->pppol2tp.fd; tunnel_id = sp->pppol2tp.s_tunnel; @@ -665,12 +665,31 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, session_id = sp->pppol2tp.s_session; peer_session_id = sp->pppol2tp.d_session; } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) { + struct sockaddr_pppol2tpv3 *sp3 = + (struct sockaddr_pppol2tpv3 *) sp; ver = 3; fd = sp3->pppol2tp.fd; tunnel_id = sp3->pppol2tp.s_tunnel; peer_tunnel_id = sp3->pppol2tp.d_tunnel; session_id = sp3->pppol2tp.s_session; peer_session_id = sp3->pppol2tp.d_session; + } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpin6)) { + struct sockaddr_pppol2tpin6 *sp6 = + (struct sockaddr_pppol2tpin6 *) sp; + fd = sp6->pppol2tp.fd; + tunnel_id = sp6->pppol2tp.s_tunnel; + peer_tunnel_id = sp6->pppol2tp.d_tunnel; + session_id = sp6->pppol2tp.s_session; + peer_session_id = sp6->pppol2tp.d_session; + } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3in6)) { + struct sockaddr_pppol2tpv3in6 *sp6 = + (struct sockaddr_pppol2tpv3in6 *) sp; + ver = 3; + fd = sp6->pppol2tp.fd; + tunnel_id = sp6->pppol2tp.s_tunnel; + peer_tunnel_id = sp6->pppol2tp.d_tunnel; + session_id = sp6->pppol2tp.s_session; + peer_session_id = sp6->pppol2tp.d_session; } else { error = -EINVAL; goto end; /* bad socket address */ @@ -711,12 +730,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, if (tunnel->recv_payload_hook == NULL) tunnel->recv_payload_hook = pppol2tp_recv_payload_hook; - if (tunnel->peer_tunnel_id == 0) { - if (ver == 2) - tunnel->peer_tunnel_id = sp->pppol2tp.d_tunnel; - else - tunnel->peer_tunnel_id = sp3->pppol2tp.d_tunnel; - } + if (tunnel->peer_tunnel_id == 0) + tunnel->peer_tunnel_id = peer_tunnel_id; /* Create session if it doesn't already exist. We handle the * case where a session was previously created by the netlink @@ -807,8 +822,8 @@ out_no_ppp: /* This is how we get the session context from the socket. 
*/ sk->sk_user_data = session; sk->sk_state = PPPOX_CONNECTED; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: created\n", session->name); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n", + session->name); end: release_sock(sk); @@ -861,8 +876,8 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i ps = l2tp_session_priv(session); ps->tunnel_sock = tunnel->sock; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: created\n", session->name); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n", + session->name); error = 0; @@ -916,7 +931,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, } inet = inet_sk(tunnel->sock); - if (tunnel->version == 2) { + if ((tunnel->version == 2) && (tunnel->sock->sk_family == AF_INET)) { struct sockaddr_pppol2tp sp; len = sizeof(sp); memset(&sp, 0, len); @@ -932,6 +947,46 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, sp.pppol2tp.addr.sin_port = inet->inet_dport; sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr; memcpy(uaddr, &sp, len); +#if IS_ENABLED(CONFIG_IPV6) + } else if ((tunnel->version == 2) && + (tunnel->sock->sk_family == AF_INET6)) { + struct ipv6_pinfo *np = inet6_sk(tunnel->sock); + struct sockaddr_pppol2tpin6 sp; + len = sizeof(sp); + memset(&sp, 0, len); + sp.sa_family = AF_PPPOX; + sp.sa_protocol = PX_PROTO_OL2TP; + sp.pppol2tp.fd = tunnel->fd; + sp.pppol2tp.pid = pls->owner; + sp.pppol2tp.s_tunnel = tunnel->tunnel_id; + sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id; + sp.pppol2tp.s_session = session->session_id; + sp.pppol2tp.d_session = session->peer_session_id; + sp.pppol2tp.addr.sin6_family = AF_INET6; + sp.pppol2tp.addr.sin6_port = inet->inet_dport; + memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr, + sizeof(np->daddr)); + memcpy(uaddr, &sp, len); + } else if ((tunnel->version == 3) && + (tunnel->sock->sk_family == AF_INET6)) { + struct ipv6_pinfo *np = inet6_sk(tunnel->sock); + struct sockaddr_pppol2tpv3in6 sp; + len = sizeof(sp); + memset(&sp, 0, len); + sp.sa_family = AF_PPPOX; + sp.sa_protocol = PX_PROTO_OL2TP; + sp.pppol2tp.fd = tunnel->fd; + sp.pppol2tp.pid = pls->owner; + sp.pppol2tp.s_tunnel = tunnel->tunnel_id; + sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id; + sp.pppol2tp.s_session = session->session_id; + sp.pppol2tp.d_session = session->peer_session_id; + sp.pppol2tp.addr.sin6_family = AF_INET6; + sp.pppol2tp.addr.sin6_port = inet->inet_dport; + memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr, + sizeof(np->daddr)); + memcpy(uaddr, &sp, len); +#endif } else if (tunnel->version == 3) { struct sockaddr_pppol2tpv3 sp; len = sizeof(sp); @@ -998,9 +1053,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, struct l2tp_tunnel *tunnel = session->tunnel; struct pppol2tp_ioc_stats stats; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG, - "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n", - session->name, cmd, arg); + l2tp_dbg(session, PPPOL2TP_MSG_CONTROL, + "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n", + session->name, cmd, arg); sk = ps->sock; sock_hold(sk); @@ -1018,8 +1073,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq))) break; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get mtu=%d\n", session->name, session->mtu); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mtu=%d\n", + session->name, session->mtu); err = 0; break; @@ -1034,8 +1089,8 @@ static int 
pppol2tp_session_ioctl(struct l2tp_session *session, session->mtu = ifr.ifr_mtu; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set mtu=%d\n", session->name, session->mtu); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mtu=%d\n", + session->name, session->mtu); err = 0; break; @@ -1048,8 +1103,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, if (put_user(session->mru, (int __user *) arg)) break; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get mru=%d\n", session->name, session->mru); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mru=%d\n", + session->name, session->mru); err = 0; break; @@ -1063,8 +1118,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, break; session->mru = val; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set mru=%d\n", session->name, session->mru); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mru=%d\n", + session->name, session->mru); err = 0; break; @@ -1073,8 +1128,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, if (put_user(ps->flags, (int __user *) arg)) break; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get flags=%d\n", session->name, ps->flags); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get flags=%d\n", + session->name, ps->flags); err = 0; break; @@ -1083,8 +1138,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, if (get_user(val, (int __user *) arg)) break; ps->flags = val; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set flags=%d\n", session->name, ps->flags); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set flags=%d\n", + session->name, ps->flags); err = 0; break; @@ -1100,8 +1155,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, if (copy_to_user((void __user *) arg, &stats, sizeof(stats))) break; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get L2TP stats\n", session->name); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n", + session->name); err = 0; break; @@ -1128,9 +1183,9 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, struct sock *sk; struct pppol2tp_ioc_stats stats; - PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG, - "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", - tunnel->name, cmd, arg); + l2tp_dbg(tunnel, PPPOL2TP_MSG_CONTROL, + "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", + tunnel->name, cmd, arg); sk = tunnel->sock; sock_hold(sk); @@ -1164,8 +1219,8 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, err = -EFAULT; break; } - PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get L2TP stats\n", tunnel->name); + l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n", + tunnel->name); err = 0; break; @@ -1254,8 +1309,8 @@ static int pppol2tp_tunnel_setsockopt(struct sock *sk, switch (optname) { case PPPOL2TP_SO_DEBUG: tunnel->debug = val; - PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set debug=%x\n", tunnel->name, tunnel->debug); + l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n", + tunnel->name, tunnel->debug); break; default: @@ -1282,8 +1337,9 @@ static int pppol2tp_session_setsockopt(struct sock *sk, break; } session->recv_seq = val ? 
-1 : 0; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set recv_seq=%d\n", session->name, session->recv_seq); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, + "%s: set recv_seq=%d\n", + session->name, session->recv_seq); break; case PPPOL2TP_SO_SENDSEQ: @@ -1298,8 +1354,9 @@ static int pppol2tp_session_setsockopt(struct sock *sk, po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ : PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; } - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set send_seq=%d\n", session->name, session->send_seq); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, + "%s: set send_seq=%d\n", + session->name, session->send_seq); break; case PPPOL2TP_SO_LNSMODE: @@ -1308,20 +1365,22 @@ static int pppol2tp_session_setsockopt(struct sock *sk, break; } session->lns_mode = val ? -1 : 0; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set lns_mode=%d\n", session->name, session->lns_mode); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, + "%s: set lns_mode=%d\n", + session->name, session->lns_mode); break; case PPPOL2TP_SO_DEBUG: session->debug = val; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set debug=%x\n", session->name, session->debug); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n", + session->name, session->debug); break; case PPPOL2TP_SO_REORDERTO: session->reorder_timeout = msecs_to_jiffies(val); - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set reorder_timeout=%d\n", session->name, session->reorder_timeout); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, + "%s: set reorder_timeout=%d\n", + session->name, session->reorder_timeout); break; default: @@ -1400,8 +1459,8 @@ static int pppol2tp_tunnel_getsockopt(struct sock *sk, switch (optname) { case PPPOL2TP_SO_DEBUG: *val = tunnel->debug; - PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get debug=%x\n", tunnel->name, tunnel->debug); + l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get debug=%x\n", + tunnel->name, tunnel->debug); break; default: @@ -1423,32 +1482,32 @@ static int pppol2tp_session_getsockopt(struct sock *sk, switch (optname) { case PPPOL2TP_SO_RECVSEQ: *val = session->recv_seq; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get recv_seq=%d\n", session->name, *val); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, + "%s: get recv_seq=%d\n", session->name, *val); break; case PPPOL2TP_SO_SENDSEQ: *val = session->send_seq; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get send_seq=%d\n", session->name, *val); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, + "%s: get send_seq=%d\n", session->name, *val); break; case PPPOL2TP_SO_LNSMODE: *val = session->lns_mode; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get lns_mode=%d\n", session->name, *val); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, + "%s: get lns_mode=%d\n", session->name, *val); break; case PPPOL2TP_SO_DEBUG: *val = session->debug; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get debug=%d\n", session->name, *val); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get debug=%d\n", + session->name, *val); break; case PPPOL2TP_SO_REORDERTO: *val = (int) jiffies_to_msecs(session->reorder_timeout); - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get reorder_timeout=%d\n", session->name, *val); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, + "%s: get reorder_timeout=%d\n", session->name, *val); break; default: @@ -1811,8 +1870,7 @@ static int __init pppol2tp_init(void) goto out_unregister_pppox; 
#endif - printk(KERN_INFO "PPPoL2TP kernel driver, %s\n", - PPPOL2TP_DRV_VERSION); + pr_info("PPPoL2TP kernel driver, %s\n", PPPOL2TP_DRV_VERSION); out: return err; diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c index ab3d35f2325..3cdaa046c1b 100644 --- a/net/lapb/lapb_iface.c +++ b/net/lapb/lapb_iface.c @@ -15,6 +15,8 @@ * 2000-10-29 Henner Eisen lapb_data_indication() return status. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> @@ -279,9 +281,7 @@ int lapb_connect_request(struct net_device *dev) lapb_establish_data_link(lapb); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S0 -> S1\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S0 -> S1\n", lapb->dev); lapb->state = LAPB_STATE_1; rc = LAPB_OK; @@ -305,12 +305,8 @@ int lapb_disconnect_request(struct net_device *dev) goto out_put; case LAPB_STATE_1: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 TX DISC(1)\n", lapb->dev); -#endif -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S1 TX DISC(1)\n", lapb->dev); + lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev); lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND); lapb->state = LAPB_STATE_0; lapb_start_t1timer(lapb); @@ -329,12 +325,8 @@ int lapb_disconnect_request(struct net_device *dev) lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_2; -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 DISC(1)\n", lapb->dev); -#endif -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S2\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S3 DISC(1)\n", lapb->dev); + lapb_dbg(0, "(%p) S3 -> S2\n", lapb->dev); rc = LAPB_OK; out_put: diff --git a/net/lapb/lapb_in.c b/net/lapb/lapb_in.c index f4e3c1accab..5dba899131b 100644 --- a/net/lapb/lapb_in.c +++ b/net/lapb/lapb_in.c @@ -15,6 +15,8 @@ * 2000-10-29 Henner Eisen lapb_data_indication() return status. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> @@ -44,25 +46,16 @@ static void lapb_state0_machine(struct lapb_cb *lapb, struct sk_buff *skb, { switch (frame->type) { case LAPB_SABM: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S0 RX SABM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S0 RX SABM(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S0 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S0 TX DM(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S0 -> S3\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S0 TX UA(%d)\n", + lapb->dev, frame->pf); + lapb_dbg(0, "(%p) S0 -> S3\n", lapb->dev); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_stop_t1timer(lapb); @@ -78,18 +71,11 @@ static void lapb_state0_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_SABME: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S0 RX SABME(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S0 RX SABME(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S0 -> S3\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S0 TX UA(%d)\n", + lapb->dev, frame->pf); + lapb_dbg(0, "(%p) S0 -> S3\n", lapb->dev); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_stop_t1timer(lapb); @@ -102,22 +88,16 @@ static void lapb_state0_machine(struct lapb_cb *lapb, struct sk_buff *skb, lapb->va = 0; lapb_connect_indication(lapb, LAPB_OK); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S0 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S0 TX DM(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } break; case LAPB_DISC: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S0 RX DISC(%d)\n", - lapb->dev, frame->pf); - printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S0 RX DISC(%d)\n", lapb->dev, frame->pf); + lapb_dbg(1, "(%p) S0 TX UA(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); break; @@ -137,68 +117,45 @@ static void lapb_state1_machine(struct lapb_cb *lapb, struct sk_buff *skb, { switch (frame->type) { case LAPB_SABM: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 RX SABM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S1 RX SABM(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S1 TX DM(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S1 TX UA(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); } break; case LAPB_SABME: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 RX SABME(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S1 RX SABME(%d)\n", lapb->dev, frame->pf); if (lapb->mode & 
LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S1 TX UA(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S1 TX DM(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } break; case LAPB_DISC: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 RX DISC(%d)\n", - lapb->dev, frame->pf); - printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S1 RX DISC(%d)\n", lapb->dev, frame->pf); + lapb_dbg(1, "(%p) S1 TX DM(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); break; case LAPB_UA: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 RX UA(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S1 RX UA(%d)\n", lapb->dev, frame->pf); if (frame->pf) { -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S1 -> S3\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S1 -> S3\n", lapb->dev); lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_3; @@ -212,14 +169,9 @@ static void lapb_state1_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_DM: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 RX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S1 RX DM(%d)\n", lapb->dev, frame->pf); if (frame->pf) { -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev); lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb_start_t1timer(lapb); @@ -242,34 +194,22 @@ static void lapb_state2_machine(struct lapb_cb *lapb, struct sk_buff *skb, switch (frame->type) { case LAPB_SABM: case LAPB_SABME: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S2 RX {SABM,SABME}(%d)\n", - lapb->dev, frame->pf); - printk(KERN_DEBUG "lapb: (%p) S2 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S2 RX {SABM,SABME}(%d)\n", + lapb->dev, frame->pf); + lapb_dbg(1, "(%p) S2 TX DM(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); break; case LAPB_DISC: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S2 RX DISC(%d)\n", - lapb->dev, frame->pf); - printk(KERN_DEBUG "lapb: (%p) S2 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S2 RX DISC(%d)\n", lapb->dev, frame->pf); + lapb_dbg(1, "(%p) S2 TX UA(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); break; case LAPB_UA: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S2 RX UA(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S2 RX UA(%d)\n", lapb->dev, frame->pf); if (frame->pf) { -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev); lapb->state = LAPB_STATE_0; lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); @@ -278,14 +218,9 @@ static void lapb_state2_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_DM: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S2 RX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S2 RX DM(%d)\n", lapb->dev, frame->pf); if (frame->pf) { -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev); lapb->state = LAPB_STATE_0; 
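Note on the conversion pattern in these lapb hunks: the lapb_dbg() calls replace the old open-coded "#if LAPB_DEBUG > n ... printk(KERN_DEBUG ...)" blocks with a single level-gated helper. Its definition is not part of the hunks shown here (it presumably lives in the LAPB headers), but a minimal sketch consistent with the call sites, where level 0/1/2 is compared against LAPB_DEBUG and output is routed through pr_debug() so the pr_fmt prefix added at the top of each file is applied, would be:

	/* illustrative sketch only; the real helper is defined outside these hunks */
	#define lapb_dbg(level, fmt, ...)			\
	do {							\
		if (level < LAPB_DEBUG)				\
			pr_debug(fmt, ##__VA_ARGS__);		\
	} while (0)

With this shape, lapb_dbg(1, ...) compiles away unless LAPB_DEBUG > 1, matching the preprocessor guards it replaces.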
lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); @@ -297,12 +232,9 @@ static void lapb_state2_machine(struct lapb_cb *lapb, struct sk_buff *skb, case LAPB_REJ: case LAPB_RNR: case LAPB_RR: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S2 RX {I,REJ,RNR,RR}(%d)\n", - lapb->dev, frame->pf); - printk(KERN_DEBUG "lapb: (%p) S2 RX DM(%d)\n", + lapb_dbg(1, "(%p) S2 RX {I,REJ,RNR,RR}(%d)\n", lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S2 RX DM(%d)\n", lapb->dev, frame->pf); if (frame->pf) lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); @@ -325,22 +257,15 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, switch (frame->type) { case LAPB_SABM: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX SABM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S3 RX SABM(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S3 TX DM(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S3 TX UA(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_stop_t1timer(lapb); @@ -355,15 +280,10 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_SABME: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX SABME(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S3 RX SABME(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S3 TX UA(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_stop_t1timer(lapb); @@ -375,23 +295,16 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, lapb->va = 0; lapb_requeue_frames(lapb); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S3 TX DM(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } break; case LAPB_DISC: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX DISC(%d)\n", - lapb->dev, frame->pf); -#endif -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S3 RX DISC(%d)\n", lapb->dev, frame->pf); + lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev); lapb_clear_queues(lapb); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_start_t1timer(lapb); @@ -401,13 +314,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_DM: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX DM(%d)\n", - lapb->dev, frame->pf); -#endif -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S3 RX DM(%d)\n", lapb->dev, frame->pf); + lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev); lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb_start_t1timer(lapb); @@ -416,10 +324,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_RNR: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX RNR(%d) R%d\n", - lapb->dev, frame->pf, frame->nr); -#endif + lapb_dbg(1, "(%p) S3 RX RNR(%d) R%d\n", + lapb->dev, frame->pf, 
frame->nr); lapb->condition |= LAPB_PEER_RX_BUSY_CONDITION; lapb_check_need_response(lapb, frame->cr, frame->pf); if (lapb_validate_nr(lapb, frame->nr)) { @@ -428,9 +334,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, lapb->frmr_data = *frame; lapb->frmr_type = LAPB_FRMR_Z; lapb_transmit_frmr(lapb); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_4; @@ -439,10 +343,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_RR: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX RR(%d) R%d\n", - lapb->dev, frame->pf, frame->nr); -#endif + lapb_dbg(1, "(%p) S3 RX RR(%d) R%d\n", + lapb->dev, frame->pf, frame->nr); lapb->condition &= ~LAPB_PEER_RX_BUSY_CONDITION; lapb_check_need_response(lapb, frame->cr, frame->pf); if (lapb_validate_nr(lapb, frame->nr)) { @@ -451,9 +353,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, lapb->frmr_data = *frame; lapb->frmr_type = LAPB_FRMR_Z; lapb_transmit_frmr(lapb); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_4; @@ -462,10 +362,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_REJ: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX REJ(%d) R%d\n", - lapb->dev, frame->pf, frame->nr); -#endif + lapb_dbg(1, "(%p) S3 RX REJ(%d) R%d\n", + lapb->dev, frame->pf, frame->nr); lapb->condition &= ~LAPB_PEER_RX_BUSY_CONDITION; lapb_check_need_response(lapb, frame->cr, frame->pf); if (lapb_validate_nr(lapb, frame->nr)) { @@ -477,9 +375,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, lapb->frmr_data = *frame; lapb->frmr_type = LAPB_FRMR_Z; lapb_transmit_frmr(lapb); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_4; @@ -488,17 +384,13 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_I: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX I(%d) S%d R%d\n", - lapb->dev, frame->pf, frame->ns, frame->nr); -#endif + lapb_dbg(1, "(%p) S3 RX I(%d) S%d R%d\n", + lapb->dev, frame->pf, frame->ns, frame->nr); if (!lapb_validate_nr(lapb, frame->nr)) { lapb->frmr_data = *frame; lapb->frmr_type = LAPB_FRMR_Z; lapb_transmit_frmr(lapb); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_4; @@ -522,7 +414,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, * a frame lost on the wire. 
*/ if (cn == NET_RX_DROP) { - printk(KERN_DEBUG "LAPB: rx congestion\n"); + pr_debug("rx congestion\n"); break; } lapb->vr = (lapb->vr + 1) % modulus; @@ -541,11 +433,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, if (frame->pf) lapb_enquiry_response(lapb); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG - "lapb: (%p) S3 TX REJ(%d) R%d\n", - lapb->dev, frame->pf, lapb->vr); -#endif + lapb_dbg(1, "(%p) S3 TX REJ(%d) R%d\n", + lapb->dev, frame->pf, lapb->vr); lapb->condition |= LAPB_REJECT_CONDITION; lapb_send_control(lapb, LAPB_REJ, frame->pf, LAPB_RESPONSE); @@ -555,31 +444,22 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_FRMR: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX FRMR(%d) %02X " - "%02X %02X %02X %02X\n", lapb->dev, frame->pf, - skb->data[0], skb->data[1], skb->data[2], - skb->data[3], skb->data[4]); -#endif + lapb_dbg(1, "(%p) S3 RX FRMR(%d) %02X %02X %02X %02X %02X\n", + lapb->dev, frame->pf, + skb->data[0], skb->data[1], skb->data[2], + skb->data[3], skb->data[4]); lapb_establish_data_link(lapb); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S1\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev); lapb_requeue_frames(lapb); lapb->state = LAPB_STATE_1; break; case LAPB_ILLEGAL: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX ILLEGAL(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S3 RX ILLEGAL(%d)\n", lapb->dev, frame->pf); lapb->frmr_data = *frame; lapb->frmr_type = LAPB_FRMR_W; lapb_transmit_frmr(lapb); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_4; @@ -600,25 +480,16 @@ static void lapb_state4_machine(struct lapb_cb *lapb, struct sk_buff *skb, { switch (frame->type) { case LAPB_SABM: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S4 RX SABM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S4 RX SABM(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S4 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S4 TX DM(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S4 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S4 -> S3\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S4 TX UA(%d)\n", + lapb->dev, frame->pf); + lapb_dbg(0, "(%p) S4 -> S3\n", lapb->dev); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_stop_t1timer(lapb); @@ -634,18 +505,11 @@ static void lapb_state4_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_SABME: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S4 RX SABME(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S4 RX SABME(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S4 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S4 -> S3\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S4 TX UA(%d)\n", + lapb->dev, frame->pf); + lapb_dbg(0, "(%p) S4 -> S3\n", lapb->dev); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_stop_t1timer(lapb); @@ -658,10 +522,8 @@ static void lapb_state4_machine(struct lapb_cb *lapb, struct sk_buff *skb, lapb->va = 
0; lapb_connect_indication(lapb, LAPB_OK); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S4 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S4 TX DM(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c index baab2760f65..ba4d015bd1a 100644 --- a/net/lapb/lapb_out.c +++ b/net/lapb/lapb_out.c @@ -14,6 +14,8 @@ * LAPB 002 Jonathan Naylor New timer architecture. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> @@ -60,10 +62,8 @@ static void lapb_send_iframe(struct lapb_cb *lapb, struct sk_buff *skb, int poll *frame |= lapb->vs << 1; } -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S%d TX I(%d) S%d R%d\n", - lapb->dev, lapb->state, poll_bit, lapb->vs, lapb->vr); -#endif + lapb_dbg(1, "(%p) S%d TX I(%d) S%d R%d\n", + lapb->dev, lapb->state, poll_bit, lapb->vs, lapb->vr); lapb_transmit_buffer(lapb, skb, LAPB_COMMAND); } @@ -148,11 +148,9 @@ void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *skb, int type) } } -#if LAPB_DEBUG > 2 - printk(KERN_DEBUG "lapb: (%p) S%d TX %02X %02X %02X\n", - lapb->dev, lapb->state, - skb->data[0], skb->data[1], skb->data[2]); -#endif + lapb_dbg(2, "(%p) S%d TX %02X %02X %02X\n", + lapb->dev, lapb->state, + skb->data[0], skb->data[1], skb->data[2]); if (!lapb_data_transmit(lapb, skb)) kfree_skb(skb); @@ -164,16 +162,10 @@ void lapb_establish_data_link(struct lapb_cb *lapb) lapb->n2count = 0; if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S%d TX SABME(1)\n", - lapb->dev, lapb->state); -#endif + lapb_dbg(1, "(%p) S%d TX SABME(1)\n", lapb->dev, lapb->state); lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S%d TX SABM(1)\n", - lapb->dev, lapb->state); -#endif + lapb_dbg(1, "(%p) S%d TX SABM(1)\n", lapb->dev, lapb->state); lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND); } @@ -183,10 +175,8 @@ void lapb_establish_data_link(struct lapb_cb *lapb) void lapb_enquiry_response(struct lapb_cb *lapb) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S%d TX RR(1) R%d\n", - lapb->dev, lapb->state, lapb->vr); -#endif + lapb_dbg(1, "(%p) S%d TX RR(1) R%d\n", + lapb->dev, lapb->state, lapb->vr); lapb_send_control(lapb, LAPB_RR, LAPB_POLLON, LAPB_RESPONSE); @@ -195,10 +185,8 @@ void lapb_enquiry_response(struct lapb_cb *lapb) void lapb_timeout_response(struct lapb_cb *lapb) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S%d TX RR(0) R%d\n", - lapb->dev, lapb->state, lapb->vr); -#endif + lapb_dbg(1, "(%p) S%d TX RR(0) R%d\n", + lapb->dev, lapb->state, lapb->vr); lapb_send_control(lapb, LAPB_RR, LAPB_POLLOFF, LAPB_RESPONSE); lapb->condition &= ~LAPB_ACK_PENDING_CONDITION; diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c index 066225b4e82..9d0a426eccb 100644 --- a/net/lapb/lapb_subr.c +++ b/net/lapb/lapb_subr.c @@ -13,6 +13,8 @@ * LAPB 001 Jonathan Naylor Started Coding */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> @@ -111,11 +113,9 @@ int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb, { frame->type = LAPB_ILLEGAL; -#if LAPB_DEBUG > 2 - printk(KERN_DEBUG "lapb: (%p) S%d RX %02X %02X %02X\n", - lapb->dev, lapb->state, - skb->data[0], skb->data[1], skb->data[2]); -#endif + lapb_dbg(2, "(%p) S%d RX %02X %02X %02X\n", + lapb->dev, 
lapb->state, + skb->data[0], skb->data[1], skb->data[2]); /* We always need to look at 2 bytes, sometimes we need * to look at 3 and those cases are handled below. @@ -284,12 +284,10 @@ void lapb_transmit_frmr(struct lapb_cb *lapb) dptr++; *dptr++ = lapb->frmr_type; -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S%d TX FRMR %02X %02X %02X %02X %02X\n", - lapb->dev, lapb->state, - skb->data[1], skb->data[2], skb->data[3], - skb->data[4], skb->data[5]); -#endif + lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X %02X %02X\n", + lapb->dev, lapb->state, + skb->data[1], skb->data[2], skb->data[3], + skb->data[4], skb->data[5]); } else { dptr = skb_put(skb, 4); *dptr++ = LAPB_FRMR; @@ -301,11 +299,9 @@ void lapb_transmit_frmr(struct lapb_cb *lapb) dptr++; *dptr++ = lapb->frmr_type; -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S%d TX FRMR %02X %02X %02X\n", - lapb->dev, lapb->state, skb->data[1], - skb->data[2], skb->data[3]); -#endif + lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X\n", + lapb->dev, lapb->state, skb->data[1], + skb->data[2], skb->data[3]); } lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE); diff --git a/net/lapb/lapb_timer.c b/net/lapb/lapb_timer.c index f8cd641dfc8..54563ad8aeb 100644 --- a/net/lapb/lapb_timer.c +++ b/net/lapb/lapb_timer.c @@ -14,6 +14,8 @@ * LAPB 002 Jonathan Naylor New timer architecture. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> @@ -105,21 +107,17 @@ static void lapb_t1timer_expiry(unsigned long param) lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb_disconnect_indication(lapb, LAPB_TIMEDOUT); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev); return; } else { lapb->n2count++; if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 TX SABME(1)\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S1 TX SABME(1)\n", + lapb->dev); lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 TX SABM(1)\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S1 TX SABM(1)\n", + lapb->dev); lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND); } } @@ -133,15 +131,11 @@ static void lapb_t1timer_expiry(unsigned long param) lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb_disconnect_confirmation(lapb, LAPB_TIMEDOUT); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev); return; } else { lapb->n2count++; -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S2 TX DISC(1)\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S2 TX DISC(1)\n", lapb->dev); lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND); } break; @@ -155,9 +149,7 @@ static void lapb_t1timer_expiry(unsigned long param) lapb->state = LAPB_STATE_0; lapb_stop_t2timer(lapb); lapb_disconnect_indication(lapb, LAPB_TIMEDOUT); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev); return; } else { lapb->n2count++; @@ -173,9 +165,7 @@ static void lapb_t1timer_expiry(unsigned long param) lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb_disconnect_indication(lapb, LAPB_TIMEDOUT); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S4 -> S0\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S4 -> S0\n", lapb->dev); return; } else { lapb->n2count++; diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c 
index b9bef2c7502..fe5453c3e71 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -71,8 +71,7 @@ static inline u16 llc_ui_next_link_no(int sap) */ static inline __be16 llc_proto_type(u16 arphrd) { - return arphrd == ARPHRD_IEEE802_TR ? - htons(ETH_P_TR_802_2) : htons(ETH_P_802_2); + return htons(ETH_P_802_2); } /** @@ -518,7 +517,7 @@ static int llc_ui_listen(struct socket *sock, int backlog) if (sock_flag(sk, SOCK_ZAPPED)) goto out; rc = 0; - if (!(unsigned)backlog) /* BSDism */ + if (!(unsigned int)backlog) /* BSDism */ backlog = 1; sk->sk_max_ack_backlog = backlog; if (sk->sk_state != TCP_LISTEN) { @@ -806,10 +805,9 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, sk_wait_data(sk, &timeo); if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) { - if (net_ratelimit()) - printk(KERN_DEBUG "LLC(%s:%d): Application " - "bug, race in MSG_PEEK.\n", - current->comm, task_pid_nr(current)); + net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n", + current->comm, + task_pid_nr(current)); peek_seq = llc->copied_seq; } continue; @@ -840,7 +838,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, if (!(flags & MSG_PEEK)) { spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); - sk_eat_skb(sk, skb, 0); + sk_eat_skb(sk, skb, false); spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); *seq = 0; } @@ -863,7 +861,7 @@ copy_uaddr: if (!(flags & MSG_PEEK)) { spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); - sk_eat_skb(sk, skb, 0); + sk_eat_skb(sk, skb, false); spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); *seq = 0; } diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index ba137a6a224..0d0d416dfab 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -828,7 +828,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb) else { dprintk("%s: adding to backlog...\n", __func__); llc_set_backlog_type(skb, LLC_PACKET); - if (sk_add_backlog(sk, skb)) + if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) goto drop_unlock; } out: diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c index b658cba89fd..2dae8a5df23 100644 --- a/net/llc/llc_output.c +++ b/net/llc/llc_output.c @@ -14,9 +14,7 @@ */ #include <linux/if_arp.h> -#include <linux/if_tr.h> #include <linux/netdevice.h> -#include <linux/trdevice.h> #include <linux/skbuff.h> #include <linux/export.h> #include <net/llc.h> @@ -37,7 +35,6 @@ int llc_mac_hdr_init(struct sk_buff *skb, int rc = -EINVAL; switch (skb->dev->type) { - case ARPHRD_IEEE802_TR: case ARPHRD_ETHER: case ARPHRD_LOOPBACK: rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa, diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index 94e7fca75b8..7c5073badc7 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c @@ -31,10 +31,6 @@ static int llc_mac_header_len(unsigned short devtype) case ARPHRD_ETHER: case ARPHRD_LOOPBACK: return sizeof(struct ethhdr); -#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) - case ARPHRD_IEEE802_TR: - return sizeof(struct trh_hdr); -#endif } return 0; } diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c index e2ebe358626..d75306b9c2f 100644 --- a/net/llc/sysctl_net_llc.c +++ b/net/llc/sysctl_net_llc.c @@ -7,6 +7,7 @@ #include <linux/mm.h> #include <linux/init.h> #include <linux/sysctl.h> +#include <net/net_namespace.h> #include <net/llc.h> #ifndef CONFIG_SYSCTL @@ -56,48 +57,29 @@ static struct ctl_table llc_station_table[] = { { }, }; -static struct ctl_table llc2_dir_timeout_table[] = { - { - .procname = "timeout", - .mode = 0555, - 
.child = llc2_timeout_table, - }, - { }, -}; - -static struct ctl_table llc_table[] = { - { - .procname = "llc2", - .mode = 0555, - .child = llc2_dir_timeout_table, - }, - { - .procname = "station", - .mode = 0555, - .child = llc_station_table, - }, - { }, -}; - -static struct ctl_path llc_path[] = { - { .procname = "net", }, - { .procname = "llc", }, - { } -}; - -static struct ctl_table_header *llc_table_header; +static struct ctl_table_header *llc2_timeout_header; +static struct ctl_table_header *llc_station_header; int __init llc_sysctl_init(void) { - llc_table_header = register_sysctl_paths(llc_path, llc_table); + llc2_timeout_header = register_net_sysctl(&init_net, "net/llc/llc2/timeout", llc2_timeout_table); + llc_station_header = register_net_sysctl(&init_net, "net/llc/station", llc_station_table); - return llc_table_header ? 0 : -ENOMEM; + if (!llc2_timeout_header || !llc_station_header) { + llc_sysctl_exit(); + return -ENOMEM; + } + return 0; } void llc_sysctl_exit(void) { - if (llc_table_header) { - unregister_sysctl_table(llc_table_header); - llc_table_header = NULL; + if (llc2_timeout_header) { + unregister_net_sysctl_table(llc2_timeout_header); + llc2_timeout_header = NULL; + } + if (llc_station_header) { + unregister_net_sysctl_table(llc_station_header); + llc_station_header = NULL; } } diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index 96ddb72760b..8d249d70598 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig @@ -225,6 +225,17 @@ config MAC80211_VERBOSE_MHWMP_DEBUG Do not select this option. +config MAC80211_VERBOSE_MESH_SYNC_DEBUG + bool "Verbose mesh mesh synchronization debugging" + depends on MAC80211_DEBUG_MENU + depends on MAC80211_MESH + ---help--- + Selecting this option causes mac80211 to print out very verbose mesh + synchronization debugging messages (when mac80211 is taking part in a + mesh network). + + Do not select this option. + config MAC80211_VERBOSE_TDLS_DEBUG bool "Verbose TDLS debugging" depends on MAC80211_DEBUG_MENU diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index 1be7a454aa7..3e9d931bba3 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile @@ -38,7 +38,8 @@ mac80211-$(CONFIG_MAC80211_MESH) += \ mesh.o \ mesh_pathtbl.o \ mesh_plink.o \ - mesh_hwmp.o + mesh_hwmp.o \ + mesh_sync.o mac80211-$(CONFIG_PM) += pm.o diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 64d3ce5ea1a..26ddb699d69 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -142,6 +142,18 @@ static void sta_rx_agg_session_timer_expired(unsigned long data) u8 *timer_to_id = ptid - *ptid; struct sta_info *sta = container_of(timer_to_id, struct sta_info, timer_to_tid[0]); + struct tid_ampdu_rx *tid_rx; + unsigned long timeout; + + tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]); + if (!tid_rx) + return; + + timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout); + if (time_is_after_jiffies(timeout)) { + mod_timer(&tid_rx->session_timer, timeout); + return; + } #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); @@ -248,11 +260,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, (buf_size > IEEE80211_MAX_AMPDU_BUF)) { status = WLAN_STATUS_INVALID_QOS_PARAM; #ifdef CONFIG_MAC80211_HT_DEBUG - if (net_ratelimit()) - printk(KERN_DEBUG "AddBA Req with bad params from " - "%pM on tid %u. policy %d, buffer size %d\n", - mgmt->sa, tid, ba_policy, - buf_size); + net_dbg_ratelimited("AddBA Req with bad params from %pM on tid %u. 
policy %d, buffer size %d\n", + mgmt->sa, tid, ba_policy, buf_size); #endif /* CONFIG_MAC80211_HT_DEBUG */ goto end_no_lock; } @@ -269,10 +278,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, if (sta->ampdu_mlme.tid_rx[tid]) { #ifdef CONFIG_MAC80211_HT_DEBUG - if (net_ratelimit()) - printk(KERN_DEBUG "unexpected AddBA Req from " - "%pM on tid %u\n", - mgmt->sa, tid); + net_dbg_ratelimited("unexpected AddBA Req from %pM on tid %u\n", + mgmt->sa, tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ /* delete existing Rx BA session on the same tid */ @@ -291,7 +298,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, /* rx timer */ tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired; tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid]; - init_timer(&tid_agg_rx->session_timer); + init_timer_deferrable(&tid_agg_rx->session_timer); /* rx reorder timer */ tid_agg_rx->reorder_timer.function = sta_rx_agg_reorder_timer_expired; @@ -335,8 +342,10 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, /* activate it for RX */ rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx); - if (timeout) + if (timeout) { mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout)); + tid_agg_rx->last_rx = jiffies; + } end: mutex_unlock(&sta->ampdu_mlme.mtx); diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 76be6174419..5b7053c5873 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -286,25 +286,25 @@ static inline int ieee80211_ac_from_tid(int tid) * a global "agg_queue_stop" refcount. */ static void __acquires(agg_queue) -ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid) +ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid) { - int queue = ieee80211_ac_from_tid(tid); + int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)]; - if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1) + if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1) ieee80211_stop_queue_by_reason( - &local->hw, queue, + &sdata->local->hw, queue, IEEE80211_QUEUE_STOP_REASON_AGGREGATION); __acquire(agg_queue); } static void __releases(agg_queue) -ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid) +ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid) { - int queue = ieee80211_ac_from_tid(tid); + int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)]; - if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0) + if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0) ieee80211_wake_queue_by_reason( - &local->hw, queue, + &sdata->local->hw, queue, IEEE80211_QUEUE_STOP_REASON_AGGREGATION); __release(agg_queue); } @@ -314,13 +314,14 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid) * requires a call to ieee80211_agg_splice_finish later */ static void __acquires(agg_queue) -ieee80211_agg_splice_packets(struct ieee80211_local *local, +ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata, struct tid_ampdu_tx *tid_tx, u16 tid) { - int queue = ieee80211_ac_from_tid(tid); + struct ieee80211_local *local = sdata->local; + int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)]; unsigned long flags; - ieee80211_stop_queue_agg(local, tid); + ieee80211_stop_queue_agg(sdata, tid); if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates" " from the pending queue\n", tid)) @@ -336,9 +337,9 @@ ieee80211_agg_splice_packets(struct ieee80211_local *local, } static void __releases(agg_queue) 
-ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid) +ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid) { - ieee80211_wake_queue_agg(local, tid); + ieee80211_wake_queue_agg(sdata, tid); } void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) @@ -376,9 +377,9 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) " tid %d\n", tid); #endif spin_lock_bh(&sta->lock); - ieee80211_agg_splice_packets(local, tid_tx, tid); + ieee80211_agg_splice_packets(sdata, tid_tx, tid); ieee80211_assign_tid_tx(sta, tid, NULL); - ieee80211_agg_splice_finish(local, tid); + ieee80211_agg_splice_finish(sdata, tid); spin_unlock_bh(&sta->lock); kfree_rcu(tid_tx, rcu_head); @@ -417,6 +418,18 @@ static void sta_tx_agg_session_timer_expired(unsigned long data) u8 *timer_to_id = ptid - *ptid; struct sta_info *sta = container_of(timer_to_id, struct sta_info, timer_to_tid[0]); + struct tid_ampdu_tx *tid_tx; + unsigned long timeout; + + tid_tx = rcu_dereference_protected_tid_tx(sta, *ptid); + if (!tid_tx) + return; + + timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout); + if (time_is_after_jiffies(timeout)) { + mod_timer(&tid_tx->session_timer, timeout); + return; + } #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid); @@ -542,7 +555,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, /* tx timer */ tid_tx->session_timer.function = sta_tx_agg_session_timer_expired; tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid]; - init_timer(&tid_tx->session_timer); + init_timer_deferrable(&tid_tx->session_timer); /* assign a dialog token */ sta->ampdu_mlme.dialog_token_allocator++; @@ -586,14 +599,14 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local, */ spin_lock_bh(&sta->lock); - ieee80211_agg_splice_packets(local, tid_tx, tid); + ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid); /* * Now mark as operational. This will be visible * in the TX path, and lets it go lock-free in * the common case. */ set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state); - ieee80211_agg_splice_finish(local, tid); + ieee80211_agg_splice_finish(sta->sdata, tid); spin_unlock_bh(&sta->lock); } @@ -778,12 +791,12 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) * more. 
*/ - ieee80211_agg_splice_packets(local, tid_tx, tid); + ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid); /* future packets must not find the tid_tx struct any more */ ieee80211_assign_tid_tx(sta, tid, NULL); - ieee80211_agg_splice_finish(local, tid); + ieee80211_agg_splice_finish(sta->sdata, tid); kfree_rcu(tid_tx, rcu_head); @@ -884,9 +897,11 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, sta->ampdu_mlme.addba_req_num[tid] = 0; - if (tid_tx->timeout) + if (tid_tx->timeout) { mod_timer(&tid_tx->session_timer, TU_TO_EXP_TIME(tid_tx->timeout)); + tid_tx->last_tx = jiffies; + } } else { ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR, diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 677d6592978..495831ee48f 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -412,6 +412,10 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) sinfo->llid = le16_to_cpu(sta->llid); sinfo->plid = le16_to_cpu(sta->plid); sinfo->plink_state = sta->plink_state; + if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { + sinfo->filled |= STATION_INFO_T_OFFSET; + sinfo->t_offset = sta->t_offset; + } #endif } @@ -446,6 +450,180 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER); } +static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = { + "rx_packets", "rx_bytes", "wep_weak_iv_count", + "rx_duplicates", "rx_fragments", "rx_dropped", + "tx_packets", "tx_bytes", "tx_fragments", + "tx_filtered", "tx_retry_failed", "tx_retries", + "beacon_loss", "sta_state", "txrate", "rxrate", "signal", + "channel", "noise", "ch_time", "ch_time_busy", + "ch_time_ext_busy", "ch_time_rx", "ch_time_tx" +}; +#define STA_STATS_LEN ARRAY_SIZE(ieee80211_gstrings_sta_stats) + +static int ieee80211_get_et_sset_count(struct wiphy *wiphy, + struct net_device *dev, + int sset) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + int rv = 0; + + if (sset == ETH_SS_STATS) + rv += STA_STATS_LEN; + + rv += drv_get_et_sset_count(sdata, sset); + + if (rv == 0) + return -EOPNOTSUPP; + return rv; +} + +static void ieee80211_get_et_stats(struct wiphy *wiphy, + struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct sta_info *sta; + struct ieee80211_local *local = sdata->local; + struct station_info sinfo; + struct survey_info survey; + int i, q; +#define STA_STATS_SURVEY_LEN 7 + + memset(data, 0, sizeof(u64) * STA_STATS_LEN); + +#define ADD_STA_STATS(sta) \ + do { \ + data[i++] += sta->rx_packets; \ + data[i++] += sta->rx_bytes; \ + data[i++] += sta->wep_weak_iv_count; \ + data[i++] += sta->num_duplicates; \ + data[i++] += sta->rx_fragments; \ + data[i++] += sta->rx_dropped; \ + \ + data[i++] += sta->tx_packets; \ + data[i++] += sta->tx_bytes; \ + data[i++] += sta->tx_fragments; \ + data[i++] += sta->tx_filtered_count; \ + data[i++] += sta->tx_retry_failed; \ + data[i++] += sta->tx_retry_count; \ + data[i++] += sta->beacon_loss_count; \ + } while (0) + + /* For Managed stations, find the single station based on BSSID + * and use that. For interface types, iterate through all available + * stations and add stats for any station that is assigned to this + * network device. 
+ */ + + rcu_read_lock(); + + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + sta = sta_info_get_bss(sdata, sdata->u.mgd.bssid); + + if (!(sta && !WARN_ON(sta->sdata->dev != dev))) + goto do_survey; + + i = 0; + ADD_STA_STATS(sta); + + data[i++] = sta->sta_state; + + sinfo.filled = 0; + sta_set_sinfo(sta, &sinfo); + + if (sinfo.filled | STATION_INFO_TX_BITRATE) + data[i] = 100000 * + cfg80211_calculate_bitrate(&sinfo.txrate); + i++; + if (sinfo.filled | STATION_INFO_RX_BITRATE) + data[i] = 100000 * + cfg80211_calculate_bitrate(&sinfo.rxrate); + i++; + + if (sinfo.filled | STATION_INFO_SIGNAL_AVG) + data[i] = (u8)sinfo.signal_avg; + i++; + } else { + list_for_each_entry_rcu(sta, &local->sta_list, list) { + /* Make sure this station belongs to the proper dev */ + if (sta->sdata->dev != dev) + continue; + + i = 0; + ADD_STA_STATS(sta); + } + } + +do_survey: + i = STA_STATS_LEN - STA_STATS_SURVEY_LEN; + /* Get survey stats for current channel */ + q = 0; + while (true) { + survey.filled = 0; + if (drv_get_survey(local, q, &survey) != 0) { + survey.filled = 0; + break; + } + + if (survey.channel && + (local->oper_channel->center_freq == + survey.channel->center_freq)) + break; + q++; + } + + if (survey.filled) + data[i++] = survey.channel->center_freq; + else + data[i++] = 0; + if (survey.filled & SURVEY_INFO_NOISE_DBM) + data[i++] = (u8)survey.noise; + else + data[i++] = -1LL; + if (survey.filled & SURVEY_INFO_CHANNEL_TIME) + data[i++] = survey.channel_time; + else + data[i++] = -1LL; + if (survey.filled & SURVEY_INFO_CHANNEL_TIME_BUSY) + data[i++] = survey.channel_time_busy; + else + data[i++] = -1LL; + if (survey.filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY) + data[i++] = survey.channel_time_ext_busy; + else + data[i++] = -1LL; + if (survey.filled & SURVEY_INFO_CHANNEL_TIME_RX) + data[i++] = survey.channel_time_rx; + else + data[i++] = -1LL; + if (survey.filled & SURVEY_INFO_CHANNEL_TIME_TX) + data[i++] = survey.channel_time_tx; + else + data[i++] = -1LL; + + rcu_read_unlock(); + + if (WARN_ON(i != STA_STATS_LEN)) + return; + + drv_get_et_stats(sdata, stats, &(data[STA_STATS_LEN])); +} + +static void ieee80211_get_et_strings(struct wiphy *wiphy, + struct net_device *dev, + u32 sset, u8 *data) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + int sz_sta_stats = 0; + + if (sset == ETH_SS_STATS) { + sz_sta_stats = sizeof(ieee80211_gstrings_sta_stats); + memcpy(data, *ieee80211_gstrings_sta_stats, sz_sta_stats); + } + drv_get_et_strings(sdata, sset, &(data[sz_sta_stats])); +} static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo) @@ -640,6 +818,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, ieee80211_bss_info_change_notify(sdata, changed); + netif_carrier_on(dev); + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) + netif_carrier_on(vlan->dev); + return 0; } @@ -665,7 +847,7 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev, static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) { - struct ieee80211_sub_if_data *sdata; + struct ieee80211_sub_if_data *sdata, *vlan; struct beacon_data *old; sdata = IEEE80211_DEV_TO_SUB_IF(dev); @@ -674,6 +856,10 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) if (!old) return -ENOENT; + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) + netif_carrier_off(vlan->dev); + netif_carrier_off(dev); + RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); 
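A note on the ethtool statistics path added above: sinfo.filled is a bitmask of STATION_INFO_* flags, so the txrate/rxrate/signal fields would normally be tested with a bitwise AND; as written, "sinfo.filled | STATION_INFO_TX_BITRATE" is always non-zero. A sketch of the membership test the surrounding code appears to intend, reusing the names from the hunk:

	/* illustrative only; mirrors the fields that sta_set_sinfo() fills */
	if (sinfo.filled & STATION_INFO_TX_BITRATE)
		data[i] = 100000 *
			cfg80211_calculate_bitrate(&sinfo.txrate);
	i++;

Once the get_et_sset_count/get_et_strings/get_et_stats callbacks are wired into mac80211_config_ops (later in this patch), these counters become visible through the regular ethtool statistics interface (for example "ethtool -S <iface>"), named by the ieee80211_gstrings_sta_stats table.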
kfree_rcu(old, rcu_head); @@ -907,7 +1093,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, } else sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (compare_ether_addr(mac, sdata->vif.addr) == 0) + if (ether_addr_equal(mac, sdata->vif.addr)) return -EINVAL; if (is_multicast_ether_addr(mac)) @@ -993,6 +1179,9 @@ static int ieee80211_change_station(struct wiphy *wiphy, } if (params->vlan && params->vlan != sta->sdata->dev) { + bool prev_4addr = false; + bool new_4addr = false; + vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN && @@ -1008,9 +1197,25 @@ static int ieee80211_change_station(struct wiphy *wiphy, } rcu_assign_pointer(vlansdata->u.vlan.sta, sta); + new_4addr = true; + } + + if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && + sta->sdata->u.vlan.sta) { + rcu_assign_pointer(sta->sdata->u.vlan.sta, NULL); + prev_4addr = true; } sta->sdata = vlansdata; + + if (sta->sta_state == IEEE80211_STA_AUTHORIZED && + prev_4addr != new_4addr) { + if (new_4addr) + atomic_dec(&sta->sdata->bss->num_mcast_sta); + else + atomic_inc(&sta->sdata->bss->num_mcast_sta); + } + ieee80211_send_layer2_update(sta); } @@ -1235,6 +1440,7 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh, /* now copy the rest of the setup parameters */ ifmsh->mesh_id_len = setup->mesh_id_len; memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len); + ifmsh->mesh_sp_id = setup->sync_method; ifmsh->mesh_pp_id = setup->path_sel_proto; ifmsh->mesh_pm_id = setup->path_metric; ifmsh->security = IEEE80211_MESH_SEC_NONE; @@ -1279,6 +1485,9 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy, conf->dot11MeshTTL = nconf->element_ttl; if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask)) conf->auto_open_plinks = nconf->auto_open_plinks; + if (_chg_mesh_attr(NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, mask)) + conf->dot11MeshNbrOffsetMaxNeighbor = + nconf->dot11MeshNbrOffsetMaxNeighbor; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, mask)) conf->dot11MeshHWMPmaxPREQretries = nconf->dot11MeshHWMPmaxPREQretries; @@ -1329,6 +1538,11 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy, return -ENOTSUPP; conf->rssi_threshold = nconf->rssi_threshold; } + if (_chg_mesh_attr(NL80211_MESHCONF_HT_OPMODE, mask)) { + conf->ht_opmode = nconf->ht_opmode; + sdata->vif.bss_conf.ht_operation_mode = nconf->ht_opmode; + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT); + } return 0; } @@ -1437,6 +1651,9 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy, if (!local->ops->conf_tx) return -EOPNOTSUPP; + if (local->hw.queues < IEEE80211_NUM_ACS) + return -EOPNOTSUPP; + memset(&p, 0, sizeof(p)); p.aifs = params->aifs; p.cw_max = params->cwmax; @@ -1449,14 +1666,11 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy, */ p.uapsd = false; - if (params->queue >= local->hw.queues) - return -EINVAL; - - sdata->tx_conf[params->queue] = p; - if (drv_conf_tx(local, sdata, params->queue, &p)) { + sdata->tx_conf[params->ac] = p; + if (drv_conf_tx(local, sdata, params->ac, &p)) { wiphy_debug(local->hw.wiphy, - "failed to set TX queue parameters for queue %d\n", - params->queue); + "failed to set TX queue parameters for AC %d\n", + params->ac); return -EINVAL; } @@ -2090,6 +2304,10 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, IEEE80211_SKB_CB(skb)->flags = flags; + if (flags & IEEE80211_TX_CTL_TX_OFFCHAN) + IEEE80211_SKB_CB(skb)->hw_queue = + local->hw.offchannel_tx_hw_queue; + skb->dev = 
sdata->dev; *cookie = (unsigned long) skb; @@ -2131,6 +2349,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, /* modify cookie to prevent API mismatches */ *cookie ^= 2; IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN; + IEEE80211_SKB_CB(skb)->hw_queue = + local->hw.offchannel_tx_hw_queue; local->hw_roc_skb = skb; local->hw_roc_skb_for_status = skb; mutex_unlock(&local->mtx); @@ -2350,8 +2570,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev, tf->u.setup_req.capability = cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); - ieee80211_add_srates_ie(&sdata->vif, skb); - ieee80211_add_ext_srates_ie(&sdata->vif, skb); + ieee80211_add_srates_ie(&sdata->vif, skb, false); + ieee80211_add_ext_srates_ie(&sdata->vif, skb, false); ieee80211_tdls_add_ext_capab(skb); break; case WLAN_TDLS_SETUP_RESPONSE: @@ -2364,8 +2584,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev, tf->u.setup_resp.capability = cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); - ieee80211_add_srates_ie(&sdata->vif, skb); - ieee80211_add_ext_srates_ie(&sdata->vif, skb); + ieee80211_add_srates_ie(&sdata->vif, skb, false); + ieee80211_add_ext_srates_ie(&sdata->vif, skb, false); ieee80211_tdls_add_ext_capab(skb); break; case WLAN_TDLS_SETUP_CONFIRM: @@ -2425,8 +2645,8 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev, mgmt->u.action.u.tdls_discover_resp.capability = cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); - ieee80211_add_srates_ie(&sdata->vif, skb); - ieee80211_add_ext_srates_ie(&sdata->vif, skb); + ieee80211_add_srates_ie(&sdata->vif, skb, false); + ieee80211_add_ext_srates_ie(&sdata->vif, skb, false); ieee80211_tdls_add_ext_capab(skb); break; default: @@ -2666,13 +2886,22 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, } static struct ieee80211_channel * -ieee80211_wiphy_get_channel(struct wiphy *wiphy) +ieee80211_wiphy_get_channel(struct wiphy *wiphy, + enum nl80211_channel_type *type) { struct ieee80211_local *local = wiphy_priv(wiphy); + *type = local->_oper_channel_type; return local->oper_channel; } +#ifdef CONFIG_PM +static void ieee80211_set_wakeup(struct wiphy *wiphy, bool enabled) +{ + drv_set_wakeup(wiphy_priv(wiphy), enabled); +} +#endif + struct cfg80211_ops mac80211_config_ops = { .add_virtual_intf = ieee80211_add_iface, .del_virtual_intf = ieee80211_del_iface, @@ -2741,4 +2970,10 @@ struct cfg80211_ops mac80211_config_ops = { .probe_client = ieee80211_probe_client, .get_channel = ieee80211_wiphy_get_channel, .set_noack_map = ieee80211_set_noack_map, +#ifdef CONFIG_PM + .set_wakeup = ieee80211_set_wakeup, +#endif + .get_et_sset_count = ieee80211_get_et_sset_count, + .get_et_stats = ieee80211_get_et_stats, + .get_et_strings = ieee80211_get_et_strings, }; diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index e00ce8c3e28..c76cf7230c7 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -135,29 +135,3 @@ bool ieee80211_set_channel_type(struct ieee80211_local *local, return result; } - -/* - * ieee80211_get_tx_channel_type returns the channel type we should - * use for packet transmission, given the channel capability and - * whatever regulatory flags we have been given. 
- */ -enum nl80211_channel_type ieee80211_get_tx_channel_type( - struct ieee80211_local *local, - enum nl80211_channel_type channel_type) -{ - switch (channel_type) { - case NL80211_CHAN_HT40PLUS: - if (local->hw.conf.channel->flags & - IEEE80211_CHAN_NO_HT40PLUS) - return NL80211_CHAN_HT20; - break; - case NL80211_CHAN_HT40MINUS: - if (local->hw.conf.channel->flags & - IEEE80211_CHAN_NO_HT40MINUS) - return NL80211_CHAN_HT20; - break; - default: - break; - } - return channel_type; -} diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 30f99c34484..ea0122dbd2b 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -394,7 +394,7 @@ static ssize_t ieee80211_if_parse_uapsd_max_sp_len( __IEEE80211_IF_FILE_W(uapsd_max_sp_len); /* AP attributes */ -IEEE80211_IF_FILE(num_sta_authorized, u.ap.num_sta_authorized, ATOMIC); +IEEE80211_IF_FILE(num_mcast_sta, u.ap.num_mcast_sta, ATOMIC); IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC); @@ -424,6 +424,7 @@ static ssize_t ieee80211_if_parse_tsf( struct ieee80211_local *local = sdata->local; unsigned long long tsf; int ret; + int tsf_is_delta = 0; if (strncmp(buf, "reset", 5) == 0) { if (local->ops->reset_tsf) { @@ -431,9 +432,20 @@ static ssize_t ieee80211_if_parse_tsf( wiphy_info(local->hw.wiphy, "debugfs reset TSF\n"); } } else { + if (buflen > 10 && buf[1] == '=') { + if (buf[0] == '+') + tsf_is_delta = 1; + else if (buf[0] == '-') + tsf_is_delta = -1; + else + return -EINVAL; + buf += 2; + } ret = kstrtoull(buf, 10, &tsf); if (ret < 0) return -EINVAL; + if (tsf_is_delta) + tsf = drv_get_tsf(local, sdata) + tsf_is_delta * tsf; if (local->ops->set_tsf) { drv_set_tsf(local, sdata, tsf); wiphy_info(local->hw.wiphy, @@ -499,26 +511,23 @@ IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC); IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC); #endif - -#define DEBUGFS_ADD(name) \ - debugfs_create_file(#name, 0400, sdata->debugfs.dir, \ - sdata, &name##_ops); - #define DEBUGFS_ADD_MODE(name, mode) \ debugfs_create_file(#name, mode, sdata->debugfs.dir, \ sdata, &name##_ops); -static void add_sta_files(struct ieee80211_sub_if_data *sdata) +#define DEBUGFS_ADD(name) DEBUGFS_ADD_MODE(name, 0400) + +static void add_common_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD(drop_unencrypted); - DEBUGFS_ADD(flags); - DEBUGFS_ADD(state); - DEBUGFS_ADD(channel_type); DEBUGFS_ADD(rc_rateidx_mask_2ghz); DEBUGFS_ADD(rc_rateidx_mask_5ghz); DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz); DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz); +} +static void add_sta_files(struct ieee80211_sub_if_data *sdata) +{ DEBUGFS_ADD(bssid); DEBUGFS_ADD(aid); DEBUGFS_ADD(last_beacon); @@ -531,16 +540,7 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata) static void add_ap_files(struct ieee80211_sub_if_data *sdata) { - DEBUGFS_ADD(drop_unencrypted); - DEBUGFS_ADD(flags); - DEBUGFS_ADD(state); - DEBUGFS_ADD(channel_type); - DEBUGFS_ADD(rc_rateidx_mask_2ghz); - DEBUGFS_ADD(rc_rateidx_mask_5ghz); - DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz); - DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz); - - DEBUGFS_ADD(num_sta_authorized); + DEBUGFS_ADD(num_mcast_sta); DEBUGFS_ADD(num_sta_ps); DEBUGFS_ADD(dtim_count); DEBUGFS_ADD(num_buffered_multicast); @@ -549,48 +549,14 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata) static void add_ibss_files(struct ieee80211_sub_if_data *sdata) { - DEBUGFS_ADD(channel_type); - 
DEBUGFS_ADD(rc_rateidx_mask_2ghz); - DEBUGFS_ADD(rc_rateidx_mask_5ghz); - DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz); - DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz); - DEBUGFS_ADD_MODE(tsf, 0600); } static void add_wds_files(struct ieee80211_sub_if_data *sdata) { - DEBUGFS_ADD(drop_unencrypted); - DEBUGFS_ADD(flags); - DEBUGFS_ADD(state); - DEBUGFS_ADD(channel_type); - DEBUGFS_ADD(rc_rateidx_mask_2ghz); - DEBUGFS_ADD(rc_rateidx_mask_5ghz); - DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz); - DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz); - DEBUGFS_ADD(peer); } -static void add_vlan_files(struct ieee80211_sub_if_data *sdata) -{ - DEBUGFS_ADD(drop_unencrypted); - DEBUGFS_ADD(flags); - DEBUGFS_ADD(state); - DEBUGFS_ADD(channel_type); - DEBUGFS_ADD(rc_rateidx_mask_2ghz); - DEBUGFS_ADD(rc_rateidx_mask_5ghz); - DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz); - DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz); -} - -static void add_monitor_files(struct ieee80211_sub_if_data *sdata) -{ - DEBUGFS_ADD(flags); - DEBUGFS_ADD(state); - DEBUGFS_ADD(channel_type); -} - #ifdef CONFIG_MAC80211_MESH static void add_mesh_files(struct ieee80211_sub_if_data *sdata) @@ -651,6 +617,13 @@ static void add_files(struct ieee80211_sub_if_data *sdata) if (!sdata->debugfs.dir) return; + DEBUGFS_ADD(flags); + DEBUGFS_ADD(state); + DEBUGFS_ADD(channel_type); + + if (sdata->vif.type != NL80211_IFTYPE_MONITOR) + add_common_files(sdata); + switch (sdata->vif.type) { case NL80211_IFTYPE_MESH_POINT: #ifdef CONFIG_MAC80211_MESH @@ -671,12 +644,6 @@ static void add_files(struct ieee80211_sub_if_data *sdata) case NL80211_IFTYPE_WDS: add_wds_files(sdata); break; - case NL80211_IFTYPE_MONITOR: - add_monitor_files(sdata); - break; - case NL80211_IFTYPE_AP_VLAN: - add_vlan_files(sdata); - break; default: break; } diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 832b2da5e4c..5ccec2c1e9f 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -63,7 +63,7 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf, test_sta_flag(sta, WLAN_STA_##flg) ? 
#flg "\n" : "" int res = scnprintf(buf, sizeof(buf), - "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", + "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", TEST(AUTH), TEST(ASSOC), TEST(PS_STA), TEST(PS_DRIVER), TEST(AUTHORIZED), TEST(SHORT_PREAMBLE), @@ -71,7 +71,8 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf, TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL), TEST(UAPSD), TEST(SP), TEST(TDLS_PEER), TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT), - TEST(INSERTED), TEST(RATE_CONTROL)); + TEST(INSERTED), TEST(RATE_CONTROL), + TEST(TOFFSET_KNOWN)); #undef TEST return simple_read_from_buffer(userbuf, count, ppos, buf, res); } diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index af4691fed64..6d33a0c743a 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -7,7 +7,9 @@ static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata) { - WARN_ON(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER)); + WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER), + "%s: Failed check-sdata-in-driver check, flags: 0x%x\n", + sdata->dev->name, sdata->flags); } static inline struct ieee80211_sub_if_data * @@ -33,6 +35,43 @@ static inline void drv_tx_frags(struct ieee80211_local *local, local->ops->tx_frags(&local->hw, vif, sta, skbs); } +static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata, + u32 sset, u8 *data) +{ + struct ieee80211_local *local = sdata->local; + if (local->ops->get_et_strings) { + trace_drv_get_et_strings(local, sset); + local->ops->get_et_strings(&local->hw, &sdata->vif, sset, data); + trace_drv_return_void(local); + } +} + +static inline void drv_get_et_stats(struct ieee80211_sub_if_data *sdata, + struct ethtool_stats *stats, + u64 *data) +{ + struct ieee80211_local *local = sdata->local; + if (local->ops->get_et_stats) { + trace_drv_get_et_stats(local); + local->ops->get_et_stats(&local->hw, &sdata->vif, stats, data); + trace_drv_return_void(local); + } +} + +static inline int drv_get_et_sset_count(struct ieee80211_sub_if_data *sdata, + int sset) +{ + struct ieee80211_local *local = sdata->local; + int rv = 0; + if (local->ops->get_et_sset_count) { + trace_drv_get_et_sset_count(local, sset); + rv = local->ops->get_et_sset_count(&local->hw, &sdata->vif, + sset); + trace_drv_return_int(local, rv); + } + return rv; +} + static inline int drv_start(struct ieee80211_local *local) { int ret; @@ -89,6 +128,19 @@ static inline int drv_resume(struct ieee80211_local *local) trace_drv_return_int(local, ret); return ret; } + +static inline void drv_set_wakeup(struct ieee80211_local *local, + bool enabled) +{ + might_sleep(); + + if (!local->ops->set_wakeup) + return; + + trace_drv_set_wakeup(local, enabled); + local->ops->set_wakeup(&local->hw, enabled); + trace_drv_return_void(local); +} #endif static inline int drv_add_interface(struct ieee80211_local *local, @@ -99,7 +151,8 @@ static inline int drv_add_interface(struct ieee80211_local *local, might_sleep(); if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN || - sdata->vif.type == NL80211_IFTYPE_MONITOR)) + (sdata->vif.type == NL80211_IFTYPE_MONITOR && + !(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)))) return -EINVAL; trace_drv_add_interface(local, sdata); @@ -474,8 +527,23 @@ int drv_sta_state(struct ieee80211_local *local, return ret; } +static inline void drv_sta_rc_update(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, u32 changed) +{ + sdata = get_bss_sdata(sdata); + check_sdata_in_driver(sdata); + + 
trace_drv_sta_rc_update(local, sdata, sta, changed); + if (local->ops->sta_rc_update) + local->ops->sta_rc_update(&local->hw, &sdata->vif, + sta, changed); + + trace_drv_return_void(local); +} + static inline int drv_conf_tx(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata, u16 queue, + struct ieee80211_sub_if_data *sdata, u16 ac, const struct ieee80211_tx_queue_params *params) { int ret = -EOPNOTSUPP; @@ -484,10 +552,10 @@ static inline int drv_conf_tx(struct ieee80211_local *local, check_sdata_in_driver(sdata); - trace_drv_conf_tx(local, sdata, queue, params); + trace_drv_conf_tx(local, sdata, ac, params); if (local->ops->conf_tx) ret = local->ops->conf_tx(&local->hw, &sdata->vif, - queue, params); + ac, params); trace_drv_return_int(local, ret); return ret; } diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h index 21d6f5290a1..6de00b2c268 100644 --- a/net/mac80211/driver-trace.h +++ b/net/mac80211/driver-trace.h @@ -161,6 +161,21 @@ DEFINE_EVENT(local_only_evt, drv_start, TP_ARGS(local) ); +DEFINE_EVENT(local_u32_evt, drv_get_et_strings, + TP_PROTO(struct ieee80211_local *local, u32 sset), + TP_ARGS(local, sset) +); + +DEFINE_EVENT(local_u32_evt, drv_get_et_sset_count, + TP_PROTO(struct ieee80211_local *local, u32 sset), + TP_ARGS(local, sset) +); + +DEFINE_EVENT(local_only_evt, drv_get_et_stats, + TP_PROTO(struct ieee80211_local *local), + TP_ARGS(local) +); + DEFINE_EVENT(local_only_evt, drv_suspend, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local) @@ -171,6 +186,20 @@ DEFINE_EVENT(local_only_evt, drv_resume, TP_ARGS(local) ); +TRACE_EVENT(drv_set_wakeup, + TP_PROTO(struct ieee80211_local *local, bool enabled), + TP_ARGS(local, enabled), + TP_STRUCT__entry( + LOCAL_ENTRY + __field(bool, enabled) + ), + TP_fast_assign( + LOCAL_ASSIGN; + __entry->enabled = enabled; + ), + TP_printk(LOCAL_PR_FMT " enabled:%d", LOCAL_PR_ARG, __entry->enabled) +); + DEFINE_EVENT(local_only_evt, drv_stop, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local) @@ -624,6 +653,34 @@ TRACE_EVENT(drv_sta_state, ) ); +TRACE_EVENT(drv_sta_rc_update, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + u32 changed), + + TP_ARGS(local, sdata, sta, changed), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + __field(u32, changed) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + __entry->changed = changed; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " changed: 0x%x", + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->changed + ) +); + TRACE_EVENT(drv_sta_add, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, @@ -677,15 +734,14 @@ TRACE_EVENT(drv_sta_remove, TRACE_EVENT(drv_conf_tx, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, - u16 queue, - const struct ieee80211_tx_queue_params *params), + u16 ac, const struct ieee80211_tx_queue_params *params), - TP_ARGS(local, sdata, queue, params), + TP_ARGS(local, sdata, ac, params), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY - __field(u16, queue) + __field(u16, ac) __field(u16, txop) __field(u16, cw_min) __field(u16, cw_max) @@ -696,7 +752,7 @@ TRACE_EVENT(drv_conf_tx, TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; - __entry->queue = queue; + __entry->ac = ac; __entry->txop = params->txop; __entry->cw_max = params->cw_max; __entry->cw_min = params->cw_min; @@ -705,8 +761,8 @@ TRACE_EVENT(drv_conf_tx, ), TP_printk( - LOCAL_PR_FMT VIF_PR_FMT " queue:%d", - LOCAL_PR_ARG, VIF_PR_ARG, 
__entry->queue + LOCAL_PR_FMT VIF_PR_FMT " AC:%d", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->ac ) ); diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index f25fff7607d..6f8615c54b2 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c @@ -19,15 +19,6 @@ #include "ieee80211_i.h" #include "rate.h" -bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata) -{ - const __le16 flg = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40); - if ((sdata->u.mgd.ht_capa_mask.cap_info & flg) && - !(sdata->u.mgd.ht_capa.cap_info & flg)) - return true; - return false; -} - static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta_ht_cap *ht_cap, u16 flag) @@ -315,10 +306,10 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11; #ifdef CONFIG_MAC80211_HT_DEBUG - if (net_ratelimit()) - printk(KERN_DEBUG "delba from %pM (%s) tid %d reason code %d\n", - mgmt->sa, initiator ? "initiator" : "recipient", tid, - le16_to_cpu(mgmt->u.action.u.delba.reason_code)); + net_dbg_ratelimited("delba from %pM (%s) tid %d reason code %d\n", + mgmt->sa, initiator ? "initiator" : "recipient", + tid, + le16_to_cpu(mgmt->u.action.u.delba.reason_code)); #endif /* CONFIG_MAC80211_HT_DEBUG */ if (initiator == WLAN_BACK_INITIATOR) diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index cef7c29214a..3ad33a82462 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -66,7 +66,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, skb_reset_tail_pointer(skb); skb_reserve(skb, sdata->local->hw.extra_tx_headroom); - if (compare_ether_addr(ifibss->bssid, bssid)) + if (!ether_addr_equal(ifibss->bssid, bssid)) sta_info_flush(sdata->local, sdata); /* if merging, indicate to driver that we leave the old IBSS */ @@ -160,16 +160,14 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, if (channel_type && sband->ht_cap.ht_supported) { pos = skb_put(skb, 4 + sizeof(struct ieee80211_ht_cap) + - sizeof(struct ieee80211_ht_info)); + sizeof(struct ieee80211_ht_operation)); pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, sband->ht_cap.cap); - pos = ieee80211_ie_build_ht_info(pos, - &sband->ht_cap, - chan, - channel_type); + pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap, + chan, channel_type, 0); } - if (local->hw.queues >= 4) { + if (local->hw.queues >= IEEE80211_NUM_ACS) { pos = skb_put(skb, 9); *pos++ = WLAN_EID_VENDOR_SPECIFIC; *pos++ = 7; /* len */ @@ -305,9 +303,8 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, * allow new one to be added. 
*/ if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { - if (net_ratelimit()) - printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n", - sdata->name, addr); + net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n", + sdata->name, addr); rcu_read_lock(); return NULL; } @@ -317,7 +314,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, return NULL; } - if (compare_ether_addr(bssid, sdata->u.ibss.bssid)) { + if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) { rcu_read_lock(); return NULL; } @@ -403,14 +400,14 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, return; if (sdata->vif.type == NL80211_IFTYPE_ADHOC && - compare_ether_addr(mgmt->bssid, sdata->u.ibss.bssid) == 0) { + ether_addr_equal(mgmt->bssid, sdata->u.ibss.bssid)) { rcu_read_lock(); sta = sta_info_get(sdata, mgmt->sa); if (elems->supp_rates) { supp_rates = ieee80211_sta_get_rates(local, elems, - band); + band, NULL); if (sta) { u32 prev_rates; @@ -441,13 +438,13 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, if (sta && elems->wmm_info) set_sta_flag(sta, WLAN_STA_WME); - if (sta && elems->ht_info_elem && elems->ht_cap_elem && + if (sta && elems->ht_operation && elems->ht_cap_elem && sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) { /* we both use HT */ struct ieee80211_sta_ht_cap sta_ht_cap_new; enum nl80211_channel_type channel_type = - ieee80211_ht_info_to_channel_type( - elems->ht_info_elem); + ieee80211_ht_oper_to_channel_type( + elems->ht_operation); ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, elems->ht_cap_elem, @@ -508,7 +505,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, goto put_bss; /* same BSSID */ - if (compare_ether_addr(cbss->bssid, sdata->u.ibss.bssid) == 0) + if (ether_addr_equal(cbss->bssid, sdata->u.ibss.bssid)) goto put_bss; if (rx_status->flag & RX_FLAG_MACTIME_MPDU) { @@ -560,7 +557,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, sdata->name, mgmt->bssid); #endif ieee80211_sta_join_ibss(sdata, bss); - supp_rates = ieee80211_sta_get_rates(local, elems, band); + supp_rates = ieee80211_sta_get_rates(local, elems, band, NULL); ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates, true); rcu_read_unlock(); @@ -584,16 +581,15 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, * allow new one to be added. 
*/ if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { - if (net_ratelimit()) - printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n", - sdata->name, addr); + net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n", + sdata->name, addr); return; } if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH) return; - if (compare_ether_addr(bssid, sdata->u.ibss.bssid)) + if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) return; sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); @@ -831,7 +827,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da)) return; - if (compare_ether_addr(mgmt->bssid, ifibss->bssid) != 0 && + if (!ether_addr_equal(mgmt->bssid, ifibss->bssid) && !is_broadcast_ether_addr(mgmt->bssid)) return; @@ -1063,7 +1059,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, 4 /* IBSS params */ + 2 + (IEEE80211_MAX_SUPP_RATES - 8) + 2 + sizeof(struct ieee80211_ht_cap) + - 2 + sizeof(struct ieee80211_ht_info) + + 2 + sizeof(struct ieee80211_ht_operation) + params->ie_len); if (!skb) return -ENOMEM; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index db8fae51714..3f3cd50fff1 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -52,7 +52,8 @@ struct ieee80211_local; * increased memory use (about 2 kB of RAM per entry). */ #define IEEE80211_FRAGMENT_MAX 4 -#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024)) +#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) +#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) #define IEEE80211_DEFAULT_UAPSD_QUEUES \ (IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | \ @@ -281,7 +282,7 @@ struct ieee80211_if_ap { u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)]; struct sk_buff_head ps_bc_buf; atomic_t num_sta_ps; /* number of stations in PS mode */ - atomic_t num_sta_authorized; /* number of authorized stations */ + atomic_t num_mcast_sta; /* number of stations receiving multicast */ int dtim_count; bool dtim_bc_mc; }; @@ -378,6 +379,7 @@ enum ieee80211_sta_flags { IEEE80211_STA_UAPSD_ENABLED = BIT(7), IEEE80211_STA_NULLFUNC_ACKED = BIT(8), IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9), + IEEE80211_STA_DISABLE_40MHZ = BIT(10), }; struct ieee80211_mgd_auth_data { @@ -397,7 +399,7 @@ struct ieee80211_mgd_auth_data { struct ieee80211_mgd_assoc_data { struct cfg80211_bss *bss; const u8 *supp_rates; - const u8 *ht_information_ie; + const u8 *ht_operation_ie; unsigned long timeout; int tries; @@ -552,6 +554,24 @@ struct ieee80211_if_ibss { } state; }; +/** + * struct ieee80211_mesh_sync_ops - Extensible synchronization framework interface + * + * these declarations define the interface, which enables + * vendor-specific mesh synchronization + * + */ +struct ieee802_11_elems; +struct ieee80211_mesh_sync_ops { + void (*rx_bcn_presp)(struct ieee80211_sub_if_data *sdata, + u16 stype, + struct ieee80211_mgmt *mgmt, + struct ieee802_11_elems *elems, + struct ieee80211_rx_status *rx_status); + void (*adjust_tbtt)(struct ieee80211_sub_if_data *sdata); + /* add other framework functions here */ +}; + struct ieee80211_if_mesh { struct timer_list housekeeping_timer; struct timer_list mesh_path_timer; @@ -600,6 +620,11 @@ struct ieee80211_if_mesh { IEEE80211_MESH_SEC_AUTHED = 0x1, IEEE80211_MESH_SEC_SECURED = 0x2, } security; + /* Extensible Synchronization Framework */ + struct ieee80211_mesh_sync_ops *sync_ops; + s64 sync_offset_clockdrift_max; + spinlock_t sync_offset_lock; + bool adjusting_tbtt; 
}; #ifdef CONFIG_MAC80211_MESH @@ -666,12 +691,6 @@ struct ieee80211_sub_if_data { char name[IFNAMSIZ]; - /* - * keep track of whether the HT opmode (stored in - * vif.bss_info.ht_operation_mode) is valid. - */ - bool ht_opmode_valid; - /* to detect idle changes */ bool old_idle; @@ -691,7 +710,7 @@ struct ieee80211_sub_if_data { __be16 control_port_protocol; bool control_port_no_encrypt; - struct ieee80211_tx_queue_params tx_conf[IEEE80211_MAX_QUEUES]; + struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS]; struct work_struct work; struct sk_buff_head skb_queue; @@ -761,7 +780,6 @@ enum queue_stop_reason { IEEE80211_QUEUE_STOP_REASON_AGGREGATION, IEEE80211_QUEUE_STOP_REASON_SUSPEND, IEEE80211_QUEUE_STOP_REASON_SKB_ADD, - IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE, }; #ifdef CONFIG_MAC80211_LEDS @@ -785,6 +803,8 @@ struct tpt_led_trigger { * well be on the operating channel * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to * determine if we are on the operating channel or not + * @SCAN_ONCHANNEL_SCANNING: Do a software scan on only the current operating + * channel. This should not interrupt normal traffic. * @SCAN_COMPLETED: Set for our scan work function when the driver reported * that the scan completed. * @SCAN_ABORTED: Set for our scan work function when the driver reported @@ -793,6 +813,7 @@ struct tpt_led_trigger { enum { SCAN_SW_SCANNING, SCAN_HW_SCANNING, + SCAN_ONCHANNEL_SCANNING, SCAN_COMPLETED, SCAN_ABORTED, }; @@ -1082,6 +1103,9 @@ struct ieee80211_local { struct net_device napi_dev; struct napi_struct napi; + + /* virtual monitor interface */ + struct ieee80211_sub_if_data __rcu *monitor_sdata; }; static inline struct ieee80211_sub_if_data * @@ -1117,7 +1141,7 @@ struct ieee802_11_elems { u8 *wmm_info; u8 *wmm_param; struct ieee80211_ht_cap *ht_cap_elem; - struct ieee80211_ht_info *ht_info_elem; + struct ieee80211_ht_operation *ht_operation; struct ieee80211_meshconf_ie *mesh_config; u8 *mesh_id; u8 *peering; @@ -1171,7 +1195,7 @@ static inline struct ieee80211_local *hw_to_local( static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) { - return compare_ether_addr(raddr, addr) == 0 || + return ether_addr_equal(raddr, addr) || is_broadcast_ether_addr(raddr); } @@ -1239,6 +1263,7 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata, int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, struct cfg80211_scan_request *req); void ieee80211_scan_cancel(struct ieee80211_local *local); +void ieee80211_run_deferred_scan(struct ieee80211_local *local); ieee80211_rx_result ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); @@ -1251,9 +1276,6 @@ ieee80211_bss_info_update(struct ieee80211_local *local, struct ieee802_11_elems *elems, struct ieee80211_channel *channel, bool beacon); -struct ieee80211_bss * -ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, - u8 *ssid, u8 ssid_len); void ieee80211_rx_bss_put(struct ieee80211_local *local, struct ieee80211_bss *bss); @@ -1299,7 +1321,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); /* HT */ -bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata); void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta_ht_cap *ht_cap); void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, @@ -1383,7 +1404,7 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw) extern void *mac80211_wiphy_privid; /* 
for wiphy privid */ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, enum nl80211_iftype type); -int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, +int ieee80211_frame_duration(enum ieee80211_band band, size_t len, int rate, int erp, int short_preamble); void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, struct ieee80211_hdr *hdr, const u8 *tsc, @@ -1429,13 +1450,17 @@ void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason); void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason); +void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue); void ieee80211_add_pending_skb(struct ieee80211_local *local, struct sk_buff *skb); -void ieee80211_add_pending_skbs(struct ieee80211_local *local, - struct sk_buff_head *skbs); void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, struct sk_buff_head *skbs, void (*fn)(void *data), void *data); +static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local, + struct sk_buff_head *skbs) +{ + ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL); +} void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, u16 transaction, u16 auth_alg, @@ -1460,7 +1485,7 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, const u8 *supp_rates); u32 ieee80211_sta_get_rates(struct ieee80211_local *local, struct ieee802_11_elems *elems, - enum ieee80211_band band); + enum ieee80211_band band, u32 *basic_rates); int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode smps_mode); void ieee80211_recalc_smps(struct ieee80211_local *local); @@ -1470,10 +1495,10 @@ size_t ieee80211_ie_split(const u8 *ies, size_t ielen, size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset); u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, u16 cap); -u8 *ieee80211_ie_build_ht_info(u8 *pos, - struct ieee80211_sta_ht_cap *ht_cap, - struct ieee80211_channel *channel, - enum nl80211_channel_type channel_type); +u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, + struct ieee80211_channel *channel, + enum nl80211_channel_type channel_type, + u16 prot_mode); /* internal work items */ void ieee80211_work_init(struct ieee80211_local *local); @@ -1501,10 +1526,7 @@ bool ieee80211_set_channel_type(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum nl80211_channel_type chantype); enum nl80211_channel_type -ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info); -enum nl80211_channel_type ieee80211_get_tx_channel_type( - struct ieee80211_local *local, - enum nl80211_channel_type channel_type); +ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper); #ifdef CONFIG_MAC80211_NOINLINE #define debug_noinline noinline diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index c20051b7ffc..856237c5c1f 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -127,7 +127,7 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata, * The remaining checks are only performed for interfaces * with the same MAC address. 
*/ - if (compare_ether_addr(dev->dev_addr, ndev->dev_addr)) + if (!ether_addr_equal(dev->dev_addr, ndev->dev_addr)) continue; /* @@ -149,6 +149,35 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata, return 0; } +static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata) +{ + int n_queues = sdata->local->hw.queues; + int i; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + if (WARN_ON_ONCE(sdata->vif.hw_queue[i] == + IEEE80211_INVAL_HW_QUEUE)) + return -EINVAL; + if (WARN_ON_ONCE(sdata->vif.hw_queue[i] >= + n_queues)) + return -EINVAL; + } + + if ((sdata->vif.type != NL80211_IFTYPE_AP) || + !(sdata->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)) { + sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE; + return 0; + } + + if (WARN_ON_ONCE(sdata->vif.cab_queue == IEEE80211_INVAL_HW_QUEUE)) + return -EINVAL; + + if (WARN_ON_ONCE(sdata->vif.cab_queue >= n_queues)) + return -EINVAL; + + return 0; +} + void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, const int offset) { @@ -169,6 +198,81 @@ void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, #undef ADJUST } +static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + int i; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) + sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE; + else + sdata->vif.hw_queue[i] = i; + } + sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE; +} + +static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + int ret; + + if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) + return 0; + + if (local->monitor_sdata) + return 0; + + sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); + if (!sdata) + return -ENOMEM; + + /* set up data */ + sdata->local = local; + sdata->vif.type = NL80211_IFTYPE_MONITOR; + snprintf(sdata->name, IFNAMSIZ, "%s-monitor", + wiphy_name(local->hw.wiphy)); + + ieee80211_set_default_queues(sdata); + + ret = drv_add_interface(local, sdata); + if (WARN_ON(ret)) { + /* ok .. stupid driver, it asked for this! 
*/ + kfree(sdata); + return ret; + } + + ret = ieee80211_check_queues(sdata); + if (ret) { + kfree(sdata); + return ret; + } + + rcu_assign_pointer(local->monitor_sdata, sdata); + + return 0; +} + +static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + + if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) + return; + + sdata = rtnl_dereference(local->monitor_sdata); + + if (!sdata) + return; + + rcu_assign_pointer(local->monitor_sdata, NULL); + synchronize_net(); + + drv_remove_interface(local, sdata); + + kfree(sdata); +} + /* * NOTE: Be very careful when changing this function, it must NOT return * an error on interface type changes that have been pre-checked, so most @@ -246,15 +350,18 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); if (!is_valid_ether_addr(dev->dev_addr)) { - if (!local->open_count) - drv_stop(local); - return -EADDRNOTAVAIL; + res = -EADDRNOTAVAIL; + goto err_stop; } } switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: - /* no need to tell driver */ + /* no need to tell driver, but set carrier */ + if (rtnl_dereference(sdata->bss->beacon)) + netif_carrier_on(dev); + else + netif_carrier_off(dev); break; case NL80211_IFTYPE_MONITOR: if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) { @@ -262,6 +369,12 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) break; } + if (local->monitors == 0 && local->open_count == 0) { + res = ieee80211_add_virtual_monitor(local); + if (res) + goto err_stop; + } + /* must be before the call to ieee80211_configure_filter */ local->monitors++; if (local->monitors == 1) { @@ -276,9 +389,14 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) break; default: if (coming_up) { + ieee80211_del_virtual_monitor(local); + res = drv_add_interface(local, sdata); if (res) goto err_stop; + res = ieee80211_check_queues(sdata); + if (res) + goto err_del_interface; } if (sdata->vif.type == NL80211_IFTYPE_AP) { @@ -294,7 +412,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) ieee80211_bss_info_change_notify(sdata, changed); if (sdata->vif.type == NL80211_IFTYPE_STATION || - sdata->vif.type == NL80211_IFTYPE_ADHOC) + sdata->vif.type == NL80211_IFTYPE_ADHOC || + sdata->vif.type == NL80211_IFTYPE_AP) netif_carrier_off(dev); else netif_carrier_on(dev); @@ -366,6 +485,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) sdata->bss = NULL; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) list_del(&sdata->u.vlan.list); + /* might already be clear but that doesn't matter */ clear_bit(SDATA_STATE_RUNNING, &sdata->state); return res; } @@ -508,6 +628,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, if (local->monitors == 0) { local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR; hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR; + ieee80211_del_virtual_monitor(local); } ieee80211_adjust_monitor_flags(sdata, -1); @@ -581,6 +702,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, } } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + + if (local->monitors == local->open_count && local->monitors > 0) + ieee80211_add_virtual_monitor(local); } static int ieee80211_stop(struct net_device *dev) @@ -676,7 +800,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev, struct ieee80211_hdr *hdr; struct ieee80211_radiotap_header *rtap = (void *)skb->data; - if (local->hw.queues < 4) + if 
(local->hw.queues < IEEE80211_NUM_ACS) return 0; if (skb->len < 4 || @@ -907,6 +1031,18 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, ieee80211_debugfs_add_netdev(sdata); } +static void ieee80211_clean_sdata(struct ieee80211_sub_if_data *sdata) +{ + switch (sdata->vif.type) { + case NL80211_IFTYPE_MESH_POINT: + mesh_path_flush_by_iface(sdata); + break; + + default: + break; + } +} + static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { @@ -970,6 +1106,13 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, if (ret) type = sdata->vif.type; + /* + * Ignore return value here, there's not much we can do since + * the driver changed the interface type internally already. + * The warnings will hopefully make driver authors fix it :-) + */ + ieee80211_check_queues(sdata); + ieee80211_setup_sdata(sdata, type); err = ieee80211_do_open(sdata->dev, false); @@ -1133,11 +1276,15 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, struct net_device *ndev; struct ieee80211_sub_if_data *sdata = NULL; int ret, i; + int txqs = 1; ASSERT_RTNL(); + if (local->hw.queues >= IEEE80211_NUM_ACS) + txqs = IEEE80211_NUM_ACS; + ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size, - name, ieee80211_if_setup, local->hw.queues, 1); + name, ieee80211_if_setup, txqs, 1); if (!ndev) return -ENOMEM; dev_net_set(ndev, wiphy_net(local->hw.wiphy)); @@ -1192,6 +1339,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, sizeof(sdata->rc_rateidx_mcs_mask[i])); } + ieee80211_set_default_queues(sdata); + /* setup type-dependent data */ ieee80211_setup_sdata(sdata, type); @@ -1227,8 +1376,8 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata) list_del_rcu(&sdata->list); mutex_unlock(&sdata->local->iflist_mtx); - if (ieee80211_vif_is_mesh(&sdata->vif)) - mesh_path_flush_by_iface(sdata); + /* clean up type-dependent data */ + ieee80211_clean_sdata(sdata); synchronize_rcu(); unregister_netdevice(sdata->dev); @@ -1249,8 +1398,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local) list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { list_del(&sdata->list); - if (ieee80211_vif_is_mesh(&sdata->vif)) - mesh_path_flush_by_iface(sdata); + ieee80211_clean_sdata(sdata); unregister_netdevice_queue(sdata->dev, &unreg_list); } diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 16336480c63..b70f7f09da6 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -47,7 +47,8 @@ void ieee80211_configure_filter(struct ieee80211_local *local) if (atomic_read(&local->iff_allmultis)) new_flags |= FIF_ALLMULTI; - if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning)) + if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning) || + test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) new_flags |= FIF_BCN_PRBRESP_PROMISC; if (local->fif_probe_req || local->probe_req_reg) @@ -148,6 +149,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) } if (test_bit(SCAN_SW_SCANNING, &local->scanning) || + test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) || test_bit(SCAN_HW_SCANNING, &local->scanning)) power = chan->max_power; else @@ -557,8 +559,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, WIPHY_FLAG_4ADDR_AP | WIPHY_FLAG_4ADDR_STATION | WIPHY_FLAG_REPORTS_OBSS | - WIPHY_FLAG_OFFCHAN_TX | - WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + WIPHY_FLAG_OFFCHAN_TX; + + if (ops->remain_on_channel) + 
wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; wiphy->features = NL80211_FEATURE_SK_TX_STATUS | NL80211_FEATURE_HT_IBSS; @@ -589,6 +593,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, local->hw.max_report_rates = 0; local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; + local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE; local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; local->user_power_level = -1; @@ -685,6 +690,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) WLAN_CIPHER_SUITE_AES_CMAC }; + if (hw->flags & IEEE80211_HW_QUEUE_CONTROL && + (local->hw.offchannel_tx_hw_queue == IEEE80211_INVAL_HW_QUEUE || + local->hw.offchannel_tx_hw_queue >= local->hw.queues)) + return -EINVAL; + if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns) #ifdef CONFIG_PM && (!local->ops->suspend || !local->ops->resume) diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index e5fbb7cf356..0675a2fec6a 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -13,9 +13,6 @@ #include "ieee80211_i.h" #include "mesh.h" -#define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01 -#define MESHCONF_CAPAB_FORWARDING 0x08 - #define TMR_RUNNING_HK 0 #define TMR_RUNNING_MP 1 #define TMR_RUNNING_MPR 2 @@ -67,16 +64,19 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data) /** * mesh_matches_local - check if the config of a mesh point matches ours * - * @ie: information elements of a management frame from the mesh peer * @sdata: local mesh subif + * @ie: information elements of a management frame from the mesh peer * * This function checks if the mesh configuration of a mesh point matches the * local mesh configuration, i.e. if both nodes belong to the same mesh network. 
*/ -bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata) +bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, + struct ieee802_11_elems *ie) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee80211_local *local = sdata->local; + u32 basic_rates = 0; + enum nl80211_channel_type sta_channel_type = NL80211_CHAN_NO_HT; /* * As support for each feature is added, check for matching @@ -97,10 +97,21 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))) goto mismatch; - /* disallow peering with mismatched channel types for now */ - if (ie->ht_info_elem && - (local->_oper_channel_type != - ieee80211_ht_info_to_channel_type(ie->ht_info_elem))) + ieee80211_sta_get_rates(local, ie, local->oper_channel->band, + &basic_rates); + + if (sdata->vif.bss_conf.basic_rates != basic_rates) + goto mismatch; + + if (ie->ht_operation) + sta_channel_type = + ieee80211_ht_oper_to_channel_type(ie->ht_operation); + + /* Disallow HT40+/- mismatch */ + if (ie->ht_operation && + local->_oper_channel_type > NL80211_CHAN_HT20 && + sta_channel_type > NL80211_CHAN_HT20 && + local->_oper_channel_type != sta_channel_type) goto mismatch; return true; @@ -204,7 +215,7 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr, kmem_cache_free(rm_cache, p); --entries; } else if ((seqnum == p->seqnum) && - (compare_ether_addr(sa, p->sa) == 0)) + (ether_addr_equal(sa, p->sa))) return -1; } @@ -251,8 +262,10 @@ mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) /* Mesh capability */ ifmsh->accepting_plinks = mesh_plink_availables(sdata); *pos = MESHCONF_CAPAB_FORWARDING; - *pos++ |= ifmsh->accepting_plinks ? + *pos |= ifmsh->accepting_plinks ? MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; + *pos++ |= ifmsh->adjusting_tbtt ? 
+ MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00; *pos++ = 0x00; return 0; @@ -371,7 +384,7 @@ int mesh_add_ht_cap_ie(struct sk_buff *skb, return 0; } -int mesh_add_ht_info_ie(struct sk_buff *skb, +int mesh_add_ht_oper_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; @@ -385,11 +398,12 @@ int mesh_add_ht_info_ie(struct sk_buff *skb, if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT) return 0; - if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_info)) + if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_operation)) return -ENOMEM; - pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_info)); - ieee80211_ie_build_ht_info(pos, ht_cap, channel, channel_type); + pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation)); + ieee80211_ie_build_ht_oper(pos, ht_cap, channel, channel_type, + sdata->vif.bss_conf.ht_operation_mode); return 0; } @@ -573,14 +587,24 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) ieee80211_configure_filter(local); ifmsh->mesh_cc_id = 0; /* Disabled */ - ifmsh->mesh_sp_id = 0; /* Neighbor Offset */ ifmsh->mesh_auth_id = 0; /* Disabled */ + /* register sync ops from extensible synchronization framework */ + ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id); + ifmsh->adjusting_tbtt = false; + ifmsh->sync_offset_clockdrift_max = 0; set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); ieee80211_mesh_root_setup(ifmsh); ieee80211_queue_work(&local->hw, &sdata->work); + sdata->vif.bss_conf.ht_operation_mode = + ifmsh->mshcfg.ht_opmode; sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; + sdata->vif.bss_conf.basic_rates = + ieee80211_mandatory_rates(sdata->local, + sdata->local->hw.conf.channel->band); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED | + BSS_CHANGED_HT | + BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BEACON_INT); } @@ -616,16 +640,16 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, struct ieee80211_rx_status *rx_status) { struct ieee80211_local *local = sdata->local; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee802_11_elems elems; struct ieee80211_channel *channel; - u32 supp_rates = 0; size_t baselen; int freq; enum ieee80211_band band = rx_status->band; /* ignore ProbeResp to foreign address */ if (stype == IEEE80211_STYPE_PROBE_RESP && - compare_ether_addr(mgmt->da, sdata->vif.addr)) + !ether_addr_equal(mgmt->da, sdata->vif.addr)) return; baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; @@ -650,10 +674,12 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, return; if (elems.mesh_id && elems.mesh_config && - mesh_matches_local(&elems, sdata)) { - supp_rates = ieee80211_sta_get_rates(local, &elems, band); - mesh_neighbour_update(mgmt->sa, supp_rates, sdata, &elems); - } + mesh_matches_local(sdata, &elems)) + mesh_neighbour_update(sdata, mgmt->sa, &elems); + + if (ifmsh->sync_ops) + ifmsh->sync_ops->rx_bcn_presp(sdata, + stype, mgmt, &elems, rx_status); } static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, @@ -721,6 +747,9 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) if (test_and_clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags)) ieee80211_mesh_rootpath(sdata); + + if (test_and_clear_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags)) + mesh_sync_adjust_tbtt(sdata); } void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) @@ -761,4 +790,5 @@ void 
ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) (unsigned long) sdata); INIT_LIST_HEAD(&ifmsh->preq_queue.list); spin_lock_init(&ifmsh->mesh_preq_queue_lock); + spin_lock_init(&ifmsh->sync_offset_lock); } diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 8d53b71378e..e3642756f8f 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -19,6 +19,20 @@ /* Data structures */ /** + * enum mesh_config_capab_flags - mesh config IE capability flags + * + * @MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish + * additional mesh peerings with other mesh STAs + * @MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs + * @MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure is ongoing + */ +enum mesh_config_capab_flags { + MESHCONF_CAPAB_ACCEPT_PLINKS = BIT(0), + MESHCONF_CAPAB_FORWARDING = BIT(3), + MESHCONF_CAPAB_TBTT_ADJUSTING = BIT(5), +}; + +/** * enum mesh_path_flags - mac80211 mesh path flags * * @@ -56,12 +70,15 @@ enum mesh_path_flags { * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to * grow * @MESH_WORK_ROOT: the mesh root station needs to send a frame + * @MESH_WORK_DRIFT_ADJUST: time to compensate for clock drift relative to other + * mesh nodes */ enum mesh_deferred_task_flags { MESH_WORK_HOUSEKEEPING, MESH_WORK_GROW_MPATH_TABLE, MESH_WORK_GROW_MPP_TABLE, MESH_WORK_ROOT, + MESH_WORK_DRIFT_ADJUST, }; /** @@ -86,6 +103,7 @@ enum mesh_deferred_task_flags { * mpath itself. No need to take this lock when adding or removing * an mpath to a hash bucket on a path table. * @rann_snd_addr: the RANN sender address + * @rann_metric: the aggregated path metric towards the root node * @is_root: the destination station of this path is a root node * @is_gate: the destination station of this path is a mesh gate * @@ -112,6 +130,7 @@ struct mesh_path { enum mesh_path_flags flags; spinlock_t state_lock; u8 rann_snd_addr[ETH_ALEN]; + u32 rann_metric; bool is_root; bool is_gate; }; @@ -203,8 +222,8 @@ int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, char *addr6); int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, struct ieee80211_sub_if_data *sdata); -bool mesh_matches_local(struct ieee802_11_elems *ie, - struct ieee80211_sub_if_data *sdata); +bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, + struct ieee802_11_elems *ie); void mesh_ids_set_default(struct ieee80211_if_mesh *mesh); void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata); @@ -220,7 +239,7 @@ int mesh_add_ds_params_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata); int mesh_add_ht_cap_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata); -int mesh_add_ht_info_ie(struct sk_buff *skb, +int mesh_add_ht_oper_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata); void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); @@ -232,6 +251,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); +struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method); /* Mesh paths */ int mesh_nexthop_lookup(struct sk_buff *skb, @@ -256,9 +276,9 @@ int mesh_path_add_gate(struct mesh_path *mpath); int mesh_path_send_to_gates(struct mesh_path *mpath); int mesh_gate_num(struct ieee80211_sub_if_data *sdata); /* Mesh plinks */ -void 
mesh_neighbour_update(u8 *hw_addr, u32 rates, - struct ieee80211_sub_if_data *sdata, - struct ieee802_11_elems *ie); +void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata, + u8 *hw_addr, + struct ieee802_11_elems *ie); bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); void mesh_plink_broken(struct sta_info *sta); @@ -284,7 +304,6 @@ void mesh_pathtbl_unregister(void); int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata); void mesh_path_timer(unsigned long data); void mesh_path_flush_by_nexthop(struct sta_info *sta); -void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata); void mesh_path_discard_frame(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata); void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata); @@ -325,6 +344,8 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata); void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata); void mesh_plink_quiesce(struct sta_info *sta); void mesh_plink_restart(struct sta_info *sta); +void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata); +void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata); #else #define mesh_allocated 0 static inline void @@ -337,6 +358,8 @@ static inline void mesh_plink_quiesce(struct sta_info *sta) {} static inline void mesh_plink_restart(struct sta_info *sta) {} static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) { return false; } +static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) +{} #endif #endif /* IEEE80211S_H */ diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 1c6f3d02aeb..27e0c2f0679 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -86,8 +86,8 @@ static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae) #define PERR_IE_TARGET_RCODE(x) u16_field_get(x, 13, 0) #define MSEC_TO_TU(x) (x*1000/1024) -#define SN_GT(x, y) ((long) (y) - (long) (x) < 0) -#define SN_LT(x, y) ((long) (x) - (long) (y) < 0) +#define SN_GT(x, y) ((s32)(y - x) < 0) +#define SN_LT(x, y) ((s32)(x - y) < 0) #define net_traversal_jiffies(s) \ msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime) @@ -422,7 +422,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, new_metric = MAX_METRIC; exp_time = TU_TO_EXP_TIME(orig_lifetime); - if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0) { + if (ether_addr_equal(orig_addr, sdata->vif.addr)) { /* This MP is the originator, we are not interested in this * frame, except for updating transmitter's path info. 
*/ @@ -472,7 +472,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, /* Update and check transmitter routing info */ ta = mgmt->sa; - if (compare_ether_addr(orig_addr, ta) == 0) + if (ether_addr_equal(orig_addr, ta)) fresh_info = false; else { fresh_info = true; @@ -533,7 +533,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, mhwmp_dbg("received PREQ from %pM", orig_addr); - if (compare_ether_addr(target_addr, sdata->vif.addr) == 0) { + if (ether_addr_equal(target_addr, sdata->vif.addr)) { mhwmp_dbg("PREQ is for us"); forward = false; reply = true; @@ -631,7 +631,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem)); orig_addr = PREP_IE_ORIG_ADDR(prep_elem); - if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0) + if (ether_addr_equal(orig_addr, sdata->vif.addr)) /* destination, no forwarding required */ return; @@ -709,7 +709,7 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata, spin_lock_bh(&mpath->state_lock); sta = next_hop_deref_protected(mpath); if (mpath->flags & MESH_PATH_ACTIVE && - compare_ether_addr(ta, sta->sta.addr) == 0 && + ether_addr_equal(ta, sta->sta.addr) && (!(mpath->flags & MESH_PATH_SN_VALID) || SN_GT(target_sn, mpath->sn))) { mpath->flags &= ~MESH_PATH_ACTIVE; @@ -732,11 +732,12 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, struct ieee80211_rann_ie *rann) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; struct mesh_path *mpath; u8 ttl, flags, hopcount; u8 *orig_addr; - u32 orig_sn, metric; - u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval; + u32 orig_sn, metric, metric_txsta, interval; bool root_is_gate; ttl = rann->rann_ttl; @@ -748,19 +749,28 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, flags = rann->rann_flags; root_is_gate = !!(flags & RANN_FLAG_IS_GATE); orig_addr = rann->rann_addr; - orig_sn = rann->rann_seq; + orig_sn = le32_to_cpu(rann->rann_seq); + interval = le32_to_cpu(rann->rann_interval); hopcount = rann->rann_hopcount; hopcount++; - metric = rann->rann_metric; + metric = le32_to_cpu(rann->rann_metric); /* Ignore our own RANNs */ - if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0) + if (ether_addr_equal(orig_addr, sdata->vif.addr)) return; mhwmp_dbg("received RANN from %pM via neighbour %pM (is_gate=%d)", orig_addr, mgmt->sa, root_is_gate); rcu_read_lock(); + sta = sta_info_get(sdata, mgmt->sa); + if (!sta) { + rcu_read_unlock(); + return; + } + + metric_txsta = airtime_link_metric_get(local, sta); + mpath = mesh_path_lookup(orig_addr, sdata); if (!mpath) { mesh_path_add(orig_addr, sdata); @@ -780,18 +790,21 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); } - if (mpath->sn < orig_sn && ifmsh->mshcfg.dot11MeshForwarding) { + if ((SN_LT(mpath->sn, orig_sn) || (mpath->sn == orig_sn && + metric < mpath->rann_metric)) && ifmsh->mshcfg.dot11MeshForwarding) { mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, cpu_to_le32(orig_sn), 0, NULL, 0, broadcast_addr, hopcount, ttl, cpu_to_le32(interval), - cpu_to_le32(metric + mpath->metric), + cpu_to_le32(metric + metric_txsta), 0, sdata); mpath->sn = orig_sn; + mpath->rann_metric = metric + metric_txsta; + /* Recording RANNs sender address to send individually + * addressed PREQs destined for root mesh STA */ + 
memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN); } - /* Using individually addressed PREQ for root node */ - memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN); mpath->is_root = true; if (root_is_gate) @@ -1086,7 +1099,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb, if (time_after(jiffies, mpath->exp_time - msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) && - !compare_ether_addr(sdata->vif.addr, hdr->addr4) && + ether_addr_equal(sdata->vif.addr, hdr->addr4) && !(mpath->flags & MESH_PATH_RESOLVING) && !(mpath->flags & MESH_PATH_FIXED)) mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 49aaefd9963..b39224d8255 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -348,7 +348,7 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst, hlist_for_each_entry_rcu(node, n, bucket, list) { mpath = node->mpath; if (mpath->sdata == sdata && - compare_ether_addr(dst, mpath->dst) == 0) { + ether_addr_equal(dst, mpath->dst)) { if (MPATH_EXPIRED(mpath)) { spin_lock_bh(&mpath->state_lock); mpath->flags &= ~MESH_PATH_ACTIVE; @@ -517,7 +517,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) int err = 0; u32 hash_idx; - if (compare_ether_addr(dst, sdata->vif.addr) == 0) + if (ether_addr_equal(dst, sdata->vif.addr)) /* never add ourselves as neighbours */ return -ENOTSUPP; @@ -538,6 +538,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) read_lock_bh(&pathtbl_resize_lock); memcpy(new_mpath->dst, dst, ETH_ALEN); + memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN); + new_mpath->is_root = false; new_mpath->sdata = sdata; new_mpath->flags = 0; skb_queue_head_init(&new_mpath->frame_queue); @@ -559,7 +561,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) hlist_for_each_entry(node, n, bucket, list) { mpath = node->mpath; if (mpath->sdata == sdata && - compare_ether_addr(dst, mpath->dst) == 0) + ether_addr_equal(dst, mpath->dst)) goto err_exists; } @@ -650,7 +652,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) int err = 0; u32 hash_idx; - if (compare_ether_addr(dst, sdata->vif.addr) == 0) + if (ether_addr_equal(dst, sdata->vif.addr)) /* never add ourselves as neighbours */ return -ENOTSUPP; @@ -688,7 +690,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) hlist_for_each_entry(node, n, bucket, list) { mpath = node->mpath; if (mpath->sdata == sdata && - compare_ether_addr(dst, mpath->dst) == 0) + ether_addr_equal(dst, mpath->dst)) goto err_exists; } @@ -882,7 +884,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) hlist_for_each_entry(node, n, bucket, list) { mpath = node->mpath; if (mpath->sdata == sdata && - compare_ether_addr(addr, mpath->dst) == 0) { + ether_addr_equal(addr, mpath->dst)) { __mesh_path_del(tbl, node); goto enddel; } diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 4e53c4cbca9..8cc8461b48a 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -82,20 +82,14 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta) } /* - * NOTE: This is just an alias for sta_info_alloc(), see notes - * on it in the lifecycle management section! 
+ * Allocate mesh sta entry and insert into station table */ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, - u8 *hw_addr, u32 rates, - struct ieee802_11_elems *elems) + u8 *hw_addr) { - struct ieee80211_local *local = sdata->local; - struct ieee80211_supported_band *sband; struct sta_info *sta; - sband = local->hw.wiphy->bands[local->oper_channel->band]; - - if (local->num_sta >= MESH_MAX_PLINKS) + if (sdata->local->num_sta >= MESH_MAX_PLINKS) return NULL; sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL); @@ -108,16 +102,70 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, set_sta_flag(sta, WLAN_STA_WME); - sta->sta.supp_rates[local->hw.conf.channel->band] = rates; - if (elems->ht_cap_elem) - ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, - elems->ht_cap_elem, - &sta->sta.ht_cap); - rate_control_rate_init(sta); - return sta; } +/** mesh_set_ht_prot_mode - set correct HT protection mode + * + * Section 9.23.3.5 of IEEE 80211s standard describes the protection rules for + * HT mesh STA in a MBSS. Three HT protection modes are supported for now, + * non-HT mixed mode, 20MHz-protection and no-protection mode. non-HT mixed + * mode is selected if any non-HT peers are present in our MBSS. + * 20MHz-protection mode is selected if all peers in our 20/40MHz MBSS support + * HT and atleast one HT20 peer is present. Otherwise no-protection mode is + * selected. + */ +static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + u32 changed = 0; + u16 ht_opmode; + bool non_ht_sta = false, ht20_sta = false; + + if (local->_oper_channel_type == NL80211_CHAN_NO_HT) + return 0; + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (sdata == sta->sdata && + sta->plink_state == NL80211_PLINK_ESTAB) { + switch (sta->ch_type) { + case NL80211_CHAN_NO_HT: + mpl_dbg("mesh_plink %pM: nonHT sta (%pM) is present", + sdata->vif.addr, sta->sta.addr); + non_ht_sta = true; + goto out; + case NL80211_CHAN_HT20: + mpl_dbg("mesh_plink %pM: HT20 sta (%pM) is present", + sdata->vif.addr, sta->sta.addr); + ht20_sta = true; + default: + break; + } + } + } +out: + rcu_read_unlock(); + + if (non_ht_sta) + ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED; + else if (ht20_sta && local->_oper_channel_type > NL80211_CHAN_HT20) + ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ; + else + ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE; + + if (sdata->vif.bss_conf.ht_operation_mode != ht_opmode) { + sdata->vif.bss_conf.ht_operation_mode = ht_opmode; + sdata->u.mesh.mshcfg.ht_opmode = ht_opmode; + changed = BSS_CHANGED_HT; + mpl_dbg("mesh_plink %pM: protection mode changed to %d", + sdata->vif.addr, ht_opmode); + } + + return changed; +} + /** * __mesh_plink_deactivate - deactivate mesh peer link * @@ -187,7 +235,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, 2 + sdata->u.mesh.mesh_id_len + 2 + sizeof(struct ieee80211_meshconf_ie) + 2 + sizeof(struct ieee80211_ht_cap) + - 2 + sizeof(struct ieee80211_ht_info) + + 2 + sizeof(struct ieee80211_ht_operation) + 2 + 8 + /* peering IE */ sdata->u.mesh.ie_len); if (!skb) @@ -212,8 +260,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, pos = skb_put(skb, 2); memcpy(pos + 2, &plid, 2); } - if (ieee80211_add_srates_ie(&sdata->vif, skb) || - ieee80211_add_ext_srates_ie(&sdata->vif, skb) || + if (ieee80211_add_srates_ie(&sdata->vif, skb, true) || + 
ieee80211_add_ext_srates_ie(&sdata->vif, skb, true) || mesh_add_rsn_ie(skb, sdata) || mesh_add_meshid_ie(skb, sdata) || mesh_add_meshconf_ie(skb, sdata)) @@ -263,7 +311,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, if (action != WLAN_SP_MESH_PEERING_CLOSE) { if (mesh_add_ht_cap_ie(skb, sdata) || - mesh_add_ht_info_ie(skb, sdata)) + mesh_add_ht_oper_ie(skb, sdata)) return -1; } @@ -274,43 +322,93 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, return 0; } -void mesh_neighbour_update(u8 *hw_addr, u32 rates, - struct ieee80211_sub_if_data *sdata, - struct ieee802_11_elems *elems) +/* mesh_peer_init - initialize new mesh peer and return resulting sta_info + * + * @sdata: local meshif + * @addr: peer's address + * @elems: IEs from beacon or mesh peering frame + * + * call under RCU + */ +static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata, + u8 *addr, + struct ieee802_11_elems *elems) { struct ieee80211_local *local = sdata->local; + enum ieee80211_band band = local->oper_channel->band; + struct ieee80211_supported_band *sband; + u32 rates, basic_rates = 0; struct sta_info *sta; + bool insert = false; - rcu_read_lock(); + sband = local->hw.wiphy->bands[band]; + rates = ieee80211_sta_get_rates(local, elems, band, &basic_rates); - sta = sta_info_get(sdata, hw_addr); + sta = sta_info_get(sdata, addr); if (!sta) { - rcu_read_unlock(); - /* Userspace handles peer allocation when security is enabled - * */ - if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) - cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr, - elems->ie_start, elems->total_len, - GFP_KERNEL); - else - sta = mesh_plink_alloc(sdata, hw_addr, rates, elems); + sta = mesh_plink_alloc(sdata, addr); if (!sta) - return; - if (sta_info_insert_rcu(sta)) { - rcu_read_unlock(); - return; - } + return NULL; + insert = true; } + spin_lock_bh(&sta->lock); sta->last_rx = jiffies; - sta->sta.supp_rates[local->hw.conf.channel->band] = rates; + sta->sta.supp_rates[band] = rates; + if (elems->ht_cap_elem && + sdata->local->_oper_channel_type != NL80211_CHAN_NO_HT) + ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, + elems->ht_cap_elem, + &sta->sta.ht_cap); + else + memset(&sta->sta.ht_cap, 0, sizeof(sta->sta.ht_cap)); + + if (elems->ht_operation) { + if (!(elems->ht_operation->ht_param & + IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) + sta->sta.ht_cap.cap &= + ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + sta->ch_type = + ieee80211_ht_oper_to_channel_type(elems->ht_operation); + } + + rate_control_rate_init(sta); + spin_unlock_bh(&sta->lock); + + if (insert && sta_info_insert(sta)) + return NULL; + + return sta; +} + +void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata, + u8 *hw_addr, + struct ieee802_11_elems *elems) +{ + struct sta_info *sta; + + /* Userspace handles peer allocation when security is enabled */ + if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) { + cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr, + elems->ie_start, + elems->total_len, + GFP_KERNEL); + return; + } + + rcu_read_lock(); + sta = mesh_peer_init(sdata, hw_addr, elems); + if (!sta) + goto out; + if (mesh_peer_accepts_plinks(elems) && - sta->plink_state == NL80211_PLINK_LISTEN && - sdata->u.mesh.accepting_plinks && - sdata->u.mesh.mshcfg.auto_open_plinks && - rssi_threshold_check(sta, sdata)) + sta->plink_state == NL80211_PLINK_LISTEN && + sdata->u.mesh.accepting_plinks && + sdata->u.mesh.mshcfg.auto_open_plinks && + rssi_threshold_check(sta, sdata)) mesh_plink_open(sta); +out: 
rcu_read_unlock(); } @@ -456,15 +554,15 @@ void mesh_plink_block(struct sta_info *sta) void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status) { - struct ieee80211_local *local = sdata->local; struct ieee802_11_elems elems; struct sta_info *sta; enum plink_event event; enum ieee80211_self_protected_actioncode ftype; size_t baselen; - bool deactivated, matches_local = true; + bool matches_local = true; u8 ie_len; u8 *baseaddr; + u32 changed = 0; __le16 plid, llid, reason; #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG static const char *mplstates[] = { @@ -560,7 +658,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m /* Now we will figure out the appropriate event... */ event = PLINK_UNDEFINED; if (ftype != WLAN_SP_MESH_PEERING_CLOSE && - (!mesh_matches_local(&elems, sdata))) { + !mesh_matches_local(sdata, &elems)) { matches_local = false; switch (ftype) { case WLAN_SP_MESH_PEERING_OPEN: @@ -583,29 +681,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m return; } else if (!sta) { /* ftype == WLAN_SP_MESH_PEERING_OPEN */ - u32 rates; - - rcu_read_unlock(); - if (!mesh_plink_free_count(sdata)) { mpl_dbg("Mesh plink error: no more free plinks\n"); - return; - } - - rates = ieee80211_sta_get_rates(local, &elems, rx_status->band); - sta = mesh_plink_alloc(sdata, mgmt->sa, rates, &elems); - if (!sta) { - mpl_dbg("Mesh plink error: plink table full\n"); - return; - } - if (sta_info_insert_rcu(sta)) { rcu_read_unlock(); return; } event = OPN_ACPT; - spin_lock_bh(&sta->lock); } else if (matches_local) { - spin_lock_bh(&sta->lock); switch (ftype) { case WLAN_SP_MESH_PEERING_OPEN: if (!mesh_plink_free_count(sdata) || @@ -642,12 +724,19 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m break; default: mpl_dbg("Mesh plink: unknown frame subtype\n"); - spin_unlock_bh(&sta->lock); rcu_read_unlock(); return; } - } else { - spin_lock_bh(&sta->lock); + } + + if (event == OPN_ACPT) { + /* allocate sta entry if necessary and update info */ + sta = mesh_peer_init(sdata, mgmt->sa, &elems); + if (!sta) { + mpl_dbg("Mesh plink: failed to init peer!\n"); + rcu_read_unlock(); + return; + } } mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n", @@ -655,6 +744,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m le16_to_cpu(sta->llid), le16_to_cpu(sta->plid), event); reason = 0; + spin_lock_bh(&sta->lock); switch (sta->plink_state) { /* spin_unlock as soon as state is updated at each case */ case NL80211_PLINK_LISTEN: @@ -758,7 +848,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m sta->plink_state = NL80211_PLINK_ESTAB; spin_unlock_bh(&sta->lock); mesh_plink_inc_estab_count(sdata); - ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); + changed |= mesh_set_ht_prot_mode(sdata); + changed |= BSS_CHANGED_BEACON; mpl_dbg("Mesh plink with %pM ESTABLISHED\n", sta->sta.addr); break; @@ -793,7 +884,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m sta->plink_state = NL80211_PLINK_ESTAB; spin_unlock_bh(&sta->lock); mesh_plink_inc_estab_count(sdata); - ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); + changed |= mesh_set_ht_prot_mode(sdata); + changed |= BSS_CHANGED_BEACON; mpl_dbg("Mesh plink with %pM ESTABLISHED\n", sta->sta.addr); mesh_plink_frame_tx(sdata, @@ -811,13 +903,13 @@ void 
mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m case CLS_ACPT: reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE); sta->reason = reason; - deactivated = __mesh_plink_deactivate(sta); + __mesh_plink_deactivate(sta); sta->plink_state = NL80211_PLINK_HOLDING; llid = sta->llid; mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); spin_unlock_bh(&sta->lock); - if (deactivated) - ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); + changed |= mesh_set_ht_prot_mode(sdata); + changed |= BSS_CHANGED_BEACON; mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE, sta->sta.addr, llid, plid, reason); break; @@ -864,4 +956,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m } rcu_read_unlock(); + + if (changed) + ieee80211_bss_info_change_notify(sdata, changed); } diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c new file mode 100644 index 00000000000..38d30e8ce6d --- /dev/null +++ b/net/mac80211/mesh_sync.c @@ -0,0 +1,316 @@ +/* + * Copyright 2011-2012, Pavel Zubarev <pavel.zubarev@gmail.com> + * Copyright 2011-2012, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de> + * Copyright 2011-2012, cozybit Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "ieee80211_i.h" +#include "mesh.h" +#include "driver-ops.h" + +#ifdef CONFIG_MAC80211_VERBOSE_MESH_SYNC_DEBUG +#define msync_dbg(fmt, args...) \ + printk(KERN_DEBUG "Mesh sync (%s): " fmt "\n", sdata->name, ##args) +#else +#define msync_dbg(fmt, args...) do { (void)(0); } while (0) +#endif + +/* This is not in the standard. It represents a tolerable tbtt drift below + * which we do no TSF adjustment. + */ +#define TOFFSET_MINIMUM_ADJUSTMENT 10 + +/* This is not in the standard. It is a margin added to the + * Toffset setpoint to mitigate TSF overcorrection + * introduced by TSF adjustment latency. + */ +#define TOFFSET_SET_MARGIN 20 + +/* This is not in the standard. It represents the maximum Toffset jump above + * which we'll invalidate the Toffset setpoint and choose a new setpoint. This + * could be, for instance, in case a neighbor is restarted and its TSF counter + * reset. 
+ */ +#define TOFFSET_MAXIMUM_ADJUSTMENT 30000 /* 30 ms */ + +struct sync_method { + u8 method; + struct ieee80211_mesh_sync_ops ops; +}; + +/** + * mesh_peer_tbtt_adjusting - check if an mp is currently adjusting its TBTT + * + * @ie: information elements of a management frame from the mesh peer + */ +static bool mesh_peer_tbtt_adjusting(struct ieee802_11_elems *ie) +{ + return (ie->mesh_config->meshconf_cap & + MESHCONF_CAPAB_TBTT_ADJUSTING) != 0; +} + +void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + /* sdata->vif.bss_conf.beacon_int in 1024us units, 0.04% */ + u64 beacon_int_fraction = sdata->vif.bss_conf.beacon_int * 1024 / 2500; + u64 tsf; + u64 tsfdelta; + + spin_lock_bh(&ifmsh->sync_offset_lock); + + if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) { + msync_dbg("TBTT : max clockdrift=%lld; adjusting", + (long long) ifmsh->sync_offset_clockdrift_max); + tsfdelta = -ifmsh->sync_offset_clockdrift_max; + ifmsh->sync_offset_clockdrift_max = 0; + } else { + msync_dbg("TBTT : max clockdrift=%lld; adjusting by %llu", + (long long) ifmsh->sync_offset_clockdrift_max, + (unsigned long long) beacon_int_fraction); + tsfdelta = -beacon_int_fraction; + ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction; + } + + tsf = drv_get_tsf(local, sdata); + if (tsf != -1ULL) + drv_set_tsf(local, sdata, tsf + tsfdelta); + spin_unlock_bh(&ifmsh->sync_offset_lock); +} + +static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, + u16 stype, + struct ieee80211_mgmt *mgmt, + struct ieee802_11_elems *elems, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + u64 t_t, t_r; + + WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET); + + /* standard mentions only beacons */ + if (stype != IEEE80211_STYPE_BEACON) + return; + + /* The current tsf is a first approximation for the timestamp + * for the received beacon. Further down we try to get a + * better value from the rx_status->mactime field if + * available. Also we have to call drv_get_tsf() before + * entering the rcu-read section.*/ + t_r = drv_get_tsf(local, sdata); + + rcu_read_lock(); + sta = sta_info_get(sdata, mgmt->sa); + if (!sta) + goto no_sync; + + /* check offset sync conditions (13.13.2.2.1) + * + * TODO also sync to + * dot11MeshNbrOffsetMaxNeighbor non-peer non-MBSS neighbors + */ + + if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) { + clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); + msync_dbg("STA %pM : is adjusting TBTT", sta->sta.addr); + goto no_sync; + } + + if (rx_status->flag & RX_FLAG_MACTIME_MPDU && rx_status->mactime) { + /* + * The mactime is defined as the time the first data symbol + * of the frame hits the PHY, and the timestamp of the beacon + * is defined as "the time that the data symbol containing the + * first bit of the timestamp is transmitted to the PHY plus + * the transmitting STA's delays through its local PHY from the + * MAC-PHY interface to its interface with the WM" (802.11 + * 11.1.2) + * + * T_r, in 13.13.2.2.2, is just defined as "the frame reception + * time" but we unless we interpret that time to be the same + * time of the beacon timestamp, the offset calculation will be + * off. Below we adjust t_r to be "the time at which the first + * symbol of the timestamp element in the beacon is received". 
+ * This correction depends on the rate. + * + * Based on similar code in ibss.c + */ + int rate; + + if (rx_status->flag & RX_FLAG_HT) { + /* TODO: + * In principle there could be HT-beacons (Dual Beacon + * HT Operation options), but for now ignore them and + * just use the primary (i.e. non-HT) beacons for + * synchronization. + * */ + goto no_sync; + } else + rate = local->hw.wiphy->bands[rx_status->band]-> + bitrates[rx_status->rate_idx].bitrate; + + /* 24 bytes of header * 8 bits/byte * + * 10*(100 Kbps)/Mbps / rate (100 Kbps)*/ + t_r = rx_status->mactime + (24 * 8 * 10 / rate); + } + + /* Timing offset calculation (see 13.13.2.2.2) */ + t_t = le64_to_cpu(mgmt->u.beacon.timestamp); + sta->t_offset = t_t - t_r; + + if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { + s64 t_clockdrift = sta->t_offset_setpoint + - sta->t_offset; + msync_dbg("STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld", + sta->sta.addr, + (long long) sta->t_offset, + (long long) + sta->t_offset_setpoint, + (long long) t_clockdrift); + + if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT || + t_clockdrift < -TOFFSET_MAXIMUM_ADJUSTMENT) { + msync_dbg("STA %pM : t_clockdrift=%lld too large, setpoint reset", + sta->sta.addr, + (long long) t_clockdrift); + clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); + goto no_sync; + } + + rcu_read_unlock(); + + spin_lock_bh(&ifmsh->sync_offset_lock); + if (t_clockdrift > + ifmsh->sync_offset_clockdrift_max) + ifmsh->sync_offset_clockdrift_max + = t_clockdrift; + spin_unlock_bh(&ifmsh->sync_offset_lock); + + } else { + sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN; + set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); + msync_dbg("STA %pM : offset was invalid, " + " sta->t_offset=%lld", + sta->sta.addr, + (long long) sta->t_offset); + rcu_read_unlock(); + } + return; + +no_sync: + rcu_read_unlock(); +} + +static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + + WARN_ON(ifmsh->mesh_sp_id + != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET); + BUG_ON(!rcu_read_lock_held()); + + spin_lock_bh(&ifmsh->sync_offset_lock); + + if (ifmsh->sync_offset_clockdrift_max > + TOFFSET_MINIMUM_ADJUSTMENT) { + /* Since ajusting the tsf here would + * require a possibly blocking call + * to the driver tsf setter, we punt + * the tsf adjustment to the mesh tasklet + */ + msync_dbg("TBTT : kicking off TBTT " + "adjustment with " + "clockdrift_max=%lld", + ifmsh->sync_offset_clockdrift_max); + set_bit(MESH_WORK_DRIFT_ADJUST, + &ifmsh->wrkq_flags); + } else { + msync_dbg("TBTT : max clockdrift=%lld; " + "too small to adjust", + (long long) + ifmsh->sync_offset_clockdrift_max); + ifmsh->sync_offset_clockdrift_max = 0; + } + spin_unlock_bh(&ifmsh->sync_offset_lock); +} + +static const u8 *mesh_get_vendor_oui(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + u8 offset; + + if (!ifmsh->ie || !ifmsh->ie_len) + return NULL; + + offset = ieee80211_ie_split_vendor(ifmsh->ie, + ifmsh->ie_len, 0); + + if (!offset) + return NULL; + + return ifmsh->ie + offset + 2; +} + +static void mesh_sync_vendor_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, + u16 stype, + struct ieee80211_mgmt *mgmt, + struct ieee802_11_elems *elems, + struct ieee80211_rx_status *rx_status) +{ + const u8 *oui; + + WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR); + msync_dbg("called mesh_sync_vendor_rx_bcn_presp"); + oui = mesh_get_vendor_oui(sdata); + /* here you would implement the vendor 
offset tracking for this oui */ +} + +static void mesh_sync_vendor_adjust_tbtt(struct ieee80211_sub_if_data *sdata) +{ + const u8 *oui; + + WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR); + msync_dbg("called mesh_sync_vendor_adjust_tbtt"); + oui = mesh_get_vendor_oui(sdata); + /* here you would implement the vendor tsf adjustment for this oui */ +} + +/* global variable */ +static struct sync_method sync_methods[] = { + { + .method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET, + .ops = { + .rx_bcn_presp = &mesh_sync_offset_rx_bcn_presp, + .adjust_tbtt = &mesh_sync_offset_adjust_tbtt, + } + }, + { + .method = IEEE80211_SYNC_METHOD_VENDOR, + .ops = { + .rx_bcn_presp = &mesh_sync_vendor_rx_bcn_presp, + .adjust_tbtt = &mesh_sync_vendor_adjust_tbtt, + } + }, +}; + +struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method) +{ + struct ieee80211_mesh_sync_ops *ops = NULL; + u8 i; + + for (i = 0 ; i < ARRAY_SIZE(sync_methods); ++i) { + if (sync_methods[i].method == method) { + ops = &sync_methods[i].ops; + break; + } + } + return ops; +} diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 20c680bfc3a..b3b3c264ff6 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -171,122 +171,64 @@ static int ecw2cw(int ecw) return (1 << ecw) - 1; } -/* - * ieee80211_enable_ht should be called only after the operating band - * has been determined as ht configuration depends on the hw's - * HT abilities for a specific band. - */ -static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, - struct ieee80211_ht_info *hti, - const u8 *bssid, u16 ap_ht_cap_flags, - bool beacon_htcap_ie) +static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata, + struct ieee80211_ht_operation *ht_oper, + const u8 *bssid, bool reconfig) { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; struct sta_info *sta; u32 changed = 0; - int hti_cfreq; u16 ht_opmode; - bool enable_ht = true; - enum nl80211_channel_type prev_chantype; - enum nl80211_channel_type rx_channel_type = NL80211_CHAN_NO_HT; - enum nl80211_channel_type tx_channel_type; + bool disable_40 = false; sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; - prev_chantype = sdata->vif.bss_conf.channel_type; - - hti_cfreq = ieee80211_channel_to_frequency(hti->control_chan, - sband->band); - /* check that channel matches the right operating channel */ - if (local->hw.conf.channel->center_freq != hti_cfreq) { - /* Some APs mess this up, evidently. - * Netgear WNDR3700 sometimes reports 4 higher than - * the actual channel, for instance. - */ - printk(KERN_DEBUG - "%s: Wrong control channel in association" - " response: configured center-freq: %d" - " hti-cfreq: %d hti->control_chan: %d" - " band: %d. 
Disabling HT.\n", - sdata->name, - local->hw.conf.channel->center_freq, - hti_cfreq, hti->control_chan, - sband->band); - enable_ht = false; - } - - if (enable_ht) { - rx_channel_type = NL80211_CHAN_HT20; - - if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) && - !ieee80111_cfg_override_disables_ht40(sdata) && - (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && - (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) { - switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { - case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: - rx_channel_type = NL80211_CHAN_HT40PLUS; - break; - case IEEE80211_HT_PARAM_CHA_SEC_BELOW: - rx_channel_type = NL80211_CHAN_HT40MINUS; - break; - } - } + switch (sdata->vif.bss_conf.channel_type) { + case NL80211_CHAN_HT40PLUS: + if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40PLUS) + disable_40 = true; + break; + case NL80211_CHAN_HT40MINUS: + if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40MINUS) + disable_40 = true; + break; + default: + break; } - tx_channel_type = ieee80211_get_tx_channel_type(local, rx_channel_type); + /* This can change during the lifetime of the BSS */ + if (!(ht_oper->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) + disable_40 = true; - if (local->tmp_channel) - local->tmp_channel_type = rx_channel_type; - - if (!ieee80211_set_channel_type(local, sdata, rx_channel_type)) { - /* can only fail due to HT40+/- mismatch */ - rx_channel_type = NL80211_CHAN_HT20; - WARN_ON(!ieee80211_set_channel_type(local, sdata, - rx_channel_type)); - } - - if (beacon_htcap_ie && (prev_chantype != rx_channel_type)) { - /* - * Whenever the AP announces the HT mode change that can be - * 40MHz intolerant or etc., it would be safer to stop tx - * queues before doing hw config to avoid buffer overflow. 
- */ - ieee80211_stop_queues_by_reason(&sdata->local->hw, - IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE); + mutex_lock(&local->sta_mtx); + sta = sta_info_get(sdata, bssid); - /* flush out all packets */ - synchronize_net(); + WARN_ON_ONCE(!sta); - drv_flush(local, false); - } + if (sta && !sta->supports_40mhz) + disable_40 = true; - /* channel_type change automatically detected */ - ieee80211_hw_config(local, 0); + if (sta && (!reconfig || + (disable_40 != !(sta->sta.ht_cap.cap & + IEEE80211_HT_CAP_SUP_WIDTH_20_40)))) { - if (prev_chantype != tx_channel_type) { - rcu_read_lock(); - sta = sta_info_get(sdata, bssid); - if (sta) - rate_control_rate_update(local, sband, sta, - IEEE80211_RC_HT_CHANGED, - tx_channel_type); - rcu_read_unlock(); + if (disable_40) + sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + else + sta->sta.ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; - if (beacon_htcap_ie) - ieee80211_wake_queues_by_reason(&sdata->local->hw, - IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE); + rate_control_rate_update(local, sband, sta, + IEEE80211_RC_BW_CHANGED); } + mutex_unlock(&local->sta_mtx); - ht_opmode = le16_to_cpu(hti->operation_mode); + ht_opmode = le16_to_cpu(ht_oper->operation_mode); /* if bss configuration changed store the new one */ - if (sdata->ht_opmode_valid != enable_ht || - sdata->vif.bss_conf.ht_operation_mode != ht_opmode || - prev_chantype != rx_channel_type) { + if (!reconfig || (sdata->vif.bss_conf.ht_operation_mode != ht_opmode)) { changed |= BSS_CHANGED_HT; sdata->vif.bss_conf.ht_operation_mode = ht_opmode; - sdata->ht_opmode_valid = enable_ht; } return changed; @@ -316,12 +258,12 @@ static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len, } static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, - struct sk_buff *skb, const u8 *ht_info_ie, + struct sk_buff *skb, const u8 *ht_oper_ie, struct ieee80211_supported_band *sband, struct ieee80211_channel *channel, enum ieee80211_smps_mode smps) { - struct ieee80211_ht_info *ht_info; + struct ieee80211_ht_operation *ht_oper; u8 *pos; u32 flags = channel->flags; u16 cap; @@ -329,21 +271,21 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap)); - if (!ht_info_ie) + if (!ht_oper_ie) return; - if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info)) + if (ht_oper_ie[1] < sizeof(struct ieee80211_ht_operation)) return; memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); ieee80211_apply_htcap_overrides(sdata, &ht_cap); - ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2); + ht_oper = (struct ieee80211_ht_operation *)(ht_oper_ie + 2); /* determine capability flags */ cap = ht_cap.cap; - switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { + switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: if (flags & IEEE80211_CHAN_NO_HT40PLUS) { cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; @@ -358,6 +300,16 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, break; } + /* + * If 40 MHz was disabled associate as though we weren't + * capable of 40 MHz -- some broken APs will never fall + * back to trying to transmit in 20 MHz. 
+ */ + if (sdata->u.mgd.flags & IEEE80211_STA_DISABLE_40MHZ) { + cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + cap &= ~IEEE80211_HT_CAP_SGI_40; + } + /* set SM PS mode properly */ cap &= ~IEEE80211_HT_CAP_SM_PS; switch (smps) { @@ -557,7 +509,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) } if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) - ieee80211_add_ht_ie(sdata, skb, assoc_data->ht_information_ie, + ieee80211_add_ht_ie(sdata, skb, assoc_data->ht_operation_ie, sband, local->oper_channel, ifmgd->ap_smps); /* if present, add any custom non-vendor IEs that go after HT */ @@ -1182,7 +1134,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local, if (!local->ops->conf_tx) return; - if (local->hw.queues < 4) + if (local->hw.queues < IEEE80211_NUM_ACS) return; if (!wmm_param) @@ -1435,7 +1387,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, sdata->vif.bss_conf.assoc = false; /* on the next assoc, re-program HT parameters */ - sdata->ht_opmode_valid = false; memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa)); memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask)); @@ -1496,19 +1447,24 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_local *local = sdata->local; + mutex_lock(&local->mtx); if (!(ifmgd->flags & (IEEE80211_STA_BEACON_POLL | - IEEE80211_STA_CONNECTION_POLL))) - return; + IEEE80211_STA_CONNECTION_POLL))) { + mutex_unlock(&local->mtx); + return; + } ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | IEEE80211_STA_BEACON_POLL); - mutex_lock(&sdata->local->iflist_mtx); - ieee80211_recalc_ps(sdata->local, -1); - mutex_unlock(&sdata->local->iflist_mtx); + + mutex_lock(&local->iflist_mtx); + ieee80211_recalc_ps(local, -1); + mutex_unlock(&local->iflist_mtx); if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) - return; + goto out; /* * We've received a probe response, but are not sure whether @@ -1520,6 +1476,9 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME)); +out: + ieee80211_run_deferred_scan(local); + mutex_unlock(&local->mtx); } void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, @@ -1567,14 +1526,23 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) ifmgd->nullfunc_failed = false; ieee80211_send_nullfunc(sdata->local, sdata, 0); } else { + int ssid_len; + ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); - ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0, - (u32) -1, true, false); + if (WARN_ON_ONCE(ssid == NULL)) + ssid_len = 0; + else + ssid_len = ssid[1]; + + ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL, + 0, (u32) -1, true, false); } ifmgd->probe_send_count++; ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); run_again(ifmgd, ifmgd->probe_timeout); + if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) + drv_flush(sdata->local, false); } static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, @@ -1586,21 +1554,22 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, if (!ieee80211_sdata_running(sdata)) return; - if (sdata->local->scanning) - return; - - if (sdata->local->tmp_channel) - return; - mutex_lock(&ifmgd->mtx); if (!ifmgd->associated) goto out; + 
mutex_lock(&sdata->local->mtx); + + if (sdata->local->tmp_channel || sdata->local->scanning) { + mutex_unlock(&sdata->local->mtx); + goto out; + } + #ifdef CONFIG_MAC80211_VERBOSE_DEBUG - if (beacon && net_ratelimit()) - printk(KERN_DEBUG "%s: detected beacon loss from AP " - "- sending probe request\n", sdata->name); + if (beacon) + net_dbg_ratelimited("%s: detected beacon loss from AP - sending probe request\n", + sdata->name); #endif /* @@ -1623,6 +1592,8 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, else ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL; + mutex_unlock(&sdata->local->mtx); + if (already) goto out; @@ -1643,6 +1614,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct sk_buff *skb; const u8 *ssid; + int ssid_len; if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) return NULL; @@ -1653,8 +1625,13 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, return NULL; ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); + if (WARN_ON_ONCE(ssid == NULL)) + ssid_len = 0; + else + ssid_len = ssid[1]; + skb = ieee80211_build_probe_req(sdata, ifmgd->associated->bssid, - (u32) -1, ssid + 2, ssid[1], + (u32) -1, ssid + 2, ssid_len, NULL, 0, true); return skb; @@ -1799,7 +1776,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN); - if (compare_ether_addr(bssid, mgmt->bssid)) + if (!ether_addr_equal(bssid, mgmt->bssid)) return RX_MGMT_NONE; auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); @@ -1876,7 +1853,7 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, return RX_MGMT_NONE; if (!ifmgd->associated || - compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid)) + !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) return RX_MGMT_NONE; bssid = ifmgd->associated->bssid; @@ -1909,7 +1886,7 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, return RX_MGMT_NONE; if (!ifmgd->associated || - compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid)) + !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) return RX_MGMT_NONE; reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); @@ -2000,7 +1977,6 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; u32 changed = 0; int err; - u16 ap_ht_cap_flags; /* AssocResp and ReassocResp have identical structure */ @@ -2051,7 +2027,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, elems.ht_cap_elem, &sta->sta.ht_cap); - ap_ht_cap_flags = sta->sta.ht_cap.cap; + sta->supports_40mhz = + sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; rate_control_rate_init(sta); @@ -2092,11 +2069,10 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, ieee80211_set_wmm_default(sdata, false); changed |= BSS_CHANGED_QOS; - if (elems.ht_info_elem && elems.wmm_param && + if (elems.ht_operation && elems.wmm_param && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) - changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, - cbss->bssid, ap_ht_cap_flags, - false); + changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation, + cbss->bssid, false); /* set AID and assoc capability, * ieee80211_set_associated() will tell the driver */ @@ -2137,7 +2113,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, if (!assoc_data) return RX_MGMT_NONE; - 
if (compare_ether_addr(assoc_data->bss->bssid, mgmt->bssid)) + if (!ether_addr_equal(assoc_data->bss->bssid, mgmt->bssid)) return RX_MGMT_NONE; /* @@ -2217,8 +2193,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, bool need_ps = false; if (sdata->u.mgd.associated && - compare_ether_addr(mgmt->bssid, sdata->u.mgd.associated->bssid) - == 0) { + ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) { bss = (void *)sdata->u.mgd.associated->priv; /* not previously set so we may need to recalc */ need_ps = !bss->dtim_period; @@ -2273,7 +2248,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, ASSERT_MGD_MTX(ifmgd); - if (compare_ether_addr(mgmt->da, sdata->vif.addr)) + if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) return; /* ignore ProbeResp to foreign address */ baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; @@ -2286,12 +2261,11 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); if (ifmgd->associated && - compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid) == 0) + ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) ieee80211_reset_ap_probe(sdata); if (ifmgd->auth_data && !ifmgd->auth_data->bss->proberesp_ies && - compare_ether_addr(mgmt->bssid, ifmgd->auth_data->bss->bssid) - == 0) { + ether_addr_equal(mgmt->bssid, ifmgd->auth_data->bss->bssid)) { /* got probe response, continue with auth */ printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name); ifmgd->auth_data->tries = 0; @@ -2319,7 +2293,7 @@ static const u64 care_about_ies = (1ULL << WLAN_EID_CHANNEL_SWITCH) | (1ULL << WLAN_EID_PWR_CONSTRAINT) | (1ULL << WLAN_EID_HT_CAPABILITY) | - (1ULL << WLAN_EID_HT_INFORMATION); + (1ULL << WLAN_EID_HT_OPERATION); static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, @@ -2348,8 +2322,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, return; if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon && - compare_ether_addr(mgmt->bssid, ifmgd->assoc_data->bss->bssid) - == 0) { + ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) { ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); @@ -2364,7 +2337,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, } if (!ifmgd->associated || - compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid)) + !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) return; bssid = ifmgd->associated->bssid; @@ -2431,10 +2404,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) { #ifdef CONFIG_MAC80211_VERBOSE_DEBUG - if (net_ratelimit()) { - printk(KERN_DEBUG "%s: cancelling probereq poll due " - "to a received beacon\n", sdata->name); - } + net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n", + sdata->name); #endif ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; mutex_lock(&local->iflist_mtx); @@ -2468,11 +2439,13 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) { if (directed_tim) { if (local->hw.conf.dynamic_ps_timeout > 0) { - local->hw.conf.flags &= ~IEEE80211_CONF_PS; - ieee80211_hw_config(local, - IEEE80211_CONF_CHANGE_PS); + if (local->hw.conf.flags & IEEE80211_CONF_PS) { + local->hw.conf.flags &= ~IEEE80211_CONF_PS; + ieee80211_hw_config(local, + 
IEEE80211_CONF_CHANGE_PS); + } ieee80211_send_nullfunc(local, sdata, 0); - } else { + } else if (!local->pspolling && sdata->u.mgd.powersave) { local->pspolling = true; /* @@ -2504,31 +2477,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, erp_valid, erp_value); - if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param && + if (elems.ht_cap_elem && elems.ht_operation && elems.wmm_param && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) { - struct sta_info *sta; struct ieee80211_supported_band *sband; - u16 ap_ht_cap_flags; - - rcu_read_lock(); - - sta = sta_info_get(sdata, bssid); - if (WARN_ON(!sta)) { - rcu_read_unlock(); - return; - } sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; - ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, - elems.ht_cap_elem, &sta->sta.ht_cap); - - ap_ht_cap_flags = sta->sta.ht_cap.cap; - - rcu_read_unlock(); - - changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, - bssid, ap_ht_cap_flags, true); + changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation, + bssid, true); } /* Note: country IE parsing is done for us by cfg80211 */ @@ -3060,6 +3016,11 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, struct sta_info *sta; bool have_sta = false; int err; + int ht_cfreq; + enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; + const u8 *ht_oper_ie; + const struct ieee80211_ht_operation *ht_oper = NULL; + struct ieee80211_supported_band *sband; if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data)) return -EINVAL; @@ -3081,17 +3042,76 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, mutex_unlock(&local->mtx); /* switch to the right channel */ + sband = local->hw.wiphy->bands[cbss->channel->band]; + + ifmgd->flags &= ~IEEE80211_STA_DISABLE_40MHZ; + + if (sband->ht_cap.ht_supported) { + ht_oper_ie = cfg80211_find_ie(WLAN_EID_HT_OPERATION, + cbss->information_elements, + cbss->len_information_elements); + if (ht_oper_ie && ht_oper_ie[1] >= sizeof(*ht_oper)) + ht_oper = (void *)(ht_oper_ie + 2); + } + + if (ht_oper) { + ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, + cbss->channel->band); + /* check that channel matches the right operating channel */ + if (cbss->channel->center_freq != ht_cfreq) { + /* + * It's possible that some APs are confused here; + * Netgear WNDR3700 sometimes reports 4 higher than + * the actual channel in association responses, but + * since we look at probe response/beacon data here + * it should be OK. + */ + printk(KERN_DEBUG + "%s: Wrong control channel: center-freq: %d" + " ht-cfreq: %d ht->primary_chan: %d" + " band: %d. 
Disabling HT.\n", + sdata->name, cbss->channel->center_freq, + ht_cfreq, ht_oper->primary_chan, + cbss->channel->band); + ht_oper = NULL; + } + } + + if (ht_oper) { + channel_type = NL80211_CHAN_HT20; + + if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) { + switch (ht_oper->ht_param & + IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + channel_type = NL80211_CHAN_HT40PLUS; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + channel_type = NL80211_CHAN_HT40MINUS; + break; + } + } + } + + if (!ieee80211_set_channel_type(local, sdata, channel_type)) { + /* can only fail due to HT40+/- mismatch */ + channel_type = NL80211_CHAN_HT20; + printk(KERN_DEBUG + "%s: disabling 40 MHz due to multi-vif mismatch\n", + sdata->name); + ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ; + WARN_ON(!ieee80211_set_channel_type(local, sdata, + channel_type)); + } + local->oper_channel = cbss->channel; - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); + ieee80211_hw_config(local, 0); if (!have_sta) { - struct ieee80211_supported_band *sband; u32 rates = 0, basic_rates = 0; bool have_higher_than_11mbit; int min_rate = INT_MAX, min_rate_index = -1; - sband = sdata->local->hw.wiphy->bands[cbss->channel->band]; - ieee80211_get_rates(sband, bss->supp_rates, bss->supp_rates_len, &rates, &basic_rates, @@ -3141,7 +3161,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, return err; } } else - WARN_ON_ONCE(compare_ether_addr(ifmgd->bssid, cbss->bssid)); + WARN_ON_ONCE(!ether_addr_equal(ifmgd->bssid, cbss->bssid)); return 0; } @@ -3281,7 +3301,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, bool match; /* keep sta info, bssid if matching */ - match = compare_ether_addr(ifmgd->bssid, req->bss->bssid) == 0; + match = ether_addr_equal(ifmgd->bssid, req->bss->bssid); ieee80211_destroy_auth_data(sdata, match); } @@ -3311,7 +3331,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, /* Also disable HT if we don't support it or the AP doesn't use WMM */ sband = local->hw.wiphy->bands[req->bss->channel->band]; if (!sband->ht_cap.ht_supported || - local->hw.queues < 4 || !bss->wmm_used) + local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) ifmgd->flags |= IEEE80211_STA_DISABLE_11N; memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa)); @@ -3334,11 +3354,12 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, ifmgd->ap_smps = ifmgd->req_smps; assoc_data->capability = req->bss->capability; - assoc_data->wmm = bss->wmm_used && (local->hw.queues >= 4); + assoc_data->wmm = bss->wmm_used && + (local->hw.queues >= IEEE80211_NUM_ACS); assoc_data->supp_rates = bss->supp_rates; assoc_data->supp_rates_len = bss->supp_rates_len; - assoc_data->ht_information_ie = - ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_INFORMATION); + assoc_data->ht_operation_ie = + ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION); if (bss->wmm_used && bss->uapsd_supported && (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) { @@ -3440,7 +3461,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, sdata->name, req->bssid, req->reason_code); if (ifmgd->associated && - compare_ether_addr(ifmgd->associated->bssid, req->bssid) == 0) + ether_addr_equal(ifmgd->associated->bssid, req->bssid)) ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, req->reason_code, true, frame_buf); else diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index ef8eba1d736..af1c4e26e96 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -127,6 
+127,10 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) drv_remove_interface(local, sdata); } + sdata = rtnl_dereference(local->monitor_sdata); + if (sdata) + drv_remove_interface(local, sdata); + /* stop hardware - this must stop RX */ if (local->open_count) ieee80211_stop_device(local); diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index fbb1efdc4d0..6e4fd32c661 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h @@ -17,6 +17,7 @@ #include <net/mac80211.h> #include "ieee80211_i.h" #include "sta_info.h" +#include "driver-ops.h" struct rate_control_ref { struct ieee80211_local *local; @@ -63,8 +64,7 @@ static inline void rate_control_rate_init(struct sta_info *sta) static inline void rate_control_rate_update(struct ieee80211_local *local, struct ieee80211_supported_band *sband, - struct sta_info *sta, u32 changed, - enum nl80211_channel_type oper_chan_type) + struct sta_info *sta, u32 changed) { struct rate_control_ref *ref = local->rate_ctrl; struct ieee80211_sta *ista = &sta->sta; @@ -72,7 +72,8 @@ static inline void rate_control_rate_update(struct ieee80211_local *local, if (ref && ref->ops->rate_update) ref->ops->rate_update(ref->priv, sband, ista, - priv_sta, changed, oper_chan_type); + priv_sta, changed); + drv_sta_rc_update(local, sta->sdata, &sta->sta, changed); } static inline void *rate_control_alloc_sta(struct rate_control_ref *ref, diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index b39dda523f3..79633ae06fd 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -334,14 +334,15 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, static void -calc_rate_durations(struct ieee80211_local *local, struct minstrel_rate *d, +calc_rate_durations(enum ieee80211_band band, + struct minstrel_rate *d, struct ieee80211_rate *rate) { int erp = !!(rate->flags & IEEE80211_RATE_ERP_G); - d->perfect_tx_time = ieee80211_frame_duration(local, 1200, + d->perfect_tx_time = ieee80211_frame_duration(band, 1200, rate->bitrate, erp, 1); - d->ack_time = ieee80211_frame_duration(local, 10, + d->ack_time = ieee80211_frame_duration(band, 10, rate->bitrate, erp, 1); } @@ -379,14 +380,14 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband, { struct minstrel_sta_info *mi = priv_sta; struct minstrel_priv *mp = priv; - struct ieee80211_local *local = hw_to_local(mp->hw); struct ieee80211_rate *ctl_rate; unsigned int i, n = 0; unsigned int t_slot = 9; /* FIXME: get real slot time */ mi->lowest_rix = rate_lowest_index(sband, sta); ctl_rate = &sband->bitrates[mi->lowest_rix]; - mi->sp_ack_dur = ieee80211_frame_duration(local, 10, ctl_rate->bitrate, + mi->sp_ack_dur = ieee80211_frame_duration(sband->band, 10, + ctl_rate->bitrate, !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1); for (i = 0; i < sband->n_bitrates; i++) { @@ -402,7 +403,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband, mr->rix = i; mr->bitrate = sband->bitrates[i].bitrate / 5; - calc_rate_durations(local, mr, &sband->bitrates[i]); + calc_rate_durations(sband->band, mr, &sband->bitrates[i]); /* calculate maximum number of retransmissions before * fallback (based on maximum segment size) */ diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 16e0b277b9a..2d1acc6c544 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -686,14 +686,12 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, 
static void minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *priv_sta, - enum nl80211_channel_type oper_chan_type) + struct ieee80211_sta *sta, void *priv_sta) { struct minstrel_priv *mp = priv; struct minstrel_ht_sta_priv *msp = priv_sta; struct minstrel_ht_sta *mi = &msp->ht; struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; - struct ieee80211_local *local = hw_to_local(mp->hw); u16 sta_cap = sta->ht_cap.cap; int n_supported = 0; int ack_dur; @@ -712,8 +710,8 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, memset(mi, 0, sizeof(*mi)); mi->stats_update = jiffies; - ack_dur = ieee80211_frame_duration(local, 10, 60, 1, 1); - mi->overhead = ieee80211_frame_duration(local, 0, 60, 1, 1) + ack_dur; + ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1); + mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1) + ack_dur; mi->overhead_rtscts = mi->overhead + 2 * ack_dur; mi->avg_ampdu_len = MINSTREL_FRAC(1, 1); @@ -735,10 +733,6 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING) mi->tx_flags |= IEEE80211_TX_CTL_LDPC; - if (oper_chan_type != NL80211_CHAN_HT40MINUS && - oper_chan_type != NL80211_CHAN_HT40PLUS) - sta_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; - smps = (sta_cap & IEEE80211_HT_CAP_SM_PS) >> IEEE80211_HT_CAP_SM_PS_SHIFT; @@ -788,17 +782,15 @@ static void minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta) { - struct minstrel_priv *mp = priv; - - minstrel_ht_update_caps(priv, sband, sta, priv_sta, mp->hw->conf.channel_type); + minstrel_ht_update_caps(priv, sband, sta, priv_sta); } static void minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta, - u32 changed, enum nl80211_channel_type oper_chan_type) + u32 changed) { - minstrel_ht_update_caps(priv, sband, sta, priv_sta, oper_chan_type); + minstrel_ht_update_caps(priv, sband, sta, priv_sta); } static void * diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index d64e285400a..8257a09eeed 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -426,6 +426,7 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx) if (test_bit(SCAN_HW_SCANNING, &local->scanning) || test_bit(SCAN_SW_SCANNING, &local->scanning) || + test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) || local->sched_scanning) return ieee80211_scan_rx(rx->sdata, skb); @@ -491,12 +492,12 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) if (ieee80211_has_tods(hdr->frame_control) || !ieee80211_has_fromds(hdr->frame_control)) return RX_DROP_MONITOR; - if (compare_ether_addr(hdr->addr3, dev_addr) == 0) + if (ether_addr_equal(hdr->addr3, dev_addr)) return RX_DROP_MONITOR; } else { if (!ieee80211_has_a4(hdr->frame_control)) return RX_DROP_MONITOR; - if (compare_ether_addr(hdr->addr4, dev_addr) == 0) + if (ether_addr_equal(hdr->addr4, dev_addr)) return RX_DROP_MONITOR; } } @@ -794,8 +795,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx) /* reset session timer */ if (tid_agg_rx->timeout) - mod_timer(&tid_agg_rx->session_timer, - TU_TO_EXP_TIME(tid_agg_rx->timeout)); + tid_agg_rx->last_rx = jiffies; /* if this mpdu is fragmented - terminate rx aggregation session */ sc = le16_to_cpu(hdr->seq_ctrl); @@ -1275,7 +1275,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) { u8 *bssid = 
ieee80211_get_bssid(hdr, rx->skb->len, NL80211_IFTYPE_ADHOC); - if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) { + if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid)) { sta->last_rx = jiffies; if (ieee80211_is_data(hdr->frame_control)) { sta->last_rx_rate_idx = status->rate_idx; @@ -1438,8 +1438,8 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, */ if (((hdr->frame_control ^ f_hdr->frame_control) & cpu_to_le16(IEEE80211_FCTL_FTYPE)) || - compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 || - compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) + !ether_addr_equal(hdr->addr1, f_hdr->addr1) || + !ether_addr_equal(hdr->addr2, f_hdr->addr2)) continue; if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { @@ -1714,8 +1714,8 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) * of whether the frame was encrypted or not. */ if (ehdr->h_proto == rx->sdata->control_port_protocol && - (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 || - compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0)) + (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || + ether_addr_equal(ehdr->h_dest, pae_group_addr))) return true; if (ieee80211_802_1x_port_control(rx) || @@ -1752,9 +1752,9 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) * local net stack and back to the wireless medium */ xmit_skb = skb_copy(skb, GFP_ATOMIC); - if (!xmit_skb && net_ratelimit()) - printk(KERN_DEBUG "%s: failed to clone " - "multicast frame\n", dev->name); + if (!xmit_skb) + net_dbg_ratelimited("%s: failed to clone multicast frame\n", + dev->name); } else { dsta = sta_info_get(sdata, skb->data); if (dsta) { @@ -1925,7 +1925,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) mpp_path_add(proxied_addr, mpp_addr, sdata); } else { spin_lock_bh(&mppath->state_lock); - if (compare_ether_addr(mppath->mpp, mpp_addr) != 0) + if (!ether_addr_equal(mppath->mpp, mpp_addr)) memcpy(mppath->mpp, mpp_addr, ETH_ALEN); spin_unlock_bh(&mppath->state_lock); } @@ -1934,7 +1934,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) /* Frame has reached destination. Don't forward */ if (!is_multicast_ether_addr(hdr->addr1) && - compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0) + ether_addr_equal(sdata->vif.addr, hdr->addr3)) return RX_CONTINUE; q = ieee80211_select_queue_80211(local, skb, hdr); @@ -1957,9 +1957,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) fwd_skb = skb_copy(skb, GFP_ATOMIC); if (!fwd_skb) { - if (net_ratelimit()) - printk(KERN_DEBUG "%s: failed to clone mesh frame\n", - sdata->name); + net_dbg_ratelimited("%s: failed to clone mesh frame\n", + sdata->name); goto out; } @@ -2122,13 +2121,13 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb; struct ieee80211_mgmt *resp; - if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) { + if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) { /* Not to own unicast address */ return; } - if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 || - compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) { + if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) || + !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) { /* Not from the current AP or not associated yet. 
*/ return; } @@ -2270,11 +2269,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) sband = rx->local->hw.wiphy->bands[status->band]; - rate_control_rate_update( - local, sband, rx->sta, - IEEE80211_RC_SMPS_CHANGED, - ieee80211_get_tx_channel_type( - local, local->_oper_channel_type)); + rate_control_rate_update(local, sband, rx->sta, + IEEE80211_RC_SMPS_CHANGED); goto handled; } default: @@ -2341,7 +2337,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) if (sdata->vif.type != NL80211_IFTYPE_STATION) break; - if (compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid)) + if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) break; goto queue; @@ -2775,7 +2771,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx, if (!bssid && !sdata->u.mgd.use_4addr) return 0; if (!multicast && - compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) { + !ether_addr_equal(sdata->vif.addr, hdr->addr1)) { if (!(sdata->dev->flags & IFF_PROMISC) || sdata->u.mgd.use_4addr) return 0; @@ -2793,8 +2789,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx, return 0; status->rx_flags &= ~IEEE80211_RX_RA_MATCH; } else if (!multicast && - compare_ether_addr(sdata->vif.addr, - hdr->addr1) != 0) { + !ether_addr_equal(sdata->vif.addr, hdr->addr1)) { if (!(sdata->dev->flags & IFF_PROMISC)) return 0; status->rx_flags &= ~IEEE80211_RX_RA_MATCH; @@ -2810,8 +2805,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx, break; case NL80211_IFTYPE_MESH_POINT: if (!multicast && - compare_ether_addr(sdata->vif.addr, - hdr->addr1) != 0) { + !ether_addr_equal(sdata->vif.addr, hdr->addr1)) { if (!(sdata->dev->flags & IFF_PROMISC)) return 0; @@ -2821,8 +2815,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx, case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_AP: if (!bssid) { - if (compare_ether_addr(sdata->vif.addr, - hdr->addr1)) + if (!ether_addr_equal(sdata->vif.addr, hdr->addr1)) return 0; } else if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) { @@ -2844,7 +2837,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx, case NL80211_IFTYPE_WDS: if (bssid || !ieee80211_is_data(hdr->frame_control)) return 0; - if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2)) + if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2)) return 0; break; default: @@ -2921,6 +2914,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, local->dot11ReceivedFragmentCount++; if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) || + test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) || test_bit(SCAN_SW_SCANNING, &local->scanning))) status->rx_flags |= IEEE80211_RX_IN_SCAN; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index c70e1767713..169da0742c8 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -29,20 +29,6 @@ #define IEEE80211_CHANNEL_TIME (HZ / 33) #define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 8) -struct ieee80211_bss * -ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, - u8 *ssid, u8 ssid_len) -{ - struct cfg80211_bss *cbss; - - cbss = cfg80211_get_bss(local->hw.wiphy, - ieee80211_get_channel(local->hw.wiphy, freq), - bssid, ssid, ssid_len, 0, 0); - if (!cbss) - return NULL; - return (void *)cbss->priv; -} - static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss) { struct ieee80211_bss *bss = (void *)cbss->priv; @@ -208,7 +194,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) presp = ieee80211_is_probe_resp(fc); if (presp) { /* ignore ProbeResp to foreign address */ - 
if (compare_ether_addr(mgmt->da, sdata->vif.addr)) + if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) return RX_DROP_MONITOR; presp = true; @@ -387,6 +373,57 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local) return 0; } +static bool ieee80211_can_scan(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + if (!list_empty(&local->work_list)) + return false; + + if (sdata->vif.type == NL80211_IFTYPE_STATION && + sdata->u.mgd.flags & (IEEE80211_STA_BEACON_POLL | + IEEE80211_STA_CONNECTION_POLL)) + return false; + + return true; +} + +void ieee80211_run_deferred_scan(struct ieee80211_local *local) +{ + lockdep_assert_held(&local->mtx); + + if (!local->scan_req || local->scanning) + return; + + if (!ieee80211_can_scan(local, local->scan_sdata)) + return; + + ieee80211_queue_delayed_work(&local->hw, &local->scan_work, + round_jiffies_relative(0)); +} + +static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, + unsigned long *next_delay) +{ + int i; + struct ieee80211_sub_if_data *sdata = local->scan_sdata; + enum ieee80211_band band = local->hw.conf.channel->band; + + for (i = 0; i < local->scan_req->n_ssids; i++) + ieee80211_send_probe_req( + sdata, NULL, + local->scan_req->ssids[i].ssid, + local->scan_req->ssids[i].ssid_len, + local->scan_req->ie, local->scan_req->ie_len, + local->scan_req->rates[band], false, + local->scan_req->no_cck); + + /* + * After sending probe requests, wait for probe responses + * on the channel. + */ + *next_delay = IEEE80211_CHANNEL_TIME; + local->next_scan_state = SCAN_DECISION; +} static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, struct cfg80211_scan_request *req) @@ -399,7 +436,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, if (local->scan_req) return -EBUSY; - if (!list_empty(&local->work_list)) { + if (!ieee80211_can_scan(local, sdata)) { /* wait for the work to finish/time out */ local->scan_req = req; local->scan_sdata = sdata; @@ -438,10 +475,47 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, local->scan_req = req; local->scan_sdata = sdata; - if (local->ops->hw_scan) + if (local->ops->hw_scan) { __set_bit(SCAN_HW_SCANNING, &local->scanning); - else + } else if ((req->n_channels == 1) && + (req->channels[0]->center_freq == + local->hw.conf.channel->center_freq)) { + + /* If we are scanning only on the current channel, then + * we do not need to stop normal activities + */ + unsigned long next_delay; + + __set_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning); + + ieee80211_recalc_idle(local); + + /* Notify driver scan is starting, keep order of operations + * same as normal software scan, in case that matters. */ + drv_sw_scan_start(local); + + ieee80211_configure_filter(local); /* accept probe-responses */ + + /* We need to ensure power level is at max for scanning. */ + ieee80211_hw_config(local, 0); + + if ((req->channels[0]->flags & + IEEE80211_CHAN_PASSIVE_SCAN) || + !local->scan_req->n_ssids) { + next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; + } else { + ieee80211_scan_state_send_probe(local, &next_delay); + next_delay = IEEE80211_CHANNEL_TIME; + } + + /* Now, just wait a bit and we are all done! 
*/ + ieee80211_queue_delayed_work(&local->hw, &local->scan_work, + next_delay); + return 0; + } else { + /* Do normal software scan */ __set_bit(SCAN_SW_SCANNING, &local->scanning); + } ieee80211_recalc_idle(local); @@ -598,30 +672,6 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, local->next_scan_state = SCAN_SEND_PROBE; } -static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, - unsigned long *next_delay) -{ - int i; - struct ieee80211_sub_if_data *sdata = local->scan_sdata; - enum ieee80211_band band = local->hw.conf.channel->band; - - for (i = 0; i < local->scan_req->n_ssids; i++) - ieee80211_send_probe_req( - sdata, NULL, - local->scan_req->ssids[i].ssid, - local->scan_req->ssids[i].ssid_len, - local->scan_req->ie, local->scan_req->ie_len, - local->scan_req->rates[band], false, - local->scan_req->no_cck); - - /* - * After sending probe requests, wait for probe responses - * on the channel. - */ - *next_delay = IEEE80211_CHANNEL_TIME; - local->next_scan_state = SCAN_DECISION; -} - static void ieee80211_scan_state_suspend(struct ieee80211_local *local, unsigned long *next_delay) { @@ -672,6 +722,12 @@ void ieee80211_scan_work(struct work_struct *work) sdata = local->scan_sdata; + /* When scanning on-channel, the first-callback means completed. */ + if (test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) { + aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning); + goto out_complete; + } + if (test_and_clear_bit(SCAN_COMPLETED, &local->scanning)) { aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning); goto out_complete; diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 38137cb5f6f..f5b1638fbf8 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -102,7 +102,7 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, lockdep_is_held(&local->sta_mtx)); while (sta) { if (sta->sdata == sdata && - compare_ether_addr(sta->sta.addr, addr) == 0) + ether_addr_equal(sta->sta.addr, addr)) break; sta = rcu_dereference_check(sta->hnext, lockdep_is_held(&local->sta_mtx)); @@ -125,7 +125,7 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, while (sta) { if ((sta->sdata == sdata || (sta->sdata->bss && sta->sdata->bss == sdata->bss)) && - compare_ether_addr(sta->sta.addr, addr) == 0) + ether_addr_equal(sta->sta.addr, addr)) break; sta = rcu_dereference_check(sta->hnext, lockdep_is_held(&local->sta_mtx)); @@ -302,7 +302,7 @@ static int sta_info_insert_check(struct sta_info *sta) if (unlikely(!ieee80211_sdata_running(sdata))) return -ENETDOWN; - if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 || + if (WARN_ON(ether_addr_equal(sta->sta.addr, sdata->vif.addr) || is_multicast_ether_addr(sta->sta.addr))) return -EINVAL; @@ -912,7 +912,7 @@ struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw, */ for_each_sta_info(hw_to_local(hw), addr, sta, nxt) { if (localaddr && - compare_ether_addr(sta->sdata->vif.addr, localaddr) != 0) + !ether_addr_equal(sta->sdata->vif.addr, localaddr)) continue; if (!sta->uploaded) return NULL; @@ -1195,13 +1195,15 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta, ieee80211_is_qos_nullfunc(hdr->frame_control)) qoshdr = ieee80211_get_qos_ctl(hdr); - /* set EOSP for the frame */ - if (reason == IEEE80211_FRAME_RELEASE_UAPSD && - qoshdr && skb_queue_empty(&frames)) - *qoshdr |= IEEE80211_QOS_CTL_EOSP; + /* end service period after last frame */ + if (skb_queue_empty(&frames)) { + if (reason == 
IEEE80211_FRAME_RELEASE_UAPSD && + qoshdr) + *qoshdr |= IEEE80211_QOS_CTL_EOSP; - info->flags |= IEEE80211_TX_STATUS_EOSP | - IEEE80211_TX_CTL_REQ_TX_STATUS; + info->flags |= IEEE80211_TX_STATUS_EOSP | + IEEE80211_TX_CTL_REQ_TX_STATUS; + } if (qoshdr) tids |= BIT(*qoshdr & IEEE80211_QOS_CTL_TID_MASK); @@ -1415,15 +1417,19 @@ int sta_info_move_state(struct sta_info *sta, if (sta->sta_state == IEEE80211_STA_AUTH) { set_bit(WLAN_STA_ASSOC, &sta->_flags); } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { - if (sta->sdata->vif.type == NL80211_IFTYPE_AP) - atomic_dec(&sta->sdata->u.ap.num_sta_authorized); + if (sta->sdata->vif.type == NL80211_IFTYPE_AP || + (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && + !sta->sdata->u.vlan.sta)) + atomic_dec(&sta->sdata->bss->num_mcast_sta); clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); } break; case IEEE80211_STA_AUTHORIZED: if (sta->sta_state == IEEE80211_STA_ASSOC) { - if (sta->sdata->vif.type == NL80211_IFTYPE_AP) - atomic_inc(&sta->sdata->u.ap.num_sta_authorized); + if (sta->sdata->vif.type == NL80211_IFTYPE_AP || + (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && + !sta->sdata->u.vlan.sta)) + atomic_inc(&sta->sdata->bss->num_mcast_sta); set_bit(WLAN_STA_AUTHORIZED, &sta->_flags); } break; diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index ab0576827ba..3bb24a121c9 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -55,6 +55,7 @@ * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame. * @WLAN_STA_INSERTED: This station is inserted into the hash table. * @WLAN_STA_RATE_CONTROL: rate control was initialized for this station. + * @WLAN_STA_TOFFSET_KNOWN: toffset calculated for this station is valid. */ enum ieee80211_sta_info_flags { WLAN_STA_AUTH, @@ -76,6 +77,7 @@ enum ieee80211_sta_info_flags { WLAN_STA_4ADDR_EVENT, WLAN_STA_INSERTED, WLAN_STA_RATE_CONTROL, + WLAN_STA_TOFFSET_KNOWN, }; #define STA_TID_NUM 16 @@ -101,6 +103,7 @@ enum ieee80211_sta_info_flags { * @dialog_token: dialog token for aggregation session * @timeout: session timeout value to be filled in ADDBA requests * @state: session state (see above) + * @last_tx: jiffies of last tx activity * @stop_initiator: initiator of a session stop * @tx_stop: TX DelBA frame when stopping * @buf_size: reorder buffer size at receiver @@ -122,6 +125,7 @@ struct tid_ampdu_tx { struct timer_list addba_resp_timer; struct sk_buff_head pending; unsigned long state; + unsigned long last_tx; u16 timeout; u8 dialog_token; u8 stop_initiator; @@ -139,6 +143,7 @@ struct tid_ampdu_tx { * @reorder_time: jiffies when skb was added * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value) * @reorder_timer: releases expired frames from the reorder buffer. + * @last_rx: jiffies of last rx activity * @head_seq_num: head sequence number in reordering buffer. * @stored_mpdu_num: number of MPDUs in reordering buffer * @ssn: Starting Sequence Number expected to be aggregated. 
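As a side note on the @last_tx/@last_rx fields documented above: they pair with the rx.c and tx.c hunks earlier in this patch, which stop re-arming the aggregation session timers on every frame and instead only stamp jiffies in the hot path, leaving it to the much less frequent timer side to decide whether the session has really gone idle. The standalone C sketch below only illustrates that pattern; struct agg_session, TU_TO_TICKS and the HZ value are illustrative stand-ins rather than mac80211 structures or helpers, and the real expiry handling lives in the aggregation timer code that is not part of these hunks.

/* Minimal userspace sketch of "stamp a timestamp instead of re-arming a
 * timer per frame": the per-frame path only records the current tick,
 * and a rare timer callback checks whether the idle time exceeded the
 * negotiated session timeout (expressed in 802.11 time units, TUs). */
#include <stdbool.h>
#include <stdio.h>

#define HZ 100                                        /* assumed ticks per second */
#define time_after(a, b) ((long)((b) - (a)) < 0)
#define TU_TO_TICKS(tu) ((tu) * 1024 * HZ / 1000000)  /* 1 TU = 1024 us */

struct agg_session {
	unsigned long last_rx;  /* tick of last RX activity on this TID */
	unsigned int timeout;   /* session timeout in TUs, 0 = no timeout */
};

/* cheap per-frame path: just remember when we last saw traffic */
static void session_rx_activity(struct agg_session *s, unsigned long now)
{
	s->last_rx = now;
}

/* rare timer path: has the session been idle longer than the timeout? */
static bool session_expired(const struct agg_session *s, unsigned long now)
{
	if (!s->timeout)
		return false;
	return time_after(now, s->last_rx + TU_TO_TICKS(s->timeout));
}

int main(void)
{
	struct agg_session s = { .timeout = 5000 };     /* 5000 TUs ~ 5.1 s */

	session_rx_activity(&s, 1000);                  /* traffic at tick 1000 */
	printf("expired at tick 1200? %d\n", session_expired(&s, 1200));
	printf("expired at tick 2000? %d\n", session_expired(&s, 2000));
	return 0;
}

The trade-off, as far as these hunks show it, is that updating a timestamp is much cheaper per MPDU than mod_timer(), while the session timer now has to re-check the timestamp when it fires instead of being pushed out once per received or transmitted frame.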
@@ -163,6 +168,7 @@ struct tid_ampdu_rx { unsigned long *reorder_time; struct timer_list session_timer; struct timer_list reorder_timer; + unsigned long last_rx; u16 head_seq_num; u16 stored_mpdu_num; u16 ssn; @@ -264,6 +270,7 @@ struct sta_ampdu_mlme { * @plink_timeout: timeout of peer link * @plink_timer: peer link watch timer * @plink_timer_was_running: used by suspend/resume to restore timers + * @t_offset: timing offset relative to this host * @debugfs: debug filesystem info * @dead: set to true when sta is unlinked * @uploaded: set to true when sta is uploaded to the driver @@ -353,6 +360,9 @@ struct sta_info { enum nl80211_plink_state plink_state; u32 plink_timeout; struct timer_list plink_timer; + s64 t_offset; + s64 t_offset_setpoint; + enum nl80211_channel_type ch_type; #endif #ifdef CONFIG_MAC80211_DEBUGFS @@ -365,6 +375,8 @@ struct sta_info { unsigned int lost_packets; unsigned int beacon_loss_count; + bool supports_40mhz; + /* keep last! */ struct ieee80211_sta sta; }; @@ -490,7 +502,7 @@ void for_each_sta_info_type_check(struct ieee80211_local *local, nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \ ) \ /* compare address and run code only if it matches */ \ - if (compare_ether_addr(_sta->sta.addr, (_addr)) == 0) + if (ether_addr_equal(_sta->sta.addr, (_addr))) /* * Get STA info by index, BROKEN! diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 5f8f89e89d6..28cfa981cfb 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -355,7 +355,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) int rtap_len; for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { - if (info->status.rates[i].idx < 0) { + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && + !(info->flags & IEEE80211_TX_STAT_AMPDU)) { + /* just the first aggr frame carry status info */ + info->status.rates[i].idx = -1; + info->status.rates[i].count = 0; + break; + } else if (info->status.rates[i].idx < 0) { break; } else if (i >= hw->max_report_rates) { /* the HW cannot have attempted that rate */ @@ -378,7 +384,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) for_each_sta_info(local, hdr->addr1, sta, tmp) { /* skip wrong virtual interface */ - if (compare_ether_addr(hdr->addr2, sta->sdata->vif.addr)) + if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr)) continue; if (info->flags & IEEE80211_TX_STATUS_EOSP) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index e76facc69e9..5f827a6b0d8 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -159,7 +159,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, /* Time needed to transmit ACK * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up * to closest integer */ - dur = ieee80211_frame_duration(local, 10, rate, erp, + dur = ieee80211_frame_duration(sband->band, 10, rate, erp, tx->sdata->vif.bss_conf.use_short_preamble); if (next_frag_len) { @@ -167,7 +167,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, * transmit next fragment plus ACK and 2 x SIFS. */ dur *= 2; /* ACK + SIFS */ /* next fragment */ - dur += ieee80211_frame_duration(local, next_frag_len, + dur += ieee80211_frame_duration(sband->band, next_frag_len, txrate->bitrate, erp, tx->sdata->vif.bss_conf.use_short_preamble); } @@ -230,9 +230,9 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx) * changed via debugfs, user needs to reassociate manually to have * everything in sync. 
*/ - if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) - && (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) - && skb_get_queue_mapping(tx->skb) == 0) + if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) && + (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) && + skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO) return TX_CONTINUE; if (local->hw.conf.flags & IEEE80211_CONF_PS) { @@ -306,7 +306,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) } } else if (unlikely(tx->sdata->vif.type == NL80211_IFTYPE_AP && ieee80211_is_data(hdr->frame_control) && - !atomic_read(&tx->sdata->u.ap.num_sta_authorized))) { + !atomic_read(&tx->sdata->u.ap.num_mcast_sta))) { /* * No associated STAs - no need to send multicast * frames. @@ -400,6 +400,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) return TX_CONTINUE; info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; + if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) + info->hw_queue = tx->sdata->vif.cab_queue; /* device releases frame after DTIM beacon */ if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)) @@ -411,9 +413,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) { #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG - if (net_ratelimit()) - printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n", - tx->sdata->name); + net_dbg_ratelimited("%s: BC TX buffer full - dropping the oldest frame\n", + tx->sdata->name); #endif dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); } else @@ -474,10 +475,8 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) { struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]); #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG - if (net_ratelimit()) - printk(KERN_DEBUG "%s: STA %pM TX buffer for " - "AC %d full - dropping oldest frame\n", - tx->sdata->name, sta->sta.addr, ac); + net_dbg_ratelimited("%s: STA %pM TX buffer for AC %d full - dropping oldest frame\n", + tx->sdata->name, sta->sta.addr, ac); #endif dev_kfree_skb(old); } else @@ -1118,8 +1117,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, /* reset session timer */ if (reset_agg_timer && tid_tx->timeout) - mod_timer(&tid_tx->session_timer, - TU_TO_EXP_TIME(tid_tx->timeout)); + tid_tx->last_tx = jiffies; return queued; } @@ -1216,11 +1214,19 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local, bool txpending) { struct sk_buff *skb, *tmp; - struct ieee80211_tx_info *info; unsigned long flags; skb_queue_walk_safe(skbs, skb, tmp) { - int q = skb_get_queue_mapping(skb); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int q = info->hw_queue; + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + if (WARN_ON_ONCE(q >= local->hw.queues)) { + __skb_unlink(skb, skbs); + dev_kfree_skb(skb); + continue; + } +#endif spin_lock_irqsave(&local->queue_stop_reason_lock, flags); if (local->queue_stop_reasons[q] || @@ -1242,7 +1248,6 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local, } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); - info = IEEE80211_SKB_CB(skb); info->control.vif = vif; info->control.sta = sta; @@ -1285,8 +1290,16 @@ static bool __ieee80211_tx(struct ieee80211_local *local, switch (sdata->vif.type) { case NL80211_IFTYPE_MONITOR: - sdata = NULL; - vif = NULL; + sdata = rcu_dereference(local->monitor_sdata); + if (sdata) { + vif = &sdata->vif; + info->hw_queue = + 
vif->hw_queue[skb_get_queue_mapping(skb)]; + } else if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) { + dev_kfree_skb(skb); + return true; + } else + vif = NULL; break; case NL80211_IFTYPE_AP_VLAN: sdata = container_of(sdata->bss, @@ -1401,6 +1414,12 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, tx.channel = local->hw.conf.channel; info->band = tx.channel->band; + /* set up hw_queue value early */ + if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) || + !(local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)) + info->hw_queue = + sdata->vif.hw_queue[skb_get_queue_mapping(skb)]; + if (!invoke_tx_handlers(&tx)) result = __ieee80211_tx(local, &tx.skbs, led_len, tx.sta, txpending); @@ -1469,12 +1488,12 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) if (ieee80211_vif_is_mesh(&sdata->vif) && ieee80211_is_data(hdr->frame_control) && - !is_multicast_ether_addr(hdr->addr1)) - if (mesh_nexthop_resolve(skb, sdata)) { - /* skb queued: don't free */ - rcu_read_unlock(); - return; - } + !is_multicast_ether_addr(hdr->addr1) && + mesh_nexthop_resolve(skb, sdata)) { + /* skb queued: don't free */ + rcu_read_unlock(); + return; + } ieee80211_set_qos_hdr(sdata, skb); ieee80211_tx(sdata, skb, false); @@ -1643,7 +1662,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) { u8 *payload = (u8 *)hdr + hdrlen; - if (compare_ether_addr(payload, rfc1042_header) == 0) + if (ether_addr_equal(payload, rfc1042_header)) skb->protocol = cpu_to_be16((payload[6] << 8) | payload[7]); } @@ -1676,7 +1695,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN || tmp_sdata->vif.type == NL80211_IFTYPE_WDS) continue; - if (compare_ether_addr(tmp_sdata->vif.addr, hdr->addr2) == 0) { + if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) { sdata = tmp_sdata; break; } @@ -1793,9 +1812,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, * is being proxied by a portal (i.e. 
portal address * differs from proxied address) */ - if (compare_ether_addr(sdata->vif.addr, - skb->data + ETH_ALEN) == 0 && - !(mppath && compare_ether_addr(mppath->mpp, skb->data))) { + if (ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN) && + !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) { hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, skb->data, skb->data + ETH_ALEN); rcu_read_unlock(); @@ -1930,7 +1948,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, wme_sta = true; /* receiver and we are QoS enabled, use a QoS type frame */ - if (wme_sta && local->hw.queues >= 4) { + if (wme_sta && local->hw.queues >= IEEE80211_NUM_ACS) { fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); hdrlen += 2; } @@ -1942,12 +1960,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) && !is_multicast_ether_addr(hdr.addr1) && !authorized && (cpu_to_be16(ethertype) != sdata->control_port_protocol || - compare_ether_addr(sdata->vif.addr, skb->data + ETH_ALEN)))) { + !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) { #ifdef CONFIG_MAC80211_VERBOSE_DEBUG - if (net_ratelimit()) - printk(KERN_DEBUG "%s: dropped frame to %pM" - " (unauthorized port)\n", dev->name, - hdr.addr1); + net_dbg_ratelimited("%s: dropped frame to %pM (unauthorized port)\n", + dev->name, hdr.addr1); #endif I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); @@ -2171,7 +2187,6 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, void ieee80211_tx_pending(unsigned long data) { struct ieee80211_local *local = (struct ieee80211_local *)data; - struct ieee80211_sub_if_data *sdata; unsigned long flags; int i; bool txok; @@ -2208,8 +2223,7 @@ void ieee80211_tx_pending(unsigned long data) } if (skb_queue_empty(&local->pending[i])) - list_for_each_entry_rcu(sdata, &local->interfaces, list) - netif_wake_subqueue(sdata->dev, i); + ieee80211_propagate_queue_wake(local, i); } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); @@ -2375,6 +2389,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, IEEE80211_STYPE_BEACON); } else if (ieee80211_vif_is_mesh(&sdata->vif)) { struct ieee80211_mgmt *mgmt; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; u8 *pos; int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) + sizeof(mgmt->u.beacon); @@ -2384,6 +2399,10 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, goto out; #endif + if (ifmsh->sync_ops) + ifmsh->sync_ops->adjust_tbtt( + sdata); + skb = dev_alloc_skb(local->tx_headroom + hdr_len + 2 + /* NULL SSID */ @@ -2391,7 +2410,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, 2 + 3 + /* DS params */ 2 + (IEEE80211_MAX_SUPP_RATES - 8) + 2 + sizeof(struct ieee80211_ht_cap) + - 2 + sizeof(struct ieee80211_ht_info) + + 2 + sizeof(struct ieee80211_ht_operation) + 2 + sdata->u.mesh.mesh_id_len + 2 + sizeof(struct ieee80211_meshconf_ie) + sdata->u.mesh.ie_len); @@ -2415,12 +2434,12 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, *pos++ = WLAN_EID_SSID; *pos++ = 0x0; - if (ieee80211_add_srates_ie(&sdata->vif, skb) || + if (ieee80211_add_srates_ie(&sdata->vif, skb, true) || mesh_add_ds_params_ie(skb, sdata) || - ieee80211_add_ext_srates_ie(&sdata->vif, skb) || + ieee80211_add_ext_srates_ie(&sdata->vif, skb, true) || mesh_add_rsn_ie(skb, sdata) || mesh_add_ht_cap_ie(skb, sdata) || - mesh_add_ht_info_ie(skb, sdata) || + mesh_add_ht_oper_ie(skb, sdata) || mesh_add_meshid_ie(skb, sdata) || mesh_add_meshconf_ie(skb, 
sdata) || mesh_add_vendor_ies(skb, sdata)) { @@ -2604,7 +2623,7 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw, pos = skb_put(skb, ie_ssid_len); *pos++ = WLAN_EID_SSID; *pos++ = ssid_len; - if (ssid) + if (ssid_len) memcpy(pos, ssid, ssid_len); pos += ssid_len; @@ -2711,11 +2730,13 @@ EXPORT_SYMBOL(ieee80211_get_buffered_bc); void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid) { + int ac = ieee802_1d_to_ac[tid]; + skb_set_mac_header(skb, 0); skb_set_network_header(skb, 0); skb_set_transport_header(skb, 0); - skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]); + skb_set_queue_mapping(skb, ac); skb->priority = tid; /* diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 32f7a3b3d43..22f2216b397 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -106,7 +106,7 @@ void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx) } } -int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, +int ieee80211_frame_duration(enum ieee80211_band band, size_t len, int rate, int erp, int short_preamble) { int dur; @@ -120,7 +120,7 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, * DIV_ROUND_UP() operations. */ - if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ || erp) { + if (band == IEEE80211_BAND_5GHZ || erp) { /* * OFDM: * @@ -162,10 +162,10 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, /* Exported duration function for driver use */ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum ieee80211_band band, size_t frame_len, struct ieee80211_rate *rate) { - struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_sub_if_data *sdata; u16 dur; int erp; @@ -179,7 +179,7 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, erp = rate->flags & IEEE80211_RATE_ERP_G; } - dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, erp, + dur = ieee80211_frame_duration(band, frame_len, rate->bitrate, erp, short_preamble); return cpu_to_le16(dur); @@ -198,7 +198,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, u16 dur; struct ieee80211_supported_band *sband; - sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; + sband = local->hw.wiphy->bands[frame_txctl->band]; short_preamble = false; @@ -213,13 +213,13 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, } /* CTS duration */ - dur = ieee80211_frame_duration(local, 10, rate->bitrate, + dur = ieee80211_frame_duration(sband->band, 10, rate->bitrate, erp, short_preamble); /* Data frame duration */ - dur += ieee80211_frame_duration(local, frame_len, rate->bitrate, + dur += ieee80211_frame_duration(sband->band, frame_len, rate->bitrate, erp, short_preamble); /* ACK duration */ - dur += ieee80211_frame_duration(local, 10, rate->bitrate, + dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate, erp, short_preamble); return cpu_to_le16(dur); @@ -239,7 +239,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, u16 dur; struct ieee80211_supported_band *sband; - sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; + sband = local->hw.wiphy->bands[frame_txctl->band]; short_preamble = false; @@ -253,11 +253,11 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, } /* Data frame duration */ - dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, + dur = ieee80211_frame_duration(sband->band, frame_len, rate->bitrate, erp, short_preamble); if (!(frame_txctl->flags & 
IEEE80211_TX_CTL_NO_ACK)) { /* ACK duration */ - dur += ieee80211_frame_duration(local, 10, rate->bitrate, + dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate, erp, short_preamble); } @@ -265,17 +265,45 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, } EXPORT_SYMBOL(ieee80211_ctstoself_duration); +void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue) +{ + struct ieee80211_sub_if_data *sdata; + + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + int ac; + + if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) + continue; + + if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE && + local->queue_stop_reasons[sdata->vif.cab_queue] != 0) + continue; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + int ac_queue = sdata->vif.hw_queue[ac]; + + if (ac_queue == queue || + (sdata->vif.cab_queue == queue && + local->queue_stop_reasons[ac_queue] == 0 && + skb_queue_empty(&local->pending[ac_queue]))) + netif_wake_subqueue(sdata->dev, ac); + } + } +} + static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason) { struct ieee80211_local *local = hw_to_local(hw); - struct ieee80211_sub_if_data *sdata; trace_wake_queue(local, queue, reason); if (WARN_ON(queue >= hw->queues)) return; + if (!test_bit(reason, &local->queue_stop_reasons[queue])) + return; + __clear_bit(reason, &local->queue_stop_reasons[queue]); if (local->queue_stop_reasons[queue] != 0) @@ -284,11 +312,7 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, if (skb_queue_empty(&local->pending[queue])) { rcu_read_lock(); - list_for_each_entry_rcu(sdata, &local->interfaces, list) { - if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) - continue; - netif_wake_subqueue(sdata->dev, queue); - } + ieee80211_propagate_queue_wake(local, queue); rcu_read_unlock(); } else tasklet_schedule(&local->tx_pending_tasklet); @@ -323,11 +347,21 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue, if (WARN_ON(queue >= hw->queues)) return; + if (test_bit(reason, &local->queue_stop_reasons[queue])) + return; + __set_bit(reason, &local->queue_stop_reasons[queue]); rcu_read_lock(); - list_for_each_entry_rcu(sdata, &local->interfaces, list) - netif_stop_subqueue(sdata->dev, queue); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + int ac; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + if (sdata->vif.hw_queue[ac] == queue || + sdata->vif.cab_queue == queue) + netif_stop_subqueue(sdata->dev, ac); + } + } rcu_read_unlock(); } @@ -354,8 +388,8 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local, { struct ieee80211_hw *hw = &local->hw; unsigned long flags; - int queue = skb_get_queue_mapping(skb); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int queue = info->hw_queue; if (WARN_ON(!info->control.vif)) { kfree_skb(skb); @@ -379,10 +413,6 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, int queue, i; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); - for (i = 0; i < hw->queues; i++) - __ieee80211_stop_queue(hw, i, - IEEE80211_QUEUE_STOP_REASON_SKB_ADD); - while ((skb = skb_dequeue(skbs))) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -391,7 +421,11 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, continue; } - queue = skb_get_queue_mapping(skb); + queue = info->hw_queue; + + __ieee80211_stop_queue(hw, queue, + IEEE80211_QUEUE_STOP_REASON_SKB_ADD); + __skb_queue_tail(&local->pending[queue], skb); } @@ -404,12 +438,6 @@ void 
ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } -void ieee80211_add_pending_skbs(struct ieee80211_local *local, - struct sk_buff_head *skbs) -{ - ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL); -} - void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, enum queue_stop_reason reason) { @@ -684,9 +712,9 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, else elem_parse_failed = true; break; - case WLAN_EID_HT_INFORMATION: - if (elen >= sizeof(struct ieee80211_ht_info)) - elems->ht_info_elem = (void *)pos; + case WLAN_EID_HT_OPERATION: + if (elen >= sizeof(struct ieee80211_ht_operation)) + elems->ht_operation = (void *)pos; else elem_parse_failed = true; break; @@ -775,19 +803,22 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_queue_params qparam; - int queue; + int ac; bool use_11b; int aCWmin, aCWmax; if (!local->ops->conf_tx) return; + if (local->hw.queues < IEEE80211_NUM_ACS) + return; + memset(&qparam, 0, sizeof(qparam)); use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) && !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE); - for (queue = 0; queue < local->hw.queues; queue++) { + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { /* Set defaults according to 802.11-2007 Table 7-37 */ aCWmax = 1023; if (use_11b) @@ -795,21 +826,21 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, else aCWmin = 15; - switch (queue) { - case 3: /* AC_BK */ + switch (ac) { + case IEEE80211_AC_BK: qparam.cw_max = aCWmax; qparam.cw_min = aCWmin; qparam.txop = 0; qparam.aifs = 7; break; default: /* never happens but let's not leave undefined */ - case 2: /* AC_BE */ + case IEEE80211_AC_BE: qparam.cw_max = aCWmax; qparam.cw_min = aCWmin; qparam.txop = 0; qparam.aifs = 3; break; - case 1: /* AC_VI */ + case IEEE80211_AC_VI: qparam.cw_max = aCWmin; qparam.cw_min = (aCWmin + 1) / 2 - 1; if (use_11b) @@ -818,7 +849,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, qparam.txop = 3008/32; qparam.aifs = 2; break; - case 0: /* AC_VO */ + case IEEE80211_AC_VO: qparam.cw_max = (aCWmin + 1) / 2 - 1; qparam.cw_min = (aCWmin + 1) / 4 - 1; if (use_11b) @@ -831,8 +862,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, qparam.uapsd = false; - sdata->tx_conf[queue] = qparam; - drv_conf_tx(local, sdata, queue, &qparam); + sdata->tx_conf[ac] = qparam; + drv_conf_tx(local, sdata, ac, &qparam); } /* after reinitialize QoS TX queues setting to default, @@ -878,10 +909,8 @@ u32 ieee80211_mandatory_rates(struct ieee80211_local *local, int i; sband = local->hw.wiphy->bands[band]; - if (!sband) { - WARN_ON(1); - sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; - } + if (WARN_ON(!sband)) + return 1; if (band == IEEE80211_BAND_2GHZ) mandatory_flag = IEEE80211_RATE_MANDATORY_B; @@ -1106,7 +1135,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, u32 ieee80211_sta_get_rates(struct ieee80211_local *local, struct ieee802_11_elems *elems, - enum ieee80211_band band) + enum ieee80211_band band, u32 *basic_rates) { struct ieee80211_supported_band *sband; struct ieee80211_rate *bitrates; @@ -1115,10 +1144,8 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local, int i, j; sband = local->hw.wiphy->bands[band]; - if (!sband) { - WARN_ON(1); - sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; - } + if (WARN_ON(!sband)) + return 1; 
bitrates = sband->bitrates; num_rates = sband->n_bitrates; @@ -1127,15 +1154,25 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local, elems->ext_supp_rates_len; i++) { u8 rate = 0; int own_rate; + bool is_basic; if (i < elems->supp_rates_len) rate = elems->supp_rates[i]; else if (elems->ext_supp_rates) rate = elems->ext_supp_rates [i - elems->supp_rates_len]; own_rate = 5 * (rate & 0x7f); - for (j = 0; j < num_rates; j++) - if (bitrates[j].bitrate == own_rate) + is_basic = !!(rate & 0x80); + + if (is_basic && (rate & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY) + continue; + + for (j = 0; j < num_rates; j++) { + if (bitrates[j].bitrate == own_rate) { supp_rates |= BIT(j); + if (basic_rates && is_basic) + *basic_rates |= BIT(j); + } + } } return supp_rates; } @@ -1210,6 +1247,16 @@ int ieee80211_reconfig(struct ieee80211_local *local) IEEE80211_TPT_LEDTRIG_FL_RADIO, 0); /* add interfaces */ + sdata = rtnl_dereference(local->monitor_sdata); + if (sdata) { + res = drv_add_interface(local, sdata); + if (WARN_ON(res)) { + rcu_assign_pointer(local->monitor_sdata, NULL); + synchronize_net(); + kfree(sdata); + } + } + list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && sdata->vif.type != NL80211_IFTYPE_MONITOR && @@ -1232,14 +1279,17 @@ int ieee80211_reconfig(struct ieee80211_local *local) mutex_unlock(&local->sta_mtx); /* reconfigure tx conf */ - list_for_each_entry(sdata, &local->interfaces, list) { - if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN || - sdata->vif.type == NL80211_IFTYPE_MONITOR || - !ieee80211_sdata_running(sdata)) - continue; + if (hw->queues >= IEEE80211_NUM_ACS) { + list_for_each_entry(sdata, &local->interfaces, list) { + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + sdata->vif.type == NL80211_IFTYPE_MONITOR || + !ieee80211_sdata_running(sdata)) + continue; - for (i = 0; i < hw->queues; i++) - drv_conf_tx(local, sdata, i, &sdata->tx_conf[i]); + for (i = 0; i < IEEE80211_NUM_ACS; i++) + drv_conf_tx(local, sdata, i, + &sdata->tx_conf[i]); + } } /* reconfigure hardware */ @@ -1611,57 +1661,55 @@ u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, return pos; } -u8 *ieee80211_ie_build_ht_info(u8 *pos, - struct ieee80211_sta_ht_cap *ht_cap, +u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, struct ieee80211_channel *channel, - enum nl80211_channel_type channel_type) + enum nl80211_channel_type channel_type, + u16 prot_mode) { - struct ieee80211_ht_info *ht_info; + struct ieee80211_ht_operation *ht_oper; /* Build HT Information */ - *pos++ = WLAN_EID_HT_INFORMATION; - *pos++ = sizeof(struct ieee80211_ht_info); - ht_info = (struct ieee80211_ht_info *)pos; - ht_info->control_chan = + *pos++ = WLAN_EID_HT_OPERATION; + *pos++ = sizeof(struct ieee80211_ht_operation); + ht_oper = (struct ieee80211_ht_operation *)pos; + ht_oper->primary_chan = ieee80211_frequency_to_channel(channel->center_freq); switch (channel_type) { case NL80211_CHAN_HT40MINUS: - ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW; + ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW; break; case NL80211_CHAN_HT40PLUS: - ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; break; case NL80211_CHAN_HT20: default: - ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE; + ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE; break; } - if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) - ht_info->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; + if 
(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && + channel_type != NL80211_CHAN_NO_HT && + channel_type != NL80211_CHAN_HT20) + ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; - /* - * Note: According to 802.11n-2009 9.13.3.1, HT Protection field and - * RIFS Mode are reserved in IBSS mode, therefore keep them at 0 - */ - ht_info->operation_mode = 0x0000; - ht_info->stbc_param = 0x0000; + ht_oper->operation_mode = cpu_to_le16(prot_mode); + ht_oper->stbc_param = 0x0000; /* It seems that Basic MCS set and Supported MCS set are identical for the first 10 bytes */ - memset(&ht_info->basic_set, 0, 16); - memcpy(&ht_info->basic_set, &ht_cap->mcs, 10); + memset(&ht_oper->basic_set, 0, 16); + memcpy(&ht_oper->basic_set, &ht_cap->mcs, 10); - return pos + sizeof(struct ieee80211_ht_info); + return pos + sizeof(struct ieee80211_ht_operation); } enum nl80211_channel_type -ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info) +ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper) { enum nl80211_channel_type channel_type; - if (!ht_info) + if (!ht_oper) return NL80211_CHAN_NO_HT; - switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { + switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { case IEEE80211_HT_PARAM_CHA_SEC_NONE: channel_type = NL80211_CHAN_HT20; break; @@ -1678,13 +1726,15 @@ ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info) return channel_type; } -int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb) +int ieee80211_add_srates_ie(struct ieee80211_vif *vif, + struct sk_buff *skb, bool need_basic) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; int rate; u8 i, rates, *pos; + u32 basic_rates = vif->bss_conf.basic_rates; sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; rates = sband->n_bitrates; @@ -1698,20 +1748,25 @@ int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb) *pos++ = WLAN_EID_SUPP_RATES; *pos++ = rates; for (i = 0; i < rates; i++) { + u8 basic = 0; + if (need_basic && basic_rates & BIT(i)) + basic = 0x80; rate = sband->bitrates[i].bitrate; - *pos++ = (u8) (rate / 5); + *pos++ = basic | (u8) (rate / 5); } return 0; } -int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb) +int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, + struct sk_buff *skb, bool need_basic) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; int rate; u8 i, exrates, *pos; + u32 basic_rates = vif->bss_conf.basic_rates; sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; exrates = sband->n_bitrates; @@ -1728,9 +1783,25 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb) *pos++ = WLAN_EID_EXT_SUPP_RATES; *pos++ = exrates; for (i = 8; i < sband->n_bitrates; i++) { + u8 basic = 0; + if (need_basic && basic_rates & BIT(i)) + basic = 0x80; rate = sband->bitrates[i].bitrate; - *pos++ = (u8) (rate / 5); + *pos++ = basic | (u8) (rate / 5); } } return 0; } + +int ieee80211_ave_rssi(struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION)) { + /* non-managed type inferfaces */ + return 0; + } + return ifmgd->ave_beacon_signal; +} +EXPORT_SYMBOL_GPL(ieee80211_ave_rssi); diff 
--git a/net/mac80211/wme.c b/net/mac80211/wme.c index 89511be3111..c3d643a6536 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c @@ -52,6 +52,26 @@ static int wme_downgrade_ac(struct sk_buff *skb) } } +static u16 ieee80211_downgrade_queue(struct ieee80211_local *local, + struct sk_buff *skb) +{ + /* in case we are a client verify acm is not set for this ac */ + while (unlikely(local->wmm_acm & BIT(skb->priority))) { + if (wme_downgrade_ac(skb)) { + /* + * This should not really happen. The AP has marked all + * lower ACs to require admission control which is not + * a reasonable configuration. Allow the frame to be + * transmitted using AC_BK as a workaround. + */ + break; + } + } + + /* look up which queue to use for frames with this 1d tag */ + return ieee802_1d_to_ac[skb->priority]; +} + /* Indicate which queue to use for this fully formed 802.11 frame */ u16 ieee80211_select_queue_80211(struct ieee80211_local *local, struct sk_buff *skb, @@ -59,7 +79,7 @@ u16 ieee80211_select_queue_80211(struct ieee80211_local *local, { u8 *p; - if (local->hw.queues < 4) + if (local->hw.queues < IEEE80211_NUM_ACS) return 0; if (!ieee80211_is_data(hdr->frame_control)) { @@ -86,9 +106,9 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, const u8 *ra = NULL; bool qos = false; - if (local->hw.queues < 4 || skb->len < 6) { + if (local->hw.queues < IEEE80211_NUM_ACS || skb->len < 6) { skb->priority = 0; /* required for correct WPA/11i MIC */ - return min_t(u16, local->hw.queues - 1, IEEE80211_AC_BE); + return 0; } rcu_read_lock(); @@ -139,26 +159,6 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, return ieee80211_downgrade_queue(local, skb); } -u16 ieee80211_downgrade_queue(struct ieee80211_local *local, - struct sk_buff *skb) -{ - /* in case we are a client verify acm is not set for this ac */ - while (unlikely(local->wmm_acm & BIT(skb->priority))) { - if (wme_downgrade_ac(skb)) { - /* - * This should not really happen. The AP has marked all - * lower ACs to require admission control which is not - * a reasonable configuration. Allow the frame to be - * transmitted using AC_BK as a workaround. - */ - break; - } - } - - /* look up which queue to use for frames with this 1d tag */ - return ieee802_1d_to_ac[skb->priority]; -} - void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h index 94edceb617f..ca80818b7b6 100644 --- a/net/mac80211/wme.h +++ b/net/mac80211/wme.h @@ -22,8 +22,5 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); -u16 ieee80211_downgrade_queue(struct ieee80211_local *local, - struct sk_buff *skb); - #endif /* _WME_H */ diff --git a/net/mac80211/work.c b/net/mac80211/work.c index c6e230efa04..b2650a9d45f 100644 --- a/net/mac80211/work.c +++ b/net/mac80211/work.c @@ -122,9 +122,6 @@ static void ieee80211_work_work(struct work_struct *work) enum work_action rma; bool remain_off_channel = false; - if (local->scanning) - return; - /* * ieee80211_queue_work() should have picked up most cases, * here we'll pick the rest. 
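The wme.c changes earlier in this hunk group keep queue selection pinned to the four WMM access categories: a frame's 802.1d priority (0-7) indexes ieee802_1d_to_ac, and if the AP has set the ACM (admission control mandatory) bit for that category the priority is downgraded step by step, falling back to AC_BK in the worst case. A self-contained sketch of that mapping and downgrade loop, assuming the conventional 802.1d-to-AC table; the helper names (prio_to_ac, downgrade_prio, select_ac) are illustrative, not the mac80211 ones.

#include <stdio.h>

enum { AC_VO = 0, AC_VI = 1, AC_BE = 2, AC_BK = 3 };

/* 802.1d user priority (0..7) to access category, as in ieee802_1d_to_ac. */
static const int prio_to_ac[8] = {
	AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO,
};

/* Lower the 802.1d priority one step; returns -1 once we are already at BK. */
static int downgrade_prio(int prio)
{
	switch (prio_to_ac[prio]) {
	case AC_VO: return 5;  /* VO -> VI */
	case AC_VI: return 3;  /* VI -> BE */
	case AC_BE: return 2;  /* BE -> BK */
	default:    return -1; /* already BK, nowhere left to go */
	}
}

/* acm_mask: one bit per 802.1d priority that requires admission control. */
static int select_ac(int prio, unsigned int acm_mask)
{
	while (acm_mask & (1u << prio)) {
		int next = downgrade_prio(prio);
		if (next < 0)
			break;	/* fall back to AC_BK as a workaround */
		prio = next;
	}
	return prio_to_ac[prio];
}

int main(void)
{
	/* Example: voice (prio 6) with ACM required on VO ends up on VI. */
	printf("AC = %d\n", select_ac(6, (1u << 6) | (1u << 7)));
	return 0;
}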
@@ -134,6 +131,11 @@ static void ieee80211_work_work(struct work_struct *work) mutex_lock(&local->mtx); + if (local->scanning) { + mutex_unlock(&local->mtx); + return; + } + ieee80211_recalc_idle(local); list_for_each_entry_safe(wk, tmp, &local->work_list, list) { @@ -226,13 +228,8 @@ static void ieee80211_work_work(struct work_struct *work) run_again(local, jiffies + HZ/2); } - if (list_empty(&local->work_list) && local->scan_req && - !local->scanning) - ieee80211_queue_delayed_work(&local->hw, - &local->scan_work, - round_jiffies_relative(0)); - ieee80211_recalc_idle(local); + ieee80211_run_deferred_scan(local); mutex_unlock(&local->mtx); diff --git a/net/mac802154/Kconfig b/net/mac802154/Kconfig new file mode 100644 index 00000000000..a967ddaa4e2 --- /dev/null +++ b/net/mac802154/Kconfig @@ -0,0 +1,16 @@ +config MAC802154 + tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)" + depends on IEEE802154 && EXPERIMENTAL + select CRC_CCITT + ---help--- + This option enables the hardware-independent IEEE 802.15.4 + networking stack for SoftMAC devices (the ones implementing + only the PHY level of the IEEE 802.15.4 standard). + + Note: this implementation is neither certified nor feature + complete! Compatibility with other implementations hasn't + been tested yet! + + If you plan to use HardMAC IEEE 802.15.4 devices, you can + say N here. Alternatively you can say M to compile it as + a module. diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile new file mode 100644 index 00000000000..ec1bd3fc127 --- /dev/null +++ b/net/mac802154/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_MAC802154) += mac802154.o +mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c new file mode 100644 index 00000000000..e3edfb0661b --- /dev/null +++ b/net/mac802154/ieee802154_dev.c @@ -0,0 +1,294 @@ +/* + * Copyright (C) 2007-2012 Siemens AG + * + * Written by: + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> + * + * Based on the code from 'linux-zigbee.sourceforge.net' project. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> + +#include <net/netlink.h> +#include <linux/nl802154.h> +#include <net/mac802154.h> +#include <net/route.h> +#include <net/wpan-phy.h> + +#include "mac802154.h" + +int mac802154_slave_open(struct net_device *dev) +{ + struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct mac802154_priv *ipriv = priv->hw; + int res = 0; + + if (ipriv->open_count++ == 0) { + res = ipriv->ops->start(&ipriv->hw); + WARN_ON(res); + if (res) + goto err; + } + + if (ipriv->ops->ieee_addr) { + res = ipriv->ops->ieee_addr(&ipriv->hw, dev->dev_addr); + WARN_ON(res); + if (res) + goto err; + mac802154_dev_set_ieee_addr(dev); + } + + netif_start_queue(dev); + return 0; +err: + priv->hw->open_count--; + + return res; +} + +int mac802154_slave_close(struct net_device *dev) +{ + struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct mac802154_priv *ipriv = priv->hw; + + netif_stop_queue(dev); + + if (!--ipriv->open_count) + ipriv->ops->stop(&ipriv->hw); + + return 0; +} + +static int +mac802154_netdev_register(struct wpan_phy *phy, struct net_device *dev) +{ + struct mac802154_sub_if_data *priv; + struct mac802154_priv *ipriv; + int err; + + ipriv = wpan_phy_priv(phy); + + priv = netdev_priv(dev); + priv->dev = dev; + priv->hw = ipriv; + + dev->needed_headroom = ipriv->hw.extra_tx_headroom; + + SET_NETDEV_DEV(dev, &ipriv->phy->dev); + + mutex_lock(&ipriv->slaves_mtx); + if (!ipriv->running) { + mutex_unlock(&ipriv->slaves_mtx); + return -ENODEV; + } + mutex_unlock(&ipriv->slaves_mtx); + + err = register_netdev(dev); + if (err < 0) + return err; + + rtnl_lock(); + mutex_lock(&ipriv->slaves_mtx); + list_add_tail_rcu(&priv->list, &ipriv->slaves); + mutex_unlock(&ipriv->slaves_mtx); + rtnl_unlock(); + + return 0; +} + +static void +mac802154_del_iface(struct wpan_phy *phy, struct net_device *dev) +{ + struct mac802154_sub_if_data *sdata; + ASSERT_RTNL(); + + sdata = netdev_priv(dev); + + BUG_ON(sdata->hw->phy != phy); + + mutex_lock(&sdata->hw->slaves_mtx); + list_del_rcu(&sdata->list); + mutex_unlock(&sdata->hw->slaves_mtx); + + synchronize_rcu(); + unregister_netdevice(sdata->dev); +} + +static struct net_device * +mac802154_add_iface(struct wpan_phy *phy, const char *name, int type) +{ + struct net_device *dev; + int err = -ENOMEM; + + switch (type) { + case IEEE802154_DEV_MONITOR: + dev = alloc_netdev(sizeof(struct mac802154_sub_if_data), + name, mac802154_monitor_setup); + break; + default: + dev = NULL; + err = -EINVAL; + break; + } + if (!dev) + goto err; + + err = mac802154_netdev_register(phy, dev); + if (err) + goto err_free; + + dev_hold(dev); /* we return an incremented device refcount */ + return dev; + +err_free: + free_netdev(dev); +err: + return ERR_PTR(err); +} + +struct ieee802154_dev * +ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops) +{ + struct wpan_phy *phy; + struct mac802154_priv *priv; + size_t priv_size; + + if (!ops || !ops->xmit || !ops->ed || !ops->start || + !ops->stop || !ops->set_channel) { + printk(KERN_ERR + "undefined IEEE802.15.4 device operations\n"); + return NULL; + } + + /* Ensure 32-byte alignment of our private data and hw private data. 
+ * We use the wpan_phy priv data for both our mac802154_priv and for + * the driver's private data + * + * in memory it'll be like this: + * + * +-----------------------+ + * | struct wpan_phy | + * +-----------------------+ + * | struct mac802154_priv | + * +-----------------------+ + * | driver's private data | + * +-----------------------+ + * + * Since the ieee802154 layer isn't aware of the driver and MAC structures, + * align them here. + */ + + priv_size = ALIGN(sizeof(*priv), NETDEV_ALIGN) + priv_data_len; + + phy = wpan_phy_alloc(priv_size); + if (!phy) { + printk(KERN_ERR + "failure to allocate master IEEE802.15.4 device\n"); + return NULL; + } + + priv = wpan_phy_priv(phy); + priv->hw.phy = priv->phy = phy; + priv->hw.priv = (char *)priv + ALIGN(sizeof(*priv), NETDEV_ALIGN); + priv->ops = ops; + + INIT_LIST_HEAD(&priv->slaves); + mutex_init(&priv->slaves_mtx); + + return &priv->hw; +} +EXPORT_SYMBOL(ieee802154_alloc_device); + +void ieee802154_free_device(struct ieee802154_dev *hw) +{ + struct mac802154_priv *priv = mac802154_to_priv(hw); + + BUG_ON(!list_empty(&priv->slaves)); + + wpan_phy_free(priv->phy); + + mutex_destroy(&priv->slaves_mtx); +} +EXPORT_SYMBOL(ieee802154_free_device); + +int ieee802154_register_device(struct ieee802154_dev *dev) +{ + struct mac802154_priv *priv = mac802154_to_priv(dev); + int rc = -ENOMEM; + + priv->dev_workqueue = + create_singlethread_workqueue(wpan_phy_name(priv->phy)); + if (!priv->dev_workqueue) + goto out; + + wpan_phy_set_dev(priv->phy, priv->hw.parent); + + priv->phy->add_iface = mac802154_add_iface; + priv->phy->del_iface = mac802154_del_iface; + + rc = wpan_phy_register(priv->phy); + if (rc < 0) + goto out_wq; + + rtnl_lock(); + + mutex_lock(&priv->slaves_mtx); + priv->running = MAC802154_DEVICE_RUN; + mutex_unlock(&priv->slaves_mtx); + + rtnl_unlock(); + + return 0; + +out_wq: + destroy_workqueue(priv->dev_workqueue); +out: + return rc; +} +EXPORT_SYMBOL(ieee802154_register_device); + +void ieee802154_unregister_device(struct ieee802154_dev *dev) +{ + struct mac802154_priv *priv = mac802154_to_priv(dev); + struct mac802154_sub_if_data *sdata, *next; + + flush_workqueue(priv->dev_workqueue); + destroy_workqueue(priv->dev_workqueue); + + rtnl_lock(); + + mutex_lock(&priv->slaves_mtx); + priv->running = MAC802154_DEVICE_STOPPED; + mutex_unlock(&priv->slaves_mtx); + + list_for_each_entry_safe(sdata, next, &priv->slaves, list) { + mutex_lock(&sdata->hw->slaves_mtx); + list_del(&sdata->list); + mutex_unlock(&sdata->hw->slaves_mtx); + + unregister_netdevice(sdata->dev); + } + + rtnl_unlock(); + + wpan_phy_unregister(priv->phy); +} +EXPORT_SYMBOL(ieee802154_unregister_device); + +MODULE_DESCRIPTION("IEEE 802.15.4 implementation"); +MODULE_LICENSE("GPL v2"); diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h new file mode 100644 index 00000000000..789d9c948ae --- /dev/null +++ b/net/mac802154/mac802154.h @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Written by: + * Pavel Smolenskiy <pavel.smolenskiy@gmail.com> + * Maxim Gorbachyov <maxim.gorbachev@siemens.com> + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> + */ +#ifndef MAC802154_H +#define MAC802154_H + +/* mac802154 device private data */ +struct mac802154_priv { + struct ieee802154_dev hw; + struct ieee802154_ops *ops; + + /* ieee802154 phy */ + struct wpan_phy *phy; + + int open_count; + + /* As in mac80211 slaves list is modified: + * 1) under the RTNL + * 2) protected by slaves_mtx; + * 3) in an RCU manner + * + * So atomic readers can use any of this protection methods. + */ + struct list_head slaves; + struct mutex slaves_mtx; + + /* This one is used for scanning and other jobs not to be interfered + * with serial driver. + */ + struct workqueue_struct *dev_workqueue; + + /* SoftMAC device is registered and running. One can add subinterfaces. + * This flag should be modified under slaves_mtx and RTNL, so you can + * read them using any of protection methods. + */ + bool running; +}; + +#define MAC802154_DEVICE_STOPPED 0x00 +#define MAC802154_DEVICE_RUN 0x01 + +/* Slave interface definition. + * + * Slaves represent typical network interfaces available from userspace. + * Each ieee802154 device/transceiver may have several slaves and able + * to be associated with several networks at the same time. + */ +struct mac802154_sub_if_data { + struct list_head list; /* the ieee802154_priv->slaves list */ + + struct mac802154_priv *hw; + struct net_device *dev; + + int type; + + spinlock_t mib_lock; + + __le16 pan_id; + __le16 short_addr; + + u8 chan; + u8 page; + + /* MAC BSN field */ + u8 bsn; + /* MAC DSN field */ + u8 dsn; +}; + +#define mac802154_to_priv(_hw) container_of(_hw, struct mac802154_priv, hw) + +#define MAC802154_MAX_XMIT_ATTEMPTS 3 + +#define MAC802154_CHAN_NONE (~(u8)0) /* No channel is assigned */ + +extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced; + +int mac802154_slave_open(struct net_device *dev); +int mac802154_slave_close(struct net_device *dev); + +void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb); +void mac802154_monitor_setup(struct net_device *dev); + +netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, + u8 page, u8 chan); + +/* MIB callbacks */ +void mac802154_dev_set_ieee_addr(struct net_device *dev); + +#endif /* MAC802154_H */ diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c new file mode 100644 index 00000000000..7a5d0e052cd --- /dev/null +++ b/net/mac802154/mac_cmd.c @@ -0,0 +1,45 @@ +/* + * MAC commands interface + * + * Copyright 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Written by: + * Sergey Lapin <slapin@ossfans.org> + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> + */ + +#include <linux/skbuff.h> +#include <linux/if_arp.h> + +#include <net/ieee802154_netdev.h> +#include <net/wpan-phy.h> +#include <net/mac802154.h> + +#include "mac802154.h" + +struct wpan_phy *mac802154_get_phy(const struct net_device *dev) +{ + struct mac802154_sub_if_data *priv = netdev_priv(dev); + + BUG_ON(dev->type != ARPHRD_IEEE802154); + + return to_phy(get_device(&priv->hw->phy->dev)); +} + +struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = { + .get_phy = mac802154_get_phy, +}; diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c new file mode 100644 index 00000000000..ab59821ec72 --- /dev/null +++ b/net/mac802154/mib.c @@ -0,0 +1,93 @@ +/* + * Copyright 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Written by: + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + * Sergey Lapin <slapin@ossfans.org> + * Maxim Gorbachyov <maxim.gorbachev@siemens.com> + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> + */ + +#include <linux/if_arp.h> + +#include <net/mac802154.h> +#include <net/wpan-phy.h> + +#include "mac802154.h" + +struct hw_addr_filt_notify_work { + struct work_struct work; + struct net_device *dev; + unsigned long changed; +}; + +struct mac802154_priv *mac802154_slave_get_priv(struct net_device *dev) +{ + struct mac802154_sub_if_data *priv = netdev_priv(dev); + + BUG_ON(dev->type != ARPHRD_IEEE802154); + + return priv->hw; +} + +static void hw_addr_notify(struct work_struct *work) +{ + struct hw_addr_filt_notify_work *nw = container_of(work, + struct hw_addr_filt_notify_work, work); + struct mac802154_priv *hw = mac802154_slave_get_priv(nw->dev); + int res; + + res = hw->ops->set_hw_addr_filt(&hw->hw, + &hw->hw.hw_filt, + nw->changed); + if (res) + pr_debug("failed changed mask %lx\n", nw->changed); + + kfree(nw); + + return; +} + +static void set_hw_addr_filt(struct net_device *dev, unsigned long changed) +{ + struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct hw_addr_filt_notify_work *work; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return; + + INIT_WORK(&work->work, hw_addr_notify); + work->dev = dev; + work->changed = changed; + queue_work(priv->hw->dev_workqueue, &work->work); + + return; +} + +void mac802154_dev_set_ieee_addr(struct net_device *dev) +{ + struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct mac802154_priv *mac = priv->hw; + + if (mac->ops->set_hw_addr_filt && + memcmp(mac->hw.hw_filt.ieee_addr, + dev->dev_addr, IEEE802154_ADDR_LEN)) { + memcpy(mac->hw.hw_filt.ieee_addr, + 
dev->dev_addr, IEEE802154_ADDR_LEN); + set_hw_addr_filt(dev, IEEE802515_AFILT_IEEEADDR_CHANGED); + } +} diff --git a/net/mac802154/monitor.c b/net/mac802154/monitor.c new file mode 100644 index 00000000000..434a26f76a8 --- /dev/null +++ b/net/mac802154/monitor.c @@ -0,0 +1,116 @@ +/* + * Copyright 2007, 2008, 2009 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Written by: + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + * Sergey Lapin <slapin@ossfans.org> + * Maxim Gorbachyov <maxim.gorbachev@siemens.com> + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> + */ + +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/if_arp.h> +#include <linux/crc-ccitt.h> + +#include <net/ieee802154.h> +#include <net/mac802154.h> +#include <net/netlink.h> +#include <net/wpan-phy.h> +#include <linux/nl802154.h> + +#include "mac802154.h" + +static netdev_tx_t mac802154_monitor_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct mac802154_sub_if_data *priv; + u8 chan, page; + + priv = netdev_priv(dev); + + /* FIXME: locking */ + chan = priv->hw->phy->current_channel; + page = priv->hw->phy->current_page; + + if (chan == MAC802154_CHAN_NONE) /* not initialized */ + return NETDEV_TX_OK; + + if (WARN_ON(page >= WPAN_NUM_PAGES) || + WARN_ON(chan >= WPAN_NUM_CHANNELS)) + return NETDEV_TX_OK; + + skb->skb_iif = dev->ifindex; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + return mac802154_tx(priv->hw, skb, page, chan); +} + + +void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb) +{ + struct sk_buff *skb2; + struct mac802154_sub_if_data *sdata; + u16 crc = crc_ccitt(0, skb->data, skb->len); + u8 *data; + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &priv->slaves, list) { + if (sdata->type != IEEE802154_DEV_MONITOR) + continue; + + skb2 = skb_clone(skb, GFP_ATOMIC); + skb2->dev = sdata->dev; + skb2->pkt_type = PACKET_HOST; + data = skb_put(skb2, 2); + data[0] = crc & 0xff; + data[1] = crc >> 8; + + netif_rx_ni(skb2); + } + rcu_read_unlock(); +} + +static const struct net_device_ops mac802154_monitor_ops = { + .ndo_open = mac802154_slave_open, + .ndo_stop = mac802154_slave_close, + .ndo_start_xmit = mac802154_monitor_xmit, +}; + +void mac802154_monitor_setup(struct net_device *dev) +{ + struct mac802154_sub_if_data *priv; + + dev->addr_len = 0; + dev->hard_header_len = 0; + dev->needed_tailroom = 2; /* room for FCS */ + dev->mtu = IEEE802154_MTU; + dev->tx_queue_len = 10; + dev->type = ARPHRD_IEEE802154_MONITOR; + dev->flags = IFF_NOARP | IFF_BROADCAST; + dev->watchdog_timeo = 0; + + dev->destructor = free_netdev; + dev->netdev_ops = &mac802154_monitor_ops; + dev->ml_priv = &mac802154_mlme_reduced; + + priv = netdev_priv(dev); + priv->type = IEEE802154_DEV_MONITOR; + + priv->chan = MAC802154_CHAN_NONE; /* not initialized */ + priv->page = 0; +} diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c new 
file mode 100644 index 00000000000..4a7d76d4f8b --- /dev/null +++ b/net/mac802154/rx.c @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Written by: + * Pavel Smolenskiy <pavel.smolenskiy@gmail.com> + * Maxim Gorbachyov <maxim.gorbachev@siemens.com> + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/workqueue.h> +#include <linux/netdevice.h> +#include <linux/crc-ccitt.h> + +#include <net/mac802154.h> +#include <net/ieee802154_netdev.h> + +#include "mac802154.h" + +/* The IEEE 802.15.4 standard defines 4 MAC packet types: + * - beacon frame + * - MAC command frame + * - acknowledgement frame + * - data frame + * + * and only the data frame should be pushed to the upper layers, other types + * are just internal MAC layer management information. So only data packets + * are going to be sent to the networking queue, all other will be processed + * right here by using the device workqueue. + */ +struct rx_work { + struct sk_buff *skb; + struct work_struct work; + struct ieee802154_dev *dev; + u8 lqi; +}; + +static void +mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi) +{ + struct mac802154_priv *priv = mac802154_to_priv(hw); + + mac_cb(skb)->lqi = lqi; + skb->protocol = htons(ETH_P_IEEE802154); + skb_reset_mac_header(skb); + + BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb)); + + if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) { + u16 crc; + + if (skb->len < 2) { + pr_debug("got invalid frame\n"); + goto out; + } + crc = crc_ccitt(0, skb->data, skb->len); + if (crc) { + pr_debug("CRC mismatch\n"); + goto out; + } + skb_trim(skb, skb->len - 2); /* CRC */ + } + + mac802154_monitors_rx(priv, skb); +out: + dev_kfree_skb(skb); + return; +} + +static void mac802154_rx_worker(struct work_struct *work) +{ + struct rx_work *rw = container_of(work, struct rx_work, work); + struct sk_buff *skb = rw->skb; + + mac802154_subif_rx(rw->dev, skb, rw->lqi); + kfree(rw); +} + +void +ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb, u8 lqi) +{ + struct mac802154_priv *priv = mac802154_to_priv(dev); + struct rx_work *work; + + if (!skb) + return; + + work = kzalloc(sizeof(struct rx_work), GFP_ATOMIC); + if (!work) + return; + + INIT_WORK(&work->work, mac802154_rx_worker); + work->skb = skb; + work->dev = dev; + work->lqi = lqi; + + queue_work(priv->dev_workqueue, &work->work); +} +EXPORT_SYMBOL(ieee802154_rx_irqsafe); diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c new file mode 100644 index 00000000000..8781d8f904d --- /dev/null +++ b/net/mac802154/tx.c @@ -0,0 +1,116 @@ +/* + * Copyright 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License 
version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Written by: + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + * Sergey Lapin <slapin@ossfans.org> + * Maxim Gorbachyov <maxim.gorbachev@siemens.com> + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> + */ + +#include <linux/netdevice.h> +#include <linux/if_arp.h> +#include <linux/crc-ccitt.h> + +#include <net/mac802154.h> +#include <net/wpan-phy.h> + +#include "mac802154.h" + +/* IEEE 802.15.4 transceivers can sleep during the xmit session, so process + * packets through the workqueue. + */ +struct xmit_work { + struct sk_buff *skb; + struct work_struct work; + struct mac802154_priv *priv; + u8 chan; + u8 page; + u8 xmit_attempts; +}; + +static void mac802154_xmit_worker(struct work_struct *work) +{ + struct xmit_work *xw = container_of(work, struct xmit_work, work); + int res; + + mutex_lock(&xw->priv->phy->pib_lock); + if (xw->priv->phy->current_channel != xw->chan || + xw->priv->phy->current_page != xw->page) { + res = xw->priv->ops->set_channel(&xw->priv->hw, + xw->page, + xw->chan); + if (res) { + pr_debug("set_channel failed\n"); + goto out; + } + } + + res = xw->priv->ops->xmit(&xw->priv->hw, xw->skb); + +out: + mutex_unlock(&xw->priv->phy->pib_lock); + + if (res) { + if (xw->xmit_attempts++ < MAC802154_MAX_XMIT_ATTEMPTS) { + queue_work(xw->priv->dev_workqueue, &xw->work); + return; + } else + pr_debug("transmission failed for %d times", + MAC802154_MAX_XMIT_ATTEMPTS); + } + + dev_kfree_skb(xw->skb); + + kfree(xw); +} + +netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, + u8 page, u8 chan) +{ + struct xmit_work *work; + + if (!(priv->phy->channels_supported[page] & (1 << chan))) { + WARN_ON(1); + return NETDEV_TX_OK; + } + + if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) { + u16 crc = crc_ccitt(0, skb->data, skb->len); + u8 *data = skb_put(skb, 2); + data[0] = crc & 0xff; + data[1] = crc >> 8; + } + + if (skb_cow_head(skb, priv->hw.extra_tx_headroom)) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + work = kzalloc(sizeof(struct xmit_work), GFP_ATOMIC); + if (!work) + return NETDEV_TX_BUSY; + + INIT_WORK(&work->work, mac802154_xmit_worker); + work->skb = skb; + work->priv = priv; + work->page = page; + work->chan = chan; + work->xmit_attempts = 0; + + queue_work(priv->dev_workqueue, &work->work); + + return NETDEV_TX_OK; +} diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 0c6f67e8f2e..209c1ed4336 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -509,6 +509,21 @@ config NETFILTER_XT_TARGET_HL since you can easily create immortal packets that loop forever on the network. +config NETFILTER_XT_TARGET_HMARK + tristate '"HMARK" target support' + depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n) + depends on NETFILTER_ADVANCED + ---help--- + This option adds the "HMARK" target. + + The target allows you to create rules in the "raw" and "mangle" tables + which set the skbuff mark by means of hash calculation within a given + range.
The nfmark can influence the routing method (see "Use netfilter + MARK value as routing key") and can also be used by other subsystems to + change their behaviour. + + To compile it as a module, choose M here. If unsure, say N. + config NETFILTER_XT_TARGET_IDLETIMER tristate "IDLETIMER target support" depends on NETFILTER_ADVANCED diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index ca3676586f5..4e7960cc7b9 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -59,6 +59,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o +obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o diff --git a/net/netfilter/core.c b/net/netfilter/core.c index e1b7e051332..e19f3653db2 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -290,12 +290,3 @@ void __init netfilter_init(void) if (netfilter_log_init() < 0) panic("cannot initialize nf_log"); } - -#ifdef CONFIG_SYSCTL -struct ctl_path nf_net_netfilter_sysctl_path[] = { - { .procname = "net", }, - { .procname = "netfilter", }, - { } -}; -EXPORT_SYMBOL_GPL(nf_net_netfilter_sysctl_path); -#endif /* CONFIG_SYSCTL */ diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c index a72a4dff003..7e1b061aeeb 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ip.c +++ b/net/netfilter/ipset/ip_set_bitmap_ip.c @@ -109,8 +109,9 @@ bitmap_ip_list(const struct ip_set *set, } else goto nla_put_failure; } - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, - htonl(map->first_ip + id * map->hosts)); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, + htonl(map->first_ip + id * map->hosts))) + goto nla_put_failure; ipset_nest_end(skb, nested); } ipset_nest_end(skb, atd); @@ -194,10 +195,11 @@ bitmap_ip_tlist(const struct ip_set *set, } else goto nla_put_failure; } - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, - htonl(map->first_ip + id * map->hosts)); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(members[id]))); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, + htonl(map->first_ip + id * map->hosts)) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(members[id])))) + goto nla_put_failure; ipset_nest_end(skb, nested); } ipset_nest_end(skb, adt); @@ -334,15 +336,16 @@ bitmap_ip_head(struct ip_set *set, struct sk_buff *skb) nested = ipset_nest_start(skb, IPSET_ATTR_DATA); if (!nested) goto nla_put_failure; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip)); - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)); - if (map->netmask != 32) - NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask); - NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); - NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, - htonl(sizeof(*map) + map->memsize)); - if (with_timeout(map->timeout)) - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) || + nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) || + (map->netmask != 32 && + nla_put_u8(skb, IPSET_ATTR_NETMASK, map->netmask)) || + nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || + nla_put_net32(skb, IPSET_ATTR_MEMSIZE, + htonl(sizeof(*map) + map->memsize)) || + (with_timeout(map->timeout) && + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, 
htonl(map->timeout)))) + goto nla_put_failure; ipset_nest_end(skb, nested); return 0; diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index 81324c12c5b..d7eaf10edb6 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c @@ -111,7 +111,7 @@ bitmap_ipmac_test(struct ip_set *set, void *value, u32 timeout, u32 flags) return -EAGAIN; case MAC_FILLED: return data->ether == NULL || - compare_ether_addr(data->ether, elem->ether) == 0; + ether_addr_equal(data->ether, elem->ether); } return 0; } @@ -186,11 +186,12 @@ bitmap_ipmac_list(const struct ip_set *set, } else goto nla_put_failure; } - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, - htonl(map->first_ip + id)); - if (elem->match == MAC_FILLED) - NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN, - elem->ether); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, + htonl(map->first_ip + id)) || + (elem->match == MAC_FILLED && + nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, + elem->ether))) + goto nla_put_failure; ipset_nest_end(skb, nested); } ipset_nest_end(skb, atd); @@ -224,7 +225,7 @@ bitmap_ipmac_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags) return -EAGAIN; case MAC_FILLED: return (data->ether == NULL || - compare_ether_addr(data->ether, elem->ether) == 0) && + ether_addr_equal(data->ether, elem->ether)) && !bitmap_expired(map, data->id); } return 0; @@ -314,14 +315,16 @@ bitmap_ipmac_tlist(const struct ip_set *set, } else goto nla_put_failure; } - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, - htonl(map->first_ip + id)); - if (elem->match == MAC_FILLED) - NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN, - elem->ether); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, + htonl(map->first_ip + id)) || + (elem->match == MAC_FILLED && + nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, + elem->ether))) + goto nla_put_failure; timeout = elem->match == MAC_UNSET ? 
elem->timeout : ip_set_timeout_get(elem->timeout); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout)); + if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout))) + goto nla_put_failure; ipset_nest_end(skb, nested); } ipset_nest_end(skb, atd); @@ -438,14 +441,16 @@ bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb) nested = ipset_nest_start(skb, IPSET_ATTR_DATA); if (!nested) goto nla_put_failure; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip)); - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)); - NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); - NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, - htonl(sizeof(*map) - + (map->last_ip - map->first_ip + 1) * map->dsize)); - if (with_timeout(map->timeout)) - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) || + nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) || + nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || + nla_put_net32(skb, IPSET_ATTR_MEMSIZE, + htonl(sizeof(*map) + + ((map->last_ip - map->first_ip + 1) * + map->dsize))) || + (with_timeout(map->timeout) && + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)))) + goto nla_put_failure; ipset_nest_end(skb, nested); return 0; diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c index 382ec28ba72..b9f1fce7053 100644 --- a/net/netfilter/ipset/ip_set_bitmap_port.c +++ b/net/netfilter/ipset/ip_set_bitmap_port.c @@ -96,8 +96,9 @@ bitmap_port_list(const struct ip_set *set, } else goto nla_put_failure; } - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, - htons(map->first_port + id)); + if (nla_put_net16(skb, IPSET_ATTR_PORT, + htons(map->first_port + id))) + goto nla_put_failure; ipset_nest_end(skb, nested); } ipset_nest_end(skb, atd); @@ -183,10 +184,11 @@ bitmap_port_tlist(const struct ip_set *set, } else goto nla_put_failure; } - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, - htons(map->first_port + id)); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(members[id]))); + if (nla_put_net16(skb, IPSET_ATTR_PORT, + htons(map->first_port + id)) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(members[id])))) + goto nla_put_failure; ipset_nest_end(skb, nested); } ipset_nest_end(skb, adt); @@ -320,13 +322,14 @@ bitmap_port_head(struct ip_set *set, struct sk_buff *skb) nested = ipset_nest_start(skb, IPSET_ATTR_DATA); if (!nested) goto nla_put_failure; - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port)); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port)); - NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); - NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, - htonl(sizeof(*map) + map->memsize)); - if (with_timeout(map->timeout)) - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); + if (nla_put_net16(skb, IPSET_ATTR_PORT, htons(map->first_port)) || + nla_put_net16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port)) || + nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || + nla_put_net32(skb, IPSET_ATTR_MEMSIZE, + htonl(sizeof(*map) + map->memsize)) || + (with_timeout(map->timeout) && + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)))) + goto nla_put_failure; ipset_nest_end(skb, nested); return 0; diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index e6c1c9605a5..819c342f5b3 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -1092,19 +1092,21 @@ 
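The ipset list/head routines above and the IPVS fill functions further down all receive the same mechanical conversion: the old NLA_PUT_*() macros, which hid a jump to nla_put_failure, become nla_put_*() calls whose non-zero return values are chained with || and checked once, with the goto written out explicitly. A minimal sketch of the resulting shape, assuming invented EXAMPLE_ATTR_* attribute ids rather than real ipset ones:

#include <net/netlink.h>
#include <linux/netfilter/ipset/ip_set.h>

enum { EXAMPLE_ATTR_VALUE = 1, EXAMPLE_ATTR_IP };	/* invented ids, for illustration */

static int example_fill(struct sk_buff *skb, u32 value, __be32 ip)
{
	/* old: NLA_PUT_U32(skb, ...); NLA_PUT_IPADDR4(skb, ...);
	 * new: explicit error checks chained with ||, one failure label
	 */
	if (nla_put_u32(skb, EXAMPLE_ATTR_VALUE, value) ||
	    nla_put_ipaddr4(skb, EXAMPLE_ATTR_IP, ip))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}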
dump_last: ret = -EMSGSIZE; goto release_refcount; } - NLA_PUT_U8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL); - NLA_PUT_STRING(skb, IPSET_ATTR_SETNAME, set->name); + if (nla_put_u8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) || + nla_put_string(skb, IPSET_ATTR_SETNAME, set->name)) + goto nla_put_failure; if (dump_flags & IPSET_FLAG_LIST_SETNAME) goto next_set; switch (cb->args[2]) { case 0: /* Core header data */ - NLA_PUT_STRING(skb, IPSET_ATTR_TYPENAME, - set->type->name); - NLA_PUT_U8(skb, IPSET_ATTR_FAMILY, - set->family); - NLA_PUT_U8(skb, IPSET_ATTR_REVISION, - set->revision); + if (nla_put_string(skb, IPSET_ATTR_TYPENAME, + set->type->name) || + nla_put_u8(skb, IPSET_ATTR_FAMILY, + set->family) || + nla_put_u8(skb, IPSET_ATTR_REVISION, + set->revision)) + goto nla_put_failure; ret = set->variant->head(set, skb); if (ret < 0) goto release_refcount; @@ -1410,11 +1412,12 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb, IPSET_CMD_HEADER); if (!nlh2) goto nlmsg_failure; - NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL); - NLA_PUT_STRING(skb2, IPSET_ATTR_SETNAME, set->name); - NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, set->type->name); - NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, set->family); - NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, set->revision); + if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) || + nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name) || + nla_put_string(skb2, IPSET_ATTR_TYPENAME, set->type->name) || + nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) || + nla_put_u8(skb2, IPSET_ATTR_REVISION, set->revision)) + goto nla_put_failure; nlmsg_end(skb2, nlh2); ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); @@ -1469,11 +1472,12 @@ ip_set_type(struct sock *ctnl, struct sk_buff *skb, IPSET_CMD_TYPE); if (!nlh2) goto nlmsg_failure; - NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL); - NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, typename); - NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, family); - NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, max); - NLA_PUT_U8(skb2, IPSET_ATTR_REVISION_MIN, min); + if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) || + nla_put_string(skb2, IPSET_ATTR_TYPENAME, typename) || + nla_put_u8(skb2, IPSET_ATTR_FAMILY, family) || + nla_put_u8(skb2, IPSET_ATTR_REVISION, max) || + nla_put_u8(skb2, IPSET_ATTR_REVISION_MIN, min)) + goto nla_put_failure; nlmsg_end(skb2, nlh2); pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len); @@ -1517,7 +1521,8 @@ ip_set_protocol(struct sock *ctnl, struct sk_buff *skb, IPSET_CMD_PROTOCOL); if (!nlh2) goto nlmsg_failure; - NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL); + if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL)) + goto nla_put_failure; nlmsg_end(skb2, nlh2); ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); @@ -1613,7 +1618,7 @@ static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = { static int ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) { - unsigned *op; + unsigned int *op; void *data; int copylen = *len, ret = 0; @@ -1621,7 +1626,7 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) return -EPERM; if (optval != SO_IP_SET) return -EBADF; - if (*len < sizeof(unsigned)) + if (*len < sizeof(unsigned int)) return -EINVAL; data = vmalloc(*len); @@ -1631,7 +1636,7 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) ret = -EFAULT; goto done; } - op = (unsigned *) data; + op = (unsigned int *) data; if (*op < IP_SET_OP_VERSION) { /* Check the version 
at the beginning of operations */ diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c index 828ce46cb34..a68dbd4f1e4 100644 --- a/net/netfilter/ipset/ip_set_hash_ip.c +++ b/net/netfilter/ipset/ip_set_hash_ip.c @@ -81,7 +81,8 @@ hash_ip4_data_zero_out(struct hash_ip4_elem *elem) static inline bool hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data) { - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip)) + goto nla_put_failure; return 0; nla_put_failure: @@ -94,9 +95,10 @@ hash_ip4_data_tlist(struct sk_buff *skb, const struct hash_ip4_elem *data) const struct hash_ip4_telem *tdata = (const struct hash_ip4_telem *)data; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(tdata->timeout))); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(tdata->timeout)))) + goto nla_put_failure; return 0; @@ -262,7 +264,8 @@ ip6_netmask(union nf_inet_addr *ip, u8 prefix) static bool hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data) { - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6)) + goto nla_put_failure; return 0; nla_put_failure: @@ -275,9 +278,10 @@ hash_ip6_data_tlist(struct sk_buff *skb, const struct hash_ip6_elem *data) const struct hash_ip6_telem *e = (const struct hash_ip6_telem *)data; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(e->timeout))); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(e->timeout)))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c index e8dbb498af8..92722bb82ee 100644 --- a/net/netfilter/ipset/ip_set_hash_ipport.c +++ b/net/netfilter/ipset/ip_set_hash_ipport.c @@ -93,9 +93,10 @@ static bool hash_ipport4_data_list(struct sk_buff *skb, const struct hash_ipport4_elem *data) { - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto)) + goto nla_put_failure; return 0; nla_put_failure: @@ -109,12 +110,12 @@ hash_ipport4_data_tlist(struct sk_buff *skb, const struct hash_ipport4_telem *tdata = (const struct hash_ipport4_telem *)data; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(tdata->timeout))); - + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) || + nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(tdata->timeout)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -308,9 +309,10 @@ static bool hash_ipport6_data_list(struct sk_buff *skb, const struct hash_ipport6_elem *data) { - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, 
&data->ip.in6) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto)) + goto nla_put_failure; return 0; nla_put_failure: @@ -324,11 +326,12 @@ hash_ipport6_data_tlist(struct sk_buff *skb, const struct hash_ipport6_telem *e = (const struct hash_ipport6_telem *)data; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(e->timeout))); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(e->timeout)))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c index 52f79d8ef74..0637ce096de 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportip.c +++ b/net/netfilter/ipset/ip_set_hash_ipportip.c @@ -94,10 +94,11 @@ static bool hash_ipportip4_data_list(struct sk_buff *skb, const struct hash_ipportip4_elem *data) { - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || + nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto)) + goto nla_put_failure; return 0; nla_put_failure: @@ -111,13 +112,13 @@ hash_ipportip4_data_tlist(struct sk_buff *skb, const struct hash_ipportip4_telem *tdata = (const struct hash_ipportip4_telem *)data; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(tdata->timeout))); - + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) || + nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) || + nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(tdata->timeout)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -319,10 +320,11 @@ static bool hash_ipportip6_data_list(struct sk_buff *skb, const struct hash_ipportip6_elem *data) { - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || + nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto)) + goto nla_put_failure; return 0; nla_put_failure: @@ -336,12 +338,13 @@ hash_ipportip6_data_tlist(struct sk_buff *skb, const struct hash_ipportip6_telem *e = (const struct hash_ipportip6_telem *)data; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(e->timeout))); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) || 
+ nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(e->timeout)))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index 97583f5af74..1ce21ca976e 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c @@ -124,13 +124,14 @@ hash_ipportnet4_data_list(struct sk_buff *skb, { u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || + nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -145,16 +146,16 @@ hash_ipportnet4_data_tlist(struct sk_buff *skb, (const struct hash_ipportnet4_telem *)data; u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(tdata->timeout))); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); - + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) || + nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) || + nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) || + nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(tdata->timeout))) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -436,13 +437,14 @@ hash_ipportnet6_data_list(struct sk_buff *skb, { u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || + nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -457,15 +459,16 @@ hash_ipportnet6_data_tlist(struct sk_buff *skb, (const struct hash_ipportnet6_telem *)data; u32 flags = data->nomatch ? 
IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(e->timeout))); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) || + nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(e->timeout))) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c index 1721cdecc9f..c57a6a09906 100644 --- a/net/netfilter/ipset/ip_set_hash_net.c +++ b/net/netfilter/ipset/ip_set_hash_net.c @@ -111,10 +111,11 @@ hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data) { u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -128,13 +129,13 @@ hash_net4_data_tlist(struct sk_buff *skb, const struct hash_net4_elem *data) (const struct hash_net4_telem *)data; u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(tdata->timeout))); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); - + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) || + nla_put_u8(skb, IPSET_ATTR_CIDR, tdata->cidr) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(tdata->timeout))) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -339,10 +340,11 @@ hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data) { u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -356,12 +358,13 @@ hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data) (const struct hash_net6_telem *)data; u32 flags = data->nomatch ? 
IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(e->timeout))); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) || + nla_put_u8(skb, IPSET_ATTR_CIDR, e->cidr) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(e->timeout))) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index 33bafc97ca6..ee863943c82 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c @@ -252,11 +252,12 @@ hash_netiface4_data_list(struct sk_buff *skb, if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); - NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) || + nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -273,13 +274,14 @@ hash_netiface4_data_tlist(struct sk_buff *skb, if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); - NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(tdata->timeout))); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) || + nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(tdata->timeout)))) + goto nla_put_failure; return 0; @@ -555,11 +557,12 @@ hash_netiface6_data_list(struct sk_buff *skb, if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); - NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) || + nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -576,13 +579,14 @@ hash_netiface6_data_tlist(struct sk_buff *skb, if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); - NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(e->timeout))); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) || + nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, 
+ htonl(ip_set_timeout_get(e->timeout)))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c index 3a5e198641d..fc3143a2d41 100644 --- a/net/netfilter/ipset/ip_set_hash_netport.c +++ b/net/netfilter/ipset/ip_set_hash_netport.c @@ -124,12 +124,13 @@ hash_netport4_data_list(struct sk_buff *skb, { u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -144,15 +145,15 @@ hash_netport4_data_tlist(struct sk_buff *skb, (const struct hash_netport4_telem *)data; u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(tdata->timeout))); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); - + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) || + nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(tdata->timeout))) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -402,12 +403,13 @@ hash_netport6_data_list(struct sk_buff *skb, { u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -422,14 +424,15 @@ hash_netport6_data_tlist(struct sk_buff *skb, (const struct hash_netport6_telem *)data; u32 flags = data->nomatch ? 
IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(e->timeout))); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(e->timeout))) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index 7e095f9005f..6cb1225765f 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c @@ -402,12 +402,13 @@ list_set_head(struct ip_set *set, struct sk_buff *skb) nested = ipset_nest_start(skb, IPSET_ATTR_DATA); if (!nested) goto nla_put_failure; - NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size)); - if (with_timeout(map->timeout)) - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); - NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); - NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, - htonl(sizeof(*map) + map->size * map->dsize)); + if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) || + (with_timeout(map->timeout) && + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) || + nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || + nla_put_net32(skb, IPSET_ATTR_MEMSIZE, + htonl(sizeof(*map) + map->size * map->dsize))) + goto nla_put_failure; ipset_nest_end(skb, nested); return 0; @@ -442,13 +443,15 @@ list_set_list(const struct ip_set *set, } else goto nla_put_failure; } - NLA_PUT_STRING(skb, IPSET_ATTR_NAME, - ip_set_name_byindex(e->id)); + if (nla_put_string(skb, IPSET_ATTR_NAME, + ip_set_name_byindex(e->id))) + goto nla_put_failure; if (with_timeout(map->timeout)) { const struct set_telem *te = (const struct set_telem *) e; - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(te->timeout))); + __be32 to = htonl(ip_set_timeout_get(te->timeout)); + if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, to)) + goto nla_put_failure; } ipset_nest_end(skb, nested); } diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c index 52856178c9d..64f9e8f1320 100644 --- a/net/netfilter/ipvs/ip_vs_app.c +++ b/net/netfilter/ipvs/ip_vs_app.c @@ -313,7 +313,7 @@ vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th) * Assumes already checked proto==IPPROTO_TCP and diff!=0. 
*/ static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq, - unsigned flag, __u32 seq, int diff) + unsigned int flag, __u32 seq, int diff) { /* spinlock is to keep updating cp->flags atomic */ spin_lock(&cp->lock); diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 29fa5badde7..1548df9a752 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -86,42 +86,42 @@ struct ip_vs_aligned_lock static struct ip_vs_aligned_lock __ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned; -static inline void ct_read_lock(unsigned key) +static inline void ct_read_lock(unsigned int key) { read_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); } -static inline void ct_read_unlock(unsigned key) +static inline void ct_read_unlock(unsigned int key) { read_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); } -static inline void ct_write_lock(unsigned key) +static inline void ct_write_lock(unsigned int key) { write_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); } -static inline void ct_write_unlock(unsigned key) +static inline void ct_write_unlock(unsigned int key) { write_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); } -static inline void ct_read_lock_bh(unsigned key) +static inline void ct_read_lock_bh(unsigned int key) { read_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); } -static inline void ct_read_unlock_bh(unsigned key) +static inline void ct_read_unlock_bh(unsigned int key) { read_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); } -static inline void ct_write_lock_bh(unsigned key) +static inline void ct_write_lock_bh(unsigned int key) { write_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); } -static inline void ct_write_unlock_bh(unsigned key) +static inline void ct_write_unlock_bh(unsigned int key) { write_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); } @@ -130,7 +130,7 @@ static inline void ct_write_unlock_bh(unsigned key) /* * Returns hash value for IPVS connection entry */ -static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned proto, +static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned int proto, const union nf_inet_addr *addr, __be16 port) { @@ -188,7 +188,7 @@ static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp) */ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp) { - unsigned hash; + unsigned int hash; int ret; if (cp->flags & IP_VS_CONN_F_ONE_PACKET) @@ -224,7 +224,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp) */ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp) { - unsigned hash; + unsigned int hash; int ret; /* unhash it and decrease its reference counter */ @@ -257,7 +257,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp) static inline struct ip_vs_conn * __ip_vs_conn_in_get(const struct ip_vs_conn_param *p) { - unsigned hash; + unsigned int hash; struct ip_vs_conn *cp; struct hlist_node *n; @@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(ip_vs_conn_in_get_proto); /* Get reference to connection template */ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p) { - unsigned hash; + unsigned int hash; struct ip_vs_conn *cp; struct hlist_node *n; @@ -394,7 +394,7 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p) * p->vaddr, p->vport: pkt dest address (foreign host) */ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p) { - unsigned 
hash; + unsigned int hash; struct ip_vs_conn *cp, *ret=NULL; struct hlist_node *n; @@ -548,6 +548,7 @@ static inline void ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) { unsigned int conn_flags; + __u32 flags; /* if dest is NULL, then return directly */ if (!dest) @@ -559,17 +560,19 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) conn_flags = atomic_read(&dest->conn_flags); if (cp->protocol != IPPROTO_UDP) conn_flags &= ~IP_VS_CONN_F_ONE_PACKET; + flags = cp->flags; /* Bind with the destination and its corresponding transmitter */ - if (cp->flags & IP_VS_CONN_F_SYNC) { + if (flags & IP_VS_CONN_F_SYNC) { /* if the connection is not template and is created * by sync, preserve the activity flag. */ - if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) + if (!(flags & IP_VS_CONN_F_TEMPLATE)) conn_flags &= ~IP_VS_CONN_F_INACTIVE; /* connections inherit forwarding method from dest */ - cp->flags &= ~IP_VS_CONN_F_FWD_MASK; + flags &= ~(IP_VS_CONN_F_FWD_MASK | IP_VS_CONN_F_NOOUTPUT); } - cp->flags |= conn_flags; + flags |= conn_flags; + cp->flags = flags; cp->dest = dest; IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d " @@ -584,12 +587,12 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) atomic_read(&dest->refcnt)); /* Update the connection counters */ - if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { - /* It is a normal connection, so increase the inactive - connection counter because it is in TCP SYNRECV - state (inactive) or other protocol inacive state */ - if ((cp->flags & IP_VS_CONN_F_SYNC) && - (!(cp->flags & IP_VS_CONN_F_INACTIVE))) + if (!(flags & IP_VS_CONN_F_TEMPLATE)) { + /* It is a normal connection, so modify the counters + * according to the flags, later the protocol can + * update them on state change + */ + if (!(flags & IP_VS_CONN_F_INACTIVE)) atomic_inc(&dest->activeconns); else atomic_inc(&dest->inactconns); @@ -613,14 +616,40 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp) { struct ip_vs_dest *dest; - if ((cp) && (!cp->dest)) { - dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr, - cp->dport, &cp->vaddr, cp->vport, - cp->protocol, cp->fwmark, cp->flags); + dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr, + cp->dport, &cp->vaddr, cp->vport, + cp->protocol, cp->fwmark, cp->flags); + if (dest) { + struct ip_vs_proto_data *pd; + + spin_lock(&cp->lock); + if (cp->dest) { + spin_unlock(&cp->lock); + return dest; + } + + /* Applications work depending on the forwarding method + * but better to reassign them always when binding dest */ + if (cp->app) + ip_vs_unbind_app(cp); + ip_vs_bind_dest(cp, dest); - return dest; - } else - return NULL; + spin_unlock(&cp->lock); + + /* Update its packet transmitter */ + cp->packet_xmit = NULL; +#ifdef CONFIG_IP_VS_IPV6 + if (cp->af == AF_INET6) + ip_vs_bind_xmit_v6(cp); + else +#endif + ip_vs_bind_xmit(cp); + + pd = ip_vs_proto_data_get(ip_vs_conn_net(cp), cp->protocol); + if (pd && atomic_read(&pd->appcnt)) + ip_vs_bind_app(cp, pd->pp); + } + return dest; } @@ -743,7 +772,8 @@ int ip_vs_check_template(struct ip_vs_conn *ct) static void ip_vs_conn_expire(unsigned long data) { struct ip_vs_conn *cp = (struct ip_vs_conn *)data; - struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp)); + struct net *net = ip_vs_conn_net(cp); + struct netns_ipvs *ipvs = net_ipvs(net); cp->timeout = 60*HZ; @@ -808,6 +838,9 @@ static void ip_vs_conn_expire(unsigned long data) atomic_read(&cp->refcnt)-1, atomic_read(&cp->n_control)); + if (ipvs->sync_state & IP_VS_STATE_MASTER) + 
ip_vs_sync_conn(net, cp, sysctl_sync_threshold(ipvs)); + ip_vs_conn_put(cp); } @@ -824,7 +857,7 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp) */ struct ip_vs_conn * ip_vs_conn_new(const struct ip_vs_conn_param *p, - const union nf_inet_addr *daddr, __be16 dport, unsigned flags, + const union nf_inet_addr *daddr, __be16 dport, unsigned int flags, struct ip_vs_dest *dest, __u32 fwmark) { struct ip_vs_conn *cp; @@ -881,6 +914,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, /* Set its state and timeout */ cp->state = 0; cp->timeout = 3*HZ; + cp->sync_endtime = jiffies & ~3UL; /* Bind its packet transmitter */ #ifdef CONFIG_IP_VS_IPV6 @@ -1057,7 +1091,7 @@ static const struct file_operations ip_vs_conn_fops = { .release = seq_release_net, }; -static const char *ip_vs_origin_name(unsigned flags) +static const char *ip_vs_origin_name(unsigned int flags) { if (flags & IP_VS_CONN_F_SYNC) return "SYNC"; @@ -1169,7 +1203,7 @@ void ip_vs_random_dropentry(struct net *net) * Randomly scan 1/32 of the whole table every second */ for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) { - unsigned hash = net_random() & ip_vs_conn_tab_mask; + unsigned int hash = net_random() & ip_vs_conn_tab_mask; struct hlist_node *n; /* diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 00bdb1d9d69..a54b018c6ee 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -80,7 +80,7 @@ static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0); #define icmp_id(icmph) (((icmph)->un).echo.id) #define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier) -const char *ip_vs_proto_name(unsigned proto) +const char *ip_vs_proto_name(unsigned int proto) { static char buf[20]; @@ -1613,34 +1613,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) else pkts = atomic_add_return(1, &cp->in_pkts); - if ((ipvs->sync_state & IP_VS_STATE_MASTER) && - cp->protocol == IPPROTO_SCTP) { - if ((cp->state == IP_VS_SCTP_S_ESTABLISHED && - (pkts % sysctl_sync_period(ipvs) - == sysctl_sync_threshold(ipvs))) || - (cp->old_state != cp->state && - ((cp->state == IP_VS_SCTP_S_CLOSED) || - (cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) || - (cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) { - ip_vs_sync_conn(net, cp); - goto out; - } - } - - /* Keep this block last: TCP and others with pp->num_states <= 1 */ - else if ((ipvs->sync_state & IP_VS_STATE_MASTER) && - (((cp->protocol != IPPROTO_TCP || - cp->state == IP_VS_TCP_S_ESTABLISHED) && - (pkts % sysctl_sync_period(ipvs) - == sysctl_sync_threshold(ipvs))) || - ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) && - ((cp->state == IP_VS_TCP_S_FIN_WAIT) || - (cp->state == IP_VS_TCP_S_CLOSE) || - (cp->state == IP_VS_TCP_S_CLOSE_WAIT) || - (cp->state == IP_VS_TCP_S_TIME_WAIT))))) - ip_vs_sync_conn(net, cp); -out: - cp->old_state = cp->state; + if (ipvs->sync_state & IP_VS_STATE_MASTER) + ip_vs_sync_conn(net, cp, pkts); ip_vs_conn_put(cp); return ret; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index f5589987fc8..dd811b8dd97 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -265,11 +265,11 @@ static struct list_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE]; /* * Returns hash value for virtual service */ -static inline unsigned -ip_vs_svc_hashkey(struct net *net, int af, unsigned proto, +static inline unsigned int +ip_vs_svc_hashkey(struct net *net, int af, unsigned int proto, const union nf_inet_addr *addr, __be16 port) { - register unsigned porth = ntohs(port); + 
register unsigned int porth = ntohs(port); __be32 addr_fold = addr->ip; #ifdef CONFIG_IP_VS_IPV6 @@ -286,7 +286,7 @@ ip_vs_svc_hashkey(struct net *net, int af, unsigned proto, /* * Returns hash value of fwmark for virtual service lookup */ -static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark) +static inline unsigned int ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark) { return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK; } @@ -298,7 +298,7 @@ static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark) */ static int ip_vs_svc_hash(struct ip_vs_service *svc) { - unsigned hash; + unsigned int hash; if (svc->flags & IP_VS_SVC_F_HASHED) { pr_err("%s(): request for already hashed, called from %pF\n", @@ -361,7 +361,7 @@ static inline struct ip_vs_service * __ip_vs_service_find(struct net *net, int af, __u16 protocol, const union nf_inet_addr *vaddr, __be16 vport) { - unsigned hash; + unsigned int hash; struct ip_vs_service *svc; /* Check for "full" addressed entries */ @@ -388,7 +388,7 @@ __ip_vs_service_find(struct net *net, int af, __u16 protocol, static inline struct ip_vs_service * __ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark) { - unsigned hash; + unsigned int hash; struct ip_vs_service *svc; /* Check for fwmark addressed entries */ @@ -489,11 +489,11 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest) /* * Returns hash value for real service */ -static inline unsigned ip_vs_rs_hashkey(int af, +static inline unsigned int ip_vs_rs_hashkey(int af, const union nf_inet_addr *addr, __be16 port) { - register unsigned porth = ntohs(port); + register unsigned int porth = ntohs(port); __be32 addr_fold = addr->ip; #ifdef CONFIG_IP_VS_IPV6 @@ -512,7 +512,7 @@ static inline unsigned ip_vs_rs_hashkey(int af, */ static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest) { - unsigned hash; + unsigned int hash; if (!list_empty(&dest->d_list)) { return 0; @@ -555,7 +555,7 @@ ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol, __be16 dport) { struct netns_ipvs *ipvs = net_ipvs(net); - unsigned hash; + unsigned int hash; struct ip_vs_dest *dest; /* @@ -842,7 +842,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, struct ip_vs_dest **dest_p) { struct ip_vs_dest *dest; - unsigned atype; + unsigned int atype; EnterFunction(2); @@ -1599,6 +1599,10 @@ static int ip_vs_zero_all(struct net *net) } #ifdef CONFIG_SYSCTL + +static int zero; +static int three = 3; + static int proc_do_defense_mode(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) @@ -1632,7 +1636,8 @@ proc_do_sync_threshold(ctl_table *table, int write, memcpy(val, valp, sizeof(val)); rc = proc_dointvec(table, write, buffer, lenp, ppos); - if (write && (valp[0] < 0 || valp[1] < 0 || valp[0] >= valp[1])) { + if (write && (valp[0] < 0 || valp[1] < 0 || + (valp[0] >= valp[1] && valp[1]))) { /* Restore the correct value */ memcpy(valp, val, sizeof(val)); } @@ -1652,9 +1657,24 @@ proc_do_sync_mode(ctl_table *table, int write, if ((*valp < 0) || (*valp > 1)) { /* Restore the correct value */ *valp = val; - } else { - struct net *net = current->nsproxy->net_ns; - ip_vs_sync_switch_mode(net, val); + } + } + return rc; +} + +static int +proc_do_sync_ports(ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int *valp = table->data; + int val = *valp; + int rc; + + rc = proc_dointvec(table, write, buffer, lenp, ppos); + if (write && (*valp != val)) { + if (*valp < 1 || 
!is_power_of_2(*valp)) { + /* Restore the correct value */ + *valp = val; } } return rc; @@ -1718,6 +1738,24 @@ static struct ctl_table vs_vars[] = { .proc_handler = &proc_do_sync_mode, }, { + .procname = "sync_ports", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_do_sync_ports, + }, + { + .procname = "sync_qlen_max", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "sync_sock_size", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { .procname = "cache_bypass", .maxlen = sizeof(int), .mode = 0644, @@ -1743,6 +1781,20 @@ static struct ctl_table vs_vars[] = { .proc_handler = proc_do_sync_threshold, }, { + .procname = "sync_refresh_period", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_jiffies, + }, + { + .procname = "sync_retries", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &three, + }, + { .procname = "nat_icmp_send", .maxlen = sizeof(int), .mode = 0644, @@ -1846,13 +1898,6 @@ static struct ctl_table vs_vars[] = { { } }; -const struct ctl_path net_vs_ctl_path[] = { - { .procname = "net", }, - { .procname = "ipv4", }, - { .procname = "vs", }, - { } -}; -EXPORT_SYMBOL_GPL(net_vs_ctl_path); #endif #ifdef CONFIG_PROC_FS @@ -1867,7 +1912,7 @@ struct ip_vs_iter { * Write the contents of the VS rule table to a PROCfs file. * (It is kept just for backward compatibility) */ -static inline const char *ip_vs_fwd_name(unsigned flags) +static inline const char *ip_vs_fwd_name(unsigned int flags) { switch (flags & IP_VS_CONN_F_FWD_MASK) { case IP_VS_CONN_F_LOCALNODE: @@ -2816,17 +2861,17 @@ static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type, ip_vs_copy_stats(&ustats, stats); - NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns); - NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts); - NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts); - NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes); - NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes); - NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, ustats.cps); - NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps); - NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps); - NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps); - NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps); - + if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns) || + nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts) || + nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts) || + nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes) || + nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes) || + nla_put_u32(skb, IPVS_STATS_ATTR_CPS, ustats.cps) || + nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps) || + nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps) || + nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps) || + nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps)) + goto nla_put_failure; nla_nest_end(skb, nl_stats); return 0; @@ -2847,23 +2892,25 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb, if (!nl_service) return -EMSGSIZE; - NLA_PUT_U16(skb, IPVS_SVC_ATTR_AF, svc->af); - + if (nla_put_u16(skb, IPVS_SVC_ATTR_AF, svc->af)) + goto nla_put_failure; if (svc->fwmark) { - NLA_PUT_U32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark); + if (nla_put_u32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark)) + goto nla_put_failure; } else { - NLA_PUT_U16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol); - NLA_PUT(skb, 
IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr); - NLA_PUT_U16(skb, IPVS_SVC_ATTR_PORT, svc->port); + if (nla_put_u16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol) || + nla_put(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr) || + nla_put_u16(skb, IPVS_SVC_ATTR_PORT, svc->port)) + goto nla_put_failure; } - NLA_PUT_STRING(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name); - if (svc->pe) - NLA_PUT_STRING(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name); - NLA_PUT(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags); - NLA_PUT_U32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ); - NLA_PUT_U32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask); - + if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name) || + (svc->pe && + nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name)) || + nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) || + nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) || + nla_put_u32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask)) + goto nla_put_failure; if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats)) goto nla_put_failure; @@ -3038,21 +3085,22 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest) if (!nl_dest) return -EMSGSIZE; - NLA_PUT(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr); - NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port); - - NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD, - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK); - NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight)); - NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold); - NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold); - NLA_PUT_U32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS, - atomic_read(&dest->activeconns)); - NLA_PUT_U32(skb, IPVS_DEST_ATTR_INACT_CONNS, - atomic_read(&dest->inactconns)); - NLA_PUT_U32(skb, IPVS_DEST_ATTR_PERSIST_CONNS, - atomic_read(&dest->persistconns)); - + if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) || + nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) || + nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD, + (atomic_read(&dest->conn_flags) & + IP_VS_CONN_F_FWD_MASK)) || + nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT, + atomic_read(&dest->weight)) || + nla_put_u32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold) || + nla_put_u32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold) || + nla_put_u32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS, + atomic_read(&dest->activeconns)) || + nla_put_u32(skb, IPVS_DEST_ATTR_INACT_CONNS, + atomic_read(&dest->inactconns)) || + nla_put_u32(skb, IPVS_DEST_ATTR_PERSIST_CONNS, + atomic_read(&dest->persistconns))) + goto nla_put_failure; if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats)) goto nla_put_failure; @@ -3181,10 +3229,10 @@ static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __be32 state, if (!nl_daemon) return -EMSGSIZE; - NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_STATE, state); - NLA_PUT_STRING(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn); - NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid); - + if (nla_put_u32(skb, IPVS_DAEMON_ATTR_STATE, state) || + nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn) || + nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid)) + goto nla_put_failure; nla_nest_end(skb, nl_daemon); return 0; @@ -3473,21 +3521,26 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info) __ip_vs_get_timeouts(net, &t); #ifdef CONFIG_IP_VS_PROTO_TCP - NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout); - NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, - 
t.tcp_fin_timeout); + if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, + t.tcp_timeout) || + nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, + t.tcp_fin_timeout)) + goto nla_put_failure; #endif #ifdef CONFIG_IP_VS_PROTO_UDP - NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout); + if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout)) + goto nla_put_failure; #endif break; } case IPVS_CMD_GET_INFO: - NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE); - NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE, - ip_vs_conn_tab_size); + if (nla_put_u32(msg, IPVS_INFO_ATTR_VERSION, + IP_VS_VERSION_CODE) || + nla_put_u32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE, + ip_vs_conn_tab_size)) + goto nla_put_failure; break; } @@ -3654,6 +3707,12 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net) tbl[idx++].data = &ipvs->sysctl_snat_reroute; ipvs->sysctl_sync_ver = 1; tbl[idx++].data = &ipvs->sysctl_sync_ver; + ipvs->sysctl_sync_ports = 1; + tbl[idx++].data = &ipvs->sysctl_sync_ports; + ipvs->sysctl_sync_qlen_max = nr_free_buffer_pages() / 32; + tbl[idx++].data = &ipvs->sysctl_sync_qlen_max; + ipvs->sysctl_sync_sock_size = 0; + tbl[idx++].data = &ipvs->sysctl_sync_sock_size; tbl[idx++].data = &ipvs->sysctl_cache_bypass; tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn; tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template; @@ -3661,11 +3720,14 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net) ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD; tbl[idx].data = &ipvs->sysctl_sync_threshold; tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold); + ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD; + tbl[idx++].data = &ipvs->sysctl_sync_refresh_period; + ipvs->sysctl_sync_retries = clamp_t(int, DEFAULT_SYNC_RETRIES, 0, 3); + tbl[idx++].data = &ipvs->sysctl_sync_retries; tbl[idx++].data = &ipvs->sysctl_nat_icmp_send; - ipvs->sysctl_hdr = register_net_sysctl_table(net, net_vs_ctl_path, - tbl); + ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl); if (ipvs->sysctl_hdr == NULL) { if (!net_eq(net, &init_net)) kfree(tbl); diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c index 1c269e56200..8b7dca9ea42 100644 --- a/net/netfilter/ipvs/ip_vs_dh.c +++ b/net/netfilter/ipvs/ip_vs_dh.c @@ -68,7 +68,7 @@ struct ip_vs_dh_bucket { /* * Returns hash value for IPVS DH entry */ -static inline unsigned ip_vs_dh_hashkey(int af, const union nf_inet_addr *addr) +static inline unsigned int ip_vs_dh_hashkey(int af, const union nf_inet_addr *addr) { __be32 addr_fold = addr->ip; @@ -149,7 +149,7 @@ static int ip_vs_dh_init_svc(struct ip_vs_service *svc) /* allocate the DH table for this service */ tbl = kmalloc(sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE, - GFP_ATOMIC); + GFP_KERNEL); if (tbl == NULL) return -ENOMEM; diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index e39f693dd3e..b20b29c903e 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c @@ -177,7 +177,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, __be16 port; struct ip_vs_conn *n_cp; char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */ - unsigned buf_len; + unsigned int buf_len; int ret = 0; enum ip_conntrack_info ctinfo; struct nf_conn *ct; @@ -485,7 +485,7 @@ static struct pernet_operations ip_vs_ftp_ops = { .exit = __ip_vs_ftp_exit, }; -int __init ip_vs_ftp_init(void) +static int __init ip_vs_ftp_init(void) { int rv; diff --git a/net/netfilter/ipvs/ip_vs_lblc.c 
b/net/netfilter/ipvs/ip_vs_lblc.c index caa43704e55..df646ccf08a 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c @@ -142,7 +142,7 @@ static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) /* * Returns hash value for IPVS LBLC entry */ -static inline unsigned +static inline unsigned int ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr) { __be32 addr_fold = addr->ip; @@ -163,7 +163,7 @@ ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr) static void ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en) { - unsigned hash = ip_vs_lblc_hashkey(en->af, &en->addr); + unsigned int hash = ip_vs_lblc_hashkey(en->af, &en->addr); list_add(&en->list, &tbl->bucket[hash]); atomic_inc(&tbl->entries); @@ -178,7 +178,7 @@ static inline struct ip_vs_lblc_entry * ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl, const union nf_inet_addr *addr) { - unsigned hash = ip_vs_lblc_hashkey(af, addr); + unsigned int hash = ip_vs_lblc_hashkey(af, addr); struct ip_vs_lblc_entry *en; list_for_each_entry(en, &tbl->bucket[hash], list) @@ -342,7 +342,7 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc) /* * Allocate the ip_vs_lblc_table for this service */ - tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC); + tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); if (tbl == NULL) return -ENOMEM; @@ -566,8 +566,7 @@ static int __net_init __ip_vs_lblc_init(struct net *net) ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration; ipvs->lblc_ctl_header = - register_net_sysctl_table(net, net_vs_ctl_path, - ipvs->lblc_ctl_table); + register_net_sysctl(net, "net/ipv4/vs", ipvs->lblc_ctl_table); if (!ipvs->lblc_ctl_header) { if (!net_eq(net, &init_net)) kfree(ipvs->lblc_ctl_table); diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index 548bf37aa29..570e31ea427 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c @@ -311,7 +311,7 @@ static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en) /* * Returns hash value for IPVS LBLCR entry */ -static inline unsigned +static inline unsigned int ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr) { __be32 addr_fold = addr->ip; @@ -332,7 +332,7 @@ ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr) static void ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en) { - unsigned hash = ip_vs_lblcr_hashkey(en->af, &en->addr); + unsigned int hash = ip_vs_lblcr_hashkey(en->af, &en->addr); list_add(&en->list, &tbl->bucket[hash]); atomic_inc(&tbl->entries); @@ -347,7 +347,7 @@ static inline struct ip_vs_lblcr_entry * ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *addr) { - unsigned hash = ip_vs_lblcr_hashkey(af, addr); + unsigned int hash = ip_vs_lblcr_hashkey(af, addr); struct ip_vs_lblcr_entry *en; list_for_each_entry(en, &tbl->bucket[hash], list) @@ -511,7 +511,7 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) /* * Allocate the ip_vs_lblcr_table for this service */ - tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC); + tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); if (tbl == NULL) return -ENOMEM; @@ -760,8 +760,7 @@ static int __net_init __ip_vs_lblcr_init(struct net *net) ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration; ipvs->lblcr_ctl_header = - register_net_sysctl_table(net, net_vs_ctl_path, - ipvs->lblcr_ctl_table); + register_net_sysctl(net, "net/ipv4/vs", ipvs->lblcr_ctl_table); if (!ipvs->lblcr_ctl_header) { if (!net_eq(net, &init_net)) 
kfree(ipvs->lblcr_ctl_table); diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index ed835e67a07..50d82186da8 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c @@ -48,7 +48,7 @@ static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE]; */ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp) { - unsigned hash = IP_VS_PROTO_HASH(pp->protocol); + unsigned int hash = IP_VS_PROTO_HASH(pp->protocol); pp->next = ip_vs_proto_table[hash]; ip_vs_proto_table[hash] = pp; @@ -66,9 +66,9 @@ static int register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp) { struct netns_ipvs *ipvs = net_ipvs(net); - unsigned hash = IP_VS_PROTO_HASH(pp->protocol); + unsigned int hash = IP_VS_PROTO_HASH(pp->protocol); struct ip_vs_proto_data *pd = - kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC); + kzalloc(sizeof(struct ip_vs_proto_data), GFP_KERNEL); if (!pd) return -ENOMEM; @@ -97,7 +97,7 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp) static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp) { struct ip_vs_protocol **pp_p; - unsigned hash = IP_VS_PROTO_HASH(pp->protocol); + unsigned int hash = IP_VS_PROTO_HASH(pp->protocol); pp_p = &ip_vs_proto_table[hash]; for (; *pp_p; pp_p = &(*pp_p)->next) { @@ -120,7 +120,7 @@ unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd) { struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_proto_data **pd_p; - unsigned hash = IP_VS_PROTO_HASH(pd->pp->protocol); + unsigned int hash = IP_VS_PROTO_HASH(pd->pp->protocol); pd_p = &ipvs->proto_data_table[hash]; for (; *pd_p; pd_p = &(*pd_p)->next) { @@ -142,7 +142,7 @@ unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd) struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto) { struct ip_vs_protocol *pp; - unsigned hash = IP_VS_PROTO_HASH(proto); + unsigned int hash = IP_VS_PROTO_HASH(proto); for (pp = ip_vs_proto_table[hash]; pp; pp = pp->next) { if (pp->protocol == proto) @@ -156,11 +156,11 @@ EXPORT_SYMBOL(ip_vs_proto_get); /* * get ip_vs_protocol object data by netns and proto */ -struct ip_vs_proto_data * +static struct ip_vs_proto_data * __ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto) { struct ip_vs_proto_data *pd; - unsigned hash = IP_VS_PROTO_HASH(proto); + unsigned int hash = IP_VS_PROTO_HASH(proto); for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) { if (pd->pp->protocol == proto) @@ -199,7 +199,7 @@ void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags) int * ip_vs_create_timeout_table(int *table, int size) { - return kmemdup(table, size, GFP_ATOMIC); + return kmemdup(table, size, GFP_KERNEL); } diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c index 069e8d4d5c0..05126521743 100644 --- a/net/netfilter/ipvs/ip_vs_sh.c +++ b/net/netfilter/ipvs/ip_vs_sh.c @@ -70,7 +70,7 @@ struct ip_vs_sh_bucket { /* * Returns hash value for IPVS SH entry */ -static inline unsigned ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr) +static inline unsigned int ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr) { __be32 addr_fold = addr->ip; @@ -162,7 +162,7 @@ static int ip_vs_sh_init_svc(struct ip_vs_service *svc) /* allocate the SH table for this service */ tbl = kmalloc(sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE, - GFP_ATOMIC); + GFP_KERNEL); if (tbl == NULL) return -ENOMEM; diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 
8a0d6d6889f..effa10c9e4e 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -196,6 +196,7 @@ struct ip_vs_sync_thread_data { struct net *net; struct socket *sock; char *buf; + int id; }; /* Version 0 definition of packet sizes */ @@ -271,13 +272,6 @@ struct ip_vs_sync_buff { unsigned char *end; }; -/* multicast addr */ -static struct sockaddr_in mcast_addr = { - .sin_family = AF_INET, - .sin_port = cpu_to_be16(IP_VS_SYNC_PORT), - .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP), -}; - /* * Copy of struct ip_vs_seq * From unaligned network order to aligned host order @@ -300,18 +294,22 @@ static void hton_seq(struct ip_vs_seq *ho, struct ip_vs_seq *no) put_unaligned_be32(ho->previous_delta, &no->previous_delta); } -static inline struct ip_vs_sync_buff *sb_dequeue(struct netns_ipvs *ipvs) +static inline struct ip_vs_sync_buff * +sb_dequeue(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms) { struct ip_vs_sync_buff *sb; spin_lock_bh(&ipvs->sync_lock); - if (list_empty(&ipvs->sync_queue)) { + if (list_empty(&ms->sync_queue)) { sb = NULL; + __set_current_state(TASK_INTERRUPTIBLE); } else { - sb = list_entry(ipvs->sync_queue.next, - struct ip_vs_sync_buff, + sb = list_entry(ms->sync_queue.next, struct ip_vs_sync_buff, list); list_del(&sb->list); + ms->sync_queue_len--; + if (!ms->sync_queue_len) + ms->sync_queue_delay = 0; } spin_unlock_bh(&ipvs->sync_lock); @@ -334,7 +332,7 @@ ip_vs_sync_buff_create(struct netns_ipvs *ipvs) kfree(sb); return NULL; } - sb->mesg->reserved = 0; /* old nr_conns i.e. must be zeo now */ + sb->mesg->reserved = 0; /* old nr_conns i.e. must be zero now */ sb->mesg->version = SYNC_PROTO_VER; sb->mesg->syncid = ipvs->master_syncid; sb->mesg->size = sizeof(struct ip_vs_sync_mesg); @@ -353,14 +351,22 @@ static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb) kfree(sb); } -static inline void sb_queue_tail(struct netns_ipvs *ipvs) +static inline void sb_queue_tail(struct netns_ipvs *ipvs, + struct ipvs_master_sync_state *ms) { - struct ip_vs_sync_buff *sb = ipvs->sync_buff; + struct ip_vs_sync_buff *sb = ms->sync_buff; spin_lock(&ipvs->sync_lock); - if (ipvs->sync_state & IP_VS_STATE_MASTER) - list_add_tail(&sb->list, &ipvs->sync_queue); - else + if (ipvs->sync_state & IP_VS_STATE_MASTER && + ms->sync_queue_len < sysctl_sync_qlen_max(ipvs)) { + if (!ms->sync_queue_len) + schedule_delayed_work(&ms->master_wakeup_work, + max(IPVS_SYNC_SEND_DELAY, 1)); + ms->sync_queue_len++; + list_add_tail(&sb->list, &ms->sync_queue); + if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) + wake_up_process(ms->master_thread); + } else ip_vs_sync_buff_release(sb); spin_unlock(&ipvs->sync_lock); } @@ -370,49 +376,26 @@ static inline void sb_queue_tail(struct netns_ipvs *ipvs) * than the specified time or the specified time is zero. 
*/ static inline struct ip_vs_sync_buff * -get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time) +get_curr_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms, + unsigned long time) { struct ip_vs_sync_buff *sb; spin_lock_bh(&ipvs->sync_buff_lock); - if (ipvs->sync_buff && - time_after_eq(jiffies - ipvs->sync_buff->firstuse, time)) { - sb = ipvs->sync_buff; - ipvs->sync_buff = NULL; + sb = ms->sync_buff; + if (sb && time_after_eq(jiffies - sb->firstuse, time)) { + ms->sync_buff = NULL; + __set_current_state(TASK_RUNNING); } else sb = NULL; spin_unlock_bh(&ipvs->sync_buff_lock); return sb; } -/* - * Switch mode from sending version 0 or 1 - * - must handle sync_buf - */ -void ip_vs_sync_switch_mode(struct net *net, int mode) +static inline int +select_master_thread_id(struct netns_ipvs *ipvs, struct ip_vs_conn *cp) { - struct netns_ipvs *ipvs = net_ipvs(net); - - if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) - return; - if (mode == sysctl_sync_ver(ipvs) || !ipvs->sync_buff) - return; - - spin_lock_bh(&ipvs->sync_buff_lock); - /* Buffer empty ? then let buf_create do the job */ - if (ipvs->sync_buff->mesg->size <= sizeof(struct ip_vs_sync_mesg)) { - kfree(ipvs->sync_buff); - ipvs->sync_buff = NULL; - } else { - spin_lock_bh(&ipvs->sync_lock); - if (ipvs->sync_state & IP_VS_STATE_MASTER) - list_add_tail(&ipvs->sync_buff->list, - &ipvs->sync_queue); - else - ip_vs_sync_buff_release(ipvs->sync_buff); - spin_unlock_bh(&ipvs->sync_lock); - } - spin_unlock_bh(&ipvs->sync_buff_lock); + return ((long) cp >> (1 + ilog2(sizeof(*cp)))) & ipvs->threads_mask; } /* @@ -442,15 +425,101 @@ ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs) return sb; } +/* Check if conn should be synced. + * pkts: conn packets, use sysctl_sync_threshold to avoid packet check + * - (1) sync_refresh_period: reduce sync rate. 
Additionally, retry + * sync_retries times with period of sync_refresh_period/8 + * - (2) if both sync_refresh_period and sync_period are 0 send sync only + * for state changes or only once when pkts matches sync_threshold + * - (3) templates: rate can be reduced only with sync_refresh_period or + * with (2) + */ +static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs, + struct ip_vs_conn *cp, int pkts) +{ + unsigned long orig = ACCESS_ONCE(cp->sync_endtime); + unsigned long now = jiffies; + unsigned long n = (now + cp->timeout) & ~3UL; + unsigned int sync_refresh_period; + int sync_period; + int force; + + /* Check if we sync in current state */ + if (unlikely(cp->flags & IP_VS_CONN_F_TEMPLATE)) + force = 0; + else if (likely(cp->protocol == IPPROTO_TCP)) { + if (!((1 << cp->state) & + ((1 << IP_VS_TCP_S_ESTABLISHED) | + (1 << IP_VS_TCP_S_FIN_WAIT) | + (1 << IP_VS_TCP_S_CLOSE) | + (1 << IP_VS_TCP_S_CLOSE_WAIT) | + (1 << IP_VS_TCP_S_TIME_WAIT)))) + return 0; + force = cp->state != cp->old_state; + if (force && cp->state != IP_VS_TCP_S_ESTABLISHED) + goto set; + } else if (unlikely(cp->protocol == IPPROTO_SCTP)) { + if (!((1 << cp->state) & + ((1 << IP_VS_SCTP_S_ESTABLISHED) | + (1 << IP_VS_SCTP_S_CLOSED) | + (1 << IP_VS_SCTP_S_SHUT_ACK_CLI) | + (1 << IP_VS_SCTP_S_SHUT_ACK_SER)))) + return 0; + force = cp->state != cp->old_state; + if (force && cp->state != IP_VS_SCTP_S_ESTABLISHED) + goto set; + } else { + /* UDP or another protocol with single state */ + force = 0; + } + + sync_refresh_period = sysctl_sync_refresh_period(ipvs); + if (sync_refresh_period > 0) { + long diff = n - orig; + long min_diff = max(cp->timeout >> 1, 10UL * HZ); + + /* Avoid sync if difference is below sync_refresh_period + * and below the half timeout. + */ + if (abs(diff) < min_t(long, sync_refresh_period, min_diff)) { + int retries = orig & 3; + + if (retries >= sysctl_sync_retries(ipvs)) + return 0; + if (time_before(now, orig - cp->timeout + + (sync_refresh_period >> 3))) + return 0; + n |= retries + 1; + } + } + sync_period = sysctl_sync_period(ipvs); + if (sync_period > 0) { + if (!(cp->flags & IP_VS_CONN_F_TEMPLATE) && + pkts % sync_period != sysctl_sync_threshold(ipvs)) + return 0; + } else if (sync_refresh_period <= 0 && + pkts != sysctl_sync_threshold(ipvs)) + return 0; + +set: + cp->old_state = cp->state; + n = cmpxchg(&cp->sync_endtime, orig, n); + return n == orig || force; +} + /* * Version 0 , could be switched in by sys_ctl. * Add an ip_vs_conn information into the current sync_buff. 
*/ -void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp) +static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp, + int pkts) { struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_sync_mesg_v0 *m; struct ip_vs_sync_conn_v0 *s; + struct ip_vs_sync_buff *buff; + struct ipvs_master_sync_state *ms; + int id; int len; if (unlikely(cp->af != AF_INET)) @@ -459,21 +528,41 @@ void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp) if (cp->flags & IP_VS_CONN_F_ONE_PACKET) return; + if (!ip_vs_sync_conn_needed(ipvs, cp, pkts)) + return; + spin_lock(&ipvs->sync_buff_lock); - if (!ipvs->sync_buff) { - ipvs->sync_buff = - ip_vs_sync_buff_create_v0(ipvs); - if (!ipvs->sync_buff) { + if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { + spin_unlock(&ipvs->sync_buff_lock); + return; + } + + id = select_master_thread_id(ipvs, cp); + ms = &ipvs->ms[id]; + buff = ms->sync_buff; + if (buff) { + m = (struct ip_vs_sync_mesg_v0 *) buff->mesg; + /* Send buffer if it is for v1 */ + if (!m->nr_conns) { + sb_queue_tail(ipvs, ms); + ms->sync_buff = NULL; + buff = NULL; + } + } + if (!buff) { + buff = ip_vs_sync_buff_create_v0(ipvs); + if (!buff) { spin_unlock(&ipvs->sync_buff_lock); pr_err("ip_vs_sync_buff_create failed.\n"); return; } + ms->sync_buff = buff; } len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE : SIMPLE_CONN_SIZE; - m = (struct ip_vs_sync_mesg_v0 *)ipvs->sync_buff->mesg; - s = (struct ip_vs_sync_conn_v0 *)ipvs->sync_buff->head; + m = (struct ip_vs_sync_mesg_v0 *) buff->mesg; + s = (struct ip_vs_sync_conn_v0 *) buff->head; /* copy members */ s->reserved = 0; @@ -494,18 +583,24 @@ void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp) m->nr_conns++; m->size += len; - ipvs->sync_buff->head += len; + buff->head += len; /* check if there is a space for next one */ - if (ipvs->sync_buff->head + FULL_CONN_SIZE > ipvs->sync_buff->end) { - sb_queue_tail(ipvs); - ipvs->sync_buff = NULL; + if (buff->head + FULL_CONN_SIZE > buff->end) { + sb_queue_tail(ipvs, ms); + ms->sync_buff = NULL; } spin_unlock(&ipvs->sync_buff_lock); /* synchronize its controller if it has */ - if (cp->control) - ip_vs_sync_conn(net, cp->control); + cp = cp->control; + if (cp) { + if (cp->flags & IP_VS_CONN_F_TEMPLATE) + pkts = atomic_add_return(1, &cp->in_pkts); + else + pkts = sysctl_sync_threshold(ipvs); + ip_vs_sync_conn(net, cp->control, pkts); + } } /* @@ -513,23 +608,29 @@ void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp) * Called by ip_vs_in. 
* Sending Version 1 messages */ -void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp) +void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts) { struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_sync_mesg *m; union ip_vs_sync_conn *s; + struct ip_vs_sync_buff *buff; + struct ipvs_master_sync_state *ms; + int id; __u8 *p; unsigned int len, pe_name_len, pad; /* Handle old version of the protocol */ if (sysctl_sync_ver(ipvs) == 0) { - ip_vs_sync_conn_v0(net, cp); + ip_vs_sync_conn_v0(net, cp, pkts); return; } /* Do not sync ONE PACKET */ if (cp->flags & IP_VS_CONN_F_ONE_PACKET) goto control; sloop: + if (!ip_vs_sync_conn_needed(ipvs, cp, pkts)) + goto control; + /* Sanity checks */ pe_name_len = 0; if (cp->pe_data_len) { @@ -541,6 +642,13 @@ sloop: } spin_lock(&ipvs->sync_buff_lock); + if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { + spin_unlock(&ipvs->sync_buff_lock); + return; + } + + id = select_master_thread_id(ipvs, cp); + ms = &ipvs->ms[id]; #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) @@ -559,27 +667,32 @@ sloop: /* check if there is a space for this one */ pad = 0; - if (ipvs->sync_buff) { - pad = (4 - (size_t)ipvs->sync_buff->head) & 3; - if (ipvs->sync_buff->head + len + pad > ipvs->sync_buff->end) { - sb_queue_tail(ipvs); - ipvs->sync_buff = NULL; + buff = ms->sync_buff; + if (buff) { + m = buff->mesg; + pad = (4 - (size_t) buff->head) & 3; + /* Send buffer if it is for v0 */ + if (buff->head + len + pad > buff->end || m->reserved) { + sb_queue_tail(ipvs, ms); + ms->sync_buff = NULL; + buff = NULL; pad = 0; } } - if (!ipvs->sync_buff) { - ipvs->sync_buff = ip_vs_sync_buff_create(ipvs); - if (!ipvs->sync_buff) { + if (!buff) { + buff = ip_vs_sync_buff_create(ipvs); + if (!buff) { spin_unlock(&ipvs->sync_buff_lock); pr_err("ip_vs_sync_buff_create failed.\n"); return; } + ms->sync_buff = buff; + m = buff->mesg; } - m = ipvs->sync_buff->mesg; - p = ipvs->sync_buff->head; - ipvs->sync_buff->head += pad + len; + p = buff->head; + buff->head += pad + len; m->size += pad + len; /* Add ev. padding from prev. sync_conn */ while (pad--) @@ -644,16 +757,10 @@ control: cp = cp->control; if (!cp) return; - /* - * Reduce sync rate for templates - * i.e only increment in_pkts for Templates. - */ - if (cp->flags & IP_VS_CONN_F_TEMPLATE) { - int pkts = atomic_add_return(1, &cp->in_pkts); - - if (pkts % sysctl_sync_period(ipvs) != 1) - return; - } + if (cp->flags & IP_VS_CONN_F_TEMPLATE) + pkts = atomic_add_return(1, &cp->in_pkts); + else + pkts = sysctl_sync_threshold(ipvs); goto sloop; } @@ -731,9 +838,32 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, else cp = ip_vs_ct_in_get(param); - if (cp && param->pe_data) /* Free pe_data */ + if (cp) { + /* Free pe_data */ kfree(param->pe_data); - if (!cp) { + + dest = cp->dest; + spin_lock(&cp->lock); + if ((cp->flags ^ flags) & IP_VS_CONN_F_INACTIVE && + !(flags & IP_VS_CONN_F_TEMPLATE) && dest) { + if (flags & IP_VS_CONN_F_INACTIVE) { + atomic_dec(&dest->activeconns); + atomic_inc(&dest->inactconns); + } else { + atomic_inc(&dest->activeconns); + atomic_dec(&dest->inactconns); + } + } + flags &= IP_VS_CONN_F_BACKUP_UPD_MASK; + flags |= cp->flags & ~IP_VS_CONN_F_BACKUP_UPD_MASK; + cp->flags = flags; + spin_unlock(&cp->lock); + if (!dest) { + dest = ip_vs_try_bind_dest(cp); + if (dest) + atomic_dec(&dest->refcnt); + } + } else { /* * Find the appropriate destination for the connection. 
* If it is not found the connection will remain unbound @@ -742,18 +872,6 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr, param->vport, protocol, fwmark, flags); - /* Set the approprite ativity flag */ - if (protocol == IPPROTO_TCP) { - if (state != IP_VS_TCP_S_ESTABLISHED) - flags |= IP_VS_CONN_F_INACTIVE; - else - flags &= ~IP_VS_CONN_F_INACTIVE; - } else if (protocol == IPPROTO_SCTP) { - if (state != IP_VS_SCTP_S_ESTABLISHED) - flags |= IP_VS_CONN_F_INACTIVE; - else - flags &= ~IP_VS_CONN_F_INACTIVE; - } cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark); if (dest) atomic_dec(&dest->refcnt); @@ -763,34 +881,6 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, IP_VS_DBG(2, "BACKUP, add new conn. failed\n"); return; } - } else if (!cp->dest) { - dest = ip_vs_try_bind_dest(cp); - if (dest) - atomic_dec(&dest->refcnt); - } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) && - (cp->state != state)) { - /* update active/inactive flag for the connection */ - dest = cp->dest; - if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && - (state != IP_VS_TCP_S_ESTABLISHED)) { - atomic_dec(&dest->activeconns); - atomic_inc(&dest->inactconns); - cp->flags |= IP_VS_CONN_F_INACTIVE; - } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) && - (state == IP_VS_TCP_S_ESTABLISHED)) { - atomic_inc(&dest->activeconns); - atomic_dec(&dest->inactconns); - cp->flags &= ~IP_VS_CONN_F_INACTIVE; - } - } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) && - (cp->state != state)) { - dest = cp->dest; - if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && - (state != IP_VS_SCTP_S_ESTABLISHED)) { - atomic_dec(&dest->activeconns); - atomic_inc(&dest->inactconns); - cp->flags &= ~IP_VS_CONN_F_INACTIVE; - } } if (opt) @@ -839,7 +929,7 @@ static void ip_vs_process_message_v0(struct net *net, const char *buffer, p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0); for (i=0; i<m->nr_conns; i++) { - unsigned flags, state; + unsigned int flags, state; if (p + SIMPLE_CONN_SIZE > buffer+buflen) { IP_VS_ERR_RL("BACKUP v0, bogus conn\n"); @@ -1109,7 +1199,7 @@ static void ip_vs_process_message(struct net *net, __u8 *buffer, for (i=0; i<nr_conns; i++) { union ip_vs_sync_conn *s; - unsigned size; + unsigned int size; int retc; p = msg_end; @@ -1149,6 +1239,28 @@ static void ip_vs_process_message(struct net *net, __u8 *buffer, /* + * Setup sndbuf (mode=1) or rcvbuf (mode=0) + */ +static void set_sock_size(struct sock *sk, int mode, int val) +{ + /* setsockopt(sock, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val)); */ + /* setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val)); */ + lock_sock(sk); + if (mode) { + val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2, + sysctl_wmem_max); + sk->sk_sndbuf = val * 2; + sk->sk_userlocks |= SOCK_SNDBUF_LOCK; + } else { + val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2, + sysctl_rmem_max); + sk->sk_rcvbuf = val * 2; + sk->sk_userlocks |= SOCK_RCVBUF_LOCK; + } + release_sock(sk); +} + +/* * Setup loopback of outgoing multicasts on a sending socket */ static void set_mcast_loop(struct sock *sk, u_char loop) @@ -1298,9 +1410,15 @@ static int bind_mcastif_addr(struct socket *sock, char *ifname) /* * Set up sending multicast socket over UDP */ -static struct socket *make_send_sock(struct net *net) +static struct socket *make_send_sock(struct net *net, int id) { struct netns_ipvs *ipvs = net_ipvs(net); + /* multicast addr */ + struct sockaddr_in mcast_addr = { + .sin_family = AF_INET, 
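	/*
	 * Note on the sockaddr built here (and mirrored in
	 * make_receive_sock() below): each sync thread id binds its own
	 * UDP port, IP_VS_SYNC_PORT + id, so with sync_ports > 1 every
	 * master thread talks to the matching backup thread on a distinct
	 * port of the same multicast group.
	 */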
+ .sin_port = cpu_to_be16(IP_VS_SYNC_PORT + id), + .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP), + }; struct socket *sock; int result; @@ -1324,6 +1442,9 @@ static struct socket *make_send_sock(struct net *net) set_mcast_loop(sock->sk, 0); set_mcast_ttl(sock->sk, 1); + result = sysctl_sync_sock_size(ipvs); + if (result > 0) + set_sock_size(sock->sk, 1, result); result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn); if (result < 0) { @@ -1349,9 +1470,15 @@ error: /* * Set up receiving multicast socket over UDP */ -static struct socket *make_receive_sock(struct net *net) +static struct socket *make_receive_sock(struct net *net, int id) { struct netns_ipvs *ipvs = net_ipvs(net); + /* multicast addr */ + struct sockaddr_in mcast_addr = { + .sin_family = AF_INET, + .sin_port = cpu_to_be16(IP_VS_SYNC_PORT + id), + .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP), + }; struct socket *sock; int result; @@ -1368,7 +1495,10 @@ static struct socket *make_receive_sock(struct net *net) */ sk_change_net(sock->sk, net); /* it is equivalent to the REUSEADDR option in user-space */ - sock->sk->sk_reuse = 1; + sock->sk->sk_reuse = SK_CAN_REUSE; + result = sysctl_sync_sock_size(ipvs); + if (result > 0) + set_sock_size(sock->sk, 0, result); result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr, sizeof(struct sockaddr)); @@ -1411,18 +1541,22 @@ ip_vs_send_async(struct socket *sock, const char *buffer, const size_t length) return len; } -static void +static int ip_vs_send_sync_msg(struct socket *sock, struct ip_vs_sync_mesg *msg) { int msize; + int ret; msize = msg->size; /* Put size in network byte order */ msg->size = htons(msg->size); - if (ip_vs_send_async(sock, (char *)msg, msize) != msize) - pr_err("ip_vs_send_async error\n"); + ret = ip_vs_send_async(sock, (char *)msg, msize); + if (ret >= 0 || ret == -EAGAIN) + return ret; + pr_err("ip_vs_send_async error %d\n", ret); + return 0; } static int @@ -1438,48 +1572,90 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen) iov.iov_base = buffer; iov.iov_len = (size_t)buflen; - len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, 0); + len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, MSG_DONTWAIT); if (len < 0) - return -1; + return len; LeaveFunction(7); return len; } +/* Wakeup the master thread for sending */ +static void master_wakeup_work_handler(struct work_struct *work) +{ + struct ipvs_master_sync_state *ms = + container_of(work, struct ipvs_master_sync_state, + master_wakeup_work.work); + struct netns_ipvs *ipvs = ms->ipvs; + + spin_lock_bh(&ipvs->sync_lock); + if (ms->sync_queue_len && + ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) { + ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE; + wake_up_process(ms->master_thread); + } + spin_unlock_bh(&ipvs->sync_lock); +} + +/* Get next buffer to send */ +static inline struct ip_vs_sync_buff * +next_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms) +{ + struct ip_vs_sync_buff *sb; + + sb = sb_dequeue(ipvs, ms); + if (sb) + return sb; + /* Do not delay entries in buffer for more than 2 seconds */ + return get_curr_sync_buff(ipvs, ms, IPVS_SYNC_FLUSH_TIME); +} static int sync_thread_master(void *data) { struct ip_vs_sync_thread_data *tinfo = data; struct netns_ipvs *ipvs = net_ipvs(tinfo->net); + struct ipvs_master_sync_state *ms = &ipvs->ms[tinfo->id]; + struct sock *sk = tinfo->sock->sk; struct ip_vs_sync_buff *sb; pr_info("sync thread started: state = MASTER, mcast_ifn = %s, " - "syncid = %d\n", - ipvs->master_mcast_ifn, ipvs->master_syncid); + "syncid = %d, 
id = %d\n", + ipvs->master_mcast_ifn, ipvs->master_syncid, tinfo->id); - while (!kthread_should_stop()) { - while ((sb = sb_dequeue(ipvs))) { - ip_vs_send_sync_msg(tinfo->sock, sb->mesg); - ip_vs_sync_buff_release(sb); + for (;;) { + sb = next_sync_buff(ipvs, ms); + if (unlikely(kthread_should_stop())) + break; + if (!sb) { + schedule_timeout(IPVS_SYNC_CHECK_PERIOD); + continue; } - - /* check if entries stay in ipvs->sync_buff for 2 seconds */ - sb = get_curr_sync_buff(ipvs, 2 * HZ); - if (sb) { - ip_vs_send_sync_msg(tinfo->sock, sb->mesg); - ip_vs_sync_buff_release(sb); + while (ip_vs_send_sync_msg(tinfo->sock, sb->mesg) < 0) { + int ret = 0; + + __wait_event_interruptible(*sk_sleep(sk), + sock_writeable(sk) || + kthread_should_stop(), + ret); + if (unlikely(kthread_should_stop())) + goto done; } - - schedule_timeout_interruptible(HZ); + ip_vs_sync_buff_release(sb); } +done: + __set_current_state(TASK_RUNNING); + if (sb) + ip_vs_sync_buff_release(sb); + /* clean up the sync_buff queue */ - while ((sb = sb_dequeue(ipvs))) + while ((sb = sb_dequeue(ipvs, ms))) ip_vs_sync_buff_release(sb); + __set_current_state(TASK_RUNNING); /* clean up the current sync_buff */ - sb = get_curr_sync_buff(ipvs, 0); + sb = get_curr_sync_buff(ipvs, ms, 0); if (sb) ip_vs_sync_buff_release(sb); @@ -1498,8 +1674,8 @@ static int sync_thread_backup(void *data) int len; pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, " - "syncid = %d\n", - ipvs->backup_mcast_ifn, ipvs->backup_syncid); + "syncid = %d, id = %d\n", + ipvs->backup_mcast_ifn, ipvs->backup_syncid, tinfo->id); while (!kthread_should_stop()) { wait_event_interruptible(*sk_sleep(tinfo->sock->sk), @@ -1511,7 +1687,8 @@ static int sync_thread_backup(void *data) len = ip_vs_receive(tinfo->sock, tinfo->buf, ipvs->recv_mesg_maxlen); if (len <= 0) { - pr_err("receiving message error\n"); + if (len != -EAGAIN) + pr_err("receiving message error\n"); break; } @@ -1535,86 +1712,140 @@ static int sync_thread_backup(void *data) int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) { struct ip_vs_sync_thread_data *tinfo; - struct task_struct **realtask, *task; + struct task_struct **array = NULL, *task; struct socket *sock; struct netns_ipvs *ipvs = net_ipvs(net); - char *name, *buf = NULL; + char *name; int (*threadfn)(void *data); + int id, count; int result = -ENOMEM; IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", sizeof(struct ip_vs_sync_conn_v0)); + if (!ipvs->sync_state) { + count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX); + ipvs->threads_mask = count - 1; + } else + count = ipvs->threads_mask + 1; if (state == IP_VS_STATE_MASTER) { - if (ipvs->master_thread) + if (ipvs->ms) return -EEXIST; strlcpy(ipvs->master_mcast_ifn, mcast_ifn, sizeof(ipvs->master_mcast_ifn)); ipvs->master_syncid = syncid; - realtask = &ipvs->master_thread; - name = "ipvs_master:%d"; + name = "ipvs-m:%d:%d"; threadfn = sync_thread_master; - sock = make_send_sock(net); } else if (state == IP_VS_STATE_BACKUP) { - if (ipvs->backup_thread) + if (ipvs->backup_threads) return -EEXIST; strlcpy(ipvs->backup_mcast_ifn, mcast_ifn, sizeof(ipvs->backup_mcast_ifn)); ipvs->backup_syncid = syncid; - realtask = &ipvs->backup_thread; - name = "ipvs_backup:%d"; + name = "ipvs-b:%d:%d"; threadfn = sync_thread_backup; - sock = make_receive_sock(net); } else { return -EINVAL; } - if (IS_ERR(sock)) { - result = PTR_ERR(sock); - goto out; - } + if (state == IP_VS_STATE_MASTER) { + 
struct ipvs_master_sync_state *ms; - set_sync_mesg_maxlen(net, state); - if (state == IP_VS_STATE_BACKUP) { - buf = kmalloc(ipvs->recv_mesg_maxlen, GFP_KERNEL); - if (!buf) - goto outsocket; + ipvs->ms = kzalloc(count * sizeof(ipvs->ms[0]), GFP_KERNEL); + if (!ipvs->ms) + goto out; + ms = ipvs->ms; + for (id = 0; id < count; id++, ms++) { + INIT_LIST_HEAD(&ms->sync_queue); + ms->sync_queue_len = 0; + ms->sync_queue_delay = 0; + INIT_DELAYED_WORK(&ms->master_wakeup_work, + master_wakeup_work_handler); + ms->ipvs = ipvs; + } + } else { + array = kzalloc(count * sizeof(struct task_struct *), + GFP_KERNEL); + if (!array) + goto out; } + set_sync_mesg_maxlen(net, state); - tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL); - if (!tinfo) - goto outbuf; - - tinfo->net = net; - tinfo->sock = sock; - tinfo->buf = buf; + tinfo = NULL; + for (id = 0; id < count; id++) { + if (state == IP_VS_STATE_MASTER) + sock = make_send_sock(net, id); + else + sock = make_receive_sock(net, id); + if (IS_ERR(sock)) { + result = PTR_ERR(sock); + goto outtinfo; + } + tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL); + if (!tinfo) + goto outsocket; + tinfo->net = net; + tinfo->sock = sock; + if (state == IP_VS_STATE_BACKUP) { + tinfo->buf = kmalloc(ipvs->recv_mesg_maxlen, + GFP_KERNEL); + if (!tinfo->buf) + goto outtinfo; + } + tinfo->id = id; - task = kthread_run(threadfn, tinfo, name, ipvs->gen); - if (IS_ERR(task)) { - result = PTR_ERR(task); - goto outtinfo; + task = kthread_run(threadfn, tinfo, name, ipvs->gen, id); + if (IS_ERR(task)) { + result = PTR_ERR(task); + goto outtinfo; + } + tinfo = NULL; + if (state == IP_VS_STATE_MASTER) + ipvs->ms[id].master_thread = task; + else + array[id] = task; } /* mark as active */ - *realtask = task; + + if (state == IP_VS_STATE_BACKUP) + ipvs->backup_threads = array; + spin_lock_bh(&ipvs->sync_buff_lock); ipvs->sync_state |= state; + spin_unlock_bh(&ipvs->sync_buff_lock); /* increase the module use count */ ip_vs_use_count_inc(); return 0; -outtinfo: - kfree(tinfo); -outbuf: - kfree(buf); outsocket: sk_release_kernel(sock->sk); + +outtinfo: + if (tinfo) { + sk_release_kernel(tinfo->sock->sk); + kfree(tinfo->buf); + kfree(tinfo); + } + count = id; + while (count-- > 0) { + if (state == IP_VS_STATE_MASTER) + kthread_stop(ipvs->ms[count].master_thread); + else + kthread_stop(array[count]); + } + kfree(array); + out: + if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { + kfree(ipvs->ms); + ipvs->ms = NULL; + } return result; } @@ -1622,38 +1853,60 @@ out: int stop_sync_thread(struct net *net, int state) { struct netns_ipvs *ipvs = net_ipvs(net); + struct task_struct **array; + int id; int retc = -EINVAL; IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); if (state == IP_VS_STATE_MASTER) { - if (!ipvs->master_thread) + if (!ipvs->ms) return -ESRCH; - pr_info("stopping master sync thread %d ...\n", - task_pid_nr(ipvs->master_thread)); - /* * The lock synchronizes with sb_queue_tail(), so that we don't * add sync buffers to the queue, when we are already in * progress of stopping the master sync daemon. 
*/ - spin_lock_bh(&ipvs->sync_lock); + spin_lock_bh(&ipvs->sync_buff_lock); + spin_lock(&ipvs->sync_lock); ipvs->sync_state &= ~IP_VS_STATE_MASTER; - spin_unlock_bh(&ipvs->sync_lock); - retc = kthread_stop(ipvs->master_thread); - ipvs->master_thread = NULL; + spin_unlock(&ipvs->sync_lock); + spin_unlock_bh(&ipvs->sync_buff_lock); + + retc = 0; + for (id = ipvs->threads_mask; id >= 0; id--) { + struct ipvs_master_sync_state *ms = &ipvs->ms[id]; + int ret; + + pr_info("stopping master sync thread %d ...\n", + task_pid_nr(ms->master_thread)); + cancel_delayed_work_sync(&ms->master_wakeup_work); + ret = kthread_stop(ms->master_thread); + if (retc >= 0) + retc = ret; + } + kfree(ipvs->ms); + ipvs->ms = NULL; } else if (state == IP_VS_STATE_BACKUP) { - if (!ipvs->backup_thread) + if (!ipvs->backup_threads) return -ESRCH; - pr_info("stopping backup sync thread %d ...\n", - task_pid_nr(ipvs->backup_thread)); - ipvs->sync_state &= ~IP_VS_STATE_BACKUP; - retc = kthread_stop(ipvs->backup_thread); - ipvs->backup_thread = NULL; + array = ipvs->backup_threads; + retc = 0; + for (id = ipvs->threads_mask; id >= 0; id--) { + int ret; + + pr_info("stopping backup sync thread %d ...\n", + task_pid_nr(array[id])); + ret = kthread_stop(array[id]); + if (retc >= 0) + retc = ret; + } + kfree(array); + ipvs->backup_threads = NULL; } /* decrease the module use count */ @@ -1670,13 +1923,8 @@ int __net_init ip_vs_sync_net_init(struct net *net) struct netns_ipvs *ipvs = net_ipvs(net); __mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key); - INIT_LIST_HEAD(&ipvs->sync_queue); spin_lock_init(&ipvs->sync_lock); spin_lock_init(&ipvs->sync_buff_lock); - - ipvs->sync_mcast_addr.sin_family = AF_INET; - ipvs->sync_mcast_addr.sin_port = cpu_to_be16(IP_VS_SYNC_PORT); - ipvs->sync_mcast_addr.sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP); return 0; } diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c index fd0d4e09876..231be7dd547 100644 --- a/net/netfilter/ipvs/ip_vs_wrr.c +++ b/net/netfilter/ipvs/ip_vs_wrr.c @@ -84,7 +84,7 @@ static int ip_vs_wrr_init_svc(struct ip_vs_service *svc) /* * Allocate the mark variable for WRR scheduling */ - mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_ATOMIC); + mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_KERNEL); if (mark == NULL) return -ENOMEM; diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c index f4f8cda0598..d61e0782a79 100644 --- a/net/netfilter/nf_conntrack_acct.c +++ b/net/netfilter/nf_conntrack_acct.c @@ -69,8 +69,8 @@ static int nf_conntrack_acct_init_sysctl(struct net *net) table[0].data = &net->ct.sysctl_acct; - net->ct.acct_sysctl_header = register_net_sysctl_table(net, - nf_net_netfilter_sysctl_path, table); + net->ct.acct_sysctl_header = register_net_sysctl(net, "net/netfilter", + table); if (!net->ct.acct_sysctl_header) { printk(KERN_ERR "nf_conntrack_acct: can't register to sysctl.\n"); goto out_register; diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c index 13fd2c55e32..f2de8c55ac5 100644 --- a/net/netfilter/nf_conntrack_amanda.c +++ b/net/netfilter/nf_conntrack_amanda.c @@ -107,8 +107,7 @@ static int amanda_help(struct sk_buff *skb, /* No data? 
*/ dataoff = protoff + sizeof(struct udphdr); if (dataoff >= skb->len) { - if (net_ratelimit()) - printk(KERN_ERR "amanda_help: skblen = %u\n", skb->len); + net_err_ratelimited("amanda_help: skblen = %u\n", skb->len); return NF_ACCEPT; } diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 729f157a0ef..ac3af97cc46 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -683,10 +683,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone, unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { if (!early_drop(net, hash_bucket(hash, net))) { atomic_dec(&net->ct.count); - if (net_ratelimit()) - printk(KERN_WARNING - "nf_conntrack: table full, dropping" - " packet.\n"); + net_warn_ratelimited("nf_conntrack: table full, dropping packet\n"); return ERR_PTR(-ENOMEM); } } @@ -1152,8 +1149,9 @@ static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = { int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple) { - NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port); - NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port); + if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) || + nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port)) + goto nla_put_failure; return 0; nla_put_failure: @@ -1335,7 +1333,6 @@ static void nf_conntrack_cleanup_init_net(void) while (untrack_refs() > 0) schedule(); - nf_conntrack_helper_fini(); nf_conntrack_proto_fini(); #ifdef CONFIG_NF_CONNTRACK_ZONES nf_ct_extend_unregister(&nf_ct_zone_extend); @@ -1353,6 +1350,7 @@ static void nf_conntrack_cleanup_net(struct net *net) } nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); + nf_conntrack_helper_fini(net); nf_conntrack_timeout_fini(net); nf_conntrack_ecache_fini(net); nf_conntrack_tstamp_fini(net); @@ -1503,10 +1501,6 @@ static int nf_conntrack_init_init_net(void) if (ret < 0) goto err_proto; - ret = nf_conntrack_helper_init(); - if (ret < 0) - goto err_helper; - #ifdef CONFIG_NF_CONNTRACK_ZONES ret = nf_ct_extend_register(&nf_ct_zone_extend); if (ret < 0) @@ -1524,10 +1518,8 @@ static int nf_conntrack_init_init_net(void) #ifdef CONFIG_NF_CONNTRACK_ZONES err_extend: - nf_conntrack_helper_fini(); -#endif -err_helper: nf_conntrack_proto_fini(); +#endif err_proto: return ret; } @@ -1588,9 +1580,14 @@ static int nf_conntrack_init_net(struct net *net) ret = nf_conntrack_timeout_init(net); if (ret < 0) goto err_timeout; + ret = nf_conntrack_helper_init(net); + if (ret < 0) + goto err_helper; return 0; +err_helper: + nf_conntrack_timeout_fini(net); err_timeout: nf_conntrack_ecache_fini(net); err_ecache: diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index 5bd3047ddee..e7be79e640d 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c @@ -84,7 +84,7 @@ EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events); int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *new) { - int ret = 0; + int ret; struct nf_ct_event_notifier *notify; mutex_lock(&nf_ct_ecache_mutex); @@ -95,8 +95,7 @@ int nf_conntrack_register_notifier(struct net *net, goto out_unlock; } rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new); - mutex_unlock(&nf_ct_ecache_mutex); - return ret; + ret = 0; out_unlock: mutex_unlock(&nf_ct_ecache_mutex); @@ -121,7 +120,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *new) { - int ret = 0; + int 
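/*
 * Sketch of the logging conversion applied in the conntrack hunks above
 * and below: the open-coded
 *
 *	if (net_ratelimit())
 *		printk(KERN_WARNING "nf_conntrack: table full, dropping packet.\n");
 *
 * pattern collapses into a single call such as
 *
 *	net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
 *
 * with net_err_ratelimited()/net_info_ratelimited() covering the other
 * severities used here.
 */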
ret; struct nf_exp_event_notifier *notify; mutex_lock(&nf_ct_ecache_mutex); @@ -132,8 +131,7 @@ int nf_ct_expect_register_notifier(struct net *net, goto out_unlock; } rcu_assign_pointer(net->ct.nf_expect_event_cb, new); - mutex_unlock(&nf_ct_ecache_mutex); - return ret; + ret = 0; out_unlock: mutex_unlock(&nf_ct_ecache_mutex); @@ -199,8 +197,7 @@ static int nf_conntrack_event_init_sysctl(struct net *net) table[1].data = &net->ct.sysctl_events_retry_timeout; net->ct.event_sysctl_header = - register_net_sysctl_table(net, - nf_net_netfilter_sysctl_path, table); + register_net_sysctl(net, "net/netfilter", table); if (!net->ct.event_sysctl_header) { printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n"); goto out_register; diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 4147ba3f653..45cf602a76b 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -424,9 +424,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) } if (net->ct.expect_count >= nf_ct_expect_max) { - if (net_ratelimit()) - printk(KERN_WARNING - "nf_conntrack: expectation table full\n"); + net_warn_ratelimited("nf_conntrack: expectation table full\n"); ret = -EMFILE; } out: diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 722291f8af7..46d69d7f1bb 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -605,8 +605,7 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff, drop: spin_unlock_bh(&nf_h323_lock); - if (net_ratelimit()) - pr_info("nf_ct_h245: packet dropped\n"); + net_info_ratelimited("nf_ct_h245: packet dropped\n"); return NF_DROP; } @@ -1156,8 +1155,7 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff, drop: spin_unlock_bh(&nf_h323_lock); - if (net_ratelimit()) - pr_info("nf_ct_q931: packet dropped\n"); + net_info_ratelimited("nf_ct_q931: packet dropped\n"); return NF_DROP; } @@ -1230,7 +1228,7 @@ static struct nf_conntrack_expect *find_expect(struct nf_conn *ct, /****************************************************************************/ static int set_expect_timeout(struct nf_conntrack_expect *exp, - unsigned timeout) + unsigned int timeout) { if (!exp || !del_timer(&exp->timeout)) return 0; @@ -1731,8 +1729,7 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff, drop: spin_unlock_bh(&nf_h323_lock); - if (net_ratelimit()) - pr_info("nf_ct_ras: packet dropped\n"); + net_info_ratelimited("nf_ct_ras: packet dropped\n"); return NF_DROP; } @@ -1833,4 +1830,6 @@ MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>"); MODULE_DESCRIPTION("H.323 connection tracking helper"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ip_conntrack_h323"); -MODULE_ALIAS_NFCT_HELPER("h323"); +MODULE_ALIAS_NFCT_HELPER("RAS"); +MODULE_ALIAS_NFCT_HELPER("Q.931"); +MODULE_ALIAS_NFCT_HELPER("H.245"); diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 436b7cb79ba..4fa2ff961f5 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -34,6 +34,67 @@ static struct hlist_head *nf_ct_helper_hash __read_mostly; static unsigned int nf_ct_helper_hsize __read_mostly; static unsigned int nf_ct_helper_count __read_mostly; +static bool nf_ct_auto_assign_helper __read_mostly = true; +module_param_named(nf_conntrack_helper, nf_ct_auto_assign_helper, bool, 0644); +MODULE_PARM_DESC(nf_conntrack_helper, + "Enable automatic conntrack helper 
assignment (default 1)"); + +#ifdef CONFIG_SYSCTL +static struct ctl_table helper_sysctl_table[] = { + { + .procname = "nf_conntrack_helper", + .data = &init_net.ct.sysctl_auto_assign_helper, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + {} +}; + +static int nf_conntrack_helper_init_sysctl(struct net *net) +{ + struct ctl_table *table; + + table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table), + GFP_KERNEL); + if (!table) + goto out; + + table[0].data = &net->ct.sysctl_auto_assign_helper; + + net->ct.helper_sysctl_header = + register_net_sysctl(net, "net/netfilter", table); + + if (!net->ct.helper_sysctl_header) { + pr_err("nf_conntrack_helper: can't register to sysctl.\n"); + goto out_register; + } + return 0; + +out_register: + kfree(table); +out: + return -ENOMEM; +} + +static void nf_conntrack_helper_fini_sysctl(struct net *net) +{ + struct ctl_table *table; + + table = net->ct.helper_sysctl_header->ctl_table_arg; + unregister_net_sysctl_table(net->ct.helper_sysctl_header); + kfree(table); +} +#else +static int nf_conntrack_helper_init_sysctl(struct net *net) +{ + return 0; +} + +static void nf_conntrack_helper_fini_sysctl(struct net *net) +{ +} +#endif /* CONFIG_SYSCTL */ /* Stupid hash, but collision free for the default registrations of the * helpers currently in the kernel. */ @@ -118,17 +179,38 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, { struct nf_conntrack_helper *helper = NULL; struct nf_conn_help *help; + struct net *net = nf_ct_net(ct); int ret = 0; + /* We already got a helper explicitly attached. The function + * nf_conntrack_alter_reply - in case NAT is in use - asks for looking + * the helper up again. Since now the user is in full control of + * making consistent helper configurations, skip this automatic + * re-lookup, otherwise we'll lose the helper. + */ + if (test_bit(IPS_HELPER_BIT, &ct->status)) + return 0; + if (tmpl != NULL) { help = nfct_help(tmpl); - if (help != NULL) + if (help != NULL) { helper = help->helper; + set_bit(IPS_HELPER_BIT, &ct->status); + } } help = nfct_help(ct); - if (helper == NULL) + if (net->ct.sysctl_auto_assign_helper && helper == NULL) { helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); + if (unlikely(!net->ct.auto_assign_helper_warned && helper)) { + pr_info("nf_conntrack: automatic helper " + "assignment is deprecated and it will " + "be removed soon. 
Use the iptables CT target " + "to attach helpers instead.\n"); + net->ct.auto_assign_helper_warned = true; + } + } + if (helper == NULL) { if (help) RCU_INIT_POINTER(help->helper, NULL); @@ -315,28 +397,44 @@ static struct nf_ct_ext_type helper_extend __read_mostly = { .id = NF_CT_EXT_HELPER, }; -int nf_conntrack_helper_init(void) +int nf_conntrack_helper_init(struct net *net) { int err; - nf_ct_helper_hsize = 1; /* gets rounded up to use one page */ - nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0); - if (!nf_ct_helper_hash) - return -ENOMEM; + net->ct.auto_assign_helper_warned = false; + net->ct.sysctl_auto_assign_helper = nf_ct_auto_assign_helper; + + if (net_eq(net, &init_net)) { + nf_ct_helper_hsize = 1; /* gets rounded up to use one page */ + nf_ct_helper_hash = + nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0); + if (!nf_ct_helper_hash) + return -ENOMEM; - err = nf_ct_extend_register(&helper_extend); + err = nf_ct_extend_register(&helper_extend); + if (err < 0) + goto err1; + } + + err = nf_conntrack_helper_init_sysctl(net); if (err < 0) - goto err1; + goto out_sysctl; return 0; +out_sysctl: + if (net_eq(net, &init_net)) + nf_ct_extend_unregister(&helper_extend); err1: nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize); return err; } -void nf_conntrack_helper_fini(void) +void nf_conntrack_helper_fini(struct net *net) { - nf_ct_extend_unregister(&helper_extend); - nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize); + nf_conntrack_helper_fini_sysctl(net); + if (net_eq(net, &init_net)) { + nf_ct_extend_unregister(&helper_extend); + nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize); + } } diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c index 4f9390b9869..81366c11827 100644 --- a/net/netfilter/nf_conntrack_irc.c +++ b/net/netfilter/nf_conntrack_irc.c @@ -185,11 +185,9 @@ static int help(struct sk_buff *skb, unsigned int protoff, tuple = &ct->tuplehash[dir].tuple; if (tuple->src.u3.ip != dcc_ip && tuple->dst.u3.ip != dcc_ip) { - if (net_ratelimit()) - printk(KERN_WARNING - "Forged DCC command from %pI4: %pI4:%u\n", - &tuple->src.u3.ip, - &dcc_ip, dcc_port); + net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n", + &tuple->src.u3.ip, + &dcc_ip, dcc_port); continue; } diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index ca7e8354e4f..6f4b00a8fc7 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -66,7 +66,8 @@ ctnetlink_dump_tuples_proto(struct sk_buff *skb, nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED); if (!nest_parms) goto nla_put_failure; - NLA_PUT_U8(skb, CTA_PROTO_NUM, tuple->dst.protonum); + if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum)) + goto nla_put_failure; if (likely(l4proto->tuple_to_nlattr)) ret = l4proto->tuple_to_nlattr(skb, tuple); @@ -126,7 +127,8 @@ ctnetlink_dump_tuples(struct sk_buff *skb, static inline int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct) { - NLA_PUT_BE32(skb, CTA_STATUS, htonl(ct->status)); + if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status))) + goto nla_put_failure; return 0; nla_put_failure: @@ -141,7 +143,8 @@ ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct) if (timeout < 0) timeout = 0; - NLA_PUT_BE32(skb, CTA_TIMEOUT, htonl(timeout)); + if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout))) + goto nla_put_failure; return 0; nla_put_failure: @@ -190,7 +193,8 @@ ctnetlink_dump_helpinfo(struct sk_buff *skb, 
const struct nf_conn *ct) nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED); if (!nest_helper) goto nla_put_failure; - NLA_PUT_STRING(skb, CTA_HELP_NAME, helper->name); + if (nla_put_string(skb, CTA_HELP_NAME, helper->name)) + goto nla_put_failure; if (helper->to_nlattr) helper->to_nlattr(skb, ct); @@ -214,8 +218,9 @@ dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes, if (!nest_count) goto nla_put_failure; - NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)); - NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)); + if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) || + nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes))) + goto nla_put_failure; nla_nest_end(skb, nest_count); @@ -260,11 +265,10 @@ ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct) if (!nest_count) goto nla_put_failure; - NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)); - if (tstamp->stop != 0) { - NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP, - cpu_to_be64(tstamp->stop)); - } + if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) || + (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP, + cpu_to_be64(tstamp->stop)))) + goto nla_put_failure; nla_nest_end(skb, nest_count); return 0; @@ -277,7 +281,8 @@ nla_put_failure: static inline int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) { - NLA_PUT_BE32(skb, CTA_MARK, htonl(ct->mark)); + if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark))) + goto nla_put_failure; return 0; nla_put_failure: @@ -304,7 +309,8 @@ ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct) if (!nest_secctx) goto nla_put_failure; - NLA_PUT_STRING(skb, CTA_SECCTX_NAME, secctx); + if (nla_put_string(skb, CTA_SECCTX_NAME, secctx)) + goto nla_put_failure; nla_nest_end(skb, nest_secctx); ret = 0; @@ -349,12 +355,13 @@ dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type) if (!nest_parms) goto nla_put_failure; - NLA_PUT_BE32(skb, CTA_NAT_SEQ_CORRECTION_POS, - htonl(natseq->correction_pos)); - NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_BEFORE, - htonl(natseq->offset_before)); - NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_AFTER, - htonl(natseq->offset_after)); + if (nla_put_be32(skb, CTA_NAT_SEQ_CORRECTION_POS, + htonl(natseq->correction_pos)) || + nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_BEFORE, + htonl(natseq->offset_before)) || + nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_AFTER, + htonl(natseq->offset_after))) + goto nla_put_failure; nla_nest_end(skb, nest_parms); @@ -390,7 +397,8 @@ ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct) static inline int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct) { - NLA_PUT_BE32(skb, CTA_ID, htonl((unsigned long)ct)); + if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct))) + goto nla_put_failure; return 0; nla_put_failure: @@ -400,7 +408,8 @@ nla_put_failure: static inline int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct) { - NLA_PUT_BE32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))); + if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -440,8 +449,9 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type, goto nla_put_failure; nla_nest_end(skb, nest_parms); - if (nf_ct_zone(ct)) - NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct))); + if (nf_ct_zone(ct) && + nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct)))) + goto nla_put_failure; if (ctnetlink_dump_status(skb, ct) < 0 || 
ctnetlink_dump_timeout(skb, ct) < 0 || @@ -617,8 +627,9 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) goto nla_put_failure; nla_nest_end(skb, nest_parms); - if (nf_ct_zone(ct)) - NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct))); + if (nf_ct_zone(ct) && + nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct)))) + goto nla_put_failure; if (ctnetlink_dump_id(skb, ct) < 0) goto nla_put_failure; @@ -1705,7 +1716,8 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb, if (!nest_parms) goto nla_put_failure; - NLA_PUT_BE32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)); + if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir))) + goto nla_put_failure; nat_tuple.src.l3num = nf_ct_l3num(master); nat_tuple.src.u3.ip = exp->saved_ip; @@ -1718,21 +1730,24 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb, nla_nest_end(skb, nest_parms); } #endif - NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)); - NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)); - NLA_PUT_BE32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)); - NLA_PUT_BE32(skb, CTA_EXPECT_CLASS, htonl(exp->class)); + if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) || + nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) || + nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) || + nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class))) + goto nla_put_failure; help = nfct_help(master); if (help) { struct nf_conntrack_helper *helper; helper = rcu_dereference(help->helper); - if (helper) - NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name); + if (helper && + nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name)) + goto nla_put_failure; } expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn); - if (expfn != NULL) - NLA_PUT_STRING(skb, CTA_EXPECT_FN, expfn->name); + if (expfn != NULL && + nla_put_string(skb, CTA_EXPECT_FN, expfn->name)) + goto nla_put_failure; return 0; @@ -2065,7 +2080,15 @@ static int ctnetlink_change_expect(struct nf_conntrack_expect *x, const struct nlattr * const cda[]) { - return -EOPNOTSUPP; + if (cda[CTA_EXPECT_TIMEOUT]) { + if (!del_timer(&x->timeout)) + return -ETIME; + + x->timeout.expires = jiffies + + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ; + add_timer(&x->timeout); + } + return 0; } static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = { diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index be3da2c8cdc..8b631b07a64 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -36,11 +36,11 @@ static DEFINE_MUTEX(nf_ct_proto_mutex); #ifdef CONFIG_SYSCTL static int -nf_ct_register_sysctl(struct ctl_table_header **header, struct ctl_path *path, +nf_ct_register_sysctl(struct ctl_table_header **header, const char *path, struct ctl_table *table, unsigned int *users) { if (*header == NULL) { - *header = register_sysctl_paths(path, table); + *header = register_net_sysctl(&init_net, path, table); if (*header == NULL) return -ENOMEM; } @@ -56,7 +56,7 @@ nf_ct_unregister_sysctl(struct ctl_table_header **header, if (users != NULL && --*users > 0) return; - unregister_sysctl_table(*header); + unregister_net_sysctl_table(*header); *header = NULL; } #endif @@ -250,7 +250,7 @@ static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto) #ifdef CONFIG_SYSCTL if (l4proto->ctl_table != NULL) { err = nf_ct_register_sysctl(l4proto->ctl_table_header, - nf_net_netfilter_sysctl_path, + "net/netfilter", l4proto->ctl_table, l4proto->ctl_table_users); if (err < 0) @@ -259,7 
+259,7 @@ static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto) #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT if (l4proto->ctl_compat_table != NULL) { err = nf_ct_register_sysctl(&l4proto->ctl_compat_table_header, - nf_net_ipv4_netfilter_sysctl_path, + "net/ipv4/netfilter", l4proto->ctl_compat_table, NULL); if (err == 0) goto out; diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 24fdce256cb..ef706a485be 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -643,11 +643,12 @@ static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED); if (!nest_parms) goto nla_put_failure; - NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state); - NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_ROLE, - ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]); - NLA_PUT_BE64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ, - cpu_to_be64(ct->proto.dccp.handshake_seq)); + if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state) || + nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE, + ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) || + nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ, + cpu_to_be64(ct->proto.dccp.handshake_seq))) + goto nla_put_failure; nla_nest_end(skb, nest_parms); spin_unlock_bh(&ct->lock); return 0; @@ -739,9 +740,10 @@ dccp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) const unsigned int *timeouts = data; int i; - for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) - NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ)); - + for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) { + if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ))) + goto nla_put_failure; + } return 0; nla_put_failure: @@ -908,8 +910,8 @@ static __net_init int dccp_net_init(struct net *net) dn->sysctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT]; dn->sysctl_table[7].data = &dn->dccp_loose; - dn->sysctl_header = register_net_sysctl_table(net, - nf_net_netfilter_sysctl_path, dn->sysctl_table); + dn->sysctl_header = register_net_sysctl(net, "net/netfilter", + dn->sysctl_table); if (!dn->sysctl_header) { kfree(dn->sysctl_table); return -ENOMEM; diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c index 835e24c58f0..d8923d54b35 100644 --- a/net/netfilter/nf_conntrack_proto_generic.c +++ b/net/netfilter/nf_conntrack_proto_generic.c @@ -90,7 +90,8 @@ generic_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeout = data; - NLA_PUT_BE32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ)); + if (nla_put_be32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ))) + goto nla_put_failure; return 0; diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index 659648c4b14..4bf6b4e4b77 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -321,10 +321,11 @@ gre_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeouts = data; - NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_UNREPLIED, - htonl(timeouts[GRE_CT_UNREPLIED] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_REPLIED, - htonl(timeouts[GRE_CT_REPLIED] / HZ)); + if (nla_put_be32(skb, CTA_TIMEOUT_GRE_UNREPLIED, + htonl(timeouts[GRE_CT_UNREPLIED] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_GRE_REPLIED, + htonl(timeouts[GRE_CT_REPLIED] / HZ))) + goto nla_put_failure; return 0; nla_put_failure: diff --git 
a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 72b5088592d..996db2fa21f 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -482,15 +482,12 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, if (!nest_parms) goto nla_put_failure; - NLA_PUT_U8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state); - - NLA_PUT_BE32(skb, - CTA_PROTOINFO_SCTP_VTAG_ORIGINAL, - ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]); - - NLA_PUT_BE32(skb, - CTA_PROTOINFO_SCTP_VTAG_REPLY, - ct->proto.sctp.vtag[IP_CT_DIR_REPLY]); + if (nla_put_u8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state) || + nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_ORIGINAL, + ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]) || + nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_REPLY, + ct->proto.sctp.vtag[IP_CT_DIR_REPLY])) + goto nla_put_failure; spin_unlock_bh(&ct->lock); @@ -578,9 +575,10 @@ sctp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) const unsigned int *timeouts = data; int i; - for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) - NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ)); - + for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) { + if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ))) + goto nla_put_failure; + } return 0; nla_put_failure: diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 0d07a1dcf60..21ff1a99f53 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -952,7 +952,8 @@ static int tcp_packet(struct nf_conn *ct, spin_unlock_bh(&ct->lock); if (LOG_INVALID(net, IPPROTO_TCP)) nf_log_packet(pf, 0, skb, NULL, NULL, NULL, - "nf_ct_tcp: invalid packet ignored "); + "nf_ct_tcp: invalid packet ignored in " + "state %s ", tcp_conntrack_names[old_state]); return NF_ACCEPT; case TCP_CONNTRACK_MAX: /* Invalid packet */ @@ -1147,21 +1148,22 @@ static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, if (!nest_parms) goto nla_put_failure; - NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state); - - NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL, - ct->proto.tcp.seen[0].td_scale); - - NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY, - ct->proto.tcp.seen[1].td_scale); + if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state) || + nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL, + ct->proto.tcp.seen[0].td_scale) || + nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY, + ct->proto.tcp.seen[1].td_scale)) + goto nla_put_failure; tmp.flags = ct->proto.tcp.seen[0].flags; - NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL, - sizeof(struct nf_ct_tcp_flags), &tmp); + if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL, + sizeof(struct nf_ct_tcp_flags), &tmp)) + goto nla_put_failure; tmp.flags = ct->proto.tcp.seen[1].flags; - NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY, - sizeof(struct nf_ct_tcp_flags), &tmp); + if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY, + sizeof(struct nf_ct_tcp_flags), &tmp)) + goto nla_put_failure; spin_unlock_bh(&ct->lock); nla_nest_end(skb, nest_parms); @@ -1310,28 +1312,29 @@ tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeouts = data; - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT, - htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_RECV, - htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_ESTABLISHED, - htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)); - 
NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_FIN_WAIT, - htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT, - htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_LAST_ACK, - htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_TIME_WAIT, - htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE, - htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT2, - htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_RETRANS, - htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_UNACK, - htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)); + if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT, + htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV, + htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED, + htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT, + htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT, + htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK, + htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT, + htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE, + htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2, + htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS, + htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK, + htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index a9073dc1548..7259a6bdeb4 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -181,10 +181,11 @@ udp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeouts = data; - NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_UNREPLIED, - htonl(timeouts[UDP_CT_UNREPLIED] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_REPLIED, - htonl(timeouts[UDP_CT_REPLIED] / HZ)); + if (nla_put_be32(skb, CTA_TIMEOUT_UDP_UNREPLIED, + htonl(timeouts[UDP_CT_UNREPLIED] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_UDP_REPLIED, + htonl(timeouts[UDP_CT_REPLIED] / HZ))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c index e0606392cda..4d60a5376aa 100644 --- a/net/netfilter/nf_conntrack_proto_udplite.c +++ b/net/netfilter/nf_conntrack_proto_udplite.c @@ -185,10 +185,11 @@ udplite_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeouts = data; - NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED, - htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_REPLIED, - htonl(timeouts[UDPLITE_CT_REPLIED] / HZ)); + if (nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED, + htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_REPLIED, + htonl(timeouts[UDPLITE_CT_REPLIED] / HZ))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 
885f5ab9bc2..9b3943252a5 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -468,18 +468,13 @@ static ctl_table nf_ct_netfilter_table[] = { { } }; -static struct ctl_path nf_ct_path[] = { - { .procname = "net", }, - { } -}; - static int nf_conntrack_standalone_init_sysctl(struct net *net) { struct ctl_table *table; if (net_eq(net, &init_net)) { nf_ct_netfilter_header = - register_sysctl_paths(nf_ct_path, nf_ct_netfilter_table); + register_net_sysctl(&init_net, "net", nf_ct_netfilter_table); if (!nf_ct_netfilter_header) goto out; } @@ -494,8 +489,7 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net) table[3].data = &net->ct.sysctl_checksum; table[4].data = &net->ct.sysctl_log_invalid; - net->ct.sysctl_header = register_net_sysctl_table(net, - nf_net_netfilter_sysctl_path, table); + net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table); if (!net->ct.sysctl_header) goto out_unregister_netfilter; @@ -505,7 +499,7 @@ out_unregister_netfilter: kfree(table); out_kmemdup: if (net_eq(net, &init_net)) - unregister_sysctl_table(nf_ct_netfilter_header); + unregister_net_sysctl_table(nf_ct_netfilter_header); out: printk(KERN_ERR "nf_conntrack: can't register to sysctl.\n"); return -ENOMEM; @@ -516,7 +510,7 @@ static void nf_conntrack_standalone_fini_sysctl(struct net *net) struct ctl_table *table; if (net_eq(net, &init_net)) - unregister_sysctl_table(nf_ct_netfilter_header); + unregister_net_sysctl_table(nf_ct_netfilter_header); table = net->ct.sysctl_header->ctl_table_arg; unregister_net_sysctl_table(net->ct.sysctl_header); kfree(table); diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c index e8d27afbbdb..dbb364f62d6 100644 --- a/net/netfilter/nf_conntrack_timestamp.c +++ b/net/netfilter/nf_conntrack_timestamp.c @@ -51,8 +51,8 @@ static int nf_conntrack_tstamp_init_sysctl(struct net *net) table[0].data = &net->ct.sysctl_tstamp; - net->ct.tstamp_sysctl_header = register_net_sysctl_table(net, - nf_net_netfilter_sysctl_path, table); + net->ct.tstamp_sysctl_header = register_net_sysctl(net, "net/netfilter", + table); if (!net->ct.tstamp_sysctl_header) { printk(KERN_ERR "nf_ct_tstamp: can't register to sysctl.\n"); goto out_register; diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 957374a234d..703fb26aa48 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -214,13 +214,6 @@ static const struct file_operations nflog_file_ops = { #endif /* PROC_FS */ #ifdef CONFIG_SYSCTL -static struct ctl_path nf_log_sysctl_path[] = { - { .procname = "net", }, - { .procname = "netfilter", }, - { .procname = "nf_log", }, - { } -}; - static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3]; static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1]; static struct ctl_table_header *nf_log_dir_header; @@ -283,7 +276,7 @@ static __init int netfilter_log_sysctl_init(void) nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i; } - nf_log_dir_header = register_sysctl_paths(nf_log_sysctl_path, + nf_log_dir_header = register_net_sysctl(&init_net, "net/netfilter/nf_log", nf_log_sysctl_table); if (!nf_log_dir_header) return -ENOMEM; diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index e6ddde16561..3e797d1fcb9 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -103,7 +103,7 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group) EXPORT_SYMBOL_GPL(nfnetlink_has_listeners); int nfnetlink_send(struct 
sk_buff *skb, struct net *net, u32 pid, - unsigned group, int echo, gfp_t flags) + unsigned int group, int echo, gfp_t flags) { return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags); } diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index d98c868c148..b2e7310ca0b 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c @@ -109,7 +109,8 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type, nfmsg->version = NFNETLINK_V0; nfmsg->res_id = 0; - NLA_PUT_STRING(skb, NFACCT_NAME, acct->name); + if (nla_put_string(skb, NFACCT_NAME, acct->name)) + goto nla_put_failure; if (type == NFNL_MSG_ACCT_GET_CTRZERO) { pkts = atomic64_xchg(&acct->pkts, 0); @@ -118,9 +119,10 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type, pkts = atomic64_read(&acct->pkts); bytes = atomic64_read(&acct->bytes); } - NLA_PUT_BE64(skb, NFACCT_PKTS, cpu_to_be64(pkts)); - NLA_PUT_BE64(skb, NFACCT_BYTES, cpu_to_be64(bytes)); - NLA_PUT_BE32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt))); + if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts)) || + nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes)) || + nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt)))) + goto nla_put_failure; nlmsg_end(skb, nlh); return skb->len; diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index 2b9e79f5ef0..3e655288d1d 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -170,11 +170,12 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type, nfmsg->version = NFNETLINK_V0; nfmsg->res_id = 0; - NLA_PUT_STRING(skb, CTA_TIMEOUT_NAME, timeout->name); - NLA_PUT_BE16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)); - NLA_PUT_U8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto); - NLA_PUT_BE32(skb, CTA_TIMEOUT_USE, - htonl(atomic_read(&timeout->refcnt))); + if (nla_put_string(skb, CTA_TIMEOUT_NAME, timeout->name) || + nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)) || + nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) || + nla_put_be32(skb, CTA_TIMEOUT_USE, + htonl(atomic_read(&timeout->refcnt)))) + goto nla_put_failure; if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) { struct nlattr *nest_parms; diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 66b2c54c544..3c3cfc0cc9b 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -391,67 +391,78 @@ __build_packet_message(struct nfulnl_instance *inst, pmsg.hw_protocol = skb->protocol; pmsg.hook = hooknum; - NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg); + if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg)) + goto nla_put_failure; - if (prefix) - NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix); + if (prefix && + nla_put(inst->skb, NFULA_PREFIX, plen, prefix)) + goto nla_put_failure; if (indev) { #ifndef CONFIG_BRIDGE_NETFILTER - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, - htonl(indev->ifindex)); + if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV, + htonl(indev->ifindex))) + goto nla_put_failure; #else if (pf == PF_BRIDGE) { /* Case 1: outdev is physical input device, we need to * look for bridge group (when called from * netfilter_bridge) */ - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV, - htonl(indev->ifindex)); + if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV, + htonl(indev->ifindex)) || /* this is the bridge group "brX" */ /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */ - 
NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, - htonl(br_port_get_rcu(indev)->br->dev->ifindex)); + nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV, + htonl(br_port_get_rcu(indev)->br->dev->ifindex))) + goto nla_put_failure; } else { /* Case 2: indev is bridge group, we need to look for * physical device (when called from ipv4) */ - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, - htonl(indev->ifindex)); - if (skb->nf_bridge && skb->nf_bridge->physindev) - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV, - htonl(skb->nf_bridge->physindev->ifindex)); + if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV, + htonl(indev->ifindex))) + goto nla_put_failure; + if (skb->nf_bridge && skb->nf_bridge->physindev && + nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV, + htonl(skb->nf_bridge->physindev->ifindex))) + goto nla_put_failure; } #endif } if (outdev) { #ifndef CONFIG_BRIDGE_NETFILTER - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, - htonl(outdev->ifindex)); + if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV, + htonl(outdev->ifindex))) + goto nla_put_failure; #else if (pf == PF_BRIDGE) { /* Case 1: outdev is physical output device, we need to * look for bridge group (when called from * netfilter_bridge) */ - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, - htonl(outdev->ifindex)); + if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, + htonl(outdev->ifindex)) || /* this is the bridge group "brX" */ /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */ - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, - htonl(br_port_get_rcu(outdev)->br->dev->ifindex)); + nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV, + htonl(br_port_get_rcu(outdev)->br->dev->ifindex))) + goto nla_put_failure; } else { /* Case 2: indev is a bridge group, we need to look * for physical device (when called from ipv4) */ - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, - htonl(outdev->ifindex)); - if (skb->nf_bridge && skb->nf_bridge->physoutdev) - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, - htonl(skb->nf_bridge->physoutdev->ifindex)); + if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV, + htonl(outdev->ifindex))) + goto nla_put_failure; + if (skb->nf_bridge && skb->nf_bridge->physoutdev && + nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, + htonl(skb->nf_bridge->physoutdev->ifindex))) + goto nla_put_failure; } #endif } - if (skb->mark) - NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark)); + if (skb->mark && + nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark))) + goto nla_put_failure; if (indev && skb->dev && skb->mac_header != skb->network_header) { @@ -459,16 +470,18 @@ __build_packet_message(struct nfulnl_instance *inst, int len = dev_parse_header(skb, phw.hw_addr); if (len > 0) { phw.hw_addrlen = htons(len); - NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw); + if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw)) + goto nla_put_failure; } } if (indev && skb_mac_header_was_set(skb)) { - NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)); - NLA_PUT_BE16(inst->skb, NFULA_HWLEN, - htons(skb->dev->hard_header_len)); - NLA_PUT(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, - skb_mac_header(skb)); + if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) || + nla_put_be16(inst->skb, NFULA_HWLEN, + htons(skb->dev->hard_header_len)) || + nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, + skb_mac_header(skb))) + goto nla_put_failure; } if (skb->tstamp.tv64) { @@ -477,7 +490,8 @@ __build_packet_message(struct nfulnl_instance *inst, ts.sec = cpu_to_be64(tv.tv_sec); ts.usec = 
cpu_to_be64(tv.tv_usec); - NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts); + if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts)) + goto nla_put_failure; } /* UID */ @@ -487,22 +501,24 @@ __build_packet_message(struct nfulnl_instance *inst, struct file *file = skb->sk->sk_socket->file; __be32 uid = htonl(file->f_cred->fsuid); __be32 gid = htonl(file->f_cred->fsgid); - /* need to unlock here since NLA_PUT may goto */ read_unlock_bh(&skb->sk->sk_callback_lock); - NLA_PUT_BE32(inst->skb, NFULA_UID, uid); - NLA_PUT_BE32(inst->skb, NFULA_GID, gid); + if (nla_put_be32(inst->skb, NFULA_UID, uid) || + nla_put_be32(inst->skb, NFULA_GID, gid)) + goto nla_put_failure; } else read_unlock_bh(&skb->sk->sk_callback_lock); } /* local sequence number */ - if (inst->flags & NFULNL_CFG_F_SEQ) - NLA_PUT_BE32(inst->skb, NFULA_SEQ, htonl(inst->seq++)); + if ((inst->flags & NFULNL_CFG_F_SEQ) && + nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++))) + goto nla_put_failure; /* global sequence number */ - if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) - NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL, - htonl(atomic_inc_return(&global_seq))); + if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) && + nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL, + htonl(atomic_inc_return(&global_seq)))) + goto nla_put_failure; if (data_len) { struct nlattr *nla; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index a80b0cb03f1..4162437b836 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -288,58 +288,67 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, indev = entry->indev; if (indev) { #ifndef CONFIG_BRIDGE_NETFILTER - NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)); + if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex))) + goto nla_put_failure; #else if (entry->pf == PF_BRIDGE) { /* Case 1: indev is physical input device, we need to * look for bridge group (when called from * netfilter_bridge) */ - NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, - htonl(indev->ifindex)); + if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV, + htonl(indev->ifindex)) || /* this is the bridge group "brX" */ /* rcu_read_lock()ed by __nf_queue */ - NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, - htonl(br_port_get_rcu(indev)->br->dev->ifindex)); + nla_put_be32(skb, NFQA_IFINDEX_INDEV, + htonl(br_port_get_rcu(indev)->br->dev->ifindex))) + goto nla_put_failure; } else { /* Case 2: indev is bridge group, we need to look for * physical device (when called from ipv4) */ - NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, - htonl(indev->ifindex)); - if (entskb->nf_bridge && entskb->nf_bridge->physindev) - NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, - htonl(entskb->nf_bridge->physindev->ifindex)); + if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, + htonl(indev->ifindex))) + goto nla_put_failure; + if (entskb->nf_bridge && entskb->nf_bridge->physindev && + nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV, + htonl(entskb->nf_bridge->physindev->ifindex))) + goto nla_put_failure; } #endif } if (outdev) { #ifndef CONFIG_BRIDGE_NETFILTER - NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)); + if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex))) + goto nla_put_failure; #else if (entry->pf == PF_BRIDGE) { /* Case 1: outdev is physical output device, we need to * look for bridge group (when called from * netfilter_bridge) */ - NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, - htonl(outdev->ifindex)); + if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV, + htonl(outdev->ifindex)) || /* this is the bridge 
group "brX" */ /* rcu_read_lock()ed by __nf_queue */ - NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, - htonl(br_port_get_rcu(outdev)->br->dev->ifindex)); + nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, + htonl(br_port_get_rcu(outdev)->br->dev->ifindex))) + goto nla_put_failure; } else { /* Case 2: outdev is bridge group, we need to look for * physical output device (when called from ipv4) */ - NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, - htonl(outdev->ifindex)); - if (entskb->nf_bridge && entskb->nf_bridge->physoutdev) - NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, - htonl(entskb->nf_bridge->physoutdev->ifindex)); + if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, + htonl(outdev->ifindex))) + goto nla_put_failure; + if (entskb->nf_bridge && entskb->nf_bridge->physoutdev && + nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV, + htonl(entskb->nf_bridge->physoutdev->ifindex))) + goto nla_put_failure; } #endif } - if (entskb->mark) - NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark)); + if (entskb->mark && + nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark))) + goto nla_put_failure; if (indev && entskb->dev && entskb->mac_header != entskb->network_header) { @@ -347,7 +356,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, int len = dev_parse_header(entskb, phw.hw_addr); if (len) { phw.hw_addrlen = htons(len); - NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw); + if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw)) + goto nla_put_failure; } } @@ -357,7 +367,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, ts.sec = cpu_to_be64(tv.tv_sec); ts.usec = cpu_to_be64(tv.tv_usec); - NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts); + if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts)) + goto nla_put_failure; } if (data_len) { @@ -384,8 +395,7 @@ nlmsg_failure: nla_put_failure: if (skb) kfree_skb(skb); - if (net_ratelimit()) - printk(KERN_ERR "nf_queue: error creating packet message\n"); + net_err_ratelimited("nf_queue: error creating packet message\n"); return NULL; } @@ -422,10 +432,8 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) } if (queue->queue_total >= queue->queue_maxlen) { queue->queue_dropped++; - if (net_ratelimit()) - printk(KERN_WARNING "nf_queue: full at %d entries, " - "dropping packets(s).\n", - queue->queue_total); + net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n", + queue->queue_total); goto err_out_free_nskb; } entry->id = ++queue->id_sequence; diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 3746d8b9a47..a51de9b052b 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -17,7 +17,6 @@ #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_ecache.h> -#include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_timeout.h> #include <net/netfilter/nf_conntrack_zones.h> diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c new file mode 100644 index 00000000000..0a96a43108e --- /dev/null +++ b/net/netfilter/xt_HMARK.c @@ -0,0 +1,362 @@ +/* + * xt_HMARK - Netfilter module to set mark by means of hashing + * + * (C) 2012 by Hans Schillstrom <hans.schillstrom@ericsson.com> + * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/skbuff.h> +#include <linux/icmp.h> + +#include <linux/netfilter/x_tables.h> +#include <linux/netfilter/xt_HMARK.h> + +#include <net/ip.h> +#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#include <net/netfilter/nf_conntrack.h> +#endif +#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#include <net/ipv6.h> +#include <linux/netfilter_ipv6/ip6_tables.h> +#endif + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Hans Schillstrom <hans.schillstrom@ericsson.com>"); +MODULE_DESCRIPTION("Xtables: packet marking using hash calculation"); +MODULE_ALIAS("ipt_HMARK"); +MODULE_ALIAS("ip6t_HMARK"); + +struct hmark_tuple { + u32 src; + u32 dst; + union hmark_ports uports; + uint8_t proto; +}; + +static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask) +{ + return (addr32[0] & mask[0]) ^ + (addr32[1] & mask[1]) ^ + (addr32[2] & mask[2]) ^ + (addr32[3] & mask[3]); +} + +static inline u32 +hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask) +{ + switch (l3num) { + case AF_INET: + return *addr32 & *mask; + case AF_INET6: + return hmark_addr6_mask(addr32, mask); + } + return 0; +} + +static int +hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, + const struct xt_hmark_info *info) +{ +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + enum ip_conntrack_info ctinfo; + struct nf_conn *ct = nf_ct_get(skb, &ctinfo); + struct nf_conntrack_tuple *otuple; + struct nf_conntrack_tuple *rtuple; + + if (ct == NULL || nf_ct_is_untracked(ct)) + return -1; + + otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; + rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; + + t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.all, + info->src_mask.all); + t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.all, + info->dst_mask.all); + + if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) + return 0; + + t->proto = nf_ct_protonum(ct); + if (t->proto != IPPROTO_ICMP) { + t->uports.p16.src = otuple->src.u.all; + t->uports.p16.dst = rtuple->src.u.all; + t->uports.v32 = (t->uports.v32 & info->port_mask.v32) | + info->port_set.v32; + if (t->uports.p16.dst < t->uports.p16.src) + swap(t->uports.p16.dst, t->uports.p16.src); + } + + return 0; +#else + return -1; +#endif +} + +static inline u32 +hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info) +{ + u32 hash; + + if (t->dst < t->src) + swap(t->src, t->dst); + + hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd); + hash = hash ^ (t->proto & info->proto_mask); + + return (((u64)hash * info->hmodulus) >> 32) + info->hoffset; +} + +static void +hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff, + struct hmark_tuple *t, const struct xt_hmark_info *info) +{ + int protoff; + + protoff = proto_ports_offset(t->proto); + if (protoff < 0) + return; + + nhoff += protoff; + if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0) + return; + + t->uports.v32 = (t->uports.v32 & info->port_mask.v32) | + info->port_set.v32; + + if (t->uports.p16.dst < t->uports.p16.src) + swap(t->uports.p16.dst, t->uports.p16.src); +} + +#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +static int get_inner6_hdr(const struct sk_buff *skb, int *offset) +{ + struct icmp6hdr *icmp6h, _ih6; + + icmp6h = skb_header_pointer(skb, *offset, sizeof(_ih6), &_ih6); + if (icmp6h == NULL) + return 0; + + if (icmp6h->icmp6_type && icmp6h->icmp6_type < 128) { + *offset += sizeof(struct icmp6hdr); + return 1; + } + return 0; +} + +static int +hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t, + 
const struct xt_hmark_info *info) +{ + struct ipv6hdr *ip6, _ip6; + int flag = IP6T_FH_F_AUTH; + unsigned int nhoff = 0; + u16 fragoff = 0; + int nexthdr; + + ip6 = (struct ipv6hdr *) (skb->data + skb_network_offset(skb)); + nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag); + if (nexthdr < 0) + return 0; + /* No need to check for icmp errors on fragments */ + if ((flag & IP6T_FH_F_FRAG) || (nexthdr != IPPROTO_ICMPV6)) + goto noicmp; + /* Use inner header in case of ICMP errors */ + if (get_inner6_hdr(skb, &nhoff)) { + ip6 = skb_header_pointer(skb, nhoff, sizeof(_ip6), &_ip6); + if (ip6 == NULL) + return -1; + /* If AH present, use SPI like in ESP. */ + flag = IP6T_FH_F_AUTH; + nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag); + if (nexthdr < 0) + return -1; + } +noicmp: + t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.all); + t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.all); + + if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) + return 0; + + t->proto = nexthdr; + if (t->proto == IPPROTO_ICMPV6) + return 0; + + if (flag & IP6T_FH_F_FRAG) + return 0; + + hmark_set_tuple_ports(skb, nhoff, t, info); + return 0; +} + +static unsigned int +hmark_tg_v6(struct sk_buff *skb, const struct xt_action_param *par) +{ + const struct xt_hmark_info *info = par->targinfo; + struct hmark_tuple t; + + memset(&t, 0, sizeof(struct hmark_tuple)); + + if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) { + if (hmark_ct_set_htuple(skb, &t, info) < 0) + return XT_CONTINUE; + } else { + if (hmark_pkt_set_htuple_ipv6(skb, &t, info) < 0) + return XT_CONTINUE; + } + + skb->mark = hmark_hash(&t, info); + return XT_CONTINUE; +} +#endif + +static int get_inner_hdr(const struct sk_buff *skb, int iphsz, int *nhoff) +{ + const struct icmphdr *icmph; + struct icmphdr _ih; + + /* Not enough header? */ + icmph = skb_header_pointer(skb, *nhoff + iphsz, sizeof(_ih), &_ih); + if (icmph == NULL || icmph->type > NR_ICMP_TYPES) + return 0; + + /* Error message? 
*/ + if (icmph->type != ICMP_DEST_UNREACH && + icmph->type != ICMP_SOURCE_QUENCH && + icmph->type != ICMP_TIME_EXCEEDED && + icmph->type != ICMP_PARAMETERPROB && + icmph->type != ICMP_REDIRECT) + return 0; + + *nhoff += iphsz + sizeof(_ih); + return 1; +} + +static int +hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t, + const struct xt_hmark_info *info) +{ + struct iphdr *ip, _ip; + int nhoff = skb_network_offset(skb); + + ip = (struct iphdr *) (skb->data + nhoff); + if (ip->protocol == IPPROTO_ICMP) { + /* Use inner header in case of ICMP errors */ + if (get_inner_hdr(skb, ip->ihl * 4, &nhoff)) { + ip = skb_header_pointer(skb, nhoff, sizeof(_ip), &_ip); + if (ip == NULL) + return -1; + } + } + + t->src = (__force u32) ip->saddr; + t->dst = (__force u32) ip->daddr; + + t->src &= info->src_mask.ip; + t->dst &= info->dst_mask.ip; + + if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) + return 0; + + t->proto = ip->protocol; + + /* ICMP has no ports, skip */ + if (t->proto == IPPROTO_ICMP) + return 0; + + /* follow-up fragments don't contain ports, skip all fragments */ + if (ip->frag_off & htons(IP_MF | IP_OFFSET)) + return 0; + + hmark_set_tuple_ports(skb, (ip->ihl * 4) + nhoff, t, info); + + return 0; +} + +static unsigned int +hmark_tg_v4(struct sk_buff *skb, const struct xt_action_param *par) +{ + const struct xt_hmark_info *info = par->targinfo; + struct hmark_tuple t; + + memset(&t, 0, sizeof(struct hmark_tuple)); + + if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) { + if (hmark_ct_set_htuple(skb, &t, info) < 0) + return XT_CONTINUE; + } else { + if (hmark_pkt_set_htuple_ipv4(skb, &t, info) < 0) + return XT_CONTINUE; + } + + skb->mark = hmark_hash(&t, info); + return XT_CONTINUE; +} + +static int hmark_tg_check(const struct xt_tgchk_param *par) +{ + const struct xt_hmark_info *info = par->targinfo; + + if (!info->hmodulus) { + pr_info("xt_HMARK: hash modulus can't be zero\n"); + return -EINVAL; + } + if (info->proto_mask && + (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))) { + pr_info("xt_HMARK: proto mask must be zero with L3 mode\n"); + return -EINVAL; + } + if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) && + (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) | + XT_HMARK_FLAG(XT_HMARK_DPORT_MASK)))) { + pr_info("xt_HMARK: spi-mask and port-mask can't be combined\n"); + return -EINVAL; + } + if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) && + (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) | + XT_HMARK_FLAG(XT_HMARK_DPORT)))) { + pr_info("xt_HMARK: spi-set and port-set can't be combined\n"); + return -EINVAL; + } + return 0; +} + +static struct xt_target hmark_tg_reg[] __read_mostly = { + { + .name = "HMARK", + .family = NFPROTO_IPV4, + .target = hmark_tg_v4, + .targetsize = sizeof(struct xt_hmark_info), + .checkentry = hmark_tg_check, + .me = THIS_MODULE, + }, +#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) + { + .name = "HMARK", + .family = NFPROTO_IPV6, + .target = hmark_tg_v6, + .targetsize = sizeof(struct xt_hmark_info), + .checkentry = hmark_tg_check, + .me = THIS_MODULE, + }, +#endif +}; + +static int __init hmark_tg_init(void) +{ + return xt_register_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg)); +} + +static void __exit hmark_tg_exit(void) +{ + xt_unregister_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg)); +} + +module_init(hmark_tg_init); +module_exit(hmark_tg_exit); diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 190ad37c5cf..71a266de5fb 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -67,15 +67,13 
@@ tcpmss_mangle_packet(struct sk_buff *skb, if (info->mss == XT_TCPMSS_CLAMP_PMTU) { if (dst_mtu(skb_dst(skb)) <= minlen) { - if (net_ratelimit()) - pr_err("unknown or invalid path-MTU (%u)\n", - dst_mtu(skb_dst(skb))); + net_err_ratelimited("unknown or invalid path-MTU (%u)\n", + dst_mtu(skb_dst(skb))); return -1; } if (in_mtu <= minlen) { - if (net_ratelimit()) - pr_err("unknown or invalid path-MTU (%u)\n", - in_mtu); + net_err_ratelimited("unknown or invalid path-MTU (%u)\n", + in_mtu); return -1; } newmss = min(dst_mtu(skb_dst(skb)), in_mtu) - minlen; diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 35a959a096e..146033a86de 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c @@ -282,10 +282,10 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) struct sock *sk; const struct in6_addr *laddr; __be16 lport; - int thoff; + int thoff = 0; int tproto; - tproto = ipv6_find_hdr(skb, &thoff, -1, NULL); + tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL); if (tproto < 0) { pr_debug("unable to find transport header in IPv6 packet, dropping\n"); return NF_DROP; diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index d95f9c963cd..26a668a84aa 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -171,8 +171,7 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht, if (ht->cfg.max && ht->count >= ht->cfg.max) { /* FIXME: do something. question is what.. */ - if (net_ratelimit()) - pr_err("max count of %u reached\n", ht->cfg.max); + net_err_ratelimited("max count of %u reached\n", ht->cfg.max); ent = NULL; } else ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC); @@ -388,9 +387,20 @@ static void htable_put(struct xt_hashlimit_htable *hinfo) #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ) +/* in byte mode, the lowest possible rate is one packet/second. + * credit_cap is used as a counter that tells us how many times we can + * refill the "credits available" counter when it becomes empty. + */ +#define MAX_CPJ_BYTES (0xFFFFFFFF / HZ) +#define CREDITS_PER_JIFFY_BYTES POW2_BELOW32(MAX_CPJ_BYTES) + +static u32 xt_hashlimit_len_to_chunks(u32 len) +{ + return (len >> XT_HASHLIMIT_BYTE_SHIFT) + 1; +} + /* Precision saver. */ -static inline u_int32_t -user2credits(u_int32_t user) +static u32 user2credits(u32 user) { /* If multiplying would overflow... 
*/ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY)) @@ -400,12 +410,53 @@ user2credits(u_int32_t user) return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE; } -static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now) +static u32 user2credits_byte(u32 user) { - dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY; - if (dh->rateinfo.credit > dh->rateinfo.credit_cap) - dh->rateinfo.credit = dh->rateinfo.credit_cap; + u64 us = user; + us *= HZ * CREDITS_PER_JIFFY_BYTES; + return (u32) (us >> 32); +} + +static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now, u32 mode) +{ + unsigned long delta = now - dh->rateinfo.prev; + u32 cap; + + if (delta == 0) + return; + dh->rateinfo.prev = now; + + if (mode & XT_HASHLIMIT_BYTES) { + u32 tmp = dh->rateinfo.credit; + dh->rateinfo.credit += CREDITS_PER_JIFFY_BYTES * delta; + cap = CREDITS_PER_JIFFY_BYTES * HZ; + if (tmp >= dh->rateinfo.credit) {/* overflow */ + dh->rateinfo.credit = cap; + return; + } + } else { + dh->rateinfo.credit += delta * CREDITS_PER_JIFFY; + cap = dh->rateinfo.credit_cap; + } + if (dh->rateinfo.credit > cap) + dh->rateinfo.credit = cap; +} + +static void rateinfo_init(struct dsthash_ent *dh, + struct xt_hashlimit_htable *hinfo) +{ + dh->rateinfo.prev = jiffies; + if (hinfo->cfg.mode & XT_HASHLIMIT_BYTES) { + dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ; + dh->rateinfo.cost = user2credits_byte(hinfo->cfg.avg); + dh->rateinfo.credit_cap = hinfo->cfg.burst; + } else { + dh->rateinfo.credit = user2credits(hinfo->cfg.avg * + hinfo->cfg.burst); + dh->rateinfo.cost = user2credits(hinfo->cfg.avg); + dh->rateinfo.credit_cap = dh->rateinfo.credit; + } } static inline __be32 maskl(__be32 a, unsigned int l) @@ -511,6 +562,21 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo, return 0; } +static u32 hashlimit_byte_cost(unsigned int len, struct dsthash_ent *dh) +{ + u64 tmp = xt_hashlimit_len_to_chunks(len); + tmp = tmp * dh->rateinfo.cost; + + if (unlikely(tmp > CREDITS_PER_JIFFY_BYTES * HZ)) + tmp = CREDITS_PER_JIFFY_BYTES * HZ; + + if (dh->rateinfo.credit < tmp && dh->rateinfo.credit_cap) { + dh->rateinfo.credit_cap--; + dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ; + } + return (u32) tmp; +} + static bool hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) { @@ -519,6 +585,7 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) unsigned long now = jiffies; struct dsthash_ent *dh; struct dsthash_dst dst; + u32 cost; if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0) goto hotdrop; @@ -532,21 +599,21 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) goto hotdrop; } dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire); - dh->rateinfo.prev = jiffies; - dh->rateinfo.credit = user2credits(hinfo->cfg.avg * - hinfo->cfg.burst); - dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg * - hinfo->cfg.burst); - dh->rateinfo.cost = user2credits(hinfo->cfg.avg); + rateinfo_init(dh, hinfo); } else { /* update expiration timeout */ dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire); - rateinfo_recalc(dh, now); + rateinfo_recalc(dh, now, hinfo->cfg.mode); } - if (dh->rateinfo.credit >= dh->rateinfo.cost) { + if (info->cfg.mode & XT_HASHLIMIT_BYTES) + cost = hashlimit_byte_cost(skb->len, dh); + else + cost = dh->rateinfo.cost; + + if (dh->rateinfo.credit >= cost) { /* below the limit */ - dh->rateinfo.credit -= dh->rateinfo.cost; + dh->rateinfo.credit -= cost; spin_unlock(&dh->lock); rcu_read_unlock_bh(); 
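		/* Illustrative note on the byte-mode accounting introduced above
		 * (a summary of the new helpers, not part of the original patch):
		 * with XT_HASHLIMIT_BYTES the credit pool is capped at
		 * CREDITS_PER_JIFFY_BYTES * HZ and rateinfo_recalc() refills it by
		 * CREDITS_PER_JIFFY_BYTES per elapsed jiffy.  user2credits_byte(avg)
		 * turns the configured rate into the cost of one
		 * XT_HASHLIMIT_BYTE_SHIFT-sized chunk, and hashlimit_byte_cost()
		 * charges xt_hashlimit_len_to_chunks(skb->len) such chunks per
		 * packet.  cfg.burst is reused as credit_cap, i.e. the number of
		 * times an empty pool may be refilled on the spot before the entry
		 * is treated as over its rate.
		 */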
return !(info->cfg.mode & XT_HASHLIMIT_INVERT); @@ -568,14 +635,6 @@ static int hashlimit_mt_check(const struct xt_mtchk_param *par) struct xt_hashlimit_mtinfo1 *info = par->matchinfo; int ret; - /* Check for overflow. */ - if (info->cfg.burst == 0 || - user2credits(info->cfg.avg * info->cfg.burst) < - user2credits(info->cfg.avg)) { - pr_info("overflow, try lower: %u/%u\n", - info->cfg.avg, info->cfg.burst); - return -ERANGE; - } if (info->cfg.gc_interval == 0 || info->cfg.expire == 0) return -EINVAL; if (info->name[sizeof(info->name)-1] != '\0') @@ -588,6 +647,26 @@ static int hashlimit_mt_check(const struct xt_mtchk_param *par) return -EINVAL; } + if (info->cfg.mode & ~XT_HASHLIMIT_ALL) { + pr_info("Unknown mode mask %X, kernel too old?\n", + info->cfg.mode); + return -EINVAL; + } + + /* Check for overflow. */ + if (info->cfg.mode & XT_HASHLIMIT_BYTES) { + if (user2credits_byte(info->cfg.avg) == 0) { + pr_info("overflow, rate too high: %u\n", info->cfg.avg); + return -EINVAL; + } + } else if (info->cfg.burst == 0 || + user2credits(info->cfg.avg * info->cfg.burst) < + user2credits(info->cfg.avg)) { + pr_info("overflow, try lower: %u/%u\n", + info->cfg.avg, info->cfg.burst); + return -ERANGE; + } + mutex_lock(&hashlimit_mutex); info->hinfo = htable_find_get(net, info->name, par->family); if (info->hinfo == NULL) { @@ -680,10 +759,11 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, struct seq_file *s) { int res; + const struct xt_hashlimit_htable *ht = s->private; spin_lock(&ent->lock); /* recalculate to show accurate numbers */ - rateinfo_recalc(ent, jiffies); + rateinfo_recalc(ent, jiffies, ht->cfg.mode); switch (family) { case NFPROTO_IPV4: diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c index 32b7a579a03..5c22ce8ab30 100644 --- a/net/netfilter/xt_limit.c +++ b/net/netfilter/xt_limit.c @@ -88,8 +88,7 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par) } /* Precision saver. */ -static u_int32_t -user2credits(u_int32_t user) +static u32 user2credits(u32 user) { /* If multiplying would overflow... */ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY)) @@ -123,7 +122,7 @@ static int limit_mt_check(const struct xt_mtchk_param *par) 128. */ priv->prev = jiffies; priv->credit = user2credits(r->avg * r->burst); /* Credits full. */ - r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */ + r->credit_cap = priv->credit; /* Credits full. 
*/ r->cost = user2credits(r->avg); } return 0; diff --git a/net/netfilter/xt_mac.c b/net/netfilter/xt_mac.c index 8160f6b1435..d5b4fd4f91e 100644 --- a/net/netfilter/xt_mac.c +++ b/net/netfilter/xt_mac.c @@ -36,7 +36,7 @@ static bool mac_mt(const struct sk_buff *skb, struct xt_action_param *par) return false; if (skb_mac_header(skb) + ETH_HLEN > skb->data) return false; - ret = compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr) == 0; + ret = ether_addr_equal(eth_hdr(skb)->h_source, info->srcaddr); ret ^= info->invert; return ret; } diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index d2ff15a2412..fc0d6dbe5d1 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -314,7 +314,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par) #ifdef CONFIG_PROC_FS struct proc_dir_entry *pde; #endif - unsigned i; + unsigned int i; int ret = -EINVAL; if (unlikely(!hash_rnd_inited)) { diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c index 0ec8138aa47..035960ec5cb 100644 --- a/net/netfilter/xt_set.c +++ b/net/netfilter/xt_set.c @@ -44,6 +44,14 @@ const struct ip_set_adt_opt n = { \ .cmdflags = cfs, \ .timeout = t, \ } +#define ADT_MOPT(n, f, d, fs, cfs, t) \ +struct ip_set_adt_opt n = { \ + .family = f, \ + .dim = d, \ + .flags = fs, \ + .cmdflags = cfs, \ + .timeout = t, \ +} /* Revision 0 interface: backward compatible with netfilter/iptables */ @@ -296,11 +304,14 @@ static unsigned int set_target_v2(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_set_info_target_v2 *info = par->targinfo; - ADT_OPT(add_opt, par->family, info->add_set.dim, - info->add_set.flags, info->flags, info->timeout); + ADT_MOPT(add_opt, par->family, info->add_set.dim, + info->add_set.flags, info->flags, info->timeout); ADT_OPT(del_opt, par->family, info->del_set.dim, info->del_set.flags, 0, UINT_MAX); + /* Normalize to fit into jiffies */ + if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC) + add_opt.timeout = UINT_MAX/MSEC_PER_SEC; if (info->add_set.index != IPSET_INVALID_ID) ip_set_add(info->add_set.index, skb, par, &add_opt); if (info->del_set.index != IPSET_INVALID_ID) diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 72bb07f57f9..9ea482d08cf 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -263,10 +263,10 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) struct sock *sk; struct in6_addr *daddr, *saddr; __be16 dport, sport; - int thoff, tproto; + int thoff = 0, tproto; const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo; - tproto = ipv6_find_hdr(skb, &thoff, -1, NULL); + tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL); if (tproto < 0) { pr_debug("unable to find transport header in IPv6 packet, dropping\n"); return NF_DROP; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index faa48f70b7c..b3025a603d5 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -104,27 +104,27 @@ static inline int netlink_is_kernel(struct sock *sk) } struct nl_pid_hash { - struct hlist_head *table; - unsigned long rehash_time; + struct hlist_head *table; + unsigned long rehash_time; - unsigned int mask; - unsigned int shift; + unsigned int mask; + unsigned int shift; - unsigned int entries; - unsigned int max_shift; + unsigned int entries; + unsigned int max_shift; - u32 rnd; + u32 rnd; }; struct netlink_table { - struct nl_pid_hash hash; - struct hlist_head mc_list; - struct listeners __rcu *listeners; - unsigned int 
nl_nonroot; - unsigned int groups; - struct mutex *cb_mutex; - struct module *module; - int registered; + struct nl_pid_hash hash; + struct hlist_head mc_list; + struct listeners __rcu *listeners; + unsigned int nl_nonroot; + unsigned int groups; + struct mutex *cb_mutex; + struct module *module; + int registered; }; static struct netlink_table *nl_table; @@ -132,7 +132,6 @@ static struct netlink_table *nl_table; static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); static int netlink_dump(struct sock *sk); -static void netlink_destroy_callback(struct netlink_callback *cb); static DEFINE_RWLOCK(nl_table_lock); static atomic_t nl_table_users = ATOMIC_INIT(0); @@ -149,6 +148,18 @@ static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask]; } +static void netlink_destroy_callback(struct netlink_callback *cb) +{ + kfree_skb(cb->skb); + kfree(cb); +} + +static void netlink_consume_callback(struct netlink_callback *cb) +{ + consume_skb(cb->skb); + kfree(cb); +} + static void netlink_sock_destruct(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); @@ -414,9 +425,9 @@ static int __netlink_create(struct net *net, struct socket *sock, sock_init_data(sock, sk); nlk = nlk_sk(sk); - if (cb_mutex) + if (cb_mutex) { nlk->cb_mutex = cb_mutex; - else { + } else { nlk->cb_mutex = &nlk->cb_def_mutex; mutex_init(nlk->cb_mutex); } @@ -522,8 +533,9 @@ static int netlink_release(struct socket *sock) nl_table[sk->sk_protocol].module = NULL; nl_table[sk->sk_protocol].registered = 0; } - } else if (nlk->subscriptions) + } else if (nlk->subscriptions) { netlink_update_listeners(sk); + } netlink_table_ungrab(); kfree(nlk->groups); @@ -866,7 +878,7 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation) struct sk_buff *nskb = skb_clone(skb, allocation); if (!nskb) return skb; - kfree_skb(skb); + consume_skb(skb); skb = nskb; } @@ -896,8 +908,10 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb) ret = skb->len; skb_set_owner_r(skb, sk); nlk->netlink_rcv(skb); + consume_skb(skb); + } else { + kfree_skb(skb); } - kfree_skb(skb); sock_put(sk); return ret; } @@ -1086,8 +1100,8 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid, if (info.delivery_failure) { kfree_skb(info.skb2); return -ENOBUFS; - } else - consume_skb(info.skb2); + } + consume_skb(info.skb2); if (info.delivered) { if (info.congested && (allocation & __GFP_WAIT)) @@ -1240,8 +1254,9 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname, nlk->flags |= NETLINK_RECV_NO_ENOBUFS; clear_bit(0, &nlk->state); wake_up_interruptible(&nlk->wait); - } else + } else { nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS; + } err = 0; break; default: @@ -1645,12 +1660,6 @@ void netlink_set_nonroot(int protocol, unsigned int flags) } EXPORT_SYMBOL(netlink_set_nonroot); -static void netlink_destroy_callback(struct netlink_callback *cb) -{ - kfree_skb(cb->skb); - kfree(cb); -} - struct nlmsghdr * __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags) { @@ -1727,7 +1736,7 @@ static int netlink_dump(struct sock *sk) nlk->cb = NULL; mutex_unlock(nlk->cb_mutex); - netlink_destroy_callback(cb); + netlink_consume_callback(cb); return 0; errout_skb: @@ -1996,11 +2005,11 @@ static void netlink_seq_stop(struct seq_file *seq, void *v) static int netlink_seq_show(struct seq_file *seq, void *v) { - if (v == SEQ_START_TOKEN) + if (v == SEQ_START_TOKEN) { seq_puts(seq, "sk Eth Pid Groups " "Rmem 
Wmem Dump Locks Drops Inode\n"); - else { + } else { struct sock *s = v; struct netlink_sock *nlk = nlk_sk(s); diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 9f40441d7a7..8340ace837f 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -635,11 +635,12 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq, if (hdr == NULL) return -1; - NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, family->name); - NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, family->id); - NLA_PUT_U32(skb, CTRL_ATTR_VERSION, family->version); - NLA_PUT_U32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize); - NLA_PUT_U32(skb, CTRL_ATTR_MAXATTR, family->maxattr); + if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) || + nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) || + nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) || + nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) || + nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr)) + goto nla_put_failure; if (!list_empty(&family->ops_list)) { struct nlattr *nla_ops; @@ -657,8 +658,9 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq, if (nest == NULL) goto nla_put_failure; - NLA_PUT_U32(skb, CTRL_ATTR_OP_ID, ops->cmd); - NLA_PUT_U32(skb, CTRL_ATTR_OP_FLAGS, ops->flags); + if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) || + nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, ops->flags)) + goto nla_put_failure; nla_nest_end(skb, nest); } @@ -682,9 +684,10 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq, if (nest == NULL) goto nla_put_failure; - NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id); - NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME, - grp->name); + if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) || + nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME, + grp->name)) + goto nla_put_failure; nla_nest_end(skb, nest); } @@ -710,8 +713,9 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid, if (hdr == NULL) return -1; - NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name); - NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id); + if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name) || + nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id)) + goto nla_put_failure; nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS); if (nla_grps == NULL) @@ -721,9 +725,10 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid, if (nest == NULL) goto nla_put_failure; - NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id); - NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME, - grp->name); + if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) || + nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME, + grp->name)) + goto nla_put_failure; nla_nest_end(skb, nest); nla_nest_end(skb, nla_grps); diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c index 1c51d7a58f0..743262becd6 100644 --- a/net/netrom/nr_dev.c +++ b/net/netrom/nr_dev.c @@ -97,7 +97,7 @@ static int nr_rebuild_header(struct sk_buff *skb) static int nr_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - const void *daddr, const void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned int len) { unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c index 1e0fa9e57aa..42f630b9a69 100644 --- a/net/netrom/sysctl_net_netrom.c +++ b/net/netrom/sysctl_net_netrom.c @@ -146,18 +146,12 @@ static ctl_table nr_table[] = { { } }; -static struct ctl_path 
nr_path[] = { - { .procname = "net", }, - { .procname = "netrom", }, - { } -}; - void __init nr_register_sysctl(void) { - nr_table_header = register_sysctl_paths(nr_path, nr_table); + nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table); } void nr_unregister_sysctl(void) { - unregister_sysctl_table(nr_table_header); + unregister_net_sysctl_table(nr_table_header); } diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig index 44c865b86d6..8d8d9bc4b6f 100644 --- a/net/nfc/Kconfig +++ b/net/nfc/Kconfig @@ -14,6 +14,7 @@ menuconfig NFC be called nfc. source "net/nfc/nci/Kconfig" +source "net/nfc/hci/Kconfig" source "net/nfc/llcp/Kconfig" source "drivers/nfc/Kconfig" diff --git a/net/nfc/Makefile b/net/nfc/Makefile index 7b4a6dcfa56..d1a117c2c40 100644 --- a/net/nfc/Makefile +++ b/net/nfc/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_NFC) += nfc.o obj-$(CONFIG_NFC_NCI) += nci/ +obj-$(CONFIG_NFC_HCI) += hci/ nfc-objs := core.o netlink.o af_nfc.o rawsock.o nfc-$(CONFIG_NFC_LLCP) += llcp/llcp.o llcp/commands.o llcp/sock.o diff --git a/net/nfc/core.c b/net/nfc/core.c index 295d129864d..3192c3f589e 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -33,6 +33,8 @@ #define VERSION "0.1" +#define NFC_CHECK_PRES_FREQ_MS 2000 + int nfc_devlist_generation; DEFINE_MUTEX(nfc_devlist_mutex); @@ -95,7 +97,7 @@ int nfc_dev_down(struct nfc_dev *dev) goto error; } - if (dev->polling || dev->remote_activated) { + if (dev->polling || dev->activated_target_idx != NFC_TARGET_IDX_NONE) { rc = -EBUSY; goto error; } @@ -211,6 +213,8 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode) } rc = dev->ops->dep_link_up(dev, target_index, comm_mode, gb, gb_len); + if (!rc) + dev->activated_target_idx = target_index; error: device_unlock(&dev->dev); @@ -246,6 +250,7 @@ int nfc_dep_link_down(struct nfc_dev *dev) rc = dev->ops->dep_link_down(dev); if (!rc) { dev->dep_link_up = false; + dev->activated_target_idx = NFC_TARGET_IDX_NONE; nfc_llcp_mac_is_down(dev); nfc_genl_dep_link_down_event(dev); } @@ -289,8 +294,13 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol) } rc = dev->ops->activate_target(dev, target_idx, protocol); - if (!rc) - dev->remote_activated = true; + if (!rc) { + dev->activated_target_idx = target_idx; + + if (dev->ops->check_presence) + mod_timer(&dev->check_pres_timer, jiffies + + msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); + } error: device_unlock(&dev->dev); @@ -317,8 +327,11 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx) goto error; } + if (dev->ops->check_presence) + del_timer_sync(&dev->check_pres_timer); + dev->ops->deactivate_target(dev, target_idx); - dev->remote_activated = false; + dev->activated_target_idx = NFC_TARGET_IDX_NONE; error: device_unlock(&dev->dev); @@ -352,8 +365,27 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb, goto error; } + if (dev->activated_target_idx == NFC_TARGET_IDX_NONE) { + rc = -ENOTCONN; + kfree_skb(skb); + goto error; + } + + if (target_idx != dev->activated_target_idx) { + rc = -EADDRNOTAVAIL; + kfree_skb(skb); + goto error; + } + + if (dev->ops->check_presence) + del_timer_sync(&dev->check_pres_timer); + rc = dev->ops->data_exchange(dev, target_idx, skb, cb, cb_context); + if (!rc && dev->ops->check_presence) + mod_timer(&dev->check_pres_timer, jiffies + + msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); + error: device_unlock(&dev->dev); return rc; @@ -428,10 +460,15 @@ EXPORT_SYMBOL(nfc_alloc_recv_skb); int nfc_targets_found(struct nfc_dev *dev, struct nfc_target 
*targets, int n_targets) { + int i; + pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets); dev->polling = false; + for (i = 0; i < n_targets; i++) + targets[i].idx = dev->target_next_idx++; + spin_lock_bh(&dev->targets_lock); dev->targets_generation++; @@ -455,17 +492,92 @@ int nfc_targets_found(struct nfc_dev *dev, } EXPORT_SYMBOL(nfc_targets_found); +int nfc_target_lost(struct nfc_dev *dev, u32 target_idx) +{ + struct nfc_target *tg; + int i; + + pr_debug("dev_name %s n_target %d\n", dev_name(&dev->dev), target_idx); + + spin_lock_bh(&dev->targets_lock); + + for (i = 0; i < dev->n_targets; i++) { + tg = &dev->targets[i]; + if (tg->idx == target_idx) + break; + } + + if (i == dev->n_targets) { + spin_unlock_bh(&dev->targets_lock); + return -EINVAL; + } + + dev->targets_generation++; + dev->n_targets--; + dev->activated_target_idx = NFC_TARGET_IDX_NONE; + + if (dev->n_targets) { + memcpy(&dev->targets[i], &dev->targets[i + 1], + (dev->n_targets - i) * sizeof(struct nfc_target)); + } else { + kfree(dev->targets); + dev->targets = NULL; + } + + spin_unlock_bh(&dev->targets_lock); + + nfc_genl_target_lost(dev, target_idx); + + return 0; +} +EXPORT_SYMBOL(nfc_target_lost); + static void nfc_release(struct device *d) { struct nfc_dev *dev = to_nfc_dev(d); pr_debug("dev_name=%s\n", dev_name(&dev->dev)); + if (dev->ops->check_presence) { + del_timer_sync(&dev->check_pres_timer); + destroy_workqueue(dev->check_pres_wq); + } + nfc_genl_data_exit(&dev->genl_data); kfree(dev->targets); kfree(dev); } +static void nfc_check_pres_work(struct work_struct *work) +{ + struct nfc_dev *dev = container_of(work, struct nfc_dev, + check_pres_work); + int rc; + + device_lock(&dev->dev); + + if (dev->activated_target_idx != NFC_TARGET_IDX_NONE && + timer_pending(&dev->check_pres_timer) == 0) { + rc = dev->ops->check_presence(dev, dev->activated_target_idx); + if (!rc) { + mod_timer(&dev->check_pres_timer, jiffies + + msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); + } else { + nfc_target_lost(dev, dev->activated_target_idx); + dev->activated_target_idx = NFC_TARGET_IDX_NONE; + } + } + + device_unlock(&dev->dev); +} + +static void nfc_check_pres_timeout(unsigned long data) +{ + struct nfc_dev *dev = (struct nfc_dev *)data; + + queue_work(dev->check_pres_wq, &dev->check_pres_work); +} + struct class nfc_class = { .name = "nfc", .dev_release = nfc_release, @@ -475,12 +587,12 @@ EXPORT_SYMBOL(nfc_class); static int match_idx(struct device *d, void *data) { struct nfc_dev *dev = to_nfc_dev(d); - unsigned *idx = data; + unsigned int *idx = data; return dev->idx == *idx; } -struct nfc_dev *nfc_get_device(unsigned idx) +struct nfc_dev *nfc_get_device(unsigned int idx) { struct device *d; @@ -531,6 +643,26 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, /* first generation must not be 0 */ dev->targets_generation = 1; + dev->activated_target_idx = NFC_TARGET_IDX_NONE; + + if (ops->check_presence) { + char name[32]; + init_timer(&dev->check_pres_timer); + dev->check_pres_timer.data = (unsigned long)dev; + dev->check_pres_timer.function = nfc_check_pres_timeout; + + INIT_WORK(&dev->check_pres_work, nfc_check_pres_work); + snprintf(name, sizeof(name), "nfc%d_check_pres_wq", dev->idx); + dev->check_pres_wq = alloc_workqueue(name, WQ_NON_REENTRANT | + WQ_UNBOUND | + WQ_MEM_RECLAIM, 1); + if (dev->check_pres_wq == NULL) { + kfree(dev); + return NULL; + } + } + + return dev; } EXPORT_SYMBOL(nfc_allocate_device); diff --git a/net/nfc/hci/Kconfig b/net/nfc/hci/Kconfig new file mode 100644 index 
00000000000..17213a6362b --- /dev/null +++ b/net/nfc/hci/Kconfig @@ -0,0 +1,16 @@ +config NFC_HCI + depends on NFC + tristate "NFC HCI implementation" + default n + help + Say Y here if you want to build support for a kernel NFC HCI + implementation. This is mostly needed for devices that only process + HCI frames, like for example the NXP pn544. + +config NFC_SHDLC + depends on NFC_HCI + bool "SHDLC link layer for HCI based NFC drivers" + default n + ---help--- + Say yes if you use an NFC HCI driver that requires SHDLC link layer. + If unsure, say N here. diff --git a/net/nfc/hci/Makefile b/net/nfc/hci/Makefile new file mode 100644 index 00000000000..f9c44b2fb06 --- /dev/null +++ b/net/nfc/hci/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the Linux NFC HCI layer. +# + +obj-$(CONFIG_NFC_HCI) += hci.o + +hci-y := core.o hcp.o command.o +hci-$(CONFIG_NFC_SHDLC) += shdlc.o diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c new file mode 100644 index 00000000000..8729abf5f18 --- /dev/null +++ b/net/nfc/hci/command.c @@ -0,0 +1,354 @@ +/* + * Copyright (C) 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#define pr_fmt(fmt) "hci: %s: " fmt, __func__ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/module.h> + +#include <net/nfc/hci.h> + +#include "hci.h" + +static int nfc_hci_result_to_errno(u8 result) +{ + switch (result) { + case NFC_HCI_ANY_OK: + return 0; + case NFC_HCI_ANY_E_TIMEOUT: + return -ETIMEDOUT; + default: + return -1; + } +} + +static void nfc_hci_execute_cb(struct nfc_hci_dev *hdev, u8 result, + struct sk_buff *skb, void *cb_data) +{ + struct hcp_exec_waiter *hcp_ew = (struct hcp_exec_waiter *)cb_data; + + pr_debug("HCI Cmd completed with HCI result=%d\n", result); + + hcp_ew->exec_result = nfc_hci_result_to_errno(result); + if (hcp_ew->exec_result == 0) + hcp_ew->result_skb = skb; + else + kfree_skb(skb); + hcp_ew->exec_complete = true; + + wake_up(hcp_ew->wq); +} + +static int nfc_hci_execute_cmd(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, + const u8 *param, size_t param_len, + struct sk_buff **skb) +{ + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(ew_wq); + struct hcp_exec_waiter hcp_ew; + hcp_ew.wq = &ew_wq; + hcp_ew.exec_complete = false; + hcp_ew.result_skb = NULL; + + pr_debug("through pipe=%d, cmd=%d, plen=%zd\n", pipe, cmd, param_len); + + /* TODO: Define hci cmd execution delay. Should it be the same + * for all commands? 
+ */ + hcp_ew.exec_result = nfc_hci_hcp_message_tx(hdev, pipe, + NFC_HCI_HCP_COMMAND, cmd, + param, param_len, + nfc_hci_execute_cb, &hcp_ew, + 3000); + if (hcp_ew.exec_result < 0) + return hcp_ew.exec_result; + + wait_event(ew_wq, hcp_ew.exec_complete == true); + + if (hcp_ew.exec_result == 0) { + if (skb) + *skb = hcp_ew.result_skb; + else + kfree_skb(hcp_ew.result_skb); + } + + return hcp_ew.exec_result; +} + +int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event, + const u8 *param, size_t param_len) +{ + u8 pipe; + + pr_debug("%d to gate %d\n", event, gate); + + pipe = hdev->gate2pipe[gate]; + if (pipe == NFC_HCI_INVALID_PIPE) + return -EADDRNOTAVAIL; + + return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_EVENT, event, + param, param_len, NULL, NULL, 0); +} +EXPORT_SYMBOL(nfc_hci_send_event); + +int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response, + const u8 *param, size_t param_len) +{ + u8 pipe; + + pr_debug("\n"); + + pipe = hdev->gate2pipe[gate]; + if (pipe == NFC_HCI_INVALID_PIPE) + return -EADDRNOTAVAIL; + + return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_RESPONSE, + response, param, param_len, NULL, NULL, + 0); +} +EXPORT_SYMBOL(nfc_hci_send_response); + +/* + * Execute an hci command sent to gate. + * skb will contain response data if success. skb can be NULL if you are not + * interested by the response. + */ +int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd, + const u8 *param, size_t param_len, struct sk_buff **skb) +{ + u8 pipe; + + pr_debug("\n"); + + pipe = hdev->gate2pipe[gate]; + if (pipe == NFC_HCI_INVALID_PIPE) + return -EADDRNOTAVAIL; + + return nfc_hci_execute_cmd(hdev, pipe, cmd, param, param_len, skb); +} +EXPORT_SYMBOL(nfc_hci_send_cmd); + +int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx, + const u8 *param, size_t param_len) +{ + int r; + u8 *tmp; + + /* TODO ELa: reg idx must be inserted before param, but we don't want + * to ask the caller to do it to keep a simpler API. + * For now, just create a new temporary param buffer. This is far from + * optimal though, and the plan is to modify APIs to pass idx down to + * nfc_hci_hcp_message_tx where the frame is actually built, thereby + * eliminating the need for the temp allocation-copy here. + */ + + pr_debug("idx=%d to gate %d\n", idx, gate); + + tmp = kmalloc(1 + param_len, GFP_KERNEL); + if (tmp == NULL) + return -ENOMEM; + + *tmp = idx; + memcpy(tmp + 1, param, param_len); + + r = nfc_hci_send_cmd(hdev, gate, NFC_HCI_ANY_SET_PARAMETER, + tmp, param_len + 1, NULL); + + kfree(tmp); + + return r; +} +EXPORT_SYMBOL(nfc_hci_set_param); + +int nfc_hci_get_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx, + struct sk_buff **skb) +{ + pr_debug("gate=%d regidx=%d\n", gate, idx); + + return nfc_hci_send_cmd(hdev, gate, NFC_HCI_ANY_GET_PARAMETER, + &idx, 1, skb); +} +EXPORT_SYMBOL(nfc_hci_get_param); + +static int nfc_hci_open_pipe(struct nfc_hci_dev *hdev, u8 pipe) +{ + struct sk_buff *skb; + int r; + + pr_debug("pipe=%d\n", pipe); + + r = nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_OPEN_PIPE, + NULL, 0, &skb); + if (r == 0) { + /* dest host other than host controller will send + * number of pipes already open on this gate before + * execution. 
The number can be found in skb->data[0] + */ + kfree_skb(skb); + } + + return r; +} + +static int nfc_hci_close_pipe(struct nfc_hci_dev *hdev, u8 pipe) +{ + pr_debug("\n"); + + return nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_CLOSE_PIPE, + NULL, 0, NULL); +} + +static u8 nfc_hci_create_pipe(struct nfc_hci_dev *hdev, u8 dest_host, + u8 dest_gate, int *result) +{ + struct sk_buff *skb; + struct hci_create_pipe_params params; + struct hci_create_pipe_resp *resp; + u8 pipe; + + pr_debug("gate=%d\n", dest_gate); + + params.src_gate = NFC_HCI_ADMIN_GATE; + params.dest_host = dest_host; + params.dest_gate = dest_gate; + + *result = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, + NFC_HCI_ADM_CREATE_PIPE, + (u8 *) ¶ms, sizeof(params), &skb); + if (*result == 0) { + resp = (struct hci_create_pipe_resp *)skb->data; + pipe = resp->pipe; + kfree_skb(skb); + + pr_debug("pipe created=%d\n", pipe); + + return pipe; + } else + return NFC_HCI_INVALID_PIPE; +} + +static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe) +{ + pr_debug("\n"); + + return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, + NFC_HCI_ADM_DELETE_PIPE, &pipe, 1, NULL); +} + +static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev) +{ + int r; + + u8 param[2]; + + /* TODO: Find out what the identity reference data is + * and fill param with it. HCI spec 6.1.3.5 */ + + pr_debug("\n"); + + r = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, + NFC_HCI_ADM_CLEAR_ALL_PIPE, param, 2, NULL); + + return 0; +} + +int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate) +{ + int r; + u8 pipe = hdev->gate2pipe[gate]; + + pr_debug("\n"); + + if (pipe == NFC_HCI_INVALID_PIPE) + return -EADDRNOTAVAIL; + + r = nfc_hci_close_pipe(hdev, pipe); + if (r < 0) + return r; + + if (pipe != NFC_HCI_LINK_MGMT_PIPE && pipe != NFC_HCI_ADMIN_PIPE) { + r = nfc_hci_delete_pipe(hdev, pipe); + if (r < 0) + return r; + } + + hdev->gate2pipe[gate] = NFC_HCI_INVALID_PIPE; + + return 0; +} +EXPORT_SYMBOL(nfc_hci_disconnect_gate); + +int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev) +{ + int r; + + pr_debug("\n"); + + r = nfc_hci_clear_all_pipes(hdev); + if (r < 0) + return r; + + memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe)); + + return 0; +} +EXPORT_SYMBOL(nfc_hci_disconnect_all_gates); + +int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate) +{ + u8 pipe = NFC_HCI_INVALID_PIPE; + bool pipe_created = false; + int r; + + pr_debug("\n"); + + if (hdev->gate2pipe[dest_gate] != NFC_HCI_INVALID_PIPE) + return -EADDRINUSE; + + switch (dest_gate) { + case NFC_HCI_LINK_MGMT_GATE: + pipe = NFC_HCI_LINK_MGMT_PIPE; + break; + case NFC_HCI_ADMIN_GATE: + pipe = NFC_HCI_ADMIN_PIPE; + break; + default: + pipe = nfc_hci_create_pipe(hdev, dest_host, dest_gate, &r); + if (pipe == NFC_HCI_INVALID_PIPE) + return r; + pipe_created = true; + break; + } + + r = nfc_hci_open_pipe(hdev, pipe); + if (r < 0) { + if (pipe_created) + if (nfc_hci_delete_pipe(hdev, pipe) < 0) { + /* TODO: Cannot clean by deleting pipe... + * -> inconsistent state */ + } + return r; + } + + hdev->gate2pipe[dest_gate] = pipe; + + return 0; +} +EXPORT_SYMBOL(nfc_hci_connect_gate); diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c new file mode 100644 index 00000000000..86fd00d5a09 --- /dev/null +++ b/net/nfc/hci/core.c @@ -0,0 +1,830 @@ +/* + * Copyright (C) 2012 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#define pr_fmt(fmt) "hci: %s: " fmt, __func__ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/nfc.h> + +#include <net/nfc/nfc.h> +#include <net/nfc/hci.h> + +#include "hci.h" + +/* Largest headroom needed for outgoing HCI commands */ +#define HCI_CMDS_HEADROOM 1 + +static void nfc_hci_msg_tx_work(struct work_struct *work) +{ + struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev, + msg_tx_work); + struct hci_msg *msg; + struct sk_buff *skb; + int r = 0; + + mutex_lock(&hdev->msg_tx_mutex); + + if (hdev->cmd_pending_msg) { + if (timer_pending(&hdev->cmd_timer) == 0) { + if (hdev->cmd_pending_msg->cb) + hdev->cmd_pending_msg->cb(hdev, + NFC_HCI_ANY_E_TIMEOUT, + NULL, + hdev-> + cmd_pending_msg-> + cb_context); + kfree(hdev->cmd_pending_msg); + hdev->cmd_pending_msg = NULL; + } else + goto exit; + } + +next_msg: + if (list_empty(&hdev->msg_tx_queue)) + goto exit; + + msg = list_first_entry(&hdev->msg_tx_queue, struct hci_msg, msg_l); + list_del(&msg->msg_l); + + pr_debug("msg_tx_queue has a cmd to send\n"); + while ((skb = skb_dequeue(&msg->msg_frags)) != NULL) { + r = hdev->ops->xmit(hdev, skb); + if (r < 0) { + kfree_skb(skb); + skb_queue_purge(&msg->msg_frags); + if (msg->cb) + msg->cb(hdev, NFC_HCI_ANY_E_NOK, NULL, + msg->cb_context); + kfree(msg); + break; + } + } + + if (r) + goto next_msg; + + if (msg->wait_response == false) { + kfree(msg); + goto next_msg; + } + + hdev->cmd_pending_msg = msg; + mod_timer(&hdev->cmd_timer, jiffies + + msecs_to_jiffies(hdev->cmd_pending_msg->completion_delay)); + +exit: + mutex_unlock(&hdev->msg_tx_mutex); +} + +static void nfc_hci_msg_rx_work(struct work_struct *work) +{ + struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev, + msg_rx_work); + struct sk_buff *skb; + struct hcp_message *message; + u8 pipe; + u8 type; + u8 instruction; + + while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) { + pipe = skb->data[0]; + skb_pull(skb, NFC_HCI_HCP_PACKET_HEADER_LEN); + message = (struct hcp_message *)skb->data; + type = HCP_MSG_GET_TYPE(message->header); + instruction = HCP_MSG_GET_CMD(message->header); + skb_pull(skb, NFC_HCI_HCP_MESSAGE_HEADER_LEN); + + nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, skb); + } +} + +void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result, + struct sk_buff *skb) +{ + mutex_lock(&hdev->msg_tx_mutex); + + if (hdev->cmd_pending_msg == NULL) { + kfree_skb(skb); + goto exit; + } + + del_timer_sync(&hdev->cmd_timer); + + if (hdev->cmd_pending_msg->cb) + hdev->cmd_pending_msg->cb(hdev, result, skb, + hdev->cmd_pending_msg->cb_context); + else + kfree_skb(skb); + + kfree(hdev->cmd_pending_msg); + hdev->cmd_pending_msg = NULL; + + queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work); + +exit: + 
mutex_unlock(&hdev->msg_tx_mutex); +} + +void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, + struct sk_buff *skb) +{ + kfree_skb(skb); +} + +static u32 nfc_hci_sak_to_protocol(u8 sak) +{ + switch (NFC_HCI_TYPE_A_SEL_PROT(sak)) { + case NFC_HCI_TYPE_A_SEL_PROT_MIFARE: + return NFC_PROTO_MIFARE_MASK; + case NFC_HCI_TYPE_A_SEL_PROT_ISO14443: + return NFC_PROTO_ISO14443_MASK; + case NFC_HCI_TYPE_A_SEL_PROT_DEP: + return NFC_PROTO_NFC_DEP_MASK; + case NFC_HCI_TYPE_A_SEL_PROT_ISO14443_DEP: + return NFC_PROTO_ISO14443_MASK | NFC_PROTO_NFC_DEP_MASK; + default: + return 0xffffffff; + } +} + +static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate) +{ + struct nfc_target *targets; + struct sk_buff *atqa_skb = NULL; + struct sk_buff *sak_skb = NULL; + int r; + + pr_debug("from gate %d\n", gate); + + targets = kzalloc(sizeof(struct nfc_target), GFP_KERNEL); + if (targets == NULL) + return -ENOMEM; + + switch (gate) { + case NFC_HCI_RF_READER_A_GATE: + r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE, + NFC_HCI_RF_READER_A_ATQA, &atqa_skb); + if (r < 0) + goto exit; + + r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE, + NFC_HCI_RF_READER_A_SAK, &sak_skb); + if (r < 0) + goto exit; + + if (atqa_skb->len != 2 || sak_skb->len != 1) { + r = -EPROTO; + goto exit; + } + + targets->supported_protocols = + nfc_hci_sak_to_protocol(sak_skb->data[0]); + if (targets->supported_protocols == 0xffffffff) { + r = -EPROTO; + goto exit; + } + + targets->sens_res = be16_to_cpu(*(u16 *)atqa_skb->data); + targets->sel_res = sak_skb->data[0]; + + if (hdev->ops->complete_target_discovered) { + r = hdev->ops->complete_target_discovered(hdev, gate, + targets); + if (r < 0) + goto exit; + } + break; + case NFC_HCI_RF_READER_B_GATE: + targets->supported_protocols = NFC_PROTO_ISO14443_MASK; + break; + default: + if (hdev->ops->target_from_gate) + r = hdev->ops->target_from_gate(hdev, gate, targets); + else + r = -EPROTO; + if (r < 0) + goto exit; + + if (hdev->ops->complete_target_discovered) { + r = hdev->ops->complete_target_discovered(hdev, gate, + targets); + if (r < 0) + goto exit; + } + break; + } + + targets->hci_reader_gate = gate; + + r = nfc_targets_found(hdev->ndev, targets, 1); + if (r < 0) + goto exit; + + kfree(hdev->targets); + hdev->targets = targets; + targets = NULL; + hdev->target_count = 1; + +exit: + kfree(targets); + kfree_skb(atqa_skb); + kfree_skb(sak_skb); + + return r; +} + +void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, + struct sk_buff *skb) +{ + int r = 0; + + switch (event) { + case NFC_HCI_EVT_TARGET_DISCOVERED: + if (hdev->poll_started == false) { + r = -EPROTO; + goto exit; + } + + if (skb->len < 1) { /* no status data? */ + r = -EPROTO; + goto exit; + } + + if (skb->data[0] == 3) { + /* TODO: Multiple targets in field, none activated + * poll is supposedly stopped, but there is no + * single target to activate, so nothing to report + * up. + * if we need to restart poll, we must save the + * protocols from the initial poll and reuse here. + */ + } + + if (skb->data[0] != 0) { + r = -EPROTO; + goto exit; + } + + r = nfc_hci_target_discovered(hdev, + nfc_hci_pipe2gate(hdev, pipe)); + break; + default: + /* TODO: Unknown events are hardware specific + * pass them to the driver (needs a new hci_ops) */ + break; + } + +exit: + kfree_skb(skb); + + if (r) { + /* TODO: There was an error dispatching the event, + * how to propagate up to nfc core? 
+ */ + } +} + +static void nfc_hci_cmd_timeout(unsigned long data) +{ + struct nfc_hci_dev *hdev = (struct nfc_hci_dev *)data; + + queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work); +} + +static int hci_dev_connect_gates(struct nfc_hci_dev *hdev, u8 gate_count, + u8 gates[]) +{ + int r; + u8 *p = gates; + while (gate_count--) { + r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID, *p); + if (r < 0) + return r; + p++; + } + + return 0; +} + +static int hci_dev_session_init(struct nfc_hci_dev *hdev) +{ + struct sk_buff *skb = NULL; + int r; + u8 hci_gates[] = { /* NFC_HCI_ADMIN_GATE MUST be first */ + NFC_HCI_ADMIN_GATE, NFC_HCI_LOOPBACK_GATE, + NFC_HCI_ID_MGMT_GATE, NFC_HCI_LINK_MGMT_GATE, + NFC_HCI_RF_READER_B_GATE, NFC_HCI_RF_READER_A_GATE + }; + + r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID, + NFC_HCI_ADMIN_GATE); + if (r < 0) + goto exit; + + r = nfc_hci_get_param(hdev, NFC_HCI_ADMIN_GATE, + NFC_HCI_ADMIN_SESSION_IDENTITY, &skb); + if (r < 0) + goto disconnect_all; + + if (skb->len && skb->len == strlen(hdev->init_data.session_id)) + if (memcmp(hdev->init_data.session_id, skb->data, + skb->len) == 0) { + /* TODO ELa: restore gate<->pipe table from + * some TBD location. + * note: it doesn't seem possible to get the chip + * currently open gate/pipe table. + * It is only possible to obtain the supported + * gate list. + */ + + /* goto exit + * For now, always do a full initialization */ + } + + r = nfc_hci_disconnect_all_gates(hdev); + if (r < 0) + goto exit; + + r = hci_dev_connect_gates(hdev, sizeof(hci_gates), hci_gates); + if (r < 0) + goto disconnect_all; + + r = hci_dev_connect_gates(hdev, hdev->init_data.gate_count, + hdev->init_data.gates); + if (r < 0) + goto disconnect_all; + + r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE, + NFC_HCI_ADMIN_SESSION_IDENTITY, + hdev->init_data.session_id, + strlen(hdev->init_data.session_id)); + if (r == 0) + goto exit; + +disconnect_all: + nfc_hci_disconnect_all_gates(hdev); + +exit: + if (skb) + kfree_skb(skb); + + return r; +} + +static int hci_dev_version(struct nfc_hci_dev *hdev) +{ + int r; + struct sk_buff *skb; + + r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE, + NFC_HCI_ID_MGMT_VERSION_SW, &skb); + if (r < 0) + return r; + + if (skb->len != 3) { + kfree_skb(skb); + return -EINVAL; + } + + hdev->sw_romlib = (skb->data[0] & 0xf0) >> 4; + hdev->sw_patch = skb->data[0] & 0x0f; + hdev->sw_flashlib_major = skb->data[1]; + hdev->sw_flashlib_minor = skb->data[2]; + + kfree_skb(skb); + + r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE, + NFC_HCI_ID_MGMT_VERSION_HW, &skb); + if (r < 0) + return r; + + if (skb->len != 3) { + kfree_skb(skb); + return -EINVAL; + } + + hdev->hw_derivative = (skb->data[0] & 0xe0) >> 5; + hdev->hw_version = skb->data[0] & 0x1f; + hdev->hw_mpw = (skb->data[1] & 0xc0) >> 6; + hdev->hw_software = skb->data[1] & 0x3f; + hdev->hw_bsid = skb->data[2]; + + kfree_skb(skb); + + pr_info("SOFTWARE INFO:\n"); + pr_info("RomLib : %d\n", hdev->sw_romlib); + pr_info("Patch : %d\n", hdev->sw_patch); + pr_info("FlashLib Major : %d\n", hdev->sw_flashlib_major); + pr_info("FlashLib Minor : %d\n", hdev->sw_flashlib_minor); + pr_info("HARDWARE INFO:\n"); + pr_info("Derivative : %d\n", hdev->hw_derivative); + pr_info("HW Version : %d\n", hdev->hw_version); + pr_info("#MPW : %d\n", hdev->hw_mpw); + pr_info("Software : %d\n", hdev->hw_software); + pr_info("BSID Version : %d\n", hdev->hw_bsid); + + return 0; +} + +static int hci_dev_up(struct nfc_dev *nfc_dev) +{ + struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); + 
int r = 0; + + if (hdev->ops->open) { + r = hdev->ops->open(hdev); + if (r < 0) + return r; + } + + r = hci_dev_session_init(hdev); + if (r < 0) + goto exit; + + r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, + NFC_HCI_EVT_END_OPERATION, NULL, 0); + if (r < 0) + goto exit; + + if (hdev->ops->hci_ready) { + r = hdev->ops->hci_ready(hdev); + if (r < 0) + goto exit; + } + + r = hci_dev_version(hdev); + if (r < 0) + goto exit; + +exit: + if (r < 0) + if (hdev->ops->close) + hdev->ops->close(hdev); + return r; +} + +static int hci_dev_down(struct nfc_dev *nfc_dev) +{ + struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); + + if (hdev->ops->close) + hdev->ops->close(hdev); + + memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe)); + + return 0; +} + +static int hci_start_poll(struct nfc_dev *nfc_dev, u32 protocols) +{ + struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); + int r; + + if (hdev->ops->start_poll) + r = hdev->ops->start_poll(hdev, protocols); + else + r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, + NFC_HCI_EVT_READER_REQUESTED, NULL, 0); + if (r == 0) + hdev->poll_started = true; + + return r; +} + +static void hci_stop_poll(struct nfc_dev *nfc_dev) +{ + struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); + + if (hdev->poll_started) { + nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, + NFC_HCI_EVT_END_OPERATION, NULL, 0); + hdev->poll_started = false; + } +} + +static struct nfc_target *hci_find_target(struct nfc_hci_dev *hdev, + u32 target_idx) +{ + int i; + if (hdev->poll_started == false || hdev->targets == NULL) + return NULL; + + for (i = 0; i < hdev->target_count; i++) { + if (hdev->targets[i].idx == target_idx) + return &hdev->targets[i]; + } + + return NULL; +} + +static int hci_activate_target(struct nfc_dev *nfc_dev, u32 target_idx, + u32 protocol) +{ + struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); + + if (hci_find_target(hdev, target_idx) == NULL) + return -ENOMEDIUM; + + return 0; +} + +static void hci_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx) +{ +} + +static int hci_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx, + struct sk_buff *skb, data_exchange_cb_t cb, + void *cb_context) +{ + struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); + int r; + struct nfc_target *target; + struct sk_buff *res_skb = NULL; + + pr_debug("target_idx=%d\n", target_idx); + + target = hci_find_target(hdev, target_idx); + if (target == NULL) + return -ENOMEDIUM; + + switch (target->hci_reader_gate) { + case NFC_HCI_RF_READER_A_GATE: + case NFC_HCI_RF_READER_B_GATE: + if (hdev->ops->data_exchange) { + r = hdev->ops->data_exchange(hdev, target, skb, + &res_skb); + if (r <= 0) /* handled */ + break; + } + + *skb_push(skb, 1) = 0; /* CTR, see spec:10.2.2.1 */ + r = nfc_hci_send_cmd(hdev, target->hci_reader_gate, + NFC_HCI_WR_XCHG_DATA, + skb->data, skb->len, &res_skb); + /* + * TODO: Check RF Error indicator to make sure data is valid. + * It seems that HCI cmd can complete without error, but data + * can be invalid if an RF error occured? Ignore for now. 
+ */ + if (r == 0) + skb_trim(res_skb, res_skb->len - 1); /* RF Err ind */ + break; + default: + if (hdev->ops->data_exchange) { + r = hdev->ops->data_exchange(hdev, target, skb, + &res_skb); + if (r == 1) + r = -ENOTSUPP; + } + else + r = -ENOTSUPP; + } + + kfree_skb(skb); + + cb(cb_context, res_skb, r); + + return 0; +} + +struct nfc_ops hci_nfc_ops = { + .dev_up = hci_dev_up, + .dev_down = hci_dev_down, + .start_poll = hci_start_poll, + .stop_poll = hci_stop_poll, + .activate_target = hci_activate_target, + .deactivate_target = hci_deactivate_target, + .data_exchange = hci_data_exchange, +}; + +struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, + struct nfc_hci_init_data *init_data, + u32 protocols, + int tx_headroom, + int tx_tailroom, + int max_link_payload) +{ + struct nfc_hci_dev *hdev; + + if (ops->xmit == NULL) + return NULL; + + if (protocols == 0) + return NULL; + + hdev = kzalloc(sizeof(struct nfc_hci_dev), GFP_KERNEL); + if (hdev == NULL) + return NULL; + + hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols, + tx_headroom + HCI_CMDS_HEADROOM, + tx_tailroom); + if (!hdev->ndev) { + kfree(hdev); + return NULL; + } + + hdev->ops = ops; + hdev->max_data_link_payload = max_link_payload; + hdev->init_data = *init_data; + + nfc_set_drvdata(hdev->ndev, hdev); + + memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe)); + + return hdev; +} +EXPORT_SYMBOL(nfc_hci_allocate_device); + +void nfc_hci_free_device(struct nfc_hci_dev *hdev) +{ + nfc_free_device(hdev->ndev); + kfree(hdev); +} +EXPORT_SYMBOL(nfc_hci_free_device); + +int nfc_hci_register_device(struct nfc_hci_dev *hdev) +{ + struct device *dev = &hdev->ndev->dev; + const char *devname = dev_name(dev); + char name[32]; + int r = 0; + + mutex_init(&hdev->msg_tx_mutex); + + INIT_LIST_HEAD(&hdev->msg_tx_queue); + + INIT_WORK(&hdev->msg_tx_work, nfc_hci_msg_tx_work); + snprintf(name, sizeof(name), "%s_hci_msg_tx_wq", devname); + hdev->msg_tx_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND | + WQ_MEM_RECLAIM, 1); + if (hdev->msg_tx_wq == NULL) { + r = -ENOMEM; + goto exit; + } + + init_timer(&hdev->cmd_timer); + hdev->cmd_timer.data = (unsigned long)hdev; + hdev->cmd_timer.function = nfc_hci_cmd_timeout; + + skb_queue_head_init(&hdev->rx_hcp_frags); + + INIT_WORK(&hdev->msg_rx_work, nfc_hci_msg_rx_work); + snprintf(name, sizeof(name), "%s_hci_msg_rx_wq", devname); + hdev->msg_rx_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND | + WQ_MEM_RECLAIM, 1); + if (hdev->msg_rx_wq == NULL) { + r = -ENOMEM; + goto exit; + } + + skb_queue_head_init(&hdev->msg_rx_queue); + + r = nfc_register_device(hdev->ndev); + +exit: + if (r < 0) { + if (hdev->msg_tx_wq) + destroy_workqueue(hdev->msg_tx_wq); + if (hdev->msg_rx_wq) + destroy_workqueue(hdev->msg_rx_wq); + } + + return r; +} +EXPORT_SYMBOL(nfc_hci_register_device); + +void nfc_hci_unregister_device(struct nfc_hci_dev *hdev) +{ + struct hci_msg *msg; + + skb_queue_purge(&hdev->rx_hcp_frags); + skb_queue_purge(&hdev->msg_rx_queue); + + while ((msg = list_first_entry(&hdev->msg_tx_queue, struct hci_msg, + msg_l)) != NULL) { + list_del(&msg->msg_l); + skb_queue_purge(&msg->msg_frags); + kfree(msg); + } + + del_timer_sync(&hdev->cmd_timer); + + nfc_unregister_device(hdev->ndev); + + destroy_workqueue(hdev->msg_tx_wq); + + destroy_workqueue(hdev->msg_rx_wq); +} +EXPORT_SYMBOL(nfc_hci_unregister_device); + +void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata) +{ + hdev->clientdata = clientdata; +} 
+EXPORT_SYMBOL(nfc_hci_set_clientdata); + +void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev) +{ + return hdev->clientdata; +} +EXPORT_SYMBOL(nfc_hci_get_clientdata); + +void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb) +{ + struct hcp_packet *packet; + u8 type; + u8 instruction; + struct sk_buff *hcp_skb; + u8 pipe; + struct sk_buff *frag_skb; + int msg_len; + + if (skb == NULL) { + /* TODO ELa: lower layer had permanent failure, need to + * propagate that up + */ + + skb_queue_purge(&hdev->rx_hcp_frags); + + return; + } + + packet = (struct hcp_packet *)skb->data; + if ((packet->header & ~NFC_HCI_FRAGMENT) == 0) { + skb_queue_tail(&hdev->rx_hcp_frags, skb); + return; + } + + /* it's the last fragment. Does it need re-aggregation? */ + if (skb_queue_len(&hdev->rx_hcp_frags)) { + pipe = packet->header & NFC_HCI_FRAGMENT; + skb_queue_tail(&hdev->rx_hcp_frags, skb); + + msg_len = 0; + skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) { + msg_len += (frag_skb->len - + NFC_HCI_HCP_PACKET_HEADER_LEN); + } + + hcp_skb = nfc_alloc_recv_skb(NFC_HCI_HCP_PACKET_HEADER_LEN + + msg_len, GFP_KERNEL); + if (hcp_skb == NULL) { + /* TODO ELa: cannot deliver HCP message. How to + * propagate error up? + */ + } + + *skb_put(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN) = pipe; + + skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) { + msg_len = frag_skb->len - NFC_HCI_HCP_PACKET_HEADER_LEN; + memcpy(skb_put(hcp_skb, msg_len), + frag_skb->data + NFC_HCI_HCP_PACKET_HEADER_LEN, + msg_len); + } + + skb_queue_purge(&hdev->rx_hcp_frags); + } else { + packet->header &= NFC_HCI_FRAGMENT; + hcp_skb = skb; + } + + /* if this is a response, dispatch immediately to + * unblock waiting cmd context. Otherwise, enqueue to dispatch + * in separate context where handler can also execute command. + */ + packet = (struct hcp_packet *)hcp_skb->data; + type = HCP_MSG_GET_TYPE(packet->message.header); + if (type == NFC_HCI_HCP_RESPONSE) { + pipe = packet->header; + instruction = HCP_MSG_GET_CMD(packet->message.header); + skb_pull(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN + + NFC_HCI_HCP_MESSAGE_HEADER_LEN); + nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, hcp_skb); + } else { + skb_queue_tail(&hdev->msg_rx_queue, hcp_skb); + queue_work(hdev->msg_rx_wq, &hdev->msg_rx_work); + } +} +EXPORT_SYMBOL(nfc_hci_recv_frame); + +MODULE_LICENSE("GPL"); diff --git a/net/nfc/hci/hci.h b/net/nfc/hci/hci.h new file mode 100644 index 00000000000..45f2fe4fd48 --- /dev/null +++ b/net/nfc/hci/hci.h @@ -0,0 +1,139 @@ +/* + * Copyright (C) 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef __LOCAL_HCI_H +#define __LOCAL_HCI_H + +struct gate_pipe_map { + u8 gate; + u8 pipe; +}; + +struct hcp_message { + u8 header; /* type -cmd,evt,rsp- + instruction */ + u8 data[]; +} __packed; + +struct hcp_packet { + u8 header; /* cbit+pipe */ + struct hcp_message message; +} __packed; + +/* + * HCI command execution completion callback. + * result will be one of the HCI response codes. + * skb contains the response data and must be disposed. + */ +typedef void (*hci_cmd_cb_t) (struct nfc_hci_dev *hdev, u8 result, + struct sk_buff *skb, void *cb_data); + +struct hcp_exec_waiter { + wait_queue_head_t *wq; + bool exec_complete; + int exec_result; + struct sk_buff *result_skb; +}; + +struct hci_msg { + struct list_head msg_l; + struct sk_buff_head msg_frags; + bool wait_response; + hci_cmd_cb_t cb; + void *cb_context; + unsigned long completion_delay; +}; + +struct hci_create_pipe_params { + u8 src_gate; + u8 dest_host; + u8 dest_gate; +} __packed; + +struct hci_create_pipe_resp { + u8 src_host; + u8 src_gate; + u8 dest_host; + u8 dest_gate; + u8 pipe; +} __packed; + +#define NFC_HCI_FRAGMENT 0x7f + +#define HCP_HEADER(type, instr) ((((type) & 0x03) << 6) | ((instr) & 0x3f)) +#define HCP_MSG_GET_TYPE(header) ((header & 0xc0) >> 6) +#define HCP_MSG_GET_CMD(header) (header & 0x3f) + +int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe, + u8 type, u8 instruction, + const u8 *payload, size_t payload_len, + hci_cmd_cb_t cb, void *cb_data, + unsigned long completion_delay); + +u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe); + +void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type, + u8 instruction, struct sk_buff *skb); + +/* HCP headers */ +#define NFC_HCI_HCP_PACKET_HEADER_LEN 1 +#define NFC_HCI_HCP_MESSAGE_HEADER_LEN 1 +#define NFC_HCI_HCP_HEADER_LEN 2 + +/* HCP types */ +#define NFC_HCI_HCP_COMMAND 0x00 +#define NFC_HCI_HCP_EVENT 0x01 +#define NFC_HCI_HCP_RESPONSE 0x02 + +/* Generic commands */ +#define NFC_HCI_ANY_SET_PARAMETER 0x01 +#define NFC_HCI_ANY_GET_PARAMETER 0x02 +#define NFC_HCI_ANY_OPEN_PIPE 0x03 +#define NFC_HCI_ANY_CLOSE_PIPE 0x04 + +/* Reader RF commands */ +#define NFC_HCI_WR_XCHG_DATA 0x10 + +/* Admin commands */ +#define NFC_HCI_ADM_CREATE_PIPE 0x10 +#define NFC_HCI_ADM_DELETE_PIPE 0x11 +#define NFC_HCI_ADM_NOTIFY_PIPE_CREATED 0x12 +#define NFC_HCI_ADM_NOTIFY_PIPE_DELETED 0x13 +#define NFC_HCI_ADM_CLEAR_ALL_PIPE 0x14 +#define NFC_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED 0x15 + +/* Generic responses */ +#define NFC_HCI_ANY_OK 0x00 +#define NFC_HCI_ANY_E_NOT_CONNECTED 0x01 +#define NFC_HCI_ANY_E_CMD_PAR_UNKNOWN 0x02 +#define NFC_HCI_ANY_E_NOK 0x03 +#define NFC_HCI_ANY_E_PIPES_FULL 0x04 +#define NFC_HCI_ANY_E_REG_PAR_UNKNOWN 0x05 +#define NFC_HCI_ANY_E_PIPE_NOT_OPENED 0x06 +#define NFC_HCI_ANY_E_CMD_NOT_SUPPORTED 0x07 +#define NFC_HCI_ANY_E_INHIBITED 0x08 +#define NFC_HCI_ANY_E_TIMEOUT 0x09 +#define NFC_HCI_ANY_E_REG_ACCESS_DENIED 0x0a +#define NFC_HCI_ANY_E_PIPE_ACCESS_DENIED 0x0b + +/* Pipes */ +#define NFC_HCI_INVALID_PIPE 0x80 +#define NFC_HCI_LINK_MGMT_PIPE 0x00 +#define NFC_HCI_ADMIN_PIPE 0x01 + +#endif /* __LOCAL_HCI_H */ diff --git a/net/nfc/hci/hcp.c b/net/nfc/hci/hcp.c new file mode 100644 index 00000000000..7212cf2c578 --- /dev/null +++ b/net/nfc/hci/hcp.c @@ -0,0 +1,156 @@ +/* + * Copyright (C) 2012 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#define pr_fmt(fmt) "hci: %s: " fmt, __func__ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> + +#include <net/nfc/hci.h> + +#include "hci.h" + +/* + * Payload is the HCP message data only. Instruction will be prepended. + * Guarantees that cb will be called upon completion or timeout delay + * counted from the moment the cmd is sent to the transport. + */ +int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe, + u8 type, u8 instruction, + const u8 *payload, size_t payload_len, + hci_cmd_cb_t cb, void *cb_data, + unsigned long completion_delay) +{ + struct nfc_dev *ndev = hdev->ndev; + struct hci_msg *cmd; + const u8 *ptr = payload; + int hci_len, err; + bool firstfrag = true; + + cmd = kzalloc(sizeof(struct hci_msg), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + INIT_LIST_HEAD(&cmd->msg_l); + skb_queue_head_init(&cmd->msg_frags); + cmd->wait_response = (type == NFC_HCI_HCP_COMMAND) ? true : false; + cmd->cb = cb; + cmd->cb_context = cb_data; + cmd->completion_delay = completion_delay; + + hci_len = payload_len + 1; + while (hci_len > 0) { + struct sk_buff *skb; + int skb_len, data_link_len; + struct hcp_packet *packet; + + if (NFC_HCI_HCP_PACKET_HEADER_LEN + hci_len <= + hdev->max_data_link_payload) + data_link_len = hci_len; + else + data_link_len = hdev->max_data_link_payload - + NFC_HCI_HCP_PACKET_HEADER_LEN; + + skb_len = ndev->tx_headroom + NFC_HCI_HCP_PACKET_HEADER_LEN + + data_link_len + ndev->tx_tailroom; + hci_len -= data_link_len; + + skb = alloc_skb(skb_len, GFP_KERNEL); + if (skb == NULL) { + err = -ENOMEM; + goto out_skb_err; + } + skb_reserve(skb, ndev->tx_headroom); + + skb_put(skb, NFC_HCI_HCP_PACKET_HEADER_LEN + data_link_len); + + /* Only the last fragment will have the cb bit set to 1 */ + packet = (struct hcp_packet *)skb->data; + packet->header = pipe; + if (firstfrag) { + firstfrag = false; + packet->message.header = HCP_HEADER(type, instruction); + if (ptr) { + memcpy(packet->message.data, ptr, + data_link_len - 1); + ptr += data_link_len - 1; + } + } else { + memcpy(&packet->message, ptr, data_link_len); + ptr += data_link_len; + } + + /* This is the last fragment, set the cb bit */ + if (hci_len == 0) + packet->header |= ~NFC_HCI_FRAGMENT; + + skb_queue_tail(&cmd->msg_frags, skb); + } + + mutex_lock(&hdev->msg_tx_mutex); + list_add_tail(&hdev->msg_tx_queue, &cmd->msg_l); + mutex_unlock(&hdev->msg_tx_mutex); + + queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work); + + return 0; + +out_skb_err: + skb_queue_purge(&cmd->msg_frags); + kfree(cmd); + + return err; +} + +u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe) +{ + int gate; + + for (gate = 0; gate < NFC_HCI_MAX_GATES; gate++) + if (hdev->gate2pipe[gate] == pipe) + return gate; + + return 0xff; +} + +/* + * Receive hcp message for 
pipe, with type and cmd. + * skb contains optional message data only. + */ +void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type, + u8 instruction, struct sk_buff *skb) +{ + switch (type) { + case NFC_HCI_HCP_RESPONSE: + nfc_hci_resp_received(hdev, instruction, skb); + break; + case NFC_HCI_HCP_COMMAND: + nfc_hci_cmd_received(hdev, pipe, instruction, skb); + break; + case NFC_HCI_HCP_EVENT: + nfc_hci_event_received(hdev, pipe, instruction, skb); + break; + default: + pr_err("UNKNOWN MSG Type %d, instruction=%d\n", + type, instruction); + kfree_skb(skb); + break; + } +} diff --git a/net/nfc/hci/shdlc.c b/net/nfc/hci/shdlc.c new file mode 100644 index 00000000000..923bdf7c26d --- /dev/null +++ b/net/nfc/hci/shdlc.c @@ -0,0 +1,945 @@ +/* + * Copyright (C) 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#define pr_fmt(fmt) "shdlc: %s: " fmt, __func__ + +#include <linux/sched.h> +#include <linux/export.h> +#include <linux/wait.h> +#include <linux/crc-ccitt.h> +#include <linux/slab.h> +#include <linux/skbuff.h> + +#include <net/nfc/hci.h> +#include <net/nfc/shdlc.h> + +#define SHDLC_LLC_HEAD_ROOM 2 +#define SHDLC_LLC_TAIL_ROOM 2 + +#define SHDLC_MAX_WINDOW 4 +#define SHDLC_SREJ_SUPPORT false + +#define SHDLC_CONTROL_HEAD_MASK 0xe0 +#define SHDLC_CONTROL_HEAD_I 0x80 +#define SHDLC_CONTROL_HEAD_I2 0xa0 +#define SHDLC_CONTROL_HEAD_S 0xc0 +#define SHDLC_CONTROL_HEAD_U 0xe0 + +#define SHDLC_CONTROL_NS_MASK 0x38 +#define SHDLC_CONTROL_NR_MASK 0x07 +#define SHDLC_CONTROL_TYPE_MASK 0x18 + +#define SHDLC_CONTROL_M_MASK 0x1f + +enum sframe_type { + S_FRAME_RR = 0x00, + S_FRAME_REJ = 0x01, + S_FRAME_RNR = 0x02, + S_FRAME_SREJ = 0x03 +}; + +enum uframe_modifier { + U_FRAME_UA = 0x06, + U_FRAME_RSET = 0x19 +}; + +#define SHDLC_CONNECT_VALUE_MS 5 +#define SHDLC_T1_VALUE_MS(w) ((5 * w) / 4) +#define SHDLC_T2_VALUE_MS 300 + +#define SHDLC_DUMP_SKB(info, skb) \ +do { \ + pr_debug("%s:\n", info); \ + print_hex_dump(KERN_DEBUG, "shdlc: ", DUMP_PREFIX_OFFSET, \ + 16, 1, skb->data, skb->len, 0); \ +} while (0) + +/* checks x < y <= z modulo 8 */ +static bool nfc_shdlc_x_lt_y_lteq_z(int x, int y, int z) +{ + if (x < z) + return ((x < y) && (y <= z)) ? true : false; + else + return ((y > x) || (y <= z)) ? true : false; +} + +/* checks x <= y < z modulo 8 */ +static bool nfc_shdlc_x_lteq_y_lt_z(int x, int y, int z) +{ + if (x <= z) + return ((x <= y) && (y < z)) ? true : false; + else /* x > z -> z+8 > x */ + return ((y >= x) || (y < z)) ? 
true : false; +} + +static struct sk_buff *nfc_shdlc_alloc_skb(struct nfc_shdlc *shdlc, + int payload_len) +{ + struct sk_buff *skb; + + skb = alloc_skb(shdlc->client_headroom + SHDLC_LLC_HEAD_ROOM + + shdlc->client_tailroom + SHDLC_LLC_TAIL_ROOM + + payload_len, GFP_KERNEL); + if (skb) + skb_reserve(skb, shdlc->client_headroom + SHDLC_LLC_HEAD_ROOM); + + return skb; +} + +static void nfc_shdlc_add_len_crc(struct sk_buff *skb) +{ + u16 crc; + int len; + + len = skb->len + 2; + *skb_push(skb, 1) = len; + + crc = crc_ccitt(0xffff, skb->data, skb->len); + crc = ~crc; + *skb_put(skb, 1) = crc & 0xff; + *skb_put(skb, 1) = crc >> 8; +} + +/* immediately sends an S frame. */ +static int nfc_shdlc_send_s_frame(struct nfc_shdlc *shdlc, + enum sframe_type sframe_type, int nr) +{ + int r; + struct sk_buff *skb; + + pr_debug("sframe_type=%d nr=%d\n", sframe_type, nr); + + skb = nfc_shdlc_alloc_skb(shdlc, 0); + if (skb == NULL) + return -ENOMEM; + + *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr; + + nfc_shdlc_add_len_crc(skb); + + r = shdlc->ops->xmit(shdlc, skb); + + kfree_skb(skb); + + return r; +} + +/* immediately sends an U frame. skb may contain optional payload */ +static int nfc_shdlc_send_u_frame(struct nfc_shdlc *shdlc, + struct sk_buff *skb, + enum uframe_modifier uframe_modifier) +{ + int r; + + pr_debug("uframe_modifier=%d\n", uframe_modifier); + + *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier; + + nfc_shdlc_add_len_crc(skb); + + r = shdlc->ops->xmit(shdlc, skb); + + kfree_skb(skb); + + return r; +} + +/* + * Free ack_pending frames until y_nr - 1, and reset t2 according to + * the remaining oldest ack_pending frame sent time + */ +static void nfc_shdlc_reset_t2(struct nfc_shdlc *shdlc, int y_nr) +{ + struct sk_buff *skb; + int dnr = shdlc->dnr; /* MUST initially be < y_nr */ + + pr_debug("release ack pending up to frame %d excluded\n", y_nr); + + while (dnr != y_nr) { + pr_debug("release ack pending frame %d\n", dnr); + + skb = skb_dequeue(&shdlc->ack_pending_q); + kfree_skb(skb); + + dnr = (dnr + 1) % 8; + } + + if (skb_queue_empty(&shdlc->ack_pending_q)) { + if (shdlc->t2_active) { + del_timer_sync(&shdlc->t2_timer); + shdlc->t2_active = false; + + pr_debug + ("All sent frames acked. Stopped T2(retransmit)\n"); + } + } else { + skb = skb_peek(&shdlc->ack_pending_q); + + mod_timer(&shdlc->t2_timer, *(unsigned long *)skb->cb + + msecs_to_jiffies(SHDLC_T2_VALUE_MS)); + shdlc->t2_active = true; + + pr_debug + ("Start T2(retransmit) for remaining unacked sent frames\n"); + } +} + +/* + * Receive validated frames from lower layer. skb contains HCI payload only. 
+ * Handle according to algorithm at spec:10.8.2 + */ +static void nfc_shdlc_rcv_i_frame(struct nfc_shdlc *shdlc, + struct sk_buff *skb, int ns, int nr) +{ + int x_ns = ns; + int y_nr = nr; + + pr_debug("recvd I-frame %d, remote waiting frame %d\n", ns, nr); + + if (shdlc->state != SHDLC_CONNECTED) + goto exit; + + if (x_ns != shdlc->nr) { + nfc_shdlc_send_s_frame(shdlc, S_FRAME_REJ, shdlc->nr); + goto exit; + } + + if (shdlc->t1_active == false) { + shdlc->t1_active = true; + mod_timer(&shdlc->t1_timer, + msecs_to_jiffies(SHDLC_T1_VALUE_MS(shdlc->w))); + pr_debug("(re)Start T1(send ack)\n"); + } + + if (skb->len) { + nfc_hci_recv_frame(shdlc->hdev, skb); + skb = NULL; + } + + shdlc->nr = (shdlc->nr + 1) % 8; + + if (nfc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) { + nfc_shdlc_reset_t2(shdlc, y_nr); + + shdlc->dnr = y_nr; + } + +exit: + if (skb) + kfree_skb(skb); +} + +static void nfc_shdlc_rcv_ack(struct nfc_shdlc *shdlc, int y_nr) +{ + pr_debug("remote acked up to frame %d excluded\n", y_nr); + + if (nfc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) { + nfc_shdlc_reset_t2(shdlc, y_nr); + shdlc->dnr = y_nr; + } +} + +static void nfc_shdlc_requeue_ack_pending(struct nfc_shdlc *shdlc) +{ + struct sk_buff *skb; + + pr_debug("ns reset to %d\n", shdlc->dnr); + + while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) { + skb_pull(skb, 2); /* remove len+control */ + skb_trim(skb, skb->len - 2); /* remove crc */ + skb_queue_head(&shdlc->send_q, skb); + } + shdlc->ns = shdlc->dnr; +} + +static void nfc_shdlc_rcv_rej(struct nfc_shdlc *shdlc, int y_nr) +{ + struct sk_buff *skb; + + pr_debug("remote asks retransmition from frame %d\n", y_nr); + + if (nfc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) { + if (shdlc->t2_active) { + del_timer_sync(&shdlc->t2_timer); + shdlc->t2_active = false; + pr_debug("Stopped T2(retransmit)\n"); + } + + if (shdlc->dnr != y_nr) { + while ((shdlc->dnr = ((shdlc->dnr + 1) % 8)) != y_nr) { + skb = skb_dequeue(&shdlc->ack_pending_q); + kfree_skb(skb); + } + } + + nfc_shdlc_requeue_ack_pending(shdlc); + } +} + +/* See spec RR:10.8.3 REJ:10.8.4 */ +static void nfc_shdlc_rcv_s_frame(struct nfc_shdlc *shdlc, + enum sframe_type s_frame_type, int nr) +{ + struct sk_buff *skb; + + if (shdlc->state != SHDLC_CONNECTED) + return; + + switch (s_frame_type) { + case S_FRAME_RR: + nfc_shdlc_rcv_ack(shdlc, nr); + if (shdlc->rnr == true) { /* see SHDLC 10.7.7 */ + shdlc->rnr = false; + if (shdlc->send_q.qlen == 0) { + skb = nfc_shdlc_alloc_skb(shdlc, 0); + if (skb) + skb_queue_tail(&shdlc->send_q, skb); + } + } + break; + case S_FRAME_REJ: + nfc_shdlc_rcv_rej(shdlc, nr); + break; + case S_FRAME_RNR: + nfc_shdlc_rcv_ack(shdlc, nr); + shdlc->rnr = true; + break; + default: + break; + } +} + +static void nfc_shdlc_connect_complete(struct nfc_shdlc *shdlc, int r) +{ + pr_debug("result=%d\n", r); + + del_timer_sync(&shdlc->connect_timer); + + if (r == 0) { + shdlc->ns = 0; + shdlc->nr = 0; + shdlc->dnr = 0; + + shdlc->state = SHDLC_CONNECTED; + } else { + shdlc->state = SHDLC_DISCONNECTED; + + /* + * TODO: Could it be possible that there are pending + * executing commands that are waiting for connect to complete + * before they can be carried? As connect is a blocking + * operation, it would require that the userspace process can + * send commands on the same device from a second thread before + * the device is up. I don't think that is possible, is it? 
+ */ + } + + shdlc->connect_result = r; + + wake_up(shdlc->connect_wq); +} + +static int nfc_shdlc_connect_initiate(struct nfc_shdlc *shdlc) +{ + struct sk_buff *skb; + + pr_debug("\n"); + + skb = nfc_shdlc_alloc_skb(shdlc, 2); + if (skb == NULL) + return -ENOMEM; + + *skb_put(skb, 1) = SHDLC_MAX_WINDOW; + *skb_put(skb, 1) = SHDLC_SREJ_SUPPORT ? 1 : 0; + + return nfc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET); +} + +static int nfc_shdlc_connect_send_ua(struct nfc_shdlc *shdlc) +{ + struct sk_buff *skb; + + pr_debug("\n"); + + skb = nfc_shdlc_alloc_skb(shdlc, 0); + if (skb == NULL) + return -ENOMEM; + + return nfc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA); +} + +static void nfc_shdlc_rcv_u_frame(struct nfc_shdlc *shdlc, + struct sk_buff *skb, + enum uframe_modifier u_frame_modifier) +{ + u8 w = SHDLC_MAX_WINDOW; + bool srej_support = SHDLC_SREJ_SUPPORT; + int r; + + pr_debug("u_frame_modifier=%d\n", u_frame_modifier); + + switch (u_frame_modifier) { + case U_FRAME_RSET: + if (shdlc->state == SHDLC_NEGOCIATING) { + /* we sent RSET, but chip wants to negociate */ + if (skb->len > 0) + w = skb->data[0]; + + if (skb->len > 1) + srej_support = skb->data[1] & 0x01 ? true : + false; + + if ((w <= SHDLC_MAX_WINDOW) && + (SHDLC_SREJ_SUPPORT || (srej_support == false))) { + shdlc->w = w; + shdlc->srej_support = srej_support; + r = nfc_shdlc_connect_send_ua(shdlc); + nfc_shdlc_connect_complete(shdlc, r); + } + } else if (shdlc->state > SHDLC_NEGOCIATING) { + /* + * TODO: Chip wants to reset link + * send ua, empty skb lists, reset counters + * propagate info to HCI layer + */ + } + break; + case U_FRAME_UA: + if ((shdlc->state == SHDLC_CONNECTING && + shdlc->connect_tries > 0) || + (shdlc->state == SHDLC_NEGOCIATING)) + nfc_shdlc_connect_complete(shdlc, 0); + break; + default: + break; + } + + kfree_skb(skb); +} + +static void nfc_shdlc_handle_rcv_queue(struct nfc_shdlc *shdlc) +{ + struct sk_buff *skb; + u8 control; + int nr; + int ns; + enum sframe_type s_frame_type; + enum uframe_modifier u_frame_modifier; + + if (shdlc->rcv_q.qlen) + pr_debug("rcvQlen=%d\n", shdlc->rcv_q.qlen); + + while ((skb = skb_dequeue(&shdlc->rcv_q)) != NULL) { + control = skb->data[0]; + skb_pull(skb, 1); + switch (control & SHDLC_CONTROL_HEAD_MASK) { + case SHDLC_CONTROL_HEAD_I: + case SHDLC_CONTROL_HEAD_I2: + ns = (control & SHDLC_CONTROL_NS_MASK) >> 3; + nr = control & SHDLC_CONTROL_NR_MASK; + nfc_shdlc_rcv_i_frame(shdlc, skb, ns, nr); + break; + case SHDLC_CONTROL_HEAD_S: + s_frame_type = (control & SHDLC_CONTROL_TYPE_MASK) >> 3; + nr = control & SHDLC_CONTROL_NR_MASK; + nfc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr); + kfree_skb(skb); + break; + case SHDLC_CONTROL_HEAD_U: + u_frame_modifier = control & SHDLC_CONTROL_M_MASK; + nfc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier); + break; + default: + pr_err("UNKNOWN Control=%d\n", control); + kfree_skb(skb); + break; + } + } +} + +static int nfc_shdlc_w_used(int ns, int dnr) +{ + int unack_count; + + if (dnr <= ns) + unack_count = ns - dnr; + else + unack_count = 8 - dnr + ns; + + return unack_count; +} + +/* Send frames according to algorithm at spec:10.8.1 */ +static void nfc_shdlc_handle_send_queue(struct nfc_shdlc *shdlc) +{ + struct sk_buff *skb; + int r; + unsigned long time_sent; + + if (shdlc->send_q.qlen) + pr_debug + ("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n", + shdlc->send_q.qlen, shdlc->ns, shdlc->dnr, + shdlc->rnr == false ? 
"false" : "true", + shdlc->w - nfc_shdlc_w_used(shdlc->ns, shdlc->dnr), + shdlc->ack_pending_q.qlen); + + while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w && + (shdlc->rnr == false)) { + + if (shdlc->t1_active) { + del_timer_sync(&shdlc->t1_timer); + shdlc->t1_active = false; + pr_debug("Stopped T1(send ack)\n"); + } + + skb = skb_dequeue(&shdlc->send_q); + + *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_I | (shdlc->ns << 3) | + shdlc->nr; + + pr_debug("Sending I-Frame %d, waiting to rcv %d\n", shdlc->ns, + shdlc->nr); + /* SHDLC_DUMP_SKB("shdlc frame written", skb); */ + + nfc_shdlc_add_len_crc(skb); + + r = shdlc->ops->xmit(shdlc, skb); + if (r < 0) { + /* + * TODO: Cannot send, shdlc machine is dead, we + * must propagate the information up to HCI. + */ + shdlc->hard_fault = r; + break; + } + + shdlc->ns = (shdlc->ns + 1) % 8; + + time_sent = jiffies; + *(unsigned long *)skb->cb = time_sent; + + skb_queue_tail(&shdlc->ack_pending_q, skb); + + if (shdlc->t2_active == false) { + shdlc->t2_active = true; + mod_timer(&shdlc->t2_timer, time_sent + + msecs_to_jiffies(SHDLC_T2_VALUE_MS)); + pr_debug("Started T2 (retransmit)\n"); + } + } +} + +static void nfc_shdlc_connect_timeout(unsigned long data) +{ + struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data; + + pr_debug("\n"); + + queue_work(shdlc->sm_wq, &shdlc->sm_work); +} + +static void nfc_shdlc_t1_timeout(unsigned long data) +{ + struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data; + + pr_debug("SoftIRQ: need to send ack\n"); + + queue_work(shdlc->sm_wq, &shdlc->sm_work); +} + +static void nfc_shdlc_t2_timeout(unsigned long data) +{ + struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data; + + pr_debug("SoftIRQ: need to retransmit\n"); + + queue_work(shdlc->sm_wq, &shdlc->sm_work); +} + +static void nfc_shdlc_sm_work(struct work_struct *work) +{ + struct nfc_shdlc *shdlc = container_of(work, struct nfc_shdlc, sm_work); + int r; + + pr_debug("\n"); + + mutex_lock(&shdlc->state_mutex); + + switch (shdlc->state) { + case SHDLC_DISCONNECTED: + skb_queue_purge(&shdlc->rcv_q); + skb_queue_purge(&shdlc->send_q); + skb_queue_purge(&shdlc->ack_pending_q); + break; + case SHDLC_CONNECTING: + if (shdlc->connect_tries++ < 5) + r = nfc_shdlc_connect_initiate(shdlc); + else + r = -ETIME; + if (r < 0) + nfc_shdlc_connect_complete(shdlc, r); + else { + mod_timer(&shdlc->connect_timer, jiffies + + msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS)); + + shdlc->state = SHDLC_NEGOCIATING; + } + break; + case SHDLC_NEGOCIATING: + if (timer_pending(&shdlc->connect_timer) == 0) { + shdlc->state = SHDLC_CONNECTING; + queue_work(shdlc->sm_wq, &shdlc->sm_work); + } + + nfc_shdlc_handle_rcv_queue(shdlc); + break; + case SHDLC_CONNECTED: + nfc_shdlc_handle_rcv_queue(shdlc); + nfc_shdlc_handle_send_queue(shdlc); + + if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) { + pr_debug + ("Handle T1(send ack) elapsed (T1 now inactive)\n"); + + shdlc->t1_active = false; + r = nfc_shdlc_send_s_frame(shdlc, S_FRAME_RR, + shdlc->nr); + if (r < 0) + shdlc->hard_fault = r; + } + + if (shdlc->t2_active && timer_pending(&shdlc->t2_timer) == 0) { + pr_debug + ("Handle T2(retransmit) elapsed (T2 inactive)\n"); + + shdlc->t2_active = false; + + nfc_shdlc_requeue_ack_pending(shdlc); + nfc_shdlc_handle_send_queue(shdlc); + } + + if (shdlc->hard_fault) { + /* + * TODO: Handle hard_fault that occured during + * this invocation of the shdlc worker + */ + } + break; + default: + break; + } + mutex_unlock(&shdlc->state_mutex); +} + +/* + * Called from syscall context to establish shdlc 
link. Sleeps until + * link is ready or failure. + */ +static int nfc_shdlc_connect(struct nfc_shdlc *shdlc) +{ + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq); + + pr_debug("\n"); + + mutex_lock(&shdlc->state_mutex); + + shdlc->state = SHDLC_CONNECTING; + shdlc->connect_wq = &connect_wq; + shdlc->connect_tries = 0; + shdlc->connect_result = 1; + + mutex_unlock(&shdlc->state_mutex); + + queue_work(shdlc->sm_wq, &shdlc->sm_work); + + wait_event(connect_wq, shdlc->connect_result != 1); + + return shdlc->connect_result; +} + +static void nfc_shdlc_disconnect(struct nfc_shdlc *shdlc) +{ + pr_debug("\n"); + + mutex_lock(&shdlc->state_mutex); + + shdlc->state = SHDLC_DISCONNECTED; + + mutex_unlock(&shdlc->state_mutex); + + queue_work(shdlc->sm_wq, &shdlc->sm_work); +} + +/* + * Receive an incoming shdlc frame. Frame has already been crc-validated. + * skb contains only LLC header and payload. + * If skb == NULL, it is a notification that the link below is dead. + */ +void nfc_shdlc_recv_frame(struct nfc_shdlc *shdlc, struct sk_buff *skb) +{ + if (skb == NULL) { + pr_err("NULL Frame -> link is dead\n"); + shdlc->hard_fault = -EREMOTEIO; + } else { + SHDLC_DUMP_SKB("incoming frame", skb); + skb_queue_tail(&shdlc->rcv_q, skb); + } + + queue_work(shdlc->sm_wq, &shdlc->sm_work); +} +EXPORT_SYMBOL(nfc_shdlc_recv_frame); + +static int nfc_shdlc_open(struct nfc_hci_dev *hdev) +{ + struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); + int r; + + pr_debug("\n"); + + if (shdlc->ops->open) { + r = shdlc->ops->open(shdlc); + if (r < 0) + return r; + } + + r = nfc_shdlc_connect(shdlc); + if (r < 0 && shdlc->ops->close) + shdlc->ops->close(shdlc); + + return r; +} + +static void nfc_shdlc_close(struct nfc_hci_dev *hdev) +{ + struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); + + pr_debug("\n"); + + nfc_shdlc_disconnect(shdlc); + + if (shdlc->ops->close) + shdlc->ops->close(shdlc); +} + +static int nfc_shdlc_hci_ready(struct nfc_hci_dev *hdev) +{ + struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); + int r = 0; + + pr_debug("\n"); + + if (shdlc->ops->hci_ready) + r = shdlc->ops->hci_ready(shdlc); + + return r; +} + +static int nfc_shdlc_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb) +{ + struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); + + SHDLC_DUMP_SKB("queuing HCP packet to shdlc", skb); + + skb_queue_tail(&shdlc->send_q, skb); + + queue_work(shdlc->sm_wq, &shdlc->sm_work); + + return 0; +} + +static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev, u32 protocols) +{ + struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); + + pr_debug("\n"); + + if (shdlc->ops->start_poll) + return shdlc->ops->start_poll(shdlc, protocols); + + return 0; +} + +static int nfc_shdlc_target_from_gate(struct nfc_hci_dev *hdev, u8 gate, + struct nfc_target *target) +{ + struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); + + if (shdlc->ops->target_from_gate) + return shdlc->ops->target_from_gate(shdlc, gate, target); + + return -EPERM; +} + +static int nfc_shdlc_complete_target_discovered(struct nfc_hci_dev *hdev, + u8 gate, + struct nfc_target *target) +{ + struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); + + pr_debug("\n"); + + if (shdlc->ops->complete_target_discovered) + return shdlc->ops->complete_target_discovered(shdlc, gate, + target); + + return 0; +} + +static int nfc_shdlc_data_exchange(struct nfc_hci_dev *hdev, + struct nfc_target *target, + struct sk_buff *skb, + struct sk_buff **res_skb) +{ + struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); + + if 
(shdlc->ops->data_exchange) + return shdlc->ops->data_exchange(shdlc, target, skb, res_skb); + + return -EPERM; +} + +static struct nfc_hci_ops shdlc_ops = { + .open = nfc_shdlc_open, + .close = nfc_shdlc_close, + .hci_ready = nfc_shdlc_hci_ready, + .xmit = nfc_shdlc_xmit, + .start_poll = nfc_shdlc_start_poll, + .target_from_gate = nfc_shdlc_target_from_gate, + .complete_target_discovered = nfc_shdlc_complete_target_discovered, + .data_exchange = nfc_shdlc_data_exchange, +}; + +struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops, + struct nfc_hci_init_data *init_data, + u32 protocols, + int tx_headroom, int tx_tailroom, + int max_link_payload, const char *devname) +{ + struct nfc_shdlc *shdlc; + int r; + char name[32]; + + if (ops->xmit == NULL) + return NULL; + + shdlc = kzalloc(sizeof(struct nfc_shdlc), GFP_KERNEL); + if (shdlc == NULL) + return NULL; + + mutex_init(&shdlc->state_mutex); + shdlc->ops = ops; + shdlc->state = SHDLC_DISCONNECTED; + + init_timer(&shdlc->connect_timer); + shdlc->connect_timer.data = (unsigned long)shdlc; + shdlc->connect_timer.function = nfc_shdlc_connect_timeout; + + init_timer(&shdlc->t1_timer); + shdlc->t1_timer.data = (unsigned long)shdlc; + shdlc->t1_timer.function = nfc_shdlc_t1_timeout; + + init_timer(&shdlc->t2_timer); + shdlc->t2_timer.data = (unsigned long)shdlc; + shdlc->t2_timer.function = nfc_shdlc_t2_timeout; + + shdlc->w = SHDLC_MAX_WINDOW; + shdlc->srej_support = SHDLC_SREJ_SUPPORT; + + skb_queue_head_init(&shdlc->rcv_q); + skb_queue_head_init(&shdlc->send_q); + skb_queue_head_init(&shdlc->ack_pending_q); + + INIT_WORK(&shdlc->sm_work, nfc_shdlc_sm_work); + snprintf(name, sizeof(name), "%s_shdlc_sm_wq", devname); + shdlc->sm_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND | + WQ_MEM_RECLAIM, 1); + if (shdlc->sm_wq == NULL) + goto err_allocwq; + + shdlc->client_headroom = tx_headroom; + shdlc->client_tailroom = tx_tailroom; + + shdlc->hdev = nfc_hci_allocate_device(&shdlc_ops, init_data, protocols, + tx_headroom + SHDLC_LLC_HEAD_ROOM, + tx_tailroom + SHDLC_LLC_TAIL_ROOM, + max_link_payload); + if (shdlc->hdev == NULL) + goto err_allocdev; + + nfc_hci_set_clientdata(shdlc->hdev, shdlc); + + r = nfc_hci_register_device(shdlc->hdev); + if (r < 0) + goto err_regdev; + + return shdlc; + +err_regdev: + nfc_hci_free_device(shdlc->hdev); + +err_allocdev: + destroy_workqueue(shdlc->sm_wq); + +err_allocwq: + kfree(shdlc); + + return NULL; +} +EXPORT_SYMBOL(nfc_shdlc_allocate); + +void nfc_shdlc_free(struct nfc_shdlc *shdlc) +{ + pr_debug("\n"); + + /* TODO: Check that this cannot be called while still in use */ + + nfc_hci_unregister_device(shdlc->hdev); + nfc_hci_free_device(shdlc->hdev); + + destroy_workqueue(shdlc->sm_wq); + + skb_queue_purge(&shdlc->rcv_q); + skb_queue_purge(&shdlc->send_q); + skb_queue_purge(&shdlc->ack_pending_q); + + kfree(shdlc); +} +EXPORT_SYMBOL(nfc_shdlc_free); + +void nfc_shdlc_set_clientdata(struct nfc_shdlc *shdlc, void *clientdata) +{ + pr_debug("\n"); + + shdlc->clientdata = clientdata; +} +EXPORT_SYMBOL(nfc_shdlc_set_clientdata); + +void *nfc_shdlc_get_clientdata(struct nfc_shdlc *shdlc) +{ + return shdlc->clientdata; +} +EXPORT_SYMBOL(nfc_shdlc_get_clientdata); + +struct nfc_hci_dev *nfc_shdlc_get_hci_dev(struct nfc_shdlc *shdlc) +{ + return shdlc->hdev; +} +EXPORT_SYMBOL(nfc_shdlc_get_hci_dev); diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c index ef10ffcb4b6..11a3b7d98dc 100644 --- a/net/nfc/llcp/commands.c +++ b/net/nfc/llcp/commands.c @@ -102,7 +102,7 @@ u8 *nfc_llcp_build_tlv(u8 
type, u8 *value, u8 value_length, u8 *tlv_length) length = llcp_tlv_length[type]; if (length == 0 && value_length == 0) return NULL; - else + else if (length == 0) length = value_length; *tlv_length = 2 + length; @@ -248,7 +248,7 @@ int nfc_llcp_disconnect(struct nfc_llcp_sock *sock) skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); - skb = llcp_add_header(skb, sock->ssap, sock->dsap, LLCP_PDU_DISC); + skb = llcp_add_header(skb, sock->dsap, sock->ssap, LLCP_PDU_DISC); skb_queue_tail(&local->tx_queue, skb); @@ -416,7 +416,7 @@ int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason) skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); - skb = llcp_add_header(skb, ssap, dsap, LLCP_PDU_DM); + skb = llcp_add_header(skb, dsap, ssap, LLCP_PDU_DM); memcpy(skb_put(skb, 1), &reason, 1); @@ -522,7 +522,7 @@ int nfc_llcp_send_rr(struct nfc_llcp_sock *sock) skb_put(skb, LLCP_SEQUENCE_SIZE); - skb->data[2] = sock->recv_n % 16; + skb->data[2] = sock->recv_n; skb_queue_head(&local->tx_queue, skb); diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c index 17a578f641f..92988aa620d 100644 --- a/net/nfc/llcp/llcp.c +++ b/net/nfc/llcp/llcp.c @@ -307,6 +307,8 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local) u8 *gb_cur, *version_tlv, version, version_length; u8 *lto_tlv, lto, lto_length; u8 *wks_tlv, wks_length; + u8 *miux_tlv, miux_length; + __be16 miux; u8 gb_len = 0; version = LLCP_VERSION_11; @@ -316,7 +318,7 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local) /* 1500 ms */ lto = 150; - lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &lto, 1, &lto_length); + lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &lto, 1, &lto_length); gb_len += lto_length; pr_debug("Local wks 0x%lx\n", local->local_wks); @@ -324,6 +326,11 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local) &wks_length); gb_len += wks_length; + miux = cpu_to_be16(LLCP_MAX_MIUX); + miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0, + &miux_length); + gb_len += miux_length; + gb_len += ARRAY_SIZE(llcp_magic); if (gb_len > NFC_MAX_GT_LEN) { @@ -345,6 +352,9 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local) memcpy(gb_cur, wks_tlv, wks_length); gb_cur += wks_length; + memcpy(gb_cur, miux_tlv, miux_length); + gb_cur += miux_length; + kfree(version_tlv); kfree(lto_tlv); @@ -388,6 +398,9 @@ static void nfc_llcp_tx_work(struct work_struct *work) skb = skb_dequeue(&local->tx_queue); if (skb != NULL) { pr_debug("Sending pending skb\n"); + print_hex_dump(KERN_DEBUG, "LLCP Tx: ", DUMP_PREFIX_OFFSET, + 16, 1, skb->data, skb->len, true); + nfc_data_exchange(local->dev, local->target_idx, skb, nfc_llcp_recv, local); } else { @@ -425,7 +438,7 @@ static u8 nfc_llcp_nr(struct sk_buff *pdu) static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu) { - pdu->data[2] = (sock->send_n << 4) | (sock->recv_n % 16); + pdu->data[2] = (sock->send_n << 4) | (sock->recv_n); sock->send_n = (sock->send_n + 1) % 16; sock->recv_ack_n = (sock->recv_n - 1) % 16; } @@ -814,6 +827,10 @@ static void nfc_llcp_rx_work(struct work_struct *work) pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap); + if (ptype != LLCP_PDU_SYMM) + print_hex_dump(KERN_DEBUG, "LLCP Rx: ", DUMP_PREFIX_OFFSET, + 16, 1, skb->data, skb->len, true); + switch (ptype) { case LLCP_PDU_SYMM: pr_debug("SYMM\n"); diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 9ec065bb9ee..8737c2089fd 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -477,7 +477,7 @@ static int nci_activate_target(struct nfc_dev 
*nfc_dev, __u32 target_idx, } if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) { - param.rf_discovery_id = target->idx; + param.rf_discovery_id = target->logical_idx; if (protocol == NFC_PROTO_JEWEL) param.rf_protocol = NCI_RF_PROTOCOL_T1T; diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c index 2e3dee42196..99e1632e6aa 100644 --- a/net/nfc/nci/ntf.c +++ b/net/nfc/nci/ntf.c @@ -227,7 +227,7 @@ static void nci_add_new_target(struct nci_dev *ndev, for (i = 0; i < ndev->n_targets; i++) { target = &ndev->targets[i]; - if (target->idx == ntf->rf_discovery_id) { + if (target->logical_idx == ntf->rf_discovery_id) { /* This target already exists, add the new protocol */ nci_add_new_protocol(ndev, target, ntf->rf_protocol, ntf->rf_tech_and_mode, @@ -248,10 +248,10 @@ static void nci_add_new_target(struct nci_dev *ndev, ntf->rf_tech_and_mode, &ntf->rf_tech_specific_params); if (!rc) { - target->idx = ntf->rf_discovery_id; + target->logical_idx = ntf->rf_discovery_id; ndev->n_targets++; - pr_debug("target_idx %d, n_targets %d\n", target->idx, + pr_debug("logical idx %d, n_targets %d\n", target->logical_idx, ndev->n_targets); } } @@ -372,10 +372,11 @@ static void nci_target_auto_activated(struct nci_dev *ndev, if (rc) return; - target->idx = ntf->rf_discovery_id; + target->logical_idx = ntf->rf_discovery_id; ndev->n_targets++; - pr_debug("target_idx %d, n_targets %d\n", target->idx, ndev->n_targets); + pr_debug("logical idx %d, n_targets %d\n", + target->logical_idx, ndev->n_targets); nfc_targets_found(ndev->nfc_dev, ndev->targets, ndev->n_targets); } diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 6404052d6c0..f1829f6ae9c 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -63,19 +63,23 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target, genl_dump_check_consistent(cb, hdr, &nfc_genl_family); - NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target->idx); - NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols); - NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res); - NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res); - if (target->nfcid1_len > 0) - NLA_PUT(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len, - target->nfcid1); - if (target->sensb_res_len > 0) - NLA_PUT(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len, - target->sensb_res); - if (target->sensf_res_len > 0) - NLA_PUT(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len, - target->sensf_res); + if (nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target->idx) || + nla_put_u32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols) || + nla_put_u16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res) || + nla_put_u8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res)) + goto nla_put_failure; + if (target->nfcid1_len > 0 && + nla_put(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len, + target->nfcid1)) + goto nla_put_failure; + if (target->sensb_res_len > 0 && + nla_put(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len, + target->sensb_res)) + goto nla_put_failure; + if (target->sensf_res_len > 0 && + nla_put(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len, + target->sensf_res)) + goto nla_put_failure; return genlmsg_end(msg, hdr); @@ -170,7 +174,8 @@ int nfc_genl_targets_found(struct nfc_dev *dev) if (!hdr) goto free_msg; - NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); + if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -183,6 +188,37 @@ free_msg: return -EMSGSIZE; } +int nfc_genl_target_lost(struct nfc_dev *dev, u32 
target_idx) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, + NFC_EVENT_TARGET_LOST); + if (!hdr) + goto free_msg; + + if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || + nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + + genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); + + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); +free_msg: + nlmsg_free(msg); + return -EMSGSIZE; +} + int nfc_genl_device_added(struct nfc_dev *dev) { struct sk_buff *msg; @@ -197,10 +233,11 @@ int nfc_genl_device_added(struct nfc_dev *dev) if (!hdr) goto free_msg; - NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)); - NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); - NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols); - NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up); + if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || + nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || + nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || + nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -229,7 +266,8 @@ int nfc_genl_device_removed(struct nfc_dev *dev) if (!hdr) goto free_msg; - NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); + if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -259,10 +297,11 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, if (cb) genl_dump_check_consistent(cb, hdr, &nfc_genl_family); - NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)); - NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); - NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols); - NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up); + if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || + nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || + nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || + nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up)) + goto nla_put_failure; return genlmsg_end(msg, hdr); @@ -339,11 +378,14 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx, if (!hdr) goto free_msg; - NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); - if (rf_mode == NFC_RF_INITIATOR) - NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target_idx); - NLA_PUT_U8(msg, NFC_ATTR_COMM_MODE, comm_mode); - NLA_PUT_U8(msg, NFC_ATTR_RF_MODE, rf_mode); + if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) + goto nla_put_failure; + if (rf_mode == NFC_RF_INITIATOR && + nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx)) + goto nla_put_failure; + if (nla_put_u8(msg, NFC_ATTR_COMM_MODE, comm_mode) || + nla_put_u8(msg, NFC_ATTR_RF_MODE, rf_mode)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -376,7 +418,8 @@ int nfc_genl_dep_link_down_event(struct nfc_dev *dev) if (!hdr) goto free_msg; - NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); + if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) + goto nla_put_failure; genlmsg_end(msg, hdr); diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h index ec8794c1099..7d589a81942 100644 --- a/net/nfc/nfc.h +++ b/net/nfc/nfc.h @@ -119,6 +119,7 @@ void nfc_genl_data_init(struct nfc_genl_data *genl_data); void nfc_genl_data_exit(struct nfc_genl_data *genl_data); int nfc_genl_targets_found(struct nfc_dev *dev); +int 
nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx); int nfc_genl_device_added(struct nfc_dev *dev); int nfc_genl_device_removed(struct nfc_dev *dev); @@ -127,7 +128,7 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx, u8 comm_mode, u8 rf_mode); int nfc_genl_dep_link_down_event(struct nfc_dev *dev); -struct nfc_dev *nfc_get_device(unsigned idx); +struct nfc_dev *nfc_get_device(unsigned int idx); static inline void nfc_put_device(struct nfc_dev *dev) { diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index 5a839ceb2e8..ec1134c9e07 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c @@ -92,6 +92,12 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, goto error; } + if (addr->target_idx > dev->target_next_idx - 1 || + addr->target_idx < dev->target_next_idx - dev->n_targets) { + rc = -EINVAL; + goto error; + } + rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol); if (rc) goto put_dev; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index e66341ec455..2c74daa5aca 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -786,15 +786,18 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, tcp_flags = flow->tcp_flags; spin_unlock_bh(&flow->lock); - if (used) - NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)); + if (used && + nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used))) + goto nla_put_failure; - if (stats.n_packets) - NLA_PUT(skb, OVS_FLOW_ATTR_STATS, - sizeof(struct ovs_flow_stats), &stats); + if (stats.n_packets && + nla_put(skb, OVS_FLOW_ATTR_STATS, + sizeof(struct ovs_flow_stats), &stats)) + goto nla_put_failure; - if (tcp_flags) - NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags); + if (tcp_flags && + nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags)) + goto nla_put_failure; /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if * this is the first flow to be dumped into 'skb'. 
This is unusual for @@ -1176,7 +1179,8 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, goto nla_put_failure; get_dp_stats(dp, &dp_stats); - NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats); + if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats)) + goto nla_put_failure; return genlmsg_end(skb, ovs_header); @@ -1476,14 +1480,16 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb, ovs_header->dp_ifindex = get_dpifindex(vport->dp); - NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no); - NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type); - NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)); - NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid); + if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) || + nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) || + nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) || + nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid)) + goto nla_put_failure; ovs_vport_get_stats(vport, &vport_stats); - NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats), - &vport_stats); + if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats), + &vport_stats)) + goto nla_put_failure; err = ovs_vport_get_options(vport, skb); if (err == -EMSGSIZE) diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 2a11ec2383e..6d4d8097cf9 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -1175,11 +1175,13 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) struct ovs_key_ethernet *eth_key; struct nlattr *nla, *encap; - if (swkey->phy.priority) - NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority); + if (swkey->phy.priority && + nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority)) + goto nla_put_failure; - if (swkey->phy.in_port != USHRT_MAX) - NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port); + if (swkey->phy.in_port != USHRT_MAX && + nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port)) + goto nla_put_failure; nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key)); if (!nla) @@ -1189,8 +1191,9 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN); if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) { - NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)); - NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci); + if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) || + nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci)) + goto nla_put_failure; encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP); if (!swkey->eth.tci) goto unencap; @@ -1201,7 +1204,8 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) if (swkey->eth.type == htons(ETH_P_802_2)) goto unencap; - NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type); + if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type)) + goto nla_put_failure; if (swkey->eth.type == htons(ETH_P_IP)) { struct ovs_key_ipv4 *ipv4_key; diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index c1068aed03d..3fd6c0d88e1 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -140,9 +140,9 @@ int ovs_netdev_get_ifindex(const struct vport *vport) return netdev_vport->dev->ifindex; } -static unsigned packet_length(const struct sk_buff *skb) +static unsigned int 
packet_length(const struct sk_buff *skb) { - unsigned length = skb->len - ETH_HLEN; + unsigned int length = skb->len - ETH_HLEN; if (skb->protocol == htons(ETH_P_8021Q)) length -= VLAN_HLEN; @@ -157,9 +157,9 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb) int len; if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) { - if (net_ratelimit()) - pr_warn("%s: dropped over-mtu packet: %d > %d\n", - ovs_dp_name(vport->dp), packet_length(skb), mtu); + net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n", + ovs_dp_name(vport->dp), + packet_length(skb), mtu); goto error; } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 4f2c0df7956..0f661745df0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1654,7 +1654,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, skb->data = skb_head; skb->len = skb_len; } - kfree_skb(skb); + consume_skb(skb); skb = nskb; } @@ -1764,7 +1764,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + po->tp_reserve; } else { - unsigned maclen = skb_network_offset(skb); + unsigned int maclen = skb_network_offset(skb); netoff = TPACKET_ALIGN(po->tp_hdrlen + (maclen < 16 ? 16 : maclen)) + po->tp_reserve; @@ -3224,10 +3224,10 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { int len; - int val; + int val, lv = sizeof(val); struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); - void *data; + void *data = &val; struct tpacket_stats st; union tpacket_stats_u st_u; @@ -3242,21 +3242,17 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, switch (optname) { case PACKET_STATISTICS: - if (po->tp_version == TPACKET_V3) { - len = sizeof(struct tpacket_stats_v3); - } else { - if (len > sizeof(struct tpacket_stats)) - len = sizeof(struct tpacket_stats); - } spin_lock_bh(&sk->sk_receive_queue.lock); if (po->tp_version == TPACKET_V3) { + lv = sizeof(struct tpacket_stats_v3); memcpy(&st_u.stats3, &po->stats, - sizeof(struct tpacket_stats)); + sizeof(struct tpacket_stats)); st_u.stats3.tp_freeze_q_cnt = - po->stats_u.stats3.tp_freeze_q_cnt; + po->stats_u.stats3.tp_freeze_q_cnt; st_u.stats3.tp_packets += po->stats.tp_drops; data = &st_u.stats3; } else { + lv = sizeof(struct tpacket_stats); st = po->stats; st.tp_packets += st.tp_drops; data = &st; @@ -3265,31 +3261,16 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, spin_unlock_bh(&sk->sk_receive_queue.lock); break; case PACKET_AUXDATA: - if (len > sizeof(int)) - len = sizeof(int); val = po->auxdata; - - data = &val; break; case PACKET_ORIGDEV: - if (len > sizeof(int)) - len = sizeof(int); val = po->origdev; - - data = &val; break; case PACKET_VNET_HDR: - if (len > sizeof(int)) - len = sizeof(int); val = po->has_vnet_hdr; - - data = &val; break; case PACKET_VERSION: - if (len > sizeof(int)) - len = sizeof(int); val = po->tp_version; - data = &val; break; case PACKET_HDRLEN: if (len > sizeof(int)) @@ -3309,39 +3290,28 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, default: return -EINVAL; } - data = &val; break; case PACKET_RESERVE: - if (len > sizeof(unsigned int)) - len = sizeof(unsigned int); val = po->tp_reserve; - data = &val; break; case PACKET_LOSS: - if (len > sizeof(unsigned int)) - len = sizeof(unsigned int); val = po->tp_loss; - data = &val; break; case PACKET_TIMESTAMP: - if (len > sizeof(int)) - len = 
sizeof(int); val = po->tp_tstamp; - data = &val; break; case PACKET_FANOUT: - if (len > sizeof(int)) - len = sizeof(int); val = (po->fanout ? ((u32)po->fanout->id | ((u32)po->fanout->type << 16)) : 0); - data = &val; break; default: return -ENOPROTOOPT; } + if (len > lv) + len = lv; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, data, len)) diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c index d65f699fbf3..779ce4ff92e 100644 --- a/net/phonet/af_phonet.c +++ b/net/phonet/af_phonet.c @@ -129,7 +129,7 @@ static const struct net_proto_family phonet_proto_family = { /* Phonet device header operations */ static int pn_header_create(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, - const void *saddr, unsigned len) + const void *saddr, unsigned int len) { u8 *media = skb_push(skb, 1); diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 9726fe684ab..9dd4f926f7d 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -273,7 +273,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) hdr = pnp_hdr(skb); if (hdr->data[0] != PN_PEP_TYPE_COMMON) { LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n", - (unsigned)hdr->data[0]); + (unsigned int)hdr->data[0]); return -EOPNOTSUPP; } @@ -305,7 +305,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) default: LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP indication: %u\n", - (unsigned)hdr->data[1]); + (unsigned int)hdr->data[1]); return -EOPNOTSUPP; } if (wake) @@ -478,9 +478,9 @@ static void pipe_destruct(struct sock *sk) skb_queue_purge(&pn->ctrlreq_queue); } -static u8 pipe_negotiate_fc(const u8 *fcs, unsigned n) +static u8 pipe_negotiate_fc(const u8 *fcs, unsigned int n) { - unsigned i; + unsigned int i; u8 final_fc = PN_NO_FLOW_CONTROL; for (i = 0; i < n; i++) { diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index bf5cf69c820..36f75a9e2c3 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c @@ -44,7 +44,7 @@ struct phonet_net { struct phonet_routes routes; }; -int phonet_net_id __read_mostly; +static int phonet_net_id __read_mostly; static struct phonet_net *phonet_pernet(struct net *net) { @@ -268,7 +268,7 @@ static int phonet_device_autoconf(struct net_device *dev) static void phonet_route_autodel(struct net_device *dev) { struct phonet_net *pnn = phonet_pernet(dev_net(dev)); - unsigned i; + unsigned int i; DECLARE_BITMAP(deleted, 64); /* Remove left-over Phonet routes */ diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c index d61f6761777..cfdf135fcd6 100644 --- a/net/phonet/pn_netlink.c +++ b/net/phonet/pn_netlink.c @@ -116,7 +116,8 @@ static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, ifm->ifa_flags = IFA_F_PERMANENT; ifm->ifa_scope = RT_SCOPE_LINK; ifm->ifa_index = dev->ifindex; - NLA_PUT_U8(skb, IFA_LOCAL, addr); + if (nla_put_u8(skb, IFA_LOCAL, addr)) + goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: @@ -183,8 +184,9 @@ static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst, rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_type = RTN_UNICAST; rtm->rtm_flags = 0; - NLA_PUT_U8(skb, RTA_DST, dst); - NLA_PUT_U32(skb, RTA_OIF, dev->ifindex); + if (nla_put_u8(skb, RTA_DST, dst) || + nla_put_u32(skb, RTA_OIF, dev->ifindex)) + goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: diff --git a/net/phonet/socket.c b/net/phonet/socket.c index 4c7eff30dfa..89cfa9ce493 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -58,7 +58,7 @@ static 
struct { void __init pn_sock_init(void) { - unsigned i; + unsigned int i; for (i = 0; i < PN_HASHSIZE; i++) INIT_HLIST_HEAD(pnsocks.hlist + i); @@ -116,7 +116,7 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn) void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb) { struct hlist_head *hlist = pnsocks.hlist; - unsigned h; + unsigned int h; rcu_read_lock(); for (h = 0; h < PN_HASHSIZE; h++) { @@ -545,7 +545,7 @@ static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos) struct hlist_head *hlist = pnsocks.hlist; struct hlist_node *node; struct sock *sknode; - unsigned h; + unsigned int h; for (h = 0; h < PN_HASHSIZE; h++) { sk_for_each_rcu(sknode, node, hlist) { @@ -710,7 +710,7 @@ int pn_sock_unbind_res(struct sock *sk, u8 res) void pn_sock_unbind_all_res(struct sock *sk) { - unsigned res, match = 0; + unsigned int res, match = 0; mutex_lock(&resource_mutex); for (res = 0; res < 256; res++) { @@ -732,7 +732,7 @@ void pn_sock_unbind_all_res(struct sock *sk) static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos) { struct net *net = seq_file_net(seq); - unsigned i; + unsigned int i; if (!net_eq(net, &init_net)) return NULL; @@ -750,7 +750,7 @@ static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos) static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk) { struct net *net = seq_file_net(seq); - unsigned i; + unsigned int i; BUG_ON(!net_eq(net, &init_net)); diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c index cea1c7dbdae..696348fd31a 100644 --- a/net/phonet/sysctl.c +++ b/net/phonet/sysctl.c @@ -27,6 +27,10 @@ #include <linux/errno.h> #include <linux/init.h> +#include <net/sock.h> +#include <linux/phonet.h> +#include <net/phonet/phonet.h> + #define DYNAMIC_PORT_MIN 0x40 #define DYNAMIC_PORT_MAX 0x7f @@ -46,7 +50,8 @@ static void set_local_port_range(int range[2]) void phonet_get_local_port_range(int *min, int *max) { - unsigned seq; + unsigned int seq; + do { seq = read_seqbegin(&local_port_range_lock); if (min) @@ -93,19 +98,13 @@ static struct ctl_table phonet_table[] = { { } }; -static struct ctl_path phonet_ctl_path[] = { - { .procname = "net", }, - { .procname = "phonet", }, - { }, -}; - int __init phonet_sysctl_init(void) { - phonet_table_hrd = register_sysctl_paths(phonet_ctl_path, phonet_table); + phonet_table_hrd = register_net_sysctl(&init_net, "net/phonet", phonet_table); return phonet_table_hrd == NULL ? 
-ENOMEM : 0; } void phonet_sysctl_exit(void) { - unregister_sysctl_table(phonet_table_hrd); + unregister_net_sysctl_table(phonet_table_hrd); } diff --git a/net/rds/ib_sysctl.c b/net/rds/ib_sysctl.c index 1253b006efd..7e643bafb4a 100644 --- a/net/rds/ib_sysctl.c +++ b/net/rds/ib_sysctl.c @@ -106,22 +106,15 @@ static ctl_table rds_ib_sysctl_table[] = { { } }; -static struct ctl_path rds_ib_sysctl_path[] = { - { .procname = "net", }, - { .procname = "rds", }, - { .procname = "ib", }, - { } -}; - void rds_ib_sysctl_exit(void) { if (rds_ib_sysctl_hdr) - unregister_sysctl_table(rds_ib_sysctl_hdr); + unregister_net_sysctl_table(rds_ib_sysctl_hdr); } int rds_ib_sysctl_init(void) { - rds_ib_sysctl_hdr = register_sysctl_paths(rds_ib_sysctl_path, rds_ib_sysctl_table); + rds_ib_sysctl_hdr = register_net_sysctl(&init_net, "net/rds/ib", rds_ib_sysctl_table); if (!rds_ib_sysctl_hdr) return -ENOMEM; return 0; diff --git a/net/rds/iw_sysctl.c b/net/rds/iw_sysctl.c index e2e47176e72..5d5ebd576f3 100644 --- a/net/rds/iw_sysctl.c +++ b/net/rds/iw_sysctl.c @@ -109,22 +109,15 @@ static ctl_table rds_iw_sysctl_table[] = { { } }; -static struct ctl_path rds_iw_sysctl_path[] = { - { .procname = "net", }, - { .procname = "rds", }, - { .procname = "iw", }, - { } -}; - void rds_iw_sysctl_exit(void) { if (rds_iw_sysctl_hdr) - unregister_sysctl_table(rds_iw_sysctl_hdr); + unregister_net_sysctl_table(rds_iw_sysctl_hdr); } int rds_iw_sysctl_init(void) { - rds_iw_sysctl_hdr = register_sysctl_paths(rds_iw_sysctl_path, rds_iw_sysctl_table); + rds_iw_sysctl_hdr = register_net_sysctl(&init_net, "net/rds/iw", rds_iw_sysctl_table); if (!rds_iw_sysctl_hdr) return -ENOMEM; return 0; diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c index 25ad0c77a26..907214b4c4d 100644 --- a/net/rds/sysctl.c +++ b/net/rds/sysctl.c @@ -92,17 +92,10 @@ static ctl_table rds_sysctl_rds_table[] = { { } }; -static struct ctl_path rds_sysctl_path[] = { - { .procname = "net", }, - { .procname = "rds", }, - { } -}; - - void rds_sysctl_exit(void) { if (rds_sysctl_reg_table) - unregister_sysctl_table(rds_sysctl_reg_table); + unregister_net_sysctl_table(rds_sysctl_reg_table); } int rds_sysctl_init(void) @@ -110,7 +103,7 @@ int rds_sysctl_init(void) rds_sysctl_reconnect_min = msecs_to_jiffies(1); rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min; - rds_sysctl_reg_table = register_sysctl_paths(rds_sysctl_path, rds_sysctl_rds_table); + rds_sysctl_reg_table = register_net_sysctl(&init_net,"net/rds", rds_sysctl_rds_table); if (!rds_sysctl_reg_table) return -ENOMEM; return 0; diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 8b5cc4aa886..72981375f47 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@ -145,7 +145,7 @@ int rds_tcp_listen_init(void) if (ret < 0) goto out; - sock->sk->sk_reuse = 1; + sock->sk->sk_reuse = SK_CAN_REUSE; rds_tcp_nonagle(sock); write_lock_bh(&sock->sk->sk_callback_lock); diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c index 906cc05bba6..28dbdb911b8 100644 --- a/net/rose/rose_dev.c +++ b/net/rose/rose_dev.c @@ -37,7 +37,7 @@ static int rose_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - const void *daddr, const void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned int len) { unsigned char *buff = skb_push(skb, ROSE_MIN_LEN + 2); diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c index 47f1fdb346b..7ca57741b2f 100644 --- a/net/rose/rose_subr.c +++ b/net/rose/rose_subr.c @@ -399,7 +399,7 @@ int rose_parse_facilities(unsigned char *p, unsigned 
packet_len, facilities_len = *p++; - if (facilities_len == 0 || (unsigned)facilities_len > packet_len) + if (facilities_len == 0 || (unsigned int)facilities_len > packet_len) return 0; while (facilities_len >= 3 && *p == 0x00) { diff --git a/net/rose/sysctl_net_rose.c b/net/rose/sysctl_net_rose.c index df6d9dac218..94ca9c2ccd6 100644 --- a/net/rose/sysctl_net_rose.c +++ b/net/rose/sysctl_net_rose.c @@ -118,18 +118,12 @@ static ctl_table rose_table[] = { { } }; -static struct ctl_path rose_path[] = { - { .procname = "net", }, - { .procname = "rose", }, - { } -}; - void __init rose_register_sysctl(void) { - rose_table_header = register_sysctl_paths(rose_path, rose_table); + rose_table_header = register_net_sysctl(&init_net, "net/rose", rose_table); } void rose_unregister_sysctl(void) { - unregister_sysctl_table(rose_table_header); + unregister_net_sysctl_table(rose_table_header); } diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 74c064c0dfd..05996d0dd82 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -26,7 +26,7 @@ MODULE_AUTHOR("Red Hat, Inc."); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_RXRPC); -unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO; +unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO; module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(debug, "RxRPC debugging mask"); @@ -513,7 +513,7 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct rxrpc_sock *rx = rxrpc_sk(sock->sk); - unsigned min_sec_level; + unsigned int min_sec_level; int ret; _enter(",%d,%d,,%d", level, optname, optlen); @@ -555,13 +555,13 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname, case RXRPC_MIN_SECURITY_LEVEL: ret = -EINVAL; - if (optlen != sizeof(unsigned)) + if (optlen != sizeof(unsigned int)) goto error; ret = -EISCONN; if (rx->sk.sk_state != RXRPC_UNCONNECTED) goto error; ret = get_user(min_sec_level, - (unsigned __user *) optval); + (unsigned int __user *) optval); if (ret < 0) goto error; ret = -EINVAL; diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c index c3126e864f3..e4d9cbcff40 100644 --- a/net/rxrpc/ar-ack.c +++ b/net/rxrpc/ar-ack.c @@ -19,7 +19,7 @@ #include <net/af_rxrpc.h> #include "ar-internal.h" -static unsigned rxrpc_ack_defer = 1; +static unsigned int rxrpc_ack_defer = 1; static const char *const rxrpc_acks[] = { "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL", @@ -548,11 +548,11 @@ static void rxrpc_zap_tx_window(struct rxrpc_call *call) * process the extra information that may be appended to an ACK packet */ static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb, - unsigned latest, int nAcks) + unsigned int latest, int nAcks) { struct rxrpc_ackinfo ackinfo; struct rxrpc_peer *peer; - unsigned mtu; + unsigned int mtu; if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) { _leave(" [no ackinfo]"); diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c index bf656c230ba..a3bbb360a3f 100644 --- a/net/rxrpc/ar-call.c +++ b/net/rxrpc/ar-call.c @@ -38,8 +38,8 @@ const char *const rxrpc_call_states[] = { struct kmem_cache *rxrpc_call_jar; LIST_HEAD(rxrpc_calls); DEFINE_RWLOCK(rxrpc_call_lock); -static unsigned rxrpc_call_max_lifetime = 60; -static unsigned rxrpc_dead_call_timeout = 2; +static unsigned int rxrpc_call_max_lifetime = 60; +static unsigned int rxrpc_dead_call_timeout = 2; static void rxrpc_destroy_call(struct work_struct *work); static void 
rxrpc_call_life_expired(unsigned long _call); diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c index 1a2b0633fec..529572f18d1 100644 --- a/net/rxrpc/ar-input.c +++ b/net/rxrpc/ar-input.c @@ -76,7 +76,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb, * --ANK */ // ret = -ENOBUFS; // if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= -// (unsigned) sk->sk_rcvbuf) +// (unsigned int) sk->sk_rcvbuf) // goto out; ret = sk_filter(sk, skb); diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 8e22bd345e7..a693aca2ae2 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -83,7 +83,7 @@ struct rxrpc_skb_priv { struct rxrpc_call *call; /* call with which associated */ unsigned long resend_at; /* time in jiffies at which to resend */ union { - unsigned offset; /* offset into buffer of next read */ + unsigned int offset; /* offset into buffer of next read */ int remain; /* amount of space remaining for next write */ u32 error; /* network error code */ bool need_resend; /* T if needs resending */ @@ -176,9 +176,9 @@ struct rxrpc_peer { struct list_head error_targets; /* targets for net error distribution */ spinlock_t lock; /* access lock */ atomic_t usage; - unsigned if_mtu; /* interface MTU for this peer */ - unsigned mtu; /* network MTU for this peer */ - unsigned maxdata; /* data size (MTU - hdrsize) */ + unsigned int if_mtu; /* interface MTU for this peer */ + unsigned int mtu; /* network MTU for this peer */ + unsigned int maxdata; /* data size (MTU - hdrsize) */ unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */ int debug_id; /* debug ID for printks */ int net_error; /* network error distributed */ @@ -187,8 +187,8 @@ struct rxrpc_peer { /* calculated RTT cache */ #define RXRPC_RTT_CACHE_SIZE 32 suseconds_t rtt; /* current RTT estimate (in uS) */ - unsigned rtt_point; /* next entry at which to insert */ - unsigned rtt_usage; /* amount of cache actually used */ + unsigned int rtt_point; /* next entry at which to insert */ + unsigned int rtt_usage; /* amount of cache actually used */ suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */ }; @@ -271,7 +271,7 @@ struct rxrpc_connection { } state; int error; /* error code for local abort */ int debug_id; /* debug ID for printks */ - unsigned call_counter; /* call ID counter */ + unsigned int call_counter; /* call ID counter */ atomic_t serial; /* packet serial number counter */ atomic_t hi_serial; /* highest serial number received */ u8 avail_calls; /* number of calls available */ @@ -592,7 +592,7 @@ extern struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *, /* * debug tracing */ -extern unsigned rxrpc_debug; +extern unsigned int rxrpc_debug; #define dbgprintk(FMT,...) 
\ printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__) diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index ae3a035f539..8b1f9f49960 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c @@ -82,7 +82,7 @@ static int rxrpc_vet_description_s(const char *desc) * - the caller guarantees we have at least 4 words */ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr, - unsigned toklen) + unsigned int toklen) { struct rxrpc_key_token *token, **pptoken; size_t plen; @@ -210,10 +210,10 @@ static void rxrpc_rxk5_free(struct rxk5_key *rxk5) */ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, const __be32 **_xdr, - unsigned *_toklen) + unsigned int *_toklen) { const __be32 *xdr = *_xdr; - unsigned toklen = *_toklen, n_parts, loop, tmp; + unsigned int toklen = *_toklen, n_parts, loop, tmp; /* there must be at least one name, and at least #names+1 length * words */ @@ -286,10 +286,10 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td, size_t max_data_size, const __be32 **_xdr, - unsigned *_toklen) + unsigned int *_toklen) { const __be32 *xdr = *_xdr; - unsigned toklen = *_toklen, len; + unsigned int toklen = *_toklen, len; /* there must be at least one tag and one length word */ if (toklen <= 8) @@ -330,11 +330,11 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td, u8 max_n_elem, size_t max_elem_size, const __be32 **_xdr, - unsigned *_toklen) + unsigned int *_toklen) { struct krb5_tagged_data *td; const __be32 *xdr = *_xdr; - unsigned toklen = *_toklen, n_elem, loop; + unsigned int toklen = *_toklen, n_elem, loop; int ret; /* there must be at least one count */ @@ -380,10 +380,10 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td, * extract a krb5 ticket */ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, - const __be32 **_xdr, unsigned *_toklen) + const __be32 **_xdr, unsigned int *_toklen) { const __be32 *xdr = *_xdr; - unsigned toklen = *_toklen, len; + unsigned int toklen = *_toklen, len; /* there must be at least one length word */ if (toklen <= 4) @@ -419,7 +419,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, * - the caller guarantees we have at least 4 words */ static int rxrpc_instantiate_xdr_rxk5(struct key *key, const __be32 *xdr, - unsigned toklen) + unsigned int toklen) { struct rxrpc_key_token *token, **pptoken; struct rxk5_key *rxk5; @@ -549,7 +549,7 @@ static int rxrpc_instantiate_xdr(struct key *key, const void *data, size_t datal { const __be32 *xdr = data, *token; const char *cp; - unsigned len, tmp, loop, ntoken, toklen, sec_ix; + unsigned int len, tmp, loop, ntoken, toklen, sec_ix; int ret; _enter(",{%x,%x,%x,%x},%zu", diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 7635107726c..f226709ebd8 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -31,7 +31,7 @@ #define REALM_SZ 40 /* size of principal's auth domain */ #define SNAME_SZ 40 /* size of service name */ -unsigned rxrpc_debug; +unsigned int rxrpc_debug; module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(debug, "rxkad debugging mask"); @@ -207,7 +207,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, struct rxrpc_crypt iv; struct scatterlist sg[16]; struct sk_buff *trailer; - unsigned len; + unsigned int len; u16 check; int nsg; @@ -826,7 +826,7 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn, struct rxrpc_crypt iv, 
key; struct scatterlist sg[1]; struct in_addr addr; - unsigned life; + unsigned int life; time_t issue, now; bool little_endian; int ret; diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 75b58f81d53..e7a8976bf25 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -250,6 +250,28 @@ config NET_SCH_QFQ If unsure, say N. +config NET_SCH_CODEL + tristate "Controlled Delay AQM (CODEL)" + help + Say Y here if you want to use the Controlled Delay (CODEL) + packet scheduling algorithm. + + To compile this driver as a module, choose M here: the module + will be called sch_codel. + + If unsure, say N. + +config NET_SCH_FQ_CODEL + tristate "Fair Queue Controlled Delay AQM (FQ_CODEL)" + help + Say Y here if you want to use the FQ Controlled Delay (FQ_CODEL) + packet scheduling algorithm. + + To compile this driver as a module, choose M here: the module + will be called sch_fq_codel. + + If unsure, say N. + config NET_SCH_INGRESS tristate "Ingress Qdisc" depends on NET_CLS_ACT diff --git a/net/sched/Makefile b/net/sched/Makefile index 8cdf4e2b51d..5940a1992f0 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -37,6 +37,8 @@ obj-$(CONFIG_NET_SCH_PLUG) += sch_plug.o obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o +obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o +obj-$(CONFIG_NET_SCH_FQ_CODEL) += sch_fq_codel.o obj-$(CONFIG_NET_CLS_U32) += cls_u32.o obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 93fdf131bd7..5cfb160df06 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -127,7 +127,8 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a, nest = nla_nest_start(skb, a->order); if (nest == NULL) goto nla_put_failure; - NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind); + if (nla_put_string(skb, TCA_KIND, a->ops->kind)) + goto nla_put_failure; for (i = 0; i < (hinfo->hmask + 1); i++) { p = hinfo->htab[tcf_hash(i, hinfo->hmask)]; @@ -139,7 +140,8 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a, p = s_p; } } - NLA_PUT_U32(skb, TCA_FCNT, n_i); + if (nla_put_u32(skb, TCA_FCNT, n_i)) + goto nla_put_failure; nla_nest_end(skb, nest); return n_i; @@ -437,7 +439,8 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) if (a->ops == NULL || a->ops->dump == NULL) return err; - NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind); + if (nla_put_string(skb, TCA_KIND, a->ops->kind)) + goto nla_put_failure; if (tcf_action_copy_stats(skb, a, 0)) goto nla_put_failure; nest = nla_nest_start(skb, TCA_OPTIONS); diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 453a73431ac..2c8ad7c86e4 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -397,7 +397,7 @@ static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, while (len > 1) { switch (xh[off]) { - case IPV6_TLV_PAD0: + case IPV6_TLV_PAD1: optlen = 1; break; case IPV6_TLV_JUMBO: @@ -550,11 +550,13 @@ static int tcf_csum_dump(struct sk_buff *skb, }; struct tcf_t t; - NLA_PUT(skb, TCA_CSUM_PARMS, sizeof(opt), &opt); + if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(p->tcf_tm.expires); - NLA_PUT(skb, TCA_CSUM_TM, sizeof(t), &t); + if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t)) + goto nla_put_failure; return skb->len; diff --git a/net/sched/act_gact.c 
b/net/sched/act_gact.c index b77f5a06a65..f10fb825644 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -162,7 +162,8 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int }; struct tcf_t t; - NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt); + if (nla_put(skb, TCA_GACT_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; #ifdef CONFIG_GACT_PROB if (gact->tcfg_ptype) { struct tc_gact_p p_opt = { @@ -171,13 +172,15 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int .ptype = gact->tcfg_ptype, }; - NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt); + if (nla_put(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt)) + goto nla_put_failure; } #endif t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(gact->tcf_tm.expires); - NLA_PUT(skb, TCA_GACT_TM, sizeof(t), &t); + if (nla_put(skb, TCA_GACT_TM, sizeof(t), &t)) + goto nla_put_failure; return skb->len; nla_put_failure: diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 60f8f616e8f..60e281ad0f0 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -1,5 +1,5 @@ /* - * net/sched/ipt.c iptables target interface + * net/sched/ipt.c iptables target interface * *TODO: Add other tables. For now we only support the ipv4 table targets * @@ -235,9 +235,8 @@ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a, result = TC_ACT_PIPE; break; default: - if (net_ratelimit()) - pr_notice("tc filter: Bogus netfilter code" - " %d assume ACCEPT\n", ret); + net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n", + ret); result = TC_POLICE_OK; break; } @@ -267,15 +266,17 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int c.refcnt = ipt->tcf_refcnt - ref; strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name); - NLA_PUT(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t); - NLA_PUT_U32(skb, TCA_IPT_INDEX, ipt->tcf_index); - NLA_PUT_U32(skb, TCA_IPT_HOOK, ipt->tcfi_hook); - NLA_PUT(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c); - NLA_PUT_STRING(skb, TCA_IPT_TABLE, ipt->tcfi_tname); + if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) || + nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) || + nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) || + nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) || + nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname)) + goto nla_put_failure; tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install); tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse); tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires); - NLA_PUT(skb, TCA_IPT_TM, sizeof (tm), &tm); + if (nla_put(skb, TCA_IPT_TM, sizeof (tm), &tm)) + goto nla_put_failure; kfree(t); return skb->len; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index e051398fdf6..fe81cc18e9e 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -174,9 +174,8 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, } if (!(dev->flags & IFF_UP)) { - if (net_ratelimit()) - pr_notice("tc mirred to Houston: device %s is down\n", - dev->name); + net_notice_ratelimited("tc mirred to Houston: device %s is down\n", + dev->name); goto out; } @@ -227,11 +226,13 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, i }; struct tcf_t t; - NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt); + if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt)) + goto 
nla_put_failure; t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(m->tcf_tm.expires); - NLA_PUT(skb, TCA_MIRRED_TM, sizeof(t), &t); + if (nla_put(skb, TCA_MIRRED_TM, sizeof(t), &t)) + goto nla_put_failure; return skb->len; nla_put_failure: diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 001d1b35486..b5d029eb44f 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -284,11 +284,13 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a, }; struct tcf_t t; - NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt); + if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(p->tcf_tm.expires); - NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t); + if (nla_put(skb, TCA_NAT_TM, sizeof(t), &t)) + goto nla_put_failure; return skb->len; diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 10d3aed8656..26aa2f6ce25 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -215,11 +215,13 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, opt->refcnt = p->tcf_refcnt - ref; opt->bindcnt = p->tcf_bindcnt - bind; - NLA_PUT(skb, TCA_PEDIT_PARMS, s, opt); + if (nla_put(skb, TCA_PEDIT_PARMS, s, opt)) + goto nla_put_failure; t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(p->tcf_tm.expires); - NLA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t); + if (nla_put(skb, TCA_PEDIT_TM, sizeof(t), &t)) + goto nla_put_failure; kfree(opt); return skb->len; diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 6fb3f5af0f8..a9de23297d4 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -356,11 +356,14 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) opt.rate = police->tcfp_R_tab->rate; if (police->tcfp_P_tab) opt.peakrate = police->tcfp_P_tab->rate; - NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt); - if (police->tcfp_result) - NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result); - if (police->tcfp_ewma_rate) - NLA_PUT_U32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate); + if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt)) + goto nla_put_failure; + if (police->tcfp_result && + nla_put_u32(skb, TCA_POLICE_RESULT, police->tcfp_result)) + goto nla_put_failure; + if (police->tcfp_ewma_rate && + nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate)) + goto nla_put_failure; return skb->len; nla_put_failure: diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 73e0a3ab4d5..3922f2a2821 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -172,12 +172,14 @@ static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, }; struct tcf_t t; - NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt); - NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata); + if (nla_put(skb, TCA_DEF_PARMS, sizeof(opt), &opt) || + nla_put_string(skb, TCA_DEF_DATA, d->tcfd_defdata)) + goto nla_put_failure; t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(d->tcf_tm.expires); - NLA_PUT(skb, TCA_DEF_TM, sizeof(t), &t); + if (nla_put(skb, TCA_DEF_TM, sizeof(t), &t)) + goto nla_put_failure; return skb->len; nla_put_failure: diff --git 
a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 35dbbe91027..476e0fac671 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -166,20 +166,25 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, }; struct tcf_t t; - NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt); - if (d->flags & SKBEDIT_F_PRIORITY) - NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority), - &d->priority); - if (d->flags & SKBEDIT_F_QUEUE_MAPPING) - NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING, - sizeof(d->queue_mapping), &d->queue_mapping); - if (d->flags & SKBEDIT_F_MARK) - NLA_PUT(skb, TCA_SKBEDIT_MARK, sizeof(d->mark), - &d->mark); + if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; + if ((d->flags & SKBEDIT_F_PRIORITY) && + nla_put(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority), + &d->priority)) + goto nla_put_failure; + if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) && + nla_put(skb, TCA_SKBEDIT_QUEUE_MAPPING, + sizeof(d->queue_mapping), &d->queue_mapping)) + goto nla_put_failure; + if ((d->flags & SKBEDIT_F_MARK) && + nla_put(skb, TCA_SKBEDIT_MARK, sizeof(d->mark), + &d->mark)) + goto nla_put_failure; t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(d->tcf_tm.expires); - NLA_PUT(skb, TCA_SKBEDIT_TM, sizeof(t), &t); + if (nla_put(skb, TCA_SKBEDIT_TM, sizeof(t), &t)) + goto nla_put_failure; return skb->len; nla_put_failure: diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index a69d44f1dac..f452f696b4b 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -357,7 +357,8 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex; tcm->tcm_parent = tp->classid; tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); - NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind); + if (nla_put_string(skb, TCA_KIND, tp->ops->kind)) + goto nla_put_failure; tcm->tcm_handle = fh; if (RTM_DELTFILTER != event) { tcm->tcm_handle = 0; diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index ea1f70b5a5f..590960a22a7 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -257,8 +257,9 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh, if (nest == NULL) goto nla_put_failure; - if (f->res.classid) - NLA_PUT_U32(skb, TCA_BASIC_CLASSID, f->res.classid); + if (f->res.classid && + nla_put_u32(skb, TCA_BASIC_CLASSID, f->res.classid)) + goto nla_put_failure; if (tcf_exts_dump(skb, &f->exts, &basic_ext_map) < 0 || tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0) diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 1d8bd0dbcd1..ccd08c8dc6a 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -572,25 +572,32 @@ static int flow_dump(struct tcf_proto *tp, unsigned long fh, if (nest == NULL) goto nla_put_failure; - NLA_PUT_U32(skb, TCA_FLOW_KEYS, f->keymask); - NLA_PUT_U32(skb, TCA_FLOW_MODE, f->mode); + if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) || + nla_put_u32(skb, TCA_FLOW_MODE, f->mode)) + goto nla_put_failure; if (f->mask != ~0 || f->xor != 0) { - NLA_PUT_U32(skb, TCA_FLOW_MASK, f->mask); - NLA_PUT_U32(skb, TCA_FLOW_XOR, f->xor); + if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) || + nla_put_u32(skb, TCA_FLOW_XOR, f->xor)) + goto nla_put_failure; } - if (f->rshift) - NLA_PUT_U32(skb, TCA_FLOW_RSHIFT, f->rshift); - if (f->addend) - NLA_PUT_U32(skb, TCA_FLOW_ADDEND, f->addend); + if (f->rshift && + nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift)) + goto 
nla_put_failure; + if (f->addend && + nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend)) + goto nla_put_failure; - if (f->divisor) - NLA_PUT_U32(skb, TCA_FLOW_DIVISOR, f->divisor); - if (f->baseclass) - NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass); + if (f->divisor && + nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor)) + goto nla_put_failure; + if (f->baseclass && + nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass)) + goto nla_put_failure; - if (f->perturb_period) - NLA_PUT_U32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ); + if (f->perturb_period && + nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ)) + goto nla_put_failure; if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0) goto nla_put_failure; diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 389af152ec4..8384a479724 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -346,14 +346,17 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh, if (nest == NULL) goto nla_put_failure; - if (f->res.classid) - NLA_PUT_U32(skb, TCA_FW_CLASSID, f->res.classid); + if (f->res.classid && + nla_put_u32(skb, TCA_FW_CLASSID, f->res.classid)) + goto nla_put_failure; #ifdef CONFIG_NET_CLS_IND - if (strlen(f->indev)) - NLA_PUT_STRING(skb, TCA_FW_INDEV, f->indev); + if (strlen(f->indev) && + nla_put_string(skb, TCA_FW_INDEV, f->indev)) + goto nla_put_failure; #endif /* CONFIG_NET_CLS_IND */ - if (head->mask != 0xFFFFFFFF) - NLA_PUT_U32(skb, TCA_FW_MASK, head->mask); + if (head->mask != 0xFFFFFFFF && + nla_put_u32(skb, TCA_FW_MASK, head->mask)) + goto nla_put_failure; if (tcf_exts_dump(skb, &f->exts, &fw_ext_map) < 0) goto nla_put_failure; diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 13ab66e9df5..36fec422740 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -571,17 +571,21 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh, if (!(f->handle & 0x8000)) { id = f->id & 0xFF; - NLA_PUT_U32(skb, TCA_ROUTE4_TO, id); + if (nla_put_u32(skb, TCA_ROUTE4_TO, id)) + goto nla_put_failure; } if (f->handle & 0x80000000) { - if ((f->handle >> 16) != 0xFFFF) - NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif); + if ((f->handle >> 16) != 0xFFFF && + nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif)) + goto nla_put_failure; } else { id = f->id >> 16; - NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id); + if (nla_put_u32(skb, TCA_ROUTE4_FROM, id)) + goto nla_put_failure; } - if (f->res.classid) - NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid); + if (f->res.classid && + nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid)) + goto nla_put_failure; if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0) goto nla_put_failure; diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index b01427924f8..18ab93ec8d7 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -615,18 +615,22 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh, if (nest == NULL) goto nla_put_failure; - NLA_PUT(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst); + if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst)) + goto nla_put_failure; pinfo.dpi = s->dpi; pinfo.spi = f->spi; pinfo.protocol = s->protocol; pinfo.tunnelid = s->tunnelid; pinfo.tunnelhdr = f->tunnelhdr; pinfo.pad = 0; - NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo); - if (f->res.classid) - NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid); - if (((f->handle >> 8) & 0xFF) != 16) - NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src); + if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo)) + goto nla_put_failure; + if (f->res.classid && + nla_put_u32(skb, 
TCA_RSVP_CLASSID, f->res.classid)) + goto nla_put_failure; + if (((f->handle >> 8) & 0xFF) != 16 && + nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src)) + goto nla_put_failure; if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0) goto nla_put_failure; diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index dbe199234c6..fe29420d0b0 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -438,10 +438,11 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh, if (!fh) { t->tcm_handle = ~0; /* whatever ... */ - NLA_PUT_U32(skb, TCA_TCINDEX_HASH, p->hash); - NLA_PUT_U16(skb, TCA_TCINDEX_MASK, p->mask); - NLA_PUT_U32(skb, TCA_TCINDEX_SHIFT, p->shift); - NLA_PUT_U32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through); + if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) || + nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) || + nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) || + nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through)) + goto nla_put_failure; nla_nest_end(skb, nest); } else { if (p->perfect) { @@ -460,8 +461,9 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh, } } pr_debug("handle = %d\n", t->tcm_handle); - if (r->res.class) - NLA_PUT_U32(skb, TCA_TCINDEX_CLASSID, r->res.classid); + if (r->res.class && + nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid)) + goto nla_put_failure; if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0) goto nla_put_failure; diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 939b627b479..d45373fb00b 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -234,8 +234,7 @@ out: return -1; deadloop: - if (net_ratelimit()) - pr_warning("cls_u32: dead loop\n"); + net_warn_ratelimited("cls_u32: dead loop\n"); return -1; } @@ -733,36 +732,44 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh, struct tc_u_hnode *ht = (struct tc_u_hnode *)fh; u32 divisor = ht->divisor + 1; - NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor); + if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor)) + goto nla_put_failure; } else { - NLA_PUT(skb, TCA_U32_SEL, - sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key), - &n->sel); + if (nla_put(skb, TCA_U32_SEL, + sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key), + &n->sel)) + goto nla_put_failure; if (n->ht_up) { u32 htid = n->handle & 0xFFFFF000; - NLA_PUT_U32(skb, TCA_U32_HASH, htid); + if (nla_put_u32(skb, TCA_U32_HASH, htid)) + goto nla_put_failure; } - if (n->res.classid) - NLA_PUT_U32(skb, TCA_U32_CLASSID, n->res.classid); - if (n->ht_down) - NLA_PUT_U32(skb, TCA_U32_LINK, n->ht_down->handle); + if (n->res.classid && + nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid)) + goto nla_put_failure; + if (n->ht_down && + nla_put_u32(skb, TCA_U32_LINK, n->ht_down->handle)) + goto nla_put_failure; #ifdef CONFIG_CLS_U32_MARK - if (n->mark.val || n->mark.mask) - NLA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark); + if ((n->mark.val || n->mark.mask) && + nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark)) + goto nla_put_failure; #endif if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0) goto nla_put_failure; #ifdef CONFIG_NET_CLS_IND - if (strlen(n->indev)) - NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev); + if (strlen(n->indev) && + nla_put_string(skb, TCA_U32_INDEV, n->indev)) + goto nla_put_failure; #endif #ifdef CONFIG_CLS_U32_PERF - NLA_PUT(skb, TCA_U32_PCNT, - sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64), - n->pf); + if (nla_put(skb, TCA_U32_PCNT, + sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64), + n->pf)) + goto nla_put_failure; #endif } 
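
The recurring change across the net/sched dump functions above is mechanical: the NLA_PUT()/NLA_PUT_U32()/NLA_PUT_STRING() macros, which hid a "goto nla_put_failure" inside themselves, are replaced by explicit nla_put()/nla_put_u32()/nla_put_string() calls whose non-zero return is checked and turned into the same goto, and optional attributes are folded into the condition with && or ||. A minimal sketch of the post-conversion style follows; TCA_FOO_PARMS and struct foo_qopt are placeholders invented for this sketch, not part of the patch or the uAPI.

#include <linux/types.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/* Illustrative only: attribute and option struct are hypothetical. */
enum { TCA_FOO_UNSPEC, TCA_FOO_PARMS, __TCA_FOO_MAX };

struct foo_qopt {
	__u32 limit;
};

static int foo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct foo_qopt opt = { .limit = sch->limit };

	/*
	 * Before this series the line below would have been written as
	 *	NLA_PUT(skb, TCA_FOO_PARMS, sizeof(opt), &opt);
	 * i.e. a macro that jumped to nla_put_failure on message overflow.
	 * The open-coded form keeps the control flow visible and lets
	 * several puts share a single check via ||.
	 */
	if (nla_put(skb, TCA_FOO_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	return -1;
}

The same conversion is applied below to em_meta, ematch, and the qdisc dump paths, alongside the separate net_warn_ratelimited() cleanups.
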
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 1363bf14e61..4790c696cbc 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -585,8 +585,9 @@ static void meta_var_apply_extras(struct meta_value *v, static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv) { - if (v->val && v->len) - NLA_PUT(skb, tlv, v->len, (void *) v->val); + if (v->val && v->len && + nla_put(skb, tlv, v->len, (void *) v->val)) + goto nla_put_failure; return 0; nla_put_failure: @@ -636,10 +637,13 @@ static void meta_int_apply_extras(struct meta_value *v, static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv) { - if (v->len == sizeof(unsigned long)) - NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val); - else if (v->len == sizeof(u32)) - NLA_PUT_U32(skb, tlv, v->val); + if (v->len == sizeof(unsigned long)) { + if (nla_put(skb, tlv, sizeof(unsigned long), &v->val)) + goto nla_put_failure; + } else if (v->len == sizeof(u32)) { + if (nla_put_u32(skb, tlv, v->val)) + goto nla_put_failure; + } return 0; @@ -831,7 +835,8 @@ static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em) memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left)); memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right)); - NLA_PUT(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr); + if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr)) + goto nla_put_failure; ops = meta_type_ops(&meta->lvalue); if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 || diff --git a/net/sched/ematch.c b/net/sched/ematch.c index 88d93eb9250..3a633debb6d 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c @@ -441,7 +441,8 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv) if (top_start == NULL) goto nla_put_failure; - NLA_PUT(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr); + if (nla_put(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr)) + goto nla_put_failure; list_start = nla_nest_start(skb, TCA_EMATCH_TREE_LIST); if (list_start == NULL) @@ -457,7 +458,8 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv) .flags = em->flags }; - NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr); + if (nla_put(skb, i + 1, sizeof(em_hdr), &em_hdr)) + goto nla_put_failure; if (em->ops && em->ops->dump) { if (em->ops->dump(skb, em) < 0) @@ -535,9 +537,7 @@ pop_stack: return res; stack_overflow: - if (net_ratelimit()) - pr_warning("tc ematch: local stack overflow," - " increase NET_EMATCH_STACK\n"); + net_warn_ratelimited("tc ematch: local stack overflow, increase NET_EMATCH_STACK\n"); return -1; } EXPORT_SYMBOL(__tcf_em_tree_match); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 3d8981fde30..085ce53d570 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -426,7 +426,8 @@ static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab) nest = nla_nest_start(skb, TCA_STAB); if (nest == NULL) goto nla_put_failure; - NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts); + if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts)) + goto nla_put_failure; nla_nest_end(skb, nest); return skb->len; @@ -1201,7 +1202,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, tcm->tcm_parent = clid; tcm->tcm_handle = q->handle; tcm->tcm_info = atomic_read(&q->refcnt); - NLA_PUT_STRING(skb, TCA_KIND, q->ops->id); + if (nla_put_string(skb, TCA_KIND, q->ops->id)) + goto nla_put_failure; if (q->ops->dump && q->ops->dump(q, skb) < 0) goto nla_put_failure; q->qstats.qlen = q->q.qlen; @@ 
-1505,7 +1507,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, tcm->tcm_parent = q->handle; tcm->tcm_handle = q->handle; tcm->tcm_info = 0; - NLA_PUT_STRING(skb, TCA_KIND, q->ops->id); + if (nla_put_string(skb, TCA_KIND, q->ops->id)) + goto nla_put_failure; if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0) goto nla_put_failure; @@ -1688,12 +1691,10 @@ reclassify: tp = otp; if (verd++ >= MAX_REC_LOOP) { - if (net_ratelimit()) - pr_notice("%s: packet reclassify loop" - " rule prio %u protocol %02x\n", - tp->q->ops->id, - tp->prio & 0xffff, - ntohs(tp->protocol)); + net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n", + tp->q->ops->id, + tp->prio & 0xffff, + ntohs(tp->protocol)); return TC_ACT_SHOT; } skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd); diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index e25e49061a0..8522a479337 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -423,8 +423,6 @@ drop: __maybe_unused } return ret; } - qdisc_bstats_update(sch, skb); - bstats_update(&flow->bstats, skb); /* * Okay, this may seem weird. We pretend we've dropped the packet if * it goes via ATM. The reason for this is that the outer qdisc @@ -472,6 +470,8 @@ static void sch_atm_dequeue(unsigned long data) if (unlikely(!skb)) break; + qdisc_bstats_update(sch, skb); + bstats_update(&flow->bstats, skb); pr_debug("atm_tc_dequeue: sending on class %p\n", flow); /* remove any LL header somebody else has attached */ skb_pull(skb, skb_network_offset(skb)); @@ -601,7 +601,8 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, if (nest == NULL) goto nla_put_failure; - NLA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr); + if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr)) + goto nla_put_failure; if (flow->vcc) { struct sockaddr_atmpvc pvc; int state; @@ -610,15 +611,19 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, pvc.sap_addr.itf = flow->vcc->dev ? 
flow->vcc->dev->number : -1; pvc.sap_addr.vpi = flow->vcc->vpi; pvc.sap_addr.vci = flow->vcc->vci; - NLA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc); + if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc)) + goto nla_put_failure; state = ATM_VF2VS(flow->vcc->flags); - NLA_PUT_U32(skb, TCA_ATM_STATE, state); + if (nla_put_u32(skb, TCA_ATM_STATE, state)) + goto nla_put_failure; + } + if (flow->excess) { + if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->classid)) + goto nla_put_failure; + } else { + if (nla_put_u32(skb, TCA_ATM_EXCESS, 0)) + goto nla_put_failure; } - if (flow->excess) - NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid); - else - NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0); - nla_nest_end(skb, nest); return skb->len; diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 24d94c097b3..6aabd77d1cf 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -1425,7 +1425,8 @@ static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) { unsigned char *b = skb_tail_pointer(skb); - NLA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate); + if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate)) + goto nla_put_failure; return skb->len; nla_put_failure: @@ -1450,7 +1451,8 @@ static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) opt.minidle = (u32)(-cl->minidle); opt.offtime = cl->offtime; opt.change = ~0; - NLA_PUT(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt); + if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt)) + goto nla_put_failure; return skb->len; nla_put_failure: @@ -1468,7 +1470,8 @@ static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) opt.priority = cl->priority + 1; opt.cpriority = cl->cpriority + 1; opt.weight = cl->weight; - NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt); + if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt)) + goto nla_put_failure; return skb->len; nla_put_failure: @@ -1485,7 +1488,8 @@ static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl) opt.priority2 = cl->priority2 + 1; opt.pad = 0; opt.penalty = cl->penalty; - NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt); + if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt)) + goto nla_put_failure; return skb->len; nla_put_failure: @@ -1502,7 +1506,8 @@ static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) opt.split = cl->split ? 
cl->split->common.classid : 0; opt.defmap = cl->defmap; opt.defchange = ~0; - NLA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt); + if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt)) + goto nla_put_failure; } return skb->len; @@ -1521,7 +1526,8 @@ static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl) opt.police = cl->police; opt.__res1 = 0; opt.__res2 = 0; - NLA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt); + if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt)) + goto nla_put_failure; } return skb->len; diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index 7e267d7b9c7..cc37dd52ecf 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -332,15 +332,13 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) } q->stats.pdrop++; - sch->qstats.drops++; - kfree_skb(skb); - return NET_XMIT_DROP; + return qdisc_drop(skb, sch); - congestion_drop: +congestion_drop: qdisc_drop(skb, sch); return NET_XMIT_CN; - other_drop: +other_drop: if (ret & __NET_XMIT_BYPASS) sch->qstats.drops++; kfree_skb(skb); @@ -515,8 +513,9 @@ static int choke_dump(struct Qdisc *sch, struct sk_buff *skb) if (opts == NULL) goto nla_put_failure; - NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt); - NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P); + if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) || + nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P)) + goto nla_put_failure; return nla_nest_end(skb, opts); nla_put_failure: diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c new file mode 100644 index 00000000000..2f9ab17db85 --- /dev/null +++ b/net/sched/sch_codel.c @@ -0,0 +1,276 @@ +/* + * Codel - The Controlled-Delay Active Queue Management algorithm + * + * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com> + * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net> + * + * Implemented on linux by : + * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net> + * Copyright (C) 2012 Eric Dumazet <edumazet@google.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The names of the authors may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * Alternatively, provided that this notice is retained in full, this + * software may be distributed under the terms of the GNU General + * Public License ("GPL") version 2, in which case the provisions of the + * GPL apply INSTEAD OF those given above. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/skbuff.h> +#include <linux/prefetch.h> +#include <net/pkt_sched.h> +#include <net/codel.h> + + +#define DEFAULT_CODEL_LIMIT 1000 + +struct codel_sched_data { + struct codel_params params; + struct codel_vars vars; + struct codel_stats stats; + u32 drop_overlimit; +}; + +/* This is the specific function called from codel_dequeue() + * to dequeue a packet from queue. Note: backlog is handled in + * codel, we dont need to reduce it here. + */ +static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch) +{ + struct sk_buff *skb = __skb_dequeue(&sch->q); + + prefetch(&skb->end); /* we'll need skb_shinfo() */ + return skb; +} + +static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch) +{ + struct codel_sched_data *q = qdisc_priv(sch); + struct sk_buff *skb; + + skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue); + + /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0, + * or HTB crashes. Defer it for next round. + */ + if (q->stats.drop_count && sch->q.qlen) { + qdisc_tree_decrease_qlen(sch, q->stats.drop_count); + q->stats.drop_count = 0; + } + if (skb) + qdisc_bstats_update(sch, skb); + return skb; +} + +static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) +{ + struct codel_sched_data *q; + + if (likely(qdisc_qlen(sch) < sch->limit)) { + codel_set_enqueue_time(skb); + return qdisc_enqueue_tail(skb, sch); + } + q = qdisc_priv(sch); + q->drop_overlimit++; + return qdisc_drop(skb, sch); +} + +static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = { + [TCA_CODEL_TARGET] = { .type = NLA_U32 }, + [TCA_CODEL_LIMIT] = { .type = NLA_U32 }, + [TCA_CODEL_INTERVAL] = { .type = NLA_U32 }, + [TCA_CODEL_ECN] = { .type = NLA_U32 }, +}; + +static int codel_change(struct Qdisc *sch, struct nlattr *opt) +{ + struct codel_sched_data *q = qdisc_priv(sch); + struct nlattr *tb[TCA_CODEL_MAX + 1]; + unsigned int qlen; + int err; + + if (!opt) + return -EINVAL; + + err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy); + if (err < 0) + return err; + + sch_tree_lock(sch); + + if (tb[TCA_CODEL_TARGET]) { + u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]); + + q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT; + } + + if (tb[TCA_CODEL_INTERVAL]) { + u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]); + + q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT; + } + + if (tb[TCA_CODEL_LIMIT]) + sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]); + + if (tb[TCA_CODEL_ECN]) + q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]); + + qlen = sch->q.qlen; + while (sch->q.qlen > sch->limit) { + struct sk_buff *skb = __skb_dequeue(&sch->q); + + sch->qstats.backlog -= qdisc_pkt_len(skb); + qdisc_drop(skb, sch); + } + qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen); + + sch_tree_unlock(sch); + return 0; +} + +static int codel_init(struct Qdisc 
*sch, struct nlattr *opt) +{ + struct codel_sched_data *q = qdisc_priv(sch); + + sch->limit = DEFAULT_CODEL_LIMIT; + + codel_params_init(&q->params); + codel_vars_init(&q->vars); + codel_stats_init(&q->stats); + + if (opt) { + int err = codel_change(sch, opt); + + if (err) + return err; + } + + if (sch->limit >= 1) + sch->flags |= TCQ_F_CAN_BYPASS; + else + sch->flags &= ~TCQ_F_CAN_BYPASS; + + return 0; +} + +static int codel_dump(struct Qdisc *sch, struct sk_buff *skb) +{ + struct codel_sched_data *q = qdisc_priv(sch); + struct nlattr *opts; + + opts = nla_nest_start(skb, TCA_OPTIONS); + if (opts == NULL) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CODEL_TARGET, + codel_time_to_us(q->params.target)) || + nla_put_u32(skb, TCA_CODEL_LIMIT, + sch->limit) || + nla_put_u32(skb, TCA_CODEL_INTERVAL, + codel_time_to_us(q->params.interval)) || + nla_put_u32(skb, TCA_CODEL_ECN, + q->params.ecn)) + goto nla_put_failure; + + return nla_nest_end(skb, opts); + +nla_put_failure: + nla_nest_cancel(skb, opts); + return -1; +} + +static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d) +{ + const struct codel_sched_data *q = qdisc_priv(sch); + struct tc_codel_xstats st = { + .maxpacket = q->stats.maxpacket, + .count = q->vars.count, + .lastcount = q->vars.lastcount, + .drop_overlimit = q->drop_overlimit, + .ldelay = codel_time_to_us(q->vars.ldelay), + .dropping = q->vars.dropping, + .ecn_mark = q->stats.ecn_mark, + }; + + if (q->vars.dropping) { + codel_tdiff_t delta = q->vars.drop_next - codel_get_time(); + + if (delta >= 0) + st.drop_next = codel_time_to_us(delta); + else + st.drop_next = -codel_time_to_us(-delta); + } + + return gnet_stats_copy_app(d, &st, sizeof(st)); +} + +static void codel_reset(struct Qdisc *sch) +{ + struct codel_sched_data *q = qdisc_priv(sch); + + qdisc_reset_queue(sch); + codel_vars_init(&q->vars); +} + +static struct Qdisc_ops codel_qdisc_ops __read_mostly = { + .id = "codel", + .priv_size = sizeof(struct codel_sched_data), + + .enqueue = codel_qdisc_enqueue, + .dequeue = codel_qdisc_dequeue, + .peek = qdisc_peek_dequeued, + .init = codel_init, + .reset = codel_reset, + .change = codel_change, + .dump = codel_dump, + .dump_stats = codel_dump_stats, + .owner = THIS_MODULE, +}; + +static int __init codel_module_init(void) +{ + return register_qdisc(&codel_qdisc_ops); +} + +static void __exit codel_module_exit(void) +{ + unregister_qdisc(&codel_qdisc_ops); +} + +module_init(codel_module_init) +module_exit(codel_module_exit) + +MODULE_DESCRIPTION("Controlled Delay queue discipline"); +MODULE_AUTHOR("Dave Taht"); +MODULE_AUTHOR("Eric Dumazet"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 6b7fe4a84f1..9ce0b4fe23f 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -260,7 +260,8 @@ static int drr_dump_class(struct Qdisc *sch, unsigned long arg, nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; - NLA_PUT_U32(skb, TCA_DRR_QUANTUM, cl->quantum); + if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum)) + goto nla_put_failure; return nla_nest_end(skb, nest); nla_put_failure: @@ -375,8 +376,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) cl->deficit = cl->quantum; } - bstats_update(&cl->bstats, skb); - sch->q.qlen++; return err; } @@ -402,6 +401,8 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch) skb = qdisc_dequeue_peeked(cl->qdisc); if (cl->qdisc->q.qlen == 0) list_del(&cl->alist); + + bstats_update(&cl->bstats, skb); qdisc_bstats_update(sch, skb); 
sch->q.qlen--; return skb; diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 2c790204d04..3886365cc20 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -265,8 +265,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) return NET_XMIT_SUCCESS; drop: - kfree_skb(skb); - sch->qstats.drops++; + qdisc_drop(skb, sch); return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; } @@ -429,8 +428,9 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl, opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; - NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]); - NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]); + if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]) || + nla_put_u8(skb, TCA_DSMARK_VALUE, p->value[cl - 1])) + goto nla_put_failure; return nla_nest_end(skb, opts); @@ -447,13 +447,16 @@ static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb) opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; - NLA_PUT_U16(skb, TCA_DSMARK_INDICES, p->indices); + if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices)) + goto nla_put_failure; - if (p->default_index != NO_DEFAULT_INDEX) - NLA_PUT_U16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index); + if (p->default_index != NO_DEFAULT_INDEX && + nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index)) + goto nla_put_failure; - if (p->set_tc_index) - NLA_PUT_FLAG(skb, TCA_DSMARK_SET_TC_INDEX); + if (p->set_tc_index && + nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX)) + goto nla_put_failure; return nla_nest_end(skb, opts); diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index 66effe2da8e..e15a9eb2908 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c @@ -85,7 +85,8 @@ static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb) { struct tc_fifo_qopt opt = { .limit = sch->limit }; - NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); + if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) + goto nla_put_failure; return skb->len; nla_put_failure: diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c new file mode 100644 index 00000000000..9fc1c62ec80 --- /dev/null +++ b/net/sched/sch_fq_codel.c @@ -0,0 +1,626 @@ +/* + * Fair Queue CoDel discipline + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Copyright (C) 2012 Eric Dumazet <edumazet@google.com> + */ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/jiffies.h> +#include <linux/string.h> +#include <linux/in.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/skbuff.h> +#include <linux/jhash.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <net/netlink.h> +#include <net/pkt_sched.h> +#include <net/flow_keys.h> +#include <net/codel.h> + +/* Fair Queue CoDel. + * + * Principles : + * Packets are classified (internal classifier or external) on flows. + * This is a Stochastic model (as we use a hash, several flows + * might be hashed on same slot) + * Each flow has a CoDel managed queue. + * Flows are linked onto two (Round Robin) lists, + * so that new flows have priority on old ones. + * + * For a given flow, packets are not reordered (CoDel uses a FIFO) + * head drops only. + * ECN capability is on by default. 
+ * Low memory footprint (64 bytes per flow) + */ + +struct fq_codel_flow { + struct sk_buff *head; + struct sk_buff *tail; + struct list_head flowchain; + int deficit; + u32 dropped; /* number of drops (or ECN marks) on this flow */ + struct codel_vars cvars; +}; /* please try to keep this structure <= 64 bytes */ + +struct fq_codel_sched_data { + struct tcf_proto *filter_list; /* optional external classifier */ + struct fq_codel_flow *flows; /* Flows table [flows_cnt] */ + u32 *backlogs; /* backlog table [flows_cnt] */ + u32 flows_cnt; /* number of flows */ + u32 perturbation; /* hash perturbation */ + u32 quantum; /* psched_mtu(qdisc_dev(sch)); */ + struct codel_params cparams; + struct codel_stats cstats; + u32 drop_overlimit; + u32 new_flow_count; + + struct list_head new_flows; /* list of new flows */ + struct list_head old_flows; /* list of old flows */ +}; + +static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q, + const struct sk_buff *skb) +{ + struct flow_keys keys; + unsigned int hash; + + skb_flow_dissect(skb, &keys); + hash = jhash_3words((__force u32)keys.dst, + (__force u32)keys.src ^ keys.ip_proto, + (__force u32)keys.ports, q->perturbation); + return ((u64)hash * q->flows_cnt) >> 32; +} + +static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch, + int *qerr) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + struct tcf_result res; + int result; + + if (TC_H_MAJ(skb->priority) == sch->handle && + TC_H_MIN(skb->priority) > 0 && + TC_H_MIN(skb->priority) <= q->flows_cnt) + return TC_H_MIN(skb->priority); + + if (!q->filter_list) + return fq_codel_hash(q, skb) + 1; + + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; + result = tc_classify(skb, q->filter_list, &res); + if (result >= 0) { +#ifdef CONFIG_NET_CLS_ACT + switch (result) { + case TC_ACT_STOLEN: + case TC_ACT_QUEUED: + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; + case TC_ACT_SHOT: + return 0; + } +#endif + if (TC_H_MIN(res.classid) <= q->flows_cnt) + return TC_H_MIN(res.classid); + } + return 0; +} + +/* helper functions : might be changed when/if skb use a standard list_head */ + +/* remove one skb from head of slot queue */ +static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow) +{ + struct sk_buff *skb = flow->head; + + flow->head = skb->next; + skb->next = NULL; + return skb; +} + +/* add skb to flow queue (tail add) */ +static inline void flow_queue_add(struct fq_codel_flow *flow, + struct sk_buff *skb) +{ + if (flow->head == NULL) + flow->head = skb; + else + flow->tail->next = skb; + flow->tail = skb; + skb->next = NULL; +} + +static unsigned int fq_codel_drop(struct Qdisc *sch) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + struct sk_buff *skb; + unsigned int maxbacklog = 0, idx = 0, i, len; + struct fq_codel_flow *flow; + + /* Queue is full! Find the fat flow and drop packet from it. + * This might sound expensive, but with 1024 flows, we scan + * 4KB of memory, and we dont need to handle a complex tree + * in fast path (packet queue/enqueue) with many cache misses. 
+ */ + for (i = 0; i < q->flows_cnt; i++) { + if (q->backlogs[i] > maxbacklog) { + maxbacklog = q->backlogs[i]; + idx = i; + } + } + flow = &q->flows[idx]; + skb = dequeue_head(flow); + len = qdisc_pkt_len(skb); + q->backlogs[idx] -= len; + kfree_skb(skb); + sch->q.qlen--; + sch->qstats.drops++; + sch->qstats.backlog -= len; + flow->dropped++; + return idx; +} + +static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + unsigned int idx; + struct fq_codel_flow *flow; + int uninitialized_var(ret); + + idx = fq_codel_classify(skb, sch, &ret); + if (idx == 0) { + if (ret & __NET_XMIT_BYPASS) + sch->qstats.drops++; + kfree_skb(skb); + return ret; + } + idx--; + + codel_set_enqueue_time(skb); + flow = &q->flows[idx]; + flow_queue_add(flow, skb); + q->backlogs[idx] += qdisc_pkt_len(skb); + sch->qstats.backlog += qdisc_pkt_len(skb); + + if (list_empty(&flow->flowchain)) { + list_add_tail(&flow->flowchain, &q->new_flows); + codel_vars_init(&flow->cvars); + q->new_flow_count++; + flow->deficit = q->quantum; + flow->dropped = 0; + } + if (++sch->q.qlen < sch->limit) + return NET_XMIT_SUCCESS; + + q->drop_overlimit++; + /* Return Congestion Notification only if we dropped a packet + * from this flow. + */ + if (fq_codel_drop(sch) == idx) + return NET_XMIT_CN; + + /* As we dropped a packet, better let upper stack know this */ + qdisc_tree_decrease_qlen(sch, 1); + return NET_XMIT_SUCCESS; +} + +/* This is the specific function called from codel_dequeue() + * to dequeue a packet from queue. Note: backlog is handled in + * codel, we dont need to reduce it here. + */ +static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + struct fq_codel_flow *flow; + struct sk_buff *skb = NULL; + + flow = container_of(vars, struct fq_codel_flow, cvars); + if (flow->head) { + skb = dequeue_head(flow); + q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb); + sch->q.qlen--; + } + return skb; +} + +static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + struct sk_buff *skb; + struct fq_codel_flow *flow; + struct list_head *head; + u32 prev_drop_count, prev_ecn_mark; + +begin: + head = &q->new_flows; + if (list_empty(head)) { + head = &q->old_flows; + if (list_empty(head)) + return NULL; + } + flow = list_first_entry(head, struct fq_codel_flow, flowchain); + + if (flow->deficit <= 0) { + flow->deficit += q->quantum; + list_move_tail(&flow->flowchain, &q->old_flows); + goto begin; + } + + prev_drop_count = q->cstats.drop_count; + prev_ecn_mark = q->cstats.ecn_mark; + + skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats, + dequeue); + + flow->dropped += q->cstats.drop_count - prev_drop_count; + flow->dropped += q->cstats.ecn_mark - prev_ecn_mark; + + if (!skb) { + /* force a pass through old_flows to prevent starvation */ + if ((head == &q->new_flows) && !list_empty(&q->old_flows)) + list_move_tail(&flow->flowchain, &q->old_flows); + else + list_del_init(&flow->flowchain); + goto begin; + } + qdisc_bstats_update(sch, skb); + flow->deficit -= qdisc_pkt_len(skb); + /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0, + * or HTB crashes. Defer it for next round. 
+ */ + if (q->cstats.drop_count && sch->q.qlen) { + qdisc_tree_decrease_qlen(sch, q->cstats.drop_count); + q->cstats.drop_count = 0; + } + return skb; +} + +static void fq_codel_reset(struct Qdisc *sch) +{ + struct sk_buff *skb; + + while ((skb = fq_codel_dequeue(sch)) != NULL) + kfree_skb(skb); +} + +static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = { + [TCA_FQ_CODEL_TARGET] = { .type = NLA_U32 }, + [TCA_FQ_CODEL_LIMIT] = { .type = NLA_U32 }, + [TCA_FQ_CODEL_INTERVAL] = { .type = NLA_U32 }, + [TCA_FQ_CODEL_ECN] = { .type = NLA_U32 }, + [TCA_FQ_CODEL_FLOWS] = { .type = NLA_U32 }, + [TCA_FQ_CODEL_QUANTUM] = { .type = NLA_U32 }, +}; + +static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + struct nlattr *tb[TCA_FQ_CODEL_MAX + 1]; + int err; + + if (!opt) + return -EINVAL; + + err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy); + if (err < 0) + return err; + if (tb[TCA_FQ_CODEL_FLOWS]) { + if (q->flows) + return -EINVAL; + q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]); + if (!q->flows_cnt || + q->flows_cnt > 65536) + return -EINVAL; + } + sch_tree_lock(sch); + + if (tb[TCA_FQ_CODEL_TARGET]) { + u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]); + + q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT; + } + + if (tb[TCA_FQ_CODEL_INTERVAL]) { + u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]); + + q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT; + } + + if (tb[TCA_FQ_CODEL_LIMIT]) + sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]); + + if (tb[TCA_FQ_CODEL_ECN]) + q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]); + + if (tb[TCA_FQ_CODEL_QUANTUM]) + q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM])); + + while (sch->q.qlen > sch->limit) { + struct sk_buff *skb = fq_codel_dequeue(sch); + + kfree_skb(skb); + q->cstats.drop_count++; + } + qdisc_tree_decrease_qlen(sch, q->cstats.drop_count); + q->cstats.drop_count = 0; + + sch_tree_unlock(sch); + return 0; +} + +static void *fq_codel_zalloc(size_t sz) +{ + void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN); + + if (!ptr) + ptr = vzalloc(sz); + return ptr; +} + +static void fq_codel_free(void *addr) +{ + if (addr) { + if (is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); + } +} + +static void fq_codel_destroy(struct Qdisc *sch) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + + tcf_destroy_chain(&q->filter_list); + fq_codel_free(q->backlogs); + fq_codel_free(q->flows); +} + +static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + int i; + + sch->limit = 10*1024; + q->flows_cnt = 1024; + q->quantum = psched_mtu(qdisc_dev(sch)); + q->perturbation = net_random(); + INIT_LIST_HEAD(&q->new_flows); + INIT_LIST_HEAD(&q->old_flows); + codel_params_init(&q->cparams); + codel_stats_init(&q->cstats); + q->cparams.ecn = true; + + if (opt) { + int err = fq_codel_change(sch, opt); + if (err) + return err; + } + + if (!q->flows) { + q->flows = fq_codel_zalloc(q->flows_cnt * + sizeof(struct fq_codel_flow)); + if (!q->flows) + return -ENOMEM; + q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32)); + if (!q->backlogs) { + fq_codel_free(q->flows); + return -ENOMEM; + } + for (i = 0; i < q->flows_cnt; i++) { + struct fq_codel_flow *flow = q->flows + i; + + INIT_LIST_HEAD(&flow->flowchain); + } + } + if (sch->limit >= 1) + sch->flags |= TCQ_F_CAN_BYPASS; + else + sch->flags &= ~TCQ_F_CAN_BYPASS; + return 0; +} + +static int 
fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + struct nlattr *opts; + + opts = nla_nest_start(skb, TCA_OPTIONS); + if (opts == NULL) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET, + codel_time_to_us(q->cparams.target)) || + nla_put_u32(skb, TCA_FQ_CODEL_LIMIT, + sch->limit) || + nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL, + codel_time_to_us(q->cparams.interval)) || + nla_put_u32(skb, TCA_FQ_CODEL_ECN, + q->cparams.ecn) || + nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM, + q->quantum) || + nla_put_u32(skb, TCA_FQ_CODEL_FLOWS, + q->flows_cnt)) + goto nla_put_failure; + + nla_nest_end(skb, opts); + return skb->len; + +nla_put_failure: + return -1; +} + +static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + struct tc_fq_codel_xstats st = { + .type = TCA_FQ_CODEL_XSTATS_QDISC, + }; + struct list_head *pos; + + st.qdisc_stats.maxpacket = q->cstats.maxpacket; + st.qdisc_stats.drop_overlimit = q->drop_overlimit; + st.qdisc_stats.ecn_mark = q->cstats.ecn_mark; + st.qdisc_stats.new_flow_count = q->new_flow_count; + + list_for_each(pos, &q->new_flows) + st.qdisc_stats.new_flows_len++; + + list_for_each(pos, &q->old_flows) + st.qdisc_stats.old_flows_len++; + + return gnet_stats_copy_app(d, &st, sizeof(st)); +} + +static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg) +{ + return NULL; +} + +static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid) +{ + return 0; +} + +static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent, + u32 classid) +{ + /* we cannot bypass queue discipline anymore */ + sch->flags &= ~TCQ_F_CAN_BYPASS; + return 0; +} + +static void fq_codel_put(struct Qdisc *q, unsigned long cl) +{ +} + +static struct tcf_proto **fq_codel_find_tcf(struct Qdisc *sch, unsigned long cl) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + + if (cl) + return NULL; + return &q->filter_list; +} + +static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl, + struct sk_buff *skb, struct tcmsg *tcm) +{ + tcm->tcm_handle |= TC_H_MIN(cl); + return 0; +} + +static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl, + struct gnet_dump *d) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + u32 idx = cl - 1; + struct gnet_stats_queue qs = { 0 }; + struct tc_fq_codel_xstats xstats; + + if (idx < q->flows_cnt) { + const struct fq_codel_flow *flow = &q->flows[idx]; + const struct sk_buff *skb = flow->head; + + memset(&xstats, 0, sizeof(xstats)); + xstats.type = TCA_FQ_CODEL_XSTATS_CLASS; + xstats.class_stats.deficit = flow->deficit; + xstats.class_stats.ldelay = + codel_time_to_us(flow->cvars.ldelay); + xstats.class_stats.count = flow->cvars.count; + xstats.class_stats.lastcount = flow->cvars.lastcount; + xstats.class_stats.dropping = flow->cvars.dropping; + if (flow->cvars.dropping) { + codel_tdiff_t delta = flow->cvars.drop_next - + codel_get_time(); + + xstats.class_stats.drop_next = (delta >= 0) ? 
+ codel_time_to_us(delta) : + -codel_time_to_us(-delta); + } + while (skb) { + qs.qlen++; + skb = skb->next; + } + qs.backlog = q->backlogs[idx]; + qs.drops = flow->dropped; + } + if (gnet_stats_copy_queue(d, &qs) < 0) + return -1; + if (idx < q->flows_cnt) + return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); + return 0; +} + +static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + unsigned int i; + + if (arg->stop) + return; + + for (i = 0; i < q->flows_cnt; i++) { + if (list_empty(&q->flows[i].flowchain) || + arg->count < arg->skip) { + arg->count++; + continue; + } + if (arg->fn(sch, i + 1, arg) < 0) { + arg->stop = 1; + break; + } + arg->count++; + } +} + +static const struct Qdisc_class_ops fq_codel_class_ops = { + .leaf = fq_codel_leaf, + .get = fq_codel_get, + .put = fq_codel_put, + .tcf_chain = fq_codel_find_tcf, + .bind_tcf = fq_codel_bind, + .unbind_tcf = fq_codel_put, + .dump = fq_codel_dump_class, + .dump_stats = fq_codel_dump_class_stats, + .walk = fq_codel_walk, +}; + +static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = { + .cl_ops = &fq_codel_class_ops, + .id = "fq_codel", + .priv_size = sizeof(struct fq_codel_sched_data), + .enqueue = fq_codel_enqueue, + .dequeue = fq_codel_dequeue, + .peek = qdisc_peek_dequeued, + .drop = fq_codel_drop, + .init = fq_codel_init, + .reset = fq_codel_reset, + .destroy = fq_codel_destroy, + .change = fq_codel_change, + .dump = fq_codel_dump, + .dump_stats = fq_codel_dump_stats, + .owner = THIS_MODULE, +}; + +static int __init fq_codel_module_init(void) +{ + return register_qdisc(&fq_codel_qdisc_ops); +} + +static void __exit fq_codel_module_exit(void) +{ + unregister_qdisc(&fq_codel_qdisc_ops); +} + +module_init(fq_codel_module_init) +module_exit(fq_codel_module_exit) +MODULE_AUTHOR("Eric Dumazet"); +MODULE_LICENSE("GPL"); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 67fc573e013..511323e89ce 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -86,9 +86,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb, * deadloop is detected. Return OK to try the next skb. 
*/ kfree_skb(skb); - if (net_ratelimit()) - pr_warning("Dead loop on netdevice %s, fix it urgently!\n", - dev_queue->dev->name); + net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n", + dev_queue->dev->name); ret = qdisc_qlen(q); } else { /* @@ -136,9 +135,9 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, ret = handle_dev_cpu_collision(skb, txq, q); } else { /* Driver returned NETDEV_TX_BUSY - requeue skb */ - if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit())) - pr_warning("BUG %s code %d qlen %d\n", - dev->name, ret, q->q.qlen); + if (unlikely(ret != NETDEV_TX_BUSY)) + net_warn_ratelimited("BUG %s code %d qlen %d\n", + dev->name, ret, q->q.qlen); ret = dev_requeue_skb(skb, q); } @@ -512,7 +511,8 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb) struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1); - NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); + if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) + goto nla_put_failure; return skb->len; nla_put_failure: diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 8179494c269..e901583e4ea 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -255,10 +255,8 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch) u16 dp = tc_index_to_dp(skb); if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { - if (net_ratelimit()) - pr_warning("GRED: Unable to relocate VQ 0x%x " - "after dequeue, screwing up " - "backlog.\n", tc_index_to_dp(skb)); + net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n", + tc_index_to_dp(skb)); } else { q->backlog -= qdisc_pkt_len(skb); @@ -287,10 +285,8 @@ static unsigned int gred_drop(struct Qdisc *sch) u16 dp = tc_index_to_dp(skb); if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { - if (net_ratelimit()) - pr_warning("GRED: Unable to relocate VQ 0x%x " - "while dropping, screwing up " - "backlog.\n", tc_index_to_dp(skb)); + net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\n", + tc_index_to_dp(skb)); } else { q->backlog -= len; q->stats.other++; @@ -521,14 +517,16 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; - NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt); + if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt)) + goto nla_put_failure; for (i = 0; i < MAX_DPs; i++) { struct gred_sched_data *q = table->tab[i]; max_p[i] = q ? 
q->parms.max_P : 0; } - NLA_PUT(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p); + if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p)) + goto nla_put_failure; parms = nla_nest_start(skb, TCA_GRED_PARMS); if (parms == NULL) diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 9bdca2e011e..6c2ec451054 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1305,7 +1305,8 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc) tsc.m1 = sm2m(sc->sm1); tsc.d = dx2d(sc->dx); tsc.m2 = sm2m(sc->sm2); - NLA_PUT(skb, attr, sizeof(tsc), &tsc); + if (nla_put(skb, attr, sizeof(tsc), &tsc)) + goto nla_put_failure; return skb->len; @@ -1573,7 +1574,8 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) } qopt.defcls = q->defcls; - NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); + if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt)) + goto nla_put_failure; return skb->len; nla_put_failure: @@ -1607,7 +1609,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (cl->qdisc->q.qlen == 1) set_active(cl, qdisc_pkt_len(skb)); - bstats_update(&cl->bstats, skb); sch->q.qlen++; return NET_XMIT_SUCCESS; @@ -1655,6 +1656,7 @@ hfsc_dequeue(struct Qdisc *sch) return NULL; } + bstats_update(&cl->bstats, skb); update_vf(cl, qdisc_pkt_len(skb), cur_time); if (realtime) cl->cl_cumul += qdisc_pkt_len(skb); diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 29b942ce9e8..9d75b776131 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -558,9 +558,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) __skb_queue_tail(&q->direct_queue, skb); q->direct_pkts++; } else { - kfree_skb(skb); - sch->qstats.drops++; - return NET_XMIT_DROP; + return qdisc_drop(skb, sch); } #ifdef CONFIG_NET_CLS_ACT } else if (!cl) { @@ -576,7 +574,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) } return ret; } else { - bstats_update(&cl->bstats, skb); htb_activate(q, cl); } @@ -837,6 +834,7 @@ next: } while (cl != start); if (likely(skb != NULL)) { + bstats_update(&cl->bstats, skb); cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb); if (cl->un.leaf.deficit[level] < 0) { cl->un.leaf.deficit[level] += cl->quantum; @@ -1051,7 +1049,8 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; - NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt); + if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt)) + goto nla_put_failure; nla_nest_end(skb, nest); spin_unlock_bh(root_lock); @@ -1090,7 +1089,8 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, opt.quantum = cl->quantum; opt.prio = cl->prio; opt.level = cl->level; - NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt); + if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; nla_nest_end(skb, nest); spin_unlock_bh(root_lock); diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index 28de4309233..d1831ca966d 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -247,7 +247,8 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb) opt.offset[i] = dev->tc_to_txq[i].offset; } - NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); + if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) + goto nla_put_failure; return skb->len; nla_put_failure: diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 49131d7a744..2a2b096d9a6 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -284,7 +284,8 @@ static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb) 
opt.bands = q->bands; opt.max_bands = q->max_bands; - NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); + if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) + goto nla_put_failure; return skb->len; diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index ebd22966f74..a2a95aabf9c 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -26,6 +26,7 @@ #include <net/netlink.h> #include <net/pkt_sched.h> +#include <net/inet_ecn.h> #define VERSION "1.3" @@ -78,6 +79,7 @@ struct netem_sched_data { psched_tdiff_t jitter; u32 loss; + u32 ecn; u32 limit; u32 counter; u32 gap; @@ -374,9 +376,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) ++count; /* Drop packet? */ - if (loss_event(q)) - --count; - + if (loss_event(q)) { + if (q->ecn && INET_ECN_set_ce(skb)) + sch->qstats.drops++; /* mark packet */ + else + --count; + } if (count == 0) { sch->qstats.drops++; kfree_skb(skb); @@ -704,6 +709,7 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = { [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, [TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) }, [TCA_NETEM_LOSS] = { .type = NLA_NESTED }, + [TCA_NETEM_ECN] = { .type = NLA_U32 }, }; static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, @@ -774,6 +780,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt) if (tb[TCA_NETEM_RATE]) get_rate(sch, tb[TCA_NETEM_RATE]); + if (tb[TCA_NETEM_ECN]) + q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]); + q->loss_model = CLG_RANDOM; if (tb[TCA_NETEM_LOSS]) ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]); @@ -832,7 +841,8 @@ static int dump_loss_model(const struct netem_sched_data *q, .p23 = q->clg.a5, }; - NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi); + if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi)) + goto nla_put_failure; break; } case CLG_GILB_ELL: { @@ -843,7 +853,8 @@ static int dump_loss_model(const struct netem_sched_data *q, .k1 = q->clg.a4, }; - NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge); + if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge)) + goto nla_put_failure; break; } } @@ -872,26 +883,34 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) qopt.loss = q->loss; qopt.gap = q->gap; qopt.duplicate = q->duplicate; - NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); + if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt)) + goto nla_put_failure; cor.delay_corr = q->delay_cor.rho; cor.loss_corr = q->loss_cor.rho; cor.dup_corr = q->dup_cor.rho; - NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor); + if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor)) + goto nla_put_failure; reorder.probability = q->reorder; reorder.correlation = q->reorder_cor.rho; - NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder); + if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder)) + goto nla_put_failure; corrupt.probability = q->corrupt; corrupt.correlation = q->corrupt_cor.rho; - NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt); + if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt)) + goto nla_put_failure; rate.rate = q->rate; rate.packet_overhead = q->packet_overhead; rate.cell_size = q->cell_size; rate.cell_overhead = q->cell_overhead; - NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate); + if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate)) + goto nla_put_failure; + + if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn)) + goto nla_put_failure; if (dump_loss_model(q, skb) != 0) goto nla_put_failure; diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index b5d56a22b1d..79359b69ad8 
100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -247,7 +247,8 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) opt.bands = q->bands; memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1); - NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); + if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) + goto nla_put_failure; return skb->len; diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index e68cb440756..9af01f3df18 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -429,8 +429,9 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg, nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; - NLA_PUT_U32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w); - NLA_PUT_U32(skb, TCA_QFQ_LMAX, cl->lmax); + if (nla_put_u32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w) || + nla_put_u32(skb, TCA_QFQ_LMAX, cl->lmax)) + goto nla_put_failure; return nla_nest_end(skb, nest); nla_put_failure: diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index a5cc3012cf4..633e32defdc 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -272,8 +272,9 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb) opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; - NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt); - NLA_PUT_U32(skb, TCA_RED_MAX_P, q->parms.max_P); + if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) || + nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P)) + goto nla_put_failure; return nla_nest_end(skb, opts); nla_put_failure: diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index d7eea99333e..74305c883bd 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -570,7 +570,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb) sch->qstats.backlog = q->qdisc->qstats.backlog; opts = nla_nest_start(skb, TCA_OPTIONS); - NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt); + if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; return nla_nest_end(skb, opts); nla_put_failure: diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 02a21abea65..d3a1bc26dbf 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -812,7 +812,8 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb) memcpy(&opt.stats, &q->stats, sizeof(opt.stats)); opt.flags = q->flags; - NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); + if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) + goto nla_put_failure; return skb->len; diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index b8e156319d7..4b056c15e90 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -359,7 +359,8 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) memset(&opt.peakrate, 0, sizeof(opt.peakrate)); opt.mtu = q->mtu; opt.buffer = q->buffer; - NLA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt); + if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; nla_nest_end(skb, nest); return skb->len; diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 45326599fda..ca0c29695d5 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -88,9 +88,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc *sch) return NET_XMIT_SUCCESS; } - kfree_skb(skb); - sch->qstats.drops++; - return NET_XMIT_DROP; + return qdisc_drop(skb, sch); } static struct sk_buff * diff --git a/net/sctp/associola.c b/net/sctp/associola.c index acd2edbc073..5bc9ab161b3 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1408,7 +1408,7 @@ static inline int sctp_peer_needs_update(struct sctp_association 
*asoc) } /* Increase asoc's rwnd by len and send any window update SACK if needed. */ -void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len) +void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len) { struct sctp_chunk *sack; struct timer_list *timer; @@ -1465,7 +1465,7 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len) } /* Decrease asoc's rwnd by len. */ -void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len) +void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) { int rx_count; int over = 0; diff --git a/net/sctp/input.c b/net/sctp/input.c index 80f71af7138..80564fe0302 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -342,7 +342,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) sctp_bh_lock_sock(sk); if (sock_owned_by_user(sk)) { - if (sk_add_backlog(sk, skb)) + if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) sctp_chunk_free(chunk); else backloged = 1; @@ -376,7 +376,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) struct sctp_ep_common *rcvr = chunk->rcvr; int ret; - ret = sk_add_backlog(sk, skb); + ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf); if (!ret) { /* Hold the assoc/ep while hanging on the backlog queue. * This way, we know structures we need will not disappear diff --git a/net/sctp/output.c b/net/sctp/output.c index 8fc4dcd294a..f1b7d4bb591 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -661,8 +661,8 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet, */ if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) && inflight && sctp_state(asoc, ESTABLISHED)) { - unsigned max = transport->pathmtu - packet->overhead; - unsigned len = chunk->skb->len + q->out_qlen; + unsigned int max = transport->pathmtu - packet->overhead; + unsigned int len = chunk->skb->len + q->out_qlen; /* Check whether this chunk and all the rest of pending * data will fit or delay in hopes of bundling a full diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index cfeb1d4a1ee..a0fa19f5650 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -1147,7 +1147,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack) __u32 sack_ctsn, ctsn, tsn; __u32 highest_tsn, highest_new_tsn; __u32 sack_a_rwnd; - unsigned outstanding; + unsigned int outstanding; struct sctp_transport *primary = asoc->peer.primary_path; int count_of_newacks = 0; int gap_ack_blocks; diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 1ff51c9d18d..c96d1a81cf4 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -524,7 +524,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc, /* Worker routine to handle INIT command failure. 
*/ static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands, struct sctp_association *asoc, - unsigned error) + unsigned int error) { struct sctp_ulpevent *event; @@ -550,7 +550,7 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, sctp_event_t event_type, sctp_subtype_t subtype, struct sctp_chunk *chunk, - unsigned error) + unsigned int error) { struct sctp_ulpevent *event; @@ -1161,9 +1161,8 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, break; case SCTP_DISPOSITION_VIOLATION: - if (net_ratelimit()) - pr_err("protocol violation state %d chunkid %d\n", - state, subtype.chunk); + net_err_ratelimited("protocol violation state %d chunkid %d\n", + state, subtype.chunk); break; case SCTP_DISPOSITION_NOT_IMPL: diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 891f5db8cc3..9fca1035735 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1129,17 +1129,15 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep, /* This should never happen, but lets log it if so. */ if (unlikely(!link)) { if (from_addr.sa.sa_family == AF_INET6) { - if (net_ratelimit()) - pr_warn("%s association %p could not find address %pI6\n", - __func__, - asoc, - &from_addr.v6.sin6_addr); + net_warn_ratelimited("%s association %p could not find address %pI6\n", + __func__, + asoc, + &from_addr.v6.sin6_addr); } else { - if (net_ratelimit()) - pr_warn("%s association %p could not find address %pI4\n", - __func__, - asoc, - &from_addr.v4.sin_addr.s_addr); + net_warn_ratelimited("%s association %p could not find address %pI4\n", + __func__, + asoc, + &from_addr.v4.sin_addr.s_addr); } return SCTP_DISPOSITION_DISCARD; } @@ -2410,7 +2408,7 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; - unsigned len; + unsigned int len; __be16 error = SCTP_ERROR_NO_ERROR; /* See if we have an error cause code in the chunk. */ @@ -2446,7 +2444,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; - unsigned len; + unsigned int len; __be16 error = SCTP_ERROR_NO_ERROR; if (!sctp_vtag_verify_either(chunk, asoc)) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 92ba71dfe08..b3b8a8d813e 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -5840,10 +5840,8 @@ SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog) if (!sctp_sk(sk)->hmac && sctp_hmac_alg) { tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { - if (net_ratelimit()) { - pr_info("failed to load transform for %s: %ld\n", - sctp_hmac_alg, PTR_ERR(tfm)); - } + net_info_ratelimited("failed to load transform for %s: %ld\n", + sctp_hmac_alg, PTR_ERR(tfm)); return -ENOSYS; } sctp_sk(sk)->hmac = tfm; diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 60ffbd067ff..e5fe639c89e 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -275,22 +275,16 @@ static ctl_table sctp_table[] = { { /* sentinel */ } }; -static struct ctl_path sctp_path[] = { - { .procname = "net", }, - { .procname = "sctp", }, - { } -}; - static struct ctl_table_header * sctp_sysctl_header; /* Sysctl registration. */ void sctp_sysctl_register(void) { - sctp_sysctl_header = register_sysctl_paths(sctp_path, sctp_table); + sctp_sysctl_header = register_net_sysctl(&init_net, "net/sctp", sctp_table); } /* Sysctl deregistration. 
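[Editorial aside, not part of the patch series: several hunks here and in the sunrpc changes below swap the open-coded "if (net_ratelimit()) printk(...)" pattern for the net_<level>_ratelimited() helpers, which fold the rate-limit check into the call. A before/after sketch with a made-up message:]

	/* old style: explicit rate-limit check around the printk */
	if (net_ratelimit())
		pr_warn("foo: unexpected state %d\n", state);

	/* new style: the rate-limit check is part of the helper */
	net_warn_ratelimited("foo: unexpected state %d\n", state);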
*/ void sctp_sysctl_unregister(void) { - unregister_sysctl_table(sctp_sysctl_header); + unregister_net_sysctl_table(sctp_sysctl_header); } diff --git a/net/socket.c b/net/socket.c index 851edcd6b09..2a2898ce596 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1234,8 +1234,7 @@ int __sock_create(struct net *net, int family, int type, int protocol, */ sock = sock_alloc(); if (!sock) { - if (net_ratelimit()) - printk(KERN_WARNING "socket: no more sockets\n"); + net_warn_ratelimited("socket: no more sockets\n"); return -ENFILE; /* Not exactly a match, but its the closest posix thing */ } @@ -1479,7 +1478,7 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog) sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock) { somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn; - if ((unsigned)backlog > somaxconn) + if ((unsigned int)backlog > somaxconn) backlog = somaxconn; err = security_socket_listen(sock, backlog); @@ -1691,7 +1690,7 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr, */ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, - unsigned, flags, struct sockaddr __user *, addr, + unsigned int, flags, struct sockaddr __user *, addr, int, addr_len) { struct socket *sock; @@ -1738,7 +1737,7 @@ out: */ SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len, - unsigned, flags) + unsigned int, flags) { return sys_sendto(fd, buff, len, flags, NULL, 0); } @@ -1750,7 +1749,7 @@ SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len, */ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, - unsigned, flags, struct sockaddr __user *, addr, + unsigned int, flags, struct sockaddr __user *, addr, int __user *, addr_len) { struct socket *sock; @@ -1795,7 +1794,7 @@ out: */ asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size, - unsigned flags) + unsigned int flags) { return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL); } @@ -1897,7 +1896,7 @@ struct used_address { }; static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, - struct msghdr *msg_sys, unsigned flags, + struct msghdr *msg_sys, unsigned int flags, struct used_address *used_address) { struct compat_msghdr __user *msg_compat = @@ -1908,7 +1907,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, __attribute__ ((aligned(sizeof(__kernel_size_t)))); /* 20 is size of ipv6_pktinfo */ unsigned char *ctl_buf = ctl; - int err, ctl_len, iov_size, total_len; + int err, ctl_len, total_len; err = -EFAULT; if (MSG_CMSG_COMPAT & flags) { @@ -1917,16 +1916,13 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) return -EFAULT; - /* do not move before msg_sys is valid */ - err = -EMSGSIZE; - if (msg_sys->msg_iovlen > UIO_MAXIOV) - goto out; - - /* Check whether to allocate the iovec area */ - err = -ENOMEM; - iov_size = msg_sys->msg_iovlen * sizeof(struct iovec); if (msg_sys->msg_iovlen > UIO_FASTIOV) { - iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); + err = -EMSGSIZE; + if (msg_sys->msg_iovlen > UIO_MAXIOV) + goto out; + err = -ENOMEM; + iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec), + GFP_KERNEL); if (!iov) goto out; } @@ -2005,7 +2001,7 @@ out_freectl: sock_kfree_s(sock->sk, ctl_buf, ctl_len); out_freeiov: if (iov != iovstack) - sock_kfree_s(sock->sk, iov, iov_size); + kfree(iov); out: return err; } @@ -2014,7 +2010,7 @@ out: * BSD sendmsg interface */ -SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, 
unsigned, flags) +SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) { int fput_needed, err; struct msghdr msg_sys; @@ -2096,14 +2092,14 @@ SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, } static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, - struct msghdr *msg_sys, unsigned flags, int nosec) + struct msghdr *msg_sys, unsigned int flags, int nosec) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; unsigned long cmsg_ptr; - int err, iov_size, total_len, len; + int err, total_len, len; /* kernel mode address */ struct sockaddr_storage addr; @@ -2118,15 +2114,13 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) return -EFAULT; - err = -EMSGSIZE; - if (msg_sys->msg_iovlen > UIO_MAXIOV) - goto out; - - /* Check whether to allocate the iovec area */ - err = -ENOMEM; - iov_size = msg_sys->msg_iovlen * sizeof(struct iovec); if (msg_sys->msg_iovlen > UIO_FASTIOV) { - iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); + err = -EMSGSIZE; + if (msg_sys->msg_iovlen > UIO_MAXIOV) + goto out; + err = -ENOMEM; + iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec), + GFP_KERNEL); if (!iov) goto out; } @@ -2180,7 +2174,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, out_freeiov: if (iov != iovstack) - sock_kfree_s(sock->sk, iov, iov_size); + kfree(iov); out: return err; } @@ -2524,6 +2518,12 @@ EXPORT_SYMBOL(sock_unregister); static int __init sock_init(void) { int err; + /* + * Initialize the network sysctl infrastructure. + */ + err = net_sysctl_init(); + if (err) + goto out; /* * Initialize sock SLAB cache. @@ -3223,7 +3223,7 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, return -ENOIOCTLCMD; } -static long compat_sock_ioctl(struct file *file, unsigned cmd, +static long compat_sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct socket *sock = file->private_data; diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 8eff8c32d1b..d3611f11a8d 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -624,7 +624,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx, ctx->seq_send = ctx->seq_send64; if (ctx->seq_send64 != ctx->seq_send) { dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__, - (long unsigned)ctx->seq_send64, ctx->seq_send); + (unsigned long)ctx->seq_send64, ctx->seq_send); p = ERR_PTR(-EINVAL); goto out_err; } diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index de0b0f39d9d..47ad2666fdf 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -1273,7 +1273,7 @@ static void *c_start(struct seq_file *m, loff_t *pos) __acquires(cd->hash_lock) { loff_t n = *pos; - unsigned hash, entry; + unsigned int hash, entry; struct cache_head *ch; struct cache_detail *cd = ((struct handle*)m->private)->cd; diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 4153846984a..017c0117d15 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1041,23 +1041,21 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net) * Printk the given error with the address of the client that caused it. */ static __printf(2, 3) -int svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) 
+void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) { + struct va_format vaf; va_list args; - int r; char buf[RPC_MAX_ADDRBUFLEN]; - if (!net_ratelimit()) - return 0; + va_start(args, fmt); - printk(KERN_WARNING "svc: %s: ", - svc_print_addr(rqstp, buf, sizeof(buf))); + vaf.fmt = fmt; + vaf.va = &args; - va_start(args, fmt); - r = vprintk(fmt, args); - va_end(args); + net_warn_ratelimited("svc: %s: %pV", + svc_print_addr(rqstp, buf, sizeof(buf)), &vaf); - return r; + va_end(args); } /* diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 4bda09d7e1a..b98ee351491 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -544,14 +544,11 @@ static void svc_check_conn_limits(struct svc_serv *serv) struct svc_xprt *xprt = NULL; spin_lock_bh(&serv->sv_lock); if (!list_empty(&serv->sv_tempsocks)) { - if (net_ratelimit()) { - /* Try to help the admin */ - printk(KERN_NOTICE "%s: too many open " - "connections, consider increasing %s\n", - serv->sv_name, serv->sv_maxconn ? - "the max number of connections." : - "the number of threads."); - } + /* Try to help the admin */ + net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n", + serv->sv_name, serv->sv_maxconn ? + "max number of connections" : + "number of threads"); /* * Always select the oldest connection. It's not fair, * but so is life diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 824d32fb312..a6de09de5d2 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -617,11 +617,8 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) rqstp->rq_prot = IPPROTO_UDP; if (!svc_udp_get_dest_address(rqstp, cmh)) { - if (net_ratelimit()) - printk(KERN_WARNING - "svc: received unknown control message %d/%d; " - "dropping RPC reply datagram\n", - cmh->cmsg_level, cmh->cmsg_type); + net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n", + cmh->cmsg_level, cmh->cmsg_type); skb_free_datagram_locked(svsk->sk_sk, skb); return 0; } @@ -871,18 +868,17 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt) if (err == -ENOMEM) printk(KERN_WARNING "%s: no more sockets!\n", serv->sv_name); - else if (err != -EAGAIN && net_ratelimit()) - printk(KERN_WARNING "%s: accept failed (err %d)!\n", - serv->sv_name, -err); + else if (err != -EAGAIN) + net_warn_ratelimited("%s: accept failed (err %d)!\n", + serv->sv_name, -err); return NULL; } set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); err = kernel_getpeername(newsock, sin, &slen); if (err < 0) { - if (net_ratelimit()) - printk(KERN_WARNING "%s: peername failed (err %d)!\n", - serv->sv_name, -err); + net_warn_ratelimited("%s: peername failed (err %d)!\n", + serv->sv_name, -err); goto failed; /* aborted connection or whatever */ } @@ -1012,19 +1008,15 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) * bit set in the fragment length header. * But apparently no known nfs clients send fragmented * records. 
*/ - if (net_ratelimit()) - printk(KERN_NOTICE "RPC: multiple fragments " - "per record not supported\n"); + net_notice_ratelimited("RPC: multiple fragments per record not supported\n"); goto err_delete; } svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK; dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen); if (svsk->sk_reclen > serv->sv_max_mesg) { - if (net_ratelimit()) - printk(KERN_NOTICE "RPC: " - "fragment too large: 0x%08lx\n", - (unsigned long)svsk->sk_reclen); + net_notice_ratelimited("RPC: fragment too large: 0x%08lx\n", + (unsigned long)svsk->sk_reclen); goto err_delete; } } @@ -1556,7 +1548,7 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv, (char *)&val, sizeof(val)); if (type == SOCK_STREAM) - sock->sk->sk_reuse = 1; /* allow address reuse */ + sock->sk->sk_reuse = SK_CAN_REUSE; /* allow address reuse */ error = kernel_bind(sock, sin, len); if (error < 0) goto bummer; diff --git a/net/sunrpc/timer.c b/net/sunrpc/timer.c index dd824341c34..08881d0c967 100644 --- a/net/sunrpc/timer.c +++ b/net/sunrpc/timer.c @@ -34,7 +34,7 @@ void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo) { unsigned long init = 0; - unsigned i; + unsigned int i; rt->timeo = timeo; @@ -57,7 +57,7 @@ EXPORT_SYMBOL_GPL(rpc_init_rtt); * NB: When computing the smoothed RTT and standard deviation, * be careful not to produce negative intermediate results. */ -void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m) +void rpc_update_rtt(struct rpc_rtt *rt, unsigned int timer, long m) { long *srtt, *sdrtt; @@ -106,7 +106,7 @@ EXPORT_SYMBOL_GPL(rpc_update_rtt); * read, write, commit - A+4D * other - timeo */ -unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer) +unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned int timer) { unsigned long res; diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index b97a3dd9a60..fddcccfcdf7 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1204,7 +1204,7 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data) { int i, ret = 0; - unsigned page_len, thislen, page_offset; + unsigned int page_len, thislen, page_offset; struct scatterlist sg[1]; sg_init_table(sg, 1); diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 0cbcd1ab49a..6fe2dcead15 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -783,7 +783,7 @@ static void xprt_update_rtt(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; struct rpc_rtt *rtt = task->tk_client->cl_rtt; - unsigned timer = task->tk_msg.rpc_proc->p_timer; + unsigned int timer = task->tk_msg.rpc_proc->p_timer; long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt)); if (timer) { diff --git a/net/sysctl_net.c b/net/sysctl_net.c index c3e65aebecc..e3a6e37cd1c 100644 --- a/net/sysctl_net.c +++ b/net/sysctl_net.c @@ -26,10 +26,6 @@ #include <linux/if_ether.h> #endif -#ifdef CONFIG_TR -#include <linux/if_tr.h> -#endif - static struct ctl_table_set * net_ctl_header_lookup(struct ctl_table_root *root, struct nsproxy *namespaces) { @@ -59,19 +55,6 @@ static struct ctl_table_root net_sysctl_root = { .permissions = net_ctl_permissions, }; -static int net_ctl_ro_header_perms(struct ctl_table_root *root, - struct nsproxy *namespaces, struct ctl_table *table) -{ - if (net_eq(namespaces->net_ns, &init_net)) - return table->mode; - else - return table->mode & ~0222; -} - -static struct ctl_table_root net_sysctl_ro_root = { - .permissions = net_ctl_ro_header_perms, -}; - static int __net_init sysctl_net_init(struct net *net) { 
setup_sysctl_set(&net->sysctls, &net_sysctl_root, is_seen); @@ -88,34 +71,32 @@ static struct pernet_operations sysctl_pernet_ops = { .exit = sysctl_net_exit, }; -static __init int net_sysctl_init(void) +static struct ctl_table_header *net_header; +__init int net_sysctl_init(void) { - int ret; + static struct ctl_table empty[1]; + int ret = -ENOMEM; + /* Avoid limitations in the sysctl implementation by + * registering "/proc/sys/net" as an empty directory not in a + * network namespace. + */ + net_header = register_sysctl("net", empty); + if (!net_header) + goto out; ret = register_pernet_subsys(&sysctl_pernet_ops); if (ret) goto out; - setup_sysctl_set(&net_sysctl_ro_root.default_set, &net_sysctl_ro_root, NULL); - register_sysctl_root(&net_sysctl_ro_root); register_sysctl_root(&net_sysctl_root); out: return ret; } -subsys_initcall(net_sysctl_init); - -struct ctl_table_header *register_net_sysctl_table(struct net *net, - const struct ctl_path *path, struct ctl_table *table) -{ - return __register_sysctl_paths(&net->sysctls, path, table); -} -EXPORT_SYMBOL_GPL(register_net_sysctl_table); -struct ctl_table_header *register_net_sysctl_rotable(const - struct ctl_path *path, struct ctl_table *table) +struct ctl_table_header *register_net_sysctl(struct net *net, + const char *path, struct ctl_table *table) { - return __register_sysctl_paths(&net_sysctl_ro_root.default_set, - path, table); + return __register_sysctl_table(&net->sysctls, path, table); } -EXPORT_SYMBOL_GPL(register_net_sysctl_rotable); +EXPORT_SYMBOL_GPL(register_net_sysctl); void unregister_net_sysctl_table(struct ctl_table_header *header) { diff --git a/net/tipc/Makefile b/net/tipc/Makefile index 521d24d04ab..6cd55d671d3 100644 --- a/net/tipc/Makefile +++ b/net/tipc/Makefile @@ -9,5 +9,3 @@ tipc-y += addr.o bcast.o bearer.o config.o \ name_distr.o subscr.o name_table.o net.o \ netlink.o node.o node_subscr.o port.o ref.o \ socket.o log.o eth_media.o - -# End of file diff --git a/net/tipc/addr.c b/net/tipc/addr.c index a6fdab33877..357b74b26f9 100644 --- a/net/tipc/addr.c +++ b/net/tipc/addr.c @@ -45,7 +45,6 @@ * * Returns 1 if domain address is valid, otherwise 0 */ - int tipc_addr_domain_valid(u32 addr) { u32 n = tipc_node(addr); @@ -66,7 +65,6 @@ int tipc_addr_domain_valid(u32 addr) * * Returns 1 if address can be used, otherwise 0 */ - int tipc_addr_node_valid(u32 addr) { return tipc_addr_domain_valid(addr) && tipc_node(addr); @@ -86,7 +84,6 @@ int tipc_in_scope(u32 domain, u32 addr) /** * tipc_addr_scope - convert message lookup domain to a 2-bit scope value */ - int tipc_addr_scope(u32 domain) { if (likely(!domain)) diff --git a/net/tipc/addr.h b/net/tipc/addr.h index e4f35afe320..60b00ab93d7 100644 --- a/net/tipc/addr.h +++ b/net/tipc/addr.h @@ -50,18 +50,33 @@ static inline u32 tipc_cluster_mask(u32 addr) return addr & TIPC_CLUSTER_MASK; } -static inline int in_own_cluster(u32 addr) +static inline int in_own_cluster_exact(u32 addr) { return !((addr ^ tipc_own_addr) >> 12); } /** + * in_own_node - test for node inclusion; <0.0.0> always matches + */ +static inline int in_own_node(u32 addr) +{ + return (addr == tipc_own_addr) || !addr; +} + +/** + * in_own_cluster - test for cluster inclusion; <0.0.0> always matches + */ +static inline int in_own_cluster(u32 addr) +{ + return in_own_cluster_exact(addr) || !addr; +} + +/** * addr_domain - convert 2-bit scope value to equivalent message lookup domain * * Needed when address of a named message must be looked up a second time * after a network hop. 
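[Editorial aside, not part of the patch series: the sysctl rework above replaces register_sysctl_paths() and its ctl_path arrays with register_net_sysctl(), which takes the path as a plain string, mirroring the sctp conversion earlier in the series. A minimal sketch of a subsystem registering and unregistering under the new interface; the foo_* names are placeholders.]

static struct ctl_table foo_table[] = {
	{ /* sentinel */ }
};

static struct ctl_table_header *foo_sysctl_header;

static void foo_sysctl_register(void)
{
	/* path is a plain string rooted under /proc/sys */
	foo_sysctl_header = register_net_sysctl(&init_net, "net/foo", foo_table);
}

static void foo_sysctl_unregister(void)
{
	unregister_net_sysctl_table(foo_sysctl_header);
}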
*/ - static inline u32 addr_domain(u32 sc) { if (likely(sc == TIPC_NODE_SCOPE)) diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index e00441a2092..2625f5ebe3e 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -73,7 +73,6 @@ struct tipc_bcbearer_pair { * large local variables within multicast routines. Concurrent access is * prevented through use of the spinlock "bc_lock". */ - struct tipc_bcbearer { struct tipc_bearer bearer; struct tipc_media media; @@ -92,7 +91,6 @@ struct tipc_bcbearer { * * Handles sequence numbering, fragmentation, bundling, etc. */ - struct tipc_bclink { struct tipc_link link; struct tipc_node node; @@ -169,7 +167,6 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno) * * Called with bc_lock locked */ - struct tipc_node *tipc_bclink_retransmit_to(void) { return bclink->retransmit_to; @@ -182,7 +179,6 @@ struct tipc_node *tipc_bclink_retransmit_to(void) * * Called with bc_lock locked */ - static void bclink_retransmit_pkt(u32 after, u32 to) { struct sk_buff *buf; @@ -200,7 +196,6 @@ static void bclink_retransmit_pkt(u32 after, u32 to) * * Node is locked, bc_lock unlocked. */ - void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) { struct sk_buff *crs; @@ -280,7 +275,6 @@ exit: * * tipc_net_lock and node lock set */ - void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) { struct sk_buff *buf; @@ -344,7 +338,6 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) * * Only tipc_net_lock set. */ - static void bclink_peek_nack(struct tipc_msg *msg) { struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg)); @@ -365,7 +358,6 @@ static void bclink_peek_nack(struct tipc_msg *msg) /* * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster */ - int tipc_bclink_send_msg(struct sk_buff *buf) { int res; @@ -394,7 +386,6 @@ exit: * * Called with both sending node's lock and bc_lock taken. 
*/ - static void bclink_accept_pkt(struct tipc_node *node, u32 seqno) { bclink_update_last_sent(node, seqno); @@ -420,7 +411,6 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno) * * tipc_net_lock is read_locked, no other locks set */ - void tipc_bclink_recv_pkt(struct sk_buff *buf) { struct tipc_msg *msg = buf_msg(buf); @@ -588,7 +578,6 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr) * Returns 0 (packet sent successfully) under all circumstances, * since the broadcast link's pseudo-bearer never blocks */ - static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1, struct tipc_media_addr *unused2) @@ -601,7 +590,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf, * preparation is skipped for broadcast link protocol messages * since they are sent in an unreliable manner and don't need it */ - if (likely(!msg_non_seq(buf_msg(buf)))) { struct tipc_msg *msg; @@ -618,7 +606,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf, } /* Send buffer over bearers until all targets reached */ - bcbearer->remains = bclink->bcast_nodes; for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) { @@ -660,7 +647,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf, /** * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer */ - void tipc_bcbearer_sort(void) { struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp; @@ -671,7 +657,6 @@ void tipc_bcbearer_sort(void) spin_lock_bh(&bc_lock); /* Group bearers by priority (can assume max of two per priority) */ - memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp)); for (b_index = 0; b_index < MAX_BEARERS; b_index++) { @@ -687,7 +672,6 @@ void tipc_bcbearer_sort(void) } /* Create array of bearer pairs for broadcasting */ - bp_curr = bcbearer->bpairs; memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs)); @@ -817,7 +801,6 @@ void tipc_bclink_stop(void) /** * tipc_nmap_add - add a node to a node map */ - void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node) { int n = tipc_node(node); @@ -833,7 +816,6 @@ void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node) /** * tipc_nmap_remove - remove a node from a node map */ - void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node) { int n = tipc_node(node); @@ -852,7 +834,6 @@ void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node) * @nm_b: input node map B * @nm_diff: output node map A-B (i.e. 
nodes of A that are not in B) */ - static void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b, struct tipc_node_map *nm_diff) @@ -878,7 +859,6 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a, /** * tipc_port_list_add - add a port to a port list, ensuring no duplicates */ - void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port) { struct tipc_port_list *item = pl_ptr; @@ -912,7 +892,6 @@ void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port) * tipc_port_list_free - free dynamically created entries in port_list chain * */ - void tipc_port_list_free(struct tipc_port_list *pl_ptr) { struct tipc_port_list *item; @@ -923,4 +902,3 @@ void tipc_port_list_free(struct tipc_port_list *pl_ptr) kfree(item); } } - diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h index 5571394098f..a93306557e0 100644 --- a/net/tipc/bcast.h +++ b/net/tipc/bcast.h @@ -45,7 +45,6 @@ * @count: # of nodes in set * @map: bitmap of node identifiers that are in the set */ - struct tipc_node_map { u32 count; u32 map[MAX_NODES / WSIZE]; @@ -59,7 +58,6 @@ struct tipc_node_map { * @next: pointer to next entry in list * @ports: array of port references */ - struct tipc_port_list { int count; struct tipc_port_list *next; @@ -77,7 +75,6 @@ void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node); /** * tipc_nmap_equal - test for equality of node maps */ - static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b) { return !memcmp(nm_a, nm_b, sizeof(*nm_a)); diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 5dfd89c4042..a297e3a2e3e 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -53,7 +53,6 @@ static void bearer_disable(struct tipc_bearer *b_ptr); * * Returns 1 if media name is valid, otherwise 0. */ - static int media_name_valid(const char *name) { u32 len; @@ -67,7 +66,6 @@ static int media_name_valid(const char *name) /** * tipc_media_find - locates specified media object by name */ - struct tipc_media *tipc_media_find(const char *name) { u32 i; @@ -82,7 +80,6 @@ struct tipc_media *tipc_media_find(const char *name) /** * media_find_id - locates specified media object by type identifier */ - static struct tipc_media *media_find_id(u8 type) { u32 i; @@ -99,7 +96,6 @@ static struct tipc_media *media_find_id(u8 type) * * Bearers for this media type must be activated separately at a later stage. */ - int tipc_register_media(struct tipc_media *m_ptr) { int res = -EINVAL; @@ -134,7 +130,6 @@ exit: /** * tipc_media_addr_printf - record media address in print buffer */ - void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a) { char addr_str[MAX_ADDR_STR]; @@ -156,7 +151,6 @@ void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a) /** * tipc_media_get_names - record names of registered media in buffer */ - struct sk_buff *tipc_media_get_names(void) { struct sk_buff *buf; @@ -183,7 +177,6 @@ struct sk_buff *tipc_media_get_names(void) * * Returns 1 if bearer name is valid, otherwise 0. 
*/ - static int bearer_name_validate(const char *name, struct tipc_bearer_names *name_parts) { @@ -194,7 +187,6 @@ static int bearer_name_validate(const char *name, u32 if_len; /* copy bearer name & ensure length is OK */ - name_copy[TIPC_MAX_BEARER_NAME - 1] = 0; /* need above in case non-Posix strncpy() doesn't pad with nulls */ strncpy(name_copy, name, TIPC_MAX_BEARER_NAME); @@ -202,7 +194,6 @@ static int bearer_name_validate(const char *name, return 0; /* ensure all component parts of bearer name are present */ - media_name = name_copy; if_name = strchr(media_name, ':'); if (if_name == NULL) @@ -212,7 +203,6 @@ static int bearer_name_validate(const char *name, if_len = strlen(if_name) + 1; /* validate component parts of bearer name */ - if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) || (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) || (strspn(media_name, tipc_alphabet) != (media_len - 1)) || @@ -220,7 +210,6 @@ static int bearer_name_validate(const char *name, return 0; /* return bearer name components, if necessary */ - if (name_parts) { strcpy(name_parts->media_name, media_name); strcpy(name_parts->if_name, if_name); @@ -231,7 +220,6 @@ static int bearer_name_validate(const char *name, /** * tipc_bearer_find - locates bearer object with matching bearer name */ - struct tipc_bearer *tipc_bearer_find(const char *name) { struct tipc_bearer *b_ptr; @@ -247,7 +235,6 @@ struct tipc_bearer *tipc_bearer_find(const char *name) /** * tipc_bearer_find_interface - locates bearer object with matching interface name */ - struct tipc_bearer *tipc_bearer_find_interface(const char *if_name) { struct tipc_bearer *b_ptr; @@ -267,7 +254,6 @@ struct tipc_bearer *tipc_bearer_find_interface(const char *if_name) /** * tipc_bearer_get_names - record names of bearers in buffer */ - struct sk_buff *tipc_bearer_get_names(void) { struct sk_buff *buf; @@ -363,7 +349,6 @@ void tipc_continue(struct tipc_bearer *b_ptr) * the bearer is congested. 'tipc_net_lock' is in read_lock here * bearer.lock is busy */ - static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr) { @@ -377,7 +362,6 @@ static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, * the bearer is congested. 
'tipc_net_lock' is in read_lock here, * bearer.lock is free */ - void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr) { spin_lock_bh(&b_ptr->lock); @@ -410,7 +394,6 @@ int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, /** * tipc_bearer_congested - determines if bearer is currently congested */ - int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr) { if (unlikely(b_ptr->blocked)) @@ -423,7 +406,6 @@ int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr) /** * tipc_enable_bearer - enable bearer with the given name */ - int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) { struct tipc_bearer *b_ptr; @@ -449,7 +431,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) if (tipc_in_scope(disc_domain, tipc_own_addr)) { disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK; res = 0; /* accept any node in own cluster */ - } else if (in_own_cluster(disc_domain)) + } else if (in_own_cluster_exact(disc_domain)) res = 0; /* accept specified node in own cluster */ } if (res) { @@ -541,7 +523,6 @@ exit: * tipc_block_bearer(): Block the bearer with the given name, * and reset all its links */ - int tipc_block_bearer(const char *name) { struct tipc_bearer *b_ptr = NULL; @@ -573,11 +554,10 @@ int tipc_block_bearer(const char *name) } /** - * bearer_disable - + * bearer_disable * * Note: This routine assumes caller holds tipc_net_lock. */ - static void bearer_disable(struct tipc_bearer *b_ptr) { struct tipc_link *l_ptr; diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index d3eac56b8c2..e3b2be37fb3 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@ -49,7 +49,6 @@ * - media type identifier located at offset 3 * - remaining bytes vary according to media type */ - #define TIPC_MEDIA_ADDR_SIZE 20 #define TIPC_MEDIA_TYPE_OFFSET 3 @@ -64,7 +63,6 @@ * @media_id: TIPC media type identifier * @broadcast: non-zero if address is a broadcast address */ - struct tipc_media_addr { u8 value[TIPC_MEDIA_ADDR_SIZE]; u8 media_id; @@ -89,7 +87,6 @@ struct tipc_bearer; * @type_id: TIPC media identifier * @name: media name */ - struct tipc_media { int (*send_msg)(struct sk_buff *buf, struct tipc_bearer *b_ptr, @@ -216,7 +213,6 @@ void tipc_bearer_lock_push(struct tipc_bearer *b_ptr); * send routine always returns success -- even if the buffer was not sent -- * and let TIPC's link code deal with the undelivered message. */ - static inline int tipc_bearer_send(struct tipc_bearer *b_ptr, struct sk_buff *buf, struct tipc_media_addr *dest) diff --git a/net/tipc/config.c b/net/tipc/config.c index f76d3b15e4e..c5712a34381 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c @@ -131,7 +131,6 @@ static struct sk_buff *tipc_show_stats(void) tipc_printf(&pb, "TIPC version " TIPC_MOD_VER "\n"); /* Use additional tipc_printf()'s to return more info ... */ - str_len = tipc_printbuf_validate(&pb); skb_put(buf, TLV_SPACE(str_len)); TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); @@ -191,7 +190,6 @@ static struct sk_buff *cfg_set_own_addr(void) * configuration commands can't be received until a local configuration * command to enable the first bearer is received and processed. 
*/ - spin_unlock_bh(&config_lock); tipc_core_start_net(addr); spin_lock_bh(&config_lock); @@ -283,14 +281,12 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area spin_lock_bh(&config_lock); /* Save request and reply details in a well-known location */ - req_tlv_area = request_area; req_tlv_space = request_space; rep_headroom = reply_headroom; /* Check command authorization */ - - if (likely(orig_node == tipc_own_addr)) { + if (likely(in_own_node(orig_node))) { /* command is permitted */ } else if (cmd >= 0x8000) { rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED @@ -310,7 +306,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area } /* Call appropriate processing routine */ - switch (cmd) { case TIPC_CMD_NOOP: rep_tlv_buf = tipc_cfg_reply_none(); @@ -433,7 +428,6 @@ static void cfg_named_msg_event(void *userdata, struct sk_buff *rep_buf; /* Validate configuration message header (ignore invalid message) */ - req_hdr = (struct tipc_cfg_msg_hdr *)msg; if ((size < sizeof(*req_hdr)) || (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) || @@ -443,7 +437,6 @@ static void cfg_named_msg_event(void *userdata, } /* Generate reply for request (if can't, return request) */ - rep_buf = tipc_cfg_do_cmd(orig->node, ntohs(req_hdr->tcm_type), msg + sizeof(*req_hdr), @@ -489,10 +482,23 @@ failed: return res; } +void tipc_cfg_reinit(void) +{ + struct tipc_name_seq seq; + int res; + + seq.type = TIPC_CFG_SRV; + seq.lower = seq.upper = 0; + tipc_withdraw(config_port_ref, TIPC_ZONE_SCOPE, &seq); + + seq.lower = seq.upper = tipc_own_addr; + res = tipc_publish(config_port_ref, TIPC_ZONE_SCOPE, &seq); + if (res) + err("Unable to reinitialize configuration service\n"); +} + void tipc_cfg_stop(void) { - if (config_port_ref) { - tipc_deleteport(config_port_ref); - config_port_ref = 0; - } + tipc_deleteport(config_port_ref); + config_port_ref = 0; } diff --git a/net/tipc/config.h b/net/tipc/config.h index 80da6ebc278..1f252f3fa05 100644 --- a/net/tipc/config.h +++ b/net/tipc/config.h @@ -66,6 +66,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, int headroom); int tipc_cfg_init(void); +void tipc_cfg_reinit(void); void tipc_cfg_stop(void); #endif diff --git a/net/tipc/core.c b/net/tipc/core.c index 68eba03e795..f7b95239ebd 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c @@ -52,14 +52,12 @@ #endif /* global variables used by multiple sub-systems within TIPC */ - int tipc_random; const char tipc_alphabet[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_."; /* configurable TIPC parameters */ - u32 tipc_own_addr; int tipc_max_ports; int tipc_max_subscriptions; @@ -77,7 +75,6 @@ int tipc_remote_management; * NOTE: Headroom is reserved to allow prepending of a data link header. * There may also be unrequested tailroom present at the buffer's end. 
*/ - struct sk_buff *tipc_buf_acquire(u32 size) { struct sk_buff *skb; @@ -95,7 +92,6 @@ struct sk_buff *tipc_buf_acquire(u32 size) /** * tipc_core_stop_net - shut down TIPC networking sub-systems */ - static void tipc_core_stop_net(void) { tipc_net_stop(); @@ -105,7 +101,6 @@ static void tipc_core_stop_net(void) /** * start_net - start TIPC networking sub-systems */ - int tipc_core_start_net(unsigned long addr) { int res; @@ -121,7 +116,6 @@ int tipc_core_start_net(unsigned long addr) /** * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode */ - static void tipc_core_stop(void) { tipc_netlink_stop(); @@ -137,7 +131,6 @@ static void tipc_core_stop(void) /** * tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode */ - static int tipc_core_start(void) { int res; @@ -150,9 +143,9 @@ static int tipc_core_start(void) if (!res) res = tipc_nametbl_init(); if (!res) - res = tipc_k_signal((Handler)tipc_subscr_start, 0); + res = tipc_subscr_start(); if (!res) - res = tipc_k_signal((Handler)tipc_cfg_init, 0); + res = tipc_cfg_init(); if (!res) res = tipc_netlink_start(); if (!res) diff --git a/net/tipc/core.h b/net/tipc/core.h index 13837e0e56b..2a9bb99537b 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h @@ -85,7 +85,6 @@ void tipc_printf(struct print_buf *, const char *fmt, ...); /* * TIPC_OUTPUT is the destination print buffer for system messages. */ - #ifndef TIPC_OUTPUT #define TIPC_OUTPUT TIPC_LOG #endif @@ -102,7 +101,6 @@ void tipc_printf(struct print_buf *, const char *fmt, ...); /* * DBG_OUTPUT is the destination print buffer for debug messages. */ - #ifndef DBG_OUTPUT #define DBG_OUTPUT TIPC_LOG #endif @@ -126,13 +124,11 @@ void tipc_msg_dbg(struct print_buf *, struct tipc_msg *, const char *); /* * TIPC-specific error codes */ - #define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */ /* * Global configuration variables */ - extern u32 tipc_own_addr; extern int tipc_max_ports; extern int tipc_max_subscriptions; @@ -143,7 +139,6 @@ extern int tipc_remote_management; /* * Other global variables */ - extern int tipc_random; extern const char tipc_alphabet[]; @@ -151,7 +146,6 @@ extern const char tipc_alphabet[]; /* * Routines available to privileged subsystems */ - extern int tipc_core_start_net(unsigned long); extern int tipc_handler_start(void); extern void tipc_handler_stop(void); @@ -163,7 +157,6 @@ extern void tipc_socket_stop(void); /* * TIPC timer and signal code */ - typedef void (*Handler) (unsigned long); u32 tipc_k_signal(Handler routine, unsigned long argument); @@ -176,7 +169,6 @@ u32 tipc_k_signal(Handler routine, unsigned long argument); * * Timer must be initialized before use (and terminated when no longer needed). */ - static inline void k_init_timer(struct timer_list *timer, Handler routine, unsigned long argument) { @@ -196,7 +188,6 @@ static inline void k_init_timer(struct timer_list *timer, Handler routine, * then an additional jiffy is added to account for the fact that * the starting time may be in the middle of the current jiffy. */ - static inline void k_start_timer(struct timer_list *timer, unsigned long msec) { mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1); @@ -212,7 +203,6 @@ static inline void k_start_timer(struct timer_list *timer, unsigned long msec) * WARNING: Must not be called when holding locks required by the timer's * timeout routine, otherwise deadlock can occur on SMP systems! 
*/ - static inline void k_cancel_timer(struct timer_list *timer) { del_timer_sync(timer); @@ -229,12 +219,10 @@ static inline void k_cancel_timer(struct timer_list *timer) * (Do not "enhance" this routine to automatically cancel an active timer, * otherwise deadlock can arise when a timeout routine calls k_term_timer.) */ - static inline void k_term_timer(struct timer_list *timer) { } - /* * TIPC message buffer code * @@ -244,7 +232,6 @@ static inline void k_term_timer(struct timer_list *timer) * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields * are word aligned for quicker access */ - #define BUF_HEADROOM LL_MAX_HEADER struct tipc_skb_cb { @@ -253,7 +240,6 @@ struct tipc_skb_cb { #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) - static inline struct tipc_msg *buf_msg(struct sk_buff *skb) { return (struct tipc_msg *)skb->data; diff --git a/net/tipc/discover.c b/net/tipc/discover.c index c630a21b2be..ae054cfe179 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -70,7 +70,6 @@ struct tipc_link_req { * @dest_domain: network domain of node(s) which should respond to message * @b_ptr: ptr to bearer issuing message */ - static struct sk_buff *tipc_disc_init_msg(u32 type, u32 dest_domain, struct tipc_bearer *b_ptr) @@ -96,7 +95,6 @@ static struct sk_buff *tipc_disc_init_msg(u32 type, * @node_addr: duplicated node address * @media_addr: media address advertised by duplicated node */ - static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr, struct tipc_media_addr *media_addr) { @@ -117,7 +115,6 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr, * @buf: buffer containing message * @b_ptr: bearer that message arrived on */ - void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr) { struct tipc_node *n_ptr; @@ -221,7 +218,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr) * the new media address and reset the link to ensure it starts up * cleanly. */ - if (addr_mismatch) { if (tipc_link_is_up(link)) { disc_dupl_alert(b_ptr, orig, &media_addr); @@ -264,7 +260,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr) * Reinitiates discovery process if discovery object has no associated nodes * and is either not currently searching or is searching at a slow rate */ - static void disc_update(struct tipc_link_req *req) { if (!req->num_nodes) { @@ -280,7 +275,6 @@ static void disc_update(struct tipc_link_req *req) * tipc_disc_add_dest - increment set of discovered nodes * @req: ptr to link request structure */ - void tipc_disc_add_dest(struct tipc_link_req *req) { req->num_nodes++; @@ -290,7 +284,6 @@ void tipc_disc_add_dest(struct tipc_link_req *req) * tipc_disc_remove_dest - decrement set of discovered nodes * @req: ptr to link request structure */ - void tipc_disc_remove_dest(struct tipc_link_req *req) { req->num_nodes--; @@ -301,7 +294,6 @@ void tipc_disc_remove_dest(struct tipc_link_req *req) * disc_send_msg - send link setup request message * @req: ptr to link request structure */ - static void disc_send_msg(struct tipc_link_req *req) { if (!req->bearer->blocked) @@ -314,7 +306,6 @@ static void disc_send_msg(struct tipc_link_req *req) * * Called whenever a link setup request timer associated with a bearer expires. 
*/ - static void disc_timeout(struct tipc_link_req *req) { int max_delay; @@ -322,7 +313,6 @@ static void disc_timeout(struct tipc_link_req *req) spin_lock_bh(&req->bearer->lock); /* Stop searching if only desired node has been found */ - if (tipc_node(req->domain) && req->num_nodes) { req->timer_intv = TIPC_LINK_REQ_INACTIVE; goto exit; @@ -335,7 +325,6 @@ static void disc_timeout(struct tipc_link_req *req) * hold at fast polling rate if don't have any associated nodes, * otherwise hold at slow polling rate */ - disc_send_msg(req); req->timer_intv *= 2; @@ -359,7 +348,6 @@ exit: * * Returns 0 if successful, otherwise -errno. */ - int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest, u32 dest_domain) { @@ -391,7 +379,6 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, * tipc_disc_delete - destroy object sending periodic link setup requests * @req: ptr to link request structure */ - void tipc_disc_delete(struct tipc_link_req *req) { k_cancel_timer(&req->timer); @@ -399,4 +386,3 @@ void tipc_disc_delete(struct tipc_link_req *req) kfree_skb(req->buf); kfree(req); } - diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c index 527e3f0e165..90ac9bfa7ab 100644 --- a/net/tipc/eth_media.c +++ b/net/tipc/eth_media.c @@ -48,7 +48,6 @@ * @tipc_packet_type: used in binding TIPC to Ethernet driver * @cleanup: work item used when disabling bearer */ - struct eth_bearer { struct tipc_bearer *bearer; struct net_device *dev; @@ -67,7 +66,6 @@ static struct notifier_block notifier; * Media-dependent "value" field stores MAC address in first 6 bytes * and zeroes out the remaining bytes. */ - static void eth_media_addr_set(struct tipc_media_addr *a, char *mac) { memcpy(a->value, mac, ETH_ALEN); @@ -79,7 +77,6 @@ static void eth_media_addr_set(struct tipc_media_addr *a, char *mac) /** * send_msg - send a TIPC message out over an Ethernet interface */ - static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr, struct tipc_media_addr *dest) { @@ -115,7 +112,6 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr, * ignores packets sent using Ethernet multicast, and traffic sent to other * nodes (which can happen if interface is running in promiscuous mode). 
*/ - static int recv_msg(struct sk_buff *buf, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { @@ -140,7 +136,6 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev, /** * enable_bearer - attach TIPC bearer to an Ethernet interface */ - static int enable_bearer(struct tipc_bearer *tb_ptr) { struct net_device *dev = NULL; @@ -151,7 +146,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr) int pending_dev = 0; /* Find unused Ethernet bearer structure */ - while (eb_ptr->dev) { if (!eb_ptr->bearer) pending_dev++; @@ -160,7 +154,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr) } /* Find device with specified name */ - read_lock(&dev_base_lock); for_each_netdev(&init_net, pdev) { if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) { @@ -174,7 +167,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr) return -ENODEV; /* Create Ethernet bearer for device */ - eb_ptr->dev = dev; eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC); eb_ptr->tipc_packet_type.dev = dev; @@ -184,7 +176,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr) dev_add_pack(&eb_ptr->tipc_packet_type); /* Associate TIPC bearer with Ethernet bearer */ - eb_ptr->bearer = tb_ptr; tb_ptr->usr_handle = (void *)eb_ptr; tb_ptr->mtu = dev->mtu; @@ -198,7 +189,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr) * * This routine must be invoked from a work queue because it can sleep. */ - static void cleanup_bearer(struct work_struct *work) { struct eth_bearer *eb_ptr = @@ -216,7 +206,6 @@ static void cleanup_bearer(struct work_struct *work) * then get worker thread to complete bearer cleanup. (Can't do cleanup * here because cleanup code needs to sleep and caller holds spinlocks.) */ - static void disable_bearer(struct tipc_bearer *tb_ptr) { struct eth_bearer *eb_ptr = (struct eth_bearer *)tb_ptr->usr_handle; @@ -232,7 +221,6 @@ static void disable_bearer(struct tipc_bearer *tb_ptr) * Change the state of the Ethernet bearer (if any) associated with the * specified device. 
*/ - static int recv_notification(struct notifier_block *nb, unsigned long evt, void *dv) { @@ -281,7 +269,6 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt, /** * eth_addr2str - convert Ethernet address to string */ - static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size) { if (str_size < 18) /* 18 = strlen("aa:bb:cc:dd:ee:ff\0") */ @@ -294,7 +281,6 @@ static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size) /** * eth_str2addr - convert string to Ethernet address */ - static int eth_str2addr(struct tipc_media_addr *a, char *str_buf) { char mac[ETH_ALEN]; @@ -314,7 +300,6 @@ static int eth_str2addr(struct tipc_media_addr *a, char *str_buf) /** * eth_str2addr - convert Ethernet address format to message header format */ - static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area) { memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE); @@ -326,7 +311,6 @@ static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area) /** * eth_str2addr - convert message header address format to Ethernet format */ - static int eth_msg2addr(struct tipc_media_addr *a, char *msg_area) { if (msg_area[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_ETH) @@ -339,7 +323,6 @@ static int eth_msg2addr(struct tipc_media_addr *a, char *msg_area) /* * Ethernet media registration info */ - static struct tipc_media eth_media_info = { .send_msg = send_msg, .enable_bearer = enable_bearer, @@ -363,7 +346,6 @@ static struct tipc_media eth_media_info = { * Register Ethernet media type with TIPC bearer code. Also register * with OS for notifications about device state changes. */ - int tipc_eth_media_start(void) { int res; @@ -386,7 +368,6 @@ int tipc_eth_media_start(void) /** * tipc_eth_media_stop - deactivate Ethernet bearer support */ - void tipc_eth_media_stop(void) { if (!eth_started) diff --git a/net/tipc/handler.c b/net/tipc/handler.c index 274c98e164b..9c6f22ff1c6 100644 --- a/net/tipc/handler.c +++ b/net/tipc/handler.c @@ -129,4 +129,3 @@ void tipc_handler_stop(void) kmem_cache_destroy(tipc_queue_item_cache); } - diff --git a/net/tipc/link.c b/net/tipc/link.c index b4b9b30167a..7a614f43549 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -45,13 +45,11 @@ /* * Out-of-range value for link session numbers */ - #define INVALID_SESSION 0x10000 /* * Link state events: */ - #define STARTING_EVT 856384768 /* link processing trigger */ #define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */ #define TIMEOUT_EVT 560817u /* link timer expired */ @@ -67,7 +65,6 @@ /* * State value stored in 'exp_msg_count' */ - #define START_CHANGEOVER 100000u /** @@ -77,7 +74,6 @@ * @addr_peer: network address of node at far end * @if_peer: name of interface at far end */ - struct tipc_link_name { u32 addr_local; char if_local[TIPC_MAX_IF_NAME]; @@ -105,7 +101,6 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf); /* * Simple link routines */ - static unsigned int align(unsigned int i) { return (i + 3) & ~3u; @@ -143,7 +138,6 @@ static u32 link_last_sent(struct tipc_link *l_ptr) /* * Simple non-static link routines (i.e. referenced outside this file) */ - int tipc_link_is_up(struct tipc_link *l_ptr) { if (!l_ptr) @@ -164,7 +158,6 @@ int tipc_link_is_active(struct tipc_link *l_ptr) * * Returns 1 if link name is valid, otherwise 0. 
*/ - static int link_name_validate(const char *name, struct tipc_link_name *name_parts) { @@ -180,7 +173,6 @@ static int link_name_validate(const char *name, u32 if_peer_len; /* copy link name & ensure length is OK */ - name_copy[TIPC_MAX_LINK_NAME - 1] = 0; /* need above in case non-Posix strncpy() doesn't pad with nulls */ strncpy(name_copy, name, TIPC_MAX_LINK_NAME); @@ -188,7 +180,6 @@ static int link_name_validate(const char *name, return 0; /* ensure all component parts of link name are present */ - addr_local = name_copy; if_local = strchr(addr_local, ':'); if (if_local == NULL) @@ -206,7 +197,6 @@ static int link_name_validate(const char *name, if_peer_len = strlen(if_peer) + 1; /* validate component parts of link name */ - if ((sscanf(addr_local, "%u.%u.%u%c", &z_local, &c_local, &n_local, &dummy) != 3) || (sscanf(addr_peer, "%u.%u.%u%c", @@ -220,7 +210,6 @@ static int link_name_validate(const char *name, return 0; /* return link name components, if necessary */ - if (name_parts) { name_parts->addr_local = tipc_addr(z_local, c_local, n_local); strcpy(name_parts->if_local, if_local); @@ -239,13 +228,11 @@ static int link_name_validate(const char *name, * another thread because tipc_link_delete() always cancels the link timer before * tipc_node_delete() is called.) */ - static void link_timeout(struct tipc_link *l_ptr) { tipc_node_lock(l_ptr->owner); /* update counters used in statistical profiling of send traffic */ - l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size; l_ptr->stats.queue_sz_counts++; @@ -278,7 +265,6 @@ static void link_timeout(struct tipc_link *l_ptr) } /* do all other link processing performed on a periodic basis */ - link_check_defragm_bufs(l_ptr); link_state_event(l_ptr, TIMEOUT_EVT); @@ -302,7 +288,6 @@ static void link_set_timer(struct tipc_link *l_ptr, u32 time) * * Returns pointer to link. */ - struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, struct tipc_bearer *b_ptr, const struct tipc_media_addr *media_addr) @@ -383,7 +368,6 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, * This routine must not grab the node lock until after link timer cancellation * to avoid a potential deadlock situation. */ - void tipc_link_delete(struct tipc_link *l_ptr) { if (!l_ptr) { @@ -419,7 +403,6 @@ static void link_start(struct tipc_link *l_ptr) * Schedules port for renewed sending of messages after link congestion * has abated. 
*/ - static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz) { struct tipc_port *p_ptr; @@ -476,7 +459,6 @@ exit: * link_release_outqueue - purge link's outbound message queue * @l_ptr: pointer to link */ - static void link_release_outqueue(struct tipc_link *l_ptr) { struct sk_buff *buf = l_ptr->first_out; @@ -495,7 +477,6 @@ static void link_release_outqueue(struct tipc_link *l_ptr) * tipc_link_reset_fragments - purge link's inbound message fragments queue * @l_ptr: pointer to link */ - void tipc_link_reset_fragments(struct tipc_link *l_ptr) { struct sk_buff *buf = l_ptr->defragm_buf; @@ -513,7 +494,6 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr) * tipc_link_stop - purge all inbound and outbound messages associated with link * @l_ptr: pointer to link */ - void tipc_link_stop(struct tipc_link *l_ptr) { struct sk_buff *buf; @@ -569,7 +549,6 @@ void tipc_link_reset(struct tipc_link *l_ptr) } /* Clean up all queues: */ - link_release_outqueue(l_ptr); kfree_skb(l_ptr->proto_msg_queue); l_ptr->proto_msg_queue = NULL; @@ -611,8 +590,7 @@ static void link_activate(struct tipc_link *l_ptr) * @l_ptr: pointer to link * @event: state machine event to process */ - -static void link_state_event(struct tipc_link *l_ptr, unsigned event) +static void link_state_event(struct tipc_link *l_ptr, unsigned int event) { struct tipc_link *other; u32 cont_intv = l_ptr->continuity_interval; @@ -785,7 +763,6 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned event) * link_bundle_buf(): Append contents of a buffer to * the tail of an existing one. */ - static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler, struct sk_buff *buf) @@ -860,7 +837,6 @@ static void link_add_chain_to_outqueue(struct tipc_link *l_ptr, * inside TIPC when the 'fast path' in tipc_send_buf * has failed, and from link_send() */ - int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) { struct tipc_msg *msg = buf_msg(buf); @@ -872,7 +848,6 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) u32 max_packet = l_ptr->max_pkt; /* Match msg importance against queue limits: */ - if (unlikely(queue_size >= queue_limit)) { if (imp <= TIPC_CRITICAL_IMPORTANCE) { link_schedule_port(l_ptr, msg_origport(msg), size); @@ -888,12 +863,10 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) } /* Fragmentation needed ? */ - if (size > max_packet) return link_send_long_buf(l_ptr, buf); - /* Packet can be queued or sent: */ - + /* Packet can be queued or sent. */ if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) && !link_congested(l_ptr))) { link_add_to_outqueue(l_ptr, buf, msg); @@ -907,13 +880,11 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) } return dsz; } - /* Congestion: can message be bundled ?: */ - + /* Congestion: can message be bundled ? 
*/ if ((msg_user(msg) != CHANGEOVER_PROTOCOL) && (msg_user(msg) != MSG_FRAGMENTER)) { /* Try adding message to an existing bundle */ - if (l_ptr->next_out && link_bundle_buf(l_ptr, l_ptr->last_out, buf)) { tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr); @@ -921,7 +892,6 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) } /* Try creating a new bundle */ - if (size <= max_packet * 2 / 3) { struct sk_buff *bundler = tipc_buf_acquire(max_packet); struct tipc_msg bundler_hdr; @@ -951,7 +921,6 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) * not been selected yet, and the the owner node is not locked * Called by TIPC internal users, e.g. the name distributor */ - int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector) { struct tipc_link *l_ptr; @@ -984,7 +953,6 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector) * small enough not to require fragmentation. * Called without any locks held. */ - void tipc_link_send_names(struct list_head *message_list, u32 dest) { struct tipc_node *n_ptr; @@ -1013,7 +981,6 @@ void tipc_link_send_names(struct list_head *message_list, u32 dest) read_unlock_bh(&tipc_net_lock); /* discard the messages if they couldn't be sent */ - list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) { list_del((struct list_head *)buf); kfree_skb(buf); @@ -1026,7 +993,6 @@ void tipc_link_send_names(struct list_head *message_list, u32 dest) * inclusive total message length. Very time critical. * Link is locked. Returns user data length. */ - static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf, u32 *used_max_pkt) { @@ -1111,7 +1077,6 @@ again: * Try building message using port's max_pkt hint. * (Must not hold any locks while building message.) 
*/ - res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, sender->max_pkt, !sender->user_port, &buf); @@ -1131,12 +1096,10 @@ exit: } /* Exit if build request was invalid */ - if (unlikely(res < 0)) goto exit; /* Exit if link (or bearer) is congested */ - if (link_congested(l_ptr) || !list_empty(&l_ptr->b_ptr->cong_links)) { res = link_schedule_port(l_ptr, @@ -1148,7 +1111,6 @@ exit: * Message size exceeds max_pkt hint; update hint, * then re-try fast path or fragment the message */ - sender->max_pkt = l_ptr->max_pkt; tipc_node_unlock(node); read_unlock_bh(&tipc_net_lock); @@ -1166,7 +1128,6 @@ exit: read_unlock_bh(&tipc_net_lock); /* Couldn't find a link to the destination node */ - if (buf) return tipc_reject_msg(buf, TIPC_ERR_NO_NODE); if (res >= 0) @@ -1220,15 +1181,13 @@ again: sect_crs = NULL; curr_sect = -1; - /* Prepare reusable fragment header: */ - + /* Prepare reusable fragment header */ tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(hdr)); msg_set_size(&fragm_hdr, max_pkt); msg_set_fragm_no(&fragm_hdr, 1); - /* Prepare header of first fragment: */ - + /* Prepare header of first fragment */ buf_chain = buf = tipc_buf_acquire(max_pkt); if (!buf) return -ENOMEM; @@ -1237,8 +1196,7 @@ again: hsz = msg_hdr_sz(hdr); skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz); - /* Chop up message: */ - + /* Chop up message */ fragm_crs = INT_H_SIZE + hsz; fragm_rest = fragm_sz - hsz; @@ -1329,7 +1287,6 @@ reject: } /* Append chain of fragments to send queue & send them */ - l_ptr->long_msg_seq_no++; link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no); l_ptr->stats.sent_fragments += fragm_no; @@ -1350,7 +1307,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr) /* Step to position where retransmission failed, if any, */ /* consider that buffers may have been released in meantime */ - if (r_q_size && buf) { u32 last = lesser(mod(r_q_head + r_q_size), link_last_sent(l_ptr)); @@ -1365,7 +1321,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr) } /* Continue retransmission now, if there is anything: */ - if (r_q_size && buf) { msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); @@ -1381,7 +1336,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr) } /* Send deferred protocol message, if any: */ - buf = l_ptr->proto_msg_queue; if (buf) { msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); @@ -1398,7 +1352,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr) } /* Send one deferred data message, if send window not full: */ - buf = l_ptr->next_out; if (buf) { struct tipc_msg *msg = buf_msg(buf); @@ -1478,16 +1431,12 @@ static void link_retransmit_failure(struct tipc_link *l_ptr, warn("Retransmission failure on link <%s>\n", l_ptr->name); if (l_ptr->addr) { - /* Handle failure on standard link */ - link_print(l_ptr, "Resetting link\n"); tipc_link_reset(l_ptr); } else { - /* Handle failure on broadcast link */ - struct tipc_node *n_ptr; char addr_string[16]; @@ -1536,7 +1485,6 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf, return; } else { /* Detect repeated retransmit failures on uncongested bearer */ - if (l_ptr->last_retransmitted == msg_seqno(msg)) { if (++l_ptr->stale_count > 100) { link_retransmit_failure(l_ptr, buf); @@ -1571,7 +1519,6 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf, /** * link_insert_deferred_queue - insert deferred messages back into receive chain */ - static struct sk_buff 
*link_insert_deferred_queue(struct tipc_link *l_ptr, struct sk_buff *buf) { @@ -1602,7 +1549,6 @@ static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr, * TIPC will ignore the excess, under the assumption that it is optional info * introduced by a later release of the protocol. */ - static int link_recv_buf_validate(struct sk_buff *buf) { static u32 min_data_hdr_size[8] = { @@ -1648,7 +1594,6 @@ static int link_recv_buf_validate(struct sk_buff *buf) * Invoked with no locks held. Bearer pointer must point to a valid bearer * structure (i.e. cannot be NULL), but bearer can be inactive. */ - void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) { read_lock_bh(&tipc_net_lock); @@ -1666,22 +1611,18 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) head = head->next; /* Ensure bearer is still enabled */ - if (unlikely(!b_ptr->active)) goto cont; /* Ensure message is well-formed */ - if (unlikely(!link_recv_buf_validate(buf))) goto cont; /* Ensure message data is a single contiguous unit */ - if (unlikely(skb_linearize(buf))) goto cont; /* Handle arrival of a non-unicast link message */ - msg = buf_msg(buf); if (unlikely(msg_non_seq(msg))) { @@ -1693,20 +1634,17 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) } /* Discard unicast link messages destined for another node */ - if (unlikely(!msg_short(msg) && (msg_destnode(msg) != tipc_own_addr))) goto cont; /* Locate neighboring node that sent message */ - n_ptr = tipc_node_find(msg_prevnode(msg)); if (unlikely(!n_ptr)) goto cont; tipc_node_lock(n_ptr); /* Locate unicast link endpoint that should handle message */ - l_ptr = n_ptr->links[b_ptr->identity]; if (unlikely(!l_ptr)) { tipc_node_unlock(n_ptr); @@ -1714,7 +1652,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) } /* Verify that communication with node is currently allowed */ - if ((n_ptr->block_setup & WAIT_PEER_DOWN) && msg_user(msg) == LINK_PROTOCOL && (msg_type(msg) == RESET_MSG || @@ -1728,12 +1665,10 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) } /* Validate message sequence number info */ - seq_no = msg_seqno(msg); ackd = msg_ack(msg); /* Release acked messages */ - if (n_ptr->bclink.supported) tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); @@ -1752,7 +1687,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) } /* Try sending any messages link endpoint has pending */ - if (unlikely(l_ptr->next_out)) tipc_link_push_queue(l_ptr); if (unlikely(!list_empty(&l_ptr->waiting_ports))) @@ -1763,7 +1697,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) } /* Now (finally!) process the incoming message */ - protocol_check: if (likely(link_working_working(l_ptr))) { if (likely(seq_no == mod(l_ptr->next_in_no))) { @@ -1859,7 +1792,6 @@ cont: * * Returns increase in queue length (i.e. 
0 or 1) */ - u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail, struct sk_buff *buf) { @@ -1908,7 +1840,6 @@ u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail, /* * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet */ - static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, struct sk_buff *buf) { @@ -1920,14 +1851,12 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, } /* Record OOS packet arrival (force mismatch on next timeout) */ - l_ptr->checkpoint--; /* * Discard packet if a duplicate; otherwise add it to deferred queue * and notify peer of gap as per protocol specification */ - if (less(seq_no, mod(l_ptr->next_in_no))) { l_ptr->stats.duplicates++; kfree_skb(buf); @@ -1957,7 +1886,6 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, int r_flag; /* Discard any previous message that was deferred due to congestion */ - if (l_ptr->proto_msg_queue) { kfree_skb(l_ptr->proto_msg_queue); l_ptr->proto_msg_queue = NULL; @@ -1967,12 +1895,10 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, return; /* Abort non-RESET send if communication with node is prohibited */ - if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG)) return; /* Create protocol message with "out-of-sequence" sequence number */ - msg_set_type(msg, msg_typ); msg_set_net_plane(msg, l_ptr->b_ptr->net_plane); msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); @@ -2040,14 +1966,12 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); /* Defer message if bearer is already congested */ - if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) { l_ptr->proto_msg_queue = buf; return; } /* Defer message if attempting to send results in bearer congestion */ - if (!tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { tipc_bearer_schedule(l_ptr->b_ptr, l_ptr); l_ptr->proto_msg_queue = buf; @@ -2056,7 +1980,6 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, } /* Discard message if it was sent successfully */ - l_ptr->unacked_window = 0; kfree_skb(buf); } @@ -2066,7 +1989,6 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, * Note that network plane id propagates through the network, and may * change at any time. 
The node with lowest address rules */ - static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf) { u32 rec_gap = 0; @@ -2079,7 +2001,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf) goto exit; /* record unnumbered packet arrival (force mismatch on next timeout) */ - l_ptr->checkpoint--; if (l_ptr->b_ptr->net_plane != msg_net_plane(msg)) @@ -2111,7 +2032,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf) /* fall thru' */ case ACTIVATE_MSG: /* Update link settings according other endpoint's values */ - strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg)); msg_tol = msg_link_tolerance(msg); @@ -2133,7 +2053,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf) l_ptr->owner->bclink.supportable = (max_pkt_info != 0); /* Synchronize broadcast link info, if not done previously */ - if (!tipc_node_is_up(l_ptr->owner)) { l_ptr->owner->bclink.last_sent = l_ptr->owner->bclink.last_in = @@ -2185,7 +2104,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf) } /* Protocol message before retransmits, reduce loss risk */ - if (l_ptr->owner->bclink.supported) tipc_bclink_update_link_state(l_ptr->owner, msg_last_bcast(msg)); @@ -2243,7 +2161,6 @@ static void tipc_link_tunnel(struct tipc_link *l_ptr, * changeover(): Send whole message queue via the remaining link * Owner node is locked. */ - void tipc_link_changeover(struct tipc_link *l_ptr) { u32 msgcount = l_ptr->out_queue_size; @@ -2343,8 +2260,6 @@ void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel) } } - - /** * buf_extract - extracts embedded TIPC message from another message * @skb: encapsulating message buffer @@ -2353,7 +2268,6 @@ void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel) * Returns a new message buffer containing an embedded message. The * encapsulating message itself is left unchanged. */ - static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos) { struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos); @@ -2370,7 +2284,6 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos) * link_recv_changeover_msg(): Receive tunneled packet sent * via other link. Node is locked. Return extracted buffer. */ - static int link_recv_changeover_msg(struct tipc_link **l_ptr, struct sk_buff **buf) { @@ -2405,7 +2318,6 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr, } /* First original message ?: */ - if (tipc_link_is_up(dest_link)) { info("Resetting link <%s>, changeover initiated by peer\n", dest_link->name); @@ -2420,7 +2332,6 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr, } /* Receive original message */ - if (dest_link->exp_msg_count == 0) { warn("Link switchover error, " "got too many tunnelled messages\n"); @@ -2469,7 +2380,6 @@ void tipc_link_recv_bundle(struct sk_buff *buf) * Fragmentation/defragmentation: */ - /* * link_send_long_buf: Entry for buffers needing fragmentation. * The buffer is complete, inclusive total message length. 
@@ -2496,12 +2406,10 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf) destaddr = msg_destnode(inmsg); /* Prepare reusable fragment header: */ - tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, INT_H_SIZE, destaddr); /* Chop up message: */ - while (rest > 0) { struct sk_buff *fragm; @@ -2535,7 +2443,6 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf) kfree_skb(buf); /* Append chain of fragments to send queue & send them */ - l_ptr->long_msg_seq_no++; link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no); l_ptr->stats.sent_fragments += fragm_no; @@ -2551,7 +2458,6 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf) * help storing these values in unused, available fields in the * pending message. This makes dynamic memory allocation unnecessary. */ - static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno) { msg_set_seqno(buf_msg(buf), seqno); @@ -2603,7 +2509,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, *fb = NULL; /* Is there an incomplete message waiting for this fragment? */ - while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) || (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) { prev = pbuf; @@ -2629,7 +2534,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, skb_copy_to_linear_data(pbuf, imsg, msg_data_sz(fragm)); /* Prepare buffer for subsequent fragments. */ - set_long_msg_seqno(pbuf, long_msg_seq_no); set_fragm_size(pbuf, fragm_sz); set_expected_frags(pbuf, exp_fragm_cnt - 1); @@ -2650,7 +2554,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, kfree_skb(fbuf); /* Is message complete? */ - if (exp_frags == 0) { if (prev) prev->next = pbuf->next; @@ -2672,7 +2575,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, * link_check_defragm_bufs - flush stale incoming message fragments * @l_ptr: pointer to link */ - static void link_check_defragm_bufs(struct tipc_link *l_ptr) { struct sk_buff *prev = NULL; @@ -2701,8 +2603,6 @@ static void link_check_defragm_bufs(struct tipc_link *l_ptr) } } - - static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) { if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL)) @@ -2714,7 +2614,6 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4); } - void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window) { /* Data messages from this node, inclusive FIRST_FRAGM */ @@ -2744,7 +2643,6 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window) * * Returns pointer to link (or 0 if invalid link name). */ - static struct tipc_link *link_find_link(const char *name, struct tipc_node **node) { @@ -2778,7 +2676,6 @@ static struct tipc_link *link_find_link(const char *name, * * Returns 1 if value is within range, 0 if not. */ - static int link_value_is_valid(u16 cmd, u32 new_value) { switch (cmd) { @@ -2794,7 +2691,6 @@ static int link_value_is_valid(u16 cmd, u32 new_value) return 0; } - /** * link_cmd_set_value - change priority/tolerance/window for link/bearer/media * @name - ptr to link, bearer, or media name @@ -2805,7 +2701,6 @@ static int link_value_is_valid(u16 cmd, u32 new_value) * * Returns 0 if value updated and negative value on error. 
*/ - static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd) { struct tipc_node *node; @@ -2910,7 +2805,6 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space * link_reset_statistics - reset link statistics * @l_ptr: pointer to link */ - static void link_reset_statistics(struct tipc_link *l_ptr) { memset(&l_ptr->stats, 0, sizeof(l_ptr->stats)); @@ -2951,7 +2845,6 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_ /** * percent - convert count to a percentage of total (rounding up or down) */ - static u32 percent(u32 count, u32 total) { return (count * 100 + (total / 2)) / total; @@ -2965,7 +2858,6 @@ static u32 percent(u32 count, u32 total) * * Returns length of print buffer data string (or 0 if error) */ - static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) { struct print_buf pb; @@ -3087,7 +2979,6 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s * * If no active link can be found, uses default maximum packet size. */ - u32 tipc_link_get_max_pkt(u32 dest, u32 selector) { struct tipc_node *n_ptr; @@ -3171,4 +3062,3 @@ print_state: tipc_printbuf_validate(buf); info("%s", print_area); } - diff --git a/net/tipc/link.h b/net/tipc/link.h index 73c18c140e1..d6a60a963ce 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -47,13 +47,11 @@ /* * Out-of-range value for link sequence numbers */ - #define INVALID_LINK_SEQ 0x10000 /* * Link states */ - #define WORKING_WORKING 560810u #define WORKING_UNKNOWN 560811u #define RESET_UNKNOWN 560812u @@ -63,7 +61,6 @@ * Starting value for maximum packet size negotiation on unicast links * (unless bearer MTU is less) */ - #define MAX_PKT_DEFAULT 1500 /** @@ -114,7 +111,6 @@ * @defragm_buf: list of partially reassembled inbound message fragments * @stats: collects statistics regarding link activity */ - struct tipc_link { u32 addr; char name[TIPC_MAX_LINK_NAME]; @@ -255,7 +251,6 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, /* * Link sequence number manipulation routines (uses modulo 2**16 arithmetic) */ - static inline u32 buf_seqno(struct sk_buff *buf) { return msg_seqno(buf_msg(buf)); @@ -294,7 +289,6 @@ static inline u32 lesser(u32 left, u32 right) /* * Link status checking routines */ - static inline int link_working_working(struct tipc_link *l_ptr) { return l_ptr->state == WORKING_WORKING; diff --git a/net/tipc/log.c b/net/tipc/log.c index 895c6e530b0..026733f2491 100644 --- a/net/tipc/log.c +++ b/net/tipc/log.c @@ -47,7 +47,6 @@ * * Additional user-defined print buffers are also permitted. */ - static struct print_buf null_buf = { NULL, 0, NULL, 0 }; struct print_buf *const TIPC_NULL = &null_buf; @@ -72,7 +71,6 @@ struct print_buf *const TIPC_LOG = &log_buf; * on the caller to prevent simultaneous use of the print buffer(s) being * manipulated. */ - static char print_string[TIPC_PB_MAX_STR]; static DEFINE_SPINLOCK(print_lock); @@ -97,7 +95,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to, * Note: If the character array is too small (or absent), the print buffer * becomes a null device that discards anything written to it. 
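[Illustrative aside] The print_buf handled by these log.c hunks is a bounded character array with a write cursor; when the array is absent it acts as a null device, as the note above says. A minimal user-space analogue of the append path, assuming only the buf/size/crs fields visible in this patch:

#include <stdarg.h>
#include <stdio.h>

struct pbuf {
	char *buf;	/* character array; NULL acts as a null device */
	size_t size;	/* array size */
	char *crs;	/* first unused position */
};

static void pbuf_printf(struct pbuf *pb, const char *fmt, ...)
{
	va_list args;
	size_t left;
	int n;

	if (!pb->buf)
		return;
	left = pb->size - (pb->crs - pb->buf);
	if (!left)
		return;
	va_start(args, fmt);
	n = vsnprintf(pb->crs, left, fmt, args);
	va_end(args);
	if (n > 0)
		pb->crs += ((size_t)n < left) ? (size_t)n : left - 1;
}

int main(void)
{
	char raw[64];
	struct pbuf pb = { raw, sizeof(raw), raw };

	raw[0] = 0;
	pbuf_printf(&pb, "link <%s> reset\n", "1.1.1:eth0-1.1.2:eth0");
	fputs(pb.buf, stdout);
	return 0;
}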
*/ - void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size) { pb->buf = raw; @@ -117,7 +114,6 @@ void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size) * tipc_printbuf_reset - reinitialize print buffer to empty state * @pb: pointer to print buffer structure */ - static void tipc_printbuf_reset(struct print_buf *pb) { if (pb->buf) { @@ -133,7 +129,6 @@ static void tipc_printbuf_reset(struct print_buf *pb) * * Returns non-zero if print buffer is empty. */ - static int tipc_printbuf_empty(struct print_buf *pb) { return !pb->buf || (pb->crs == pb->buf); @@ -148,7 +143,6 @@ static int tipc_printbuf_empty(struct print_buf *pb) * * Returns length of print buffer data string (including trailing NUL) */ - int tipc_printbuf_validate(struct print_buf *pb) { char *err = "\n\n*** PRINT BUFFER OVERFLOW ***\n\n"; @@ -182,14 +176,12 @@ int tipc_printbuf_validate(struct print_buf *pb) * Current contents of destination print buffer (if any) are discarded. * Source print buffer becomes empty if a successful move occurs. */ - static void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from) { int len; /* Handle the cases where contents can't be moved */ - if (!pb_to->buf) return; @@ -206,7 +198,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to, } /* Copy data from char after cursor to end (if used) */ - len = pb_from->buf + pb_from->size - pb_from->crs - 2; if ((pb_from->buf[pb_from->size - 1] == 0) && (len > 0)) { strcpy(pb_to->buf, pb_from->crs + 1); @@ -215,7 +206,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to, pb_to->crs = pb_to->buf; /* Copy data from start to cursor (always) */ - len = pb_from->crs - pb_from->buf; strcpy(pb_to->crs, pb_from->buf); pb_to->crs += len; @@ -228,7 +218,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to, * @pb: pointer to print buffer * @fmt: formatted info to be printed */ - void tipc_printf(struct print_buf *pb, const char *fmt, ...) { int chars_to_add; @@ -270,7 +259,6 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...) * tipc_log_resize - change the size of the TIPC log buffer * @log_size: print buffer size to use */ - int tipc_log_resize(int log_size) { int res = 0; @@ -295,7 +283,6 @@ int tipc_log_resize(int log_size) /** * tipc_log_resize_cmd - reconfigure size of TIPC log buffer */ - struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space) { u32 value; @@ -316,7 +303,6 @@ struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space) /** * tipc_log_dump - capture TIPC log buffer contents in configuration message */ - struct sk_buff *tipc_log_dump(void) { struct sk_buff *reply; diff --git a/net/tipc/log.h b/net/tipc/log.h index 2248d96238e..d1f5eb967fd 100644 --- a/net/tipc/log.h +++ b/net/tipc/log.h @@ -44,7 +44,6 @@ * @crs: pointer to first unused space in character array (i.e. 
final NUL) * @echo: echo output to system console if non-zero */ - struct print_buf { char *buf; u32 size; diff --git a/net/tipc/msg.c b/net/tipc/msg.c index e3afe162c0a..deea0d232dc 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -72,7 +72,6 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, * * Returns message data size or errno */ - int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect, u32 num_sect, unsigned int total_len, int max_size, int usrmem, struct sk_buff **buf) @@ -112,7 +111,6 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect, } #ifdef CONFIG_TIPC_DEBUG - void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str) { u32 usr = msg_user(msg); @@ -352,5 +350,4 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str) if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) tipc_msg_dbg(buf, msg_get_wrapped(msg), " /"); } - #endif diff --git a/net/tipc/msg.h b/net/tipc/msg.h index eba524e34a6..ba2a72beea6 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -44,7 +44,6 @@ * * Note: Some items are also used with TIPC internal message headers */ - #define TIPC_VERSION 2 /* @@ -58,7 +57,6 @@ /* * Payload message types */ - #define TIPC_CONN_MSG 0 #define TIPC_MCAST_MSG 1 #define TIPC_NAMED_MSG 2 @@ -67,7 +65,6 @@ /* * Message header sizes */ - #define SHORT_H_SIZE 24 /* In-cluster basic payload message */ #define BASIC_H_SIZE 32 /* Basic payload message */ #define NAMED_H_SIZE 40 /* Named payload message */ @@ -121,7 +118,6 @@ static inline void msg_swap_words(struct tipc_msg *msg, u32 a, u32 b) /* * Word 0 */ - static inline u32 msg_version(struct tipc_msg *m) { return msg_bits(m, 0, 29, 7); @@ -216,7 +212,6 @@ static inline void msg_set_size(struct tipc_msg *m, u32 sz) /* * Word 1 */ - static inline u32 msg_type(struct tipc_msg *m) { return msg_bits(m, 1, 29, 0x7); @@ -291,7 +286,6 @@ static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n) /* * Word 2 */ - static inline u32 msg_ack(struct tipc_msg *m) { return msg_bits(m, 2, 16, 0xffff); @@ -315,8 +309,6 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n) /* * Words 3-10 */ - - static inline u32 msg_prevnode(struct tipc_msg *m) { return msg_word(m, 3); @@ -434,7 +426,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) return (struct tipc_msg *)msg_data(m); } - /* * Constants and routines used to read and write TIPC internal message headers */ @@ -442,7 +433,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) /* * Internal message users */ - #define BCAST_PROTOCOL 5 #define MSG_BUNDLER 6 #define LINK_PROTOCOL 7 @@ -456,7 +446,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) /* * Connection management protocol message types */ - #define CONN_PROBE 0 #define CONN_PROBE_REPLY 1 #define CONN_ACK 2 @@ -464,14 +453,12 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) /* * Name distributor message types */ - #define PUBLICATION 0 #define WITHDRAWAL 1 /* * Segmentation message types */ - #define FIRST_FRAGMENT 0 #define FRAGMENT 1 #define LAST_FRAGMENT 2 @@ -479,7 +466,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) /* * Link management protocol message types */ - #define STATE_MSG 0 #define RESET_MSG 1 #define ACTIVATE_MSG 2 @@ -493,7 +479,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) /* * Config protocol message types */ - #define DSC_REQ_MSG 0 #define DSC_RESP_MSG 1 @@ -501,7 +486,6 
@@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) /* * Word 1 */ - static inline u32 msg_seq_gap(struct tipc_msg *m) { return msg_bits(m, 1, 16, 0x1fff); @@ -526,7 +510,6 @@ static inline void msg_set_node_sig(struct tipc_msg *m, u32 n) /* * Word 2 */ - static inline u32 msg_dest_domain(struct tipc_msg *m) { return msg_word(m, 2); @@ -561,7 +544,6 @@ static inline void msg_set_bcgap_to(struct tipc_msg *m, u32 n) /* * Word 4 */ - static inline u32 msg_last_bcast(struct tipc_msg *m) { return msg_bits(m, 4, 16, 0xffff); @@ -628,7 +610,6 @@ static inline void msg_set_link_selector(struct tipc_msg *m, u32 n) /* * Word 5 */ - static inline u32 msg_session(struct tipc_msg *m) { return msg_bits(m, 5, 16, 0xffff); @@ -697,7 +678,6 @@ static inline char *msg_media_addr(struct tipc_msg *m) /* * Word 9 */ - static inline u32 msg_msgcnt(struct tipc_msg *m) { return msg_bits(m, 9, 16, 0xffff); @@ -744,5 +724,4 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect, u32 num_sect, unsigned int total_len, int max_size, int usrmem, struct sk_buff **buf); - #endif diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index d57da615961..158318e67b0 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -58,7 +58,6 @@ * Note: There is no field that identifies the publishing node because it is * the same for all items contained within a publication message. */ - struct distr_item { __be32 type; __be32 lower; @@ -68,17 +67,41 @@ struct distr_item { }; /** - * List of externally visible publications by this node -- - * that is, all publications having scope > TIPC_NODE_SCOPE. + * struct publ_list - list of publications made by this node + * @list: circular list of publications + * @list_size: number of entries in list */ +struct publ_list { + struct list_head list; + u32 size; +}; + +static struct publ_list publ_zone = { + .list = LIST_HEAD_INIT(publ_zone.list), + .size = 0, +}; + +static struct publ_list publ_cluster = { + .list = LIST_HEAD_INIT(publ_cluster.list), + .size = 0, +}; + +static struct publ_list publ_node = { + .list = LIST_HEAD_INIT(publ_node.list), + .size = 0, +}; + +static struct publ_list *publ_lists[] = { + NULL, + &publ_zone, /* publ_lists[TIPC_ZONE_SCOPE] */ + &publ_cluster, /* publ_lists[TIPC_CLUSTER_SCOPE] */ + &publ_node /* publ_lists[TIPC_NODE_SCOPE] */ +}; -static LIST_HEAD(publ_root); -static u32 publ_cnt; /** * publ_to_item - add publication info to a publication message */ - static void publ_to_item(struct distr_item *i, struct publication *p) { i->type = htonl(p->type); @@ -91,7 +114,6 @@ static void publ_to_item(struct distr_item *i, struct publication *p) /** * named_prepare_buf - allocate & initialize a publication message */ - static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest) { struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size); @@ -126,14 +148,16 @@ static void named_cluster_distribute(struct sk_buff *buf) /** * tipc_named_publish - tell other nodes about a new publication by this node */ - void tipc_named_publish(struct publication *publ) { struct sk_buff *buf; struct distr_item *item; - list_add_tail(&publ->local_list, &publ_root); - publ_cnt++; + list_add_tail(&publ->local_list, &publ_lists[publ->scope]->list); + publ_lists[publ->scope]->size++; + + if (publ->scope == TIPC_NODE_SCOPE) + return; buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0); if (!buf) { @@ -149,14 +173,16 @@ void tipc_named_publish(struct publication *publ) /** * 
tipc_named_withdraw - tell other nodes about a withdrawn publication by this node */ - void tipc_named_withdraw(struct publication *publ) { struct sk_buff *buf; struct distr_item *item; list_del(&publ->local_list); - publ_cnt--; + publ_lists[publ->scope]->size--; + + if (publ->scope == TIPC_NODE_SCOPE) + return; buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0); if (!buf) { @@ -169,25 +195,51 @@ void tipc_named_withdraw(struct publication *publ) named_cluster_distribute(buf); } +/* + * named_distribute - prepare name info for bulk distribution to another node + */ +static void named_distribute(struct list_head *message_list, u32 node, + struct publ_list *pls, u32 max_item_buf) +{ + struct publication *publ; + struct sk_buff *buf = NULL; + struct distr_item *item = NULL; + u32 left = 0; + u32 rest = pls->size * ITEM_SIZE; + + list_for_each_entry(publ, &pls->list, local_list) { + if (!buf) { + left = (rest <= max_item_buf) ? rest : max_item_buf; + rest -= left; + buf = named_prepare_buf(PUBLICATION, left, node); + if (!buf) { + warn("Bulk publication failure\n"); + return; + } + item = (struct distr_item *)msg_data(buf_msg(buf)); + } + publ_to_item(item, publ); + item++; + left -= ITEM_SIZE; + if (!left) { + list_add_tail((struct list_head *)buf, message_list); + buf = NULL; + } + } +} + /** * tipc_named_node_up - tell specified node about all publications by this node */ - void tipc_named_node_up(unsigned long nodearg) { struct tipc_node *n_ptr; struct tipc_link *l_ptr; - struct publication *publ; - struct distr_item *item = NULL; - struct sk_buff *buf = NULL; struct list_head message_list; u32 node = (u32)nodearg; - u32 left = 0; - u32 rest; u32 max_item_buf = 0; /* compute maximum amount of publication data to send per message */ - read_lock_bh(&tipc_net_lock); n_ptr = tipc_node_find(node); if (n_ptr) { @@ -203,32 +255,11 @@ void tipc_named_node_up(unsigned long nodearg) return; /* create list of publication messages, then send them as a unit */ - INIT_LIST_HEAD(&message_list); read_lock_bh(&tipc_nametbl_lock); - rest = publ_cnt * ITEM_SIZE; - - list_for_each_entry(publ, &publ_root, local_list) { - if (!buf) { - left = (rest <= max_item_buf) ? rest : max_item_buf; - rest -= left; - buf = named_prepare_buf(PUBLICATION, left, node); - if (!buf) { - warn("Bulk publication distribution failure\n"); - goto exit; - } - item = (struct distr_item *)msg_data(buf_msg(buf)); - } - publ_to_item(item, publ); - item++; - left -= ITEM_SIZE; - if (!left) { - list_add_tail((struct list_head *)buf, &message_list); - buf = NULL; - } - } -exit: + named_distribute(&message_list, node, &publ_cluster, max_item_buf); + named_distribute(&message_list, node, &publ_zone, max_item_buf); read_unlock_bh(&tipc_nametbl_lock); tipc_link_send_names(&message_list, (u32)node); @@ -240,7 +271,6 @@ exit: * Invoked for each publication issued by a newly failed node. * Removes publication structure from name table & deletes it. */ - static void named_purge_publ(struct publication *publ) { struct publication *p; @@ -264,7 +294,6 @@ static void named_purge_publ(struct publication *publ) /** * tipc_named_recv - process name table update message sent by another node */ - void tipc_named_recv(struct sk_buff *buf) { struct publication *publ; @@ -316,21 +345,22 @@ void tipc_named_recv(struct sk_buff *buf) } /** - * tipc_named_reinit - re-initialize local publication list + * tipc_named_reinit - re-initialize local publications * * This routine is called whenever TIPC networking is enabled. 
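[Illustrative aside] named_distribute() above walks one per-scope publication list and packs distr_item entries into bulk PUBLICATION messages of at most max_item_buf bytes. A rough sketch of that slicing; the item size, publication count and per-message budget below are assumptions chosen only to illustrate the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int item_size = 20;	  /* assumed sizeof(struct distr_item) */
	unsigned int publ_count = 1000;	  /* hypothetical cluster-scope entries */
	unsigned int max_item_buf = 1460; /* assumed per-message item budget */
	unsigned int rest = publ_count * item_size;
	unsigned int msgs = 0;

	while (rest) {
		unsigned int left = rest < max_item_buf ? rest : max_item_buf;

		rest -= left;
		msgs++;
	}
	printf("%u publications -> %u bulk PUBLICATION messages\n",
	       publ_count, msgs);
	return 0;
}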
- * All existing publications by this node that have "cluster" or "zone" scope - * are updated to reflect the node's new network address. + * All name table entries published by this node are updated to reflect + * the node's new network address. */ - void tipc_named_reinit(void) { struct publication *publ; + int scope; write_lock_bh(&tipc_nametbl_lock); - list_for_each_entry(publ, &publ_root, local_list) - publ->node = tipc_own_addr; + for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++) + list_for_each_entry(publ, &publ_lists[scope]->list, local_list) + publ->node = tipc_own_addr; write_unlock_bh(&tipc_nametbl_lock); } diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index c6a1ae36952..010f24a59da 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -56,7 +56,6 @@ static int tipc_nametbl_size = 1024; /* must be a power of 2 */ * publications of the associated name sequence belong to it. * (The cluster and node lists may be empty.) */ - struct name_info { struct list_head node_list; struct list_head cluster_list; @@ -72,7 +71,6 @@ struct name_info { * @upper: name sequence upper bound * @info: pointer to name sequence publication info */ - struct sub_seq { u32 lower; u32 upper; @@ -90,7 +88,6 @@ struct sub_seq { * @subscriptions: list of subscriptions for this 'type' * @lock: spinlock controlling access to publication lists of all sub-sequences */ - struct name_seq { u32 type; struct sub_seq *sseqs; @@ -107,7 +104,6 @@ struct name_seq { * accessed via hashing on 'type'; name sequence lists are *not* sorted * @local_publ_count: number of publications issued by this node */ - struct name_table { struct hlist_head *types; u32 local_publ_count; @@ -124,7 +120,6 @@ static int hash(int x) /** * publ_create - create a publication structure */ - static struct publication *publ_create(u32 type, u32 lower, u32 upper, u32 scope, u32 node, u32 port_ref, u32 key) @@ -151,7 +146,6 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper, /** * tipc_subseq_alloc - allocate a specified number of sub-sequence structures */ - static struct sub_seq *tipc_subseq_alloc(u32 cnt) { struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC); @@ -163,7 +157,6 @@ static struct sub_seq *tipc_subseq_alloc(u32 cnt) * * Allocates a single sub-sequence structure and sets it to all 0's. */ - static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head) { struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC); @@ -186,12 +179,23 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea return nseq; } -/** +/* + * nameseq_delete_empty - deletes a name sequence structure if now unused + */ +static void nameseq_delete_empty(struct name_seq *seq) +{ + if (!seq->first_free && list_empty(&seq->subscriptions)) { + hlist_del_init(&seq->ns_list); + kfree(seq->sseqs); + kfree(seq); + } +} + +/* * nameseq_find_subseq - find sub-sequence (if any) matching a name instance * * Very time-critical, so binary searches through sub-sequence array. */ - static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq, u32 instance) { @@ -221,7 +225,6 @@ static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq, * * Note: Similar to binary search code for locating a sub-sequence. 
*/ - static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance) { struct sub_seq *sseqs = nseq->sseqs; @@ -242,9 +245,8 @@ static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance) } /** - * tipc_nameseq_insert_publ - + * tipc_nameseq_insert_publ */ - static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, u32 type, u32 lower, u32 upper, u32 scope, u32 node, u32 port, u32 key) @@ -260,7 +262,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, if (sseq) { /* Lower end overlaps existing entry => need an exact match */ - if ((sseq->lower != lower) || (sseq->upper != upper)) { warn("Cannot publish {%u,%u,%u}, overlap error\n", type, lower, upper); @@ -280,11 +281,9 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, struct sub_seq *freesseq; /* Find where lower end should be inserted */ - inspos = nameseq_locate_subseq(nseq, lower); /* Fail if upper end overlaps into an existing entry */ - if ((inspos < nseq->first_free) && (upper >= nseq->sseqs[inspos].lower)) { warn("Cannot publish {%u,%u,%u}, overlap error\n", @@ -293,7 +292,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, } /* Ensure there is space for new sub-sequence */ - if (nseq->first_free == nseq->alloc) { struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2); @@ -321,7 +319,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, INIT_LIST_HEAD(&info->zone_list); /* Insert new sub-sequence */ - sseq = &nseq->sseqs[inspos]; freesseq = &nseq->sseqs[nseq->first_free]; memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq)); @@ -333,8 +330,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, created_subseq = 1; } - /* Insert a publication: */ - + /* Insert a publication */ publ = publ_create(type, lower, upper, scope, node, port, key); if (!publ) return NULL; @@ -347,14 +343,12 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, info->cluster_list_size++; } - if (node == tipc_own_addr) { + if (in_own_node(node)) { list_add(&publ->node_list, &info->node_list); info->node_list_size++; } - /* - * Any subscriptions waiting for notification? - */ + /* Any subscriptions waiting for notification? */ list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { tipc_subscr_report_overlap(s, publ->lower, @@ -368,7 +362,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, } /** - * tipc_nameseq_remove_publ - + * tipc_nameseq_remove_publ * * NOTE: There may be cases where TIPC is asked to remove a publication * that is not in the name table. For example, if another node issues a @@ -378,7 +372,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, * A failed withdraw request simply returns a failure indication and lets the * caller issue any error or warning messages associated with such a problem. 
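[Illustrative aside] The sub-sequence lookups above (nameseq_find_subseq, nameseq_locate_subseq) binary-search the non-overlapping [lower, upper] ranges of a name sequence. A stand-alone sketch of that lookup over a hypothetical range table:

#include <stdio.h>

struct range { unsigned int lower, upper; };

static int find_range(const struct range *r, int n, unsigned int instance)
{
	int low = 0, high = n - 1;

	while (low <= high) {
		int mid = (low + high) / 2;

		if (instance < r[mid].lower)
			high = mid - 1;
		else if (instance > r[mid].upper)
			low = mid + 1;
		else
			return mid;	/* instance falls inside this range */
	}
	return -1;			/* no matching sub-sequence */
}

int main(void)
{
	struct range seq[] = { { 0, 99 }, { 100, 199 }, { 500, 999 } };

	printf("instance 150 -> sub-sequence %d\n", find_range(seq, 3, 150));
	return 0;
}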
*/ - static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst, u32 node, u32 ref, u32 key) { @@ -395,7 +388,6 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i info = sseq->info; /* Locate publication, if it exists */ - list_for_each_entry(publ, &info->zone_list, zone_list) { if ((publ->key == key) && (publ->ref == ref) && (!publ->node || (publ->node == node))) @@ -405,26 +397,22 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i found: /* Remove publication from zone scope list */ - list_del(&publ->zone_list); info->zone_list_size--; /* Remove publication from cluster scope list, if present */ - if (in_own_cluster(node)) { list_del(&publ->cluster_list); info->cluster_list_size--; } /* Remove publication from node scope list, if present */ - - if (node == tipc_own_addr) { + if (in_own_node(node)) { list_del(&publ->node_list); info->node_list_size--; } /* Contract subseq list if no more publications for that subseq */ - if (list_empty(&info->zone_list)) { kfree(info); free = &nseq->sseqs[nseq->first_free--]; @@ -433,7 +421,6 @@ found: } /* Notify any waiting subscriptions */ - list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { tipc_subscr_report_overlap(s, publ->lower, @@ -452,7 +439,6 @@ found: * the prescribed number of events if there is any sub- * sequence overlapping with the requested sequence */ - static void tipc_nameseq_subscribe(struct name_seq *nseq, struct tipc_subscription *s) { @@ -504,9 +490,10 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper, { struct name_seq *seq = nametbl_find_seq(type); - if (lower > upper) { - warn("Failed to publish illegal {%u,%u,%u}\n", - type, lower, upper); + if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) || + (lower > upper)) { + dbg("Failed to publish illegal {%u,%u,%u} with scope %u\n", + type, lower, upper, scope); return NULL; } @@ -529,12 +516,7 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, return NULL; publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key); - - if (!seq->first_free && list_empty(&seq->subscriptions)) { - hlist_del_init(&seq->ns_list); - kfree(seq->sseqs); - kfree(seq); - } + nameseq_delete_empty(seq); return publ; } @@ -551,7 +533,6 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, * - if name translation is attempted and fails, sets 'destnode' to 0 * and returns 0 */ - u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) { struct sub_seq *sseq; @@ -574,7 +555,7 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) spin_lock_bh(&seq->lock); info = sseq->info; - /* Closest-First Algorithm: */ + /* Closest-First Algorithm */ if (likely(!*destnode)) { if (!list_empty(&info->node_list)) { publ = list_first_entry(&info->node_list, @@ -597,14 +578,14 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) } } - /* Round-Robin Algorithm: */ + /* Round-Robin Algorithm */ else if (*destnode == tipc_own_addr) { if (list_empty(&info->node_list)) goto no_match; publ = list_first_entry(&info->node_list, struct publication, node_list); list_move_tail(&publ->node_list, &info->node_list); - } else if (in_own_cluster(*destnode)) { + } else if (in_own_cluster_exact(*destnode)) { if (list_empty(&info->cluster_list)) goto no_match; publ = list_first_entry(&info->cluster_list, struct publication, @@ -638,7 +619,6 @@ not_found: * * Returns non-zero if any off-node ports overlap */ - int 
tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, struct tipc_port_list *dports) { @@ -682,7 +662,6 @@ exit: /* * tipc_nametbl_publish - add name publication to network name tables */ - struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, u32 scope, u32 port_ref, u32 key) { @@ -695,11 +674,12 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, } write_lock_bh(&tipc_nametbl_lock); - table.local_publ_count++; publ = tipc_nametbl_insert_publ(type, lower, upper, scope, tipc_own_addr, port_ref, key); - if (publ && (scope != TIPC_NODE_SCOPE)) + if (likely(publ)) { + table.local_publ_count++; tipc_named_publish(publ); + } write_unlock_bh(&tipc_nametbl_lock); return publ; } @@ -707,7 +687,6 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, /** * tipc_nametbl_withdraw - withdraw name publication from network name tables */ - int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) { struct publication *publ; @@ -716,8 +695,7 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key); if (likely(publ)) { table.local_publ_count--; - if (publ->scope != TIPC_NODE_SCOPE) - tipc_named_withdraw(publ); + tipc_named_withdraw(publ); write_unlock_bh(&tipc_nametbl_lock); list_del_init(&publ->pport_list); kfree(publ); @@ -733,7 +711,6 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) /** * tipc_nametbl_subscribe - add a subscription object to the name table */ - void tipc_nametbl_subscribe(struct tipc_subscription *s) { u32 type = s->seq.type; @@ -757,7 +734,6 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s) /** * tipc_nametbl_unsubscribe - remove a subscription object from name table */ - void tipc_nametbl_unsubscribe(struct tipc_subscription *s) { struct name_seq *seq; @@ -768,11 +744,7 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s) spin_lock_bh(&seq->lock); list_del_init(&s->nameseq_list); spin_unlock_bh(&seq->lock); - if ((seq->first_free == 0) && list_empty(&seq->subscriptions)) { - hlist_del_init(&seq->ns_list); - kfree(seq->sseqs); - kfree(seq); - } + nameseq_delete_empty(seq); } write_unlock_bh(&tipc_nametbl_lock); } @@ -781,7 +753,6 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s) /** * subseq_list: print specified sub-sequence contents into the given buffer */ - static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth, u32 index) { @@ -818,7 +789,6 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth, /** * nameseq_list: print specified name sequence contents into the given buffer */ - static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth, u32 type, u32 lowbound, u32 upbound, u32 index) { @@ -849,7 +819,6 @@ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth, /** * nametbl_header - print name table header into the given buffer */ - static void nametbl_header(struct print_buf *buf, u32 depth) { const char *header[] = { @@ -871,7 +840,6 @@ static void nametbl_header(struct print_buf *buf, u32 depth) /** * nametbl_list - print specified name table contents into the given buffer */ - static void nametbl_list(struct print_buf *buf, u32 depth_info, u32 type, u32 lowbound, u32 upbound) { @@ -970,7 +938,6 @@ void tipc_nametbl_stop(void) return; /* Verify name table is empty, then release it */ - write_lock_bh(&tipc_nametbl_lock); for (i = 0; i < tipc_nametbl_size; i++) { if 
(!hlist_empty(&table.types[i])) @@ -980,4 +947,3 @@ void tipc_nametbl_stop(void) table.types = NULL; write_unlock_bh(&tipc_nametbl_lock); } - diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index 207d59ebf84..71cb4dc712d 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -45,10 +45,8 @@ struct tipc_port_list; /* * TIPC name types reserved for internal TIPC use (both current and planned) */ - #define TIPC_ZM_SRV 3 /* zone master service name type */ - /** * struct publication - info about a published (name or) name sequence * @type: name sequence type @@ -67,7 +65,6 @@ struct tipc_port_list; * * Note that the node list, cluster list, and zone list are circular lists. */ - struct publication { u32 type; u32 lower; diff --git a/net/tipc/net.c b/net/tipc/net.c index d4531b07076..7c236c89cf5 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -175,17 +175,14 @@ int tipc_net_start(u32 addr) { char addr_string[16]; - tipc_subscr_stop(); - tipc_cfg_stop(); - + write_lock_bh(&tipc_net_lock); tipc_own_addr = addr; tipc_named_reinit(); tipc_port_reinit(); - tipc_bclink_init(); + write_unlock_bh(&tipc_net_lock); - tipc_k_signal((Handler)tipc_subscr_start, 0); - tipc_k_signal((Handler)tipc_cfg_init, 0); + tipc_cfg_reinit(); info("Started in network mode\n"); info("Own node address %s, network identity %u\n", diff --git a/net/tipc/node.c b/net/tipc/node.c index a34cabc2c43..d4fd341e6e0 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -58,7 +58,7 @@ static atomic_t tipc_num_links = ATOMIC_INIT(0); * entries has been chosen so that no hash chain exceeds 8 nodes and will * usually be much smaller (typically only a single node). */ -static inline unsigned int tipc_hashfn(u32 addr) +static unsigned int tipc_hashfn(u32 addr) { return addr & (NODE_HTABLE_SIZE - 1); } @@ -66,13 +66,12 @@ static inline unsigned int tipc_hashfn(u32 addr) /* * tipc_node_find - locate specified node object, if it exists */ - struct tipc_node *tipc_node_find(u32 addr) { struct tipc_node *node; struct hlist_node *pos; - if (unlikely(!in_own_cluster(addr))) + if (unlikely(!in_own_cluster_exact(addr))) return NULL; hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) { @@ -91,7 +90,6 @@ struct tipc_node *tipc_node_find(u32 addr) * time. (It would be preferable to switch to holding net_lock in write mode, * but this is a non-trivial change.) */ - struct tipc_node *tipc_node_create(u32 addr) { struct tipc_node *n_ptr, *temp_node; @@ -142,13 +140,11 @@ void tipc_node_delete(struct tipc_node *n_ptr) tipc_num_nodes--; } - /** * tipc_node_link_up - handle addition of link * * Link becomes active (alone or shared) or standby, depending on its priority. 
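[Illustrative aside] tipc_hashfn() above spreads node addresses across the node hash table with a plain power-of-two mask. A small sketch; NODE_HTABLE_SIZE is an assumed value here, since the hunk does not show it, and the addresses are hypothetical:

#include <stdio.h>

#define NODE_HTABLE_SIZE 512	/* assumed; must be a power of two */

static unsigned int node_hashfn(unsigned int addr)
{
	return addr & (NODE_HTABLE_SIZE - 1);
}

int main(void)
{
	unsigned int addrs[] = { 0x1001001, 0x1001002, 0x1002001 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("node <0x%x> -> bucket %u\n",
		       addrs[i], node_hashfn(addrs[i]));
	return 0;
}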
*/ - void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) { struct tipc_link **active = &n_ptr->active_links[0]; @@ -181,7 +177,6 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) /** * node_select_active_links - select active link */ - static void node_select_active_links(struct tipc_node *n_ptr) { struct tipc_link **active = &n_ptr->active_links[0]; @@ -209,7 +204,6 @@ static void node_select_active_links(struct tipc_node *n_ptr) /** * tipc_node_link_down - handle loss of link */ - void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) { struct tipc_link **active; @@ -300,7 +294,6 @@ static void node_lost_contact(struct tipc_node *n_ptr) tipc_addr_string_fill(addr_string, n_ptr->addr)); /* Flush broadcast link info associated with lost node */ - if (n_ptr->bclink.supported) { while (n_ptr->bclink.deferred_head) { struct sk_buff *buf = n_ptr->bclink.deferred_head; @@ -334,7 +327,6 @@ static void node_lost_contact(struct tipc_node *n_ptr) tipc_nodesub_notify(n_ptr); /* Prevent re-contact with node until cleanup is done */ - n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE; tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr); } @@ -362,7 +354,6 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) } /* For now, get space for all other nodes */ - payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes; if (payload_size > 32768u) { read_unlock_bh(&tipc_net_lock); @@ -376,7 +367,6 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) } /* Add TLVs for all nodes in scope */ - list_for_each_entry(n_ptr, &tipc_node_list, list) { if (!tipc_in_scope(domain, n_ptr->addr)) continue; @@ -412,7 +402,6 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) read_lock_bh(&tipc_net_lock); /* Get space for all unicast links + broadcast link */ - payload_size = TLV_SPACE(sizeof(link_info)) * (atomic_read(&tipc_num_links) + 1); if (payload_size > 32768u) { @@ -427,14 +416,12 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) } /* Add TLV for broadcast link */ - link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr)); link_info.up = htonl(1); strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME); tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info)); /* Add TLVs for any other links in scope */ - list_for_each_entry(n_ptr, &tipc_node_list, list) { u32 i; diff --git a/net/tipc/node.h b/net/tipc/node.h index 72561c971d6..cfcaf4d6e48 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -48,7 +48,6 @@ #define INVALID_NODE_SIG 0x10000 /* Flags used to block (re)establishment of contact with a neighboring node */ - #define WAIT_PEER_DOWN 0x0001 /* wait to see that peer's links are down */ #define WAIT_NAMES_GONE 0x0002 /* wait for peer's publications to be purged */ #define WAIT_NODE_DOWN 0x0004 /* wait until peer node is declared down */ @@ -79,7 +78,6 @@ * @deferred_tail: newest OOS b'cast message received from node * @defragm: list of partially reassembled b'cast message fragments from node */ - struct tipc_node { u32 addr; spinlock_t lock; diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c index c3c2815ae63..7a27344108f 100644 --- a/net/tipc/node_subscr.c +++ b/net/tipc/node_subscr.c @@ -41,11 +41,10 @@ /** * tipc_nodesub_subscribe - create "node down" subscription for specified node */ - void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr, void 
*usr_handle, net_ev_handler handle_down) { - if (addr == tipc_own_addr) { + if (in_own_node(addr)) { node_sub->node = NULL; return; } @@ -66,7 +65,6 @@ void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr, /** * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any) */ - void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub) { if (!node_sub->node) @@ -82,7 +80,6 @@ void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub) * * Note: node is locked by caller */ - void tipc_nodesub_notify(struct tipc_node *node) { struct tipc_node_subscr *ns; diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h index 4bc2ca0867a..c95d20727de 100644 --- a/net/tipc/node_subscr.h +++ b/net/tipc/node_subscr.h @@ -48,7 +48,6 @@ typedef void (*net_ev_handler) (void *usr_handle); * @usr_handle: argument to pass to routine when node fails * @nodesub_list: adjacent entries in list of subscriptions for the node */ - struct tipc_node_subscr { struct tipc_node *node; net_ev_handler handle_node_down; diff --git a/net/tipc/port.c b/net/tipc/port.c index 94d2904cce6..2ad37a4db37 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c @@ -69,10 +69,30 @@ static u32 port_peerport(struct tipc_port *p_ptr) return msg_destport(&p_ptr->phdr); } +/* + * tipc_port_peer_msg - verify message was sent by connected port's peer + * + * Handles cases where the node's network address has changed from + * the default of <0.0.0> to its configured setting. + */ +int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg) +{ + u32 peernode; + u32 orignode; + + if (msg_origport(msg) != port_peerport(p_ptr)) + return 0; + + orignode = msg_orignode(msg); + peernode = port_peernode(p_ptr); + return (orignode == peernode) || + (!orignode && (peernode == tipc_own_addr)) || + (!peernode && (orignode == tipc_own_addr)); +} + /** * tipc_multicast - send a multicast message to local and remote destinations */ - int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 num_sect, struct iovec const *msg_sect, unsigned int total_len) @@ -89,7 +109,6 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, return -EINVAL; /* Create multicast message */ - hdr = &oport->phdr; msg_set_type(hdr, TIPC_MCAST_MSG); msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE); @@ -105,12 +124,10 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, return res; /* Figure out where to send multicast message */ - ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper, TIPC_NODE_SCOPE, &dports); /* Send message to destinations (duplicate it only if necessary) */ - if (ext_targets) { if (dports.count != 0) { ibuf = skb_copy(buf, GFP_ATOMIC); @@ -141,7 +158,6 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, * * If there is no port list, perform a lookup to create one */ - void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp) { struct tipc_msg *msg; @@ -152,7 +168,6 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp) msg = buf_msg(buf); /* Create destination port list, if one wasn't supplied */ - if (dp == NULL) { tipc_nametbl_mc_translate(msg_nametype(msg), msg_namelower(msg), @@ -163,7 +178,6 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp) } /* Deliver a copy of message to each destination port */ - if (dp->count != 0) { msg_set_destnode(msg, tipc_own_addr); if (dp->count == 1) { @@ -196,7 +210,6 @@ exit: * * Returns pointer to (locked) TIPC port, or NULL if unable to create it */ - struct 
tipc_port *tipc_createport_raw(void *usr_handle, u32 (*dispatcher)(struct tipc_port *, struct sk_buff *), void (*wakeup)(struct tipc_port *), @@ -221,18 +234,24 @@ struct tipc_port *tipc_createport_raw(void *usr_handle, p_ptr->usr_handle = usr_handle; p_ptr->max_pkt = MAX_PKT_DEFAULT; p_ptr->ref = ref; - msg = &p_ptr->phdr; - tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0); - msg_set_origport(msg, ref); INIT_LIST_HEAD(&p_ptr->wait_list); INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list); p_ptr->dispatcher = dispatcher; p_ptr->wakeup = wakeup; p_ptr->user_port = NULL; k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref); - spin_lock_bh(&tipc_port_list_lock); INIT_LIST_HEAD(&p_ptr->publications); INIT_LIST_HEAD(&p_ptr->port_list); + + /* + * Must hold port list lock while initializing message header template + * to ensure a change to node's own network address doesn't result + * in template containing out-dated network address information + */ + spin_lock_bh(&tipc_port_list_lock); + msg = &p_ptr->phdr; + tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0); + msg_set_origport(msg, ref); list_add_tail(&p_ptr->port_list, &ports); spin_unlock_bh(&tipc_port_list_lock); return p_ptr; @@ -361,7 +380,6 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err) u32 rmsg_sz; /* discard rejected message if it shouldn't be returned to sender */ - if (WARN(!msg_isdata(msg), "attempt to reject message with user=%u", msg_user(msg))) { dump_stack(); @@ -374,7 +392,6 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err) * construct returned message by copying rejected message header and * data (or subset), then updating header fields that need adjusting */ - hdr_sz = msg_hdr_sz(msg); rmsg_sz = hdr_sz + min_t(u32, data_sz, MAX_REJECT_SIZE); @@ -413,9 +430,8 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err) } /* send returned message & dispose of rejected message */ - src_node = msg_prevnode(msg); - if (src_node == tipc_own_addr) + if (in_own_node(src_node)) tipc_port_recv_msg(rbuf); else tipc_link_send(rbuf, src_node, msg_link_selector(rmsg)); @@ -519,25 +535,20 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf) struct tipc_msg *msg = buf_msg(buf); struct tipc_port *p_ptr; struct sk_buff *r_buf = NULL; - u32 orignode = msg_orignode(msg); - u32 origport = msg_origport(msg); u32 destport = msg_destport(msg); int wakeable; /* Validate connection */ - p_ptr = tipc_port_lock(destport); - if (!p_ptr || !p_ptr->connected || - (port_peernode(p_ptr) != orignode) || - (port_peerport(p_ptr) != origport)) { + if (!p_ptr || !p_ptr->connected || !tipc_port_peer_msg(p_ptr, msg)) { r_buf = tipc_buf_acquire(BASIC_H_SIZE); if (r_buf) { msg = buf_msg(r_buf); tipc_msg_init(msg, TIPC_HIGH_IMPORTANCE, TIPC_CONN_MSG, - BASIC_H_SIZE, orignode); + BASIC_H_SIZE, msg_orignode(msg)); msg_set_errcode(msg, TIPC_ERR_NO_PORT); msg_set_origport(msg, destport); - msg_set_destport(msg, origport); + msg_set_destport(msg, msg_origport(msg)); } if (p_ptr) tipc_port_unlock(p_ptr); @@ -545,7 +556,6 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf) } /* Process protocol message sent by peer */ - switch (msg_type(msg)) { case CONN_ACK: wakeable = tipc_port_congested(p_ptr) && p_ptr->congested && @@ -646,8 +656,6 @@ void tipc_port_reinit(void) spin_lock_bh(&tipc_port_list_lock); list_for_each_entry(p_ptr, &ports, port_list) { msg = &p_ptr->phdr; - if (msg_orignode(msg) == tipc_own_addr) - break; msg_set_prevnode(msg, tipc_own_addr); msg_set_orignode(msg, tipc_own_addr); } @@ -659,7 +667,6 @@ void 
tipc_port_reinit(void) * port_dispatcher_sigh(): Signal handler for messages destined * to the tipc_port interface. */ - static void port_dispatcher_sigh(void *dummy) { struct sk_buff *buf; @@ -676,6 +683,7 @@ static void port_dispatcher_sigh(void *dummy) struct tipc_name_seq dseq; void *usr_handle; int connected; + int peer_invalid; int published; u32 message_type; @@ -696,6 +704,7 @@ static void port_dispatcher_sigh(void *dummy) up_ptr = p_ptr->user_port; usr_handle = up_ptr->usr_handle; connected = p_ptr->connected; + peer_invalid = connected && !tipc_port_peer_msg(p_ptr, msg); published = p_ptr->published; if (unlikely(msg_errcode(msg))) @@ -705,8 +714,6 @@ static void port_dispatcher_sigh(void *dummy) case TIPC_CONN_MSG:{ tipc_conn_msg_event cb = up_ptr->conn_msg_cb; - u32 peer_port = port_peerport(p_ptr); - u32 peer_node = port_peernode(p_ptr); u32 dsz; tipc_port_unlock(p_ptr); @@ -715,8 +722,7 @@ static void port_dispatcher_sigh(void *dummy) if (unlikely(!connected)) { if (tipc_connect2port(dref, &orig)) goto reject; - } else if ((msg_origport(msg) != peer_port) || - (msg_orignode(msg) != peer_node)) + } else if (peer_invalid) goto reject; dsz = msg_data_sz(msg); if (unlikely(dsz && @@ -768,14 +774,9 @@ err: case TIPC_CONN_MSG:{ tipc_conn_shutdown_event cb = up_ptr->conn_err_cb; - u32 peer_port = port_peerport(p_ptr); - u32 peer_node = port_peernode(p_ptr); tipc_port_unlock(p_ptr); - if (!cb || !connected) - break; - if ((msg_origport(msg) != peer_port) || - (msg_orignode(msg) != peer_node)) + if (!cb || !connected || peer_invalid) break; tipc_disconnect(dref); skb_pull(buf, msg_hdr_sz(msg)); @@ -826,7 +827,6 @@ reject: * port_dispatcher(): Dispatcher for messages destined * to the tipc_port interface. Called with port locked. */ - static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf) { buf->next = NULL; @@ -843,10 +843,8 @@ static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf) } /* - * Wake up port after congestion: Called with port locked, - * + * Wake up port after congestion: Called with port locked */ - static void port_wakeup_sh(unsigned long ref) { struct tipc_port *p_ptr; @@ -892,7 +890,6 @@ void tipc_acknowledge(u32 ref, u32 ack) /* * tipc_createport(): user level call. */ - int tipc_createport(void *usr_handle, unsigned int importance, tipc_msg_err_event error_cb, @@ -901,7 +898,7 @@ int tipc_createport(void *usr_handle, tipc_msg_event msg_cb, tipc_named_msg_event named_msg_cb, tipc_conn_msg_event conn_msg_cb, - tipc_continue_event continue_event_cb,/* May be zero */ + tipc_continue_event continue_event_cb, /* May be zero */ u32 *portref) { struct user_port *up_ptr; @@ -975,10 +972,6 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) if (p_ptr->connected) goto exit; - if (seq->lower > seq->upper) - goto exit; - if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE)) - goto exit; key = ref + p_ptr->pub_count + 1; if (key == ref) { res = -EADDRINUSE; @@ -1078,7 +1071,6 @@ exit: * * Port must be locked. */ - int tipc_disconnect_port(struct tipc_port *tp_ptr) { int res; @@ -1099,7 +1091,6 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr) * tipc_disconnect(): Disconnect port from peer. * This is a node local operation. 
*/ - int tipc_disconnect(u32 ref) { struct tipc_port *p_ptr; @@ -1134,7 +1125,6 @@ int tipc_shutdown(u32 ref) /** * tipc_port_recv_msg - receive message from lower layer and deliver to port user */ - int tipc_port_recv_msg(struct sk_buff *buf) { struct tipc_port *p_ptr; @@ -1152,17 +1142,6 @@ int tipc_port_recv_msg(struct sk_buff *buf) /* validate destination & pass to port, otherwise reject message */ p_ptr = tipc_port_lock(destport); if (likely(p_ptr)) { - if (likely(p_ptr->connected)) { - if ((unlikely(msg_origport(msg) != - tipc_peer_port(p_ptr))) || - (unlikely(msg_orignode(msg) != - tipc_peer_node(p_ptr))) || - (unlikely(!msg_connected(msg)))) { - err = TIPC_ERR_NO_PORT; - tipc_port_unlock(p_ptr); - goto reject; - } - } err = p_ptr->dispatcher(p_ptr, buf); tipc_port_unlock(p_ptr); if (likely(!err)) @@ -1170,7 +1149,7 @@ int tipc_port_recv_msg(struct sk_buff *buf) } else { err = TIPC_ERR_NO_PORT; } -reject: + return tipc_reject_msg(buf, err); } @@ -1178,7 +1157,6 @@ reject: * tipc_port_recv_sections(): Concatenate and deliver sectioned * message for this node. */ - static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect, struct iovec const *msg_sect, unsigned int total_len) @@ -1196,7 +1174,6 @@ static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_se /** * tipc_send - send message sections on connection */ - int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect, unsigned int total_len) { @@ -1211,7 +1188,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect, p_ptr->congested = 1; if (!tipc_port_congested(p_ptr)) { destnode = port_peernode(p_ptr); - if (likely(destnode != tipc_own_addr)) + if (likely(!in_own_node(destnode))) res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, total_len, destnode); else @@ -1235,7 +1212,6 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect, /** * tipc_send2name - send message sections to port name */ - int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain, unsigned int num_sect, struct iovec const *msg_sect, unsigned int total_len) @@ -1261,13 +1237,17 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain, msg_set_destport(msg, destport); if (likely(destport || destnode)) { - if (likely(destnode == tipc_own_addr)) + if (likely(in_own_node(destnode))) res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect, total_len); - else + else if (tipc_own_addr) res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, total_len, destnode); + else + res = tipc_port_reject_sections(p_ptr, msg, msg_sect, + num_sect, total_len, + TIPC_ERR_NO_NODE); if (likely(res != -ELINKCONG)) { if (res > 0) p_ptr->sent++; @@ -1285,7 +1265,6 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain, /** * tipc_send2port - send message sections to port identity */ - int tipc_send2port(u32 ref, struct tipc_portid const *dest, unsigned int num_sect, struct iovec const *msg_sect, unsigned int total_len) @@ -1305,12 +1284,15 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest, msg_set_destport(msg, dest->ref); msg_set_hdr_sz(msg, BASIC_H_SIZE); - if (dest->node == tipc_own_addr) + if (in_own_node(dest->node)) res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect, total_len); - else + else if (tipc_own_addr) res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, total_len, dest->node); + else + res = tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect, + 
total_len, TIPC_ERR_NO_NODE); if (likely(res != -ELINKCONG)) { if (res > 0) p_ptr->sent++; @@ -1325,7 +1307,6 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest, /** * tipc_send_buf2port - send message buffer to port identity */ - int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest, struct sk_buff *buf, unsigned int dsz) { @@ -1349,7 +1330,7 @@ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest, skb_push(buf, BASIC_H_SIZE); skb_copy_to_linear_data(buf, msg, BASIC_H_SIZE); - if (dest->node == tipc_own_addr) + if (in_own_node(dest->node)) res = tipc_port_recv_msg(buf); else res = tipc_send_buf_fast(buf, dest->node); @@ -1362,4 +1343,3 @@ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest, return dsz; return -ELINKCONG; } - diff --git a/net/tipc/port.h b/net/tipc/port.h index 9b88531e5a6..98cbec9c453 100644 --- a/net/tipc/port.h +++ b/net/tipc/port.h @@ -81,7 +81,6 @@ typedef void (*tipc_continue_event) (void *usr_handle, u32 portref); * @ref: object reference to associated TIPC port * <various callback routines> */ - struct user_port { void *usr_handle; u32 ref; @@ -201,6 +200,7 @@ int tipc_shutdown(u32 ref); * The following routines require that the port be locked on entry */ int tipc_disconnect_port(struct tipc_port *tp_ptr); +int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg); /* * TIPC messaging routines @@ -235,7 +235,6 @@ void tipc_port_reinit(void); /** * tipc_port_lock - lock port instance referred to and return its pointer */ - static inline struct tipc_port *tipc_port_lock(u32 ref) { return (struct tipc_port *)tipc_ref_lock(ref); @@ -246,7 +245,6 @@ static inline struct tipc_port *tipc_port_lock(u32 ref) * * Can use pointer instead of tipc_ref_unlock() since port is already locked. 
*/ - static inline void tipc_port_unlock(struct tipc_port *p_ptr) { spin_unlock_bh(p_ptr->lock); @@ -257,16 +255,6 @@ static inline struct tipc_port *tipc_port_deref(u32 ref) return (struct tipc_port *)tipc_ref_deref(ref); } -static inline u32 tipc_peer_port(struct tipc_port *p_ptr) -{ - return msg_destport(&p_ptr->phdr); -} - -static inline u32 tipc_peer_node(struct tipc_port *p_ptr) -{ - return msg_destnode(&p_ptr->phdr); -} - static inline int tipc_port_congested(struct tipc_port *p_ptr) { return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2); diff --git a/net/tipc/ref.c b/net/tipc/ref.c index 9e37b7812c3..5cada0e38e0 100644 --- a/net/tipc/ref.c +++ b/net/tipc/ref.c @@ -43,7 +43,6 @@ * @lock: spinlock controlling access to object * @ref: reference value for object (combines instance & array index info) */ - struct reference { void *object; spinlock_t lock; @@ -60,7 +59,6 @@ struct reference { * @index_mask: bitmask for array index portion of reference values * @start_mask: initial value for instance value portion of reference values */ - struct ref_table { struct reference *entries; u32 capacity; @@ -96,7 +94,6 @@ static DEFINE_RWLOCK(ref_table_lock); /** * tipc_ref_table_init - create reference table for objects */ - int tipc_ref_table_init(u32 requested_size, u32 start) { struct reference *table; @@ -109,7 +106,6 @@ int tipc_ref_table_init(u32 requested_size, u32 start) /* do nothing */ ; /* allocate table & mark all entries as uninitialized */ - table = vzalloc(actual_size * sizeof(struct reference)); if (table == NULL) return -ENOMEM; @@ -128,7 +124,6 @@ int tipc_ref_table_init(u32 requested_size, u32 start) /** * tipc_ref_table_stop - destroy reference table for objects */ - void tipc_ref_table_stop(void) { if (!tipc_ref_table.entries) @@ -149,7 +144,6 @@ void tipc_ref_table_stop(void) * register a partially initialized object, without running the risk that * the object will be accessed before initialization is complete. */ - u32 tipc_ref_acquire(void *object, spinlock_t **lock) { u32 index; @@ -168,7 +162,6 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock) } /* take a free entry, if available; otherwise initialize a new entry */ - write_lock_bh(&ref_table_lock); if (tipc_ref_table.first_free) { index = tipc_ref_table.first_free; @@ -211,7 +204,6 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock) * Disallow future references to an object and free up the entry for re-use. 
* Note: The entry's spin_lock may still be busy after discard */ - void tipc_ref_discard(u32 ref) { struct reference *entry; @@ -242,12 +234,10 @@ void tipc_ref_discard(u32 ref) * mark entry as unused; increment instance part of entry's reference * to invalidate any subsequent references */ - entry->object = NULL; entry->ref = (ref & ~index_mask) + (index_mask + 1); /* append entry to free entry list */ - if (tipc_ref_table.first_free == 0) tipc_ref_table.first_free = index; else @@ -261,7 +251,6 @@ exit: /** * tipc_ref_lock - lock referenced object and return pointer to it */ - void *tipc_ref_lock(u32 ref) { if (likely(tipc_ref_table.entries)) { @@ -283,7 +272,6 @@ void *tipc_ref_lock(u32 ref) /** * tipc_ref_deref - return pointer referenced object (without locking it) */ - void *tipc_ref_deref(u32 ref) { if (likely(tipc_ref_table.entries)) { @@ -296,4 +284,3 @@ void *tipc_ref_deref(u32 ref) } return NULL; } - diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 29e957f6445..5577a447f53 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -123,7 +123,6 @@ static atomic_t tipc_queue_size = ATOMIC_INIT(0); * * Caller must hold socket lock */ - static void advance_rx_queue(struct sock *sk) { kfree_skb(__skb_dequeue(&sk->sk_receive_queue)); @@ -135,7 +134,6 @@ static void advance_rx_queue(struct sock *sk) * * Caller must hold socket lock */ - static void discard_rx_queue(struct sock *sk) { struct sk_buff *buf; @@ -151,7 +149,6 @@ static void discard_rx_queue(struct sock *sk) * * Caller must hold socket lock */ - static void reject_rx_queue(struct sock *sk) { struct sk_buff *buf; @@ -174,7 +171,6 @@ static void reject_rx_queue(struct sock *sk) * * Returns 0 on success, errno otherwise */ - static int tipc_create(struct net *net, struct socket *sock, int protocol, int kern) { @@ -184,7 +180,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol, struct tipc_port *tp_ptr; /* Validate arguments */ - if (unlikely(protocol != 0)) return -EPROTONOSUPPORT; @@ -207,13 +202,11 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol, } /* Allocate socket's protocol area */ - sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto); if (sk == NULL) return -ENOMEM; /* Allocate TIPC port for socket to use */ - tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch, TIPC_LOW_IMPORTANCE); if (unlikely(!tp_ptr)) { @@ -222,7 +215,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol, } /* Finish initializing socket data structures */ - sock->ops = ops; sock->state = state; @@ -258,7 +250,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol, * * Returns 0 on success, errno otherwise */ - static int release(struct socket *sock) { struct sock *sk = sock->sk; @@ -270,7 +261,6 @@ static int release(struct socket *sock) * Exit if socket isn't fully initialized (occurs when a failed accept() * releases a pre-allocated child socket that was never used) */ - if (sk == NULL) return 0; @@ -281,7 +271,6 @@ static int release(struct socket *sock) * Reject all unreceived messages, except on an active connection * (which disconnects locally & sends a 'FIN+' to peer) */ - while (sock->state != SS_DISCONNECTING) { buf = __skb_dequeue(&sk->sk_receive_queue); if (buf == NULL) @@ -303,15 +292,12 @@ static int release(struct socket *sock) * Delete TIPC port; this ensures no more messages are queued * (also disconnects an active connection & sends a 'FIN-' to peer) */ - res = tipc_deleteport(tport->ref); /* Discard any 
remaining (connection-based) messages in receive queue */ - discard_rx_queue(sk); /* Reject any messages that accumulated in backlog queue */ - sock->state = SS_DISCONNECTING; release_sock(sk); @@ -336,7 +322,6 @@ static int release(struct socket *sock) * NOTE: This routine doesn't need to take the socket lock since it doesn't * access any non-constant socket information. */ - static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len) { struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; @@ -376,7 +361,6 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len) * accesses socket information that is unchanging (or which changes in * a completely predictable manner). */ - static int get_name(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { @@ -444,7 +428,6 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr, * imply that the operation will succeed, merely that it should be performed * and will not block. */ - static unsigned int poll(struct file *file, struct socket *sock, poll_table *wait) { @@ -482,7 +465,6 @@ static unsigned int poll(struct file *file, struct socket *sock, * * Returns 0 if permission is granted, otherwise errno */ - static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m) { struct tipc_cfg_msg_hdr hdr; @@ -518,7 +500,6 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m) * * Returns the number of bytes sent on success, or errno otherwise */ - static int send_msg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len) { @@ -535,7 +516,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock, (dest->family != AF_TIPC))) return -EINVAL; if ((total_len > TIPC_MAX_USER_MSG_SIZE) || - (m->msg_iovlen > (unsigned)INT_MAX)) + (m->msg_iovlen > (unsigned int)INT_MAX)) return -EMSGSIZE; if (iocb) @@ -562,7 +543,6 @@ static int send_msg(struct kiocb *iocb, struct socket *sock, } /* Abort any pending connection attempts (very unlikely) */ - reject_rx_queue(sk); } @@ -631,7 +611,6 @@ exit: * * Returns the number of bytes sent on success, or errno otherwise */ - static int send_packet(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len) { @@ -642,12 +621,11 @@ static int send_packet(struct kiocb *iocb, struct socket *sock, int res; /* Handle implied connection establishment */ - if (unlikely(dest)) return send_msg(iocb, sock, m, total_len); if ((total_len > TIPC_MAX_USER_MSG_SIZE) || - (m->msg_iovlen > (unsigned)INT_MAX)) + (m->msg_iovlen > (unsigned int)INT_MAX)) return -EMSGSIZE; if (iocb) @@ -695,7 +673,6 @@ static int send_packet(struct kiocb *iocb, struct socket *sock, * Returns the number of bytes sent on success (or partial success), * or errno if no data sent */ - static int send_stream(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len) { @@ -715,7 +692,6 @@ static int send_stream(struct kiocb *iocb, struct socket *sock, lock_sock(sk); /* Handle special cases where there is no connection */ - if (unlikely(sock->state != SS_CONNECTED)) { if (sock->state == SS_UNCONNECTED) { res = send_packet(NULL, sock, m, total_len); @@ -734,8 +710,8 @@ static int send_stream(struct kiocb *iocb, struct socket *sock, goto exit; } - if ((total_len > (unsigned)INT_MAX) || - (m->msg_iovlen > (unsigned)INT_MAX)) { + if ((total_len > (unsigned int)INT_MAX) || + (m->msg_iovlen > (unsigned int)INT_MAX)) { res = -EMSGSIZE; goto exit; } @@ -747,7 +723,6 @@ static int send_stream(struct kiocb *iocb, struct 
socket *sock, * (i.e. one large iovec entry), but could be improved to pass sets * of small iovec entries into send_packet(). */ - curr_iov = m->msg_iov; curr_iovlen = m->msg_iovlen; my_msg.msg_iov = &my_iov; @@ -796,7 +771,6 @@ exit: * * Returns 0 on success, errno otherwise */ - static int auto_connect(struct socket *sock, struct tipc_msg *msg) { struct tipc_sock *tsock = tipc_sk(sock->sk); @@ -821,7 +795,6 @@ static int auto_connect(struct socket *sock, struct tipc_msg *msg) * * Note: Address is not captured if not requested by receiver. */ - static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg) { struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name; @@ -847,7 +820,6 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg) * * Returns 0 if successful, otherwise errno */ - static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, struct tipc_port *tport) { @@ -861,7 +833,6 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, return 0; /* Optionally capture errored message object(s) */ - err = msg ? msg_errcode(msg) : 0; if (unlikely(err)) { anc_data[0] = err; @@ -878,7 +849,6 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, } /* Optionally capture message destination object */ - dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG; switch (dest_type) { case TIPC_NAMED_MSG: @@ -923,7 +893,6 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, * * Returns size of returned message data, errno otherwise */ - static int recv_msg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t buf_len, int flags) { @@ -937,7 +906,6 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock, int res; /* Catch invalid receive requests */ - if (unlikely(!buf_len)) return -EINVAL; @@ -952,7 +920,6 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock, restart: /* Look for a message in receive queue; wait if necessary */ - while (skb_queue_empty(&sk->sk_receive_queue)) { if (sock->state == SS_DISCONNECTING) { res = -ENOTCONN; @@ -970,14 +937,12 @@ restart: } /* Look at first message in receive queue */ - buf = skb_peek(&sk->sk_receive_queue); msg = buf_msg(buf); sz = msg_data_sz(msg); err = msg_errcode(msg); /* Complete connection setup for an implied connect */ - if (unlikely(sock->state == SS_CONNECTING)) { res = auto_connect(sock, msg); if (res) @@ -985,24 +950,20 @@ restart: } /* Discard an empty non-errored message & try again */ - if ((!sz) && (!err)) { advance_rx_queue(sk); goto restart; } /* Capture sender's address (optional) */ - set_orig_addr(m, msg); /* Capture ancillary data (optional) */ - res = anc_data_recv(m, msg, tport); if (res) goto exit; /* Capture message data (if valid) & compute return value (always) */ - if (!err) { if (unlikely(buf_len < sz)) { sz = buf_len; @@ -1022,7 +983,6 @@ restart: } /* Consume received message (optional) */ - if (likely(!(flags & MSG_PEEK))) { if ((sock->state != SS_READY) && (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) @@ -1046,7 +1006,6 @@ exit: * * Returns size of returned message data, errno otherwise */ - static int recv_stream(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t buf_len, int flags) { @@ -1062,7 +1021,6 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock, int res = 0; /* Catch invalid receive attempts */ - if (unlikely(!buf_len)) return -EINVAL; @@ -1076,10 +1034,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock, target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); timeout 
= sock_rcvtimeo(sk, flags & MSG_DONTWAIT); -restart: +restart: /* Look for a message in receive queue; wait if necessary */ - while (skb_queue_empty(&sk->sk_receive_queue)) { if (sock->state == SS_DISCONNECTING) { res = -ENOTCONN; @@ -1097,21 +1054,18 @@ restart: } /* Look at first message in receive queue */ - buf = skb_peek(&sk->sk_receive_queue); msg = buf_msg(buf); sz = msg_data_sz(msg); err = msg_errcode(msg); /* Discard an empty non-errored message & try again */ - if ((!sz) && (!err)) { advance_rx_queue(sk); goto restart; } /* Optionally capture sender's address & ancillary data of first msg */ - if (sz_copied == 0) { set_orig_addr(m, msg); res = anc_data_recv(m, msg, tport); @@ -1120,7 +1074,6 @@ restart: } /* Capture message data (if valid) & compute return value (always) */ - if (!err) { u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle); @@ -1152,7 +1105,6 @@ restart: } /* Consume received message (optional) */ - if (likely(!(flags & MSG_PEEK))) { if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) tipc_acknowledge(tport->ref, tport->conn_unacked); @@ -1160,7 +1112,6 @@ restart: } /* Loop around if more data is required */ - if ((sz_copied < buf_len) && /* didn't get all requested data */ (!skb_queue_empty(&sk->sk_receive_queue) || (sz_copied < target)) && /* and more is ready or required */ @@ -1181,7 +1132,6 @@ exit: * * Returns 1 if queue is unable to accept message, 0 otherwise */ - static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base) { u32 threshold; @@ -1214,7 +1164,6 @@ static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base) * * Returns TIPC error status code (TIPC_OK if message is not to be rejected) */ - static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) { struct socket *sock = sk->sk_socket; @@ -1222,12 +1171,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) u32 recv_q_len; /* Reject message if it is wrong sort of message for socket */ - - /* - * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD? - * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY - * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC - */ + if (msg_type(msg) > TIPC_DIRECT_MSG) + return TIPC_ERR_NO_PORT; if (sock->state == SS_READY) { if (msg_connected(msg)) @@ -1236,7 +1181,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) if (msg_mcast(msg)) return TIPC_ERR_NO_PORT; if (sock->state == SS_CONNECTED) { - if (!msg_connected(msg)) + if (!msg_connected(msg) || + !tipc_port_peer_msg(tipc_sk_port(sk), msg)) return TIPC_ERR_NO_PORT; } else if (sock->state == SS_CONNECTING) { if (!msg_connected(msg) && (msg_errcode(msg) == 0)) @@ -1253,7 +1199,6 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) } /* Reject message if there isn't room to queue it */ - recv_q_len = (u32)atomic_read(&tipc_queue_size); if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) { if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE)) @@ -1266,13 +1211,11 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) } /* Enqueue message (finally!) 
*/ - TIPC_SKB_CB(buf)->handle = 0; atomic_inc(&tipc_queue_size); __skb_queue_tail(&sk->sk_receive_queue, buf); /* Initiate connection termination for an incoming 'FIN' */ - if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) { sock->state = SS_DISCONNECTING; tipc_disconnect_port(tipc_sk_port(sk)); @@ -1292,7 +1235,6 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) * * Returns 0 */ - static int backlog_rcv(struct sock *sk, struct sk_buff *buf) { u32 res; @@ -1312,7 +1254,6 @@ static int backlog_rcv(struct sock *sk, struct sk_buff *buf) * * Returns TIPC error status code (TIPC_OK if message is not to be rejected) */ - static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf) { struct sock *sk = (struct sock *)tport->usr_handle; @@ -1324,12 +1265,11 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf) * This code is based on sk_receive_skb(), but must be distinct from it * since a TIPC-specific filter/reject mechanism is utilized */ - bh_lock_sock(sk); if (!sock_owned_by_user(sk)) { res = filter_rcv(sk, buf); } else { - if (sk_add_backlog(sk, buf)) + if (sk_add_backlog(sk, buf, sk->sk_rcvbuf)) res = TIPC_ERR_OVERLOAD; else res = TIPC_OK; @@ -1345,7 +1285,6 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf) * * Called with port lock already taken. */ - static void wakeupdispatch(struct tipc_port *tport) { struct sock *sk = (struct sock *)tport->usr_handle; @@ -1363,7 +1302,6 @@ static void wakeupdispatch(struct tipc_port *tport) * * Returns 0 on success, errno otherwise */ - static int connect(struct socket *sock, struct sockaddr *dest, int destlen, int flags) { @@ -1378,21 +1316,18 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen, lock_sock(sk); /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */ - if (sock->state == SS_READY) { res = -EOPNOTSUPP; goto exit; } /* For now, TIPC does not support the non-blocking form of connect() */ - if (flags & O_NONBLOCK) { res = -EOPNOTSUPP; goto exit; } /* Issue Posix-compliant error code if socket is in the wrong state */ - if (sock->state == SS_LISTENING) { res = -EOPNOTSUPP; goto exit; @@ -1412,18 +1347,15 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen, * Note: send_msg() validates the rest of the address fields, * so there's no need to do it here */ - if (dst->addrtype == TIPC_ADDR_MCAST) { res = -EINVAL; goto exit; } /* Reject any messages already in receive queue (very unlikely) */ - reject_rx_queue(sk); /* Send a 'SYN-' to destination */ - m.msg_name = dest; m.msg_namelen = destlen; res = send_msg(NULL, sock, &m, 0); @@ -1431,7 +1363,6 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen, goto exit; /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ - timeout = tipc_sk(sk)->conn_timeout; release_sock(sk); res = wait_event_interruptible_timeout(*sk_sleep(sk), @@ -1476,7 +1407,6 @@ exit: * * Returns 0 on success, errno otherwise */ - static int listen(struct socket *sock, int len) { struct sock *sk = sock->sk; @@ -1503,7 +1433,6 @@ static int listen(struct socket *sock, int len) * * Returns 0 on success, errno otherwise */ - static int accept(struct socket *sock, struct socket *new_sock, int flags) { struct sock *sk = sock->sk; @@ -1546,11 +1475,9 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags) * Reject any stray messages received by new socket * before the socket lock was taken (very, very unlikely) */ - reject_rx_queue(new_sk); /* 
Connect new socket to its peer */ - new_tsock->peer_name.ref = msg_origport(msg); new_tsock->peer_name.node = msg_orignode(msg); tipc_connect2port(new_ref, &new_tsock->peer_name); @@ -1566,7 +1493,6 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags) * Respond to 'SYN-' by discarding it & returning 'ACK'-. * Respond to 'SYN+' by queuing it on new socket. */ - if (!msg_data_sz(msg)) { struct msghdr m = {NULL,}; @@ -1592,7 +1518,6 @@ exit: * * Returns 0 on success, errno otherwise */ - static int shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; @@ -1609,8 +1534,8 @@ static int shutdown(struct socket *sock, int how) case SS_CONNECTING: case SS_CONNECTED: - /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */ restart: + /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */ buf = __skb_dequeue(&sk->sk_receive_queue); if (buf) { atomic_dec(&tipc_queue_size); @@ -1631,7 +1556,6 @@ restart: case SS_DISCONNECTING: /* Discard any unreceived messages; wake up sleeping tasks */ - discard_rx_queue(sk); if (waitqueue_active(sk_sleep(sk))) wake_up_interruptible(sk_sleep(sk)); @@ -1659,7 +1583,6 @@ restart: * * Returns 0 on success, errno otherwise */ - static int setsockopt(struct socket *sock, int lvl, int opt, char __user *ov, unsigned int ol) { @@ -1719,7 +1642,6 @@ static int setsockopt(struct socket *sock, * * Returns 0 on success, errno otherwise */ - static int getsockopt(struct socket *sock, int lvl, int opt, char __user *ov, int __user *ol) { @@ -1780,7 +1702,6 @@ static int getsockopt(struct socket *sock, /** * Protocol switches for the various types of TIPC sockets */ - static const struct proto_ops msg_ops = { .owner = THIS_MODULE, .family = AF_TIPC, @@ -1886,7 +1807,6 @@ int tipc_socket_init(void) /** * tipc_socket_stop - stop TIPC socket interface */ - void tipc_socket_stop(void) { if (!sockets_enabled) @@ -1896,4 +1816,3 @@ void tipc_socket_stop(void) sock_unregister(tipc_family_ops.family); proto_unregister(&tipc_proto); } - diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index b2964e9895d..f976e9cd6a7 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -46,7 +46,6 @@ * @subscriber_list: adjacent subscribers in top. server's list of subscribers * @subscription_list: list of subscription objects for this subscriber */ - struct tipc_subscriber { u32 port_ref; spinlock_t *lock; @@ -56,13 +55,11 @@ struct tipc_subscriber { /** * struct top_srv - TIPC network topology subscription service - * @user_ref: TIPC userid of subscription service * @setup_port: reference to TIPC port that handles subscription requests * @subscription_count: number of active subscriptions (not subscribers!) * @subscriber_list: list of ports subscribing to service * @lock: spinlock governing access to subscriber list */ - struct top_srv { u32 setup_port; atomic_t subscription_count; @@ -79,7 +76,6 @@ static struct top_srv topsrv; * * Returns converted value */ - static u32 htohl(u32 in, int swap) { return swap ? swab32(in) : in; @@ -91,7 +87,6 @@ static u32 htohl(u32 in, int swap) * Note: Must not hold subscriber's server port lock, since tipc_send() will * try to take the lock if the message is rejected and returned! */ - static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower, u32 found_upper, @@ -117,7 +112,6 @@ static void subscr_send_event(struct tipc_subscription *sub, * * Returns 1 if there is overlap, otherwise 0. 
*/ - int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower, u32 found_upper) @@ -137,7 +131,6 @@ int tipc_subscr_overlap(struct tipc_subscription *sub, * * Protected by nameseq.lock in name_table.c */ - void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower, u32 found_upper, @@ -157,43 +150,35 @@ void tipc_subscr_report_overlap(struct tipc_subscription *sub, /** * subscr_timeout - subscription timeout has occurred */ - static void subscr_timeout(struct tipc_subscription *sub) { struct tipc_port *server_port; /* Validate server port reference (in case subscriber is terminating) */ - server_port = tipc_port_lock(sub->server_ref); if (server_port == NULL) return; /* Validate timeout (in case subscription is being cancelled) */ - if (sub->timeout == TIPC_WAIT_FOREVER) { tipc_port_unlock(server_port); return; } /* Unlink subscription from name table */ - tipc_nametbl_unsubscribe(sub); /* Unlink subscription from subscriber */ - list_del(&sub->subscription_list); /* Release subscriber's server port */ - tipc_port_unlock(server_port); /* Notify subscriber of timeout */ - subscr_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper, TIPC_SUBSCR_TIMEOUT, 0, 0); /* Now destroy subscription */ - k_term_timer(&sub->timer); kfree(sub); atomic_dec(&topsrv.subscription_count); @@ -204,7 +189,6 @@ static void subscr_timeout(struct tipc_subscription *sub) * * Called with subscriber port locked. */ - static void subscr_del(struct tipc_subscription *sub) { tipc_nametbl_unsubscribe(sub); @@ -223,7 +207,6 @@ static void subscr_del(struct tipc_subscription *sub) * a new object reference in the interim that uses this lock; this routine will * simply wait for it to be released, then claim it.) */ - static void subscr_terminate(struct tipc_subscriber *subscriber) { u32 port_ref; @@ -231,18 +214,15 @@ static void subscr_terminate(struct tipc_subscriber *subscriber) struct tipc_subscription *sub_temp; /* Invalidate subscriber reference */ - port_ref = subscriber->port_ref; subscriber->port_ref = 0; spin_unlock_bh(subscriber->lock); /* Sever connection to subscriber */ - tipc_shutdown(port_ref); tipc_deleteport(port_ref); /* Destroy any existing subscriptions for subscriber */ - list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, subscription_list) { if (sub->timeout != TIPC_WAIT_FOREVER) { @@ -253,17 +233,14 @@ static void subscr_terminate(struct tipc_subscriber *subscriber) } /* Remove subscriber from topology server's subscriber list */ - spin_lock_bh(&topsrv.lock); list_del(&subscriber->subscriber_list); spin_unlock_bh(&topsrv.lock); /* Reclaim subscriber lock */ - spin_lock_bh(subscriber->lock); /* Now destroy subscriber */ - kfree(subscriber); } @@ -276,7 +253,6 @@ static void subscr_terminate(struct tipc_subscriber *subscriber) * * Note that fields of 's' use subscriber's endianness! 
*/ - static void subscr_cancel(struct tipc_subscr *s, struct tipc_subscriber *subscriber) { @@ -285,7 +261,6 @@ static void subscr_cancel(struct tipc_subscr *s, int found = 0; /* Find first matching subscription, exit if not found */ - list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, subscription_list) { if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) { @@ -297,7 +272,6 @@ static void subscr_cancel(struct tipc_subscr *s, return; /* Cancel subscription timer (if used), then delete subscription */ - if (sub->timeout != TIPC_WAIT_FOREVER) { sub->timeout = TIPC_WAIT_FOREVER; spin_unlock_bh(subscriber->lock); @@ -313,7 +287,6 @@ static void subscr_cancel(struct tipc_subscr *s, * * Called with subscriber port locked. */ - static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, struct tipc_subscriber *subscriber) { @@ -321,11 +294,9 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, int swap; /* Determine subscriber's endianness */ - swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE)); /* Detect & process a subscription cancellation request */ - if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) { s->filter &= ~htohl(TIPC_SUB_CANCEL, swap); subscr_cancel(s, subscriber); @@ -333,7 +304,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, } /* Refuse subscription if global limit exceeded */ - if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) { warn("Subscription rejected, subscription limit reached (%u)\n", tipc_max_subscriptions); @@ -342,7 +312,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, } /* Allocate subscription object */ - sub = kmalloc(sizeof(*sub), GFP_ATOMIC); if (!sub) { warn("Subscription rejected, no memory\n"); @@ -351,7 +320,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, } /* Initialize subscription object */ - sub->seq.type = htohl(s->seq.type, swap); sub->seq.lower = htohl(s->seq.lower, swap); sub->seq.upper = htohl(s->seq.upper, swap); @@ -385,7 +353,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, * * Called with subscriber's server port unlocked. */ - static void subscr_conn_shutdown_event(void *usr_handle, u32 port_ref, struct sk_buff **buf, @@ -409,7 +376,6 @@ static void subscr_conn_shutdown_event(void *usr_handle, * * Called with subscriber's server port unlocked. */ - static void subscr_conn_msg_event(void *usr_handle, u32 port_ref, struct sk_buff **buf, @@ -424,7 +390,6 @@ static void subscr_conn_msg_event(void *usr_handle, * Lock subscriber's server port (& make a local copy of lock pointer, * in case subscriber is deleted while processing subscription request) */ - if (tipc_port_lock(port_ref) == NULL) return; @@ -452,7 +417,6 @@ static void subscr_conn_msg_event(void *usr_handle, * timeout code cannot delete the subscription, * so the subscription object is still protected. 
*/ - tipc_nametbl_subscribe(sub); } } @@ -461,7 +425,6 @@ static void subscr_conn_msg_event(void *usr_handle, /** * subscr_named_msg_event - handle request to establish a new subscriber */ - static void subscr_named_msg_event(void *usr_handle, u32 port_ref, struct sk_buff **buf, @@ -475,7 +438,6 @@ static void subscr_named_msg_event(void *usr_handle, u32 server_port_ref; /* Create subscriber object */ - subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC); if (subscriber == NULL) { warn("Subscriber rejected, no memory\n"); @@ -485,7 +447,6 @@ static void subscr_named_msg_event(void *usr_handle, INIT_LIST_HEAD(&subscriber->subscriber_list); /* Create server port & establish connection to subscriber */ - tipc_createport(subscriber, importance, NULL, @@ -504,26 +465,21 @@ static void subscr_named_msg_event(void *usr_handle, tipc_connect2port(subscriber->port_ref, orig); /* Lock server port (& save lock address for future use) */ - subscriber->lock = tipc_port_lock(subscriber->port_ref)->lock; /* Add subscriber to topology server's subscriber list */ - spin_lock_bh(&topsrv.lock); list_add(&subscriber->subscriber_list, &topsrv.subscriber_list); spin_unlock_bh(&topsrv.lock); /* Unlock server port */ - server_port_ref = subscriber->port_ref; spin_unlock_bh(subscriber->lock); /* Send an ACK- to complete connection handshaking */ - tipc_send(server_port_ref, 0, NULL, 0); /* Handle optional subscription request */ - if (size != 0) { subscr_conn_msg_event(subscriber, server_port_ref, buf, data, size); @@ -535,7 +491,6 @@ int tipc_subscr_start(void) struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV}; int res; - memset(&topsrv, 0, sizeof(topsrv)); spin_lock_init(&topsrv.lock); INIT_LIST_HEAD(&topsrv.subscriber_list); diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h index ef6529c8456..218d2e07f0c 100644 --- a/net/tipc/subscr.h +++ b/net/tipc/subscr.h @@ -51,7 +51,6 @@ struct tipc_subscription; * @swap: indicates if subscriber uses opposite endianness in its messages * @evt: template for events generated by subscription */ - struct tipc_subscription { struct tipc_name_seq seq; u32 timeout; @@ -80,5 +79,4 @@ int tipc_subscr_start(void); void tipc_subscr_stop(void); - #endif diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index d510353ef43..641f2e47f16 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -149,9 +149,10 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) * each socket state is protected by separate spin lock. */ -static inline unsigned unix_hash_fold(__wsum n) +static inline unsigned int unix_hash_fold(__wsum n) { - unsigned hash = (__force unsigned)n; + unsigned int hash = (__force unsigned int)n; + hash ^= hash>>16; hash ^= hash>>8; return hash&(UNIX_HASH_SIZE-1); @@ -200,7 +201,7 @@ static inline void unix_release_addr(struct unix_address *addr) * - if started by zero, it is abstract name. 
*/ -static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp) +static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp) { if (len <= sizeof(short) || len > sizeof(*sunaddr)) return -EINVAL; @@ -250,7 +251,7 @@ static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk) static struct sock *__unix_find_socket_byname(struct net *net, struct sockaddr_un *sunname, - int len, int type, unsigned hash) + int len, int type, unsigned int hash) { struct sock *s; struct hlist_node *node; @@ -273,7 +274,7 @@ found: static inline struct sock *unix_find_socket_byname(struct net *net, struct sockaddr_un *sunname, int len, int type, - unsigned hash) + unsigned int hash) { struct sock *s; @@ -760,7 +761,7 @@ out: mutex_unlock(&u->readlock); static struct sock *unix_find_other(struct net *net, struct sockaddr_un *sunname, int len, - int type, unsigned hash, int *error) + int type, unsigned int hash, int *error) { struct sock *u; struct path path; @@ -824,7 +825,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct dentry *dentry = NULL; struct path path; int err; - unsigned hash; + unsigned int hash; struct unix_address *addr; struct hlist_head *list; @@ -964,7 +965,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, struct net *net = sock_net(sk); struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr; struct sock *other; - unsigned hash; + unsigned int hash; int err; if (addr->sa_family != AF_UNSPEC) { @@ -1062,7 +1063,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, struct sock *newsk = NULL; struct sock *other = NULL; struct sk_buff *skb = NULL; - unsigned hash; + unsigned int hash; int st; int err; long timeo; @@ -1437,11 +1438,12 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, struct sock *other = NULL; int namelen = 0; /* fake GCC */ int err; - unsigned hash; + unsigned int hash; struct sk_buff *skb; long timeo; struct scm_cookie tmp_scm; int max_level; + int data_len = 0; if (NULL == siocb->scm) siocb->scm = &tmp_scm; @@ -1475,7 +1477,13 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, if (len > sk->sk_sndbuf - 32) goto out; - skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err); + if (len > SKB_MAX_ALLOC) + data_len = min_t(size_t, + len - SKB_MAX_ALLOC, + MAX_SKB_FRAGS * PAGE_SIZE); + + skb = sock_alloc_send_pskb(sk, len - data_len, data_len, + msg->msg_flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out; @@ -1485,8 +1493,10 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, max_level = err + 1; unix_get_secdata(siocb->scm, skb); - skb_reset_transport_header(skb); - err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); + skb_put(skb, len - data_len); + skb->data_len = data_len; + skb->len = len; + err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len); if (err) goto out_free; diff --git a/net/unix/diag.c b/net/unix/diag.c index f0486ae9ebe..47d3002737f 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -310,7 +310,7 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h)); } -static struct sock_diag_handler unix_diag_handler = { +static const struct sock_diag_handler unix_diag_handler = { .family = AF_UNIX, .dump = unix_diag_handler_dump, }; diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c index 
397cffebb3b..b34b5b9792f 100644 --- a/net/unix/sysctl_net_unix.c +++ b/net/unix/sysctl_net_unix.c @@ -26,12 +26,6 @@ static ctl_table unix_table[] = { { } }; -static struct ctl_path unix_path[] = { - { .procname = "net", }, - { .procname = "unix", }, - { }, -}; - int __net_init unix_sysctl_register(struct net *net) { struct ctl_table *table; @@ -41,7 +35,7 @@ int __net_init unix_sysctl_register(struct net *net) goto err_alloc; table[0].data = &net->unx.sysctl_max_dgram_qlen; - net->unx.ctl = register_net_sysctl_table(net, unix_path, table); + net->unx.ctl = register_net_sysctl(net, "net/unix", table); if (net->unx.ctl == NULL) goto err_reg; @@ -58,6 +52,6 @@ void unix_sysctl_unregister(struct net *net) struct ctl_table *table; table = net->unx.ctl->ctl_table_arg; - unregister_sysctl_table(net->unx.ctl); + unregister_net_sysctl_table(net->unx.ctl); kfree(table); } diff --git a/net/wimax/stack.c b/net/wimax/stack.c index 3c65eae701c..a6470ac3949 100644 --- a/net/wimax/stack.c +++ b/net/wimax/stack.c @@ -187,7 +187,7 @@ out: static void __check_new_state(enum wimax_st old_state, enum wimax_st new_state, - unsigned allowed_states_bm) + unsigned int allowed_states_bm) { if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) { printk(KERN_ERR "SW BUG! Forbidden state change %u -> %u\n", @@ -425,7 +425,8 @@ static size_t wimax_addr_scnprint(char *addr_str, size_t addr_str_size, unsigned char *addr, size_t addr_len) { - unsigned cnt, total; + unsigned int cnt, total; + for (total = cnt = 0; cnt < addr_len; cnt++) total += scnprintf(addr_str + total, addr_str_size - total, "%02x%c", addr[cnt], diff --git a/net/wireless/core.c b/net/wireless/core.c index ccdfed89765..39f2538a46f 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -422,10 +422,6 @@ static int wiphy_verify_combinations(struct wiphy *wiphy) const struct ieee80211_iface_combination *c; int i, j; - /* If we have combinations enforce them */ - if (wiphy->n_iface_combinations) - wiphy->flags |= WIPHY_FLAG_ENFORCE_COMBINATIONS; - for (i = 0; i < wiphy->n_iface_combinations; i++) { u32 cnt = 0; u16 all_iftypes = 0; @@ -708,6 +704,10 @@ void wiphy_unregister(struct wiphy *wiphy) flush_work(&rdev->scan_done_wk); cancel_work_sync(&rdev->conn_work); flush_work(&rdev->event_work); + + if (rdev->wowlan && rdev->ops->set_wakeup) + rdev->ops->set_wakeup(&rdev->wiphy, false); + cfg80211_rdev_free_wowlan(rdev); } EXPORT_SYMBOL(wiphy_unregister); @@ -720,7 +720,6 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev) mutex_destroy(&rdev->sched_scan_mtx); list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) cfg80211_put_bss(&scan->pub); - cfg80211_rdev_free_wowlan(rdev); kfree(rdev); } diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c index 9bde4d1d3e9..7eecdf40cf8 100644 --- a/net/wireless/ethtool.c +++ b/net/wireless/ethtool.c @@ -68,6 +68,32 @@ static int cfg80211_set_ringparam(struct net_device *dev, return -ENOTSUPP; } +static int cfg80211_get_sset_count(struct net_device *dev, int sset) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + if (rdev->ops->get_et_sset_count) + return rdev->ops->get_et_sset_count(wdev->wiphy, dev, sset); + return -EOPNOTSUPP; +} + +static void cfg80211_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + if (rdev->ops->get_et_stats) + 
rdev->ops->get_et_stats(wdev->wiphy, dev, stats, data); +} + +static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + if (rdev->ops->get_et_strings) + rdev->ops->get_et_strings(wdev->wiphy, dev, sset, data); +} + const struct ethtool_ops cfg80211_ethtool_ops = { .get_drvinfo = cfg80211_get_drvinfo, .get_regs_len = cfg80211_get_regs_len, @@ -75,4 +101,7 @@ const struct ethtool_ops cfg80211_ethtool_ops = { .get_link = ethtool_op_get_link, .get_ringparam = cfg80211_get_ringparam, .set_ringparam = cfg80211_set_ringparam, + .get_strings = cfg80211_get_strings, + .get_ethtool_stats = cfg80211_get_stats, + .get_sset_count = cfg80211_get_sset_count, }; diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index 30f20fe4a5f..d2a19b0ff71 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c @@ -473,7 +473,7 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev, /* fixed already - and no change */ if (wdev->wext.ibss.bssid && bssid && - compare_ether_addr(bssid, wdev->wext.ibss.bssid) == 0) + ether_addr_equal(bssid, wdev->wext.ibss.bssid)) return 0; wdev_lock(wdev); diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c index 755738d26bb..1526c211db6 100644 --- a/net/wireless/lib80211_crypt_ccmp.c +++ b/net/wireless/lib80211_crypt_ccmp.c @@ -304,10 +304,8 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) pos = skb->data + hdr_len; keyidx = pos[3]; if (!(keyidx & (1 << 5))) { - if (net_ratelimit()) { - printk(KERN_DEBUG "CCMP: received packet without ExtIV" - " flag from %pM\n", hdr->addr2); - } + net_dbg_ratelimited("CCMP: received packet without ExtIV flag from %pM\n", + hdr->addr2); key->dot11RSNAStatsCCMPFormatErrors++; return -2; } @@ -318,11 +316,8 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) return -6; } if (!key->key_set) { - if (net_ratelimit()) { - printk(KERN_DEBUG "CCMP: received packet from %pM" - " with keyid=%d that does not have a configured" - " key\n", hdr->addr2, keyidx); - } + net_dbg_ratelimited("CCMP: received packet from %pM with keyid=%d that does not have a configured key\n", + hdr->addr2, keyidx); return -3; } @@ -336,15 +331,11 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) if (ccmp_replay_check(pn, key->rx_pn)) { #ifdef CONFIG_LIB80211_DEBUG - if (net_ratelimit()) { - printk(KERN_DEBUG "CCMP: replay detected: STA=%pM " - "previous PN %02x%02x%02x%02x%02x%02x " - "received PN %02x%02x%02x%02x%02x%02x\n", - hdr->addr2, - key->rx_pn[0], key->rx_pn[1], key->rx_pn[2], - key->rx_pn[3], key->rx_pn[4], key->rx_pn[5], - pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]); - } + net_dbg_ratelimited("CCMP: replay detected: STA=%pM previous PN %02x%02x%02x%02x%02x%02x received PN %02x%02x%02x%02x%02x%02x\n", + hdr->addr2, + key->rx_pn[0], key->rx_pn[1], key->rx_pn[2], + key->rx_pn[3], key->rx_pn[4], key->rx_pn[5], + pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]); #endif key->dot11RSNAStatsCCMPReplays++; return -4; @@ -370,10 +361,8 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) } if (memcmp(mic, a, CCMP_MIC_LEN) != 0) { - if (net_ratelimit()) { - printk(KERN_DEBUG "CCMP: decrypt failed: STA=" - "%pM\n", hdr->addr2); - } + net_dbg_ratelimited("CCMP: decrypt failed: STA=%pM\n", + hdr->addr2); key->dot11RSNAStatsCCMPDecryptErrors++; return -5; } diff --git 
a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c index 38734846c19..d475cfc8568 100644 --- a/net/wireless/lib80211_crypt_tkip.c +++ b/net/wireless/lib80211_crypt_tkip.c @@ -360,12 +360,9 @@ static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) struct scatterlist sg; if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { - if (net_ratelimit()) { - struct ieee80211_hdr *hdr = - (struct ieee80211_hdr *)skb->data; - printk(KERN_DEBUG ": TKIP countermeasures: dropped " - "TX packet to %pM\n", hdr->addr1); - } + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + net_dbg_ratelimited("TKIP countermeasures: dropped TX packet to %pM\n", + hdr->addr1); return -1; } @@ -420,10 +417,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) hdr = (struct ieee80211_hdr *)skb->data; if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { - if (net_ratelimit()) { - printk(KERN_DEBUG ": TKIP countermeasures: dropped " - "received packet from %pM\n", hdr->addr2); - } + net_dbg_ratelimited("TKIP countermeasures: dropped received packet from %pM\n", + hdr->addr2); return -1; } @@ -433,10 +428,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) pos = skb->data + hdr_len; keyidx = pos[3]; if (!(keyidx & (1 << 5))) { - if (net_ratelimit()) { - printk(KERN_DEBUG "TKIP: received packet without ExtIV" - " flag from %pM\n", hdr->addr2); - } + net_dbg_ratelimited("TKIP: received packet without ExtIV flag from %pM\n", + hdr->addr2); return -2; } keyidx >>= 6; @@ -446,11 +439,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) return -6; } if (!tkey->key_set) { - if (net_ratelimit()) { - printk(KERN_DEBUG "TKIP: received packet from %pM" - " with keyid=%d that does not have a configured" - " key\n", hdr->addr2, keyidx); - } + net_dbg_ratelimited("TKIP: received packet from %pM with keyid=%d that does not have a configured key\n", + hdr->addr2, keyidx); return -3; } iv16 = (pos[0] << 8) | pos[2]; @@ -459,12 +449,9 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) { #ifdef CONFIG_LIB80211_DEBUG - if (net_ratelimit()) { - printk(KERN_DEBUG "TKIP: replay detected: STA=%pM" - " previous TSC %08x%04x received TSC " - "%08x%04x\n", hdr->addr2, - tkey->rx_iv32, tkey->rx_iv16, iv32, iv16); - } + net_dbg_ratelimited("TKIP: replay detected: STA=%pM previous TSC %08x%04x received TSC %08x%04x\n", + hdr->addr2, tkey->rx_iv32, tkey->rx_iv16, + iv32, iv16); #endif tkey->dot11RSNAStatsTKIPReplays++; return -4; @@ -481,11 +468,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); sg_init_one(&sg, pos, plen + 4); if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) { - if (net_ratelimit()) { - printk(KERN_DEBUG ": TKIP: failed to decrypt " - "received packet from %pM\n", - hdr->addr2); - } + net_dbg_ratelimited("TKIP: failed to decrypt received packet from %pM\n", + hdr->addr2); return -7; } @@ -501,10 +485,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) tkey->rx_phase1_done = 0; } #ifdef CONFIG_LIB80211_DEBUG - if (net_ratelimit()) { - printk(KERN_DEBUG "TKIP: ICV error detected: STA=" - "%pM\n", hdr->addr2); - } + net_dbg_ratelimited("TKIP: ICV error detected: STA=%pM\n", + hdr->addr2); #endif tkey->dot11RSNAStatsTKIPICVErrors++; return -5; diff --git 
a/net/wireless/mesh.c b/net/wireless/mesh.c index ba21ab22187..2749cb86b46 100644 --- a/net/wireless/mesh.c +++ b/net/wireless/mesh.c @@ -38,6 +38,7 @@ #define MESH_MAX_PREQ_RETRIES 4 +#define MESH_SYNC_NEIGHBOR_OFFSET_MAX 50 const struct mesh_config default_mesh_config = { .dot11MeshRetryTimeout = MESH_RET_T, @@ -48,6 +49,7 @@ const struct mesh_config default_mesh_config = { .element_ttl = MESH_DEFAULT_ELEMENT_TTL, .auto_open_plinks = true, .dot11MeshMaxPeerLinks = MESH_MAX_ESTAB_PLINKS, + .dot11MeshNbrOffsetMaxNeighbor = MESH_SYNC_NEIGHBOR_OFFSET_MAX, .dot11MeshHWMPactivePathTimeout = MESH_PATH_TIMEOUT, .dot11MeshHWMPpreqMinInterval = MESH_PREQ_MIN_INT, .dot11MeshHWMPperrMinInterval = MESH_PERR_MIN_INT, @@ -59,9 +61,11 @@ const struct mesh_config default_mesh_config = { .dot11MeshGateAnnouncementProtocol = false, .dot11MeshForwarding = true, .rssi_threshold = MESH_RSSI_THRESHOLD, + .ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED, }; const struct mesh_setup default_mesh_setup = { + .sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET, .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP, .path_metric = IEEE80211_PATH_METRIC_AIRTIME, .ie = NULL, diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index f5a7ac3a093..eb90988bbd3 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -6,6 +6,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/etherdevice.h> #include <linux/netdevice.h> #include <linux/nl80211.h> #include <linux/slab.h> @@ -100,7 +101,7 @@ void __cfg80211_send_deauth(struct net_device *dev, ASSERT_WDEV_LOCK(wdev); if (wdev->current_bss && - memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { + ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(&wdev->current_bss->pub); wdev->current_bss = NULL; @@ -115,7 +116,7 @@ void __cfg80211_send_deauth(struct net_device *dev, reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); - from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0; + from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr); __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); } else if (wdev->sme_state == CFG80211_SME_CONNECTING) { __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0, @@ -154,7 +155,7 @@ void __cfg80211_send_disassoc(struct net_device *dev, return; if (wdev->current_bss && - memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { + ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) { cfg80211_sme_disassoc(dev, wdev->current_bss); cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(&wdev->current_bss->pub); @@ -165,7 +166,7 @@ void __cfg80211_send_disassoc(struct net_device *dev, reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); - from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0; + from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr); __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); } EXPORT_SYMBOL(__cfg80211_send_disassoc); @@ -285,7 +286,7 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, return -EINVAL; if (wdev->current_bss && - memcmp(bssid, wdev->current_bss->pub.bssid, ETH_ALEN) == 0) + ether_addr_equal(bssid, wdev->current_bss->pub.bssid)) return -EALREADY; memset(&req, 0, sizeof(req)); @@ -362,7 +363,7 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, memset(&req, 0, sizeof(req)); if (wdev->current_bss && prev_bssid && - memcmp(wdev->current_bss->pub.bssid, prev_bssid, ETH_ALEN) == 0) { + ether_addr_equal(wdev->current_bss->pub.bssid, 
prev_bssid)) { /* * Trying to reassociate: Allow this to proceed and let the old * association to be dropped when the new one is completed. @@ -446,7 +447,7 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, if (local_state_change) { if (wdev->current_bss && - memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { + ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(&wdev->current_bss->pub); wdev->current_bss = NULL; @@ -495,7 +496,7 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, req.local_state_change = local_state_change; req.ie = ie; req.ie_len = ie_len; - if (memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) + if (ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) req.bss = &wdev->current_bss->pub; else return -ENOTCONN; @@ -758,8 +759,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, break; } - if (memcmp(wdev->current_bss->pub.bssid, - mgmt->bssid, ETH_ALEN)) { + if (!ether_addr_equal(wdev->current_bss->pub.bssid, + mgmt->bssid)) { err = -ENOTCONN; break; } @@ -772,8 +773,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, break; /* for station, check that DA is the AP */ - if (memcmp(wdev->current_bss->pub.bssid, - mgmt->da, ETH_ALEN)) { + if (!ether_addr_equal(wdev->current_bss->pub.bssid, + mgmt->da)) { err = -ENOTCONN; break; } @@ -781,11 +782,11 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_AP_VLAN: - if (memcmp(mgmt->bssid, dev->dev_addr, ETH_ALEN)) + if (!ether_addr_equal(mgmt->bssid, dev->dev_addr)) err = -EINVAL; break; case NL80211_IFTYPE_MESH_POINT: - if (memcmp(mgmt->sa, mgmt->bssid, ETH_ALEN)) { + if (!ether_addr_equal(mgmt->sa, mgmt->bssid)) { err = -EINVAL; break; } @@ -804,7 +805,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, return err; } - if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0) + if (!ether_addr_equal(mgmt->sa, dev->dev_addr)) return -EINVAL; /* Transmit the Action frame as requested by user space */ @@ -928,6 +929,33 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index, } EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify); +void cfg80211_ch_switch_notify(struct net_device *dev, int freq, + enum nl80211_channel_type type) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + struct ieee80211_channel *chan; + + wdev_lock(wdev); + + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && + wdev->iftype != NL80211_IFTYPE_P2P_GO)) + goto out; + + chan = rdev_freq_to_chan(rdev, freq, type); + if (WARN_ON(!chan)) + goto out; + + wdev->channel = chan; + + nl80211_ch_switch_notify(rdev, dev, freq, type, GFP_KERNEL); +out: + wdev_unlock(wdev); + return; +} +EXPORT_SYMBOL(cfg80211_ch_switch_notify); + bool cfg80211_rx_spurious_frame(struct net_device *dev, const u8 *addr, gfp_t gfp) { diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index f432c57af05..b67b1114e25 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -356,20 +356,26 @@ static inline void *nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq, static int nl80211_msg_put_channel(struct sk_buff *msg, struct ieee80211_channel *chan) { - NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_FREQ, - chan->center_freq); + if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ, + 
chan->center_freq)) + goto nla_put_failure; - if (chan->flags & IEEE80211_CHAN_DISABLED) - NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_DISABLED); - if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) - NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN); - if (chan->flags & IEEE80211_CHAN_NO_IBSS) - NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_NO_IBSS); - if (chan->flags & IEEE80211_CHAN_RADAR) - NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_RADAR); + if ((chan->flags & IEEE80211_CHAN_DISABLED) && + nla_put_flag(msg, NL80211_FREQUENCY_ATTR_DISABLED)) + goto nla_put_failure; + if ((chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) && + nla_put_flag(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN)) + goto nla_put_failure; + if ((chan->flags & IEEE80211_CHAN_NO_IBSS) && + nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS)) + goto nla_put_failure; + if ((chan->flags & IEEE80211_CHAN_RADAR) && + nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) + goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, - DBM_TO_MBM(chan->max_power)); + if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, + DBM_TO_MBM(chan->max_power))) + goto nla_put_failure; return 0; @@ -621,8 +627,8 @@ static int nl80211_put_iftypes(struct sk_buff *msg, u32 attr, u16 ifmodes) i = 0; while (ifmodes) { - if (ifmodes & 1) - NLA_PUT_FLAG(msg, i); + if ((ifmodes & 1) && nla_put_flag(msg, i)) + goto nla_put_failure; ifmodes >>= 1; i++; } @@ -665,8 +671,9 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy, nl_limit = nla_nest_start(msg, j + 1); if (!nl_limit) goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_IFACE_LIMIT_MAX, - c->limits[j].max); + if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX, + c->limits[j].max)) + goto nla_put_failure; if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES, c->limits[j].types)) goto nla_put_failure; @@ -675,13 +682,14 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy, nla_nest_end(msg, nl_limits); - if (c->beacon_int_infra_match) - NLA_PUT_FLAG(msg, - NL80211_IFACE_COMB_STA_AP_BI_MATCH); - NLA_PUT_U32(msg, NL80211_IFACE_COMB_NUM_CHANNELS, - c->num_different_channels); - NLA_PUT_U32(msg, NL80211_IFACE_COMB_MAXNUM, - c->max_interfaces); + if (c->beacon_int_infra_match && + nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH)) + goto nla_put_failure; + if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS, + c->num_different_channels) || + nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM, + c->max_interfaces)) + goto nla_put_failure; nla_nest_end(msg, nl_combi); } @@ -712,64 +720,74 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, if (!hdr) return -1; - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx); - NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); - - NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, - cfg80211_rdev_list_generation); - - NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT, - dev->wiphy.retry_short); - NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_LONG, - dev->wiphy.retry_long); - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD, - dev->wiphy.frag_threshold); - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, - dev->wiphy.rts_threshold); - NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS, - dev->wiphy.coverage_class); - NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, - dev->wiphy.max_scan_ssids); - NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS, - dev->wiphy.max_sched_scan_ssids); - NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN, - dev->wiphy.max_scan_ie_len); - NLA_PUT_U16(msg, 
NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN, - dev->wiphy.max_sched_scan_ie_len); - NLA_PUT_U8(msg, NL80211_ATTR_MAX_MATCH_SETS, - dev->wiphy.max_match_sets); - - if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) - NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN); - if (dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) - NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_MESH_AUTH); - if (dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) - NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_AP_UAPSD); - if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) - NLA_PUT_FLAG(msg, NL80211_ATTR_ROAM_SUPPORT); - if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) - NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_SUPPORT); - if (dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) - NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP); - - NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES, - sizeof(u32) * dev->wiphy.n_cipher_suites, - dev->wiphy.cipher_suites); - - NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_PMKIDS, - dev->wiphy.max_num_pmkids); - - if (dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) - NLA_PUT_FLAG(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE); - - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX, - dev->wiphy.available_antennas_tx); - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX, - dev->wiphy.available_antennas_rx); - - if (dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) - NLA_PUT_U32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD, - dev->wiphy.probe_resp_offload); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx) || + nla_put_string(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)) || + nla_put_u32(msg, NL80211_ATTR_GENERATION, + cfg80211_rdev_list_generation) || + nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT, + dev->wiphy.retry_short) || + nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG, + dev->wiphy.retry_long) || + nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD, + dev->wiphy.frag_threshold) || + nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, + dev->wiphy.rts_threshold) || + nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS, + dev->wiphy.coverage_class) || + nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, + dev->wiphy.max_scan_ssids) || + nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS, + dev->wiphy.max_sched_scan_ssids) || + nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN, + dev->wiphy.max_scan_ie_len) || + nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN, + dev->wiphy.max_sched_scan_ie_len) || + nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS, + dev->wiphy.max_match_sets)) + goto nla_put_failure; + + if ((dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) && + nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN)) + goto nla_put_failure; + if ((dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) && + nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH)) + goto nla_put_failure; + if ((dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) && + nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD)) + goto nla_put_failure; + if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) && + nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT)) + goto nla_put_failure; + if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) && + nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT)) + goto nla_put_failure; + if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) && + nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP)) + goto nla_put_failure; + + if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES, + sizeof(u32) * dev->wiphy.n_cipher_suites, + dev->wiphy.cipher_suites)) + goto nla_put_failure; + + if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS, + dev->wiphy.max_num_pmkids)) + goto nla_put_failure; + + if ((dev->wiphy.flags & 
WIPHY_FLAG_CONTROL_PORT_PROTOCOL) && + nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE)) + goto nla_put_failure; + + if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX, + dev->wiphy.available_antennas_tx) || + nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX, + dev->wiphy.available_antennas_rx)) + goto nla_put_failure; + + if ((dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) && + nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD, + dev->wiphy.probe_resp_offload)) + goto nla_put_failure; if ((dev->wiphy.available_antennas_tx || dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) { @@ -777,8 +795,11 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, int res; res = dev->ops->get_antenna(&dev->wiphy, &tx_ant, &rx_ant); if (!res) { - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, tx_ant); - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX, rx_ant); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, + tx_ant) || + nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX, + rx_ant)) + goto nla_put_failure; } } @@ -799,17 +820,17 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, goto nla_put_failure; /* add HT info */ - if (dev->wiphy.bands[band]->ht_cap.ht_supported) { - NLA_PUT(msg, NL80211_BAND_ATTR_HT_MCS_SET, - sizeof(dev->wiphy.bands[band]->ht_cap.mcs), - &dev->wiphy.bands[band]->ht_cap.mcs); - NLA_PUT_U16(msg, NL80211_BAND_ATTR_HT_CAPA, - dev->wiphy.bands[band]->ht_cap.cap); - NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR, - dev->wiphy.bands[band]->ht_cap.ampdu_factor); - NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY, - dev->wiphy.bands[band]->ht_cap.ampdu_density); - } + if (dev->wiphy.bands[band]->ht_cap.ht_supported && + (nla_put(msg, NL80211_BAND_ATTR_HT_MCS_SET, + sizeof(dev->wiphy.bands[band]->ht_cap.mcs), + &dev->wiphy.bands[band]->ht_cap.mcs) || + nla_put_u16(msg, NL80211_BAND_ATTR_HT_CAPA, + dev->wiphy.bands[band]->ht_cap.cap) || + nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR, + dev->wiphy.bands[band]->ht_cap.ampdu_factor) || + nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY, + dev->wiphy.bands[band]->ht_cap.ampdu_density))) + goto nla_put_failure; /* add frequencies */ nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS); @@ -842,11 +863,13 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, goto nla_put_failure; rate = &dev->wiphy.bands[band]->bitrates[i]; - NLA_PUT_U32(msg, NL80211_BITRATE_ATTR_RATE, - rate->bitrate); - if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) - NLA_PUT_FLAG(msg, - NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE); + if (nla_put_u32(msg, NL80211_BITRATE_ATTR_RATE, + rate->bitrate)) + goto nla_put_failure; + if ((rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) && + nla_put_flag(msg, + NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE)) + goto nla_put_failure; nla_nest_end(msg, nl_rate); } @@ -866,7 +889,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, do { \ if (dev->ops->op) { \ i++; \ - NLA_PUT_U32(msg, i, NL80211_CMD_ ## n); \ + if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \ + goto nla_put_failure; \ } \ } while (0) @@ -894,7 +918,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL); if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) { i++; - NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS); + if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS)) + goto nla_put_failure; } CMD(set_channel, SET_CHANNEL); CMD(set_wds_peer, SET_WDS_PEER); @@ 
-908,7 +933,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, CMD(set_noack_map, SET_NOACK_MAP); if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) { i++; - NLA_PUT_U32(msg, i, NL80211_CMD_REGISTER_BEACONS); + if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS)) + goto nla_put_failure; } #ifdef CONFIG_NL80211_TESTMODE @@ -919,23 +945,27 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, if (dev->ops->connect || dev->ops->auth) { i++; - NLA_PUT_U32(msg, i, NL80211_CMD_CONNECT); + if (nla_put_u32(msg, i, NL80211_CMD_CONNECT)) + goto nla_put_failure; } if (dev->ops->disconnect || dev->ops->deauth) { i++; - NLA_PUT_U32(msg, i, NL80211_CMD_DISCONNECT); + if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT)) + goto nla_put_failure; } nla_nest_end(msg, nl_cmds); if (dev->ops->remain_on_channel && - dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) - NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION, - dev->wiphy.max_remain_on_channel_duration); + (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) && + nla_put_u32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION, + dev->wiphy.max_remain_on_channel_duration)) + goto nla_put_failure; - if (dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) - NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK); + if ((dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) && + nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK)) + goto nla_put_failure; if (mgmt_stypes) { u16 stypes; @@ -953,9 +983,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, i = 0; stypes = mgmt_stypes[ift].tx; while (stypes) { - if (stypes & 1) - NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE, - (i << 4) | IEEE80211_FTYPE_MGMT); + if ((stypes & 1) && + nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE, + (i << 4) | IEEE80211_FTYPE_MGMT)) + goto nla_put_failure; stypes >>= 1; i++; } @@ -975,9 +1006,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, i = 0; stypes = mgmt_stypes[ift].rx; while (stypes) { - if (stypes & 1) - NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE, - (i << 4) | IEEE80211_FTYPE_MGMT); + if ((stypes & 1) && + nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE, + (i << 4) | IEEE80211_FTYPE_MGMT)) + goto nla_put_failure; stypes >>= 1; i++; } @@ -994,22 +1026,23 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, if (!nl_wowlan) goto nla_put_failure; - if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY); - if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT); - if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT); - if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED); - if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE); - if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST); - if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE); - if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE); + if (((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || + ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) && + nla_put_flag(msg, 
NL80211_WOWLAN_TRIG_DISCONNECT)) || + ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || + ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) || + ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || + ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || + ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || + ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) + goto nla_put_failure; if (dev->wiphy.wowlan.n_patterns) { struct nl80211_wowlan_pattern_support pat = { .max_patterns = dev->wiphy.wowlan.n_patterns, @@ -1018,8 +1051,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, .max_pattern_len = dev->wiphy.wowlan.pattern_max_len, }; - NLA_PUT(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, - sizeof(pat), &pat); + if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, + sizeof(pat), &pat)) + goto nla_put_failure; } nla_nest_end(msg, nl_wowlan); @@ -1032,16 +1066,20 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, if (nl80211_put_iface_combinations(&dev->wiphy, msg)) goto nla_put_failure; - if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) - NLA_PUT_U32(msg, NL80211_ATTR_DEVICE_AP_SME, - dev->wiphy.ap_sme_capa); + if ((dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) && + nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME, + dev->wiphy.ap_sme_capa)) + goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_ATTR_FEATURE_FLAGS, dev->wiphy.features); + if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS, + dev->wiphy.features)) + goto nla_put_failure; - if (dev->wiphy.ht_capa_mod_mask) - NLA_PUT(msg, NL80211_ATTR_HT_CAPABILITY_MASK, - sizeof(*dev->wiphy.ht_capa_mod_mask), - dev->wiphy.ht_capa_mod_mask); + if (dev->wiphy.ht_capa_mod_mask && + nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK, + sizeof(*dev->wiphy.ht_capa_mod_mask), + dev->wiphy.ht_capa_mod_mask)) + goto nla_put_failure; return genlmsg_end(msg, hdr); @@ -1104,17 +1142,20 @@ static const struct nla_policy txq_params_policy[NL80211_TXQ_ATTR_MAX + 1] = { static int parse_txq_params(struct nlattr *tb[], struct ieee80211_txq_params *txq_params) { - if (!tb[NL80211_TXQ_ATTR_QUEUE] || !tb[NL80211_TXQ_ATTR_TXOP] || + if (!tb[NL80211_TXQ_ATTR_AC] || !tb[NL80211_TXQ_ATTR_TXOP] || !tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] || !tb[NL80211_TXQ_ATTR_AIFS]) return -EINVAL; - txq_params->queue = nla_get_u8(tb[NL80211_TXQ_ATTR_QUEUE]); + txq_params->ac = nla_get_u8(tb[NL80211_TXQ_ATTR_AC]); txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]); txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]); txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]); txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]); + if (txq_params->ac >= NL80211_NUM_ACS) + return -EINVAL; + return 0; } @@ -1489,14 +1530,28 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags, if (!hdr) return -1; - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name); - NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype); + if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, 
dev->ifindex) || + nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) || + nla_put_u32(msg, NL80211_ATTR_IFTYPE, + dev->ieee80211_ptr->iftype) || + nla_put_u32(msg, NL80211_ATTR_GENERATION, + rdev->devlist_generation ^ + (cfg80211_rdev_list_generation << 2))) + goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, - rdev->devlist_generation ^ - (cfg80211_rdev_list_generation << 2)); + if (rdev->ops->get_channel) { + struct ieee80211_channel *chan; + enum nl80211_channel_type channel_type; + + chan = rdev->ops->get_channel(&rdev->wiphy, &channel_type); + if (chan && + (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, + chan->center_freq) || + nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, + channel_type))) + goto nla_put_failure; + } return genlmsg_end(msg, hdr); @@ -1794,35 +1849,34 @@ static void get_key_callback(void *c, struct key_params *params) struct nlattr *key; struct get_key_cookie *cookie = c; - if (params->key) - NLA_PUT(cookie->msg, NL80211_ATTR_KEY_DATA, - params->key_len, params->key); - - if (params->seq) - NLA_PUT(cookie->msg, NL80211_ATTR_KEY_SEQ, - params->seq_len, params->seq); - - if (params->cipher) - NLA_PUT_U32(cookie->msg, NL80211_ATTR_KEY_CIPHER, - params->cipher); + if ((params->key && + nla_put(cookie->msg, NL80211_ATTR_KEY_DATA, + params->key_len, params->key)) || + (params->seq && + nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ, + params->seq_len, params->seq)) || + (params->cipher && + nla_put_u32(cookie->msg, NL80211_ATTR_KEY_CIPHER, + params->cipher))) + goto nla_put_failure; key = nla_nest_start(cookie->msg, NL80211_ATTR_KEY); if (!key) goto nla_put_failure; - if (params->key) - NLA_PUT(cookie->msg, NL80211_KEY_DATA, - params->key_len, params->key); - - if (params->seq) - NLA_PUT(cookie->msg, NL80211_KEY_SEQ, - params->seq_len, params->seq); - - if (params->cipher) - NLA_PUT_U32(cookie->msg, NL80211_KEY_CIPHER, - params->cipher); + if ((params->key && + nla_put(cookie->msg, NL80211_KEY_DATA, + params->key_len, params->key)) || + (params->seq && + nla_put(cookie->msg, NL80211_KEY_SEQ, + params->seq_len, params->seq)) || + (params->cipher && + nla_put_u32(cookie->msg, NL80211_KEY_CIPHER, + params->cipher))) + goto nla_put_failure; - NLA_PUT_U8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx); + if (nla_put_u8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx)) + goto nla_put_failure; nla_nest_end(cookie->msg, key); @@ -1880,10 +1934,12 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) cookie.msg = msg; cookie.idx = key_idx; - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); - NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_idx); - if (mac_addr) - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); + if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || + nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_idx)) + goto nla_put_failure; + if (mac_addr && + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr)) + goto nla_put_failure; if (pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) @@ -2373,15 +2429,15 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */ bitrate = cfg80211_calculate_bitrate(info); - if (bitrate > 0) - NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate); - - if (info->flags & RATE_INFO_FLAGS_MCS) - NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS, info->mcs); - if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) - NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH); - if 
(info->flags & RATE_INFO_FLAGS_SHORT_GI) - NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI); + if ((bitrate > 0 && + nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate)) || + ((info->flags & RATE_INFO_FLAGS_MCS) && + nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs)) || + ((info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) && + nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH)) || + ((info->flags & RATE_INFO_FLAGS_SHORT_GI) && + nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI))) + goto nla_put_failure; nla_nest_end(msg, rate); return true; @@ -2403,43 +2459,50 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, if (!hdr) return -1; - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); - - NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, sinfo->generation); + if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) || + nla_put_u32(msg, NL80211_ATTR_GENERATION, sinfo->generation)) + goto nla_put_failure; sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO); if (!sinfoattr) goto nla_put_failure; - if (sinfo->filled & STATION_INFO_CONNECTED_TIME) - NLA_PUT_U32(msg, NL80211_STA_INFO_CONNECTED_TIME, - sinfo->connected_time); - if (sinfo->filled & STATION_INFO_INACTIVE_TIME) - NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME, - sinfo->inactive_time); - if (sinfo->filled & STATION_INFO_RX_BYTES) - NLA_PUT_U32(msg, NL80211_STA_INFO_RX_BYTES, - sinfo->rx_bytes); - if (sinfo->filled & STATION_INFO_TX_BYTES) - NLA_PUT_U32(msg, NL80211_STA_INFO_TX_BYTES, - sinfo->tx_bytes); - if (sinfo->filled & STATION_INFO_LLID) - NLA_PUT_U16(msg, NL80211_STA_INFO_LLID, - sinfo->llid); - if (sinfo->filled & STATION_INFO_PLID) - NLA_PUT_U16(msg, NL80211_STA_INFO_PLID, - sinfo->plid); - if (sinfo->filled & STATION_INFO_PLINK_STATE) - NLA_PUT_U8(msg, NL80211_STA_INFO_PLINK_STATE, - sinfo->plink_state); + if ((sinfo->filled & STATION_INFO_CONNECTED_TIME) && + nla_put_u32(msg, NL80211_STA_INFO_CONNECTED_TIME, + sinfo->connected_time)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_INACTIVE_TIME) && + nla_put_u32(msg, NL80211_STA_INFO_INACTIVE_TIME, + sinfo->inactive_time)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_RX_BYTES) && + nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES, + sinfo->rx_bytes)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_TX_BYTES) && + nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES, + sinfo->tx_bytes)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_LLID) && + nla_put_u16(msg, NL80211_STA_INFO_LLID, sinfo->llid)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_PLID) && + nla_put_u16(msg, NL80211_STA_INFO_PLID, sinfo->plid)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_PLINK_STATE) && + nla_put_u8(msg, NL80211_STA_INFO_PLINK_STATE, + sinfo->plink_state)) + goto nla_put_failure; switch (rdev->wiphy.signal_type) { case CFG80211_SIGNAL_TYPE_MBM: - if (sinfo->filled & STATION_INFO_SIGNAL) - NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL, - sinfo->signal); - if (sinfo->filled & STATION_INFO_SIGNAL_AVG) - NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG, - sinfo->signal_avg); + if ((sinfo->filled & STATION_INFO_SIGNAL) && + nla_put_u8(msg, NL80211_STA_INFO_SIGNAL, + sinfo->signal)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_SIGNAL_AVG) && + nla_put_u8(msg, NL80211_STA_INFO_SIGNAL_AVG, + sinfo->signal_avg)) + goto nla_put_failure; break; default: break; @@ -2454,49 +2517,60 @@ static int nl80211_send_station(struct 
sk_buff *msg, u32 pid, u32 seq, NL80211_STA_INFO_RX_BITRATE)) goto nla_put_failure; } - if (sinfo->filled & STATION_INFO_RX_PACKETS) - NLA_PUT_U32(msg, NL80211_STA_INFO_RX_PACKETS, - sinfo->rx_packets); - if (sinfo->filled & STATION_INFO_TX_PACKETS) - NLA_PUT_U32(msg, NL80211_STA_INFO_TX_PACKETS, - sinfo->tx_packets); - if (sinfo->filled & STATION_INFO_TX_RETRIES) - NLA_PUT_U32(msg, NL80211_STA_INFO_TX_RETRIES, - sinfo->tx_retries); - if (sinfo->filled & STATION_INFO_TX_FAILED) - NLA_PUT_U32(msg, NL80211_STA_INFO_TX_FAILED, - sinfo->tx_failed); - if (sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT) - NLA_PUT_U32(msg, NL80211_STA_INFO_BEACON_LOSS, - sinfo->beacon_loss_count); + if ((sinfo->filled & STATION_INFO_RX_PACKETS) && + nla_put_u32(msg, NL80211_STA_INFO_RX_PACKETS, + sinfo->rx_packets)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_TX_PACKETS) && + nla_put_u32(msg, NL80211_STA_INFO_TX_PACKETS, + sinfo->tx_packets)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_TX_RETRIES) && + nla_put_u32(msg, NL80211_STA_INFO_TX_RETRIES, + sinfo->tx_retries)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_TX_FAILED) && + nla_put_u32(msg, NL80211_STA_INFO_TX_FAILED, + sinfo->tx_failed)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT) && + nla_put_u32(msg, NL80211_STA_INFO_BEACON_LOSS, + sinfo->beacon_loss_count)) + goto nla_put_failure; if (sinfo->filled & STATION_INFO_BSS_PARAM) { bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM); if (!bss_param) goto nla_put_failure; - if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT) - NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_CTS_PROT); - if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE) - NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE); - if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME) - NLA_PUT_FLAG(msg, - NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME); - NLA_PUT_U8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD, - sinfo->bss_param.dtim_period); - NLA_PUT_U16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL, - sinfo->bss_param.beacon_interval); + if (((sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT) && + nla_put_flag(msg, NL80211_STA_BSS_PARAM_CTS_PROT)) || + ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE) && + nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE)) || + ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME) && + nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME)) || + nla_put_u8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD, + sinfo->bss_param.dtim_period) || + nla_put_u16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL, + sinfo->bss_param.beacon_interval)) + goto nla_put_failure; nla_nest_end(msg, bss_param); } - if (sinfo->filled & STATION_INFO_STA_FLAGS) - NLA_PUT(msg, NL80211_STA_INFO_STA_FLAGS, - sizeof(struct nl80211_sta_flag_update), - &sinfo->sta_flags); + if ((sinfo->filled & STATION_INFO_STA_FLAGS) && + nla_put(msg, NL80211_STA_INFO_STA_FLAGS, + sizeof(struct nl80211_sta_flag_update), + &sinfo->sta_flags)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_T_OFFSET) && + nla_put_u64(msg, NL80211_STA_INFO_T_OFFSET, + sinfo->t_offset)) + goto nla_put_failure; nla_nest_end(msg, sinfoattr); - if (sinfo->filled & STATION_INFO_ASSOC_REQ_IES) - NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len, - sinfo->assoc_req_ies); + if ((sinfo->filled & STATION_INFO_ASSOC_REQ_IES) && + nla_put(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len, + sinfo->assoc_req_ies)) + goto nla_put_failure; return genlmsg_end(msg, hdr); @@ -2918,36 
+2992,37 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq, if (!hdr) return -1; - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst); - NLA_PUT(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop); - - NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, pinfo->generation); + if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dst) || + nla_put(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop) || + nla_put_u32(msg, NL80211_ATTR_GENERATION, pinfo->generation)) + goto nla_put_failure; pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO); if (!pinfoattr) goto nla_put_failure; - if (pinfo->filled & MPATH_INFO_FRAME_QLEN) - NLA_PUT_U32(msg, NL80211_MPATH_INFO_FRAME_QLEN, - pinfo->frame_qlen); - if (pinfo->filled & MPATH_INFO_SN) - NLA_PUT_U32(msg, NL80211_MPATH_INFO_SN, - pinfo->sn); - if (pinfo->filled & MPATH_INFO_METRIC) - NLA_PUT_U32(msg, NL80211_MPATH_INFO_METRIC, - pinfo->metric); - if (pinfo->filled & MPATH_INFO_EXPTIME) - NLA_PUT_U32(msg, NL80211_MPATH_INFO_EXPTIME, - pinfo->exptime); - if (pinfo->filled & MPATH_INFO_FLAGS) - NLA_PUT_U8(msg, NL80211_MPATH_INFO_FLAGS, - pinfo->flags); - if (pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT) - NLA_PUT_U32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT, - pinfo->discovery_timeout); - if (pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES) - NLA_PUT_U8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES, - pinfo->discovery_retries); + if ((pinfo->filled & MPATH_INFO_FRAME_QLEN) && + nla_put_u32(msg, NL80211_MPATH_INFO_FRAME_QLEN, + pinfo->frame_qlen)) + goto nla_put_failure; + if (((pinfo->filled & MPATH_INFO_SN) && + nla_put_u32(msg, NL80211_MPATH_INFO_SN, pinfo->sn)) || + ((pinfo->filled & MPATH_INFO_METRIC) && + nla_put_u32(msg, NL80211_MPATH_INFO_METRIC, + pinfo->metric)) || + ((pinfo->filled & MPATH_INFO_EXPTIME) && + nla_put_u32(msg, NL80211_MPATH_INFO_EXPTIME, + pinfo->exptime)) || + ((pinfo->filled & MPATH_INFO_FLAGS) && + nla_put_u8(msg, NL80211_MPATH_INFO_FLAGS, + pinfo->flags)) || + ((pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT) && + nla_put_u32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT, + pinfo->discovery_timeout)) || + ((pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES) && + nla_put_u8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES, + pinfo->discovery_retries))) + goto nla_put_failure; nla_nest_end(msg, pinfoattr); @@ -3273,47 +3348,52 @@ static int nl80211_get_mesh_config(struct sk_buff *skb, pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG); if (!pinfoattr) goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); - NLA_PUT_U16(msg, NL80211_MESHCONF_RETRY_TIMEOUT, - cur_params.dot11MeshRetryTimeout); - NLA_PUT_U16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT, - cur_params.dot11MeshConfirmTimeout); - NLA_PUT_U16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT, - cur_params.dot11MeshHoldingTimeout); - NLA_PUT_U16(msg, NL80211_MESHCONF_MAX_PEER_LINKS, - cur_params.dot11MeshMaxPeerLinks); - NLA_PUT_U8(msg, NL80211_MESHCONF_MAX_RETRIES, - cur_params.dot11MeshMaxRetries); - NLA_PUT_U8(msg, NL80211_MESHCONF_TTL, - cur_params.dot11MeshTTL); - NLA_PUT_U8(msg, NL80211_MESHCONF_ELEMENT_TTL, - cur_params.element_ttl); - NLA_PUT_U8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS, - cur_params.auto_open_plinks); - NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, - cur_params.dot11MeshHWMPmaxPREQretries); - NLA_PUT_U32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME, - cur_params.path_refresh_time); - NLA_PUT_U16(msg, 
NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, - cur_params.min_discovery_timeout); - NLA_PUT_U32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, - cur_params.dot11MeshHWMPactivePathTimeout); - NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, - cur_params.dot11MeshHWMPpreqMinInterval); - NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, - cur_params.dot11MeshHWMPperrMinInterval); - NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, - cur_params.dot11MeshHWMPnetDiameterTraversalTime); - NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE, - cur_params.dot11MeshHWMPRootMode); - NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL, - cur_params.dot11MeshHWMPRannInterval); - NLA_PUT_U8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS, - cur_params.dot11MeshGateAnnouncementProtocol); - NLA_PUT_U8(msg, NL80211_MESHCONF_FORWARDING, - cur_params.dot11MeshForwarding); - NLA_PUT_U32(msg, NL80211_MESHCONF_RSSI_THRESHOLD, - cur_params.rssi_threshold); + if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || + nla_put_u16(msg, NL80211_MESHCONF_RETRY_TIMEOUT, + cur_params.dot11MeshRetryTimeout) || + nla_put_u16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT, + cur_params.dot11MeshConfirmTimeout) || + nla_put_u16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT, + cur_params.dot11MeshHoldingTimeout) || + nla_put_u16(msg, NL80211_MESHCONF_MAX_PEER_LINKS, + cur_params.dot11MeshMaxPeerLinks) || + nla_put_u8(msg, NL80211_MESHCONF_MAX_RETRIES, + cur_params.dot11MeshMaxRetries) || + nla_put_u8(msg, NL80211_MESHCONF_TTL, + cur_params.dot11MeshTTL) || + nla_put_u8(msg, NL80211_MESHCONF_ELEMENT_TTL, + cur_params.element_ttl) || + nla_put_u8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS, + cur_params.auto_open_plinks) || + nla_put_u32(msg, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, + cur_params.dot11MeshNbrOffsetMaxNeighbor) || + nla_put_u8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, + cur_params.dot11MeshHWMPmaxPREQretries) || + nla_put_u32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME, + cur_params.path_refresh_time) || + nla_put_u16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, + cur_params.min_discovery_timeout) || + nla_put_u32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, + cur_params.dot11MeshHWMPactivePathTimeout) || + nla_put_u16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, + cur_params.dot11MeshHWMPpreqMinInterval) || + nla_put_u16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, + cur_params.dot11MeshHWMPperrMinInterval) || + nla_put_u16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, + cur_params.dot11MeshHWMPnetDiameterTraversalTime) || + nla_put_u8(msg, NL80211_MESHCONF_HWMP_ROOTMODE, + cur_params.dot11MeshHWMPRootMode) || + nla_put_u16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL, + cur_params.dot11MeshHWMPRannInterval) || + nla_put_u8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS, + cur_params.dot11MeshGateAnnouncementProtocol) || + nla_put_u8(msg, NL80211_MESHCONF_FORWARDING, + cur_params.dot11MeshForwarding) || + nla_put_u32(msg, NL80211_MESHCONF_RSSI_THRESHOLD, + cur_params.rssi_threshold) || + nla_put_u32(msg, NL80211_MESHCONF_HT_OPMODE, + cur_params.ht_opmode)) + goto nla_put_failure; nla_nest_end(msg, pinfoattr); genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); @@ -3334,6 +3414,7 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A [NL80211_MESHCONF_TTL] = { .type = NLA_U8 }, [NL80211_MESHCONF_ELEMENT_TTL] = { .type = NLA_U8 }, [NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 }, + [NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] = { .type = NLA_U32 }, 
[NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 }, [NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 }, @@ -3347,10 +3428,12 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A [NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 }, [NL80211_MESHCONF_FORWARDING] = { .type = NLA_U8 }, [NL80211_MESHCONF_RSSI_THRESHOLD] = { .type = NLA_U32}, + [NL80211_MESHCONF_HT_OPMODE] = { .type = NLA_U16}, }; static const struct nla_policy nl80211_mesh_setup_params_policy[NL80211_MESH_SETUP_ATTR_MAX+1] = { + [NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG }, @@ -3403,6 +3486,9 @@ do {\ mask, NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nla_get_u8); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor, + mask, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, + nla_get_u32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, nla_get_u8); @@ -3440,6 +3526,8 @@ do {\ mask, NL80211_MESHCONF_FORWARDING, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, mask, NL80211_MESHCONF_RSSI_THRESHOLD, nla_get_u32); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, + mask, NL80211_MESHCONF_HT_OPMODE, nla_get_u16); if (mask_out) *mask_out = mask; @@ -3460,6 +3548,12 @@ static int nl80211_parse_mesh_setup(struct genl_info *info, nl80211_mesh_setup_params_policy)) return -EINVAL; + if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC]) + setup->sync_method = + (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC])) ? + IEEE80211_SYNC_METHOD_VENDOR : + IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET; + if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL]) setup->path_sel_proto = (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])) ? 
@@ -3544,11 +3638,12 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info) if (!hdr) goto put_failure; - NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, - cfg80211_regdomain->alpha2); - if (cfg80211_regdomain->dfs_region) - NLA_PUT_U8(msg, NL80211_ATTR_DFS_REGION, - cfg80211_regdomain->dfs_region); + if (nla_put_string(msg, NL80211_ATTR_REG_ALPHA2, + cfg80211_regdomain->alpha2) || + (cfg80211_regdomain->dfs_region && + nla_put_u8(msg, NL80211_ATTR_DFS_REGION, + cfg80211_regdomain->dfs_region))) + goto nla_put_failure; nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES); if (!nl_reg_rules) @@ -3568,18 +3663,19 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info) if (!nl_reg_rule) goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_ATTR_REG_RULE_FLAGS, - reg_rule->flags); - NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_START, - freq_range->start_freq_khz); - NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_END, - freq_range->end_freq_khz); - NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW, - freq_range->max_bandwidth_khz); - NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN, - power_rule->max_antenna_gain); - NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP, - power_rule->max_eirp); + if (nla_put_u32(msg, NL80211_ATTR_REG_RULE_FLAGS, + reg_rule->flags) || + nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_START, + freq_range->start_freq_khz) || + nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_END, + freq_range->end_freq_khz) || + nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW, + freq_range->max_bandwidth_khz) || + nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN, + power_rule->max_antenna_gain) || + nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP, + power_rule->max_eirp)) + goto nla_put_failure; nla_nest_end(msg, nl_reg_rule); } @@ -4150,37 +4246,44 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, genl_dump_check_consistent(cb, hdr, &nl80211_fam); - NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex); + if (nla_put_u32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) + goto nla_put_failure; bss = nla_nest_start(msg, NL80211_ATTR_BSS); if (!bss) goto nla_put_failure; - if (!is_zero_ether_addr(res->bssid)) - NLA_PUT(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid); - if (res->information_elements && res->len_information_elements) - NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS, - res->len_information_elements, - res->information_elements); - if (res->beacon_ies && res->len_beacon_ies && - res->beacon_ies != res->information_elements) - NLA_PUT(msg, NL80211_BSS_BEACON_IES, - res->len_beacon_ies, res->beacon_ies); - if (res->tsf) - NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf); - if (res->beacon_interval) - NLA_PUT_U16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval); - NLA_PUT_U16(msg, NL80211_BSS_CAPABILITY, res->capability); - NLA_PUT_U32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq); - NLA_PUT_U32(msg, NL80211_BSS_SEEN_MS_AGO, - jiffies_to_msecs(jiffies - intbss->ts)); + if ((!is_zero_ether_addr(res->bssid) && + nla_put(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid)) || + (res->information_elements && res->len_information_elements && + nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS, + res->len_information_elements, + res->information_elements)) || + (res->beacon_ies && res->len_beacon_ies && + res->beacon_ies != res->information_elements && + nla_put(msg, 
NL80211_BSS_BEACON_IES, + res->len_beacon_ies, res->beacon_ies))) + goto nla_put_failure; + if (res->tsf && + nla_put_u64(msg, NL80211_BSS_TSF, res->tsf)) + goto nla_put_failure; + if (res->beacon_interval && + nla_put_u16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval)) + goto nla_put_failure; + if (nla_put_u16(msg, NL80211_BSS_CAPABILITY, res->capability) || + nla_put_u32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq) || + nla_put_u32(msg, NL80211_BSS_SEEN_MS_AGO, + jiffies_to_msecs(jiffies - intbss->ts))) + goto nla_put_failure; switch (rdev->wiphy.signal_type) { case CFG80211_SIGNAL_TYPE_MBM: - NLA_PUT_U32(msg, NL80211_BSS_SIGNAL_MBM, res->signal); + if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal)) + goto nla_put_failure; break; case CFG80211_SIGNAL_TYPE_UNSPEC: - NLA_PUT_U8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal); + if (nla_put_u8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal)) + goto nla_put_failure; break; default: break; @@ -4189,14 +4292,16 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, switch (wdev->iftype) { case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_STATION: - if (intbss == wdev->current_bss) - NLA_PUT_U32(msg, NL80211_BSS_STATUS, - NL80211_BSS_STATUS_ASSOCIATED); + if (intbss == wdev->current_bss && + nla_put_u32(msg, NL80211_BSS_STATUS, + NL80211_BSS_STATUS_ASSOCIATED)) + goto nla_put_failure; break; case NL80211_IFTYPE_ADHOC: - if (intbss == wdev->current_bss) - NLA_PUT_U32(msg, NL80211_BSS_STATUS, - NL80211_BSS_STATUS_IBSS_JOINED); + if (intbss == wdev->current_bss && + nla_put_u32(msg, NL80211_BSS_STATUS, + NL80211_BSS_STATUS_IBSS_JOINED)) + goto nla_put_failure; break; default: break; @@ -4265,34 +4370,43 @@ static int nl80211_send_survey(struct sk_buff *msg, u32 pid, u32 seq, if (!hdr) return -ENOMEM; - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); + if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex)) + goto nla_put_failure; infoattr = nla_nest_start(msg, NL80211_ATTR_SURVEY_INFO); if (!infoattr) goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_SURVEY_INFO_FREQUENCY, - survey->channel->center_freq); - if (survey->filled & SURVEY_INFO_NOISE_DBM) - NLA_PUT_U8(msg, NL80211_SURVEY_INFO_NOISE, - survey->noise); - if (survey->filled & SURVEY_INFO_IN_USE) - NLA_PUT_FLAG(msg, NL80211_SURVEY_INFO_IN_USE); - if (survey->filled & SURVEY_INFO_CHANNEL_TIME) - NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME, - survey->channel_time); - if (survey->filled & SURVEY_INFO_CHANNEL_TIME_BUSY) - NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY, - survey->channel_time_busy); - if (survey->filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY) - NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY, - survey->channel_time_ext_busy); - if (survey->filled & SURVEY_INFO_CHANNEL_TIME_RX) - NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_RX, - survey->channel_time_rx); - if (survey->filled & SURVEY_INFO_CHANNEL_TIME_TX) - NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_TX, - survey->channel_time_tx); + if (nla_put_u32(msg, NL80211_SURVEY_INFO_FREQUENCY, + survey->channel->center_freq)) + goto nla_put_failure; + + if ((survey->filled & SURVEY_INFO_NOISE_DBM) && + nla_put_u8(msg, NL80211_SURVEY_INFO_NOISE, survey->noise)) + goto nla_put_failure; + if ((survey->filled & SURVEY_INFO_IN_USE) && + nla_put_flag(msg, NL80211_SURVEY_INFO_IN_USE)) + goto nla_put_failure; + if ((survey->filled & SURVEY_INFO_CHANNEL_TIME) && + nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME, + survey->channel_time)) + goto 
nla_put_failure; + if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_BUSY) && + nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY, + survey->channel_time_busy)) + goto nla_put_failure; + if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY) && + nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY, + survey->channel_time_ext_busy)) + goto nla_put_failure; + if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_RX) && + nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_RX, + survey->channel_time_rx)) + goto nla_put_failure; + if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_TX) && + nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_TX, + survey->channel_time_tx)) + goto nla_put_failure; nla_nest_end(msg, infoattr); @@ -4973,7 +5087,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb, NL80211_CMD_TESTMODE); struct nlattr *tmdata; - if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx) < 0) { + if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) { genlmsg_cancel(skb, hdr); break; } @@ -5024,7 +5138,8 @@ __cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev, return NULL; } - NLA_PUT_U32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx)) + goto nla_put_failure; data = nla_nest_start(skb, NL80211_ATTR_TESTDATA); ((void **)skb->cb)[0] = rdev; @@ -5403,7 +5518,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb, if (err) goto free_msg; - NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); + if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -5545,6 +5661,9 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb, sband, nla_data(tb[NL80211_TXRATE_LEGACY]), nla_len(tb[NL80211_TXRATE_LEGACY])); + if ((mask.control[band].legacy == 0) && + nla_len(tb[NL80211_TXRATE_LEGACY])) + return -EINVAL; } if (tb[NL80211_TXRATE_MCS]) { if (!ht_rateset_to_mask( @@ -5690,7 +5809,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) goto free_msg; if (msg) { - NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); + if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) + goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); @@ -5795,7 +5915,8 @@ static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info) else ps_state = NL80211_PS_DISABLED; - NLA_PUT_U32(msg, NL80211_ATTR_PS_STATE, ps_state); + if (nla_put_u32(msg, NL80211_ATTR_PS_STATE, ps_state)) + goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); @@ -5942,20 +6063,21 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info) if (!nl_wowlan) goto nla_put_failure; - if (rdev->wowlan->any) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY); - if (rdev->wowlan->disconnect) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT); - if (rdev->wowlan->magic_pkt) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT); - if (rdev->wowlan->gtk_rekey_failure) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE); - if (rdev->wowlan->eap_identity_req) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST); - if (rdev->wowlan->four_way_handshake) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE); - if (rdev->wowlan->rfkill_release) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE); + if ((rdev->wowlan->any && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || + (rdev->wowlan->disconnect && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) || + (rdev->wowlan->magic_pkt && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || + 
(rdev->wowlan->gtk_rekey_failure && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || + (rdev->wowlan->eap_identity_req && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || + (rdev->wowlan->four_way_handshake && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || + (rdev->wowlan->rfkill_release && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) + goto nla_put_failure; if (rdev->wowlan->n_patterns) { struct nlattr *nl_pats, *nl_pat; int i, pat_len; @@ -5970,12 +6092,13 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info) if (!nl_pat) goto nla_put_failure; pat_len = rdev->wowlan->patterns[i].pattern_len; - NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_MASK, - DIV_ROUND_UP(pat_len, 8), - rdev->wowlan->patterns[i].mask); - NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_PATTERN, - pat_len, - rdev->wowlan->patterns[i].pattern); + if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK, + DIV_ROUND_UP(pat_len, 8), + rdev->wowlan->patterns[i].mask) || + nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN, + pat_len, + rdev->wowlan->patterns[i].pattern)) + goto nla_put_failure; nla_nest_end(msg, nl_pat); } nla_nest_end(msg, nl_pats); @@ -6000,6 +6123,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) struct cfg80211_wowlan new_triggers = {}; struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan; int err, i; + bool prev_enabled = rdev->wowlan; if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns) return -EOPNOTSUPP; @@ -6132,6 +6256,9 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) rdev->wowlan = NULL; } + if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan) + rdev->ops->set_wakeup(&rdev->wiphy, rdev->wowlan); + return 0; error: for (i = 0; i < new_triggers.n_patterns; i++) @@ -6248,7 +6375,8 @@ static int nl80211_probe_client(struct sk_buff *skb, if (err) goto free_msg; - NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); + if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -6916,19 +7044,24 @@ static int nl80211_add_scan_req(struct sk_buff *msg, nest = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS); if (!nest) goto nla_put_failure; - for (i = 0; i < req->n_ssids; i++) - NLA_PUT(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid); + for (i = 0; i < req->n_ssids; i++) { + if (nla_put(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid)) + goto nla_put_failure; + } nla_nest_end(msg, nest); nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES); if (!nest) goto nla_put_failure; - for (i = 0; i < req->n_channels; i++) - NLA_PUT_U32(msg, i, req->channels[i]->center_freq); + for (i = 0; i < req->n_channels; i++) { + if (nla_put_u32(msg, i, req->channels[i]->center_freq)) + goto nla_put_failure; + } nla_nest_end(msg, nest); - if (req->ie) - NLA_PUT(msg, NL80211_ATTR_IE, req->ie_len, req->ie); + if (req->ie && + nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie)) + goto nla_put_failure; return 0; nla_put_failure: @@ -6947,8 +7080,9 @@ static int nl80211_send_scan_msg(struct sk_buff *msg, if (!hdr) return -1; - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) + goto nla_put_failure; /* ignore errors and send incomplete event anyway */ nl80211_add_scan_req(msg, rdev); @@ -6972,8 +7106,9 @@ nl80211_send_sched_scan_msg(struct sk_buff *msg, if (!hdr) return -1; - 
NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) + goto nla_put_failure; return genlmsg_end(msg, hdr); @@ -7096,26 +7231,33 @@ void nl80211_send_reg_change_event(struct regulatory_request *request) } /* Userspace can always count this one always being set */ - NLA_PUT_U8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator); - - if (request->alpha2[0] == '0' && request->alpha2[1] == '0') - NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, - NL80211_REGDOM_TYPE_WORLD); - else if (request->alpha2[0] == '9' && request->alpha2[1] == '9') - NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, - NL80211_REGDOM_TYPE_CUSTOM_WORLD); - else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') || - request->intersect) - NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, - NL80211_REGDOM_TYPE_INTERSECTION); - else { - NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, - NL80211_REGDOM_TYPE_COUNTRY); - NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, request->alpha2); - } - - if (wiphy_idx_valid(request->wiphy_idx)) - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx); + if (nla_put_u8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator)) + goto nla_put_failure; + + if (request->alpha2[0] == '0' && request->alpha2[1] == '0') { + if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE, + NL80211_REGDOM_TYPE_WORLD)) + goto nla_put_failure; + } else if (request->alpha2[0] == '9' && request->alpha2[1] == '9') { + if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE, + NL80211_REGDOM_TYPE_CUSTOM_WORLD)) + goto nla_put_failure; + } else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') || + request->intersect) { + if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE, + NL80211_REGDOM_TYPE_INTERSECTION)) + goto nla_put_failure; + } else { + if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE, + NL80211_REGDOM_TYPE_COUNTRY) || + nla_put_string(msg, NL80211_ATTR_REG_ALPHA2, + request->alpha2)) + goto nla_put_failure; + } + + if (wiphy_idx_valid(request->wiphy_idx) && + nla_put_u32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7149,9 +7291,10 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put(msg, NL80211_ATTR_FRAME, len, buf)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7229,10 +7372,11 @@ static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7280,15 +7424,15 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - if (bssid) - NLA_PUT(msg, NL80211_ATTR_MAC, 
ETH_ALEN, bssid); - NLA_PUT_U16(msg, NL80211_ATTR_STATUS_CODE, status); - if (req_ie) - NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie); - if (resp_ie) - NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) || + nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, status) || + (req_ie && + nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) || + (resp_ie && + nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie))) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7320,13 +7464,14 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); - if (req_ie) - NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie); - if (resp_ie) - NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid) || + (req_ie && + nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) || + (resp_ie && + nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie))) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7357,14 +7502,14 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - if (from_ap && reason) - NLA_PUT_U16(msg, NL80211_ATTR_REASON_CODE, reason); - if (from_ap) - NLA_PUT_FLAG(msg, NL80211_ATTR_DISCONNECTED_BY_AP); - if (ie) - NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + (from_ap && reason && + nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason)) || + (from_ap && + nla_put_flag(msg, NL80211_ATTR_DISCONNECTED_BY_AP)) || + (ie && nla_put(msg, NL80211_ATTR_IE, ie_len, ie))) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7395,9 +7540,10 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7428,11 +7574,12 @@ void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr); - if (ie_len && ie) - NLA_PUT(msg, NL80211_ATTR_IE, ie_len , ie); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr) || + (ie_len && ie && + nla_put(msg, NL80211_ATTR_IE, ie_len , ie))) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7463,15 +7610,14 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - 
NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - if (addr) - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); - NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type); - if (key_id != -1) - NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id); - if (tsc) - NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + (addr && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) || + nla_put_u32(msg, NL80211_ATTR_KEY_TYPE, key_type) || + (key_id != -1 && + nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_id)) || + (tsc && nla_put(msg, NL80211_ATTR_KEY_SEQ, 6, tsc))) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7506,7 +7652,8 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy, * Since we are applying the beacon hint to a wiphy we know its * wiphy_idx is valid */ - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy)); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy))) + goto nla_put_failure; /* Before */ nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE); @@ -7558,14 +7705,16 @@ static void nl80211_send_remain_on_chan_event( return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq); - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type); - NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) || + nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type) || + nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) + goto nla_put_failure; - if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL) - NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration); + if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL && + nla_put_u32(msg, NL80211_ATTR_DURATION, duration)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7636,8 +7785,9 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); + if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7673,9 +7823,10 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd, return true; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) + goto nla_put_failure; err = genlmsg_end(msg, hdr); if (err < 0) { @@ -7724,12 +7875,13 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, return -ENOMEM; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq); - if (sig_dbm) - NLA_PUT_U32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm); - NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) || + (sig_dbm && + nla_put_u32(msg, 
NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) || + nla_put(msg, NL80211_ATTR_FRAME, len, buf)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7759,12 +7911,12 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); - NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); - if (ack) - NLA_PUT_FLAG(msg, NL80211_ATTR_ACK); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put(msg, NL80211_ATTR_FRAME, len, buf) || + nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) || + (ack && nla_put_flag(msg, NL80211_ATTR_ACK))) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7796,15 +7948,17 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) + goto nla_put_failure; pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM); if (!pinfoattr) goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT, - rssi_event); + if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT, + rssi_event)) + goto nla_put_failure; nla_nest_end(msg, pinfoattr); @@ -7837,16 +7991,18 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) + goto nla_put_failure; rekey_attr = nla_nest_start(msg, NL80211_ATTR_REKEY_DATA); if (!rekey_attr) goto nla_put_failure; - NLA_PUT(msg, NL80211_REKEY_DATA_REPLAY_CTR, - NL80211_REPLAY_CTR_LEN, replay_ctr); + if (nla_put(msg, NL80211_REKEY_DATA_REPLAY_CTR, + NL80211_REPLAY_CTR_LEN, replay_ctr)) + goto nla_put_failure; nla_nest_end(msg, rekey_attr); @@ -7879,17 +8035,19 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) + goto nla_put_failure; attr = nla_nest_start(msg, NL80211_ATTR_PMKSA_CANDIDATE); if (!attr) goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index); - NLA_PUT(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid); - if (preauth) - NLA_PUT_FLAG(msg, NL80211_PMKSA_CANDIDATE_PREAUTH); + if (nla_put_u32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index) || + nla_put(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid) || + (preauth && + nla_put_flag(msg, NL80211_PMKSA_CANDIDATE_PREAUTH))) + goto nla_put_failure; nla_nest_end(msg, attr); @@ -7904,6 +8062,39 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev, nlmsg_free(msg); } +void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, + struct net_device *netdev, int freq, + enum nl80211_channel_type type, gfp_t gfp) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_GOODSIZE, gfp); + if (!msg) + 
return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CH_SWITCH_NOTIFY); + if (!hdr) { + nlmsg_free(msg); + return; + } + + if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) || + nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, type)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, + nl80211_mlme_mcgrp.id, gfp); + return; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); +} + void nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *peer, @@ -7923,15 +8114,17 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, peer); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer)) + goto nla_put_failure; pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM); if (!pinfoattr) goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets); + if (nla_put_u32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets)) + goto nla_put_failure; nla_nest_end(msg, pinfoattr); @@ -7965,12 +8158,12 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); - NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); - if (acked) - NLA_PUT_FLAG(msg, NL80211_ATTR_ACK); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) || + nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) || + (acked && nla_put_flag(msg, NL80211_ATTR_ACK))) + goto nla_put_failure; err = genlmsg_end(msg, hdr); if (err < 0) { @@ -8010,12 +8203,13 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - if (freq) - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq); - if (sig_dbm) - NLA_PUT_U32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm); - NLA_PUT(msg, NL80211_ATTR_FRAME, len, frame); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + (freq && + nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq)) || + (sig_dbm && + nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) || + nla_put(msg, NL80211_ATTR_FRAME, len, frame)) + goto nla_put_failure; genlmsg_end(msg, hdr); diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index 4ffe50df9f3..01a1122c3b3 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h @@ -118,6 +118,10 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev, struct net_device *netdev, int index, const u8 *bssid, bool preauth, gfp_t gfp); +void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, + struct net_device *dev, int freq, + enum nl80211_channel_type type, gfp_t gfp); + bool nl80211_unexpected_frame(struct net_device *dev, const u8 *addr, gfp_t gfp); bool nl80211_unexpected_4addr_frame(struct net_device *dev, diff --git a/net/wireless/reg.c b/net/wireless/reg.c index e9a0ac83b84..15f347477a9 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -388,7 +388,15 @@ static void 
reg_regdb_query(const char *alpha2) schedule_work(®_regdb_work); } + +/* Feel free to add any other sanity checks here */ +static void reg_regdb_size_check(void) +{ + /* We should ideally BUILD_BUG_ON() but then random builds would fail */ + WARN_ONCE(!reg_regdb_size, "db.txt is empty, you should update it..."); +} #else +static inline void reg_regdb_size_check(void) {} static inline void reg_regdb_query(const char *alpha2) {} #endif /* CONFIG_CFG80211_INTERNAL_REGDB */ @@ -2322,6 +2330,8 @@ int __init regulatory_init(void) spin_lock_init(®_requests_lock); spin_lock_init(®_pending_beacons_lock); + reg_regdb_size_check(); + cfg80211_regdomain = cfg80211_world_regdom; user_alpha2[0] = '9'; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 70faadf16a3..af2b1caa37f 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -18,7 +18,7 @@ #include "nl80211.h" #include "wext-compat.h" -#define IEEE80211_SCAN_RESULT_EXPIRE (15 * HZ) +#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ) void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) { @@ -281,7 +281,7 @@ static bool is_bss(struct cfg80211_bss *a, { const u8 *ssidie; - if (bssid && compare_ether_addr(a->bssid, bssid)) + if (bssid && !ether_addr_equal(a->bssid, bssid)) return false; if (!ssid) @@ -378,7 +378,11 @@ static int cmp_bss_core(struct cfg80211_bss *a, b->len_information_elements); } - return memcmp(a->bssid, b->bssid, ETH_ALEN); + /* + * we can't use compare_ether_addr here since we need a < > operator. + * The binary return value of compare_ether_addr isn't enough + */ + return memcmp(a->bssid, b->bssid, sizeof(a->bssid)); } static int cmp_bss(struct cfg80211_bss *a, diff --git a/net/wireless/util.c b/net/wireless/util.c index 957f2562161..1cd255892a4 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -370,7 +370,7 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, iftype != NL80211_IFTYPE_P2P_CLIENT && iftype != NL80211_IFTYPE_MESH_POINT) || (is_multicast_ether_addr(dst) && - !compare_ether_addr(src, addr))) + ether_addr_equal(src, addr))) return -1; if (iftype == NL80211_IFTYPE_MESH_POINT) { struct ieee80211s_hdr *meshdr = @@ -398,9 +398,9 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, payload = skb->data + hdrlen; ethertype = (payload[6] << 8) | payload[7]; - if (likely((compare_ether_addr(payload, rfc1042_header) == 0 && + if (likely((ether_addr_equal(payload, rfc1042_header) && ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || - compare_ether_addr(payload, bridge_tunnel_header) == 0)) { + ether_addr_equal(payload, bridge_tunnel_header))) { /* remove RFC1042 or Bridge-Tunnel encapsulation and * replace EtherType */ skb_pull(skb, hdrlen + 6); @@ -609,10 +609,9 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, payload = frame->data; ethertype = (payload[6] << 8) | payload[7]; - if (likely((compare_ether_addr(payload, rfc1042_header) == 0 && + if (likely((ether_addr_equal(payload, rfc1042_header) && ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || - compare_ether_addr(payload, - bridge_tunnel_header) == 0)) { + ether_addr_equal(payload, bridge_tunnel_header))) { /* remove RFC1042 or Bridge-Tunnel * encapsulation and replace EtherType */ skb_pull(frame, 6); @@ -946,13 +945,6 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, if (rdev->wiphy.software_iftypes & BIT(iftype)) return 0; - /* - * Drivers will gradually all set this flag, until all - * have it we only enforce for those that set 
it. - */ - if (!(rdev->wiphy.flags & WIPHY_FLAG_ENFORCE_COMBINATIONS)) - return 0; - memset(num, 0, sizeof(num)); num[iftype] = 1; @@ -972,6 +964,9 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, } mutex_unlock(&rdev->devlist_mtx); + if (total == 1) + return 0; + for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) { const struct ieee80211_iface_combination *c; struct ieee80211_iface_limit *limits; diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 3c24eb97e9d..6a6181a673c 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -821,6 +821,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev, struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct ieee80211_channel *chan; + enum nl80211_channel_type channel_type; switch (wdev->iftype) { case NL80211_IFTYPE_STATION: @@ -831,7 +832,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev, if (!rdev->ops->get_channel) return -EINVAL; - chan = rdev->ops->get_channel(wdev->wiphy); + chan = rdev->ops->get_channel(wdev->wiphy, &channel_type); if (!chan) return -EINVAL; freq->m = chan->center_freq; diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index af648e08e61..b0eb7aa49b6 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -256,7 +256,7 @@ static const struct iw_ioctl_description standard_ioctl[] = { .max_tokens = sizeof(struct iw_pmksa), }, }; -static const unsigned standard_ioctl_num = ARRAY_SIZE(standard_ioctl); +static const unsigned int standard_ioctl_num = ARRAY_SIZE(standard_ioctl); /* * Meta-data about all the additional standard Wireless Extension events @@ -306,7 +306,7 @@ static const struct iw_ioctl_description standard_event[] = { .max_tokens = sizeof(struct iw_pmkid_cand), }, }; -static const unsigned standard_event_num = ARRAY_SIZE(standard_event); +static const unsigned int standard_event_num = ARRAY_SIZE(standard_event); /* Size (in bytes) of various events */ static const int event_type_size[] = { @@ -402,7 +402,8 @@ static struct nlmsghdr *rtnetlink_ifinfo_prep(struct net_device *dev, r->ifi_flags = dev_get_flags(dev); r->ifi_change = 0; /* Wireless changes don't affect those flags */ - NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); + if (nla_put_string(skb, IFLA_IFNAME, dev->name)) + goto nla_put_failure; return nlh; nla_put_failure: @@ -428,7 +429,7 @@ void wireless_send_event(struct net_device * dev, int hdr_len; /* Size of the event header */ int wrqu_off = 0; /* Offset in wrqu */ /* Don't "optimise" the following variable, it will crash */ - unsigned cmd_index; /* *MUST* be unsigned */ + unsigned int cmd_index; /* *MUST* be unsigned */ struct sk_buff *skb; struct nlmsghdr *nlh; struct nlattr *nla; diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index 7c01c2f3b6c..7decbd357d5 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c @@ -276,7 +276,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, /* fixed already - and no change */ if (wdev->wext.connect.bssid && bssid && - compare_ether_addr(bssid, wdev->wext.connect.bssid) == 0) + ether_addr_equal(bssid, wdev->wext.connect.bssid)) goto out; err = __cfg80211_disconnect(rdev, dev, diff --git a/net/wireless/wext-spy.c b/net/wireless/wext-spy.c index 5d643a548fe..33bef22e44e 100644 --- a/net/wireless/wext-spy.c +++ b/net/wireless/wext-spy.c @@ -203,7 +203,7 @@ void wireless_spy_update(struct net_device * dev, /* Update all records that match */ for (i = 
0; i < spydata->spy_number; i++) - if (!compare_ether_addr(address, spydata->spy_address[i])) { + if (ether_addr_equal(address, spydata->spy_address[i])) { memcpy(&(spydata->spy_stat[i]), wstats, sizeof(struct iw_quality)); match = i; diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c index d2efd29f434..43239527a20 100644 --- a/net/x25/sysctl_net_x25.c +++ b/net/x25/sysctl_net_x25.c @@ -73,18 +73,12 @@ static struct ctl_table x25_table[] = { { 0, }, }; -static struct ctl_path x25_path[] = { - { .procname = "net", }, - { .procname = "x25", }, - { } -}; - void __init x25_register_sysctl(void) { - x25_table_header = register_sysctl_paths(x25_path, x25_table); + x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table); } void x25_unregister_sysctl(void) { - unregister_sysctl_table(x25_table_header); + unregister_net_sysctl_table(x25_table_header); } diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index f0ce862d1f4..a8a236338e6 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c @@ -58,7 +58,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) if (!sock_owned_by_user(sk)) { queued = x25_process_rx_frame(sk, skb); } else { - queued = !sk_add_backlog(sk, skb); + queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf); } bh_unlock_sock(sk); sock_put(sk); diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index 36384a1fa9f..66c638730c7 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c @@ -231,7 +231,7 @@ int x25_create_facilities(unsigned char *buffer, } if (dte_facs->calling_len && (facil_mask & X25_MASK_CALLING_AE)) { - unsigned bytecount = (dte_facs->calling_len + 1) >> 1; + unsigned int bytecount = (dte_facs->calling_len + 1) >> 1; *p++ = X25_FAC_CALLING_AE; *p++ = 1 + bytecount; *p++ = dte_facs->calling_len; @@ -240,7 +240,7 @@ int x25_create_facilities(unsigned char *buffer, } if (dte_facs->called_len && (facil_mask & X25_MASK_CALLED_AE)) { - unsigned bytecount = (dte_facs->called_len % 2) ? + unsigned int bytecount = (dte_facs->called_len % 2) ? dte_facs->called_len / 2 + 1 : dte_facs->called_len / 2; *p++ = X25_FAC_CALLED_AE; diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig index 6d081674515..ce90b8d9236 100644 --- a/net/xfrm/Kconfig +++ b/net/xfrm/Kconfig @@ -3,12 +3,17 @@ # config XFRM bool - select CRYPTO depends on NET +config XFRM_ALGO + tristate + select XFRM + select CRYPTO + config XFRM_USER tristate "Transformation user configuration interface" - depends on INET && XFRM + depends on INET + select XFRM_ALGO ---help--- Support for Transformation(XFRM) user configuration interface like IPsec used by native Linux tools. @@ -48,13 +53,13 @@ config XFRM_STATISTICS config XFRM_IPCOMP tristate - select XFRM + select XFRM_ALGO select CRYPTO select CRYPTO_DEFLATE config NET_KEY tristate "PF_KEY sockets" - select XFRM + select XFRM_ALGO ---help--- PF_KEYv2 socket family, compatible to KAME ones. 
They are required if you are going to use IPsec tools ported diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile index aa429eefe91..c0e961983f1 100644 --- a/net/xfrm/Makefile +++ b/net/xfrm/Makefile @@ -3,8 +3,9 @@ # obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \ - xfrm_input.o xfrm_output.o xfrm_algo.o \ + xfrm_input.o xfrm_output.o \ xfrm_sysctl.o xfrm_replay.o obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o +obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o obj-$(CONFIG_XFRM_USER) += xfrm_user.o obj-$(CONFIG_XFRM_IPCOMP) += xfrm_ipcomp.o diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index 791ab2e77f3..4ce2d93162c 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c @@ -15,9 +15,6 @@ #include <linux/crypto.h> #include <linux/scatterlist.h> #include <net/xfrm.h> -#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE) -#include <net/ah.h> -#endif #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) #include <net/esp.h> #endif @@ -752,3 +749,5 @@ void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) } EXPORT_SYMBOL_GPL(pskb_put); #endif + +MODULE_LICENSE("GPL"); diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h index 7199d78b2aa..716502ada53 100644 --- a/net/xfrm/xfrm_hash.h +++ b/net/xfrm/xfrm_hash.h @@ -45,10 +45,10 @@ static inline unsigned int __xfrm_dst_hash(const xfrm_address_t *daddr, return (h ^ (h >> 16)) & hmask; } -static inline unsigned __xfrm_src_hash(const xfrm_address_t *daddr, - const xfrm_address_t *saddr, - unsigned short family, - unsigned int hmask) +static inline unsigned int __xfrm_src_hash(const xfrm_address_t *daddr, + const xfrm_address_t *saddr, + unsigned short family, + unsigned int hmask) { unsigned int h = family; switch (family) { diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 7661576b6f4..3c87a1c4066 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -56,7 +56,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *xdst); static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, int dir); -static inline int +static inline bool __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) { const struct flowi4 *fl4 = &fl->u.ip4; @@ -69,7 +69,7 @@ __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) (fl4->flowi4_oif == sel->ifindex || !sel->ifindex); } -static inline int +static inline bool __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) { const struct flowi6 *fl6 = &fl->u.ip6; @@ -82,8 +82,8 @@ __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) (fl6->flowi6_oif == sel->ifindex || !sel->ifindex); } -int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl, - unsigned short family) +bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl, + unsigned short family) { switch (family) { case AF_INET: @@ -91,7 +91,7 @@ int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl, case AF_INET6: return __xfrm6_selector_match(sel, fl); } - return 0; + return false; } static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, @@ -877,7 +877,8 @@ static int xfrm_policy_match(const struct xfrm_policy *pol, u8 type, u16 family, int dir) { const struct xfrm_selector *sel = &pol->selector; - int match, ret = -ESRCH; + int ret = -ESRCH; + bool match; if 
(pol->family != family || (fl->flowi_mark & pol->mark.m) != pol->mark.v || @@ -1006,8 +1007,8 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, read_lock_bh(&xfrm_policy_lock); if ((pol = sk->sk_policy[dir]) != NULL) { - int match = xfrm_selector_match(&pol->selector, fl, - sk->sk_family); + bool match = xfrm_selector_match(&pol->selector, fl, + sk->sk_family); int err = 0; if (match) { @@ -2767,8 +2768,8 @@ EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete); #endif #ifdef CONFIG_XFRM_MIGRATE -static int xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp, - const struct xfrm_selector *sel_tgt) +static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp, + const struct xfrm_selector *sel_tgt) { if (sel_cmp->proto == IPSEC_ULPROTO_ANY) { if (sel_tgt->family == sel_cmp->family && @@ -2778,14 +2779,14 @@ static int xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp, sel_cmp->family) == 0 && sel_tgt->prefixlen_d == sel_cmp->prefixlen_d && sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) { - return 1; + return true; } } else { if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) { - return 1; + return true; } } - return 0; + return false; } static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel, diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c index 05640bc9594..380976f74c4 100644 --- a/net/xfrm/xfrm_sysctl.c +++ b/net/xfrm/xfrm_sysctl.c @@ -54,7 +54,7 @@ int __net_init xfrm_sysctl_init(struct net *net) table[2].data = &net->xfrm.sysctl_larval_drop; table[3].data = &net->xfrm.sysctl_acq_expires; - net->xfrm.sysctl_hdr = register_net_sysctl_table(net, net_core_path, table); + net->xfrm.sysctl_hdr = register_net_sysctl(net, "net/core", table); if (!net->xfrm.sysctl_hdr) goto out_register; return 0; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 7128dde0fe1..44293b3fd6a 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -756,40 +756,50 @@ static int copy_to_user_state_extra(struct xfrm_state *x, { copy_to_user_state(x, p); - if (x->coaddr) - NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr); + if (x->coaddr && + nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr)) + goto nla_put_failure; - if (x->lastused) - NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused); + if (x->lastused && + nla_put_u64(skb, XFRMA_LASTUSED, x->lastused)) + goto nla_put_failure; - if (x->aead) - NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead); - if (x->aalg) { - if (copy_to_user_auth(x->aalg, skb)) - goto nla_put_failure; + if (x->aead && + nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead)) + goto nla_put_failure; - NLA_PUT(skb, XFRMA_ALG_AUTH_TRUNC, - xfrm_alg_auth_len(x->aalg), x->aalg); - } - if (x->ealg) - NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg); - if (x->calg) - NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg); + if (x->aalg && + (copy_to_user_auth(x->aalg, skb) || + nla_put(skb, XFRMA_ALG_AUTH_TRUNC, + xfrm_alg_auth_len(x->aalg), x->aalg))) + goto nla_put_failure; - if (x->encap) - NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap); + if (x->ealg && + nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg)) + goto nla_put_failure; - if (x->tfcpad) - NLA_PUT_U32(skb, XFRMA_TFCPAD, x->tfcpad); + if (x->calg && + nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg)) + goto nla_put_failure; + + if (x->encap && + nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap)) + goto nla_put_failure; + + if (x->tfcpad && + 
nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad)) + goto nla_put_failure; if (xfrm_mark_put(skb, &x->mark)) goto nla_put_failure; - if (x->replay_esn) - NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL, - xfrm_replay_state_esn_len(x->replay_esn), x->replay_esn); + if (x->replay_esn && + nla_put(skb, XFRMA_REPLAY_ESN_VAL, + xfrm_replay_state_esn_len(x->replay_esn), + x->replay_esn)) + goto nla_put_failure; - if (x->security && copy_sec_ctx(x->security, skb) < 0) + if (x->security && copy_sec_ctx(x->security, skb)) goto nla_put_failure; return 0; @@ -912,8 +922,9 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net, sph.spdhcnt = si.spdhcnt; sph.spdhmcnt = si.spdhmcnt; - NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc); - NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph); + if (nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc) || + nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph)) + goto nla_put_failure; return nlmsg_end(skb, nlh); @@ -967,8 +978,9 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net, sh.sadhmcnt = si.sadhmcnt; sh.sadhcnt = si.sadhcnt; - NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt); - NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh); + if (nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt) || + nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh)) + goto nla_put_failure; return nlmsg_end(skb, nlh); @@ -1690,21 +1702,27 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct id->reqid = x->props.reqid; id->flags = c->data.aevent; - if (x->replay_esn) - NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL, - xfrm_replay_state_esn_len(x->replay_esn), - x->replay_esn); - else - NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay); - - NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft); + if (x->replay_esn) { + if (nla_put(skb, XFRMA_REPLAY_ESN_VAL, + xfrm_replay_state_esn_len(x->replay_esn), + x->replay_esn)) + goto nla_put_failure; + } else { + if (nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), + &x->replay)) + goto nla_put_failure; + } + if (nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft)) + goto nla_put_failure; - if (id->flags & XFRM_AE_RTHR) - NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff); + if ((id->flags & XFRM_AE_RTHR) && + nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff)) + goto nla_put_failure; - if (id->flags & XFRM_AE_ETHR) - NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH, - x->replay_maxage * 10 / HZ); + if ((id->flags & XFRM_AE_ETHR) && + nla_put_u32(skb, XFRMA_ETIMER_THRESH, + x->replay_maxage * 10 / HZ)) + goto nla_put_failure; if (xfrm_mark_put(skb, &x->mark)) goto nla_put_failure; @@ -2835,8 +2853,9 @@ static int build_report(struct sk_buff *skb, u8 proto, ur->proto = proto; memcpy(&ur->sel, sel, sizeof(ur->sel)); - if (addr) - NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr); + if (addr && + nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr)) + goto nla_put_failure; return nlmsg_end(skb, nlh); diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c index 0920ea3bf59..d309e7f472d 100644 --- a/security/selinux/nlmsgtab.c +++ b/security/selinux/nlmsgtab.c @@ -14,7 +14,6 @@ #include <linux/netlink.h> #include <linux/rtnetlink.h> #include <linux/if.h> -#include <linux/netfilter_ipv4/ip_queue.h> #include <linux/inet_diag.h> #include <linux/xfrm.h> #include <linux/audit.h> @@ -70,12 +69,6 @@ static struct nlmsg_perm nlmsg_route_perms[] = { RTM_SETDCB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, }; -static struct nlmsg_perm nlmsg_firewall_perms[] = -{ - { IPQM_MODE, NETLINK_FIREWALL_SOCKET__NLMSG_WRITE }, - { IPQM_VERDICT, 
NETLINK_FIREWALL_SOCKET__NLMSG_WRITE }, -}; - static struct nlmsg_perm nlmsg_tcpdiag_perms[] = { { TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, @@ -145,12 +138,6 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm) sizeof(nlmsg_route_perms)); break; - case SECCLASS_NETLINK_FIREWALL_SOCKET: - case SECCLASS_NETLINK_IP6FW_SOCKET: - err = nlmsg_perm(nlmsg_type, perm, nlmsg_firewall_perms, - sizeof(nlmsg_firewall_perms)); - break; - case SECCLASS_NETLINK_TCPDIAG_SOCKET: err = nlmsg_perm(nlmsg_type, perm, nlmsg_tcpdiag_perms, sizeof(nlmsg_tcpdiag_perms)); diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h index 7579f19e61e..81847dd08bd 100644 --- a/tools/virtio/linux/virtio.h +++ b/tools/virtio/linux/virtio.h @@ -203,6 +203,7 @@ void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len); void virtqueue_disable_cb(struct virtqueue *vq); bool virtqueue_enable_cb(struct virtqueue *vq); +bool virtqueue_enable_cb_delayed(struct virtqueue *vq); void *virtqueue_detach_unused_buf(struct virtqueue *vq); struct virtqueue *vring_new_virtqueue(unsigned int num, diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c index 6bf95f99536..e626fa553c5 100644 --- a/tools/virtio/virtio_test.c +++ b/tools/virtio/virtio_test.c @@ -144,7 +144,8 @@ static void wait_for_interrupt(struct vdev_info *dev) } } -static void run_test(struct vdev_info *dev, struct vq_info *vq, int bufs) +static void run_test(struct vdev_info *dev, struct vq_info *vq, + bool delayed, int bufs) { struct scatterlist sl; long started = 0, completed = 0; @@ -183,8 +184,12 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq, int bufs) assert(started <= bufs); if (completed == bufs) break; - if (virtqueue_enable_cb(vq->vq)) { - wait_for_interrupt(dev); + if (delayed) { + if (virtqueue_enable_cb_delayed(vq->vq)) + wait_for_interrupt(dev); + } else { + if (virtqueue_enable_cb(vq->vq)) + wait_for_interrupt(dev); } } test = 0; @@ -216,6 +221,14 @@ const struct option longopts[] = { .val = 'i', }, { + .name = "delayed-interrupt", + .val = 'D', + }, + { + .name = "no-delayed-interrupt", + .val = 'd', + }, + { } }; @@ -224,6 +237,7 @@ static void help() fprintf(stderr, "Usage: virtio_test [--help]" " [--no-indirect]" " [--no-event-idx]" + " [--delayed-interrupt]" "\n"); } @@ -233,6 +247,7 @@ int main(int argc, char **argv) unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | (1ULL << VIRTIO_RING_F_EVENT_IDX); int o; + bool delayed = false; for (;;) { o = getopt_long(argc, argv, optstring, longopts, NULL); @@ -251,6 +266,9 @@ int main(int argc, char **argv) case 'i': features &= ~(1ULL << VIRTIO_RING_F_INDIRECT_DESC); break; + case 'D': + delayed = true; + break; default: assert(0); break; @@ -260,6 +278,6 @@ int main(int argc, char **argv) done: vdev_info_init(&dev, features); vq_info_add(&dev, 256); - run_test(&dev, &dev.vqs[0], 0x100000); + run_test(&dev, &dev.vqs[0], delayed, 0x100000); return 0; } |
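
The bulk of the nl80211.c and xfrm_user.c hunks above replace the NLA_PUT_*() macros, which expand to a hidden goto nla_put_failure, with the nla_put_*() helpers, which return non-zero on failure and leave the branch to the caller. A minimal sketch of the resulting pattern follows; fill_event_example() is a hypothetical wrapper, while the helpers and attribute names are the ones visible in the hunks.

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/nl80211.h>
#include <net/netlink.h>

/*
 * Hypothetical helper illustrating the post-conversion style: conditional
 * attributes are folded into one chained test, as the patch does for the
 * connect/roam/mgmt events, and every failure funnels to a single label.
 */
static int fill_event_example(struct sk_buff *msg, u32 wiphy_idx,
			      u32 ifindex, const u8 *bssid)
{
	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, wiphy_idx) ||
	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex) ||
	    (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)))
		goto nla_put_failure;

	return 0;

 nla_put_failure:
	return -EMSGSIZE;	/* callers in the patch cancel and free msg here */
}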
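
The scan.c, util.c and wext hunks swap compare_ether_addr() for ether_addr_equal(). The two have opposite senses: compare_ether_addr() is memcmp-like and returns 0 when the addresses match, while ether_addr_equal() returns true on a match, so each call site's test is inverted; only the ordering comparison in cmp_bss_core() keeps memcmp(). A small sketch, with bssid_matches() as a hypothetical wrapper around the converted test:

#include <linux/etherdevice.h>
#include <linux/types.h>

/* Hypothetical wrapper showing the inverted sense after the conversion. */
static bool bssid_matches(const u8 *entry_bssid, const u8 *wanted)
{
	/* old: if (wanted && compare_ether_addr(entry_bssid, wanted)) return false; */
	if (wanted && !ether_addr_equal(entry_bssid, wanted))
		return false;
	return true;
}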
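
The x25 and xfrm_sysctl hunks drop the ctl_path arrays in favour of register_net_sysctl(), which takes the sysctl path as a plain string. A sketch of the registration pair, assuming a placeholder example_table that carries only the sentinel entry (the real tables keep their existing entries in front of it):

#include <linux/init.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

static struct ctl_table_header *example_hdr;
static struct ctl_table example_table[] = {
	{ }	/* sentinel; real entries go before it */
};

static void __init example_register_sysctl(void)
{
	/* old: register_sysctl_paths() with a ctl_path[] spelling out "net", "x25" */
	example_hdr = register_net_sysctl(&init_net, "net/x25", example_table);
}

static void example_unregister_sysctl(void)
{
	unregister_net_sysctl_table(example_hdr);
}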