author     Paolo Bonzini <pbonzini@redhat.com>  2018-08-06 17:31:36 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>  2018-08-06 17:31:36 +0200
commit     d2ce98ca0a192e1bcb89068adfe6cb736f5e351c
tree       888ccaba7b3e09bf4b81324dff38a65f5667296c
parent     85eae57bbb0612387201635659be543aaac2109e
parent     d72e90f33aa4709ebecc5005562f52335e106a60
Merge tag 'v4.18-rc6' into HEAD
Pull bug fixes into the KVM development tree to avoid nasty conflicts.
1068 files changed, 9515 insertions, 5585 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index efc7aa7a0670..533ff5c68970 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4846,3 +4846,8 @@ xirc2ps_cs= [NET,PCMCIA] Format: <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]] + + xhci-hcd.quirks [USB,KNL] + A hex value specifying bitmask with supplemental xhci + host controller quirks. Meaning of each bit can be + consulted in header drivers/usb/host/xhci.h. diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst index 8b9164990956..8f1d3de449b5 100644 --- a/Documentation/admin-guide/pm/intel_pstate.rst +++ b/Documentation/admin-guide/pm/intel_pstate.rst @@ -324,8 +324,7 @@ Global Attributes ``intel_pstate`` exposes several global attributes (files) in ``sysfs`` to control its functionality at the system level. They are located in the -``/sys/devices/system/cpu/cpufreq/intel_pstate/`` directory and affect all -CPUs. +``/sys/devices/system/cpu/intel_pstate/`` directory and affect all CPUs. Some of them are not present if the ``intel_pstate=per_cpu_perf_limits`` argument is passed to the kernel in the command line. @@ -379,6 +378,17 @@ argument is passed to the kernel in the command line. but it affects the maximum possible value of per-policy P-state limits (see `Interpretation of Policy Attributes`_ below for details). +``hwp_dynamic_boost`` + This attribute is only present if ``intel_pstate`` works in the + `active mode with the HWP feature enabled <Active Mode With HWP_>`_ in + the processor. If set (equal to 1), it causes the minimum P-state limit + to be increased dynamically for a short time whenever a task previously + waiting on I/O is selected to run on a given logical CPU (the purpose + of this mechanism is to improve performance). + + This setting has no effect on logical CPUs whose minimum P-state limit + is directly set to the highest non-turbo P-state or above it. + .. _status_attr: ``status`` diff --git a/Documentation/device-mapper/writecache.txt b/Documentation/device-mapper/writecache.txt index 4424fa2c67d7..01532b3008ae 100644 --- a/Documentation/device-mapper/writecache.txt +++ b/Documentation/device-mapper/writecache.txt @@ -15,6 +15,8 @@ Constructor parameters: size) 5. the number of optional parameters (the parameters with an argument count as two) + start_sector n (default: 0) + offset from the start of cache device in 512-byte sectors high_watermark n (default: 50) start writeback when the number of used blocks reach this watermark diff --git a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt index bdadc3da9556..6970f30a3770 100644 --- a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt +++ b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt @@ -66,7 +66,7 @@ Required root node properties: - "insignal,arndale-octa" - for Exynos5420-based Insignal Arndale Octa board. - "insignal,origen" - for Exynos4210-based Insignal Origen board. - - "insignal,origen4412 - for Exynos4412-based Insignal Origen board. + - "insignal,origen4412" - for Exynos4412-based Insignal Origen board. 
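[Editor's illustration, not part of the patch] The new hwp_dynamic_boost attribute documented above is a plain 0/1 sysfs file under /sys/devices/system/cpu/intel_pstate/, present only when intel_pstate runs in active mode with HWP. A minimal user-space sketch for reading and enabling it could look like this:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define BOOST_ATTR "/sys/devices/system/cpu/intel_pstate/hwp_dynamic_boost"

int main(void)
{
	char buf[4] = "";
	int fd = open(BOOST_ATTR, O_RDWR);

	if (fd < 0) {
		perror(BOOST_ATTR);	/* attribute absent unless active mode with HWP */
		return 1;
	}
	if (read(fd, buf, sizeof(buf) - 1) > 0)
		printf("hwp_dynamic_boost is currently %s", buf);

	/* enable the short dynamic boost of the minimum P-state after I/O waits */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}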
Optional nodes: diff --git a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt index 6fddb4f4f71a..3055d5c2c04e 100644 --- a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt +++ b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt @@ -36,7 +36,7 @@ Optional nodes: - port/ports: to describe a connection to an external encoder. The binding follows Documentation/devicetree/bindings/graph.txt and - suppors a single port with a single endpoint. + supports a single port with a single endpoint. - See also Documentation/devicetree/bindings/display/tilcdc/panel.txt and Documentation/devicetree/bindings/display/tilcdc/tfp410.txt for connecting diff --git a/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt b/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt index 20fc72d9e61e..45a61b462287 100644 --- a/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt +++ b/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt @@ -1,7 +1,7 @@ Nintendo Wii (Hollywood) GPIO controller Required properties: -- compatible: "nintendo,hollywood-gpio +- compatible: "nintendo,hollywood-gpio" - reg: Physical base address and length of the controller's registers. - gpio-controller: Marks the device node as a GPIO controller. - #gpio-cells: Should be <2>. The first cell is the pin number and the diff --git a/Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt b/Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt new file mode 100644 index 000000000000..f2ec0d4f2dff --- /dev/null +++ b/Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt @@ -0,0 +1,23 @@ +Spreadtrum SC27xx PMIC Vibrator + +Required properties: +- compatible: should be "sprd,sc2731-vibrator". +- reg: address of vibrator control register. + +Example : + + sc2731_pmic: pmic@0 { + compatible = "sprd,sc2731"; + reg = <0>; + spi-max-frequency = <26000000>; + interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>; + interrupt-controller; + #interrupt-cells = <2>; + #address-cells = <1>; + #size-cells = <0>; + + vibrator@eb4 { + compatible = "sprd,sc2731-vibrator"; + reg = <0xeb4>; + }; + }; diff --git a/Documentation/devicetree/bindings/input/touchscreen/hideep.txt b/Documentation/devicetree/bindings/input/touchscreen/hideep.txt index 121d9b7c79a2..1063c30d53f7 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/hideep.txt +++ b/Documentation/devicetree/bindings/input/touchscreen/hideep.txt @@ -32,7 +32,7 @@ i2c@00000000 { reg = <0x6c>; interrupt-parent = <&gpx1>; interrupts = <2 IRQ_TYPE_LEVEL_LOW>; - vdd-supply = <&ldo15_reg>"; + vdd-supply = <&ldo15_reg>; vid-supply = <&ldo18_reg>; reset-gpios = <&gpx1 5 0>; touchscreen-size-x = <1080>; diff --git a/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt b/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt index 1099fe0788fa..f246ccbf8838 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt @@ -15,7 +15,7 @@ Required properties: include "nvidia,tegra30-ictlr". - reg : Specifies base physical address and size of the registers. Each controller must be described separately (Tegra20 has 4 of them, - whereas Tegra30 and later have 5" + whereas Tegra30 and later have 5). - interrupt-controller : Identifies the node as an interrupt controller. 
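[Editor's illustration, not part of the patch] For the Spreadtrum vibrator binding added above, a kernel platform driver would typically bind to the new "sprd,sc2731-vibrator" compatible along the lines of the sketch below; the driver name, probe body and log message are placeholders for illustration, not the actual input driver:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int sc2731_vibra_probe(struct platform_device *pdev)
{
	u32 reg;

	/* per the binding, "reg" is the vibrator control register address */
	if (of_property_read_u32(pdev->dev.of_node, "reg", &reg))
		return -EINVAL;

	dev_info(&pdev->dev, "vibrator control register at 0x%x\n", reg);
	return 0;
}

static const struct of_device_id sc2731_vibra_of_match[] = {
	{ .compatible = "sprd,sc2731-vibrator" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sc2731_vibra_of_match);

static struct platform_driver sc2731_vibra_driver = {
	.driver = {
		.name = "sc27xx-vibra-example",
		.of_match_table = sc2731_vibra_of_match,
	},
	.probe = sc2731_vibra_probe,
};
module_platform_driver(sc2731_vibra_driver);

MODULE_LICENSE("GPL");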
- #interrupt-cells : Specifies the number of cells needed to encode an interrupt source. The value must be 3. diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt index 136bd612bd83..6a36bf66d932 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt @@ -12,7 +12,7 @@ Required properties: specifier, shall be 2 - interrupts: interrupts references to primary interrupt controller (only needed for exti controller with multiple exti under - same parent interrupt: st,stm32-exti and st,stm32h7-exti") + same parent interrupt: st,stm32-exti and st,stm32h7-exti) Example: diff --git a/Documentation/devicetree/bindings/mips/brcm/soc.txt b/Documentation/devicetree/bindings/mips/brcm/soc.txt index 356c29789cf5..3a66d3c483e1 100644 --- a/Documentation/devicetree/bindings/mips/brcm/soc.txt +++ b/Documentation/devicetree/bindings/mips/brcm/soc.txt @@ -152,7 +152,7 @@ Required properties: - compatible : should contain one of: "brcm,bcm7425-timers" "brcm,bcm7429-timers" - "brcm,bcm7435-timers and + "brcm,bcm7435-timers" and "brcm,brcmstb-timers" - reg : the timers register range - interrupts : the interrupt line for this timer block diff --git a/Documentation/devicetree/bindings/net/fsl-fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt index df873d1f3b7c..f8c33890bc29 100644 --- a/Documentation/devicetree/bindings/net/fsl-fman.txt +++ b/Documentation/devicetree/bindings/net/fsl-fman.txt @@ -238,7 +238,7 @@ PROPERTIES Must include one of the following: - "fsl,fman-dtsec" for dTSEC MAC - "fsl,fman-xgec" for XGEC MAC - - "fsl,fman-memac for mEMAC MAC + - "fsl,fman-memac" for mEMAC MAC - cell-index Usage: required diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt index 9b387f861aed..7dec508987c7 100644 --- a/Documentation/devicetree/bindings/power/power_domain.txt +++ b/Documentation/devicetree/bindings/power/power_domain.txt @@ -133,7 +133,7 @@ located inside a PM domain with index 0 of a power controller represented by a node with the label "power". In the second example the consumer device are partitioned across two PM domains, the first with index 0 and the second with index 1, of a power controller that -is represented by a node with the label "power. +is represented by a node with the label "power". Optional properties: - required-opps: This contains phandle to an OPP node in another device's OPP diff --git a/Documentation/devicetree/bindings/regulator/tps65090.txt b/Documentation/devicetree/bindings/regulator/tps65090.txt index ca69f5e3040c..ae326f263597 100644 --- a/Documentation/devicetree/bindings/regulator/tps65090.txt +++ b/Documentation/devicetree/bindings/regulator/tps65090.txt @@ -16,7 +16,7 @@ Required properties: Optional properties: - ti,enable-ext-control: This is applicable for DCDC1, DCDC2 and DCDC3. If DCDCs are externally controlled then this property should be there. -- "dcdc-ext-control-gpios: This is applicable for DCDC1, DCDC2 and DCDC3. +- dcdc-ext-control-gpios: This is applicable for DCDC1, DCDC2 and DCDC3. If DCDCs are externally controlled and if it is from GPIO then GPIO number should be provided. 
If it is externally controlled and no GPIO entry then driver will just configure this rails as external control diff --git a/Documentation/devicetree/bindings/reset/st,sti-softreset.txt b/Documentation/devicetree/bindings/reset/st,sti-softreset.txt index a21658f18fe6..3661e6153a92 100644 --- a/Documentation/devicetree/bindings/reset/st,sti-softreset.txt +++ b/Documentation/devicetree/bindings/reset/st,sti-softreset.txt @@ -15,7 +15,7 @@ Please refer to reset.txt in this directory for common reset controller binding usage. Required properties: -- compatible: Should be st,stih407-softreset"; +- compatible: Should be "st,stih407-softreset"; - #reset-cells: 1, see below example: diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt index d330c73de9a2..68b7d6207e3d 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt @@ -39,7 +39,7 @@ Required properties: Optional property: - clock-frequency: Desired I2C bus clock frequency in Hz. - When missing default to 400000Hz. + When missing default to 100000Hz. Child nodes should conform to I2C bus binding as described in i2c.txt. diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt index 6a4aadc4ce06..84b28dbe9f15 100644 --- a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt +++ b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt @@ -30,7 +30,7 @@ Required properties: Board connectors: * Headset Mic - * Secondary Mic", + * Secondary Mic * DMIC * Ext Spk diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8096.txt b/Documentation/devicetree/bindings/sound/qcom,apq8096.txt index aa54e49fc8a2..c7600a93ab39 100644 --- a/Documentation/devicetree/bindings/sound/qcom,apq8096.txt +++ b/Documentation/devicetree/bindings/sound/qcom,apq8096.txt @@ -35,7 +35,7 @@ This binding describes the APQ8096 sound card, which uses qdsp for audio. "Digital Mic3" Audio pins and MicBias on WCD9335 Codec: - "MIC_BIAS1 + "MIC_BIAS1" "MIC_BIAS2" "MIC_BIAS3" "MIC_BIAS4" diff --git a/Documentation/devicetree/bindings/w1/w1-gpio.txt b/Documentation/devicetree/bindings/w1/w1-gpio.txt index 6e09c35d9f1a..37091902a021 100644 --- a/Documentation/devicetree/bindings/w1/w1-gpio.txt +++ b/Documentation/devicetree/bindings/w1/w1-gpio.txt @@ -15,7 +15,7 @@ Optional properties: Examples: - onewire@0 { + onewire { compatible = "w1-gpio"; gpios = <&gpio 126 0>, <&gpio 105 0>; }; diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index 2c391338c675..37bf0a9de75c 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -441,8 +441,6 @@ prototypes: int (*iterate) (struct file *, struct dir_context *); int (*iterate_shared) (struct file *, struct dir_context *); __poll_t (*poll) (struct file *, struct poll_table_struct *); - struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t); - __poll_t (*poll_mask) (struct file *, __poll_t); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); int (*mmap) (struct file *, struct vm_area_struct *); @@ -473,7 +471,7 @@ prototypes: }; locking rules: - All except for ->poll_mask may block. + All may block. ->llseek() locking has moved from llseek to the individual llseek implementations. 
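[Editor's illustration, not part of the patch] The corrected GENI I2C binding above states that an absent clock-frequency property defaults to 100 kHz. The usual way a driver implements that fallback is sketched below; the function and constant names are made up for the example:

#include <linux/device.h>
#include <linux/property.h>
#include <linux/types.h>

#define GENI_I2C_DEFAULT_BUS_HZ	100000	/* default when the property is absent */

static u32 example_i2c_bus_freq(struct device *dev)
{
	u32 hz;

	if (device_property_read_u32(dev, "clock-frequency", &hz))
		hz = GENI_I2C_DEFAULT_BUS_HZ;

	return hz;
}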
If your fs is not using generic_file_llseek, you @@ -505,9 +503,6 @@ in sys_read() and friends. the lease within the individual filesystem to record the result of the operation -->poll_mask can be called with or without the waitqueue lock for the waitqueue -returned from ->get_poll_head. - --------------------------- dquot_operations ------------------------------- prototypes: int (*write_dquot) (struct dquot *); diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index 829a7b7857a4..f608180ad59d 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -857,8 +857,6 @@ struct file_operations { ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); int (*iterate) (struct file *, struct dir_context *); __poll_t (*poll) (struct file *, struct poll_table_struct *); - struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t); - __poll_t (*poll_mask) (struct file *, __poll_t); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); int (*mmap) (struct file *, struct vm_area_struct *); @@ -903,17 +901,6 @@ otherwise noted. activity on this file and (optionally) go to sleep until there is activity. Called by the select(2) and poll(2) system calls - get_poll_head: Returns the struct wait_queue_head that callers can - wait on. Callers need to check the returned events using ->poll_mask - once woken. Can return NULL to indicate polling is not supported, - or any error code using the ERR_PTR convention to indicate that a - grave error occured and ->poll_mask shall not be called. - - poll_mask: return the mask of EPOLL* values describing the file descriptor - state. Called either before going to sleep on the waitqueue returned by - get_poll_head, or after it has been woken. If ->get_poll_head and - ->poll_mask are implemented ->poll does not need to be implement. - unlocked_ioctl: called by the ioctl(2) system call. compat_ioctl: called by the ioctl(2) system call when 32 bit system calls diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt index 6c9c69ec3986..114c7ce7b58d 100644 --- a/Documentation/kbuild/kbuild.txt +++ b/Documentation/kbuild/kbuild.txt @@ -50,6 +50,11 @@ LDFLAGS_MODULE -------------------------------------------------- Additional options used for $(LD) when linking modules. +KBUILD_KCONFIG +-------------------------------------------------- +Set the top-level Kconfig file to the value of this environment +variable. The default name is "Kconfig". + KBUILD_VERBOSE -------------------------------------------------- Set the kbuild verbosity. Can be assigned same values as "V=...". @@ -88,7 +93,8 @@ In most cases the name of the architecture is the same as the directory name found in the arch/ directory. But some architectures such as x86 and sparc have aliases. x86: i386 for 32 bit, x86_64 for 64 bit -sparc: sparc for 32 bit, sparc64 for 64 bit +sh: sh for 32 bit, sh64 for 64 bit +sparc: sparc32 for 32 bit, sparc64 for 64 bit CROSS_COMPILE -------------------------------------------------- @@ -148,15 +154,6 @@ stripped after they are installed. If INSTALL_MOD_STRIP is '1', then the default option --strip-debug will be used. Otherwise, INSTALL_MOD_STRIP value will be used as the options to the strip command. -INSTALL_FW_PATH --------------------------------------------------- -INSTALL_FW_PATH specifies where to install the firmware blobs. 
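[Editor's illustration, not part of the patch] With ->get_poll_head and ->poll_mask removed above, ->poll() is again the only polling entry point. A minimal sketch of such an implementation, using a made-up "my_dev" structure purely for illustration:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct my_dev {
	wait_queue_head_t waitq;
	bool data_ready;
};

static __poll_t my_dev_poll(struct file *file, struct poll_table_struct *wait)
{
	struct my_dev *dev = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &dev->waitq, wait);	/* register for wakeups; may block in the caller */

	if (dev->data_ready)
		mask |= EPOLLIN | EPOLLRDNORM;
	mask |= EPOLLOUT | EPOLLWRNORM;		/* always writable in this toy example */

	return mask;
}

static const struct file_operations my_dev_fops = {
	.owner = THIS_MODULE,
	.poll  = my_dev_poll,
};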
-The default value is: - - $(INSTALL_MOD_PATH)/lib/firmware - -The value can be overridden in which case the default value is ignored. - INSTALL_HDR_PATH -------------------------------------------------- INSTALL_HDR_PATH specifies where to install user space headers when diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt index 3534a84d206c..64e0775a62d4 100644 --- a/Documentation/kbuild/kconfig-language.txt +++ b/Documentation/kbuild/kconfig-language.txt @@ -430,6 +430,12 @@ This sets the config program's title bar if the config program chooses to use it. It should be placed at the top of the configuration, before any other statement. +'#' Kconfig source file comment: + +An unquoted '#' character anywhere in a source file line indicates +the beginning of a source file comment. The remainder of that line +is a comment. + Kconfig hints ------------- diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt index 7233118f3a05..68c82914c0f3 100644 --- a/Documentation/kbuild/kconfig.txt +++ b/Documentation/kbuild/kconfig.txt @@ -2,9 +2,9 @@ This file contains some assistance for using "make *config". Use "make help" to list all of the possible configuration targets. -The xconfig ('qconf') and menuconfig ('mconf') programs also -have embedded help text. Be sure to check it for navigation, -search, and other general help text. +The xconfig ('qconf'), menuconfig ('mconf'), and nconfig ('nconf') +programs also have embedded help text. Be sure to check that for +navigation, search, and other general help text. ====================================================================== General @@ -17,13 +17,16 @@ this happens, using a previously working .config file and running for you, so you may find that you need to see what NEW kernel symbols have been introduced. -To see a list of new config symbols when using "make oldconfig", use +To see a list of new config symbols, use cp user/some/old.config .config make listnewconfig and the config program will list any new symbols, one per line. +Alternatively, you can use the brute force method: + + make oldconfig scripts/diffconfig .config.old .config | less ______________________________________________________________________ @@ -160,7 +163,7 @@ Searching in menuconfig: This lists all config symbols that contain "hotplug", e.g., HOTPLUG_CPU, MEMORY_HOTPLUG. - For search help, enter / followed TAB-TAB-TAB (to highlight + For search help, enter / followed by TAB-TAB (to highlight <Help>) and Enter. This will tell you that you can also use regular expressions (regexes) in the search string, so if you are not interested in MEMORY_HOTPLUG, you could try @@ -203,6 +206,39 @@ Example: ====================================================================== +nconfig +-------------------------------------------------- + +nconfig is an alternate text-based configurator. It lists function +keys across the bottom of the terminal (window) that execute commands. +You can also just use the corresponding numeric key to execute the +commands unless you are in a data entry window. E.g., instead of F6 +for Save, you can just press 6. + +Use F1 for Global help or F3 for the Short help menu. + +Searching in nconfig: + + You can search either in the menu entry "prompt" strings + or in the configuration symbols. + + Use / to begin a search through the menu entries. This does + not support regular expressions. Use <Down> or <Up> for + Next hit and Previous hit, respectively. 
Use <Esc> to + terminate the search mode. + + F8 (SymSearch) searches the configuration symbols for the + given string or regular expression (regex). + +NCONFIG_MODE +-------------------------------------------------- +This mode shows all sub-menus in one large tree. + +Example: + make NCONFIG_MODE=single_menu nconfig + + +====================================================================== xconfig -------------------------------------------------- @@ -230,8 +266,7 @@ gconfig Searching in gconfig: - None (gconfig isn't maintained as well as xconfig or menuconfig); - however, gconfig does have a few more viewing choices than - xconfig does. + There is no search command in gconfig. However, gconfig does + have several different viewing choices, modes, and options. ### diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt index c13214d073a4..d3e5dd26db12 100644 --- a/Documentation/networking/bonding.txt +++ b/Documentation/networking/bonding.txt @@ -1490,7 +1490,7 @@ To remove an ARP target: To configure the interval between learning packet transmits: # echo 12 > /sys/class/net/bond0/bonding/lp_interval - NOTE: the lp_inteval is the number of seconds between instances where + NOTE: the lp_interval is the number of seconds between instances where the bonding driver sends learning packets to each slaves peer switch. The default interval is 1 second. diff --git a/Documentation/networking/e100.rst b/Documentation/networking/e100.rst index 9708f5fa76de..f81111eba9c5 100644 --- a/Documentation/networking/e100.rst +++ b/Documentation/networking/e100.rst @@ -47,41 +47,45 @@ Driver Configuration Parameters The default value for each parameter is generally the recommended setting, unless otherwise noted. -Rx Descriptors: Number of receive descriptors. A receive descriptor is a data +Rx Descriptors: + Number of receive descriptors. A receive descriptor is a data structure that describes a receive buffer and its attributes to the network controller. The data in the descriptor is used by the controller to write data from the controller to host memory. In the 3.x.x driver the valid range for this parameter is 64-256. The default value is 256. This parameter can be changed using the command:: - ethtool -G eth? rx n + ethtool -G eth? rx n Where n is the number of desired Rx descriptors. -Tx Descriptors: Number of transmit descriptors. A transmit descriptor is a data +Tx Descriptors: + Number of transmit descriptors. A transmit descriptor is a data structure that describes a transmit buffer and its attributes to the network controller. The data in the descriptor is used by the controller to read data from the host memory to the controller. In the 3.x.x driver the valid range for this parameter is 64-256. The default value is 128. This parameter can be changed using the command:: - ethtool -G eth? tx n + ethtool -G eth? tx n Where n is the number of desired Tx descriptors. -Speed/Duplex: The driver auto-negotiates the link speed and duplex settings by +Speed/Duplex: + The driver auto-negotiates the link speed and duplex settings by default. The ethtool utility can be used as follows to force speed/duplex.:: - ethtool -s eth? autoneg off speed {10|100} duplex {full|half} + ethtool -s eth? autoneg off speed {10|100} duplex {full|half} NOTE: setting the speed/duplex to incorrect values will cause the link to fail. 
-Event Log Message Level: The driver uses the message level flag to log events +Event Log Message Level: + The driver uses the message level flag to log events to syslog. The message level can be set at driver load time. It can also be set using the command:: - ethtool -s eth? msglvl n + ethtool -s eth? msglvl n Additional Configurations @@ -92,7 +96,7 @@ Configuring the Driver on Different Distributions Configuring a network driver to load properly when the system is started is distribution dependent. Typically, the configuration process involves -adding an alias line to /etc/modprobe.d/*.conf as well as editing other +adding an alias line to `/etc/modprobe.d/*.conf` as well as editing other system startup scripts and/or configuration files. Many popular Linux distributions ship with tools to make these changes for you. To learn the proper way to configure a network device for your system, refer to @@ -160,7 +164,10 @@ This results in unbalanced receive traffic. If you have multiple interfaces in a server, either turn on ARP filtering by -(1) entering:: echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter +(1) entering:: + + echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter + (this only works if your kernel's version is higher than 2.4.5), or (2) installing the interfaces in separate broadcast domains (either diff --git a/Documentation/networking/e1000.rst b/Documentation/networking/e1000.rst index 144b87eef153..f10dd4086921 100644 --- a/Documentation/networking/e1000.rst +++ b/Documentation/networking/e1000.rst @@ -34,7 +34,8 @@ Command Line Parameters The default value for each parameter is generally the recommended setting, unless otherwise noted. -NOTES: For more information about the AutoNeg, Duplex, and Speed +NOTES: + For more information about the AutoNeg, Duplex, and Speed parameters, see the "Speed and Duplex Configuration" section in this document. @@ -45,22 +46,27 @@ NOTES: For more information about the AutoNeg, Duplex, and Speed AutoNeg ------- + (Supported only on adapters with copper connections) -Valid Range: 0x01-0x0F, 0x20-0x2F -Default Value: 0x2F + +:Valid Range: 0x01-0x0F, 0x20-0x2F +:Default Value: 0x2F This parameter is a bit-mask that specifies the speed and duplex settings advertised by the adapter. When this parameter is used, the Speed and Duplex parameters must not be specified. -NOTE: Refer to the Speed and Duplex section of this readme for more +NOTE: + Refer to the Speed and Duplex section of this readme for more information on the AutoNeg parameter. Duplex ------ + (Supported only on adapters with copper connections) -Valid Range: 0-2 (0=auto-negotiate, 1=half, 2=full) -Default Value: 0 + +:Valid Range: 0-2 (0=auto-negotiate, 1=half, 2=full) +:Default Value: 0 This defines the direction in which data is allowed to flow. Can be either one or two-directional. If both Duplex and the link partner are @@ -70,18 +76,22 @@ duplex. FlowControl ----------- -Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx) -Default Value: Reads flow control settings from the EEPROM + +:Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx) +:Default Value: Reads flow control settings from the EEPROM This parameter controls the automatic generation(Tx) and response(Rx) to Ethernet PAUSE frames. 
InterruptThrottleRate --------------------- + (not supported on Intel(R) 82542, 82543 or 82544-based adapters) -Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative, - 4=simplified balancing) -Default Value: 3 + +:Valid Range: + 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative, + 4=simplified balancing) +:Default Value: 3 The driver can limit the amount of interrupts per second that the adapter will generate for incoming packets. It does this by writing a value to the @@ -135,13 +145,15 @@ Setting InterruptThrottleRate to 0 turns off any interrupt moderation and may improve small packet latency, but is generally not suitable for bulk throughput traffic. -NOTE: InterruptThrottleRate takes precedence over the TxAbsIntDelay and +NOTE: + InterruptThrottleRate takes precedence over the TxAbsIntDelay and RxAbsIntDelay parameters. In other words, minimizing the receive and/or transmit absolute delays does not force the controller to generate more interrupts than what the Interrupt Throttle Rate allows. -CAUTION: If you are using the Intel(R) PRO/1000 CT Network Connection +CAUTION: + If you are using the Intel(R) PRO/1000 CT Network Connection (controller 82547), setting InterruptThrottleRate to a value greater than 75,000, may hang (stop transmitting) adapters under certain network conditions. If this occurs a NETDEV @@ -151,7 +163,8 @@ CAUTION: If you are using the Intel(R) PRO/1000 CT Network Connection hang, ensure that InterruptThrottleRate is set no greater than 75,000 and is not set to 0. -NOTE: When e1000 is loaded with default settings and multiple adapters +NOTE: + When e1000 is loaded with default settings and multiple adapters are in use simultaneously, the CPU utilization may increase non- linearly. In order to limit the CPU utilization without impacting the overall throughput, we recommend that you load the driver as @@ -168,9 +181,11 @@ NOTE: When e1000 is loaded with default settings and multiple adapters RxDescriptors ------------- -Valid Range: 48-256 for 82542 and 82543-based adapters - 48-4096 for all other supported adapters -Default Value: 256 + +:Valid Range: + - 48-256 for 82542 and 82543-based adapters + - 48-4096 for all other supported adapters +:Default Value: 256 This value specifies the number of receive buffer descriptors allocated by the driver. Increasing this value allows the driver to buffer more @@ -180,15 +195,17 @@ Each descriptor is 16 bytes. A receive buffer is also allocated for each descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending on the MTU setting. The maximum MTU size is 16110. -NOTE: MTU designates the frame size. It only needs to be set for Jumbo +NOTE: + MTU designates the frame size. It only needs to be set for Jumbo Frames. Depending on the available system resources, the request for a higher number of receive descriptors may be denied. In this case, use a lower number. RxIntDelay ---------- -Valid Range: 0-65535 (0=off) -Default Value: 0 + +:Valid Range: 0-65535 (0=off) +:Default Value: 0 This value delays the generation of receive interrupts in units of 1.024 microseconds. Receive interrupt reduction can improve CPU efficiency if @@ -198,7 +215,8 @@ of TCP traffic. If the system is reporting dropped receives, this value may be set too high, causing the driver to run out of available receive descriptors. 
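[Editor's illustration, not part of the patch] RxIntDelay above, like the other *IntDelay parameters that follow, is expressed in units of 1.024 microseconds, so converting a target delay to a parameter value is simple integer arithmetic; the helper below is illustrative only:

#include <stdio.h>

/* one RxIntDelay/TxIntDelay tick is 1.024 microseconds, i.e. 1024 ns */
static unsigned int usecs_to_intdelay(unsigned int usecs)
{
	return (usecs * 1000U) / 1024U;
}

static unsigned int intdelay_to_usecs(unsigned int ticks)
{
	return (ticks * 1024U) / 1000U;
}

int main(void)
{
	printf("100 us    -> %u ticks\n", usecs_to_intdelay(100));	/* 97 */
	printf("128 ticks -> %u us\n", intdelay_to_usecs(128));		/* ~131 */
	return 0;
}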
-CAUTION: When setting RxIntDelay to a value other than 0, adapters may +CAUTION: + When setting RxIntDelay to a value other than 0, adapters may hang (stop transmitting) under certain network conditions. If this occurs a NETDEV WATCHDOG message is logged in the system event log. In addition, the controller is automatically reset, @@ -207,9 +225,11 @@ CAUTION: When setting RxIntDelay to a value other than 0, adapters may RxAbsIntDelay ------------- + (This parameter is supported only on 82540, 82545 and later adapters.) -Valid Range: 0-65535 (0=off) -Default Value: 128 + +:Valid Range: 0-65535 (0=off) +:Default Value: 128 This value, in units of 1.024 microseconds, limits the delay in which a receive interrupt is generated. Useful only if RxIntDelay is non-zero, @@ -220,9 +240,11 @@ conditions. Speed ----- + (This parameter is supported only on adapters with copper connections.) -Valid Settings: 0, 10, 100, 1000 -Default Value: 0 (auto-negotiate at all supported speeds) + +:Valid Settings: 0, 10, 100, 1000 +:Default Value: 0 (auto-negotiate at all supported speeds) Speed forces the line speed to the specified value in megabits per second (Mbps). If this parameter is not specified or is set to 0 and the link @@ -231,22 +253,26 @@ speed. Duplex should also be set when Speed is set to either 10 or 100. TxDescriptors ------------- -Valid Range: 48-256 for 82542 and 82543-based adapters - 48-4096 for all other supported adapters -Default Value: 256 + +:Valid Range: + - 48-256 for 82542 and 82543-based adapters + - 48-4096 for all other supported adapters +:Default Value: 256 This value is the number of transmit descriptors allocated by the driver. Increasing this value allows the driver to queue more transmits. Each descriptor is 16 bytes. -NOTE: Depending on the available system resources, the request for a +NOTE: + Depending on the available system resources, the request for a higher number of transmit descriptors may be denied. In this case, use a lower number. TxIntDelay ---------- -Valid Range: 0-65535 (0=off) -Default Value: 8 + +:Valid Range: 0-65535 (0=off) +:Default Value: 8 This value delays the generation of transmit interrupts in units of 1.024 microseconds. Transmit interrupt reduction can improve CPU @@ -256,9 +282,11 @@ causing the driver to run out of available transmit descriptors. TxAbsIntDelay ------------- + (This parameter is supported only on 82540, 82545 and later adapters.) -Valid Range: 0-65535 (0=off) -Default Value: 32 + +:Valid Range: 0-65535 (0=off) +:Default Value: 32 This value, in units of 1.024 microseconds, limits the delay in which a transmit interrupt is generated. Useful only if TxIntDelay is non-zero, @@ -269,18 +297,21 @@ network conditions. XsumRX ------ + (This parameter is NOT supported on the 82542-based adapter.) -Valid Range: 0-1 -Default Value: 1 + +:Valid Range: 0-1 +:Default Value: 1 A value of '1' indicates that the driver should enable IP checksum offload for received packets (both UDP and TCP) to the adapter hardware. Copybreak --------- -Valid Range: 0-xxxxxxx (0=off) -Default Value: 256 -Usage: modprobe e1000.ko copybreak=128 + +:Valid Range: 0-xxxxxxx (0=off) +:Default Value: 256 +:Usage: modprobe e1000.ko copybreak=128 Driver copies all packets below or equaling this size to a fresh RX buffer before handing it up the stack. 
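[Editor's illustration, not part of the patch] The Copybreak behaviour described above ("copies all packets below or equaling this size to a fresh RX buffer") follows a common driver pattern. A generic sketch of that technique, not the e1000 implementation itself:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *rx_copybreak(struct net_device *netdev,
				    struct sk_buff *rx_buf,
				    unsigned int len,
				    unsigned int copybreak)
{
	struct sk_buff *skb;

	if (len > copybreak)
		return rx_buf;		/* large frame: hand the buffer up as-is */

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return rx_buf;		/* allocation failed: fall back to the big buffer */

	skb_put_data(skb, rx_buf->data, len);	/* copy the small payload */
	/* rx_buf stays with the driver and is reused for future receives */
	return skb;
}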
@@ -292,8 +323,9 @@ it is also available during runtime at SmartPowerDownEnable -------------------- -Valid Range: 0-1 -Default Value: 0 (disabled) + +:Valid Range: 0-1 +:Default Value: 0 (disabled) Allows PHY to turn off in lower power states. The user can turn off this parameter in supported chipsets. @@ -309,14 +341,14 @@ fiber interface board only links at 1000 Mbps full-duplex. For copper-based boards, the keywords interact as follows: - The default operation is auto-negotiate. The board advertises all +- The default operation is auto-negotiate. The board advertises all supported speed and duplex combinations, and it links at the highest common speed and duplex mode IF the link partner is set to auto-negotiate. - If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps +- If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps is advertised (The 1000BaseT spec requires auto-negotiation.) - If Speed = 10 or 100, then both Speed and Duplex should be set. Auto- +- If Speed = 10 or 100, then both Speed and Duplex should be set. Auto- negotiation is disabled, and the AutoNeg parameter is ignored. Partner SHOULD also be forced. @@ -328,13 +360,15 @@ process. The parameter may be specified as either a decimal or hexadecimal value as determined by the bitmap below. +============== ====== ====== ======= ======= ====== ====== ======= ====== Bit position 7 6 5 4 3 2 1 0 Decimal Value 128 64 32 16 8 4 2 1 Hex value 80 40 20 10 8 4 2 1 Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 Duplex Full Full Half Full Half +============== ====== ====== ======= ======= ====== ====== ======= ====== -Some examples of using AutoNeg: +Some examples of using AutoNeg:: modprobe e1000 AutoNeg=0x01 (Restricts autonegotiation to 10 Half) modprobe e1000 AutoNeg=1 (Same as above) @@ -357,56 +391,59 @@ Additional Configurations Jumbo Frames ------------ -Jumbo Frames support is enabled by changing the MTU to a value larger -than the default of 1500. Use the ifconfig command to increase the MTU -size. For example:: + + Jumbo Frames support is enabled by changing the MTU to a value larger than + the default of 1500. Use the ifconfig command to increase the MTU size. + For example:: ifconfig eth<x> mtu 9000 up -This setting is not saved across reboots. It can be made permanent if -you add:: + This setting is not saved across reboots. It can be made permanent if + you add:: MTU=9000 -to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>. This example -applies to the Red Hat distributions; other distributions may store this -setting in a different location. + to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>. This example + applies to the Red Hat distributions; other distributions may store this + setting in a different location. + +Notes: + Degradation in throughput performance may be observed in some Jumbo frames + environments. If this is observed, increasing the application's socket buffer + size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help. + See the specific application manual and /usr/src/linux*/Documentation/ + networking/ip-sysctl.txt for more details. -Notes: Degradation in throughput performance may be observed in some -Jumbo frames environments. If this is observed, increasing the -application's socket buffer size and/or increasing the -/proc/sys/net/ipv4/tcp_*mem entry values may help. See the specific -application manual and /usr/src/linux*/Documentation/ -networking/ip-sysctl.txt for more details. 
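[Editor's illustration, not part of the patch] Decoding the AutoNeg bitmap from the table above: bit 0 advertises 10 Half, bit 1 advertises 10 Full, bit 2 advertises 100 Half, bit 3 advertises 100 Full, and bit 5 advertises 1000 Full (bits 4, 6 and 7 are unused). The stand-alone snippet below just prints what a given mask advertises and reproduces the 0x01 and 0x2F examples:

#include <stdio.h>

struct autoneg_bit {
	unsigned int mask;
	const char *mode;
};

static const struct autoneg_bit autoneg_map[] = {
	{ 0x01, "10 Mbps Half" },
	{ 0x02, "10 Mbps Full" },
	{ 0x04, "100 Mbps Half" },
	{ 0x08, "100 Mbps Full" },
	{ 0x20, "1000 Mbps Full" },
};

static void print_autoneg(unsigned int autoneg)
{
	printf("AutoNeg=0x%02x advertises:\n", autoneg);
	for (unsigned int i = 0; i < sizeof(autoneg_map) / sizeof(autoneg_map[0]); i++)
		if (autoneg & autoneg_map[i].mask)
			printf("  %s\n", autoneg_map[i].mode);
}

int main(void)
{
	print_autoneg(0x01);	/* "Restricts autonegotiation to 10 Half" */
	print_autoneg(0x2F);	/* the default: all supported speed/duplex combinations */
	return 0;
}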
+ - The maximum MTU setting for Jumbo Frames is 16110. This value coincides + with the maximum Jumbo Frames size of 16128. -- The maximum MTU setting for Jumbo Frames is 16110. This value - coincides with the maximum Jumbo Frames size of 16128. + - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in + poor performance or loss of link. -- Using Jumbo frames at 10 or 100 Mbps is not supported and may result - in poor performance or loss of link. + - Adapters based on the Intel(R) 82542 and 82573V/E controller do not + support Jumbo Frames. These correspond to the following product names:: -- Adapters based on the Intel(R) 82542 and 82573V/E controller do not - support Jumbo Frames. These correspond to the following product names: - Intel(R) PRO/1000 Gigabit Server Adapter Intel(R) PRO/1000 PM Network - Connection + Intel(R) PRO/1000 Gigabit Server Adapter + Intel(R) PRO/1000 PM Network Connection ethtool ------- -The driver utilizes the ethtool interface for driver configuration and -diagnostics, as well as displaying statistical information. The ethtool -version 1.6 or later is required for this functionality. -The latest release of ethtool can be found from -https://www.kernel.org/pub/software/network/ethtool/ + The driver utilizes the ethtool interface for driver configuration and + diagnostics, as well as displaying statistical information. The ethtool + version 1.6 or later is required for this functionality. + + The latest release of ethtool can be found from + https://www.kernel.org/pub/software/network/ethtool/ Enabling Wake on LAN* (WoL) --------------------------- -WoL is configured through the ethtool* utility. -WoL will be enabled on the system during the next shut down or reboot. -For this driver version, in order to enable WoL, the e1000 driver must be -loaded when shutting down or rebooting the system. + WoL is configured through the ethtool* utility. + WoL will be enabled on the system during the next shut down or reboot. + For this driver version, in order to enable WoL, the e1000 driver must be + loaded when shutting down or rebooting the system. Support ======= diff --git a/Documentation/usb/gadget_configfs.txt b/Documentation/usb/gadget_configfs.txt index 635e57493709..b8cb38a98c19 100644 --- a/Documentation/usb/gadget_configfs.txt +++ b/Documentation/usb/gadget_configfs.txt @@ -226,7 +226,7 @@ $ rm configs/<config name>.<number>/<function> where <config name>.<number> specify the configuration and <function> is a symlink to a function being removed from the configuration, e.g.: -$ rm configfs/c.1/ncm.usb0 +$ rm configs/c.1/ncm.usb0 ... ... 
diff --git a/MAINTAINERS b/MAINTAINERS index e19ec6d90a44..0fe4228f78cb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -581,7 +581,7 @@ W: https://www.infradead.org/~dhowells/kafs/ AGPGART DRIVER M: David Airlie <airlied@linux.ie> -T: git git://people.freedesktop.org/~airlied/linux (part of drm maint) +T: git git://anongit.freedesktop.org/drm/drm S: Maintained F: drivers/char/agp/ F: include/linux/agp* @@ -2523,7 +2523,7 @@ S: Supported F: drivers/scsi/esas2r ATUSB IEEE 802.15.4 RADIO DRIVER -M: Stefan Schmidt <stefan@osg.samsung.com> +M: Stefan Schmidt <stefan@datenfreihafen.org> L: linux-wpan@vger.kernel.org S: Maintained F: drivers/net/ieee802154/atusb.c @@ -2971,9 +2971,13 @@ N: bcm585* N: bcm586* N: bcm88312 N: hr2 -F: arch/arm64/boot/dts/broadcom/ns2* +N: stingray +F: arch/arm64/boot/dts/broadcom/northstar2/* +F: arch/arm64/boot/dts/broadcom/stingray/* F: drivers/clk/bcm/clk-ns* +F: drivers/clk/bcm/clk-sr* F: drivers/pinctrl/bcm/pinctrl-ns* +F: include/dt-bindings/clock/bcm-sr* BROADCOM KONA GPIO DRIVER M: Ray Jui <rjui@broadcom.com> @@ -4456,6 +4460,7 @@ F: Documentation/blockdev/drbd/ DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +R: "Rafael J. Wysocki" <rafael@kernel.org> T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git S: Supported F: Documentation/kobject.txt @@ -4626,7 +4631,7 @@ F: include/uapi/drm/vmwgfx_drm.h DRM DRIVERS M: David Airlie <airlied@linux.ie> L: dri-devel@lists.freedesktop.org -T: git git://people.freedesktop.org/~airlied/linux +T: git git://anongit.freedesktop.org/drm/drm B: https://bugs.freedesktop.org/ C: irc://chat.freenode.net/dri-devel S: Maintained @@ -5669,7 +5674,7 @@ F: drivers/crypto/caam/ F: Documentation/devicetree/bindings/crypto/fsl-sec4.txt FREESCALE DIU FRAMEBUFFER DRIVER -M: Timur Tabi <timur@tabi.org> +M: Timur Tabi <timur@kernel.org> L: linux-fbdev@vger.kernel.org S: Maintained F: drivers/video/fbdev/fsl-diu-fb.* @@ -5769,7 +5774,7 @@ S: Maintained F: drivers/net/wan/fsl_ucc_hdlc* FREESCALE QUICC ENGINE UCC UART DRIVER -M: Timur Tabi <timur@tabi.org> +M: Timur Tabi <timur@kernel.org> L: linuxppc-dev@lists.ozlabs.org S: Maintained F: drivers/tty/serial/ucc_uart.c @@ -5785,7 +5790,6 @@ F: include/linux/fsl/ FREESCALE SOC FS_ENET DRIVER M: Pantelis Antoniou <pantelis.antoniou@gmail.com> -M: Vitaly Bordug <vbordug@ru.mvista.com> L: linuxppc-dev@lists.ozlabs.org L: netdev@vger.kernel.org S: Maintained @@ -5793,7 +5797,7 @@ F: drivers/net/ethernet/freescale/fs_enet/ F: include/linux/fs_enet_pd.h FREESCALE SOC SOUND DRIVERS -M: Timur Tabi <timur@tabi.org> +M: Timur Tabi <timur@kernel.org> M: Nicolin Chen <nicoleotsuka@gmail.com> M: Xiubo Li <Xiubo.Lee@gmail.com> R: Fabio Estevam <fabio.estevam@nxp.com> @@ -6904,7 +6908,7 @@ F: drivers/clk/clk-versaclock5.c IEEE 802.15.4 SUBSYSTEM M: Alexander Aring <alex.aring@gmail.com> -M: Stefan Schmidt <stefan@osg.samsung.com> +M: Stefan Schmidt <stefan@datenfreihafen.org> L: linux-wpan@vger.kernel.org W: http://wpan.cakelab.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git @@ -8624,7 +8628,7 @@ MARVELL MWIFIEX WIRELESS DRIVER M: Amitkumar Karwar <amitkarwar@gmail.com> M: Nishant Sarmukadam <nishants@marvell.com> M: Ganapathi Bhat <gbhat@marvell.com> -M: Xinming Hu <huxm@marvell.com> +M: Xinming Hu <huxinming820@gmail.com> L: linux-wireless@vger.kernel.org S: Maintained F: drivers/net/wireless/marvell/mwifiex/ @@ -9070,7 +9074,7 @@ S: Maintained F: drivers/usb/mtu3/ MEGACHIPS STDPXXXX-GE-B850V3-FW LVDS/DP++ BRIDGES 
-M: Peter Senna Tschudin <peter.senna@collabora.com> +M: Peter Senna Tschudin <peter.senna@gmail.com> M: Martin Donnelly <martin.donnelly@ge.com> M: Martyn Welch <martyn.welch@collabora.co.uk> S: Maintained @@ -10209,11 +10213,13 @@ F: sound/soc/codecs/sgtl5000* NXP TDA998X DRM DRIVER M: Russell King <linux@armlinux.org.uk> -S: Supported +S: Maintained T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes F: drivers/gpu/drm/i2c/tda998x_drv.c F: include/drm/i2c/tda998x.h +F: include/dt-bindings/display/tda998x.h +K: "nxp,tda998x" NXP TFA9879 DRIVER M: Peter Rosin <peda@axentia.se> @@ -11477,6 +11483,15 @@ W: http://wireless.kernel.org/en/users/Drivers/p54 S: Obsolete F: drivers/net/wireless/intersil/prism54/ +PROC FILESYSTEM +R: Alexey Dobriyan <adobriyan@gmail.com> +L: linux-kernel@vger.kernel.org +L: linux-fsdevel@vger.kernel.org +S: Maintained +F: fs/proc/ +F: include/linux/proc_fs.h +F: tools/testing/selftests/proc/ + PROC SYSCTL M: "Luis R. Rodriguez" <mcgrof@kernel.org> M: Kees Cook <keescook@chromium.org> @@ -11809,9 +11824,9 @@ F: Documentation/devicetree/bindings/opp/kryo-cpufreq.txt F: drivers/cpufreq/qcom-cpufreq-kryo.c QUALCOMM EMAC GIGABIT ETHERNET DRIVER -M: Timur Tabi <timur@codeaurora.org> +M: Timur Tabi <timur@kernel.org> L: netdev@vger.kernel.org -S: Supported +S: Maintained F: drivers/net/ethernet/qualcomm/emac/ QUALCOMM HEXAGON ARCHITECTURE @@ -11822,7 +11837,7 @@ S: Supported F: arch/hexagon/ QUALCOMM HIDMA DRIVER -M: Sinan Kaya <okaya@codeaurora.org> +M: Sinan Kaya <okaya@kernel.org> L: linux-arm-kernel@lists.infradead.org L: linux-arm-msm@vger.kernel.org L: dmaengine@vger.kernel.org @@ -2,7 +2,7 @@ VERSION = 4 PATCHLEVEL = 18 SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = -rc6 NAME = Merciless Moray # *DOCUMENTATION* @@ -353,9 +353,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ else if [ -x /bin/bash ]; then echo /bin/bash; \ else echo sh; fi ; fi) -HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS) -HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS) -HOST_LFS_LIBS := $(shell getconf LFS_LIBS) +HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null) +HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null) +HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null) HOSTCC = gcc HOSTCXX = g++ @@ -507,11 +507,6 @@ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLA KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO endif -ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/cc-can-link.sh $(CC)), y) - CC_CAN_LINK := y - export CC_CAN_LINK -endif - # The expansion should be delayed until arch/$(SRCARCH)/Makefile is included. # Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile. # CC_VERSION_TEXT is referenced from Kconfig (so it needs export), @@ -1717,6 +1712,6 @@ endif # skip-makefile PHONY += FORCE FORCE: -# Declare the contents of the .PHONY variable as phony. We keep that +# Declare the contents of the PHONY variable as phony. We keep that # information in a variable so we can use it in if_changed and friends. 
.PHONY: $(PHONY) diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 6e921754c8fc..c210a25dd6da 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1180,13 +1180,10 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options, struct rusage32 __user *, ur) { - unsigned int status = 0; struct rusage r; - long err = kernel_wait4(pid, &status, options, &r); + long err = kernel_wait4(pid, ustatus, options, &r); if (err <= 0) return err; - if (put_user(status, ustatus)) - return -EFAULT; if (!ur) return err; if (put_tv_to_tv32(&ur->ru_utime, &r.ru_utime)) diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index e81bcd271be7..9cf59fc60eab 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -413,7 +413,7 @@ config ARC_HAS_DIV_REM config ARC_HAS_ACCL_REGS bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6)" - default n + default y help Depending on the configuration, CPU can contain accumulator reg-pair (also referred to as r58:r59). These can also be used by gcc as GPR so diff --git a/arch/arc/Makefile b/arch/arc/Makefile index d37f49d6a27f..6c1b20dd76ad 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -16,7 +16,7 @@ endif KBUILD_DEFCONFIG := nsim_700_defconfig -cflags-y += -fno-common -pipe -fno-builtin -D__linux__ +cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__ cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7 cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs @@ -140,16 +140,3 @@ dtbs: scripts archclean: $(Q)$(MAKE) $(clean)=$(boot) - -# Hacks to enable final link due to absence of link-time branch relexation -# and gcc choosing optimal(shorter) branches at -O3 -# -# vineetg Feb 2010: -mlong-calls switched off for overall kernel build -# However lib/decompress_inflate.o (.init.text) calls -# zlib_inflate_workspacesize (.text) causing relocation errors. 
-# Thus forcing all exten calls in this file to be long calls -export CFLAGS_decompress_inflate.o = -mmedium-calls -export CFLAGS_initramfs.o = -mmedium-calls -ifdef CONFIG_SMP -export CFLAGS_core.o = -mmedium-calls -endif diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index 09f85154c5a4..a635ea972304 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs/" CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index 09fed3ef22b6..aa507e423075 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index ea2f6d817d1a..eba07f468654 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig index ab231c040efe..098b19fbaa51 100644 --- a/arch/arc/configs/haps_hs_defconfig +++ b/arch/arc/configs/haps_hs_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" CONFIG_EXPERT=y CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig index cf449cbf440d..0104c404d897 100644 --- a/arch/arc/configs/haps_hs_smp_defconfig +++ b/arch/arc/configs/haps_hs_smp_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig index 1b54c72f4296..6491be0ddbc9 100644 --- a/arch/arc/configs/hsdk_defconfig +++ b/arch/arc/configs/hsdk_defconfig @@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig index 31c2c70b34a1..99e05cf63fca 100644 --- a/arch/arc/configs/nsim_700_defconfig +++ b/arch/arc/configs/nsim_700_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs/" CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig index a578c721d50f..0dc4f9b737e7 100644 --- a/arch/arc/configs/nsim_hs_defconfig +++ 
b/arch/arc/configs/nsim_hs_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig index 37d7395f3272..be3c30a15e54 100644 --- a/arch/arc/configs/nsim_hs_smp_defconfig +++ b/arch/arc/configs/nsim_hs_smp_defconfig @@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/" CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig index 1e1470e2a7f0..3a74b9b21772 100644 --- a/arch/arc/configs/nsimosci_defconfig +++ b/arch/arc/configs/nsimosci_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs/" CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig index 084a6e42685b..ea2834b4dc1d 100644 --- a/arch/arc/configs/nsimosci_hs_defconfig +++ b/arch/arc/configs/nsimosci_hs_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/" CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig index f36d47990415..80a5a1b4924b 100644 --- a/arch/arc/configs/nsimosci_hs_smp_defconfig +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig @@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/" CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set CONFIG_KPROBES=y diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig index 1aca2e8fd1ba..2cc87f909747 100644 --- a/arch/arc/configs/tb10x_defconfig +++ b/arch/arc/configs/tb10x_defconfig @@ -56,7 +56,6 @@ CONFIG_STMMAC_ETH=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -CONFIG_DEVPTS_MULTIPLE_INSTANCES=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h index ec36d5b6d435..29f3988c9424 100644 --- a/arch/arc/include/asm/entry-compact.h +++ b/arch/arc/include/asm/entry-compact.h @@ -234,6 +234,9 @@ POP gp RESTORE_R12_TO_R0 +#ifdef CONFIG_ARC_CURR_IN_REG + ld r25, [sp, 12] +#endif ld sp, [sp] /* restore original sp */ /* orig_r0, ECR, user_r25 skipped automatically */ .endm @@ -315,6 +318,9 @@ POP gp RESTORE_R12_TO_R0 +#ifdef CONFIG_ARC_CURR_IN_REG + ld r25, [sp, 12] +#endif ld sp, [sp] /* restore original sp */ /* orig_r0, ECR, user_r25 skipped automatically */ .endm diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h index 51597f344a62..302b0db8ea2b 100644 --- a/arch/arc/include/asm/entry.h +++ b/arch/arc/include/asm/entry.h @@ -86,9 +86,6 @@ POP r1 POP r0 -#ifdef CONFIG_ARC_CURR_IN_REG - ld r25, [sp, 12] -#endif .endm /*-------------------------------------------------------------- diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h index 
c28e6c347b49..871f3cb16af9 100644 --- a/arch/arc/include/asm/mach_desc.h +++ b/arch/arc/include/asm/mach_desc.h @@ -34,9 +34,7 @@ struct machine_desc { const char *name; const char **dt_compat; void (*init_early)(void); -#ifdef CONFIG_SMP void (*init_per_cpu)(unsigned int); -#endif void (*init_machine)(void); void (*init_late)(void); diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h index 109baa06831c..09ddddf71cc5 100644 --- a/arch/arc/include/asm/page.h +++ b/arch/arc/include/asm/page.h @@ -105,7 +105,7 @@ typedef pte_t * pgtable_t; #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) /* Default Permissions for stack/heaps pages (Non Executable) */ -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE) +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #define WANT_PAGE_VIRTUAL 1 diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index 8ec5599a0957..cf4be70d5892 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h @@ -377,7 +377,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, /* Decode a PTE containing swap "identifier "into constituents */ #define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f) -#define __swp_offset(pte_lookalike) ((pte_lookalike).val << 13) +#define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13) /* NOPs, to keep generic kernel happy */ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c index 538b36afe89e..62b185057c04 100644 --- a/arch/arc/kernel/irq.c +++ b/arch/arc/kernel/irq.c @@ -31,10 +31,10 @@ void __init init_IRQ(void) /* a SMP H/w block could do IPI IRQ request here */ if (plat_smp_ops.init_per_cpu) plat_smp_ops.init_per_cpu(smp_processor_id()); +#endif if (machine_desc->init_per_cpu) machine_desc->init_per_cpu(smp_processor_id()); -#endif } /* diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index 5ac3b547453f..4674541eba3f 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -47,7 +47,8 @@ SYSCALL_DEFINE0(arc_gettls) SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new) { struct pt_regs *regs = current_pt_regs(); - int uval = -EFAULT; + u32 uval; + int ret; /* * This is only for old cores lacking LLOCK/SCOND, which by defintion @@ -60,23 +61,47 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new) /* Z indicates to userspace if operation succeded */ regs->status32 &= ~STATUS_Z_MASK; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) - return -EFAULT; + ret = access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)); + if (!ret) + goto fail; +again: preempt_disable(); - if (__get_user(uval, uaddr)) - goto done; + ret = __get_user(uval, uaddr); + if (ret) + goto fault; - if (uval == expected) { - if (!__put_user(new, uaddr)) - regs->status32 |= STATUS_Z_MASK; - } + if (uval != expected) + goto out; -done: - preempt_enable(); + ret = __put_user(new, uaddr); + if (ret) + goto fault; + + regs->status32 |= STATUS_Z_MASK; +out: + preempt_enable(); return uval; + +fault: + preempt_enable(); + + if (unlikely(ret != -EFAULT)) + goto fail; + + down_read(¤t->mm->mmap_sem); + ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr, + FAULT_FLAG_WRITE, NULL); + up_read(¤t->mm->mmap_sem); + + if (likely(!ret)) + goto again; + +fail: + force_sig(SIGSEGV, current); + return ret; } #ifdef CONFIG_ISA_ARCV2 diff --git 
a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig index 19ab3cf98f0f..9356753c2ed8 100644 --- a/arch/arc/plat-hsdk/Kconfig +++ b/arch/arc/plat-hsdk/Kconfig @@ -7,5 +7,8 @@ menuconfig ARC_SOC_HSDK bool "ARC HS Development Kit SOC" + depends on ISA_ARCV2 + select ARC_HAS_ACCL_REGS select CLK_HSDK select RESET_HSDK + select MIGHT_HAVE_PCI diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c index 2958aedb649a..2588b842407c 100644 --- a/arch/arc/plat-hsdk/platform.c +++ b/arch/arc/plat-hsdk/platform.c @@ -42,6 +42,66 @@ static void __init hsdk_init_per_cpu(unsigned int cpu) #define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108) #define SDIO_UHS_REG_EXT_DIV_2 (2 << 30) +#define HSDK_GPIO_INTC (ARC_PERIPHERAL_BASE + 0x3000) + +static void __init hsdk_enable_gpio_intc_wire(void) +{ + /* + * Peripherals on CPU Card are wired to cpu intc via intermediate + * DW APB GPIO blocks (mainly for debouncing) + * + * --------------------- + * | snps,archs-intc | + * --------------------- + * | + * ---------------------- + * | snps,archs-idu-intc | + * ---------------------- + * | | | | | + * | [eth] [USB] [... other peripherals] + * | + * ------------------- + * | snps,dw-apb-intc | + * ------------------- + * | | | | + * [Bt] [HAPS] [... other peripherals] + * + * Current implementation of "irq-dw-apb-ictl" driver doesn't work well + * with stacked INTCs. In particular problem happens if its master INTC + * not yet instantiated. See discussion here - + * https://lkml.org/lkml/2015/3/4/755 + * + * So setup the first gpio block as a passive pass thru and hide it from + * DT hardware topology - connect intc directly to cpu intc + * The GPIO "wire" needs to be init nevertheless (here) + * + * One side adv is that peripheral interrupt handling avoids one nested + * intc ISR hop + * + * According to HSDK User's Manual [1], "Table 2 Interrupt Mapping" + * we have the following GPIO input lines used as sources of interrupt: + * - GPIO[0] - Bluetooth interrupt of RS9113 module + * - GPIO[2] - HAPS interrupt (on HapsTrak 3 connector) + * - GPIO[3] - Audio codec (MAX9880A) interrupt + * - GPIO[8-23] - Available on Arduino and PMOD_x headers + * For now there's no use of Arduino and PMOD_x headers in Linux + * use-case so we only enable lines 0, 2 and 3. + * + * [1] https://github.com/foss-for-synopsys-dwc-arc-processors/ARC-Development-Systems-Forum/wiki/docs/ARC_HSDK_User_Guide.pdf + */ +#define GPIO_INTEN (HSDK_GPIO_INTC + 0x30) +#define GPIO_INTMASK (HSDK_GPIO_INTC + 0x34) +#define GPIO_INTTYPE_LEVEL (HSDK_GPIO_INTC + 0x38) +#define GPIO_INT_POLARITY (HSDK_GPIO_INTC + 0x3c) +#define GPIO_INT_CONNECTED_MASK 0x0d + + iowrite32(0xffffffff, (void __iomem *) GPIO_INTMASK); + iowrite32(~GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTMASK); + iowrite32(0x00000000, (void __iomem *) GPIO_INTTYPE_LEVEL); + iowrite32(0xffffffff, (void __iomem *) GPIO_INT_POLARITY); + iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN); +} + static void __init hsdk_init_early(void) { /* @@ -62,6 +122,8 @@ static void __init hsdk_init_early(void) * minimum possible div-by-2. */ iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT); + + hsdk_enable_gpio_intc_wire(); } static const char *hsdk_compat[] __initconst = { diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 54eeb8d00bc6..843edfd000be 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1245,8 +1245,14 @@ config PCI VESA. If you have PCI, say Y, otherwise N. 
config PCI_DOMAINS - bool + bool "Support for multiple PCI domains" depends on PCI + help + Enable PCI domains kernel management. Say Y if your machine + has a PCI bus hierarchy that requires more than one PCI + domain (aka segment) to be correctly managed. Say N otherwise. + + If you don't know what to do here, say N. config PCI_DOMAINS_GENERIC def_bool PCI_DOMAINS diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi index f9e8667f5886..73b514dddf65 100644 --- a/arch/arm/boot/dts/am335x-bone-common.dtsi +++ b/arch/arm/boot/dts/am335x-bone-common.dtsi @@ -168,7 +168,6 @@ AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat3.mmc0_dat3 */ AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_cmd.mmc0_cmd */ AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_clk.mmc0_clk */ - AM33XX_IOPAD(0x9a0, PIN_INPUT | MUX_MODE4) /* mcasp0_aclkr.mmc0_sdwp */ >; }; diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi index ca294914bbb1..23ea381d363f 100644 --- a/arch/arm/boot/dts/am3517.dtsi +++ b/arch/arm/boot/dts/am3517.dtsi @@ -39,6 +39,8 @@ ti,davinci-ctrl-ram-size = <0x2000>; ti,davinci-rmii-en = /bits/ 8 <1>; local-mac-address = [ 00 00 00 00 00 00 ]; + clocks = <&emac_ick>; + clock-names = "ick"; }; davinci_mdio: ethernet@5c030000 { @@ -49,6 +51,8 @@ bus_freq = <1000000>; #address-cells = <1>; #size-cells = <0>; + clocks = <&emac_fck>; + clock-names = "fck"; }; uart4: serial@4809e000 { @@ -87,6 +91,11 @@ }; }; +/* Table Table 5-79 of the TRM shows 480ab000 is reserved */ +&usb_otg_hs { + status = "disabled"; +}; + &iva { status = "disabled"; }; diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts index 440351ad0b80..d4be3fd0b6f4 100644 --- a/arch/arm/boot/dts/am437x-sk-evm.dts +++ b/arch/arm/boot/dts/am437x-sk-evm.dts @@ -610,6 +610,8 @@ touchscreen-size-x = <480>; touchscreen-size-y = <272>; + + wakeup-source; }; tlv320aic3106: tlv320aic3106@1b { diff --git a/arch/arm/boot/dts/armada-385-synology-ds116.dts b/arch/arm/boot/dts/armada-385-synology-ds116.dts index 6782ce481ac9..d8769956cbfc 100644 --- a/arch/arm/boot/dts/armada-385-synology-ds116.dts +++ b/arch/arm/boot/dts/armada-385-synology-ds116.dts @@ -139,7 +139,7 @@ 3700 5 3900 6 4000 7>; - cooling-cells = <2>; + #cooling-cells = <2>; }; gpio-leds { diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi index 18edc9bc7927..929459c42760 100644 --- a/arch/arm/boot/dts/armada-38x.dtsi +++ b/arch/arm/boot/dts/armada-38x.dtsi @@ -547,7 +547,7 @@ thermal: thermal@e8078 { compatible = "marvell,armada380-thermal"; - reg = <0xe4078 0x4>, <0xe4074 0x4>; + reg = <0xe4078 0x4>, <0xe4070 0x8>; status = "okay"; }; diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi index 9fe4f5a6379e..2c4df2d2d4a6 100644 --- a/arch/arm/boot/dts/bcm-cygnus.dtsi +++ b/arch/arm/boot/dts/bcm-cygnus.dtsi @@ -216,7 +216,7 @@ reg = <0x18008000 0x100>; #address-cells = <1>; #size-cells = <0>; - interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>; + interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>; clock-frequency = <100000>; status = "disabled"; }; @@ -245,7 +245,7 @@ reg = <0x1800b000 0x100>; #address-cells = <1>; #size-cells = <0>; - interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>; + interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; clock-frequency = <100000>; status = "disabled"; }; @@ -256,7 +256,7 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_NONE>; + 
interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <0>; @@ -278,10 +278,10 @@ compatible = "brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>, - <GIC_SPI 97 IRQ_TYPE_NONE>, - <GIC_SPI 98 IRQ_TYPE_NONE>, - <GIC_SPI 99 IRQ_TYPE_NONE>; + interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>; }; }; @@ -291,7 +291,7 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <1>; @@ -313,10 +313,10 @@ compatible = "brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = <GIC_SPI 102 IRQ_TYPE_NONE>, - <GIC_SPI 103 IRQ_TYPE_NONE>, - <GIC_SPI 104 IRQ_TYPE_NONE>, - <GIC_SPI 105 IRQ_TYPE_NONE>; + interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>; }; }; diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi index 3f9cedd8011f..3084a7c95733 100644 --- a/arch/arm/boot/dts/bcm-hr2.dtsi +++ b/arch/arm/boot/dts/bcm-hr2.dtsi @@ -264,7 +264,7 @@ reg = <0x38000 0x50>; #address-cells = <1>; #size-cells = <0>; - interrupts = <GIC_SPI 95 IRQ_TYPE_NONE>; + interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>; clock-frequency = <100000>; }; @@ -279,7 +279,7 @@ reg = <0x3b000 0x50>; #address-cells = <1>; #size-cells = <0>; - interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>; + interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>; clock-frequency = <100000>; }; }; @@ -300,7 +300,7 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 186 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <0>; @@ -322,10 +322,10 @@ compatible = "brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = <GIC_SPI 182 IRQ_TYPE_NONE>, - <GIC_SPI 183 IRQ_TYPE_NONE>, - <GIC_SPI 184 IRQ_TYPE_NONE>, - <GIC_SPI 185 IRQ_TYPE_NONE>; + interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>; brcm,pcie-msi-inten; }; }; @@ -336,7 +336,7 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 192 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <1>; @@ -358,10 +358,10 @@ compatible = "brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = <GIC_SPI 188 IRQ_TYPE_NONE>, - <GIC_SPI 189 IRQ_TYPE_NONE>, - <GIC_SPI 190 IRQ_TYPE_NONE>, - <GIC_SPI 191 IRQ_TYPE_NONE>; + interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>; brcm,pcie-msi-inten; }; }; diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi index dcc55aa84583..09ba85046322 100644 --- a/arch/arm/boot/dts/bcm-nsp.dtsi +++ b/arch/arm/boot/dts/bcm-nsp.dtsi @@ -391,7 +391,7 @@ reg = <0x38000 0x50>; #address-cells = <1>; #size-cells = <0>; - interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>; + interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>; clock-frequency = <100000>; dma-coherent; status = "disabled"; @@ -496,7 +496,7 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 131 
IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <0>; @@ -519,10 +519,10 @@ compatible = "brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = <GIC_SPI 127 IRQ_TYPE_NONE>, - <GIC_SPI 128 IRQ_TYPE_NONE>, - <GIC_SPI 129 IRQ_TYPE_NONE>, - <GIC_SPI 130 IRQ_TYPE_NONE>; + interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>; brcm,pcie-msi-inten; }; }; @@ -533,7 +533,7 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <1>; @@ -556,10 +556,10 @@ compatible = "brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = <GIC_SPI 133 IRQ_TYPE_NONE>, - <GIC_SPI 134 IRQ_TYPE_NONE>, - <GIC_SPI 135 IRQ_TYPE_NONE>, - <GIC_SPI 136 IRQ_TYPE_NONE>; + interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>; brcm,pcie-msi-inten; }; }; @@ -570,7 +570,7 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <2>; @@ -593,10 +593,10 @@ compatible = "brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = <GIC_SPI 139 IRQ_TYPE_NONE>, - <GIC_SPI 140 IRQ_TYPE_NONE>, - <GIC_SPI 141 IRQ_TYPE_NONE>, - <GIC_SPI 142 IRQ_TYPE_NONE>; + interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>; brcm,pcie-msi-inten; }; }; diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index 9a076c409f4e..ef995e50ee12 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -365,7 +365,7 @@ i2c0: i2c@18009000 { compatible = "brcm,iproc-i2c"; reg = <0x18009000 0x50>; - interrupts = <GIC_SPI 121 IRQ_TYPE_NONE>; + interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; clock-frequency = <100000>; diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi index f6f1597b03df..0f4f817a9e22 100644 --- a/arch/arm/boot/dts/da850.dtsi +++ b/arch/arm/boot/dts/da850.dtsi @@ -549,11 +549,7 @@ gpio-controller; #gpio-cells = <2>; reg = <0x226000 0x1000>; - interrupts = <42 IRQ_TYPE_EDGE_BOTH - 43 IRQ_TYPE_EDGE_BOTH 44 IRQ_TYPE_EDGE_BOTH - 45 IRQ_TYPE_EDGE_BOTH 46 IRQ_TYPE_EDGE_BOTH - 47 IRQ_TYPE_EDGE_BOTH 48 IRQ_TYPE_EDGE_BOTH - 49 IRQ_TYPE_EDGE_BOTH 50 IRQ_TYPE_EDGE_BOTH>; + interrupts = <42 43 44 45 46 47 48 49 50>; ti,ngpio = <144>; ti,davinci-gpio-unbanked = <0>; status = "disabled"; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 9dcd14edc202..e03495a799ce 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -1580,7 +1580,6 @@ dr_mode = "otg"; snps,dis_u3_susphy_quirk; snps,dis_u2_susphy_quirk; - snps,dis_metastability_quirk; }; }; @@ -1608,6 +1607,7 @@ dr_mode = "otg"; snps,dis_u3_susphy_quirk; snps,dis_u2_susphy_quirk; + snps,dis_metastability_quirk; }; }; diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts index df9eca94d812..8a878687197b 100644 --- a/arch/arm/boot/dts/imx51-zii-rdu1.dts +++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts @@ -770,7 +770,7 @@ 
pinctrl_ts: tsgrp { fsl,pins = < - MX51_PAD_CSI1_D8__GPIO3_12 0x85 + MX51_PAD_CSI1_D8__GPIO3_12 0x04 MX51_PAD_CSI1_D9__GPIO3_13 0x85 >; }; diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi index 70483ce72ba6..77f8f030dd07 100644 --- a/arch/arm/boot/dts/imx6q.dtsi +++ b/arch/arm/boot/dts/imx6q.dtsi @@ -90,7 +90,7 @@ clocks = <&clks IMX6Q_CLK_ECSPI5>, <&clks IMX6Q_CLK_ECSPI5>; clock-names = "ipg", "per"; - dmas = <&sdma 11 7 1>, <&sdma 12 7 2>; + dmas = <&sdma 11 8 1>, <&sdma 12 8 2>; dma-names = "rx", "tx"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi index 19a075aee19e..f14df0baf2ab 100644 --- a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi +++ b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi @@ -692,7 +692,7 @@ dsa,member = <0 0>; eeprom-length = <512>; interrupt-parent = <&gpio6>; - interrupts = <3 IRQ_TYPE_EDGE_FALLING>; + interrupts = <3 IRQ_TYPE_LEVEL_LOW>; interrupt-controller; #interrupt-cells = <2>; diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index d8b94f47498b..4e4a55aad5c9 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -1344,7 +1344,7 @@ ranges = <0x81000000 0 0 0x08f80000 0 0x00010000 /* downstream I/O */ 0x82000000 0 0x08000000 0x08000000 0 0x00f00000>; /* non-prefetchable memory */ num-lanes = <1>; - interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "msi"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0x7>; diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts index bdf73cbcec3a..e7c3c563ff8f 100644 --- a/arch/arm/boot/dts/omap4-droid4-xt894.dts +++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts @@ -159,13 +159,7 @@ dais = <&mcbsp2_port>, <&mcbsp3_port>; }; -}; - -&dss { - status = "okay"; -}; -&gpio6 { pwm8: dmtimer-pwm-8 { pinctrl-names = "default"; pinctrl-0 = <&vibrator_direction_pin>; @@ -192,7 +186,10 @@ pwm-names = "enable", "direction"; direction-duty-cycle-ns = <10000000>; }; +}; +&dss { + status = "okay"; }; &dsi1 { diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index 486d4e7433ed..b38f8c240558 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi @@ -748,13 +748,13 @@ nand0: nand@ff900000 { #address-cells = <0x1>; #size-cells = <0x1>; - compatible = "denali,denali-nand-dt"; + compatible = "altr,socfpga-denali-nand"; reg = <0xff900000 0x100000>, <0xffb80000 0x10000>; reg-names = "nand_data", "denali_reg"; interrupts = <0x0 0x90 0x4>; dma-mask = <0xffffffff>; - clocks = <&nand_clk>; + clocks = <&nand_x_clk>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi index bead79e4b2aa..791ca15c799e 100644 --- a/arch/arm/boot/dts/socfpga_arria10.dtsi +++ b/arch/arm/boot/dts/socfpga_arria10.dtsi @@ -593,8 +593,7 @@ #size-cells = <0>; reg = <0xffda5000 0x100>; interrupts = <0 102 4>; - num-chipselect = <4>; - bus-num = <0>; + num-cs = <4>; /*32bit_access;*/ tx-dma-channel = <&pdma 16>; rx-dma-channel = <&pdma 17>; @@ -633,7 +632,7 @@ nand: nand@ffb90000 { #address-cells = <1>; #size-cells = <1>; - compatible = "denali,denali-nand-dt", "altr,socfpga-denali-nand"; + compatible = "altr,socfpga-denali-nand"; reg = <0xffb90000 0x72000>, <0xffb80000 0x10000>; reg-names = "nand_data", "denali_reg"; diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile index 1e9f7af8f70f..3157be413297 100644 --- 
a/arch/arm/common/Makefile +++ b/arch/arm/common/Makefile @@ -10,7 +10,7 @@ obj-$(CONFIG_DMABOUNCE) += dmabounce.o obj-$(CONFIG_SHARP_LOCOMO) += locomo.o obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o obj-$(CONFIG_SHARP_SCOOP) += scoop.o -obj-$(CONFIG_SMP) += secure_cntvoff.o +obj-$(CONFIG_CPU_V7) += secure_cntvoff.o obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o CFLAGS_REMOVE_mcpm_entry.o = -pg diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig index 054591dc9a00..4cd2f4a2bff4 100644 --- a/arch/arm/configs/imx_v4_v5_defconfig +++ b/arch/arm/configs/imx_v4_v5_defconfig @@ -141,9 +141,11 @@ CONFIG_USB_STORAGE=y CONFIG_USB_CHIPIDEA=y CONFIG_USB_CHIPIDEA_UDC=y CONFIG_USB_CHIPIDEA_HOST=y +CONFIG_USB_CHIPIDEA_ULPI=y CONFIG_NOP_USB_XCEIV=y CONFIG_USB_GADGET=y CONFIG_USB_ETH=m +CONFIG_USB_ULPI_BUS=y CONFIG_MMC=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index f70507ab91ee..200ebda47e0c 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -302,6 +302,7 @@ CONFIG_USB_STORAGE=y CONFIG_USB_CHIPIDEA=y CONFIG_USB_CHIPIDEA_UDC=y CONFIG_USB_CHIPIDEA_HOST=y +CONFIG_USB_CHIPIDEA_ULPI=y CONFIG_USB_SERIAL=m CONFIG_USB_SERIAL_GENERIC=y CONFIG_USB_SERIAL_FTDI_SIO=m @@ -338,6 +339,7 @@ CONFIG_USB_GADGETFS=m CONFIG_USB_FUNCTIONFS=m CONFIG_USB_MASS_STORAGE=m CONFIG_USB_G_SERIAL=m +CONFIG_USB_ULPI_BUS=y CONFIG_MMC=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 7e1c543162c3..8f6be1982545 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -1,5 +1,4 @@ CONFIG_SYSVIPC=y -CONFIG_FHANDLE=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_CGROUPS=y @@ -10,20 +9,10 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y CONFIG_CMDLINE_PARTITION=y -CONFIG_ARCH_MULTI_V7=y -# CONFIG_ARCH_MULTI_V5 is not set -# CONFIG_ARCH_MULTI_V4 is not set CONFIG_ARCH_VIRT=y CONFIG_ARCH_ALPINE=y CONFIG_ARCH_ARTPEC=y CONFIG_MACH_ARTPEC6=y -CONFIG_ARCH_MVEBU=y -CONFIG_MACH_ARMADA_370=y -CONFIG_MACH_ARMADA_375=y -CONFIG_MACH_ARMADA_38X=y -CONFIG_MACH_ARMADA_39X=y -CONFIG_MACH_ARMADA_XP=y -CONFIG_MACH_DOVE=y CONFIG_ARCH_AT91=y CONFIG_SOC_SAMA5D2=y CONFIG_SOC_SAMA5D3=y @@ -32,9 +21,9 @@ CONFIG_ARCH_BCM=y CONFIG_ARCH_BCM_CYGNUS=y CONFIG_ARCH_BCM_HR2=y CONFIG_ARCH_BCM_NSP=y -CONFIG_ARCH_BCM_21664=y -CONFIG_ARCH_BCM_281XX=y CONFIG_ARCH_BCM_5301X=y +CONFIG_ARCH_BCM_281XX=y +CONFIG_ARCH_BCM_21664=y CONFIG_ARCH_BCM2835=y CONFIG_ARCH_BCM_63XX=y CONFIG_ARCH_BRCMSTB=y @@ -43,14 +32,14 @@ CONFIG_MACH_BERLIN_BG2=y CONFIG_MACH_BERLIN_BG2CD=y CONFIG_MACH_BERLIN_BG2Q=y CONFIG_ARCH_DIGICOLOR=y +CONFIG_ARCH_EXYNOS=y +CONFIG_EXYNOS5420_MCPM=y CONFIG_ARCH_HIGHBANK=y CONFIG_ARCH_HISI=y CONFIG_ARCH_HI3xxx=y -CONFIG_ARCH_HIX5HD2=y CONFIG_ARCH_HIP01=y CONFIG_ARCH_HIP04=y -CONFIG_ARCH_KEYSTONE=y -CONFIG_ARCH_MESON=y +CONFIG_ARCH_HIX5HD2=y CONFIG_ARCH_MXC=y CONFIG_SOC_IMX50=y CONFIG_SOC_IMX51=y @@ -60,29 +49,30 @@ CONFIG_SOC_IMX6SL=y CONFIG_SOC_IMX6SX=y CONFIG_SOC_IMX6UL=y CONFIG_SOC_IMX7D=y -CONFIG_SOC_VF610=y CONFIG_SOC_LS1021A=y +CONFIG_SOC_VF610=y +CONFIG_ARCH_KEYSTONE=y +CONFIG_ARCH_MEDIATEK=y +CONFIG_ARCH_MESON=y +CONFIG_ARCH_MVEBU=y +CONFIG_MACH_ARMADA_370=y +CONFIG_MACH_ARMADA_375=y +CONFIG_MACH_ARMADA_38X=y +CONFIG_MACH_ARMADA_39X=y +CONFIG_MACH_ARMADA_XP=y +CONFIG_MACH_DOVE=y 
CONFIG_ARCH_OMAP3=y CONFIG_ARCH_OMAP4=y CONFIG_SOC_OMAP5=y CONFIG_SOC_AM33XX=y CONFIG_SOC_AM43XX=y CONFIG_SOC_DRA7XX=y +CONFIG_ARCH_SIRF=y CONFIG_ARCH_QCOM=y -CONFIG_ARCH_MEDIATEK=y CONFIG_ARCH_MSM8X60=y CONFIG_ARCH_MSM8960=y CONFIG_ARCH_MSM8974=y CONFIG_ARCH_ROCKCHIP=y -CONFIG_ARCH_SOCFPGA=y -CONFIG_PLAT_SPEAR=y -CONFIG_ARCH_SPEAR13XX=y -CONFIG_MACH_SPEAR1310=y -CONFIG_MACH_SPEAR1340=y -CONFIG_ARCH_STI=y -CONFIG_ARCH_STM32=y -CONFIG_ARCH_EXYNOS=y -CONFIG_EXYNOS5420_MCPM=y CONFIG_ARCH_RENESAS=y CONFIG_ARCH_EMEV2=y CONFIG_ARCH_R7S72100=y @@ -99,40 +89,33 @@ CONFIG_ARCH_R8A7792=y CONFIG_ARCH_R8A7793=y CONFIG_ARCH_R8A7794=y CONFIG_ARCH_SH73A0=y +CONFIG_ARCH_SOCFPGA=y +CONFIG_PLAT_SPEAR=y +CONFIG_ARCH_SPEAR13XX=y +CONFIG_MACH_SPEAR1310=y +CONFIG_MACH_SPEAR1340=y +CONFIG_ARCH_STI=y +CONFIG_ARCH_STM32=y CONFIG_ARCH_SUNXI=y -CONFIG_ARCH_SIRF=y CONFIG_ARCH_TEGRA=y -CONFIG_ARCH_TEGRA_2x_SOC=y -CONFIG_ARCH_TEGRA_3x_SOC=y -CONFIG_ARCH_TEGRA_114_SOC=y -CONFIG_ARCH_TEGRA_124_SOC=y CONFIG_ARCH_UNIPHIER=y CONFIG_ARCH_U8500=y -CONFIG_MACH_HREFV60=y -CONFIG_MACH_SNOWBALL=y CONFIG_ARCH_VEXPRESS=y CONFIG_ARCH_VEXPRESS_TC2_PM=y CONFIG_ARCH_WM8850=y CONFIG_ARCH_ZYNQ=y -CONFIG_TRUSTED_FOUNDATIONS=y -CONFIG_PCI=y -CONFIG_PCI_HOST_GENERIC=y -CONFIG_PCI_DRA7XX=y -CONFIG_PCI_DRA7XX_EP=y -CONFIG_PCI_KEYSTONE=y -CONFIG_PCI_MSI=y +CONFIG_PCIEPORTBUS=y CONFIG_PCI_MVEBU=y CONFIG_PCI_TEGRA=y CONFIG_PCI_RCAR_GEN2=y CONFIG_PCIE_RCAR=y -CONFIG_PCIEPORTBUS=y +CONFIG_PCI_DRA7XX_EP=y +CONFIG_PCI_KEYSTONE=y CONFIG_PCI_ENDPOINT=y CONFIG_PCI_ENDPOINT_CONFIGFS=y CONFIG_PCI_EPF_TEST=m CONFIG_SMP=y CONFIG_NR_CPUS=16 -CONFIG_HIGHPTE=y -CONFIG_CMA=y CONFIG_SECCOMP=y CONFIG_ARM_APPENDED_DTB=y CONFIG_ARM_ATAG_DTB_COMPAT=y @@ -145,14 +128,14 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=m CONFIG_CPU_FREQ_GOV_USERSPACE=m CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPUFREQ_DT=y CONFIG_ARM_IMX6Q_CPUFREQ=y CONFIG_QORIQ_CPUFREQ=y CONFIG_CPU_IDLE=y CONFIG_ARM_CPUIDLE=y -CONFIG_NEON=y -CONFIG_KERNEL_MODE_NEON=y CONFIG_ARM_ZYNQ_CPUIDLE=y CONFIG_ARM_EXYNOS_CPUIDLE=y +CONFIG_KERNEL_MODE_NEON=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -170,23 +153,13 @@ CONFIG_IPV6_MIP6=m CONFIG_IPV6_TUNNEL=m CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_NET_DSA=m -CONFIG_NET_SWITCHDEV=y CONFIG_CAN=y -CONFIG_CAN_RAW=y -CONFIG_CAN_BCM=y -CONFIG_CAN_DEV=y CONFIG_CAN_AT91=m CONFIG_CAN_FLEXCAN=m -CONFIG_CAN_RCAR=m +CONFIG_CAN_SUN4I=y CONFIG_CAN_XILINXCAN=y +CONFIG_CAN_RCAR=m CONFIG_CAN_MCP251X=y -CONFIG_NET_DSA_BCM_SF2=m -CONFIG_B53=m -CONFIG_B53_SPI_DRIVER=m -CONFIG_B53_MDIO_DRIVER=m -CONFIG_B53_MMAP_DRIVER=m -CONFIG_B53_SRAB_DRIVER=m -CONFIG_CAN_SUN4I=y CONFIG_BT=m CONFIG_BT_HCIUART=m CONFIG_BT_HCIUART_BCM=y @@ -199,11 +172,9 @@ CONFIG_RFKILL_INPUT=y CONFIG_RFKILL_GPIO=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y -CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=64 CONFIG_OMAP_OCP2SCP=y CONFIG_SIMPLE_PM_BUS=y -CONFIG_SUNXI_RSB=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y @@ -236,7 +207,6 @@ CONFIG_PCI_ENDPOINT_TEST=m CONFIG_EEPROM_AT24=y CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y -CONFIG_SCSI_MULTI_LUN=y CONFIG_ATA=y CONFIG_SATA_AHCI=y CONFIG_SATA_AHCI_PLATFORM=y @@ -251,14 +221,20 @@ CONFIG_SATA_MV=y CONFIG_SATA_RCAR=y CONFIG_NETDEVICES=y CONFIG_VIRTIO_NET=y -CONFIG_HIX5HD2_GMAC=y +CONFIG_B53_SPI_DRIVER=m +CONFIG_B53_MDIO_DRIVER=m +CONFIG_B53_MMAP_DRIVER=m +CONFIG_B53_SRAB_DRIVER=m +CONFIG_NET_DSA_BCM_SF2=m CONFIG_SUN4I_EMAC=y -CONFIG_MACB=y CONFIG_BCMGENET=m CONFIG_BGMAC_BCMA=y CONFIG_SYSTEMPORT=m +CONFIG_MACB=y CONFIG_NET_CALXEDA_XGMAC=y 
CONFIG_GIANFAR=y +CONFIG_HIX5HD2_GMAC=y +CONFIG_E1000E=y CONFIG_IGB=y CONFIG_MV643XX_ETH=y CONFIG_MVNETA=y @@ -268,19 +244,17 @@ CONFIG_R8169=y CONFIG_SH_ETH=y CONFIG_SMSC911X=y CONFIG_STMMAC_ETH=y -CONFIG_STMMAC_PLATFORM=y CONFIG_DWMAC_DWC_QOS_ETH=y CONFIG_TI_CPSW=y CONFIG_XILINX_EMACLITE=y CONFIG_AT803X_PHY=y -CONFIG_MARVELL_PHY=y -CONFIG_SMSC_PHY=y CONFIG_BROADCOM_PHY=y CONFIG_ICPLUS_PHY=y -CONFIG_REALTEK_PHY=y +CONFIG_MARVELL_PHY=y CONFIG_MICREL_PHY=y -CONFIG_FIXED_PHY=y +CONFIG_REALTEK_PHY=y CONFIG_ROCKCHIP_PHY=y +CONFIG_SMSC_PHY=y CONFIG_USB_PEGASUS=y CONFIG_USB_RTL8152=m CONFIG_USB_LAN78XX=m @@ -288,29 +262,29 @@ CONFIG_USB_USBNET=y CONFIG_USB_NET_SMSC75XX=y CONFIG_USB_NET_SMSC95XX=y CONFIG_BRCMFMAC=m -CONFIG_RT2X00=m -CONFIG_RT2800USB=m CONFIG_MWIFIEX=m CONFIG_MWIFIEX_SDIO=m +CONFIG_RT2X00=m +CONFIG_RT2800USB=m CONFIG_INPUT_JOYDEV=y CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_QT1070=m CONFIG_KEYBOARD_GPIO=y CONFIG_KEYBOARD_TEGRA=y -CONFIG_KEYBOARD_SPEAR=y +CONFIG_KEYBOARD_SAMSUNG=m CONFIG_KEYBOARD_ST_KEYSCAN=y +CONFIG_KEYBOARD_SPEAR=y CONFIG_KEYBOARD_CROS_EC=m -CONFIG_KEYBOARD_SAMSUNG=m CONFIG_MOUSE_PS2_ELANTECH=y CONFIG_MOUSE_CYAPA=m CONFIG_MOUSE_ELAN_I2C=y CONFIG_INPUT_TOUCHSCREEN=y CONFIG_TOUCHSCREEN_ATMEL_MXT=m CONFIG_TOUCHSCREEN_MMS114=m +CONFIG_TOUCHSCREEN_WM97XX=m CONFIG_TOUCHSCREEN_ST1232=m CONFIG_TOUCHSCREEN_STMPE=y CONFIG_TOUCHSCREEN_SUN4I=y -CONFIG_TOUCHSCREEN_WM97XX=m CONFIG_INPUT_MISC=y CONFIG_INPUT_MAX77693_HAPTIC=m CONFIG_INPUT_MAX8997_HAPTIC=m @@ -327,13 +301,12 @@ CONFIG_SERIAL_8250_DW=y CONFIG_SERIAL_8250_EM=y CONFIG_SERIAL_8250_MT6577=y CONFIG_SERIAL_8250_UNIPHIER=y +CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y CONFIG_SERIAL_ATMEL=y CONFIG_SERIAL_ATMEL_CONSOLE=y CONFIG_SERIAL_ATMEL_TTYAT=y -CONFIG_SERIAL_BCM63XX=y -CONFIG_SERIAL_BCM63XX_CONSOLE=y CONFIG_SERIAL_MESON=y CONFIG_SERIAL_MESON_CONSOLE=y CONFIG_SERIAL_SAMSUNG=y @@ -345,15 +318,14 @@ CONFIG_SERIAL_IMX=y CONFIG_SERIAL_IMX_CONSOLE=y CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=20 -CONFIG_SERIAL_SH_SCI_CONSOLE=y -CONFIG_SERIAL_SH_SCI_DMA=y CONFIG_SERIAL_MSM=y CONFIG_SERIAL_MSM_CONSOLE=y CONFIG_SERIAL_VT8500=y CONFIG_SERIAL_VT8500_CONSOLE=y -CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_OMAP=y CONFIG_SERIAL_OMAP_CONSOLE=y +CONFIG_SERIAL_BCM63XX=y +CONFIG_SERIAL_BCM63XX_CONSOLE=y CONFIG_SERIAL_XILINX_PS_UART=y CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y CONFIG_SERIAL_FSL_LPUART=y @@ -365,12 +337,10 @@ CONFIG_SERIAL_ST_ASC_CONSOLE=y CONFIG_SERIAL_STM32=y CONFIG_SERIAL_STM32_CONSOLE=y CONFIG_SERIAL_DEV_BUS=y -CONFIG_HVC_DRIVER=y CONFIG_VIRTIO_CONSOLE=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_ST=y CONFIG_I2C_CHARDEV=y -CONFIG_I2C_DAVINCI=y -CONFIG_I2C_MESON=y -CONFIG_I2C_MUX=y CONFIG_I2C_ARB_GPIO_CHALLENGE=m CONFIG_I2C_MUX_PCA954x=y CONFIG_I2C_MUX_PINCTRL=y @@ -378,12 +348,13 @@ CONFIG_I2C_DEMUX_PINCTRL=y CONFIG_I2C_AT91=m CONFIG_I2C_BCM2835=y CONFIG_I2C_CADENCE=y +CONFIG_I2C_DAVINCI=y CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_I2C_DIGICOLOR=m CONFIG_I2C_EMEV2=m CONFIG_I2C_GPIO=m -CONFIG_I2C_EXYNOS5=y CONFIG_I2C_IMX=y +CONFIG_I2C_MESON=y CONFIG_I2C_MV64XXX=y CONFIG_I2C_RIIC=y CONFIG_I2C_RK3X=y @@ -427,7 +398,6 @@ CONFIG_SPI_SPIDEV=y CONFIG_SPMI=y CONFIG_PINCTRL_AS3722=y CONFIG_PINCTRL_PALMAS=y -CONFIG_PINCTRL_BCM2835=y CONFIG_PINCTRL_APQ8064=y CONFIG_PINCTRL_APQ8084=y CONFIG_PINCTRL_IPQ8064=y @@ -437,25 +407,33 @@ CONFIG_PINCTRL_MSM8X74=y CONFIG_PINCTRL_MSM8916=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y CONFIG_PINCTRL_QCOM_SSBI_PMIC=y -CONFIG_GPIO_GENERIC_PLATFORM=y CONFIG_GPIO_DAVINCI=y 
CONFIG_GPIO_DWAPB=y CONFIG_GPIO_EM=y CONFIG_GPIO_RCAR=y +CONFIG_GPIO_SYSCON=y CONFIG_GPIO_UNIPHIER=y CONFIG_GPIO_XILINX=y CONFIG_GPIO_ZYNQ=y CONFIG_GPIO_PCA953X=y CONFIG_GPIO_PCA953X_IRQ=y CONFIG_GPIO_PCF857X=y -CONFIG_GPIO_TWL4030=y CONFIG_GPIO_PALMAS=y -CONFIG_GPIO_SYSCON=y CONFIG_GPIO_TPS6586X=y CONFIG_GPIO_TPS65910=y +CONFIG_GPIO_TWL4030=y +CONFIG_POWER_AVS=y +CONFIG_ROCKCHIP_IODOMAIN=y +CONFIG_POWER_RESET_AS3722=y +CONFIG_POWER_RESET_GPIO=y +CONFIG_POWER_RESET_GPIO_RESTART=y +CONFIG_POWER_RESET_ST=y +CONFIG_POWER_RESET_KEYSTONE=y +CONFIG_POWER_RESET_RMOBILE=y CONFIG_BATTERY_ACT8945A=y CONFIG_BATTERY_CPCAP=m CONFIG_BATTERY_SBS=y +CONFIG_AXP20X_POWER=m CONFIG_BATTERY_MAX17040=m CONFIG_BATTERY_MAX17042=m CONFIG_CHARGER_CPCAP=m @@ -464,15 +442,6 @@ CONFIG_CHARGER_MAX77693=m CONFIG_CHARGER_MAX8997=m CONFIG_CHARGER_MAX8998=m CONFIG_CHARGER_TPS65090=y -CONFIG_AXP20X_POWER=m -CONFIG_POWER_RESET_AS3722=y -CONFIG_POWER_RESET_GPIO=y -CONFIG_POWER_RESET_GPIO_RESTART=y -CONFIG_POWER_RESET_KEYSTONE=y -CONFIG_POWER_RESET_RMOBILE=y -CONFIG_POWER_RESET_ST=y -CONFIG_POWER_AVS=y -CONFIG_ROCKCHIP_IODOMAIN=y CONFIG_SENSORS_IIO_HWMON=y CONFIG_SENSORS_LM90=y CONFIG_SENSORS_LM95245=y @@ -480,14 +449,12 @@ CONFIG_SENSORS_NTC_THERMISTOR=m CONFIG_SENSORS_PWM_FAN=m CONFIG_SENSORS_INA2XX=m CONFIG_CPU_THERMAL=y -CONFIG_BCM2835_THERMAL=m -CONFIG_BRCMSTB_THERMAL=m CONFIG_IMX_THERMAL=y CONFIG_ROCKCHIP_THERMAL=y CONFIG_RCAR_THERMAL=y CONFIG_ARMADA_THERMAL=y -CONFIG_DAVINCI_WATCHDOG=m -CONFIG_EXYNOS_THERMAL=m +CONFIG_BCM2835_THERMAL=m +CONFIG_BRCMSTB_THERMAL=m CONFIG_ST_THERMAL_MEMMAP=y CONFIG_WATCHDOG=y CONFIG_DA9063_WATCHDOG=m @@ -495,20 +462,24 @@ CONFIG_XILINX_WATCHDOG=y CONFIG_ARM_SP805_WATCHDOG=y CONFIG_AT91SAM9X_WATCHDOG=y CONFIG_SAMA5D4_WATCHDOG=y +CONFIG_DW_WATCHDOG=y +CONFIG_DAVINCI_WATCHDOG=m CONFIG_ORION_WATCHDOG=y CONFIG_RN5T618_WATCHDOG=y -CONFIG_ST_LPC_WATCHDOG=y CONFIG_SUNXI_WATCHDOG=y CONFIG_IMX2_WDT=y +CONFIG_ST_LPC_WATCHDOG=y CONFIG_TEGRA_WATCHDOG=m CONFIG_MESON_WATCHDOG=y -CONFIG_DW_WATCHDOG=y CONFIG_DIGICOLOR_WATCHDOG=y CONFIG_RENESAS_WDT=m -CONFIG_BCM2835_WDT=y CONFIG_BCM47XX_WDT=y -CONFIG_BCM7038_WDT=m +CONFIG_BCM2835_WDT=y CONFIG_BCM_KONA_WDT=y +CONFIG_BCM7038_WDT=m +CONFIG_BCMA_HOST_SOC=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y CONFIG_MFD_ACT8945A=y CONFIG_MFD_AS3711=y CONFIG_MFD_AS3722=y @@ -516,7 +487,6 @@ CONFIG_MFD_ATMEL_FLEXCOM=y CONFIG_MFD_ATMEL_HLCDC=m CONFIG_MFD_BCM590XX=y CONFIG_MFD_AC100=y -CONFIG_MFD_AXP20X=y CONFIG_MFD_AXP20X_I2C=y CONFIG_MFD_AXP20X_RSB=y CONFIG_MFD_CROS_EC=m @@ -529,11 +499,11 @@ CONFIG_MFD_MAX77693=m CONFIG_MFD_MAX8907=y CONFIG_MFD_MAX8997=y CONFIG_MFD_MAX8998=y -CONFIG_MFD_RK808=y CONFIG_MFD_CPCAP=y CONFIG_MFD_PM8XXX=y CONFIG_MFD_QCOM_RPM=y CONFIG_MFD_SPMI_PMIC=y +CONFIG_MFD_RK808=y CONFIG_MFD_RN5T618=y CONFIG_MFD_SEC_CORE=y CONFIG_MFD_STMPE=y @@ -543,10 +513,10 @@ CONFIG_MFD_TPS65217=y CONFIG_MFD_TPS65218=y CONFIG_MFD_TPS6586X=y CONFIG_MFD_TPS65910=y -CONFIG_REGULATOR_ACT8945A=y -CONFIG_REGULATOR_AB8500=y CONFIG_REGULATOR_ACT8865=y +CONFIG_REGULATOR_ACT8945A=y CONFIG_REGULATOR_ANATOP=y +CONFIG_REGULATOR_AB8500=y CONFIG_REGULATOR_AS3711=y CONFIG_REGULATOR_AS3722=y CONFIG_REGULATOR_AXP20X=y @@ -554,10 +524,7 @@ CONFIG_REGULATOR_BCM590XX=y CONFIG_REGULATOR_CPCAP=y CONFIG_REGULATOR_DA9210=y CONFIG_REGULATOR_FAN53555=y -CONFIG_REGULATOR_RK808=y CONFIG_REGULATOR_GPIO=y -CONFIG_MFD_SYSCON=y -CONFIG_POWER_RESET_SYSCON=y CONFIG_REGULATOR_LP872X=y CONFIG_REGULATOR_MAX14577=m CONFIG_REGULATOR_MAX8907=y @@ -571,7 +538,8 @@ CONFIG_REGULATOR_PALMAS=y 
CONFIG_REGULATOR_PBIAS=y CONFIG_REGULATOR_PWM=y CONFIG_REGULATOR_QCOM_RPM=y -CONFIG_REGULATOR_QCOM_SMD_RPM=y +CONFIG_REGULATOR_QCOM_SMD_RPM=m +CONFIG_REGULATOR_RK808=y CONFIG_REGULATOR_RN5T618=y CONFIG_REGULATOR_S2MPS11=y CONFIG_REGULATOR_S5M8767=y @@ -592,18 +560,17 @@ CONFIG_MEDIA_CEC_SUPPORT=y CONFIG_MEDIA_CONTROLLER=y CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_MEDIA_USB_SUPPORT=y -CONFIG_USB_VIDEO_CLASS=y -CONFIG_USB_GSPCA=y +CONFIG_USB_VIDEO_CLASS=m CONFIG_V4L_PLATFORM_DRIVERS=y CONFIG_SOC_CAMERA=m CONFIG_SOC_CAMERA_PLATFORM=m -CONFIG_VIDEO_RCAR_VIN=m -CONFIG_VIDEO_ATMEL_ISI=m CONFIG_VIDEO_SAMSUNG_EXYNOS4_IS=m CONFIG_VIDEO_S5P_FIMC=m CONFIG_VIDEO_S5P_MIPI_CSIS=m CONFIG_VIDEO_EXYNOS_FIMC_LITE=m CONFIG_VIDEO_EXYNOS4_FIMC_IS=m +CONFIG_VIDEO_RCAR_VIN=m +CONFIG_VIDEO_ATMEL_ISI=m CONFIG_V4L_MEM2MEM_DRIVERS=y CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m CONFIG_VIDEO_SAMSUNG_S5P_MFC=m @@ -614,19 +581,15 @@ CONFIG_VIDEO_STI_DELTA=m CONFIG_VIDEO_RENESAS_JPU=m CONFIG_VIDEO_RENESAS_VSP1=m CONFIG_V4L_TEST_DRIVERS=y +CONFIG_VIDEO_VIVID=m CONFIG_CEC_PLATFORM_DRIVERS=y CONFIG_VIDEO_SAMSUNG_S5P_CEC=m # CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set CONFIG_VIDEO_ADV7180=m CONFIG_VIDEO_ML86V7667=m CONFIG_DRM=y -CONFIG_DRM_I2C_ADV7511=m -CONFIG_DRM_I2C_ADV7511_AUDIO=y # CONFIG_DRM_I2C_CH7006 is not set # CONFIG_DRM_I2C_SIL164 is not set -CONFIG_DRM_DUMB_VGA_DAC=m -CONFIG_DRM_NXP_PTN3460=m -CONFIG_DRM_PARADE_PS8622=m CONFIG_DRM_NOUVEAU=m CONFIG_DRM_EXYNOS=m CONFIG_DRM_EXYNOS_FIMD=y @@ -645,13 +608,18 @@ CONFIG_DRM_RCAR_LVDS=y CONFIG_DRM_SUN4I=m CONFIG_DRM_FSL_DCU=m CONFIG_DRM_TEGRA=y +CONFIG_DRM_PANEL_SIMPLE=y CONFIG_DRM_PANEL_SAMSUNG_LD9040=m CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03=m CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m -CONFIG_DRM_PANEL_SIMPLE=y +CONFIG_DRM_DUMB_VGA_DAC=m +CONFIG_DRM_NXP_PTN3460=m +CONFIG_DRM_PARADE_PS8622=m CONFIG_DRM_SII9234=m +CONFIG_DRM_I2C_ADV7511=m +CONFIG_DRM_I2C_ADV7511_AUDIO=y CONFIG_DRM_STI=m -CONFIG_DRM_VC4=y +CONFIG_DRM_VC4=m CONFIG_DRM_ETNAVIV=m CONFIG_DRM_MXSFB=m CONFIG_FB_ARMCLCD=y @@ -659,8 +627,6 @@ CONFIG_FB_EFI=y CONFIG_FB_WM8505=y CONFIG_FB_SH_MOBILE_LCDC=y CONFIG_FB_SIMPLE=y -CONFIG_BACKLIGHT_LCD_SUPPORT=y -CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_LCD_PLATFORM=m CONFIG_BACKLIGHT_PWM=y CONFIG_BACKLIGHT_AS3711=y @@ -668,7 +634,6 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_SOUND=m CONFIG_SND=m -CONFIG_SND_DYNAMIC_MINORS=y CONFIG_SND_HDA_TEGRA=m CONFIG_SND_HDA_INPUT_BEEP=y CONFIG_SND_HDA_PATCH_LOADER=y @@ -692,7 +657,7 @@ CONFIG_SND_SOC_SNOW=m CONFIG_SND_SOC_ODROID=m CONFIG_SND_SOC_SH4_FSI=m CONFIG_SND_SOC_RCAR=m -CONFIG_SND_SIMPLE_SCU_CARD=m +CONFIG_SND_SOC_STI=m CONFIG_SND_SUN4I_CODEC=m CONFIG_SND_SOC_TEGRA=m CONFIG_SND_SOC_TEGRA20_I2S=m @@ -703,31 +668,25 @@ CONFIG_SND_SOC_TEGRA_WM8903=m CONFIG_SND_SOC_TEGRA_WM9712=m CONFIG_SND_SOC_TEGRA_TRIMSLICE=m CONFIG_SND_SOC_TEGRA_ALC5632=m -CONFIG_SND_SOC_CPCAP=m CONFIG_SND_SOC_TEGRA_MAX98090=m CONFIG_SND_SOC_AK4642=m +CONFIG_SND_SOC_CPCAP=m CONFIG_SND_SOC_SGTL5000=m CONFIG_SND_SOC_SPDIF=m -CONFIG_SND_SOC_WM8978=m -CONFIG_SND_SOC_STI=m CONFIG_SND_SOC_STI_SAS=m -CONFIG_SND_SIMPLE_CARD=m +CONFIG_SND_SOC_WM8978=m +CONFIG_SND_SIMPLE_SCU_CARD=m CONFIG_USB=y CONFIG_USB_OTG=y CONFIG_USB_XHCI_HCD=y CONFIG_USB_XHCI_MVEBU=y -CONFIG_USB_XHCI_RCAR=m CONFIG_USB_XHCI_TEGRA=m CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_MSM=m -CONFIG_USB_EHCI_EXYNOS=y -CONFIG_USB_EHCI_TEGRA=y CONFIG_USB_EHCI_HCD_STI=y -CONFIG_USB_EHCI_HCD_PLATFORM=y -CONFIG_USB_ISP1760=y +CONFIG_USB_EHCI_TEGRA=y +CONFIG_USB_EHCI_EXYNOS=y CONFIG_USB_OHCI_HCD=y 
CONFIG_USB_OHCI_HCD_STI=y -CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_OHCI_EXYNOS=m CONFIG_USB_R8A66597_HCD=m CONFIG_USB_RENESAS_USBHS=m @@ -746,18 +705,18 @@ CONFIG_USB_TI_CPPI41_DMA=y CONFIG_USB_TUSB_OMAP_DMA=y CONFIG_USB_DWC3=y CONFIG_USB_DWC2=y -CONFIG_USB_HSIC_USB3503=y CONFIG_USB_CHIPIDEA=y CONFIG_USB_CHIPIDEA_UDC=y CONFIG_USB_CHIPIDEA_HOST=y +CONFIG_USB_ISP1760=y +CONFIG_USB_HSIC_USB3503=y CONFIG_AB8500_USB=y -CONFIG_KEYSTONE_USB_PHY=y +CONFIG_KEYSTONE_USB_PHY=m CONFIG_NOP_USB_XCEIV=m CONFIG_AM335X_PHY_USB=m CONFIG_TWL6030_USB=m CONFIG_USB_GPIO_VBUS=y CONFIG_USB_ISP1301=y -CONFIG_USB_MSM_OTG=m CONFIG_USB_MXS_PHY=y CONFIG_USB_GADGET=y CONFIG_USB_FSL_USB2=y @@ -793,21 +752,20 @@ CONFIG_MMC_SDHCI_OF_ESDHC=y CONFIG_MMC_SDHCI_ESDHC_IMX=y CONFIG_MMC_SDHCI_DOVE=y CONFIG_MMC_SDHCI_TEGRA=y +CONFIG_MMC_SDHCI_S3C=y CONFIG_MMC_SDHCI_PXAV3=y CONFIG_MMC_SDHCI_SPEAR=y -CONFIG_MMC_SDHCI_S3C=y CONFIG_MMC_SDHCI_S3C_DMA=y CONFIG_MMC_SDHCI_BCM_KONA=y +CONFIG_MMC_MESON_MX_SDIO=y CONFIG_MMC_SDHCI_ST=y CONFIG_MMC_OMAP=y CONFIG_MMC_OMAP_HS=y CONFIG_MMC_ATMELMCI=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_MESON_MX_SDIO=y CONFIG_MMC_MVSDIO=y CONFIG_MMC_SDHI=y CONFIG_MMC_DW=y -CONFIG_MMC_DW_PLTFM=y CONFIG_MMC_DW_EXYNOS=y CONFIG_MMC_DW_ROCKCHIP=y CONFIG_MMC_SH_MMCIF=y @@ -847,94 +805,85 @@ CONFIG_RTC_DRV_MAX77686=y CONFIG_RTC_DRV_RK808=m CONFIG_RTC_DRV_RS5C372=m CONFIG_RTC_DRV_BQ32K=m -CONFIG_RTC_DRV_PALMAS=y -CONFIG_RTC_DRV_ST_LPC=y CONFIG_RTC_DRV_TWL4030=y +CONFIG_RTC_DRV_PALMAS=y CONFIG_RTC_DRV_TPS6586X=y CONFIG_RTC_DRV_TPS65910=y CONFIG_RTC_DRV_S35390A=m CONFIG_RTC_DRV_RX8581=m CONFIG_RTC_DRV_EM3027=y +CONFIG_RTC_DRV_S5M=m CONFIG_RTC_DRV_DA9063=m CONFIG_RTC_DRV_EFI=m CONFIG_RTC_DRV_DIGICOLOR=m -CONFIG_RTC_DRV_S5M=m CONFIG_RTC_DRV_S3C=m CONFIG_RTC_DRV_PL031=y CONFIG_RTC_DRV_AT91RM9200=m CONFIG_RTC_DRV_AT91SAM9=m CONFIG_RTC_DRV_VT8500=y -CONFIG_RTC_DRV_SUN6I=y CONFIG_RTC_DRV_SUNXI=y CONFIG_RTC_DRV_MV=y CONFIG_RTC_DRV_TEGRA=y +CONFIG_RTC_DRV_ST_LPC=y CONFIG_RTC_DRV_CPCAP=m CONFIG_DMADEVICES=y -CONFIG_DW_DMAC=y CONFIG_AT_HDMAC=y CONFIG_AT_XDMAC=y +CONFIG_DMA_BCM2835=y +CONFIG_DMA_SUN6I=y CONFIG_FSL_EDMA=y +CONFIG_IMX_DMA=y +CONFIG_IMX_SDMA=y CONFIG_MV_XOR=y +CONFIG_MXS_DMA=y +CONFIG_PL330_DMA=y +CONFIG_SIRF_DMA=y +CONFIG_STE_DMA40=y +CONFIG_ST_FDMA=m CONFIG_TEGRA20_APB_DMA=y +CONFIG_XILINX_DMA=y +CONFIG_QCOM_BAM_DMA=y +CONFIG_DW_DMAC=y CONFIG_SH_DMAE=y CONFIG_RCAR_DMAC=y CONFIG_RENESAS_USB_DMAC=m -CONFIG_STE_DMA40=y -CONFIG_SIRF_DMA=y -CONFIG_TI_EDMA=y -CONFIG_PL330_DMA=y -CONFIG_IMX_SDMA=y -CONFIG_IMX_DMA=y -CONFIG_MXS_DMA=y -CONFIG_DMA_BCM2835=y -CONFIG_DMA_OMAP=y -CONFIG_QCOM_BAM_DMA=y -CONFIG_XILINX_DMA=y -CONFIG_DMA_SUN6I=y -CONFIG_ST_FDMA=m +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_MMIO=y CONFIG_STAGING=y -CONFIG_SENSORS_ISL29018=y -CONFIG_SENSORS_ISL29028=y CONFIG_MFD_NVEC=y CONFIG_KEYBOARD_NVEC=y CONFIG_SERIO_NVEC_PS2=y CONFIG_NVEC_POWER=y CONFIG_NVEC_PAZ00=y -CONFIG_BCMA=y -CONFIG_BCMA_HOST_SOC=y -CONFIG_BCMA_DRIVER_GMAC_CMN=y -CONFIG_BCMA_DRIVER_GPIO=y -CONFIG_QCOM_GSBI=y -CONFIG_QCOM_PM=y -CONFIG_QCOM_SMEM=y -CONFIG_QCOM_SMD_RPM=y -CONFIG_QCOM_SMP2P=y -CONFIG_QCOM_SMSM=y -CONFIG_QCOM_WCNSS_CTRL=m -CONFIG_ROCKCHIP_PM_DOMAINS=y -CONFIG_COMMON_CLK_QCOM=y -CONFIG_QCOM_CLK_RPM=y -CONFIG_CHROME_PLATFORMS=y CONFIG_STAGING_BOARD=y -CONFIG_CROS_EC_CHARDEV=m CONFIG_COMMON_CLK_MAX77686=y CONFIG_COMMON_CLK_RK808=m CONFIG_COMMON_CLK_S2MPS11=m +CONFIG_COMMON_CLK_QCOM=y +CONFIG_QCOM_CLK_RPM=y CONFIG_APQ_MMCC_8084=y CONFIG_MSM_GCC_8660=y CONFIG_MSM_MMCC_8960=y CONFIG_MSM_MMCC_8974=y -CONFIG_HWSPINLOCK_QCOM=y 
+CONFIG_BCM2835_MBOX=y CONFIG_ROCKCHIP_IOMMU=y CONFIG_TEGRA_IOMMU_GART=y CONFIG_TEGRA_IOMMU_SMMU=y CONFIG_REMOTEPROC=m CONFIG_ST_REMOTEPROC=m CONFIG_RPMSG_VIRTIO=m +CONFIG_RASPBERRYPI_POWER=y +CONFIG_QCOM_GSBI=y +CONFIG_QCOM_PM=y +CONFIG_QCOM_SMD_RPM=m +CONFIG_QCOM_WCNSS_CTRL=m +CONFIG_ROCKCHIP_PM_DOMAINS=y +CONFIG_ARCH_TEGRA_2x_SOC=y +CONFIG_ARCH_TEGRA_3x_SOC=y +CONFIG_ARCH_TEGRA_114_SOC=y +CONFIG_ARCH_TEGRA_124_SOC=y CONFIG_PM_DEVFREQ=y CONFIG_ARM_TEGRA_DEVFREQ=m -CONFIG_MEMORY=y -CONFIG_EXTCON=y CONFIG_TI_AEMIF=y CONFIG_IIO=y CONFIG_IIO_SW_TRIGGER=y @@ -947,56 +896,54 @@ CONFIG_VF610_ADC=m CONFIG_XILINX_XADC=y CONFIG_MPU3050_I2C=y CONFIG_CM36651=m +CONFIG_SENSORS_ISL29018=y +CONFIG_SENSORS_ISL29028=y CONFIG_AK8975=y -CONFIG_RASPBERRYPI_POWER=y CONFIG_IIO_HRTIMER_TRIGGER=y CONFIG_PWM=y CONFIG_PWM_ATMEL=m CONFIG_PWM_ATMEL_HLCDC_PWM=m CONFIG_PWM_ATMEL_TCB=m +CONFIG_PWM_BCM2835=y +CONFIG_PWM_BRCMSTB=m CONFIG_PWM_FSL_FTM=m CONFIG_PWM_MESON=m CONFIG_PWM_RCAR=m CONFIG_PWM_RENESAS_TPU=y CONFIG_PWM_ROCKCHIP=m CONFIG_PWM_SAMSUNG=m +CONFIG_PWM_STI=y CONFIG_PWM_SUN4I=y CONFIG_PWM_TEGRA=y CONFIG_PWM_VT8500=y +CONFIG_KEYSTONE_IRQ=y +CONFIG_PHY_SUN4I_USB=y +CONFIG_PHY_SUN9I_USB=y CONFIG_PHY_HIX5HD2_SATA=y -CONFIG_E1000E=y -CONFIG_PWM_STI=y -CONFIG_PWM_BCM2835=y -CONFIG_PWM_BRCMSTB=m -CONFIG_PHY_DM816X_USB=m -CONFIG_OMAP_USB2=y -CONFIG_TI_PIPE3=y -CONFIG_TWL4030_USB=m +CONFIG_PHY_BERLIN_SATA=y CONFIG_PHY_BERLIN_USB=y CONFIG_PHY_CPCAP_USB=m -CONFIG_PHY_BERLIN_SATA=y +CONFIG_PHY_QCOM_APQ8064_SATA=m +CONFIG_PHY_RCAR_GEN2=m CONFIG_PHY_ROCKCHIP_DP=m CONFIG_PHY_ROCKCHIP_USB=y -CONFIG_PHY_QCOM_APQ8064_SATA=m +CONFIG_PHY_SAMSUNG_USB2=m CONFIG_PHY_MIPHY28LP=y -CONFIG_PHY_RCAR_GEN2=m CONFIG_PHY_STIH407_USB=y CONFIG_PHY_STM32_USBPHYC=y -CONFIG_PHY_SUN4I_USB=y -CONFIG_PHY_SUN9I_USB=y -CONFIG_PHY_SAMSUNG_USB2=m CONFIG_PHY_TEGRA_XUSB=y -CONFIG_PHY_BRCM_SATA=y -CONFIG_NVMEM=y +CONFIG_PHY_DM816X_USB=m +CONFIG_OMAP_USB2=y +CONFIG_TI_PIPE3=y +CONFIG_TWL4030_USB=m CONFIG_NVMEM_IMX_OCOTP=y CONFIG_NVMEM_SUNXI_SID=y CONFIG_NVMEM_VF610_OCOTP=y -CONFIG_BCM2835_MBOX=y CONFIG_RASPBERRYPI_FIRMWARE=y -CONFIG_EFI_VARS=m -CONFIG_EFI_CAPSULE_LOADER=m CONFIG_BCM47XX_NVRAM=y CONFIG_BCM47XX_SPROM=y +CONFIG_EFI_VARS=m +CONFIG_EFI_CAPSULE_LOADER=m CONFIG_EXT4_FS=y CONFIG_AUTOFS4_FS=y CONFIG_MSDOS_FS=y @@ -1004,7 +951,6 @@ CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_UBIFS_FS=y -CONFIG_TMPFS=y CONFIG_SQUASHFS=y CONFIG_SQUASHFS_LZO=y CONFIG_SQUASHFS_XZ=y @@ -1020,13 +966,7 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y -CONFIG_LOCKUP_DETECTOR=y -CONFIG_CPUFREQ_DT=y -CONFIG_KEYSTONE_IRQ=y -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_ST=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m @@ -1035,27 +975,19 @@ CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_CRYPTO_DEV_MARVELL_CESA=m CONFIG_CRYPTO_DEV_EXYNOS_RNG=m CONFIG_CRYPTO_DEV_S5P=m +CONFIG_CRYPTO_DEV_ATMEL_AES=m +CONFIG_CRYPTO_DEV_ATMEL_TDES=m +CONFIG_CRYPTO_DEV_ATMEL_SHA=m CONFIG_CRYPTO_DEV_SUN4I_SS=m CONFIG_CRYPTO_DEV_ROCKCHIP=m CONFIG_ARM_CRYPTO=y -CONFIG_CRYPTO_SHA1_ARM=m CONFIG_CRYPTO_SHA1_ARM_NEON=m CONFIG_CRYPTO_SHA1_ARM_CE=m CONFIG_CRYPTO_SHA2_ARM_CE=m -CONFIG_CRYPTO_SHA256_ARM=m CONFIG_CRYPTO_SHA512_ARM=m CONFIG_CRYPTO_AES_ARM=m CONFIG_CRYPTO_AES_ARM_BS=m CONFIG_CRYPTO_AES_ARM_CE=m -CONFIG_CRYPTO_CHACHA20_NEON=m -CONFIG_CRYPTO_CRC32_ARM_CE=m -CONFIG_CRYPTO_CRCT10DIF_ARM_CE=m CONFIG_CRYPTO_GHASH_ARM_CE=m -CONFIG_CRYPTO_DEV_ATMEL_AES=m 
-CONFIG_CRYPTO_DEV_ATMEL_TDES=m -CONFIG_CRYPTO_DEV_ATMEL_SHA=m -CONFIG_VIDEO_VIVID=m -CONFIG_VIRTIO=y -CONFIG_VIRTIO_PCI=y -CONFIG_VIRTIO_PCI_LEGACY=y -CONFIG_VIRTIO_MMIO=y +CONFIG_CRYPTO_CRC32_ARM_CE=m +CONFIG_CRYPTO_CHACHA20_NEON=m diff --git a/arch/arm/crypto/speck-neon-core.S b/arch/arm/crypto/speck-neon-core.S index 3c1e203e53b9..57caa742016e 100644 --- a/arch/arm/crypto/speck-neon-core.S +++ b/arch/arm/crypto/speck-neon-core.S @@ -272,9 +272,11 @@ * Allocate stack space to store 128 bytes worth of tweaks. For * performance, this space is aligned to a 16-byte boundary so that we * can use the load/store instructions that declare 16-byte alignment. + * For Thumb2 compatibility, don't do the 'bic' directly on 'sp'. */ - sub sp, #128 - bic sp, #0xf + sub r12, sp, #128 + bic r12, #0xf + mov sp, r12 .if \n == 64 // Load first tweak diff --git a/arch/arm/firmware/Makefile b/arch/arm/firmware/Makefile index a71f16536b6c..6e41336b0bc4 100644 --- a/arch/arm/firmware/Makefile +++ b/arch/arm/firmware/Makefile @@ -1 +1,4 @@ obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o + +# tf_generic_smc() fails to build with -fsanitize-coverage=trace-pc +KCOV_INSTRUMENT := n diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index dd546d65a383..7a9b86978ee1 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -177,7 +177,7 @@ M_CLASS(streq r3, [r12, #PMSAv8_MAIR1]) bic r0, r0, #CR_I #endif mcr p15, 0, r0, c1, c0, 0 @ write control reg - isb + instr_sync #elif defined (CONFIG_CPU_V7M) #ifdef CONFIG_ARM_MPU ldreq r3, [r12, MPU_CTRL] diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig index c46a728df44e..25aac6ee2ab1 100644 --- a/arch/arm/mach-bcm/Kconfig +++ b/arch/arm/mach-bcm/Kconfig @@ -20,6 +20,7 @@ config ARCH_BCM_IPROC select GPIOLIB select ARM_AMBA select PINCTRL + select PCI_DOMAINS if PCI help This enables support for systems based on Broadcom IPROC architected SoCs. The IPROC complex contains one or more ARM CPUs along with common diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index e22fb40e34bc..6d5beb11bd96 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -774,7 +774,7 @@ static struct gpiod_lookup_table mmc_gpios_table = { GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd", GPIO_ACTIVE_LOW), GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp", - GPIO_ACTIVE_LOW), + GPIO_ACTIVE_HIGH), }, }; diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index 69df3620eca5..1c73694c871a 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c @@ -109,6 +109,45 @@ void omap5_erratum_workaround_801819(void) static inline void omap5_erratum_workaround_801819(void) { } #endif +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR +/* + * Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with + * ICIALLU) to activate the workaround for secondary Core. + * NOTE: it is assumed that the primary core's configuration is done + * by the boot loader (kernel will detect a misconfiguration and complain + * if this is not done). + * + * In General Purpose(GP) devices, ACR bit settings can only be done + * by ROM code in "secure world" using the smc call and there is no + * option to update the "firmware" on such devices. This also works for + * High security(HS) devices, as a backup option in case the + * "update" is not done in the "security firmware". 
+ */ +static void omap5_secondary_harden_predictor(void) +{ + u32 acr, acr_mask; + + asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr)); + + /* + * ACTLR[0] (Enable invalidates of BTB with ICIALLU) + */ + acr_mask = BIT(0); + + /* Do we already have it done.. if yes, skip expensive smc */ + if ((acr & acr_mask) == acr_mask) + return; + + acr |= acr_mask; + omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr); + + pr_debug("%s: ARM ACR setup for CVE_2017_5715 applied on CPU%d\n", + __func__, smp_processor_id()); +} +#else +static inline void omap5_secondary_harden_predictor(void) { } +#endif + static void omap4_secondary_init(unsigned int cpu) { /* @@ -131,6 +170,8 @@ static void omap4_secondary_init(unsigned int cpu) set_cntfreq(); /* Configure ACR to disable streaming WA for 801819 */ omap5_erratum_workaround_801819(); + /* Enable ACR to allow for ICUALLU workaround */ + omap5_secondary_harden_predictor(); } /* diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c index 9c10248fadcc..4e8c2116808e 100644 --- a/arch/arm/mach-pxa/irq.c +++ b/arch/arm/mach-pxa/irq.c @@ -185,7 +185,7 @@ static int pxa_irq_suspend(void) { int i; - for (i = 0; i < pxa_internal_irq_nr / 32; i++) { + for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) { void __iomem *base = irq_base(i); saved_icmr[i] = __raw_readl(base + ICMR); @@ -204,7 +204,7 @@ static void pxa_irq_resume(void) { int i; - for (i = 0; i < pxa_internal_irq_nr / 32; i++) { + for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) { void __iomem *base = irq_base(i); __raw_writel(saved_icmr[i], base + ICMR); diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig index d0f62eacf59d..4adb901dd5eb 100644 --- a/arch/arm/mach-socfpga/Kconfig +++ b/arch/arm/mach-socfpga/Kconfig @@ -10,6 +10,7 @@ menuconfig ARCH_SOCFPGA select HAVE_ARM_SCU select HAVE_ARM_TWD if SMP select MFD_SYSCON + select PCI_DOMAINS if PCI if ARCH_SOCFPGA config SOCFPGA_SUSPEND diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index c186474422f3..0cc8e04295a4 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -736,20 +736,29 @@ static int __mark_rodata_ro(void *unused) return 0; } +static int kernel_set_to_readonly __read_mostly; + void mark_rodata_ro(void) { + kernel_set_to_readonly = 1; stop_machine(__mark_rodata_ro, NULL, NULL); debug_checkwx(); } void set_kernel_text_rw(void) { + if (!kernel_set_to_readonly) + return; + set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false, current->active_mm); } void set_kernel_text_ro(void) { + if (!kernel_set_to_readonly) + return; + set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true, current->active_mm); } diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 6e8b71613039..f6a62ae44a65 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -1844,7 +1844,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) /* there are 2 passes here */ bpf_jit_dump(prog->len, image_size, 2, ctx.target); - set_memory_ro((unsigned long)header, header->pages); + bpf_jit_binary_lock_ro(header); prog->bpf_func = (void *)ctx.target; prog->jited = 1; prog->jited_len = image_size; diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 45272266dafb..e7101b19d590 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -10,7 +10,7 @@ # # Copyright (C) 1995-2001 by Russell King -LDFLAGS_vmlinux :=-p --no-undefined -X +LDFLAGS_vmlinux :=--no-undefined -X CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET) GZFLAGS :=-9 @@ -60,15 +60,15 @@ ifeq 
($(CONFIG_CPU_BIG_ENDIAN), y) KBUILD_CPPFLAGS += -mbig-endian CHECKFLAGS += -D__AARCH64EB__ AS += -EB -LD += -EB -LDFLAGS += -maarch64linuxb +# We must use the linux target here, since distributions don't tend to package +# the ELF linker scripts with binutils, and this results in a build failure. +LDFLAGS += -EB -maarch64linuxb UTS_MACHINE := aarch64_be else KBUILD_CPPFLAGS += -mlittle-endian CHECKFLAGS += -D__AARCH64EL__ AS += -EL -LD += -EL -LDFLAGS += -maarch64linux +LDFLAGS += -EL -maarch64linux # See comment above UTS_MACHINE := aarch64 endif diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi index e6b059378dc0..67dac595dc72 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi @@ -309,8 +309,7 @@ interrupts = <0 99 4>; resets = <&rst SPIM0_RESET>; reg-io-width = <4>; - num-chipselect = <4>; - bus-num = <0>; + num-cs = <4>; status = "disabled"; }; @@ -322,8 +321,7 @@ interrupts = <0 100 4>; resets = <&rst SPIM1_RESET>; reg-io-width = <4>; - num-chipselect = <4>; - bus-num = <0>; + num-cs = <4>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts b/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts index 4b3331fbfe39..dff9b15eb3c0 100644 --- a/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts +++ b/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts @@ -66,9 +66,22 @@ &ethmac { status = "okay"; - phy-mode = "rgmii"; pinctrl-0 = <&eth_rgmii_y_pins>; pinctrl-names = "default"; + phy-handle = <&eth_phy0>; + phy-mode = "rgmii"; + + mdio { + compatible = "snps,dwmac-mdio"; + #address-cells = <1>; + #size-cells = <0>; + + eth_phy0: ethernet-phy@0 { + /* Realtek RTL8211F (0x001cc916) */ + reg = <0>; + eee-broken-1000t; + }; + }; }; &uart_A { diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi index fee87737a201..67d7115e4eff 100644 --- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi @@ -132,7 +132,7 @@ sd_emmc_b: sd@5000 { compatible = "amlogic,meson-axg-mmc"; - reg = <0x0 0x5000 0x0 0x2000>; + reg = <0x0 0x5000 0x0 0x800>; interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>; status = "disabled"; clocks = <&clkc CLKID_SD_EMMC_B>, @@ -144,7 +144,7 @@ sd_emmc_c: mmc@7000 { compatible = "amlogic,meson-axg-mmc"; - reg = <0x0 0x7000 0x0 0x2000>; + reg = <0x0 0x7000 0x0 0x800>; interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>; status = "disabled"; clocks = <&clkc CLKID_SD_EMMC_C>, diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi index 3c31e21cbed7..b8dc4dbb391b 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi @@ -35,6 +35,12 @@ no-map; }; + /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */ + secmon_reserved_alt: secmon@5000000 { + reg = <0x0 0x05000000 0x0 0x300000>; + no-map; + }; + linux,cma { compatible = "shared-dma-pool"; reusable; @@ -457,21 +463,21 @@ sd_emmc_a: mmc@70000 { compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc"; - reg = <0x0 0x70000 0x0 0x2000>; + reg = <0x0 0x70000 0x0 0x800>; interrupts = <GIC_SPI 216 IRQ_TYPE_EDGE_RISING>; status = "disabled"; }; sd_emmc_b: mmc@72000 { compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc"; - reg = <0x0 0x72000 0x0 0x2000>; + reg = <0x0 0x72000 0x0 0x800>; interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>; status = "disabled"; }; sd_emmc_c: mmc@74000 { compatible =
"amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc"; - reg = <0x0 0x74000 0x0 0x2000>; + reg = <0x0 0x74000 0x0 0x800>; interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi index eb327664a4d8..6aaafff674f9 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi @@ -6,7 +6,7 @@ &apb { mali: gpu@c0000 { - compatible = "amlogic,meson-gxbb-mali", "arm,mali-450"; + compatible = "amlogic,meson-gxl-mali", "arm,mali-450"; reg = <0x0 0xc0000 0x0 0x40000>; interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>, diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts index 3e3eb31748a3..f63bceb88caa 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts @@ -234,9 +234,6 @@ bus-width = <4>; cap-sd-highspeed; - sd-uhs-sdr12; - sd-uhs-sdr25; - sd-uhs-sdr50; max-frequency = <100000000>; disable-wp; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi index 0cfd701809de..a1b31013ab6e 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi @@ -189,3 +189,10 @@ &usb0 { status = "okay"; }; + +&usb2_phy0 { + /* + * HDMI_5V is also used as supply for the USB VBUS. + */ + phy-supply = <&hdmi_5v>; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index 27538eea547b..c87a80e9bcc6 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi @@ -13,14 +13,6 @@ / { compatible = "amlogic,meson-gxl"; - reserved-memory { - /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */ - secmon_reserved_alt: secmon@5000000 { - reg = <0x0 0x05000000 0x0 0x300000>; - no-map; - }; - }; - soc { usb0: usb@c9000000 { status = "disabled"; diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi index 4a2a6af8e752..4057197048dc 100644 --- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi @@ -118,7 +118,7 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <0>; @@ -149,7 +149,7 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <4>; @@ -566,7 +566,7 @@ reg = <0x66080000 0x100>; #address-cells = <1>; #size-cells = <0>; - interrupts = <GIC_SPI 394 IRQ_TYPE_NONE>; + interrupts = <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>; clock-frequency = <100000>; status = "disabled"; }; @@ -594,7 +594,7 @@ reg = <0x660b0000 0x100>; #address-cells = <1>; #size-cells = <0>; - interrupts = <GIC_SPI 395 IRQ_TYPE_NONE>; + interrupts = <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>; clock-frequency = <100000>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts b/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts index eb6f08cdbd79..77efa28c4dd5 100644 --- 
a/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts +++ b/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts @@ -43,6 +43,10 @@ enet-phy-lane-swap; }; +&sdio0 { + mmc-ddr-1_8v; +}; + &uart2 { status = "okay"; }; diff --git a/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts b/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts index 5084b037320f..55ba495ef56e 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts +++ b/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts @@ -42,3 +42,7 @@ &gphy0 { enet-phy-lane-swap; }; + +&sdio0 { + mmc-ddr-1_8v; +}; diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi index 99aaff0b6d72..b203152ad67c 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi +++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi @@ -409,7 +409,7 @@ reg = <0x000b0000 0x100>; #address-cells = <1>; #size-cells = <0>; - interrupts = <GIC_SPI 177 IRQ_TYPE_NONE>; + interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>; clock-frequency = <100000>; status = "disabled"; }; @@ -453,7 +453,7 @@ reg = <0x000e0000 0x100>; #address-cells = <1>; #size-cells = <0>; - interrupts = <GIC_SPI 178 IRQ_TYPE_NONE>; + interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>; clock-frequency = <100000>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts index c6999624ed8a..68c5a6c819ae 100644 --- a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts +++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts @@ -585,6 +585,8 @@ vmmc-supply = <&wlan_en>; ti,non-removable; non-removable; + cap-power-off-card; + keep-power-in-suspend; #address-cells = <0x1>; #size-cells = <0x0>; status = "ok"; diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts index edb4ee0b8896..7f12624f6c8e 100644 --- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts +++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts @@ -322,6 +322,8 @@ dwmmc_2: dwmmc2@f723f000 { bus-width = <0x4>; non-removable; + cap-power-off-card; + keep-power-in-suspend; vmmc-supply = <&reg_vdd_3v3>; mmc-pwrseq = <&wl1835_pwrseq>; diff --git a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi index 7dabe25f6774..1c6ff8197a88 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi @@ -149,7 +149,7 @@ CP110_LABEL(icu): interrupt-controller@1e0000 { compatible = "marvell,cp110-icu"; - reg = <0x1e0000 0x10>; + reg = <0x1e0000 0x440>; #interrupt-cells = <3>; interrupt-controller; msi-parent = <&gicp>; diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi index 0f829db33efe..4d5ef01f43a3 100644 --- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi @@ -75,7 +75,7 @@ serial@75b1000 { label = "LS-UART0"; - status = "okay"; + status = "disabled"; pinctrl-names = "default", "sleep"; pinctrl-0 = <&blsp2_uart2_4pins_default>; pinctrl-1 = <&blsp2_uart2_4pins_sleep>; diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index 650f356f69ca..c2625d15a8c0 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -1191,14 +1191,14 @@ port@0 { reg = <0>; - etf_out: endpoint { + etf_in: endpoint { slave-mode; remote-endpoint = <&funnel0_out>; }; }; port@1 { reg = <0>; - etf_in: endpoint { +
etf_out: endpoint { remote-endpoint = <&replicator_in>; }; }; diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts b/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts index 9b4dc41703e3..ae3b5adf32df 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts +++ b/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts @@ -54,7 +54,7 @@ sound { compatible = "audio-graph-card"; label = "UniPhier LD11"; - widgets = "Headphone", "Headphone Jack"; + widgets = "Headphone", "Headphones"; dais = <&i2s_port2 &i2s_port3 &i2s_port4 diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts index fe6608ea3277..7919233c9ce2 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts +++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts @@ -54,7 +54,7 @@ sound { compatible = "audio-graph-card"; label = "UniPhier LD20"; - widgets = "Headphone", "Headphone Jack"; + widgets = "Headphone", "Headphones"; dais = <&i2s_port2 &i2s_port3 &i2s_port4 diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 3cfa8ca26738..f9a186f6af8a 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -47,6 +47,7 @@ CONFIG_ARCH_MVEBU=y CONFIG_ARCH_QCOM=y CONFIG_ARCH_ROCKCHIP=y CONFIG_ARCH_SEATTLE=y +CONFIG_ARCH_SYNQUACER=y CONFIG_ARCH_RENESAS=y CONFIG_ARCH_R8A7795=y CONFIG_ARCH_R8A7796=y @@ -58,7 +59,6 @@ CONFIG_ARCH_R8A77995=y CONFIG_ARCH_STRATIX10=y CONFIG_ARCH_TEGRA=y CONFIG_ARCH_SPRD=y -CONFIG_ARCH_SYNQUACER=y CONFIG_ARCH_THUNDER=y CONFIG_ARCH_THUNDER2=y CONFIG_ARCH_UNIPHIER=y @@ -67,25 +67,23 @@ CONFIG_ARCH_XGENE=y CONFIG_ARCH_ZX=y CONFIG_ARCH_ZYNQMP=y CONFIG_PCI=y -CONFIG_HOTPLUG_PCI_PCIE=y CONFIG_PCI_IOV=y CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_ACPI=y -CONFIG_PCI_LAYERSCAPE=y -CONFIG_PCI_HISI=y -CONFIG_PCIE_QCOM=y -CONFIG_PCIE_KIRIN=y -CONFIG_PCIE_ARMADA_8K=y -CONFIG_PCIE_HISI_STB=y CONFIG_PCI_AARDVARK=y CONFIG_PCI_TEGRA=y CONFIG_PCIE_RCAR=y -CONFIG_PCIE_ROCKCHIP=y -CONFIG_PCIE_ROCKCHIP_HOST=m CONFIG_PCI_HOST_GENERIC=y CONFIG_PCI_XGENE=y CONFIG_PCI_HOST_THUNDER_PEM=y CONFIG_PCI_HOST_THUNDER_ECAM=y +CONFIG_PCIE_ROCKCHIP_HOST=m +CONFIG_PCI_LAYERSCAPE=y +CONFIG_PCI_HISI=y +CONFIG_PCIE_QCOM=y +CONFIG_PCIE_ARMADA_8K=y +CONFIG_PCIE_KIRIN=y +CONFIG_PCIE_HISI_STB=y CONFIG_ARM64_VA_BITS_48=y CONFIG_SCHED_MC=y CONFIG_NUMA=y @@ -104,8 +102,6 @@ CONFIG_HIBERNATION=y CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y CONFIG_ARM_CPUIDLE=y CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_ATTR_SET=y -CONFIG_CPU_FREQ_GOV_COMMON=y CONFIG_CPU_FREQ_STAT=y CONFIG_CPU_FREQ_GOV_POWERSAVE=m CONFIG_CPU_FREQ_GOV_USERSPACE=y @@ -113,11 +109,11 @@ CONFIG_CPU_FREQ_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y CONFIG_CPUFREQ_DT=y +CONFIG_ACPI_CPPC_CPUFREQ=m CONFIG_ARM_ARMADA_37XX_CPUFREQ=y CONFIG_ARM_BIG_LITTLE_CPUFREQ=y CONFIG_ARM_SCPI_CPUFREQ=y CONFIG_ARM_TEGRA186_CPUFREQ=y -CONFIG_ACPI_CPPC_CPUFREQ=m CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -236,11 +232,6 @@ CONFIG_SMSC911X=y CONFIG_SNI_AVE=y CONFIG_SNI_NETSEC=y CONFIG_STMMAC_ETH=m -CONFIG_DWMAC_IPQ806X=m -CONFIG_DWMAC_MESON=m -CONFIG_DWMAC_ROCKCHIP=m -CONFIG_DWMAC_SUNXI=m -CONFIG_DWMAC_SUN8I=m CONFIG_MDIO_BUS_MUX_MMIOREG=y CONFIG_AT803X_PHY=m CONFIG_MARVELL_PHY=m @@ -269,8 +260,8 @@ CONFIG_WL18XX=m CONFIG_WLCORE_SDIO=m CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_ADC=m -CONFIG_KEYBOARD_CROS_EC=y CONFIG_KEYBOARD_GPIO=y +CONFIG_KEYBOARD_CROS_EC=y CONFIG_INPUT_TOUCHSCREEN=y CONFIG_TOUCHSCREEN_ATMEL_MXT=m CONFIG_INPUT_MISC=y @@ -296,17 
+287,13 @@ CONFIG_SERIAL_SAMSUNG=y CONFIG_SERIAL_SAMSUNG_CONSOLE=y CONFIG_SERIAL_TEGRA=y CONFIG_SERIAL_SH_SCI=y -CONFIG_SERIAL_SH_SCI_NR_UARTS=11 -CONFIG_SERIAL_SH_SCI_CONSOLE=y CONFIG_SERIAL_MSM=y CONFIG_SERIAL_MSM_CONSOLE=y CONFIG_SERIAL_XILINX_PS_UART=y CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y CONFIG_SERIAL_MVEBU_UART=y CONFIG_SERIAL_DEV_BUS=y -CONFIG_SERIAL_DEV_CTRL_TTYPORT=y CONFIG_VIRTIO_CONSOLE=y -CONFIG_I2C_HID=m CONFIG_I2C_CHARDEV=y CONFIG_I2C_MUX=y CONFIG_I2C_MUX_PCA954x=y @@ -325,26 +312,26 @@ CONFIG_I2C_RCAR=y CONFIG_I2C_CROS_EC_TUNNEL=y CONFIG_SPI=y CONFIG_SPI_ARMADA_3700=y -CONFIG_SPI_MESON_SPICC=m -CONFIG_SPI_MESON_SPIFC=m CONFIG_SPI_BCM2835=m CONFIG_SPI_BCM2835AUX=m +CONFIG_SPI_MESON_SPICC=m +CONFIG_SPI_MESON_SPIFC=m CONFIG_SPI_ORION=y CONFIG_SPI_PL022=y -CONFIG_SPI_QUP=y CONFIG_SPI_ROCKCHIP=y +CONFIG_SPI_QUP=y CONFIG_SPI_S3C64XX=y CONFIG_SPI_SPIDEV=m CONFIG_SPMI=y -CONFIG_PINCTRL_IPQ8074=y CONFIG_PINCTRL_SINGLE=y CONFIG_PINCTRL_MAX77620=y +CONFIG_PINCTRL_IPQ8074=y CONFIG_PINCTRL_MSM8916=y CONFIG_PINCTRL_MSM8994=y CONFIG_PINCTRL_MSM8996=y -CONFIG_PINCTRL_MT7622=y CONFIG_PINCTRL_QDF2XXX=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_PINCTRL_MT7622=y CONFIG_GPIO_DWAPB=y CONFIG_GPIO_MB86S7X=y CONFIG_GPIO_PL061=y @@ -368,13 +355,13 @@ CONFIG_SENSORS_INA2XX=m CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y CONFIG_CPU_THERMAL=y CONFIG_THERMAL_EMULATION=y +CONFIG_ROCKCHIP_THERMAL=m +CONFIG_RCAR_GEN3_THERMAL=y CONFIG_ARMADA_THERMAL=y CONFIG_BRCMSTB_THERMAL=m CONFIG_EXYNOS_THERMAL=y -CONFIG_RCAR_GEN3_THERMAL=y -CONFIG_QCOM_TSENS=y -CONFIG_ROCKCHIP_THERMAL=m CONFIG_TEGRA_BPMP_THERMAL=m +CONFIG_QCOM_TSENS=y CONFIG_UNIPHIER_THERMAL=y CONFIG_WATCHDOG=y CONFIG_S3C2410_WATCHDOG=y @@ -395,9 +382,9 @@ CONFIG_MFD_MAX77620=y CONFIG_MFD_SPMI_PMIC=y CONFIG_MFD_RK808=y CONFIG_MFD_SEC_CORE=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_AXP20X=y CONFIG_REGULATOR_FAN53555=y -CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_GPIO=y CONFIG_REGULATOR_HI6421V530=y CONFIG_REGULATOR_HI655X=y @@ -407,16 +394,15 @@ CONFIG_REGULATOR_QCOM_SMD_RPM=y CONFIG_REGULATOR_QCOM_SPMI=y CONFIG_REGULATOR_RK808=y CONFIG_REGULATOR_S2MPS11=y +CONFIG_RC_CORE=m +CONFIG_RC_DECODERS=y +CONFIG_RC_DEVICES=y +CONFIG_IR_MESON=m CONFIG_MEDIA_SUPPORT=m CONFIG_MEDIA_CAMERA_SUPPORT=y CONFIG_MEDIA_ANALOG_TV_SUPPORT=y CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y CONFIG_MEDIA_CONTROLLER=y -CONFIG_MEDIA_RC_SUPPORT=y -CONFIG_RC_CORE=m -CONFIG_RC_DEVICES=y -CONFIG_RC_DECODERS=y -CONFIG_IR_MESON=m CONFIG_VIDEO_V4L2_SUBDEV_API=y # CONFIG_DVB_NET is not set CONFIG_V4L_MEM2MEM_DRIVERS=y @@ -441,8 +427,7 @@ CONFIG_ROCKCHIP_DW_HDMI=y CONFIG_ROCKCHIP_DW_MIPI_DSI=y CONFIG_ROCKCHIP_INNO_HDMI=y CONFIG_DRM_RCAR_DU=m -CONFIG_DRM_RCAR_LVDS=y -CONFIG_DRM_RCAR_VSP=y +CONFIG_DRM_RCAR_LVDS=m CONFIG_DRM_TEGRA=m CONFIG_DRM_PANEL_SIMPLE=m CONFIG_DRM_I2C_ADV7511=m @@ -455,7 +440,6 @@ CONFIG_FB_ARMCLCD=y CONFIG_BACKLIGHT_GENERIC=m CONFIG_BACKLIGHT_PWM=m CONFIG_BACKLIGHT_LP855X=m -CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set @@ -468,6 +452,7 @@ CONFIG_SND_SOC_RCAR=m CONFIG_SND_SOC_AK4613=m CONFIG_SND_SIMPLE_CARD=m CONFIG_SND_AUDIO_GRAPH_CARD=m +CONFIG_I2C_HID=m CONFIG_USB=y CONFIG_USB_OTG=y CONFIG_USB_XHCI_HCD=y @@ -501,12 +486,12 @@ CONFIG_MMC_BLOCK_MINORS=32 CONFIG_MMC_ARMMMCI=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_ACPI=y -CONFIG_MMC_SDHCI_F_SDH30=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_OF_ARASAN=y CONFIG_MMC_SDHCI_OF_ESDHC=y CONFIG_MMC_SDHCI_CADENCE=y CONFIG_MMC_SDHCI_TEGRA=y +CONFIG_MMC_SDHCI_F_SDH30=y 
CONFIG_MMC_MESON_GX=y CONFIG_MMC_SDHCI_MSM=y CONFIG_MMC_SPI=y @@ -524,11 +509,11 @@ CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y CONFIG_LEDS_PWM=y CONFIG_LEDS_SYSCON=y +CONFIG_LEDS_TRIGGER_DISK=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y CONFIG_LEDS_TRIGGER_CPU=y CONFIG_LEDS_TRIGGER_DEFAULT_ON=y CONFIG_LEDS_TRIGGER_PANIC=y -CONFIG_LEDS_TRIGGER_DISK=y CONFIG_EDAC=y CONFIG_EDAC_GHES=y CONFIG_RTC_CLASS=y @@ -537,13 +522,13 @@ CONFIG_RTC_DRV_RK808=m CONFIG_RTC_DRV_S5M=y CONFIG_RTC_DRV_DS3232=y CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_CROS_EC=y CONFIG_RTC_DRV_S3C=y CONFIG_RTC_DRV_PL031=y CONFIG_RTC_DRV_SUN6I=y CONFIG_RTC_DRV_ARMADA38X=y CONFIG_RTC_DRV_TEGRA=y CONFIG_RTC_DRV_XGENE=y -CONFIG_RTC_DRV_CROS_EC=y CONFIG_DMADEVICES=y CONFIG_DMA_BCM2835=m CONFIG_K3_DMA=y @@ -579,7 +564,6 @@ CONFIG_HWSPINLOCK_QCOM=y CONFIG_ARM_MHU=y CONFIG_PLATFORM_MHU=y CONFIG_BCM2835_MBOX=y -CONFIG_HI6220_MBOX=y CONFIG_QCOM_APCS_IPC=y CONFIG_ROCKCHIP_IOMMU=y CONFIG_TEGRA_IOMMU_SMMU=y @@ -602,7 +586,6 @@ CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y CONFIG_EXTCON_USB_GPIO=y CONFIG_EXTCON_USBC_CROS_EC=y CONFIG_MEMORY=y -CONFIG_TEGRA_MC=y CONFIG_IIO=y CONFIG_EXYNOS_ADC=y CONFIG_ROCKCHIP_SARADC=m @@ -618,27 +601,27 @@ CONFIG_PWM_RCAR=m CONFIG_PWM_ROCKCHIP=y CONFIG_PWM_SAMSUNG=y CONFIG_PWM_TEGRA=m +CONFIG_PHY_XGENE=y +CONFIG_PHY_SUN4I_USB=y +CONFIG_PHY_HI6220_USB=y CONFIG_PHY_HISTB_COMBPHY=y CONFIG_PHY_HISI_INNO_USB2=y -CONFIG_PHY_RCAR_GEN3_USB2=y -CONFIG_PHY_RCAR_GEN3_USB3=m -CONFIG_PHY_HI6220_USB=y -CONFIG_PHY_QCOM_USB_HS=y -CONFIG_PHY_SUN4I_USB=y CONFIG_PHY_MVEBU_CP110_COMPHY=y CONFIG_PHY_QCOM_QMP=m -CONFIG_PHY_ROCKCHIP_INNO_USB2=y +CONFIG_PHY_QCOM_USB_HS=y +CONFIG_PHY_RCAR_GEN3_USB2=y +CONFIG_PHY_RCAR_GEN3_USB3=m CONFIG_PHY_ROCKCHIP_EMMC=y +CONFIG_PHY_ROCKCHIP_INNO_USB2=y CONFIG_PHY_ROCKCHIP_PCIE=m CONFIG_PHY_ROCKCHIP_TYPEC=y -CONFIG_PHY_XGENE=y CONFIG_PHY_TEGRA_XUSB=y CONFIG_QCOM_L2_PMU=y CONFIG_QCOM_L3_PMU=y -CONFIG_MESON_EFUSE=m CONFIG_QCOM_QFPROM=y CONFIG_ROCKCHIP_EFUSE=y CONFIG_UNIPHIER_EFUSE=y +CONFIG_MESON_EFUSE=m CONFIG_TEE=y CONFIG_OPTEE=y CONFIG_ARM_SCPI_PROTOCOL=y @@ -647,7 +630,6 @@ CONFIG_EFI_CAPSULE_LOADER=y CONFIG_ACPI=y CONFIG_ACPI_APEI=y CONFIG_ACPI_APEI_GHES=y -CONFIG_ACPI_APEI_PCIEAER=y CONFIG_ACPI_APEI_MEMORY_FAILURE=y CONFIG_ACPI_APEI_EINJ=y CONFIG_EXT2_FS=y @@ -682,7 +664,6 @@ CONFIG_DEBUG_INFO=y CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y -CONFIG_LOCKUP_DETECTOR=y # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_PREEMPT is not set # CONFIG_FTRACE is not set @@ -691,20 +672,15 @@ CONFIG_SECURITY=y CONFIG_CRYPTO_ECHAINIV=y CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_ARM64_CRYPTO=y -CONFIG_CRYPTO_SHA256_ARM64=m -CONFIG_CRYPTO_SHA512_ARM64=m CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y +CONFIG_CRYPTO_SHA512_ARM64_CE=m +CONFIG_CRYPTO_SHA3_ARM64=m +CONFIG_CRYPTO_SM3_ARM64_CE=m CONFIG_CRYPTO_GHASH_ARM64_CE=y CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m CONFIG_CRYPTO_CRC32_ARM64_CE=m -CONFIG_CRYPTO_AES_ARM64=m -CONFIG_CRYPTO_AES_ARM64_CE=m CONFIG_CRYPTO_AES_ARM64_CE_CCM=y CONFIG_CRYPTO_AES_ARM64_CE_BLK=y -CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m CONFIG_CRYPTO_CHACHA20_NEON=m CONFIG_CRYPTO_AES_ARM64_BS=m -CONFIG_CRYPTO_SHA512_ARM64_CE=m -CONFIG_CRYPTO_SHA3_ARM64=m -CONFIG_CRYPTO_SM3_ARM64_CE=m diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index a91933b1e2e6..4b650ec1d7dd 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -28,7 +28,12 @@ typedef void (*alternative_cb_t)(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst); void 
__init apply_alternatives_all(void); -void apply_alternatives(void *start, size_t length); + +#ifdef CONFIG_MODULES +void apply_alternatives_module(void *start, size_t length); +#else +static inline void apply_alternatives_module(void *start, size_t length) { } +#endif #define ALTINSTR_ENTRY(feature,cb) \ " .word 661b - .\n" /* label */ \ diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 9f82d6b53851..1bdeca8918a6 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -224,10 +224,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte) * Only if the new pte is valid and kernel, otherwise TLB maintenance * or update_mmu_cache() have the necessary barriers. */ - if (pte_valid_not_user(pte)) { + if (pte_valid_not_user(pte)) dsb(ishst); - isb(); - } } extern void __sync_icache_dcache(pte_t pteval); @@ -434,7 +432,6 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) { WRITE_ONCE(*pmdp, pmd); dsb(ishst); - isb(); } static inline void pmd_clear(pmd_t *pmdp) @@ -485,7 +482,6 @@ static inline void set_pud(pud_t *pudp, pud_t pud) { WRITE_ONCE(*pudp, pud); dsb(ishst); - isb(); } static inline void pud_clear(pud_t *pudp) diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h index fa8b3fe932e6..6495cc51246f 100644 --- a/arch/arm64/include/asm/simd.h +++ b/arch/arm64/include/asm/simd.h @@ -29,20 +29,15 @@ DECLARE_PER_CPU(bool, kernel_neon_busy); static __must_check inline bool may_use_simd(void) { /* - * The raw_cpu_read() is racy if called with preemption enabled. - * This is not a bug: kernel_neon_busy is only set when - * preemption is disabled, so we cannot migrate to another CPU - * while it is set, nor can we migrate to a CPU where it is set. - * So, if we find it clear on some CPU then we're guaranteed to - * find it clear on any CPU we could migrate to. - * - * If we are in between kernel_neon_begin()...kernel_neon_end(), - * the flag will be set, but preemption is also disabled, so we - * can't migrate to another CPU and spuriously see it become - * false. + * kernel_neon_busy is only set while preemption is disabled, + * and is clear whenever preemption is enabled. Since + * this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy + * cannot change under our feet -- if it's set we cannot be + * migrated, and if it's clear we cannot be migrated to a CPU + * where it is set. */ return !in_irq() && !irqs_disabled() && !in_nmi() && - !raw_cpu_read(kernel_neon_busy); + !this_cpu_read(kernel_neon_busy); } #else /* ! CONFIG_KERNEL_MODE_NEON */ diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index 5c4bce4ac381..36fb069fd049 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -122,7 +122,30 @@ static void patch_alternative(struct alt_instr *alt, } } -static void __apply_alternatives(void *alt_region, bool use_linear_alias) +/* + * We provide our own, private D-cache cleaning function so that we don't + * accidentally call into the cache.S code, which is patched by us at + * runtime. 
+ */ +static void clean_dcache_range_nopatch(u64 start, u64 end) +{ + u64 cur, d_size, ctr_el0; + + ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0); + d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0, + CTR_DMINLINE_SHIFT); + cur = start & ~(d_size - 1); + do { + /* + * We must clean+invalidate to the PoC in order to avoid + * Cortex-A53 errata 826319, 827319, 824069 and 819472 + * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE) + */ + asm volatile("dc civac, %0" : : "r" (cur) : "memory"); + } while (cur += d_size, cur < end); +} + +static void __apply_alternatives(void *alt_region, bool is_module) { struct alt_instr *alt; struct alt_region *region = alt_region; @@ -145,7 +168,7 @@ pr_info_once("patching kernel code\n"); origptr = ALT_ORIG_PTR(alt); - updptr = use_linear_alias ? lm_alias(origptr) : origptr; + updptr = is_module ? origptr : lm_alias(origptr); nr_inst = alt->orig_len / AARCH64_INSN_SIZE; if (alt->cpufeature < ARM64_CB_PATCH) @@ -155,8 +178,20 @@ alt_cb(alt, origptr, updptr, nr_inst); - flush_icache_range((uintptr_t)origptr, - (uintptr_t)(origptr + nr_inst)); + if (!is_module) { + clean_dcache_range_nopatch((u64)origptr, + (u64)(origptr + nr_inst)); + } + } + + /* + * The core module code takes care of cache maintenance in + * flush_module_icache(). + */ + if (!is_module) { + dsb(ish); + __flush_icache_all(); + isb(); } } @@ -178,7 +213,7 @@ static int __apply_alternatives_multi_stop(void *unused) isb(); } else { BUG_ON(alternatives_applied); - __apply_alternatives(&region, true); + __apply_alternatives(&region, false); /* Barriers provided by the cache flushing */ WRITE_ONCE(alternatives_applied, 1); } @@ -192,12 +227,14 @@ void __init apply_alternatives_all(void) stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask); } -void apply_alternatives(void *start, size_t length) +#ifdef CONFIG_MODULES +void apply_alternatives_module(void *start, size_t length) { struct alt_region region = { .begin = start, .end = start + length, }; - __apply_alternatives(&region, false); + __apply_alternatives(&region, true); } +#endif diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 155fd91e78f4..f0f27aeefb73 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -448,9 +448,8 @@ int module_finalize(const Elf_Ehdr *hdr, const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) { - if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) { - apply_alternatives((void *)s->sh_addr, s->sh_size); - } + if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) + apply_alternatives_module((void *)s->sh_addr, s->sh_size); #ifdef CONFIG_ARM64_MODULE_PLTS if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) && !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name)) diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 3b38c717008a..46bff1661836 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -2278,17 +2278,15 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t DPRINT(("smpl_buf @%p\n", smpl_buf)); /* allocate vma */ - vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + vma = vm_area_alloc(mm); if (!vma) { DPRINT(("Cannot allocate vma\n")); goto error_kmem; } - INIT_LIST_HEAD(&vma->anon_vma_chain); /* * partially initialize the vma for the sampling buffer */ 
- vma->vm_mm = mm; vma->vm_file = get_file(filp); vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP; vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */ @@ -2346,7 +2344,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t return 0; error: - kmem_cache_free(vm_area_cachep, vma); + vm_area_free(vma); error_kmem: pfm_rvfree(smpl_buf, size); diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 18278b448530..bdb14a369137 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -114,10 +114,8 @@ ia64_init_addr_space (void) * the problem. When the process attempts to write to the register backing store * for the first time, it will get a SEGFAULT in this case. */ - vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + vma = vm_area_alloc(current->mm); if (vma) { - INIT_LIST_HEAD(&vma->anon_vma_chain); - vma->vm_mm = current->mm; vma->vm_start = current->thread.rbs_bot & PAGE_MASK; vma->vm_end = vma->vm_start + PAGE_SIZE; vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; @@ -125,7 +123,7 @@ ia64_init_addr_space (void) down_write(&current->mm->mmap_sem); if (insert_vm_struct(current->mm, vma)) { up_write(&current->mm->mmap_sem); - kmem_cache_free(vm_area_cachep, vma); + vm_area_free(vma); return; } up_write(&current->mm->mmap_sem); @@ -133,10 +131,8 @@ ia64_init_addr_space (void) /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */ if (!(current->personality & MMAP_PAGE_ZERO)) { - vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + vma = vm_area_alloc(current->mm); if (vma) { - INIT_LIST_HEAD(&vma->anon_vma_chain); - vma->vm_mm = current->mm; vma->vm_end = PAGE_SIZE; vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT); vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | @@ -144,7 +140,7 @@ ia64_init_addr_space (void) down_write(&current->mm->mmap_sem); if (insert_vm_struct(current->mm, vma)) { up_write(&current->mm->mmap_sem); - kmem_cache_free(vm_area_cachep, vma); + vm_area_free(vma); return; } up_write(&current->mm->mmap_sem); diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h index 8b707c249026..12fe700632f4 100644 --- a/arch/m68k/include/asm/mcf_pgalloc.h +++ b/arch/m68k/include/asm/mcf_pgalloc.h @@ -44,6 +44,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address) static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page, unsigned long address) { + pgtable_page_dtor(page); __free_page(page); } @@ -74,8 +75,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, return page; } -extern inline void pte_free(struct mm_struct *mm, struct page *page) +static inline void pte_free(struct mm_struct *mm, struct page *page) { + pgtable_page_dtor(page); __free_page(page); } diff --git a/arch/microblaze/Kconfig.debug b/arch/microblaze/Kconfig.debug index 331a3bb66297..93a737c8d1a6 100644 --- a/arch/microblaze/Kconfig.debug +++ b/arch/microblaze/Kconfig.debug @@ -8,11 +8,4 @@ config TRACE_IRQFLAGS_SUPPORT source "lib/Kconfig.debug" -config HEART_BEAT - bool "Heart beat function for kernel" - default n - help - This option turns on/off heart beat kernel functionality. - First GPIO node is taken. 
- endmenu diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h index d5384f6f36f7..ce9b7b786156 100644 --- a/arch/microblaze/include/asm/setup.h +++ b/arch/microblaze/include/asm/setup.h @@ -19,15 +19,10 @@ extern char cmd_line[COMMAND_LINE_SIZE]; extern char *klimit; -void microblaze_heartbeat(void); -void microblaze_setup_heartbeat(void); - # ifdef CONFIG_MMU extern void mmu_reset(void); # endif /* CONFIG_MMU */ -extern void of_platform_reset_gpio_probe(void); - void time_init(void); void init_IRQ(void); void machine_early_init(const char *cmdline, unsigned int ram, diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h index 9774e1d9507b..a62d09420a47 100644 --- a/arch/microblaze/include/asm/unistd.h +++ b/arch/microblaze/include/asm/unistd.h @@ -38,6 +38,6 @@ #endif /* __ASSEMBLY__ */ -#define __NR_syscalls 399 +#define __NR_syscalls 401 #endif /* _ASM_MICROBLAZE_UNISTD_H */ diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h index eb156f914793..7a9f16a76413 100644 --- a/arch/microblaze/include/uapi/asm/unistd.h +++ b/arch/microblaze/include/uapi/asm/unistd.h @@ -415,5 +415,7 @@ #define __NR_pkey_alloc 396 #define __NR_pkey_free 397 #define __NR_statx 398 +#define __NR_io_pgetevents 399 +#define __NR_rseq 400 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */ diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile index 7e99cf6984a1..dd71637437f4 100644 --- a/arch/microblaze/kernel/Makefile +++ b/arch/microblaze/kernel/Makefile @@ -8,7 +8,6 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_timer.o = -pg CFLAGS_REMOVE_intc.o = -pg CFLAGS_REMOVE_early_printk.o = -pg -CFLAGS_REMOVE_heartbeat.o = -pg CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_process.o = -pg endif @@ -17,12 +16,11 @@ extra-y := head.o vmlinux.lds obj-y += dma.o exceptions.o \ hw_exception_handler.o irq.o \ - platform.o process.o prom.o ptrace.o \ + process.o prom.o ptrace.o \ reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o obj-y += cpu/ -obj-$(CONFIG_HEART_BEAT) += heartbeat.o obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o obj-$(CONFIG_MMU) += misc.o obj-$(CONFIG_STACKTRACE) += stacktrace.o diff --git a/arch/microblaze/kernel/heartbeat.c b/arch/microblaze/kernel/heartbeat.c deleted file mode 100644 index 2022130139d2..000000000000 --- a/arch/microblaze/kernel/heartbeat.c +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> - * Copyright (C) 2007-2009 PetaLogix - * Copyright (C) 2006 Atmark Techno, Inc. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#include <linux/sched.h> -#include <linux/sched/loadavg.h> -#include <linux/io.h> - -#include <asm/setup.h> -#include <asm/page.h> -#include <asm/prom.h> - -static unsigned int base_addr; - -void microblaze_heartbeat(void) -{ - static unsigned int cnt, period, dist; - - if (base_addr) { - if (cnt == 0 || cnt == dist) - out_be32(base_addr, 1); - else if (cnt == 7 || cnt == dist + 7) - out_be32(base_addr, 0); - - if (++cnt > period) { - cnt = 0; - /* - * The hyperbolic function below modifies the heartbeat - * period length in dependency of the current (5min) - * load. It goes through the points f(0)=126, f(1)=86, - * f(5)=51, f(inf)->30. 
- */ - period = ((672 << FSHIFT) / (5 * avenrun[0] + - (7 << FSHIFT))) + 30; - dist = period / 4; - } - } -} - -void microblaze_setup_heartbeat(void) -{ - struct device_node *gpio = NULL; - int *prop; - int j; - const char * const gpio_list[] = { - "xlnx,xps-gpio-1.00.a", - NULL - }; - - for (j = 0; gpio_list[j] != NULL; j++) { - gpio = of_find_compatible_node(NULL, NULL, gpio_list[j]); - if (gpio) - break; - } - - if (gpio) { - base_addr = be32_to_cpup(of_get_property(gpio, "reg", NULL)); - base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE); - pr_notice("Heartbeat GPIO at 0x%x\n", base_addr); - - /* GPIO is configured as output */ - prop = (int *) of_get_property(gpio, "xlnx,is-bidir", NULL); - if (prop) - out_be32(base_addr + 4, 0); - } -} diff --git a/arch/microblaze/kernel/platform.c b/arch/microblaze/kernel/platform.c deleted file mode 100644 index 2540d60610d9..000000000000 --- a/arch/microblaze/kernel/platform.c +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2008 Michal Simek <monstr@monstr.eu> - * - * based on virtex.c file - * - * Copyright 2007 Secret Lab Technologies Ltd. - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - -#include <linux/init.h> -#include <linux/of_platform.h> -#include <asm/setup.h> - -static struct of_device_id xilinx_of_bus_ids[] __initdata = { - { .compatible = "simple-bus", }, - { .compatible = "xlnx,compound", }, - {} -}; - -static int __init microblaze_device_probe(void) -{ - of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL); - of_platform_reset_gpio_probe(); - return 0; -} -device_initcall(microblaze_device_probe); diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c index bab4c8330ef4..fcbe1daf6316 100644 --- a/arch/microblaze/kernel/reset.c +++ b/arch/microblaze/kernel/reset.c @@ -18,7 +18,7 @@ static int handle; /* reset pin handle */ static unsigned int reset_val; -void of_platform_reset_gpio_probe(void) +static int of_platform_reset_gpio_probe(void) { int ret; handle = of_get_named_gpio(of_find_node_by_path("/"), @@ -27,13 +27,13 @@ void of_platform_reset_gpio_probe(void) if (!gpio_is_valid(handle)) { pr_info("Skipping unavailable RESET gpio %d (%s)\n", handle, "reset"); - return; + return -ENODEV; } ret = gpio_request(handle, "reset"); if (ret < 0) { pr_info("GPIO pin is already allocated\n"); - return; + return ret; } /* get current setup value */ @@ -51,11 +51,12 @@ void of_platform_reset_gpio_probe(void) pr_info("RESET: Registered gpio device: %d, current val: %d\n", handle, reset_val); - return; + return 0; err: gpio_free(handle); - return; + return ret; } +device_initcall(of_platform_reset_gpio_probe); static void gpio_system_reset(void) diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S index 56bcf313121f..6ab650593792 100644 --- a/arch/microblaze/kernel/syscall_table.S +++ b/arch/microblaze/kernel/syscall_table.S @@ -400,3 +400,5 @@ ENTRY(sys_call_table) .long sys_pkey_alloc .long sys_pkey_free .long sys_statx + .long sys_io_pgetevents + .long sys_rseq diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c index 7de941cbbd94..a6683484b3a1 100644 --- a/arch/microblaze/kernel/timer.c +++ b/arch/microblaze/kernel/timer.c @@ -156,9 +156,6 @@ static inline void timer_ack(void) static irqreturn_t timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = &clockevent_xilinx_timer; 
-#ifdef CONFIG_HEART_BEAT - microblaze_heartbeat(); -#endif timer_ack(); evt->event_handler(evt); return IRQ_HANDLED; @@ -318,10 +315,6 @@ static int __init xilinx_timer_init(struct device_node *timer) return ret; } -#ifdef CONFIG_HEART_BEAT - microblaze_setup_heartbeat(); -#endif - ret = xilinx_clocksource_init(); if (ret) return ret; diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 8d85046adcc8..9670e70139fd 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -29,6 +29,7 @@ #include <linux/kallsyms.h> #include <linux/random.h> #include <linux/prctl.h> +#include <linux/nmi.h> #include <asm/asm.h> #include <asm/bootinfo.h> @@ -655,28 +656,42 @@ unsigned long arch_align_stack(unsigned long sp) return sp & ALMASK; } -static void arch_dump_stack(void *info) +static DEFINE_PER_CPU(call_single_data_t, backtrace_csd); +static struct cpumask backtrace_csd_busy; + +static void handle_backtrace(void *info) { - struct pt_regs *regs; + nmi_cpu_backtrace(get_irq_regs()); + cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy); +} - regs = get_irq_regs(); +static void raise_backtrace(cpumask_t *mask) +{ + call_single_data_t *csd; + int cpu; - if (regs) - show_regs(regs); + for_each_cpu(cpu, mask) { + /* + * If we previously sent an IPI to the target CPU & it hasn't + * cleared its bit in the busy cpumask then it didn't handle + * our previous IPI & it's not safe for us to reuse the + * call_single_data_t. + */ + if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) { + pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n", + cpu); + continue; + } - dump_stack(); + csd = &per_cpu(backtrace_csd, cpu); + csd->func = handle_backtrace; + smp_call_function_single_async(cpu, csd); + } } void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) { - long this_cpu = get_cpu(); - - if (cpumask_test_cpu(this_cpu, mask) && !exclude_self) - dump_stack(); - - smp_call_function_many(mask, arch_dump_stack, NULL, 1); - - put_cpu(); + nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace); } int mips_get_process_fp_mode(struct task_struct *task) diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 00f2535d2226..0a9cfe7a0372 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -801,7 +801,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) regs->regs[0] = 0; /* Don't deal with this again. 
*/ } - rseq_signal_deliver(regs); + rseq_signal_deliver(ksig, regs); if (sig_uses_siginfo(&ksig->ka, abi)) ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn, @@ -870,7 +870,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); - rseq_handle_notify_resume(regs); + rseq_handle_notify_resume(NULL, regs); } user_enter(); diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index d67fa74622ee..8d505a21396e 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -351,6 +351,7 @@ static void __show_regs(const struct pt_regs *regs) void show_regs(struct pt_regs *regs) { __show_regs((struct pt_regs *)regs); + dump_stack(); } void show_registers(struct pt_regs *regs) diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c index 1986e09fb457..1601d90b087b 100644 --- a/arch/mips/mm/ioremap.c +++ b/arch/mips/mm/ioremap.c @@ -9,6 +9,7 @@ #include <linux/export.h> #include <asm/addrspace.h> #include <asm/byteorder.h> +#include <linux/ioport.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -98,6 +99,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr, return error; } +static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, + void *arg) +{ + unsigned long i; + + for (i = 0; i < nr_pages; i++) { + if (pfn_valid(start_pfn + i) && + !PageReserved(pfn_to_page(start_pfn + i))) + return 1; + } + + return 0; +} + /* * Generic mapping function (not visible outside): */ @@ -116,8 +131,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr, void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags) { + unsigned long offset, pfn, last_pfn; struct vm_struct * area; - unsigned long offset; phys_addr_t last_addr; void * addr; @@ -137,18 +152,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long return (void __iomem *) CKSEG1ADDR(phys_addr); /* - * Don't allow anybody to remap normal RAM that we're using.. + * Don't allow anybody to remap RAM that may be allocated by the page + * allocator, since that could lead to races & data clobbering. 
*/ - if (phys_addr < virt_to_phys(high_memory)) { - char *t_addr, *t_end; - struct page *page; - - t_addr = __va(phys_addr); - t_end = t_addr + (size - 1); - - for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++) - if(!PageReserved(page)) - return NULL; + pfn = PFN_DOWN(phys_addr); + last_pfn = PFN_DOWN(last_addr); + if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, + __ioremap_check_ram) == 1) { + WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n", + &phys_addr, &last_addr); + return NULL; } /* diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index 6aed974276d8..34f7222c5efe 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig @@ -12,17 +12,17 @@ config NDS32 select CLONE_BACKWARDS select COMMON_CLK select DMA_NONCOHERENT_OPS - select GENERIC_ASHLDI3 - select GENERIC_ASHRDI3 - select GENERIC_LSHRDI3 - select GENERIC_CMPDI2 - select GENERIC_MULDI3 - select GENERIC_UCMPDI2 select GENERIC_ATOMIC64 select GENERIC_CPU_DEVICES select GENERIC_CLOCKEVENTS select GENERIC_IRQ_CHIP select GENERIC_IRQ_SHOW + select GENERIC_LIB_ASHLDI3 + select GENERIC_LIB_ASHRDI3 + select GENERIC_LIB_CMPDI2 + select GENERIC_LIB_LSHRDI3 + select GENERIC_LIB_MULDI3 + select GENERIC_LIB_UCMPDI2 select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select GENERIC_TIME_VSYSCALL diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile index 513bb2e9baf9..031c676821ff 100644 --- a/arch/nds32/Makefile +++ b/arch/nds32/Makefile @@ -34,10 +34,12 @@ ifdef CONFIG_CPU_LITTLE_ENDIAN KBUILD_CFLAGS += $(call cc-option, -EL) KBUILD_AFLAGS += $(call cc-option, -EL) LDFLAGS += $(call cc-option, -EL) +CHECKFLAGS += -D__NDS32_EL__ else KBUILD_CFLAGS += $(call cc-option, -EB) KBUILD_AFLAGS += $(call cc-option, -EB) LDFLAGS += $(call cc-option, -EB) +CHECKFLAGS += -D__NDS32_EB__ endif boot := arch/nds32/boot diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h index 10b48f0d8e85..8b26198d51bb 100644 --- a/arch/nds32/include/asm/cacheflush.h +++ b/arch/nds32/include/asm/cacheflush.h @@ -8,6 +8,8 @@ #define PG_dcache_dirty PG_arch_1 +void flush_icache_range(unsigned long start, unsigned long end); +void flush_icache_page(struct vm_area_struct *vma, struct page *page); #ifdef CONFIG_CPU_CACHE_ALIASING void flush_cache_mm(struct mm_struct *mm); void flush_cache_dup_mm(struct mm_struct *mm); @@ -34,13 +36,16 @@ void flush_anon_page(struct vm_area_struct *vma, void flush_kernel_dcache_page(struct page *page); void flush_kernel_vmap_range(void *addr, int size); void invalidate_kernel_vmap_range(void *addr, int size); -void flush_icache_range(unsigned long start, unsigned long end); -void flush_icache_page(struct vm_area_struct *vma, struct page *page); #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages) #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages) #else #include <asm-generic/cacheflush.h> +#undef flush_icache_range +#undef flush_icache_page +#undef flush_icache_user_range +void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, + unsigned long addr, int len); #endif #endif /* __NDS32_CACHEFLUSH_H__ */ diff --git a/arch/nds32/include/asm/futex.h b/arch/nds32/include/asm/futex.h index eab5e84bd991..cb6cb91cfdf8 100644 --- a/arch/nds32/include/asm/futex.h +++ b/arch/nds32/include/asm/futex.h @@ -16,7 +16,7 @@ " .popsection\n" \ " .pushsection .fixup,\"ax\"\n" \ "4: move %0, " err_reg "\n" \ - " j 3b\n" \ + " b 3b\n" \ " .popsection" #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ diff --git 
a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c index 2f5b2ccebe47..63a1a5ef5219 100644 --- a/arch/nds32/kernel/setup.c +++ b/arch/nds32/kernel/setup.c @@ -278,7 +278,8 @@ static void __init setup_memory(void) void __init setup_arch(char **cmdline_p) { - early_init_devtree( __dtb_start); + early_init_devtree(__atags_pointer ? \ + phys_to_virt(__atags_pointer) : __dtb_start); setup_cpuinfo(); diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c index ce8fd34497bf..254703653b6f 100644 --- a/arch/nds32/mm/cacheflush.c +++ b/arch/nds32/mm/cacheflush.c @@ -13,7 +13,39 @@ extern struct cache_info L1_cache_info[2]; -#ifndef CONFIG_CPU_CACHE_ALIASING +void flush_icache_range(unsigned long start, unsigned long end) +{ + unsigned long line_size, flags; + line_size = L1_cache_info[DCACHE].line_size; + start = start & ~(line_size - 1); + end = (end + line_size - 1) & ~(line_size - 1); + local_irq_save(flags); + cpu_cache_wbinval_range(start, end, 1); + local_irq_restore(flags); +} +EXPORT_SYMBOL(flush_icache_range); + +void flush_icache_page(struct vm_area_struct *vma, struct page *page) +{ + unsigned long flags; + unsigned long kaddr; + local_irq_save(flags); + kaddr = (unsigned long)kmap_atomic(page); + cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC); + kunmap_atomic((void *)kaddr); + local_irq_restore(flags); +} +EXPORT_SYMBOL(flush_icache_page); + +void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, + unsigned long addr, int len) +{ + unsigned long kaddr; + kaddr = (unsigned long)kmap_atomic(page) + (addr & ~PAGE_MASK); + flush_icache_range(kaddr, kaddr + len); + kunmap_atomic((void *)kaddr); +} + void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t * pte) { @@ -35,19 +67,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) || (vma->vm_flags & VM_EXEC)) { - - if (!PageHighMem(page)) { - cpu_cache_wbinval_page((unsigned long) - page_address(page), - vma->vm_flags & VM_EXEC); - } else { - unsigned long kaddr = (unsigned long)kmap_atomic(page); - cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC); - kunmap_atomic((void *)kaddr); - } + unsigned long kaddr; + local_irq_save(flags); + kaddr = (unsigned long)kmap_atomic(page); + cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC); + kunmap_atomic((void *)kaddr); + local_irq_restore(flags); } } -#else +#ifdef CONFIG_CPU_CACHE_ALIASING extern pte_t va_present(struct mm_struct *mm, unsigned long addr); static inline unsigned long aliasing(unsigned long addr, unsigned long page) @@ -317,52 +345,4 @@ void invalidate_kernel_vmap_range(void *addr, int size) local_irq_restore(flags); } EXPORT_SYMBOL(invalidate_kernel_vmap_range); - -void flush_icache_range(unsigned long start, unsigned long end) -{ - unsigned long line_size, flags; - line_size = L1_cache_info[DCACHE].line_size; - start = start & ~(line_size - 1); - end = (end + line_size - 1) & ~(line_size - 1); - local_irq_save(flags); - cpu_cache_wbinval_range(start, end, 1); - local_irq_restore(flags); -} -EXPORT_SYMBOL(flush_icache_range); - -void flush_icache_page(struct vm_area_struct *vma, struct page *page) -{ - unsigned long flags; - local_irq_save(flags); - cpu_cache_wbinval_page((unsigned long)page_address(page), - vma->vm_flags & VM_EXEC); - local_irq_restore(flags); -} - -void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, - pte_t * pte) -{ - struct page *page; - unsigned long flags; - unsigned long pfn = pte_pfn(*pte); 
- - if (!pfn_valid(pfn)) - return; - - if (vma->vm_mm == current->active_mm) { - local_irq_save(flags); - __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN); - __nds32__tlbop_rwr(*pte); - __nds32__isb(); - local_irq_restore(flags); - } - - page = pfn_to_page(pfn); - if (test_and_clear_bit(PG_dcache_dirty, &page->flags) || - (vma->vm_flags & VM_EXEC)) { - local_irq_save(flags); - cpu_dcache_wbinval_page((unsigned long)page_address(page)); - local_irq_restore(flags); - } -} #endif diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h index 3e1a46615120..8999b9226512 100644 --- a/arch/openrisc/include/asm/pgalloc.h +++ b/arch/openrisc/include/asm/pgalloc.h @@ -98,8 +98,12 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte) __free_page(pte); } +#define __pte_free_tlb(tlb, pte, addr) \ +do { \ + pgtable_page_dtor(pte); \ + tlb_remove_page((tlb), (pte)); \ +} while (0) -#define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte)) #define pmd_pgtable(pmd) pmd_page(pmd) #define check_pgt_cache() do { } while (0) diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S index 690d55272ba6..0c826ad6e994 100644 --- a/arch/openrisc/kernel/entry.S +++ b/arch/openrisc/kernel/entry.S @@ -277,12 +277,6 @@ EXCEPTION_ENTRY(_data_page_fault_handler) l.addi r3,r1,0 // pt_regs /* r4 set be EXCEPTION_HANDLE */ // effective address of fault - /* - * __PHX__: TODO - * - * all this can be written much simpler. look at - * DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part - */ #ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX l.lwz r6,PT_PC(r3) // address of an offending insn l.lwz r6,0(r6) // instruction that caused pf @@ -314,7 +308,7 @@ EXCEPTION_ENTRY(_data_page_fault_handler) #else - l.lwz r6,PT_SR(r3) // SR + l.mfspr r6,r0,SPR_SR // SR l.andi r6,r6,SPR_SR_DSX // check for delay slot exception l.sfne r6,r0 // exception happened in delay slot l.bnf 7f diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S index fb02b2a1d6f2..9fc6b60140f0 100644 --- a/arch/openrisc/kernel/head.S +++ b/arch/openrisc/kernel/head.S @@ -210,8 +210,7 @@ * r4 - EEAR exception EA * r10 - current pointing to current_thread_info struct * r12 - syscall 0, since we didn't come from syscall - * r13 - temp it actually contains new SR, not needed anymore - * r31 - handler address of the handler we'll jump to + * r30 - handler address of the handler we'll jump to * * handler has to save remaining registers to the exception * ksp frame *before* tainting them! 
@@ -244,6 +243,7 @@ /* r1 is KSP, r30 is __pa(KSP) */ ;\ tophys (r30,r1) ;\ l.sw PT_GPR12(r30),r12 ;\ + /* r4 use for tmp before EA */ ;\ l.mfspr r12,r0,SPR_EPCR_BASE ;\ l.sw PT_PC(r30),r12 ;\ l.mfspr r12,r0,SPR_ESR_BASE ;\ @@ -263,7 +263,10 @@ /* r12 == 1 if we come from syscall */ ;\ CLEAR_GPR(r12) ;\ /* ----- turn on MMU ----- */ ;\ - l.ori r30,r0,(EXCEPTION_SR) ;\ + /* Carry DSX into exception SR */ ;\ + l.mfspr r30,r0,SPR_SR ;\ + l.andi r30,r30,SPR_SR_DSX ;\ + l.ori r30,r30,(EXCEPTION_SR) ;\ l.mtspr r0,r30,SPR_ESR_BASE ;\ /* r30: EA address of handler */ ;\ LOAD_SYMBOL_2_GPR(r30,handler) ;\ diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index fac246e6f37a..d8981cbb852a 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -300,7 +300,7 @@ static inline int in_delay_slot(struct pt_regs *regs) return 0; } #else - return regs->sr & SPR_SR_DSX; + return mfspr(SPR_SR) & SPR_SR_DSX; #endif } diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index c480770fabcd..17526bebcbd2 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -244,11 +244,11 @@ config PARISC_PAGE_SIZE_4KB config PARISC_PAGE_SIZE_16KB bool "16KB" - depends on PA8X00 + depends on PA8X00 && BROKEN config PARISC_PAGE_SIZE_64KB bool "64KB" - depends on PA8X00 + depends on PA8X00 && BROKEN endchoice @@ -347,7 +347,7 @@ config NR_CPUS int "Maximum number of CPUs (2-32)" range 2 32 depends on SMP - default "32" + default "4" endmenu diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 714284ea6cc2..5ce030266e7d 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -65,10 +65,6 @@ endif # kernel. cflags-y += -mdisable-fpregs -# Without this, "ld -r" results in .text sections that are too big -# (> 0x40000) for branches to reach stubs. -cflags-y += -ffunction-sections - # Use long jumps instead of long branches (needed if your linker fails to # link a too big vmlinux executable). Not enabled for building modules. 
ifdef CONFIG_MLONGCALLS diff --git a/arch/parisc/include/asm/signal.h b/arch/parisc/include/asm/signal.h index eeb5c8858663..715c96ba2ec8 100644 --- a/arch/parisc/include/asm/signal.h +++ b/arch/parisc/include/asm/signal.h @@ -21,14 +21,6 @@ typedef struct { unsigned long sig[_NSIG_WORDS]; } sigset_t; -#ifndef __KERNEL__ -struct sigaction { - __sighandler_t sa_handler; - unsigned long sa_flags; - sigset_t sa_mask; /* mask last for extensibility */ -}; -#endif - #include <asm/sigcontext.h> #endif /* !__ASSEMBLY */ diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h index 4872e77aa96b..dc77c5a51db7 100644 --- a/arch/parisc/include/uapi/asm/unistd.h +++ b/arch/parisc/include/uapi/asm/unistd.h @@ -364,8 +364,9 @@ #define __NR_preadv2 (__NR_Linux + 347) #define __NR_pwritev2 (__NR_Linux + 348) #define __NR_statx (__NR_Linux + 349) +#define __NR_io_pgetevents (__NR_Linux + 350) -#define __NR_Linux_syscalls (__NR_statx + 1) +#define __NR_Linux_syscalls (__NR_io_pgetevents + 1) #define __IGNORE_select /* newselect */ diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index e0e1c9775c32..5eb979d04b90 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -154,17 +154,14 @@ int register_parisc_driver(struct parisc_driver *driver) { /* FIXME: we need this because apparently the sti * driver can be registered twice */ - if(driver->drv.name) { - printk(KERN_WARNING - "BUG: skipping previously registered driver %s\n", - driver->name); + if (driver->drv.name) { + pr_warn("BUG: skipping previously registered driver %s\n", + driver->name); return 1; } if (!driver->probe) { - printk(KERN_WARNING - "BUG: driver %s has no probe routine\n", - driver->name); + pr_warn("BUG: driver %s has no probe routine\n", driver->name); return 1; } @@ -491,12 +488,9 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path) dev = create_parisc_device(mod_path); if (dev->id.hw_type != HPHW_FAULTY) { - printk(KERN_ERR "Two devices have hardware path [%s]. " - "IODC data for second device: " - "%02x%02x%02x%02x%02x%02x\n" - "Rearranging GSC cards sometimes helps\n", - parisc_pathname(dev), iodc_data[0], iodc_data[1], - iodc_data[3], iodc_data[4], iodc_data[5], iodc_data[6]); + pr_err("Two devices have hardware path [%s]. IODC data for second device: %7phN\n" + "Rearranging GSC cards sometimes helps\n", + parisc_pathname(dev), iodc_data); return NULL; } @@ -528,8 +522,7 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path) * the keyboard controller */ if ((hpa & 0xfff) == 0 && insert_resource(&iomem_resource, &dev->hpa)) - printk("Unable to claim HPA %lx for device %s\n", - hpa, name); + pr_warn("Unable to claim HPA %lx for device %s\n", hpa, name); return dev; } @@ -875,7 +868,7 @@ static void print_parisc_device(struct parisc_device *dev) static int count; print_pa_hwpath(dev, hw_path); - printk(KERN_INFO "%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", + pr_info("%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type, dev->id.hversion_rev, dev->id.hversion, dev->id.sversion); diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 6308749359e4..fe3f2a49d2b1 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -445,6 +445,7 @@ ENTRY_COMP(preadv2) ENTRY_COMP(pwritev2) ENTRY_SAME(statx) + ENTRY_COMP(io_pgetevents) /* 350 */ .ifne (. 
- 90b) - (__NR_Linux_syscalls * (91b - 90b)) diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index 143f90e2f9f3..2ef83d78eec4 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c @@ -25,7 +25,7 @@ /* #define DEBUG 1 */ #ifdef DEBUG -#define dbg(x...) printk(x) +#define dbg(x...) pr_debug(x) #else #define dbg(x...) #endif @@ -182,7 +182,7 @@ int __init unwind_init(void) start = (long)&__start___unwind[0]; stop = (long)&__stop___unwind[0]; - printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n", + dbg("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n", start, stop, (stop - start) / sizeof(struct unwind_table_entry)); diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 2ea575cb3401..fb96206de317 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -243,6 +243,7 @@ endif cpu-as-$(CONFIG_4xx) += -Wa,-m405 cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec) cpu-as-$(CONFIG_E200) += -Wa,-me200 +cpu-as-$(CONFIG_E500) += -Wa,-me500 cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc) diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h index e4633803fe43..82e44b1a00ae 100644 --- a/arch/powerpc/include/asm/book3s/32/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h @@ -138,7 +138,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb, static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, unsigned long address) { - pgtable_page_dtor(table); pgtable_free_tlb(tlb, page_address(table), 0); } #endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */ diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 896efa559996..79d570cbf332 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -35,9 +35,9 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm( extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, unsigned long ua, unsigned long entries); extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, - unsigned long ua, unsigned long *hpa); + unsigned long ua, unsigned int pageshift, unsigned long *hpa); extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, - unsigned long ua, unsigned long *hpa); + unsigned long ua, unsigned int pageshift, unsigned long *hpa); extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem); #endif diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h index 9de40eb614da..8825953c225b 100644 --- a/arch/powerpc/include/asm/nohash/32/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h @@ -140,7 +140,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, unsigned long address) { tlb_flush_pgtable(tlb, address); - pgtable_page_dtor(table); pgtable_free_tlb(tlb, page_address(table), 0); } #endif /* _ASM_POWERPC_PGALLOC_32_H */ diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index cfcf6a874cfa..01b5171ea189 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -393,3 +393,4 @@ SYSCALL(pkey_alloc) SYSCALL(pkey_free) SYSCALL(pkey_mprotect) SYSCALL(rseq) +COMPAT_SYS(io_pgetevents) diff --git a/arch/powerpc/include/asm/unistd.h 
b/arch/powerpc/include/asm/unistd.h index 1e9708632dce..c19379f0a32e 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -12,7 +12,7 @@ #include <uapi/asm/unistd.h> -#define NR_syscalls 388 +#define NR_syscalls 389 #define __NR__exit __NR_exit diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index ac5ba55066dd..985534d0b448 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h @@ -399,5 +399,6 @@ #define __NR_pkey_free 385 #define __NR_pkey_mprotect 386 #define __NR_rseq 387 +#define __NR_io_pgetevents 388 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index e734f6e45abc..689306118b48 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -144,7 +144,9 @@ power9_restore_additional_sprs: mtspr SPRN_MMCR1, r4 ld r3, STOP_MMCR2(r13) + ld r4, PACA_SPRG_VDSO(r13) mtspr SPRN_MMCR2, r3 + mtspr SPRN_SPRG3, r4 blr /* diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 4f861055a852..d63b488d34d7 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -285,9 +285,6 @@ pci_bus_to_hose(int bus) * Note that the returned IO or memory base is a physical address */ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpragmas" -#pragma GCC diagnostic ignored "-Wattribute-alias" SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus, unsigned long, devfn) { @@ -313,4 +310,3 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which, return result; } -#pragma GCC diagnostic pop diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 812171c09f42..dff28f903512 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -203,9 +203,6 @@ void pcibios_setup_phb_io_space(struct pci_controller *hose) #define IOBASE_ISA_IO 3 #define IOBASE_ISA_MEM 4 -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpragmas" -#pragma GCC diagnostic ignored "-Wattribute-alias" SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, in_bus, unsigned long, in_devfn) { @@ -259,7 +256,6 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, in_bus, return -EOPNOTSUPP; } -#pragma GCC diagnostic pop #ifdef CONFIG_NUMA int pcibus_to_node(struct pci_bus *bus) diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 7fb9f83dcde8..8afd146bc9c7 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -1051,9 +1051,6 @@ struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log, } /* We assume to be passed big endian arguments */ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpragmas" -#pragma GCC diagnostic ignored "-Wattribute-alias" SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) { struct rtas_args args; @@ -1140,7 +1137,6 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) return 0; } -#pragma GCC diagnostic pop /* * Call early during boot, before mem init, to retrieve the RTAS diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 5eedbb282d42..e6474a45cef5 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -1038,9 +1038,6 @@ static int do_setcontext_tm(struct ucontext __user *ucp, } #endif -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpragmas" -#pragma GCC diagnostic ignored "-Wattribute-alias" #ifdef 
CONFIG_PPC64 COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, struct ucontext __user *, new_ctx, int, ctx_size) @@ -1134,7 +1131,6 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, set_thread_flag(TIF_RESTOREALL); return 0; } -#pragma GCC diagnostic pop #ifdef CONFIG_PPC64 COMPAT_SYSCALL_DEFINE0(rt_sigreturn) @@ -1231,9 +1227,6 @@ SYSCALL_DEFINE0(rt_sigreturn) return 0; } -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpragmas" -#pragma GCC diagnostic ignored "-Wattribute-alias" #ifdef CONFIG_PPC32 SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx, int, ndbg, struct sig_dbg_op __user *, dbg) @@ -1337,7 +1330,6 @@ SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx, return 0; } #endif -#pragma GCC diagnostic pop /* * OK, we're invoking a handler diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index d42b60020389..83d51bf586c7 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -625,9 +625,6 @@ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp) /* * Handle {get,set,swap}_context operations */ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpragmas" -#pragma GCC diagnostic ignored "-Wattribute-alias" SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, struct ucontext __user *, new_ctx, long, ctx_size) { @@ -693,7 +690,6 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, set_thread_flag(TIF_RESTOREALL); return 0; } -#pragma GCC diagnostic pop /* diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index 083fa06962fd..466216506eb2 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -62,9 +62,6 @@ out: return ret; } -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpragmas" -#pragma GCC diagnostic ignored "-Wattribute-alias" SYSCALL_DEFINE6(mmap2, unsigned long, addr, size_t, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, pgoff) @@ -78,7 +75,6 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len, { return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT); } -#pragma GCC diagnostic pop #ifdef CONFIG_PPC32 /* diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 85396e93747f..8167ce8e0cdd 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -448,7 +448,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, /* This only handles v2 IOMMU type, v1 is handled via ioctl() */ return H_TOO_HARD; - if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa))) + if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa))) return H_HARDWARE; if (mm_iommu_mapped_inc(mem)) diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 925fc316a104..5b298f5a1a14 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -279,7 +279,8 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, if (!mem) return H_TOO_HARD; - if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa))) + if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift, + &hpa))) return H_HARDWARE; pua = (void *) vmalloc_to_phys(pua); @@ -469,7 +470,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K); if (mem) - prereg = mm_iommu_ua_to_hpa_rm(mem, 
ua, &tces) == 0; + prereg = mm_iommu_ua_to_hpa_rm(mem, ua, + IOMMU_PAGE_SHIFT_4K, &tces) == 0; } if (!prereg) { diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c index abb43646927a..a4ca57612558 100644 --- a/arch/powerpc/mm/mmu_context_iommu.c +++ b/arch/powerpc/mm/mmu_context_iommu.c @@ -19,6 +19,7 @@ #include <linux/hugetlb.h> #include <linux/swap.h> #include <asm/mmu_context.h> +#include <asm/pte-walk.h> static DEFINE_MUTEX(mem_list_mutex); @@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t { struct rcu_head rcu; unsigned long used; atomic64_t mapped; + unsigned int pageshift; u64 ua; /* userspace address */ u64 entries; /* number of entries in hpas[] */ u64 *hpas; /* vmalloc'ed */ @@ -125,6 +127,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, { struct mm_iommu_table_group_mem_t *mem; long i, j, ret = 0, locked_entries = 0; + unsigned int pageshift; + unsigned long flags; struct page *page = NULL; mutex_lock(&mem_list_mutex); @@ -159,6 +163,12 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, goto unlock_exit; } + /* + * For a starting point for a maximum page size calculation + * we use @ua and @entries natural alignment to allow IOMMU pages + * smaller than huge pages but still bigger than PAGE_SIZE. + */ + mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT)); mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0]))); if (!mem->hpas) { kfree(mem); @@ -199,6 +209,23 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, } } populate: + pageshift = PAGE_SHIFT; + if (PageCompound(page)) { + pte_t *pte; + struct page *head = compound_head(page); + unsigned int compshift = compound_order(head); + + local_irq_save(flags); /* disables as well */ + pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift); + local_irq_restore(flags); + + /* Double check it is still the same pinned page */ + if (pte && pte_page(*pte) == head && + pageshift == compshift) + pageshift = max_t(unsigned int, pageshift, + PAGE_SHIFT); + } + mem->pageshift = min(mem->pageshift, pageshift); mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; } @@ -349,7 +376,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, EXPORT_SYMBOL_GPL(mm_iommu_find); long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, - unsigned long ua, unsigned long *hpa) + unsigned long ua, unsigned int pageshift, unsigned long *hpa) { const long entry = (ua - mem->ua) >> PAGE_SHIFT; u64 *va = &mem->hpas[entry]; @@ -357,6 +384,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, if (entry >= mem->entries) return -EFAULT; + if (pageshift > mem->pageshift) + return -EFAULT; + *hpa = *va | (ua & ~PAGE_MASK); return 0; @@ -364,7 +394,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa); long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, - unsigned long ua, unsigned long *hpa) + unsigned long ua, unsigned int pageshift, unsigned long *hpa) { const long entry = (ua - mem->ua) >> PAGE_SHIFT; void *va = &mem->hpas[entry]; @@ -373,6 +403,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, if (entry >= mem->entries) return -EFAULT; + if (pageshift > mem->pageshift) + return -EFAULT; + pa = (void *) vmalloc_to_phys(va); if (!pa) return -EFAULT; diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c index 75cb646a79c3..9d16ee251fc0 100644 --- a/arch/powerpc/mm/subpage-prot.c 
+++ b/arch/powerpc/mm/subpage-prot.c @@ -186,9 +186,6 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr, * in a 2-bit field won't allow writes to a page that is otherwise * write-protected. */ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpragmas" -#pragma GCC diagnostic ignored "-Wattribute-alias" SYSCALL_DEFINE3(subpage_prot, unsigned long, addr, unsigned long, len, u32 __user *, map) { @@ -272,4 +269,3 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr, up_write(&mm->mmap_sem); return err; } -#pragma GCC diagnostic pop diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c index 7c968e46736f..12e6e4d30602 100644 --- a/arch/powerpc/platforms/powermac/time.c +++ b/arch/powerpc/platforms/powermac/time.c @@ -42,7 +42,11 @@ #define DBG(x...) #endif -/* Apparently the RTC stores seconds since 1 Jan 1904 */ +/* + * Offset between Unix time (1970-based) and Mac time (1904-based). Cuda and PMU + * times wrap in 2040. If we need to handle later times, the read_time functions + * need to be changed to interpret wrapped times as post-2040. + */ #define RTC_OFFSET 2082844800 /* @@ -97,8 +101,11 @@ static time64_t cuda_get_time(void) if (req.reply_len != 7) printk(KERN_ERR "cuda_get_time: got %d byte reply\n", req.reply_len); - now = (req.reply[3] << 24) + (req.reply[4] << 16) - + (req.reply[5] << 8) + req.reply[6]; + now = (u32)((req.reply[3] << 24) + (req.reply[4] << 16) + + (req.reply[5] << 8) + req.reply[6]); + /* it's either after year 2040, or the RTC has gone backwards */ + WARN_ON(now < RTC_OFFSET); + return now - RTC_OFFSET; } @@ -106,10 +113,10 @@ static time64_t cuda_get_time(void) static int cuda_set_rtc_time(struct rtc_time *tm) { - time64_t nowtime; + u32 nowtime; struct adb_request req; - nowtime = rtc_tm_to_time64(tm) + RTC_OFFSET; + nowtime = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET); if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME, nowtime >> 24, nowtime >> 16, nowtime >> 8, nowtime) < 0) @@ -140,8 +147,12 @@ static time64_t pmu_get_time(void) if (req.reply_len != 4) printk(KERN_ERR "pmu_get_time: got %d byte reply from PMU\n", req.reply_len); - now = (req.reply[0] << 24) + (req.reply[1] << 16) - + (req.reply[2] << 8) + req.reply[3]; + now = (u32)((req.reply[0] << 24) + (req.reply[1] << 16) + + (req.reply[2] << 8) + req.reply[3]); + + /* it's either after year 2040, or the RTC has gone backwards */ + WARN_ON(now < RTC_OFFSET); + return now - RTC_OFFSET; } @@ -149,10 +160,10 @@ static time64_t pmu_get_time(void) static int pmu_set_rtc_time(struct rtc_time *tm) { - time64_t nowtime; + u32 nowtime; struct adb_request req; - nowtime = rtc_tm_to_time64(tm) + RTC_OFFSET; + nowtime = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET); if (pmu_request(&req, NULL, 5, PMU_SET_RTC, nowtime >> 24, nowtime >> 16, nowtime >> 8, nowtime) < 0) return -ENXIO; diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 47166ad2a669..196978733e64 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -2734,7 +2734,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr, { int nr, dotted; unsigned long first_adr; - unsigned long inst, last_inst = 0; + unsigned int inst, last_inst = 0; unsigned char val[4]; dotted = 0; @@ -2758,7 +2758,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr, dotted = 0; last_inst = inst; if (praddr) - printf(REG" %.8lx", adr, inst); + printf(REG" %.8x", adr, inst); printf("\t"); dump_func(inst, adr); printf("\n"); diff 
--git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index f12680c9b947..4764fdeb4f1f 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -107,6 +107,7 @@ config ARCH_RV32I select GENERIC_LIB_ASHLDI3 select GENERIC_LIB_ASHRDI3 select GENERIC_LIB_LSHRDI3 + select GENERIC_LIB_UCMPDI2 config ARCH_RV64I bool "RV64I" diff --git a/arch/riscv/include/uapi/asm/elf.h b/arch/riscv/include/uapi/asm/elf.h index 5cae4c30cd8e..1e0dfc36aab9 100644 --- a/arch/riscv/include/uapi/asm/elf.h +++ b/arch/riscv/include/uapi/asm/elf.h @@ -21,8 +21,13 @@ typedef struct user_regs_struct elf_gregset_t; typedef union __riscv_fp_state elf_fpregset_t; -#define ELF_RISCV_R_SYM(r_info) ((r_info) >> 32) -#define ELF_RISCV_R_TYPE(r_info) ((r_info) & 0xffffffff) +#if __riscv_xlen == 64 +#define ELF_RISCV_R_SYM(r_info) ELF64_R_SYM(r_info) +#define ELF_RISCV_R_TYPE(r_info) ELF64_R_TYPE(r_info) +#else +#define ELF_RISCV_R_SYM(r_info) ELF32_R_SYM(r_info) +#define ELF_RISCV_R_TYPE(r_info) ELF32_R_TYPE(r_info) +#endif /* * RISC-V relocation types diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c index b74cbfbce2d0..7bcdaed15703 100644 --- a/arch/riscv/kernel/irq.c +++ b/arch/riscv/kernel/irq.c @@ -16,10 +16,6 @@ #include <linux/irqchip.h> #include <linux/irqdomain.h> -#ifdef CONFIG_RISCV_INTC -#include <linux/irqchip/irq-riscv-intc.h> -#endif - void __init init_IRQ(void) { irqchip_init(); diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 1d5e9b934b8c..3303ed2cd419 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -37,7 +37,7 @@ static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v) static int apply_r_riscv_branch_rela(struct module *me, u32 *location, Elf_Addr v) { - s64 offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - (void *)location; u32 imm12 = (offset & 0x1000) << (31 - 12); u32 imm11 = (offset & 0x800) >> (11 - 7); u32 imm10_5 = (offset & 0x7e0) << (30 - 10); @@ -50,7 +50,7 @@ static int apply_r_riscv_branch_rela(struct module *me, u32 *location, static int apply_r_riscv_jal_rela(struct module *me, u32 *location, Elf_Addr v) { - s64 offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - (void *)location; u32 imm20 = (offset & 0x100000) << (31 - 20); u32 imm19_12 = (offset & 0xff000); u32 imm11 = (offset & 0x800) << (20 - 11); @@ -63,7 +63,7 @@ static int apply_r_riscv_jal_rela(struct module *me, u32 *location, static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location, Elf_Addr v) { - s64 offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - (void *)location; u16 imm8 = (offset & 0x100) << (12 - 8); u16 imm7_6 = (offset & 0xc0) >> (6 - 5); u16 imm5 = (offset & 0x20) >> (5 - 2); @@ -78,7 +78,7 @@ static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location, static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location, Elf_Addr v) { - s64 offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - (void *)location; u16 imm11 = (offset & 0x800) << (12 - 11); u16 imm10 = (offset & 0x400) >> (10 - 8); u16 imm9_8 = (offset & 0x300) << (12 - 11); @@ -96,7 +96,7 @@ static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location, static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location, Elf_Addr v) { - s64 offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - (void *)location; s32 hi20; if (offset != (s32)offset) { @@ -178,7 +178,7 @@ static int apply_r_riscv_lo12_s_rela(struct module *me, u32 
*location, static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location, Elf_Addr v) { - s64 offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - (void *)location; s32 hi20; /* Always emit the got entry */ @@ -200,7 +200,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location, static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location, Elf_Addr v) { - s64 offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - (void *)location; s32 fill_v = offset; u32 hi20, lo12; @@ -227,7 +227,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location, static int apply_r_riscv_call_rela(struct module *me, u32 *location, Elf_Addr v) { - s64 offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - (void *)location; s32 fill_v = offset; u32 hi20, lo12; @@ -263,14 +263,14 @@ static int apply_r_riscv_align_rela(struct module *me, u32 *location, static int apply_r_riscv_add32_rela(struct module *me, u32 *location, Elf_Addr v) { - *(u32 *)location += (*(u32 *)v); + *(u32 *)location += (u32)v; return 0; } static int apply_r_riscv_sub32_rela(struct module *me, u32 *location, Elf_Addr v) { - *(u32 *)location -= (*(u32 *)v); + *(u32 *)location -= (u32)v; return 0; } @@ -347,7 +347,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, unsigned int j; for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) { - u64 hi20_loc = + unsigned long hi20_loc = sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[j].r_offset; u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info); @@ -360,12 +360,12 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, Elf_Sym *hi20_sym = (Elf_Sym *)sechdrs[symindex].sh_addr + ELF_RISCV_R_SYM(rel[j].r_info); - u64 hi20_sym_val = + unsigned long hi20_sym_val = hi20_sym->st_value + rel[j].r_addend; /* Calculate lo12 */ - u64 offset = hi20_sym_val - hi20_loc; + size_t offset = hi20_sym_val - hi20_loc; if (IS_ENABLED(CONFIG_MODULE_SECTIONS) && hi20_type == R_RISCV_GOT_HI20) { offset = module_emit_got_entry( diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index ba3e80712797..9f82a7e34c64 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -50,7 +50,7 @@ static int riscv_gpr_set(struct task_struct *target, struct pt_regs *regs; regs = task_pt_regs(target); - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, -1); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1); return ret; } diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index ee44a48faf79..f0d2070866d4 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -220,8 +220,3 @@ void __init setup_arch(char **cmdline_p) riscv_fill_hwcap(); } -static int __init riscv_device_init(void) -{ - return of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); -} -subsys_initcall_sync(riscv_device_init); diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index c77df8142be2..58a522f9bcc3 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -28,7 +28,9 @@ static void __init zone_sizes_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; +#ifdef CONFIG_ZONE_DMA32 max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn)); +#endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; free_area_init_nodes(max_zone_pfns); diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index baed39772c84..e44bb2b2873e 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -160,6 +160,7 @@ config S390 select 
HAVE_OPROFILE select HAVE_PERF_EVENTS select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RSEQ select HAVE_SYSCALL_TRACEPOINTS select HAVE_VIRT_CPU_ACCOUNTING select MODULES_USE_ELF_RELA diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index 607c5e9fba3d..2ce28bf0c5ec 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c @@ -183,3 +183,4 @@ COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb); COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer); COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags); COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags) +COMPAT_SYSCALL_WRAP4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32, sig) diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index f03402efab4b..150130c897c3 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -357,6 +357,10 @@ ENTRY(system_call) stg %r2,__PT_R2(%r11) # store return value .Lsysc_return: +#ifdef CONFIG_DEBUG_RSEQ + lgr %r2,%r11 + brasl %r14,rseq_syscall +#endif LOCKDEP_SYS_EXIT .Lsysc_tif: TSTMSK __PT_FLAGS(%r11),_PIF_WORK @@ -1265,7 +1269,7 @@ cleanup_critical: jl 0f clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end jl .Lcleanup_load_fpu_regs -0: BR_EX %r14 +0: BR_EX %r14,%r11 .align 8 .Lcleanup_table: @@ -1301,7 +1305,7 @@ cleanup_critical: ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE lctlg %c1,%c1,__LC_USER_ASCE # load primary asce larl %r9,sie_exit # skip forward to sie_exit - BR_EX %r14 + BR_EX %r14,%r11 #endif .Lcleanup_system_call: diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 2d2960ab3e10..22f08245aa5d 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -498,7 +498,7 @@ void do_signal(struct pt_regs *regs) } /* No longer in a system call */ clear_pt_regs_flag(regs, PIF_SYSCALL); - + rseq_signal_deliver(&ksig, regs); if (is_compat_task()) handle_signal32(&ksig, oldset, regs); else @@ -537,4 +537,5 @@ void do_notify_resume(struct pt_regs *regs) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); + rseq_handle_notify_resume(NULL, regs); } diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index 8b210ead7956..022fc099b628 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -389,3 +389,5 @@ 379 common statx sys_statx compat_sys_statx 380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi 381 common kexec_file_load sys_kexec_file_load compat_sys_kexec_file_load +382 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents +383 common rseq sys_rseq compat_sys_rseq diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 84bd6329a88d..e3bd5627afef 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -252,6 +252,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) spin_unlock_bh(&mm->context.lock); if (mask != 0) return; + } else { + atomic_xor_bits(&page->_refcount, 3U << 24); } pgtable_page_dtor(page); @@ -304,6 +306,8 @@ static void __tlb_remove_table(void *_table) break; /* fallthrough */ case 3: /* 4K page table with pgstes */ + if (mask & 3) + atomic_xor_bits(&page->_refcount, 3 << 24); pgtable_page_dtor(page); __free_page(page); 
break; diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index d2db8acb1a55..5f0234ec8038 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1286,6 +1286,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) goto free_addrs; } if (bpf_jit_prog(&jit, fp)) { + bpf_jit_binary_free(header); fp = orig_fp; goto free_addrs; } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f1dbb4ee19d7..887d3a7bb646 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -63,7 +63,7 @@ config X86 select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_REFCOUNT select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 - select ARCH_HAS_UACCESS_MCSAFE if X86_64 + select ARCH_HAS_UACCESS_MCSAFE if X86_64 && X86_MCE select ARCH_HAS_SET_MEMORY select ARCH_HAS_SG_CHAIN select ARCH_HAS_STRICT_KERNEL_RWX diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index e57665b4ba1c..e98522ea6f09 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -114,18 +114,12 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom) struct pci_setup_rom *rom = NULL; efi_status_t status; unsigned long size; - uint64_t attributes, romsize; + uint64_t romsize; void *romimage; - status = efi_call_proto(efi_pci_io_protocol, attributes, pci, - EfiPciIoAttributeOperationGet, 0ULL, - &attributes); - if (status != EFI_SUCCESS) - return status; - /* - * Some firmware images contain EFI function pointers at the place where the - * romimage and romsize fields are supposed to be. Typically the EFI + * Some firmware images contain EFI function pointers at the place where + * the romimage and romsize fields are supposed to be. Typically the EFI * code is mapped at high addresses, translating to an unrealistically * large romsize. The UEFI spec limits the size of option ROMs to 16 * MiB so we reject any ROMs over 16 MiB in size to catch this. 
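The 16 MiB limit referred to in the eboot.c comment above is enforced by a sanity check that remains in __setup_efi_pci() further down in the function (not part of this hunk); a minimal sketch of that guard, assuming the kernel's SZ_16M constant and the usual EFI status codes, looks like:

    if (!romimage || !romsize || romsize > SZ_16M)
            return EFI_INVALID_PARAMETER;   /* reject bogus or oversized option ROMs */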
diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S index 9254e0b6cc06..717bf0776421 100644 --- a/arch/x86/crypto/aegis128-aesni-asm.S +++ b/arch/x86/crypto/aegis128-aesni-asm.S @@ -535,6 +535,7 @@ ENTRY(crypto_aegis128_aesni_enc_tail) movdqu STATE3, 0x40(STATEP) FRAME_END + ret ENDPROC(crypto_aegis128_aesni_enc_tail) .macro decrypt_block a s0 s1 s2 s3 s4 i diff --git a/arch/x86/crypto/aegis128l-aesni-asm.S b/arch/x86/crypto/aegis128l-aesni-asm.S index 9263c344f2c7..4eda2b8db9e1 100644 --- a/arch/x86/crypto/aegis128l-aesni-asm.S +++ b/arch/x86/crypto/aegis128l-aesni-asm.S @@ -645,6 +645,7 @@ ENTRY(crypto_aegis128l_aesni_enc_tail) state_store0 FRAME_END + ret ENDPROC(crypto_aegis128l_aesni_enc_tail) /* diff --git a/arch/x86/crypto/aegis256-aesni-asm.S b/arch/x86/crypto/aegis256-aesni-asm.S index 1d977d515bf9..32aae8397268 100644 --- a/arch/x86/crypto/aegis256-aesni-asm.S +++ b/arch/x86/crypto/aegis256-aesni-asm.S @@ -543,6 +543,7 @@ ENTRY(crypto_aegis256_aesni_enc_tail) state_store0 FRAME_END + ret ENDPROC(crypto_aegis256_aesni_enc_tail) /* diff --git a/arch/x86/crypto/morus1280-avx2-asm.S b/arch/x86/crypto/morus1280-avx2-asm.S index 37d422e77931..07653d4582a6 100644 --- a/arch/x86/crypto/morus1280-avx2-asm.S +++ b/arch/x86/crypto/morus1280-avx2-asm.S @@ -453,6 +453,7 @@ ENTRY(crypto_morus1280_avx2_enc_tail) vmovdqu STATE4, (4 * 32)(%rdi) FRAME_END + ret ENDPROC(crypto_morus1280_avx2_enc_tail) /* diff --git a/arch/x86/crypto/morus1280-sse2-asm.S b/arch/x86/crypto/morus1280-sse2-asm.S index 1fe637c7be9d..bd1aa1b60869 100644 --- a/arch/x86/crypto/morus1280-sse2-asm.S +++ b/arch/x86/crypto/morus1280-sse2-asm.S @@ -652,6 +652,7 @@ ENTRY(crypto_morus1280_sse2_enc_tail) movdqu STATE4_HI, (9 * 16)(%rdi) FRAME_END + ret ENDPROC(crypto_morus1280_sse2_enc_tail) /* diff --git a/arch/x86/crypto/morus640-sse2-asm.S b/arch/x86/crypto/morus640-sse2-asm.S index 71c72a0a0862..efa02816d921 100644 --- a/arch/x86/crypto/morus640-sse2-asm.S +++ b/arch/x86/crypto/morus640-sse2-asm.S @@ -437,6 +437,7 @@ ENTRY(crypto_morus640_sse2_enc_tail) movdqu STATE4, (4 * 16)(%rdi) FRAME_END + ret ENDPROC(crypto_morus640_sse2_enc_tail) /* diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 2582881d19ce..c371bfee137a 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -477,7 +477,7 @@ ENTRY(entry_SYSENTER_32) * whereas POPF does not.) 
*/ addl $PT_EFLAGS-PT_DS, %esp /* point esp at pt_regs->flags */ - btr $X86_EFLAGS_IF_BIT, (%esp) + btrl $X86_EFLAGS_IF_BIT, (%esp) popfl /* diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 9de7f1e1dede..7d0df78db727 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -84,13 +84,13 @@ ENTRY(entry_SYSENTER_compat) pushq %rdx /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */ - pushq %r8 /* pt_regs->r8 */ + pushq $0 /* pt_regs->r8 = 0 */ xorl %r8d, %r8d /* nospec r8 */ - pushq %r9 /* pt_regs->r9 */ + pushq $0 /* pt_regs->r9 = 0 */ xorl %r9d, %r9d /* nospec r9 */ - pushq %r10 /* pt_regs->r10 */ + pushq $0 /* pt_regs->r10 = 0 */ xorl %r10d, %r10d /* nospec r10 */ - pushq %r11 /* pt_regs->r11 */ + pushq $0 /* pt_regs->r11 = 0 */ xorl %r11d, %r11d /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ xorl %ebx, %ebx /* nospec rbx */ @@ -374,13 +374,13 @@ ENTRY(entry_INT80_compat) pushq %rcx /* pt_regs->cx */ xorl %ecx, %ecx /* nospec cx */ pushq $-ENOSYS /* pt_regs->ax */ - pushq $0 /* pt_regs->r8 = 0 */ + pushq %r8 /* pt_regs->r8 */ xorl %r8d, %r8d /* nospec r8 */ - pushq $0 /* pt_regs->r9 = 0 */ + pushq %r9 /* pt_regs->r9 */ xorl %r9d, %r9d /* nospec r9 */ - pushq $0 /* pt_regs->r10 = 0 */ + pushq %r10 /* pt_regs->r10*/ xorl %r10d, %r10d /* nospec r10 */ - pushq $0 /* pt_regs->r11 = 0 */ + pushq %r11 /* pt_regs->r11 */ xorl %r11d, %r11d /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ xorl %ebx, %ebx /* nospec rbx */ diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 8a10a045b57b..8cf03f101938 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -408,9 +408,11 @@ static int alloc_bts_buffer(int cpu) ds->bts_buffer_base = (unsigned long) cea; ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL); ds->bts_index = ds->bts_buffer_base; - max = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE); - ds->bts_absolute_maximum = ds->bts_buffer_base + max; - ds->bts_interrupt_threshold = ds->bts_absolute_maximum - (max / 16); + max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; + ds->bts_absolute_maximum = ds->bts_buffer_base + + max * BTS_RECORD_SIZE; + ds->bts_interrupt_threshold = ds->bts_absolute_maximum - + (max / 16) * BTS_RECORD_SIZE; return 0; } diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c index f68855499391..402338365651 100644 --- a/arch/x86/hyperv/hv_apic.c +++ b/arch/x86/hyperv/hv_apic.c @@ -114,6 +114,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K; nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask); } + if (nr_bank < 0) + goto ipi_mask_ex_done; if (!nr_bank) ipi_arg->vp_set.format = HV_GENERIC_SET_ALL; @@ -158,6 +160,9 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector) for_each_cpu(cur_cpu, mask) { vcpu = hv_cpu_number_to_vp_number(cur_cpu); + if (vcpu == VP_INVAL) + goto ipi_mask_done; + /* * This particular version of the IPI hypercall can * only target upto 64 CPUs. 
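Read together with the hv_init.c and mshyperv.h hunks that follow, the hv_apic.c changes above introduce a "VP index not known" sentinel: every hv_vp_index[] slot starts out as VP_INVAL, and the enlightened IPI paths back off to the generic path whenever they meet it. A condensed sketch of the pattern (identifiers as in the patch, surrounding code omitted):

    /* at init time: nothing has been reported by the hypervisor yet */
    for (i = 0; i < num_possible_cpus(); i++)
            hv_vp_index[i] = VP_INVAL;

    /* on the IPI send path: refuse the hypercall for unmapped CPUs */
    vcpu = hv_cpu_number_to_vp_number(cur_cpu);
    if (vcpu == VP_INVAL)
            goto ipi_mask_done;     /* fall back to the non-enlightened IPI path */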
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 4c431e1c1eff..1ff420217298 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -265,7 +265,7 @@ void __init hyperv_init(void) { u64 guest_id, required_msrs; union hv_x64_msr_hypercall_contents hypercall_msr; - int cpuhp; + int cpuhp, i; if (x86_hyper_type != X86_HYPER_MS_HYPERV) return; @@ -293,6 +293,9 @@ void __init hyperv_init(void) if (!hv_vp_index) return; + for (i = 0; i < num_possible_cpus(); i++) + hv_vp_index[i] = VP_INVAL; + hv_vp_assist_page = kcalloc(num_possible_cpus(), sizeof(*hv_vp_assist_page), GFP_KERNEL); if (!hv_vp_assist_page) { diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h index c356098b6fb9..4d4015ddcf26 100644 --- a/arch/x86/include/asm/apm.h +++ b/arch/x86/include/asm/apm.h @@ -7,8 +7,6 @@ #ifndef _ASM_X86_MACH_DEFAULT_APM_H #define _ASM_X86_MACH_DEFAULT_APM_H -#include <asm/nospec-branch.h> - #ifdef APM_ZERO_SEGS # define APM_DO_ZERO_SEGS \ "pushl %%ds\n\t" \ @@ -34,7 +32,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, * N.B. We do NOT need a cld after the BIOS call * because we always save and restore the flags. */ - firmware_restrict_branch_speculation_start(); __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" @@ -47,7 +44,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, "=S" (*esi) : "a" (func), "b" (ebx_in), "c" (ecx_in) : "memory", "cc"); - firmware_restrict_branch_speculation_end(); } static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, @@ -60,7 +56,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, * N.B. We do NOT need a cld after the BIOS call * because we always save and restore the flags. 
*/ - firmware_restrict_branch_speculation_start(); __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" @@ -73,7 +68,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, "=S" (si) : "a" (func), "b" (ebx_in), "c" (ecx_in) : "memory", "cc"); - firmware_restrict_branch_speculation_end(); return error; } diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 219faaec51df..990770f9e76b 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -46,6 +46,65 @@ #define _ASM_SI __ASM_REG(si) #define _ASM_DI __ASM_REG(di) +#ifndef __x86_64__ +/* 32 bit */ + +#define _ASM_ARG1 _ASM_AX +#define _ASM_ARG2 _ASM_DX +#define _ASM_ARG3 _ASM_CX + +#define _ASM_ARG1L eax +#define _ASM_ARG2L edx +#define _ASM_ARG3L ecx + +#define _ASM_ARG1W ax +#define _ASM_ARG2W dx +#define _ASM_ARG3W cx + +#define _ASM_ARG1B al +#define _ASM_ARG2B dl +#define _ASM_ARG3B cl + +#else +/* 64 bit */ + +#define _ASM_ARG1 _ASM_DI +#define _ASM_ARG2 _ASM_SI +#define _ASM_ARG3 _ASM_DX +#define _ASM_ARG4 _ASM_CX +#define _ASM_ARG5 r8 +#define _ASM_ARG6 r9 + +#define _ASM_ARG1Q rdi +#define _ASM_ARG2Q rsi +#define _ASM_ARG3Q rdx +#define _ASM_ARG4Q rcx +#define _ASM_ARG5Q r8 +#define _ASM_ARG6Q r9 + +#define _ASM_ARG1L edi +#define _ASM_ARG2L esi +#define _ASM_ARG3L edx +#define _ASM_ARG4L ecx +#define _ASM_ARG5L r8d +#define _ASM_ARG6L r9d + +#define _ASM_ARG1W di +#define _ASM_ARG2W si +#define _ASM_ARG3W dx +#define _ASM_ARG4W cx +#define _ASM_ARG5W r8w +#define _ASM_ARG6W r9w + +#define _ASM_ARG1B dil +#define _ASM_ARG2B sil +#define _ASM_ARG3B dl +#define _ASM_ARG4B cl +#define _ASM_ARG5B r8b +#define _ASM_ARG6B r9b + +#endif + /* * Macros to generate condition code outputs from inline assembly, * The output operand must be type "bool". diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index 89f08955fff7..c4fc17220df9 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -13,7 +13,7 @@ * Interrupt control: */ -static inline unsigned long native_save_fl(void) +extern inline unsigned long native_save_fl(void) { unsigned long flags; diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 3cd14311edfa..5a7375ed5f7c 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -9,6 +9,8 @@ #include <asm/hyperv-tlfs.h> #include <asm/nospec-branch.h> +#define VP_INVAL U32_MAX + struct ms_hyperv_info { u32 features; u32 misc_features; @@ -20,7 +22,6 @@ struct ms_hyperv_info { extern struct ms_hyperv_info ms_hyperv; - /* * Generate the guest ID. 
*/ @@ -281,6 +282,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset, */ for_each_cpu(cpu, cpus) { vcpu = hv_cpu_number_to_vp_number(cpu); + if (vcpu == VP_INVAL) + return -1; vcpu_bank = vcpu / 64; vcpu_offset = vcpu % 64; __set_bit(vcpu_offset, (unsigned long *) diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index ada6410fd2ec..fbd578daa66e 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -184,6 +184,9 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr) static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) { + if (!pgtable_l5_enabled()) + return; + BUG_ON((unsigned long)p4d & (PAGE_SIZE-1)); free_page((unsigned long)p4d); } diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 99ecde23c3ec..5715647fc4fe 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -898,7 +898,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd) #define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd)) /* to find an entry in a page-table-directory. */ -static __always_inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) +static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) { if (!pgtable_l5_enabled()) return (p4d_t *)pgd; diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 0fdcd21dadbd..3c5385f9a88f 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -216,7 +216,7 @@ static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) } #endif -static __always_inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d) +static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d) { pgd_t pgd; @@ -230,7 +230,7 @@ static __always_inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d) *p4dp = native_make_p4d(native_pgd_val(pgd)); } -static __always_inline void native_p4d_clear(p4d_t *p4d) +static inline void native_p4d_clear(p4d_t *p4d) { native_set_p4d(p4d, native_make_p4d(0)); } diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 62acb613114b..a9d637bc301d 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -52,7 +52,12 @@ copy_to_user_mcsafe(void *to, const void *from, unsigned len) unsigned long ret; __uaccess_begin(); - ret = memcpy_mcsafe(to, from, len); + /* + * Note, __memcpy_mcsafe() is explicitly used since it can + * handle exceptions / faults. memcpy_mcsafe() may fall back to + * memcpy() which lacks this handling. 
+ */ + ret = __memcpy_mcsafe(to, from, len); __uaccess_end(); return ret; } diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 02d6f5cf4e70..8824d01c0c35 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -61,6 +61,7 @@ obj-y += alternative.o i8253.o hw_breakpoint.o obj-y += tsc.o tsc_msr.o io_delay.o rtc.o obj-y += pci-iommu_table.o obj-y += resource.o +obj-y += irqflags.o obj-y += process.o obj-y += fpu/ diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 5d0de79fdab0..ec00d1ff5098 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -240,6 +240,7 @@ #include <asm/olpc.h> #include <asm/paravirt.h> #include <asm/reboot.h> +#include <asm/nospec-branch.h> #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT) extern int (*console_blank_hook)(int); @@ -614,11 +615,13 @@ static long __apm_bios_call(void *_call) gdt[0x40 / 8] = bad_bios_desc; apm_irq_save(flags); + firmware_restrict_branch_speculation_start(); APM_DO_SAVE_SEGS; apm_bios_call_asm(call->func, call->ebx, call->ecx, &call->eax, &call->ebx, &call->ecx, &call->edx, &call->esi); APM_DO_RESTORE_SEGS; + firmware_restrict_branch_speculation_end(); apm_irq_restore(flags); gdt[0x40 / 8] = save_desc_40; put_cpu(); @@ -690,10 +693,12 @@ static long __apm_bios_call_simple(void *_call) gdt[0x40 / 8] = bad_bios_desc; apm_irq_save(flags); + firmware_restrict_branch_speculation_start(); APM_DO_SAVE_SEGS; error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx, &call->eax); APM_DO_RESTORE_SEGS; + firmware_restrict_branch_speculation_end(); apm_irq_restore(flags); gdt[0x40 / 8] = save_desc_40; put_cpu(); diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 082d7875cef8..38915fbfae73 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -543,7 +543,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) nodes_per_socket = ((value >> 3) & 7) + 1; } - if (c->x86 >= 0x15 && c->x86 <= 0x17) { + if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) && + !boot_cpu_has(X86_FEATURE_VIRT_SSBD) && + c->x86 >= 0x15 && c->x86 <= 0x17) { unsigned int bit; switch (c->x86) { diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 404df26b7de8..5c0ea39311fe 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -155,7 +155,8 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) guestval |= guest_spec_ctrl & x86_spec_ctrl_mask; /* SSBD controlled in MSR_SPEC_CTRL */ - if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) + if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || + static_cpu_has(X86_FEATURE_AMD_SSBD)) hostval |= ssbd_tif_to_spec_ctrl(ti->flags); if (hostval != guestval) { @@ -533,9 +534,10 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may * use a completely different MSR and bit dependent on family. 
*/ - if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) + if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) && + !static_cpu_has(X86_FEATURE_AMD_SSBD)) { x86_amd_ssb_disable(); - else { + } else { x86_spec_ctrl_base |= SPEC_CTRL_SSBD; x86_spec_ctrl_mask |= SPEC_CTRL_SSBD; wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index c102ad51025e..8c50754c09c1 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -2165,9 +2165,6 @@ static ssize_t store_int_with_restart(struct device *s, if (check_interval == old_check_interval) return ret; - if (check_interval < 1) - check_interval = 1; - mutex_lock(&mce_sysfs_mutex); mce_restart(); mutex_unlock(&mce_sysfs_mutex); diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 4021d3859499..40eee6cc4124 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c @@ -106,7 +106,8 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) memset(line, 0, LINE_SIZE); - length = strncpy_from_user(line, buf, LINE_SIZE - 1); + len = min_t(size_t, len, LINE_SIZE - 1); + length = strncpy_from_user(line, buf, len); if (length < 0) return length; diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index d1f25c831447..c88c23c658c1 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -1248,6 +1248,7 @@ void __init e820__memblock_setup(void) { int i; u64 end; + u64 addr = 0; /* * The bootstrap memblock region count maximum is 128 entries @@ -1264,13 +1265,21 @@ void __init e820__memblock_setup(void) struct e820_entry *entry = &e820_table->entries[i]; end = entry->addr + entry->size; + if (addr < entry->addr) + memblock_reserve(addr, entry->addr - addr); + addr = end; if (end != (resource_size_t)end) continue; + /* + * all !E820_TYPE_RAM ranges (including gap ranges) are put + * into memblock.reserved to make sure that struct pages in + * such regions are not left uninitialized after bootup. 
+ */ if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN) - continue; - - memblock_add(entry->addr, entry->size); + memblock_reserve(entry->addr, entry->size); + else + memblock_add(entry->addr, entry->size); } /* Throw away partial pages: */ diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S new file mode 100644 index 000000000000..ddeeaac8adda --- /dev/null +++ b/arch/x86/kernel/irqflags.S @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include <asm/asm.h> +#include <asm/export.h> +#include <linux/linkage.h> + +/* + * unsigned long native_save_fl(void) + */ +ENTRY(native_save_fl) + pushf + pop %_ASM_AX + ret +ENDPROC(native_save_fl) +EXPORT_SYMBOL(native_save_fl) + +/* + * void native_restore_fl(unsigned long flags) + * %eax/%rdi: flags + */ +ENTRY(native_restore_fl) + push %_ASM_ARG1 + popf + ret +ENDPROC(native_restore_fl) +EXPORT_SYMBOL(native_restore_fl) diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index bf8d1eb7fca3..3b8e7c13c614 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -138,6 +138,7 @@ static unsigned long kvm_get_tsc_khz(void) src = &hv_clock[cpu].pvti; tsc_khz = pvclock_tsc_khz(src); put_cpu(); + setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ); return tsc_khz; } @@ -319,6 +320,8 @@ void __init kvmclock_init(void) printk(KERN_INFO "kvm-clock: Using msrs %x and %x", msr_kvm_system_time, msr_kvm_wall_clock); + pvclock_set_pvti_cpu0_va(hv_clock); + if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT)) pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT); @@ -366,14 +369,11 @@ int __init kvm_setup_vsyscall_timeinfo(void) vcpu_time = &hv_clock[cpu].pvti; flags = pvclock_read_flags(vcpu_time); - if (!(flags & PVCLOCK_TSC_STABLE_BIT)) { - put_cpu(); - return 1; - } - - pvclock_set_pvti_cpu0_va(hv_clock); put_cpu(); + if (!(flags & PVCLOCK_TSC_STABLE_BIT)) + return 1; + kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK; #endif return 0; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index c2f7d1d2a5c3..db9656e13ea0 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -221,6 +221,11 @@ static void notrace start_secondary(void *unused) #ifdef CONFIG_X86_32 /* switch away from the initial page table */ load_cr3(swapper_pg_dir); + /* + * Initialize the CR4 shadow before doing anything that could + * try to read it. + */ + cr4_init_shadow(); __flush_tlb_all(); #endif load_current_idt(); diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 92fd433c50b9..1bbec387d289 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -85,7 +85,7 @@ config KVM_AMD_SEV def_bool y bool "AMD Secure Encrypted Virtualization (SEV) support" depends on KVM_AMD && X86_64 - depends on CRYPTO_DEV_CCP && CRYPTO_DEV_CCP_DD && CRYPTO_DEV_SP_PSP + depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m) ---help--- Provides support for launching Encrypted VMs on AMD processors. 
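The reworked KVM_AMD_SEV dependency above encodes a general Kconfig rule: code built into vmlinux (KVM_AMD=y) cannot call symbols that only exist in a module (CRYPTO_DEV_CCP_DD=m), or the final link fails. A hypothetical, simplified sketch of the combination being excluded (sev_platform_init() is the PSP driver entry point the SEV code relies on; the caller below is illustrative only):

    /* built into vmlinux when KVM_AMD=y */
    extern int sev_platform_init(int *error);   /* provided by ccp.ko when CRYPTO_DEV_CCP_DD=m */

    static int sev_setup_sketch(void)
    {
            int error;

            /* with the provider built as a module, this reference cannot be
             * resolved at vmlinux link time, hence the Kconfig exclusion */
            return sev_platform_init(&error);
    }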
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 1689f433f3a0..e30da9a2430c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2571,6 +2571,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) struct vcpu_vmx *vmx = to_vmx(vcpu); #ifdef CONFIG_X86_64 int cpu = raw_smp_processor_id(); + unsigned long fs_base, kernel_gs_base; #endif int i; @@ -2586,12 +2587,20 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; #ifdef CONFIG_X86_64 - save_fsgs_for_kvm(); - vmx->host_state.fs_sel = current->thread.fsindex; - vmx->host_state.gs_sel = current->thread.gsindex; -#else - savesegment(fs, vmx->host_state.fs_sel); - savesegment(gs, vmx->host_state.gs_sel); + if (likely(is_64bit_mm(current->mm))) { + save_fsgs_for_kvm(); + vmx->host_state.fs_sel = current->thread.fsindex; + vmx->host_state.gs_sel = current->thread.gsindex; + fs_base = current->thread.fsbase; + kernel_gs_base = current->thread.gsbase; + } else { +#endif + savesegment(fs, vmx->host_state.fs_sel); + savesegment(gs, vmx->host_state.gs_sel); +#ifdef CONFIG_X86_64 + fs_base = read_msr(MSR_FS_BASE); + kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE); + } #endif if (!(vmx->host_state.fs_sel & 7)) { vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); @@ -2611,10 +2620,10 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) savesegment(ds, vmx->host_state.ds_sel); savesegment(es, vmx->host_state.es_sel); - vmcs_writel(HOST_FS_BASE, current->thread.fsbase); + vmcs_writel(HOST_FS_BASE, fs_base); vmcs_writel(HOST_GS_BASE, cpu_kernelmode_gs_base(cpu)); - vmx->msr_host_kernel_gs_base = current->thread.gsbase; + vmx->msr_host_kernel_gs_base = kernel_gs_base; if (is_long_mode(&vmx->vcpu)) wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); #else @@ -4322,11 +4331,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) vmcs_conf->order = get_order(vmcs_conf->size); vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff; - /* KVM supports Enlightened VMCS v1 only */ - if (static_branch_unlikely(&enable_evmcs)) - vmcs_conf->revision_id = KVM_EVMCS_VERSION; - else - vmcs_conf->revision_id = vmx_msr_low; + vmcs_conf->revision_id = vmx_msr_low; vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; @@ -4396,7 +4401,13 @@ static struct vmcs *alloc_vmcs_cpu(int cpu) return NULL; vmcs = page_address(pages); memset(vmcs, 0, vmcs_config.size); - vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */ + + /* KVM supports Enlightened VMCS v1 only */ + if (static_branch_unlikely(&enable_evmcs)) + vmcs->revision_id = KVM_EVMCS_VERSION; + else + vmcs->revision_id = vmcs_config.revision_id; + return vmcs; } @@ -4564,6 +4575,19 @@ static __init int alloc_kvm_area(void) return -ENOMEM; } + /* + * When eVMCS is enabled, alloc_vmcs_cpu() sets + * vmcs->revision_id to KVM_EVMCS_VERSION instead of + * revision_id reported by MSR_IA32_VMX_BASIC. + * + * However, even though not explictly documented by + * TLFS, VMXArea passed as VMXON argument should + * still be marked with revision_id reported by + * physical CPU. 
+ */ + if (static_branch_unlikely(&enable_evmcs)) + vmcs->revision_id = vmcs_config.revision_id; + per_cpu(vmxarea, cpu) = vmcs; } return 0; @@ -11753,7 +11777,6 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - u32 msr_entry_idx; u32 exit_qual; int r; @@ -11775,10 +11798,10 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu) nested_get_vmcs12_pages(vcpu, vmcs12); r = EXIT_REASON_MSR_LOAD_FAIL; - msr_entry_idx = nested_vmx_load_msr(vcpu, - vmcs12->vm_entry_msr_load_addr, - vmcs12->vm_entry_msr_load_count); - if (msr_entry_idx) + exit_qual = nested_vmx_load_msr(vcpu, + vmcs12->vm_entry_msr_load_addr, + vmcs12->vm_entry_msr_load_count); + if (exit_qual) goto fail; /* diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0046aa70205a..2b812b3c5088 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1097,6 +1097,7 @@ static u32 msr_based_features[] = { MSR_F10H_DECFG, MSR_IA32_UCODE_REV, + MSR_IA32_ARCH_CAPABILITIES, }; static unsigned int num_msr_based_features; @@ -1105,7 +1106,8 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr) { switch (msr->index) { case MSR_IA32_UCODE_REV: - rdmsrl(msr->index, msr->data); + case MSR_IA32_ARCH_CAPABILITIES: + rdmsrl_safe(msr->index, &msr->data); break; default: if (kvm_x86_ops->get_msr_feature(msr)) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9a84a0d08727..2aafa6ab6103 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -641,11 +641,6 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address) return 0; } -static const char nx_warning[] = KERN_CRIT -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n"; -static const char smep_warning[] = KERN_CRIT -"unable to execute userspace code (SMEP?) (uid: %d)\n"; - static void show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address) @@ -664,20 +659,18 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, pte = lookup_address_in_pgd(pgd, address, &level); if (pte && pte_present(*pte) && !pte_exec(*pte)) - printk(nx_warning, from_kuid(&init_user_ns, current_uid())); + pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", + from_kuid(&init_user_ns, current_uid())); if (pte && pte_present(*pte) && pte_exec(*pte) && (pgd_flags(*pgd) & _PAGE_USER) && (__read_cr4() & X86_CR4_SMEP)) - printk(smep_warning, from_kuid(&init_user_ns, current_uid())); + pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n", + from_kuid(&init_user_ns, current_uid())); } - printk(KERN_ALERT "BUG: unable to handle kernel "); - if (address < PAGE_SIZE) - printk(KERN_CONT "NULL pointer dereference"); - else - printk(KERN_CONT "paging request"); - - printk(KERN_CONT " at %px\n", (void *) address); + pr_alert("BUG: unable to handle kernel %s at %px\n", + address < PAGE_SIZE ? 
"NULL pointer dereference" : "paging request", + (void *)address); dump_pagetable(address); } diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index e01f7ceb9e7a..77873ce700ae 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -166,14 +166,14 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd) pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE); set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]); - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) + if (!pgd_present(*pgd)) continue; for (i = 0; i < PTRS_PER_P4D; i++) { p4d = p4d_offset(pgd, pgd_idx * PGDIR_SIZE + i * P4D_SIZE); - if (!(p4d_val(*p4d) & _PAGE_PRESENT)) + if (!p4d_present(*p4d)) continue; pud = (pud_t *)p4d_page_vaddr(*p4d); diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index 2e9ee023e6bc..81a8e33115ad 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -6,7 +6,7 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string targets += $(purgatory-y) PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) -$(obj)/sha256.o: $(srctree)/lib/sha256.c +$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE $(call if_changed_rule,cc_o_c) LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 8d4e2e1ae60b..439a94bf89ad 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1207,12 +1207,20 @@ asmlinkage __visible void __init xen_start_kernel(void) xen_setup_features(); - xen_setup_machphys_mapping(); - /* Install Xen paravirt ops */ pv_info = xen_info; pv_init_ops.patch = paravirt_patch_default; pv_cpu_ops = xen_cpu_ops; + xen_init_irq_ops(); + + /* + * Setup xen_vcpu early because it is needed for + * local_irq_disable(), irqs_disabled(), e.g. in printk(). + * + * Don't do the full vcpu_info placement stuff until we have + * the cpu_possible_mask and a non-dummy shared_info. + */ + xen_vcpu_info_reset(0); x86_platform.get_nmi_reason = xen_get_nmi_reason; @@ -1225,10 +1233,12 @@ asmlinkage __visible void __init xen_start_kernel(void) * Set up some pagetable state before starting to set any ptes. */ + xen_setup_machphys_mapping(); xen_init_mmu_ops(); /* Prevent unwanted bits from being set in PTEs. */ __supported_pte_mask &= ~_PAGE_GLOBAL; + __default_kernel_pte_mask &= ~_PAGE_GLOBAL; /* * Prevent page tables from being allocated in highmem, even @@ -1249,20 +1259,9 @@ asmlinkage __visible void __init xen_start_kernel(void) get_cpu_cap(&boot_cpu_data); x86_configure_nx(); - xen_init_irq_ops(); - /* Let's presume PV guests always boot on vCPU with id 0. */ per_cpu(xen_vcpu_id, 0) = 0; - /* - * Setup xen_vcpu early because idt_setup_early_handler needs it for - * local_irq_disable(), irqs_disabled(). - * - * Don't do the full vcpu_info placement stuff until we have - * the cpu_possible_mask and a non-dummy shared_info. - */ - xen_vcpu_info_reset(0); - idt_setup_early_handler(); xen_init_capabilities(); diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c index 74179852e46c..7515a19fd324 100644 --- a/arch/x86/xen/irq.c +++ b/arch/x86/xen/irq.c @@ -128,8 +128,6 @@ static const struct pv_irq_ops xen_irq_ops __initconst = { void __init xen_init_irq_ops(void) { - /* For PVH we use default pv_irq_ops settings. 
*/ - if (!xen_feature(XENFEAT_hvm_callback_vector)) - pv_irq_ops = xen_irq_ops; + pv_irq_ops = xen_irq_ops; x86_init.irqs.intr_init = xen_init_IRQ; } diff --git a/block/blk-core.c b/block/blk-core.c index afd2596ea3d3..f84a9b7b6f5a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -3473,6 +3473,10 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src) dst->cpu = src->cpu; dst->__sector = blk_rq_pos(src); dst->__data_len = blk_rq_bytes(src); + if (src->rq_flags & RQF_SPECIAL_PAYLOAD) { + dst->rq_flags |= RQF_SPECIAL_PAYLOAD; + dst->special_vec = src->special_vec; + } dst->nr_phys_segments = src->nr_phys_segments; dst->ioprio = src->ioprio; dst->extra_len = src->extra_len; diff --git a/block/blk-mq.c b/block/blk-mq.c index b429d515b568..95919268564b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1075,6 +1075,9 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx, #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */ +/* + * Returns true if we did some work AND can potentially do more. + */ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, bool got_budget) { @@ -1205,8 +1208,17 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, blk_mq_run_hw_queue(hctx, true); else if (needs_restart && (ret == BLK_STS_RESOURCE)) blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); + + return false; } + /* + * If the host/device is unable to accept more work, inform the + * caller of that. + */ + if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) + return false; + return (queued + errors) != 0; } diff --git a/block/bsg.c b/block/bsg.c index 66602c489956..3da540faf673 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -267,8 +267,6 @@ bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode) } else if (hdr->din_xfer_len) { ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp), hdr->din_xfer_len, GFP_KERNEL); - } else { - ret = blk_rq_map_user(q, rq, NULL, NULL, 0, GFP_KERNEL); } if (ret) diff --git a/certs/blacklist.h b/certs/blacklist.h index 150d82da8e99..1efd6fa0dc60 100644 --- a/certs/blacklist.h +++ b/certs/blacklist.h @@ -1,3 +1,3 @@ #include <linux/kernel.h> -extern const char __initdata *const blacklist_hashes[]; +extern const char __initconst *const blacklist_hashes[]; diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 49fa8582138b..c166f424871c 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -1060,12 +1060,19 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err) } EXPORT_SYMBOL_GPL(af_alg_async_cb); -__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events) +/** + * af_alg_poll - poll system call handler + */ +__poll_t af_alg_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct af_alg_ctx *ctx = ask->private; - __poll_t mask = 0; + __poll_t mask; + + sock_poll_wait(file, sk_sleep(sk), wait); + mask = 0; if (!ctx->more || ctx->used) mask |= EPOLLIN | EPOLLRDNORM; @@ -1075,7 +1082,7 @@ __poll_t af_alg_poll_mask(struct socket *sock, __poll_t events) return mask; } -EXPORT_SYMBOL_GPL(af_alg_poll_mask); +EXPORT_SYMBOL_GPL(af_alg_poll); /** * af_alg_alloc_areq - allocate struct af_alg_async_req @@ -1148,8 +1155,10 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, /* make one iovec available as scatterlist */ err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); - if (err < 0) + if (err < 0) { + rsgl->sg_num_bytes = 0; return err; + } /* chain the new 
scatterlist with previous one */ if (areq->last_rsgl) diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 825524f27438..c40a8c7ee8ae 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -375,7 +375,7 @@ static struct proto_ops algif_aead_ops = { .sendmsg = aead_sendmsg, .sendpage = af_alg_sendpage, .recvmsg = aead_recvmsg, - .poll_mask = af_alg_poll_mask, + .poll = af_alg_poll, }; static int aead_check_key(struct socket *sock) @@ -471,7 +471,7 @@ static struct proto_ops algif_aead_ops_nokey = { .sendmsg = aead_sendmsg_nokey, .sendpage = aead_sendpage_nokey, .recvmsg = aead_recvmsg_nokey, - .poll_mask = af_alg_poll_mask, + .poll = af_alg_poll, }; static void *aead_bind(const char *name, u32 type, u32 mask) diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 4c04eb9888ad..cfdaab2b7d76 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -206,7 +206,7 @@ static struct proto_ops algif_skcipher_ops = { .sendmsg = skcipher_sendmsg, .sendpage = af_alg_sendpage, .recvmsg = skcipher_recvmsg, - .poll_mask = af_alg_poll_mask, + .poll = af_alg_poll, }; static int skcipher_check_key(struct socket *sock) @@ -302,7 +302,7 @@ static struct proto_ops algif_skcipher_ops_nokey = { .sendmsg = skcipher_sendmsg_nokey, .sendpage = skcipher_sendpage_nokey, .recvmsg = skcipher_recvmsg_nokey, - .poll_mask = af_alg_poll_mask, + .poll = af_alg_poll, }; static void *skcipher_bind(const char *name, u32 type, u32 mask) diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index 7d81e6bb461a..b6cabac4b62b 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -249,6 +249,15 @@ int x509_note_signature(void *context, size_t hdrlen, return -EINVAL; } + if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) { + /* Discard the BIT STRING metadata */ + if (vlen < 1 || *(const u8 *)value != 0) + return -EBADMSG; + + value++; + vlen--; + } + ctx->cert->raw_sig = value; ctx->cert->raw_sig_size = vlen; return 0; diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c index fc0c2e2328cd..fe9d46d81750 100644 --- a/drivers/acpi/acpica/hwsleep.c +++ b/drivers/acpi/acpica/hwsleep.c @@ -51,16 +51,23 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state) return_ACPI_STATUS(status); } - /* - * 1) Disable all GPEs - * 2) Enable all wakeup GPEs - */ + /* Disable all GPEs */ status = acpi_hw_disable_all_gpes(); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } + /* + * If the target sleep state is S5, clear all GPEs and fixed events too + */ + if (sleep_state == ACPI_STATE_S5) { + status = acpi_hw_clear_acpi_status(); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + } acpi_gbl_system_awake_and_running = FALSE; + /* Enable all wakeup GPEs */ status = acpi_hw_enable_all_wakeup_gpes(); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c index 5a64ddaed8a3..e47430272692 100644 --- a/drivers/acpi/acpica/uterror.c +++ b/drivers/acpi/acpica/uterror.c @@ -182,19 +182,19 @@ acpi_ut_prefixed_namespace_error(const char *module_name, switch (lookup_status) { case AE_ALREADY_EXISTS: - acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR); + acpi_os_printf(ACPI_MSG_BIOS_ERROR); message = "Failure creating"; break; case AE_NOT_FOUND: - acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR); + acpi_os_printf(ACPI_MSG_BIOS_ERROR); message = "Could not resolve"; break; default: - acpi_os_printf("\n" ACPI_MSG_ERROR); + 
acpi_os_printf(ACPI_MSG_ERROR); message = "Failure resolving"; break; } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index b0113a5802a3..d79ad844c78f 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -717,10 +717,11 @@ void battery_hook_register(struct acpi_battery_hook *hook) */ pr_err("extension failed to load: %s", hook->name); __battery_hook_unregister(hook, 0); - return; + goto end; } } pr_info("new extension: %s\n", hook->name); +end: mutex_unlock(&hook_mutex); } EXPORT_SYMBOL_GPL(battery_hook_register); @@ -732,7 +733,7 @@ EXPORT_SYMBOL_GPL(battery_hook_register); */ static void battery_hook_add_battery(struct acpi_battery *battery) { - struct acpi_battery_hook *hook_node; + struct acpi_battery_hook *hook_node, *tmp; mutex_lock(&hook_mutex); INIT_LIST_HEAD(&battery->list); @@ -744,15 +745,15 @@ static void battery_hook_add_battery(struct acpi_battery *battery) * when a battery gets hotplugged or initialized * during the battery module initialization. */ - list_for_each_entry(hook_node, &battery_hook_list, list) { + list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) { if (hook_node->add_battery(battery->bat)) { /* * The notification of the extensions has failed, to * prevent further errors we will unload the extension. */ - __battery_hook_unregister(hook_node, 0); pr_err("error in extension, unloading: %s", hook_node->name); + __battery_hook_unregister(hook_node, 0); } } mutex_unlock(&hook_mutex); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 442a9e24f439..917f77f4cb55 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -2042,7 +2042,7 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = { .ident = "Thinkpad X1 Carbon 6th", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "20KGS3JF01"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"), }, }, { }, diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index d15814e1727f..7c479002e798 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -408,6 +408,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, const guid_t *guid; int rc, i; + if (cmd_rc) + *cmd_rc = -EINVAL; func = cmd; if (cmd == ND_CMD_CALL) { call_pkg = buf; @@ -518,6 +520,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, * If we return an error (like elsewhere) then caller wouldn't * be able to rely upon data returned to make calculation. */ + if (cmd_rc) + *cmd_rc = 0; return 0; } @@ -1273,7 +1277,7 @@ static ssize_t scrub_show(struct device *dev, mutex_lock(&acpi_desc->init_mutex); rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, - work_busy(&acpi_desc->dwork.work) + acpi_desc->scrub_busy && !acpi_desc->cancel ? 
"+\n" : "\n"); mutex_unlock(&acpi_desc->init_mutex); } @@ -2939,6 +2943,32 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, return 0; } +static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo) +{ + lockdep_assert_held(&acpi_desc->init_mutex); + + acpi_desc->scrub_busy = 1; + /* note this should only be set from within the workqueue */ + if (tmo) + acpi_desc->scrub_tmo = tmo; + queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ); +} + +static void sched_ars(struct acpi_nfit_desc *acpi_desc) +{ + __sched_ars(acpi_desc, 0); +} + +static void notify_ars_done(struct acpi_nfit_desc *acpi_desc) +{ + lockdep_assert_held(&acpi_desc->init_mutex); + + acpi_desc->scrub_busy = 0; + acpi_desc->scrub_count++; + if (acpi_desc->scrub_count_state) + sysfs_notify_dirent(acpi_desc->scrub_count_state); +} + static void acpi_nfit_scrub(struct work_struct *work) { struct acpi_nfit_desc *acpi_desc; @@ -2949,14 +2979,10 @@ static void acpi_nfit_scrub(struct work_struct *work) mutex_lock(&acpi_desc->init_mutex); query_rc = acpi_nfit_query_poison(acpi_desc); tmo = __acpi_nfit_scrub(acpi_desc, query_rc); - if (tmo) { - queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ); - acpi_desc->scrub_tmo = tmo; - } else { - acpi_desc->scrub_count++; - if (acpi_desc->scrub_count_state) - sysfs_notify_dirent(acpi_desc->scrub_count_state); - } + if (tmo) + __sched_ars(acpi_desc, tmo); + else + notify_ars_done(acpi_desc); memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); mutex_unlock(&acpi_desc->init_mutex); } @@ -3037,7 +3063,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) break; } - queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0); + sched_ars(acpi_desc); return 0; } @@ -3239,7 +3265,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags) } } if (scheduled) { - queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0); + sched_ars(acpi_desc); dev_dbg(dev, "ars_scan triggered\n"); } mutex_unlock(&acpi_desc->init_mutex); diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index 7d15856a739f..a97ff42fe311 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -203,6 +203,7 @@ struct acpi_nfit_desc { unsigned int max_ars; unsigned int scrub_count; unsigned int scrub_mode; + unsigned int scrub_busy:1; unsigned int cancel:1; unsigned long dimm_cmd_force_en; unsigned long bus_cmd_force_en; diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 7ca41bf023c9..8df9abfa947b 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -45,6 +45,8 @@ #include <linux/uaccess.h> #include <linux/io-64-nonatomic-lo-hi.h> +#include "acpica/accommon.h" +#include "acpica/acnamesp.h" #include "internal.h" #define _COMPONENT ACPI_OS_SERVICES @@ -1490,6 +1492,76 @@ int acpi_check_region(resource_size_t start, resource_size_t n, } EXPORT_SYMBOL(acpi_check_region); +static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level, + void *_res, void **return_value) +{ + struct acpi_mem_space_context **mem_ctx; + union acpi_operand_object *handler_obj; + union acpi_operand_object *region_obj2; + union acpi_operand_object *region_obj; + struct resource *res = _res; + acpi_status status; + + region_obj = acpi_ns_get_attached_object(handle); + if (!region_obj) + return AE_OK; + + handler_obj = region_obj->region.handler; + if (!handler_obj) + return AE_OK; + + if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) + return AE_OK; + + if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) + 
return AE_OK; + + region_obj2 = acpi_ns_get_secondary_object(region_obj); + if (!region_obj2) + return AE_OK; + + mem_ctx = (void *)&region_obj2->extra.region_context; + + if (!(mem_ctx[0]->address >= res->start && + mem_ctx[0]->address < res->end)) + return AE_OK; + + status = handler_obj->address_space.setup(region_obj, + ACPI_REGION_DEACTIVATE, + NULL, (void **)mem_ctx); + if (ACPI_SUCCESS(status)) + region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE); + + return status; +} + +/** + * acpi_release_memory - Release any mappings done to a memory region + * @handle: Handle to namespace node + * @res: Memory resource + * @level: A level that terminates the search + * + * Walks through @handle and unmaps all SystemMemory Operation Regions that + * overlap with @res and that have already been activated (mapped). + * + * This is a helper that allows drivers to place special requirements on memory + * region that may overlap with operation regions, primarily allowing them to + * safely map the region as non-cached memory. + * + * The unmapped Operation Regions will be automatically remapped next time they + * are called, so the drivers do not need to do anything else. + */ +acpi_status acpi_release_memory(acpi_handle handle, struct resource *res, + u32 level) +{ + if (!(res->flags & IORESOURCE_MEM)) + return AE_TYPE; + + return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level, + acpi_deactivate_mem_region, NULL, res, NULL); +} +EXPORT_SYMBOL_GPL(acpi_release_memory); + /* * Let drivers know whether the resource checks are effective */ diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index e5ea1974d1e3..d1e26cb599bf 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -481,8 +481,14 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table, if (cpu_node) { cpu_node = acpi_find_processor_package_id(table, cpu_node, level, flag); - /* Only the first level has a guaranteed id */ - if (level == 0) + /* + * As per specification if the processor structure represents + * an actual processor, then ACPI processor ID must be valid. + * For processor containers ACPI_PPTT_ACPI_PROCESSOR_ID_VALID + * should be set if the UID is valid + */ + if (level == 0 || + cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID) return cpu_node->acpi_processor_id; return ACPI_PTR_DIFF(cpu_node, table); } diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 2b16e7c8fff3..39b181d6bd0d 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -398,7 +398,6 @@ config SATA_DWC_VDEBUG config SATA_HIGHBANK tristate "Calxeda Highbank SATA support" - depends on HAS_DMA depends on ARCH_HIGHBANK || COMPILE_TEST help This option enables support for the Calxeda Highbank SoC's @@ -408,7 +407,6 @@ config SATA_MV tristate "Marvell SATA support" - depends on HAS_DMA depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \ ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST select GENERIC_PHY diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 738fb22978dd..b2b9eba1d214 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -400,6 +400,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */ { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr.
AHCI */ { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */ + { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, @@ -1280,6 +1281,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) return strcmp(buf, dmi->driver_data) < 0; } +static bool ahci_broken_lpm(struct pci_dev *pdev) +{ + static const struct dmi_system_id sysids[] = { + /* Various Lenovo 50 series have LPM issues with older BIOSen */ + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"), + }, + .driver_data = "20180406", /* 1.31 */ + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"), + }, + .driver_data = "20180420", /* 1.28 */ + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"), + }, + .driver_data = "20180315", /* 1.33 */ + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"), + }, + /* + * Note date based on release notes, 2.35 has been + * reported to be good, but I've been unable to get + * a hold of the reporter to get the DMI BIOS date. + * TODO: fix this. + */ + .driver_data = "20180310", /* 2.35 */ + }, + { } /* terminate list */ + }; + const struct dmi_system_id *dmi = dmi_first_match(sysids); + int year, month, date; + char buf[9]; + + if (!dmi) + return false; + + dmi_get_date(DMI_BIOS_DATE, &year, &month, &date); + snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date); + + return strcmp(buf, dmi->driver_data) < 0; +} + static bool ahci_broken_online(struct pci_dev *pdev) { #define ENCODE_BUSDEVFN(bus, slot, func) \ @@ -1694,6 +1748,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) "quirky BIOS, skipping spindown on poweroff\n"); } + if (ahci_broken_lpm(pdev)) { + pi.flags |= ATA_FLAG_NO_LPM; + dev_warn(&pdev->dev, + "BIOS update required for Link Power Management support\n"); + } + if (ahci_broken_suspend(pdev)) { hpriv->flags |= AHCI_HFLAG_NO_SUSPEND; dev_warn(&pdev->dev, diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c index 0045dacd814b..72d90b4c3aae 100644 --- a/drivers/ata/ahci_mvebu.c +++ b/drivers/ata/ahci_mvebu.c @@ -82,7 +82,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv) * * Return: 0 on success; Error code otherwise. 
*/ -int ahci_mvebu_stop_engine(struct ata_port *ap) +static int ahci_mvebu_stop_engine(struct ata_port *ap) { void __iomem *port_mmio = ahci_port_base(ap); u32 tmp, port_fbs; diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 965842a08743..09620c2ffa0f 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -35,6 +35,7 @@ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/module.h> +#include <linux/nospec.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> @@ -1146,10 +1147,12 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, /* get the slot number from the message */ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; - if (pmp < EM_MAX_SLOTS) + if (pmp < EM_MAX_SLOTS) { + pmp = array_index_nospec(pmp, EM_MAX_SLOTS); emp = &pp->em_priv[pmp]; - else + } else { return -EINVAL; + } /* mask off the activity bits if we are in sw_activity * mode, user should turn off sw_activity before setting diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 27d15ed7fa3d..cc71c63df381 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2493,6 +2493,9 @@ int ata_dev_configure(struct ata_device *dev) (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) dev->horkage |= ATA_HORKAGE_NOLPM; + if (ap->flags & ATA_FLAG_NO_LPM) + dev->horkage |= ATA_HORKAGE_NOLPM; + if (dev->horkage & ATA_HORKAGE_NOLPM) { ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index d5412145d76d..01306c018398 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -614,8 +614,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) { struct ata_queued_cmd *qc; - for (i = 0; i < ATA_MAX_QUEUE; i++) { - qc = __ata_qc_from_tag(ap, i); + ata_qc_for_each_raw(ap, qc, i) { if (qc->flags & ATA_QCFLAG_ACTIVE && qc->scsicmd == scmd) break; @@ -818,14 +817,13 @@ EXPORT_SYMBOL_GPL(ata_port_wait_eh); static int ata_eh_nr_in_flight(struct ata_port *ap) { + struct ata_queued_cmd *qc; unsigned int tag; int nr = 0; /* count only non-internal commands */ - for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { - if (ata_tag_internal(tag)) - continue; - if (ata_qc_from_tag(ap, tag)) + ata_qc_for_each(ap, qc, tag) { + if (qc) nr++; } @@ -847,13 +845,13 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t) goto out_unlock; if (cnt == ap->fastdrain_cnt) { + struct ata_queued_cmd *qc; unsigned int tag; /* No progress during the last interval, tag all * in-flight qcs as timed out and freeze the port. 
*/ - for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { - struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); + ata_qc_for_each(ap, qc, tag) { if (qc) qc->err_mask |= AC_ERR_TIMEOUT; } @@ -999,6 +997,7 @@ void ata_port_schedule_eh(struct ata_port *ap) static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) { + struct ata_queued_cmd *qc; int tag, nr_aborted = 0; WARN_ON(!ap->ops->error_handler); @@ -1007,9 +1006,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) ata_eh_set_pending(ap, 0); /* include internal tag in iteration */ - for (tag = 0; tag <= ATA_MAX_QUEUE; tag++) { - struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); - + ata_qc_for_each_with_internal(ap, qc, tag) { if (qc && (!link || qc->dev->link == link)) { qc->flags |= ATA_QCFLAG_FAILED; ata_qc_complete(qc); @@ -1712,9 +1709,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link) return; /* has LLDD analyzed already? */ - for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { - qc = __ata_qc_from_tag(ap, tag); - + ata_qc_for_each_raw(ap, qc, tag) { if (!(qc->flags & ATA_QCFLAG_FAILED)) continue; @@ -2136,6 +2131,7 @@ static void ata_eh_link_autopsy(struct ata_link *link) { struct ata_port *ap = link->ap; struct ata_eh_context *ehc = &link->eh_context; + struct ata_queued_cmd *qc; struct ata_device *dev; unsigned int all_err_mask = 0, eflags = 0; int tag, nr_failed = 0, nr_quiet = 0; @@ -2168,9 +2164,7 @@ static void ata_eh_link_autopsy(struct ata_link *link) all_err_mask |= ehc->i.err_mask; - for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { - struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); - + ata_qc_for_each_raw(ap, qc, tag) { if (!(qc->flags & ATA_QCFLAG_FAILED) || ata_dev_phys_link(qc->dev) != link) continue; @@ -2436,6 +2430,7 @@ static void ata_eh_link_report(struct ata_link *link) { struct ata_port *ap = link->ap; struct ata_eh_context *ehc = &link->eh_context; + struct ata_queued_cmd *qc; const char *frozen, *desc; char tries_buf[6] = ""; int tag, nr_failed = 0; @@ -2447,9 +2442,7 @@ static void ata_eh_link_report(struct ata_link *link) if (ehc->i.desc[0] != '\0') desc = ehc->i.desc; - for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { - struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); - + ata_qc_for_each_raw(ap, qc, tag) { if (!(qc->flags & ATA_QCFLAG_FAILED) || ata_dev_phys_link(qc->dev) != link || ((qc->flags & ATA_QCFLAG_QUIET) && @@ -2511,8 +2504,7 @@ static void ata_eh_link_report(struct ata_link *link) ehc->i.serror & SERR_DEV_XCHG ? 
"DevExch " : ""); #endif - for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { - struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); + ata_qc_for_each_raw(ap, qc, tag) { struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; char data_buf[20] = ""; char cdb_buf[70] = ""; @@ -3992,12 +3984,11 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, */ void ata_eh_finish(struct ata_port *ap) { + struct ata_queued_cmd *qc; int tag; /* retry or finish qcs */ - for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { - struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); - + ata_qc_for_each_raw(ap, qc, tag) { if (!(qc->flags & ATA_QCFLAG_FAILED)) continue; diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 6a91d04351d9..aad1b01447de 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3805,10 +3805,20 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc) */ goto invalid_param_len; } - if (block > dev->n_sectors) - goto out_of_range; all = cdb[14] & 0x1; + if (all) { + /* + * Ignore the block address (zone ID) as defined by ZBC. + */ + block = 0; + } else if (block >= dev->n_sectors) { + /* + * Block must be a valid zone ID (a zone start LBA). + */ + fp = 2; + goto invalid_fld; + } if (ata_ncq_enabled(qc->dev) && ata_fpdma_zac_mgmt_out_supported(qc->dev)) { @@ -3837,10 +3847,6 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc) invalid_fld: ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); return 1; - out_of_range: - /* "Logical Block Address out of range" */ - ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x00); - return 1; invalid_param_len: /* "Parameter list length error" */ ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index b8d9cfc60374..4dc528bf8e85 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -395,12 +395,6 @@ static inline unsigned int sata_fsl_tag(unsigned int tag, { /* We let libATA core do actual (queue) tag allocation */ - /* all non NCQ/queued commands should have tag#0 */ - if (ata_tag_internal(tag)) { - DPRINTK("mapping internal cmds to tag#0\n"); - return 0; - } - if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) { DPRINTK("tag %d invalid : out of range\n", tag); return 0; @@ -1229,8 +1223,7 @@ static void sata_fsl_host_intr(struct ata_port *ap) /* Workaround for data length mismatch errata */ if (unlikely(hstatus & INT_ON_DATA_LENGTH_MISMATCH)) { - for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { - qc = ata_qc_from_tag(ap, tag); + ata_qc_for_each_with_internal(ap, qc, tag) { if (qc && ata_is_atapi(qc->tf.protocol)) { u32 hcontrol; /* Set HControl[27] to clear error registers */ diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 10ae11aa1926..72c9b922a77b 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -675,7 +675,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev) struct ata_port *ap = ata_shost_to_port(sdev->host); struct nv_adma_port_priv *pp = ap->private_data; struct nv_adma_port_priv *port0, *port1; - struct scsi_device *sdev0, *sdev1; struct pci_dev *pdev = to_pci_dev(ap->host->dev); unsigned long segment_boundary, flags; unsigned short sg_tablesize; @@ -736,8 +735,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev) port0 = ap->host->ports[0]->private_data; port1 = ap->host->ports[1]->private_data; - sdev0 = ap->host->ports[0]->link.device[0].sdev; - sdev1 = ap->host->ports[1]->link.device[0].sdev; if ((port0->flags & 
NV_ADMA_ATAPI_SETUP_COMPLETE) || (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) { /* diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index ff81a576347e..82532c299bb5 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -1618,7 +1618,7 @@ static int rx_init(struct atm_dev *dev) skb_queue_head_init(&iadev->rx_dma_q); iadev->rx_free_desc_qhead = NULL; - iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL); + iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL); if (!iadev->rx_open) { printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n", dev->number); diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index a8d2eb0ceb8d..2c288d1f42bb 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) return -EFAULT; if (pool < 0 || pool > ZATM_LAST_POOL) return -EINVAL; + pool = array_index_nospec(pool, + ZATM_LAST_POOL + 1); if (copy_from_user(&info, &((struct zatm_pool_req __user *) arg)->info, sizeof(info))) return -EFAULT; diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 4925af5c4cf0..9e8484189034 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2235,7 +2235,7 @@ static void genpd_dev_pm_sync(struct device *dev) } static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np, - unsigned int index) + unsigned int index, bool power_on) { struct of_phandle_args pd_args; struct generic_pm_domain *pd; @@ -2271,9 +2271,11 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np, dev->pm_domain->detach = genpd_dev_pm_detach; dev->pm_domain->sync = genpd_dev_pm_sync; - genpd_lock(pd); - ret = genpd_power_on(pd, 0); - genpd_unlock(pd); + if (power_on) { + genpd_lock(pd); + ret = genpd_power_on(pd, 0); + genpd_unlock(pd); + } if (ret) genpd_remove_device(pd, dev); @@ -2307,7 +2309,7 @@ int genpd_dev_pm_attach(struct device *dev) "#power-domain-cells") != 1) return 0; - return __genpd_dev_pm_attach(dev, dev->of_node, 0); + return __genpd_dev_pm_attach(dev, dev->of_node, 0, true); } EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); @@ -2359,14 +2361,14 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev, } /* Try to attach the device to the PM domain at the specified index. */ - ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index); + ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false); if (ret < 1) { device_unregister(genpd_dev); return ret ? ERR_PTR(ret) : NULL; } - pm_runtime_set_active(genpd_dev); pm_runtime_enable(genpd_dev); + genpd_queue_power_off_work(dev_to_genpd(genpd_dev)); return genpd_dev; } @@ -2487,10 +2489,9 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states); * power domain corresponding to a DT node's "required-opps" property. * * @dev: Device for which the performance-state needs to be found. - * @opp_node: DT node where the "required-opps" property is present. This can be + * @np: DT node where the "required-opps" property is present. This can be * the device node itself (if it doesn't have an OPP table) or a node * within the OPP table of a device (if device has an OPP table). - * @state: Pointer to return performance state. * * Returns performance state corresponding to the "required-opps" property of * a DT node. This calls platform specific genpd->opp_to_performance_state() @@ -2499,7 +2500,7 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states); * Returns performance state on success and 0 on failure. 
*/ unsigned int of_genpd_opp_to_performance_state(struct device *dev, - struct device_node *opp_node) + struct device_node *np) { struct generic_pm_domain *genpd; struct dev_pm_opp *opp; @@ -2514,7 +2515,7 @@ unsigned int of_genpd_opp_to_performance_state(struct device *dev, genpd_lock(genpd); - opp = of_dev_pm_opp_find_required_opp(&genpd->dev, opp_node); + opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np); if (IS_ERR(opp)) { dev_err(dev, "Failed to find required OPP: %ld\n", PTR_ERR(opp)); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index a47e4987ee46..d146fedc38bb 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1244,8 +1244,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long _drbd_start_io_acct(device, req); /* process discards always from our submitter thread */ - if ((bio_op(bio) & REQ_OP_WRITE_ZEROES) || - (bio_op(bio) & REQ_OP_DISCARD)) + if (bio_op(bio) == REQ_OP_WRITE_ZEROES || + bio_op(bio) == REQ_OP_DISCARD) goto queue_for_submitter_thread; if (rw == WRITE && req->private_bio && req->i.size diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 1476cb3439f4..5e793dd7adfb 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -282,8 +282,8 @@ void drbd_request_endio(struct bio *bio) what = COMPLETED_OK; } - bio_put(req->private_bio); req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status)); + bio_put(bio); /* not req_mod(), we need irqsave here! */ spin_lock_irqsave(&device->resource->req_lock, flags); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index d6b6f434fd4b..4cb1d1be3cfb 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1613,6 +1613,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, arg = (unsigned long) compat_ptr(arg); case LOOP_SET_FD: case LOOP_CHANGE_FD: + case LOOP_SET_BLOCK_SIZE: err = lo_ioctl(bdev, mode, cmd, arg); break; default: diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 1cc29629d238..80d60f43db56 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -169,9 +169,9 @@ static int sysc_get_clocks(struct sysc *ddata) const char *name; int nr_fck = 0, nr_ick = 0, i, error = 0; - ddata->clock_roles = devm_kzalloc(ddata->dev, - sizeof(*ddata->clock_roles) * + ddata->clock_roles = devm_kcalloc(ddata->dev, SYSC_MAX_CLOCKS, + sizeof(*ddata->clock_roles), GFP_KERNEL); if (!ddata->clock_roles) return -ENOMEM; @@ -200,8 +200,8 @@ static int sysc_get_clocks(struct sysc *ddata) return -EINVAL; } - ddata->clocks = devm_kzalloc(ddata->dev, - sizeof(*ddata->clocks) * ddata->nr_clocks, + ddata->clocks = devm_kcalloc(ddata->dev, + ddata->nr_clocks, sizeof(*ddata->clocks), GFP_KERNEL); if (!ddata->clocks) return -ENOMEM; diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c index 53fe633df1e8..c9bf2c219841 100644 --- a/drivers/char/agp/alpha-agp.c +++ b/drivers/char/agp/alpha-agp.c @@ -11,7 +11,7 @@ #include "agp.h" -static int alpha_core_agp_vm_fault(struct vm_fault *vmf) +static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf) { alpha_agp_info *agp = agp_bridge->dev_private_data; dma_addr_t dma_addr; diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index e50c29c97ca7..c69e39fdd02b 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -156,7 +156,7 @@ static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table) /* Address to map to */ 
pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp); - aperturebase = tmp << 25; + aperturebase = (u64)tmp << 25; aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK); enable_gart_translation(hammer, gatt_table); @@ -277,7 +277,7 @@ static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap) pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order); nb_order = (nb_order >> 1) & 7; pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base); - nb_aper = nb_base << 25; + nb_aper = (u64)nb_base << 25; /* Northbridge seems to contain crap. Try the AGP bridge. */ diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index ad353be871bf..90ec010bffbd 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -2088,8 +2088,10 @@ static int try_smi_init(struct smi_info *new_smi) return 0; out_err: - ipmi_unregister_smi(new_smi->intf); - new_smi->intf = NULL; + if (new_smi->intf) { + ipmi_unregister_smi(new_smi->intf); + new_smi->intf = NULL; + } kfree(init_name); diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c index fbfc05e3f3d1..bb882ab161fe 100644 --- a/drivers/char/ipmi/kcs_bmc.c +++ b/drivers/char/ipmi/kcs_bmc.c @@ -210,34 +210,23 @@ static void kcs_bmc_handle_cmd(struct kcs_bmc *kcs_bmc) int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc) { unsigned long flags; - int ret = 0; + int ret = -ENODATA; u8 status; spin_lock_irqsave(&kcs_bmc->lock, flags); - if (!kcs_bmc->running) { - kcs_force_abort(kcs_bmc); - ret = -ENODEV; - goto out_unlock; - } - - status = read_status(kcs_bmc) & (KCS_STATUS_IBF | KCS_STATUS_CMD_DAT); - - switch (status) { - case KCS_STATUS_IBF | KCS_STATUS_CMD_DAT: - kcs_bmc_handle_cmd(kcs_bmc); - break; - - case KCS_STATUS_IBF: - kcs_bmc_handle_data(kcs_bmc); - break; + status = read_status(kcs_bmc); + if (status & KCS_STATUS_IBF) { + if (!kcs_bmc->running) + kcs_force_abort(kcs_bmc); + else if (status & KCS_STATUS_CMD_DAT) + kcs_bmc_handle_cmd(kcs_bmc); + else + kcs_bmc_handle_data(kcs_bmc); - default: - ret = -ENODATA; - break; + ret = 0; } -out_unlock: spin_unlock_irqrestore(&kcs_bmc->lock, flags); return ret; diff --git a/drivers/char/random.c b/drivers/char/random.c index a8fb0020ba5c..cd888d4ee605 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -402,7 +402,8 @@ static struct poolinfo { /* * Static global variables */ -static DECLARE_WAIT_QUEUE_HEAD(random_wait); +static DECLARE_WAIT_QUEUE_HEAD(random_read_wait); +static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); static struct fasync_struct *fasync; static DEFINE_SPINLOCK(random_ready_list_lock); @@ -721,8 +722,8 @@ retry: /* should we wake readers? 
*/ if (entropy_bits >= random_read_wakeup_bits && - wq_has_sleeper(&random_wait)) { - wake_up_interruptible_poll(&random_wait, POLLIN); + wq_has_sleeper(&random_read_wait)) { + wake_up_interruptible(&random_read_wait); kill_fasync(&fasync, SIGIO, POLL_IN); } /* If the input pool is getting full, send some @@ -1396,7 +1397,7 @@ retry: trace_debit_entropy(r->name, 8 * ibytes); if (ibytes && (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) { - wake_up_interruptible_poll(&random_wait, POLLOUT); + wake_up_interruptible(&random_write_wait); kill_fasync(&fasync, SIGIO, POLL_OUT); } @@ -1838,7 +1839,7 @@ _random_read(int nonblock, char __user *buf, size_t nbytes) if (nonblock) return -EAGAIN; - wait_event_interruptible(random_wait, + wait_event_interruptible(random_read_wait, ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits); if (signal_pending(current)) @@ -1875,17 +1876,14 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) return ret; } -static struct wait_queue_head * -random_get_poll_head(struct file *file, __poll_t events) -{ - return &random_wait; -} - static __poll_t -random_poll_mask(struct file *file, __poll_t events) +random_poll(struct file *file, poll_table * wait) { - __poll_t mask = 0; + __poll_t mask; + poll_wait(file, &random_read_wait, wait); + poll_wait(file, &random_write_wait, wait); + mask = 0; if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits) mask |= EPOLLIN | EPOLLRDNORM; if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits) @@ -1992,8 +1990,7 @@ static int random_fasync(int fd, struct file *filp, int on) const struct file_operations random_fops = { .read = random_read, .write = random_write, - .get_poll_head = random_get_poll_head, - .poll_mask = random_poll_mask, + .poll = random_poll, .unlocked_ioctl = random_ioctl, .fasync = random_fasync, .llseek = noop_llseek, @@ -2326,7 +2323,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, * We'll be woken up again once below random_write_wakeup_thresh, * or when the calling thread is about to terminate. 
*/ - wait_event_interruptible(random_wait, kthread_should_stop() || + wait_event_interruptible(random_write_wait, kthread_should_stop() || ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits); mix_pool_bytes(poolp, buffer, count); credit_entropy_bits(poolp, entropy); diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index ae40cbe770f0..0bb25dd009d1 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -96,7 +96,7 @@ obj-$(CONFIG_ARCH_SPRD) += sprd/ obj-$(CONFIG_ARCH_STI) += st/ obj-$(CONFIG_ARCH_STRATIX10) += socfpga/ obj-$(CONFIG_ARCH_SUNXI) += sunxi/ -obj-$(CONFIG_ARCH_SUNXI) += sunxi-ng/ +obj-$(CONFIG_SUNXI_CCU) += sunxi-ng/ obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-y += ti/ obj-$(CONFIG_CLK_UNIPHIER) += uniphier/ diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c index aae62a5b8734..d1bbee19ed0f 100644 --- a/drivers/clk/davinci/da8xx-cfgchip.c +++ b/drivers/clk/davinci/da8xx-cfgchip.c @@ -672,7 +672,7 @@ static int of_da8xx_usb_phy_clk_init(struct device *dev, struct regmap *regmap) usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap); if (IS_ERR(usb1)) { - if (PTR_ERR(usb0) == -EPROBE_DEFER) + if (PTR_ERR(usb1) == -EPROBE_DEFER) return -EPROBE_DEFER; dev_warn(dev, "Failed to register usb1_clk48 (%ld)\n", diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h index 6a42529d31a9..cc5614567a70 100644 --- a/drivers/clk/davinci/psc.h +++ b/drivers/clk/davinci/psc.h @@ -107,7 +107,7 @@ extern const struct davinci_psc_init_data of_da850_psc1_init_data; #ifdef CONFIG_ARCH_DAVINCI_DM355 extern const struct davinci_psc_init_data dm355_psc_init_data; #endif -#ifdef CONFIG_ARCH_DAVINCI_DM356 +#ifdef CONFIG_ARCH_DAVINCI_DM365 extern const struct davinci_psc_init_data dm365_psc_init_data; #endif #ifdef CONFIG_ARCH_DAVINCI_DM644x diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile index acaa14cfa25c..49454700f2e5 100644 --- a/drivers/clk/sunxi-ng/Makefile +++ b/drivers/clk/sunxi-ng/Makefile @@ -1,24 +1,24 @@ # SPDX-License-Identifier: GPL-2.0 # Common objects -lib-$(CONFIG_SUNXI_CCU) += ccu_common.o -lib-$(CONFIG_SUNXI_CCU) += ccu_mmc_timing.o -lib-$(CONFIG_SUNXI_CCU) += ccu_reset.o +obj-y += ccu_common.o +obj-y += ccu_mmc_timing.o +obj-y += ccu_reset.o # Base clock types -lib-$(CONFIG_SUNXI_CCU) += ccu_div.o -lib-$(CONFIG_SUNXI_CCU) += ccu_frac.o -lib-$(CONFIG_SUNXI_CCU) += ccu_gate.o -lib-$(CONFIG_SUNXI_CCU) += ccu_mux.o -lib-$(CONFIG_SUNXI_CCU) += ccu_mult.o -lib-$(CONFIG_SUNXI_CCU) += ccu_phase.o -lib-$(CONFIG_SUNXI_CCU) += ccu_sdm.o +obj-y += ccu_div.o +obj-y += ccu_frac.o +obj-y += ccu_gate.o +obj-y += ccu_mux.o +obj-y += ccu_mult.o +obj-y += ccu_phase.o +obj-y += ccu_sdm.o # Multi-factor clocks -lib-$(CONFIG_SUNXI_CCU) += ccu_nk.o -lib-$(CONFIG_SUNXI_CCU) += ccu_nkm.o -lib-$(CONFIG_SUNXI_CCU) += ccu_nkmp.o -lib-$(CONFIG_SUNXI_CCU) += ccu_nm.o -lib-$(CONFIG_SUNXI_CCU) += ccu_mp.o +obj-y += ccu_nk.o +obj-y += ccu_nkm.o +obj-y += ccu_nkmp.o +obj-y += ccu_nm.o +obj-y += ccu_mp.o # SoC support obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o @@ -38,12 +38,3 @@ obj-$(CONFIG_SUN8I_R40_CCU) += ccu-sun8i-r40.o obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o - -# The lib-y file goals is supposed to work only in arch/*/lib or lib/. In our -# case, we want to use that goal, but even though lib.a will be properly -# generated, it will not be linked in, eventually resulting in a linker error -# for missing symbols. 
-# -# We can work around that by explicitly adding lib.a to the obj-y goal. This is -# an undocumented behaviour, but works well for now. -obj-$(CONFIG_SUNXI_CCU) += lib.a diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 57cb2f00fc07..d8c7f5750cdb 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -735,7 +735,7 @@ static void __arch_timer_setup(unsigned type, clk->features |= CLOCK_EVT_FEAT_DYNIRQ; clk->name = "arch_mem_timer"; clk->rating = 400; - clk->cpumask = cpu_all_mask; + clk->cpumask = cpu_possible_mask; if (arch_timer_mem_use_virtual) { clk->set_state_shutdown = arch_timer_shutdown_virt_mem; clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index ece120da3353..3c3971256130 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2394,6 +2394,18 @@ static bool __init intel_pstate_no_acpi_pss(void) return true; } +static bool __init intel_pstate_no_acpi_pcch(void) +{ + acpi_status status; + acpi_handle handle; + + status = acpi_get_handle(NULL, "\\_SB", &handle); + if (ACPI_FAILURE(status)) + return true; + + return !acpi_has_method(handle, "PCCH"); +} + static bool __init intel_pstate_has_acpi_ppc(void) { int i; @@ -2453,7 +2465,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void) switch (plat_info[idx].data) { case PSS: - return intel_pstate_no_acpi_pss(); + if (!intel_pstate_no_acpi_pss()) + return false; + + return intel_pstate_no_acpi_pcch(); case PPC: return intel_pstate_has_acpi_ppc() && !force_load; } diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 3f0ce2ae35ee..0c56c9759672 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -580,6 +580,10 @@ static int __init pcc_cpufreq_init(void) { int ret; + /* Skip initialization if another cpufreq driver is there. 
*/ + if (cpufreq_get_current_driver()) + return 0; + if (acpi_disabled) return 0; diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c index 01bddacf5c3b..29389accf3e9 100644 --- a/drivers/cpufreq/qcom-cpufreq-kryo.c +++ b/drivers/cpufreq/qcom-cpufreq-kryo.c @@ -87,8 +87,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) int ret; cpu_dev = get_cpu_device(0); - if (NULL == cpu_dev) - ret = -ENODEV; + if (!cpu_dev) + return -ENODEV; msm8996_version = qcom_cpufreq_kryo_get_msm_id(); if (NUM_OF_MSM8996_VERSIONS == msm8996_version) { @@ -97,8 +97,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) } np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); - if (IS_ERR(np)) - return PTR_ERR(np); + if (!np) + return -ENOENT; ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu"); if (!ret) { diff --git a/drivers/dax/device.c b/drivers/dax/device.c index de2f8297a210..108c37fca782 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -189,14 +189,16 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma, /* prevent private mappings from being established */ if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) { - dev_info(dev, "%s: %s: fail, attempted private mapping\n", + dev_info_ratelimited(dev, + "%s: %s: fail, attempted private mapping\n", current->comm, func); return -EINVAL; } mask = dax_region->align - 1; if (vma->vm_start & mask || vma->vm_end & mask) { - dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n", + dev_info_ratelimited(dev, + "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n", current->comm, func, vma->vm_start, vma->vm_end, mask); return -EINVAL; @@ -204,13 +206,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma, if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV && (vma->vm_flags & VM_DONTCOPY) == 0) { - dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n", + dev_info_ratelimited(dev, + "%s: %s: fail, dax range requires MADV_DONTFORK\n", current->comm, func); return -EINVAL; } if (!vma_is_dax(vma)) { - dev_info(dev, "%s: %s: fail, vma is not DAX capable\n", + dev_info_ratelimited(dev, + "%s: %s: fail, vma is not DAX capable\n", current->comm, func); return -EINVAL; } diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 903d9c473749..45276abf03aa 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -86,6 +86,7 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize) { struct dax_device *dax_dev; bool dax_enabled = false; + struct request_queue *q; pgoff_t pgoff; int err, id; void *kaddr; @@ -99,6 +100,13 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize) return false; } + q = bdev_get_queue(bdev); + if (!q || !blk_queue_dax(q)) { + pr_debug("%s: error: request queue doesn't support dax\n", + bdevname(bdev, buf)); + return false; + } + err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff); if (err) { pr_debug("%s: error: unaligned partition for dax\n", diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index fa31cccbe04f..6bfa217ed6d0 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -794,7 +794,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec, struct k3_dma_dev *d = ofdma->of_dma_data; unsigned int request = dma_spec->args[0]; - if (request > d->dma_requests) + if (request >= d->dma_requests) return NULL; return dma_get_slave_channel(&(d->chans[request].vc.chan)); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c 
index defcdde4d358..de0957fe9668 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -3033,7 +3033,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pd->src_addr_widths = PL330_DMA_BUSWIDTHS; pd->dst_addr_widths = PL330_DMA_BUSWIDTHS; pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ? 1 : PL330_MAX_BURST); diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c index 9b5ca8691f27..a4a931ddf6f6 100644 --- a/drivers/dma/ti/omap-dma.c +++ b/drivers/dma/ti/omap-dma.c @@ -1485,7 +1485,11 @@ static int omap_dma_probe(struct platform_device *pdev) od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS; od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS; od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + if (__dma_omap15xx(od->plat->dma_attr)) + od->ddev.residue_granularity = + DMA_RESIDUE_GRANULARITY_DESCRIPTOR; + else + od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */ od->ddev.dev = &pdev->dev; INIT_LIST_HEAD(&od->ddev.channels); diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c index dd4edd8f22ce..7fa793672a7a 100644 --- a/drivers/fpga/altera-cvp.c +++ b/drivers/fpga/altera-cvp.c @@ -455,8 +455,10 @@ static int altera_cvp_probe(struct pci_dev *pdev, mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name, &altera_cvp_ops, conf); - if (!mgr) - return -ENOMEM; + if (!mgr) { + ret = -ENOMEM; + goto err_unmap; + } pci_set_drvdata(pdev, mgr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index a59c07590cee..7dcbac8af9a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -190,6 +190,7 @@ struct amdgpu_job; struct amdgpu_irq_src; struct amdgpu_fpriv; struct amdgpu_bo_va_mapping; +struct amdgpu_atif; enum amdgpu_cp_irq { AMDGPU_CP_IRQ_GFX_EOP = 0, @@ -1269,43 +1270,6 @@ struct amdgpu_vram_scratch { /* * ACPI */ -struct amdgpu_atif_notification_cfg { - bool enabled; - int command_code; -}; - -struct amdgpu_atif_notifications { - bool display_switch; - bool expansion_mode_change; - bool thermal_state; - bool forced_power_state; - bool system_power_state; - bool display_conf_change; - bool px_gfx_switch; - bool brightness_change; - bool dgpu_display_event; -}; - -struct amdgpu_atif_functions { - bool system_params; - bool sbios_requests; - bool select_active_disp; - bool lid_state; - bool get_tv_standard; - bool set_tv_standard; - bool get_panel_expansion_mode; - bool set_panel_expansion_mode; - bool temperature_change; - bool graphics_device_types; -}; - -struct amdgpu_atif { - struct amdgpu_atif_notifications notifications; - struct amdgpu_atif_functions functions; - struct amdgpu_atif_notification_cfg notification_cfg; - struct amdgpu_encoder *encoder_for_bl; -}; - struct amdgpu_atcs_functions { bool get_ext_state; bool pcie_perf_req; @@ -1466,7 +1430,7 @@ struct amdgpu_device { #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; #endif - struct amdgpu_atif atif; + struct amdgpu_atif *atif; struct amdgpu_atcs atcs; struct mutex srbm_mutex; /* GRBM index mutex. 
Protects concurrent access to GRBM index */ @@ -1894,6 +1858,12 @@ static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; static inline bool amdgpu_has_atpx(void) { return false; } #endif +#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI) +void *amdgpu_atpx_get_dhandle(void); +#else +static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; } +#endif + /* * KMS */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index f4c474a95875..71efcf38f11b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -57,6 +57,10 @@ #define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8 #define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c #define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68 +#define ACP_BT_PLAY_REGS_START 0x14970 +#define ACP_BT_PLAY_REGS_END 0x14a24 +#define ACP_BT_COMP1_REG_OFFSET 0xac +#define ACP_BT_COMP2_REG_OFFSET 0xa8 #define mmACP_PGFSM_RETAIN_REG 0x51c9 #define mmACP_PGFSM_CONFIG_REG 0x51ca @@ -77,7 +81,7 @@ #define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF #define ACP_TIMEOUT_LOOP 0x000000FF -#define ACP_DEVS 3 +#define ACP_DEVS 4 #define ACP_SRC_ID 162 enum { @@ -316,14 +320,13 @@ static int acp_hw_init(void *handle) if (adev->acp.acp_cell == NULL) return -ENOMEM; - adev->acp.acp_res = kcalloc(4, sizeof(struct resource), GFP_KERNEL); - + adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL); if (adev->acp.acp_res == NULL) { kfree(adev->acp.acp_cell); return -ENOMEM; } - i2s_pdata = kcalloc(2, sizeof(struct i2s_platform_data), GFP_KERNEL); + i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL); if (i2s_pdata == NULL) { kfree(adev->acp.acp_res); kfree(adev->acp.acp_cell); @@ -358,6 +361,20 @@ static int acp_hw_init(void *handle) i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET; i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET; + i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET; + switch (adev->asic_type) { + case CHIP_STONEY: + i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE; + break; + default: + break; + } + + i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD; + i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000; + i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET; + i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET; + adev->acp.acp_res[0].name = "acp2x_dma"; adev->acp.acp_res[0].flags = IORESOURCE_MEM; adev->acp.acp_res[0].start = acp_base; @@ -373,13 +390,18 @@ static int acp_hw_init(void *handle) adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START; adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END; - adev->acp.acp_res[3].name = "acp2x_dma_irq"; - adev->acp.acp_res[3].flags = IORESOURCE_IRQ; - adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162); - adev->acp.acp_res[3].end = adev->acp.acp_res[3].start; + adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap"; + adev->acp.acp_res[3].flags = IORESOURCE_MEM; + adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START; + adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END; + + adev->acp.acp_res[4].name = "acp2x_dma_irq"; + adev->acp.acp_res[4].flags = IORESOURCE_IRQ; + adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162); + adev->acp.acp_res[4].end = adev->acp.acp_res[4].start; adev->acp.acp_cell[0].name = "acp_audio_dma"; - adev->acp.acp_cell[0].num_resources = 4; + adev->acp.acp_cell[0].num_resources = 5; adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0]; adev->acp.acp_cell[0].platform_data = 
&adev->asic_type; adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type); @@ -396,6 +418,12 @@ static int acp_hw_init(void *handle) adev->acp.acp_cell[2].platform_data = &i2s_pdata[1]; adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data); + adev->acp.acp_cell[3].name = "designware-i2s"; + adev->acp.acp_cell[3].num_resources = 1; + adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3]; + adev->acp.acp_cell[3].platform_data = &i2s_pdata[2]; + adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data); + r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS); if (r) @@ -451,7 +479,6 @@ static int acp_hw_init(void *handle) val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET); val &= ~ACP_SOFT_RESET__SoftResetAud_MASK; cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val); - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 8fa850a070e0..0d8c3fc6eace 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -34,6 +34,45 @@ #include "amd_acpi.h" #include "atom.h" +struct amdgpu_atif_notification_cfg { + bool enabled; + int command_code; +}; + +struct amdgpu_atif_notifications { + bool display_switch; + bool expansion_mode_change; + bool thermal_state; + bool forced_power_state; + bool system_power_state; + bool display_conf_change; + bool px_gfx_switch; + bool brightness_change; + bool dgpu_display_event; +}; + +struct amdgpu_atif_functions { + bool system_params; + bool sbios_requests; + bool select_active_disp; + bool lid_state; + bool get_tv_standard; + bool set_tv_standard; + bool get_panel_expansion_mode; + bool set_panel_expansion_mode; + bool temperature_change; + bool graphics_device_types; +}; + +struct amdgpu_atif { + acpi_handle handle; + + struct amdgpu_atif_notifications notifications; + struct amdgpu_atif_functions functions; + struct amdgpu_atif_notification_cfg notification_cfg; + struct amdgpu_encoder *encoder_for_bl; +}; + /* Call the ATIF method */ /** @@ -46,8 +85,9 @@ * Executes the requested ATIF function (all asics). * Returns a pointer to the acpi output buffer. */ -static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function, - struct acpi_buffer *params) +static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif, + int function, + struct acpi_buffer *params) { acpi_status status; union acpi_object atif_arg_elements[2]; @@ -70,7 +110,8 @@ static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function, atif_arg_elements[1].integer.value = 0; } - status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer); + status = acpi_evaluate_object(atif->handle, NULL, &atif_arg, + &buffer); /* Fail only if calling the method fails and ATIF is supported */ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { @@ -141,15 +182,14 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas * (all asics). * returns 0 on success, error on failure. 
*/ -static int amdgpu_atif_verify_interface(acpi_handle handle, - struct amdgpu_atif *atif) +static int amdgpu_atif_verify_interface(struct amdgpu_atif *atif) { union acpi_object *info; struct atif_verify_interface output; size_t size; int err = 0; - info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL); + info = amdgpu_atif_call(atif, ATIF_FUNCTION_VERIFY_INTERFACE, NULL); if (!info) return -EIO; @@ -176,6 +216,35 @@ out: return err; } +static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle) +{ + acpi_handle handle = NULL; + char acpi_method_name[255] = { 0 }; + struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name }; + acpi_status status; + + /* For PX/HG systems, ATIF and ATPX are in the iGPU's namespace, on dGPU only + * systems, ATIF is in the dGPU's namespace. + */ + status = acpi_get_handle(dhandle, "ATIF", &handle); + if (ACPI_SUCCESS(status)) + goto out; + + if (amdgpu_has_atpx()) { + status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF", + &handle); + if (ACPI_SUCCESS(status)) + goto out; + } + + DRM_DEBUG_DRIVER("No ATIF handle found\n"); + return NULL; +out: + acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); + DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name); + return handle; +} + /** * amdgpu_atif_get_notification_params - determine notify configuration * @@ -188,15 +257,16 @@ out: * where n is specified in the result if a notifier is used. * Returns 0 on success, error on failure. */ -static int amdgpu_atif_get_notification_params(acpi_handle handle, - struct amdgpu_atif_notification_cfg *n) +static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif) { union acpi_object *info; + struct amdgpu_atif_notification_cfg *n = &atif->notification_cfg; struct atif_system_params params; size_t size; int err = 0; - info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL); + info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, + NULL); if (!info) { err = -EIO; goto out; @@ -250,14 +320,15 @@ out: * (all asics). * Returns 0 on success, error on failure. 
*/ -static int amdgpu_atif_get_sbios_requests(acpi_handle handle, - struct atif_sbios_requests *req) +static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif, + struct atif_sbios_requests *req) { union acpi_object *info; size_t size; int count = 0; - info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL); + info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, + NULL); if (!info) return -EIO; @@ -290,11 +361,10 @@ out: * Returns NOTIFY code */ static int amdgpu_atif_handler(struct amdgpu_device *adev, - struct acpi_bus_event *event) + struct acpi_bus_event *event) { - struct amdgpu_atif *atif = &adev->atif; + struct amdgpu_atif *atif = adev->atif; struct atif_sbios_requests req; - acpi_handle handle; int count; DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n", @@ -303,14 +373,14 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) return NOTIFY_DONE; - if (!atif->notification_cfg.enabled || + if (!atif || + !atif->notification_cfg.enabled || event->type != atif->notification_cfg.command_code) /* Not our event */ return NOTIFY_DONE; /* Check pending SBIOS requests */ - handle = ACPI_HANDLE(&adev->pdev->dev); - count = amdgpu_atif_get_sbios_requests(handle, &req); + count = amdgpu_atif_get_sbios_requests(atif, &req); if (count <= 0) return NOTIFY_DONE; @@ -641,8 +711,8 @@ static int amdgpu_acpi_event(struct notifier_block *nb, */ int amdgpu_acpi_init(struct amdgpu_device *adev) { - acpi_handle handle; - struct amdgpu_atif *atif = &adev->atif; + acpi_handle handle, atif_handle; + struct amdgpu_atif *atif; struct amdgpu_atcs *atcs = &adev->atcs; int ret; @@ -658,12 +728,26 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret); } + /* Probe for ATIF, and initialize it if found */ + atif_handle = amdgpu_atif_probe_handle(handle); + if (!atif_handle) + goto out; + + atif = kzalloc(sizeof(*atif), GFP_KERNEL); + if (!atif) { + DRM_WARN("Not enough memory to initialize ATIF\n"); + goto out; + } + atif->handle = atif_handle; + /* Call the ATIF method */ - ret = amdgpu_atif_verify_interface(handle, atif); + ret = amdgpu_atif_verify_interface(atif); if (ret) { DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret); + kfree(atif); goto out; } + adev->atif = atif; if (atif->notifications.brightness_change) { struct drm_encoder *tmp; @@ -693,8 +777,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) } if (atif->functions.system_params) { - ret = amdgpu_atif_get_notification_params(handle, - &atif->notification_cfg); + ret = amdgpu_atif_get_notification_params(atif); if (ret) { DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n", ret); @@ -720,4 +803,6 @@ out: void amdgpu_acpi_fini(struct amdgpu_device *adev) { unregister_acpi_notifier(&adev->acpi_nb); + if (adev->atif) + kfree(adev->atif); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index daa06e7c5bb7..ca8bf1c9a98e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -90,6 +90,12 @@ bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays; } +#if defined(CONFIG_ACPI) +void *amdgpu_atpx_get_dhandle(void) { + return amdgpu_atpx_priv.dhandle; +} +#endif + /** * amdgpu_atpx_call - call an ATPX method * @@ -569,6 +575,7 @@ static const struct amdgpu_px_quirk 
amdgpu_px_quirk_list[] = { { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0, 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 82312a7bc6ad..9c85a90be293 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -927,6 +927,10 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, r = amdgpu_bo_vm_update_pte(p); if (r) return r; + + r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv); + if (r) + return r; } return amdgpu_cs_sync_rings(p); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6e5284e6c028..2c5f093e79e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2747,6 +2747,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) if (r) return r; + /* Make sure IB tests flushed */ + flush_delayed_work(&adev->late_init_work); + /* blat the mode back in */ if (fbcon) { if (!amdgpu_device_has_dc_support(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 39ec6b8890a1..e74d620d9699 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -376,7 +376,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; uint64_t index; - if (ring != &adev->uvd.inst[ring->me].ring) { + if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) { ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs]; ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index f70eeed9ed76..7aaa263ad8c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -231,6 +231,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE) fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY; + /* wrap the last IB with fence */ + if (job && job->uf_addr) { + amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence, + fence_flags | AMDGPU_FENCE_FLAG_64BIT); + } + r = amdgpu_fence_emit(ring, f, fence_flags); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); @@ -243,12 +249,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (ring->funcs->insert_end) ring->funcs->insert_end(ring); - /* wrap the last IB with fence */ - if (job && job->uf_addr) { - amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence, - fence_flags | AMDGPU_FENCE_FLAG_64BIT); - } - if (patch_offset != ~0 && ring->funcs->patch_cond_exec) amdgpu_ring_patch_cond_exec(ring, patch_offset); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index b455da487782..fc818b4d849c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1882,7 +1882,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) if (!amdgpu_device_has_dc_support(adev)) { mutex_lock(&adev->pm.mutex); amdgpu_dpm_get_active_displays(adev); - adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs; + 
adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 127e87b470ff..1b4ad9b2a755 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -52,7 +52,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) unsigned long bo_size; const char *fw_name; const struct common_firmware_header *hdr; - unsigned version_major, version_minor, family_id; + unsigned char fw_check; int r; INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler); @@ -83,12 +83,33 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->vcn.fw->data; adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version); - family_id = le32_to_cpu(hdr->ucode_version) & 0xff; - version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; - version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; - DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n", - version_major, version_minor, family_id); + /* Bit 20-23, it is encode major and non-zero for new naming convention. + * This field is part of version minor and DRM_DISABLED_FLAG in old naming + * convention. Since the l:wq!atest version minor is 0x5B and DRM_DISABLED_FLAG + * is zero in old naming convention, this field is always zero so far. + * These four bits are used to tell which naming convention is present. + */ + fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf; + if (fw_check) { + unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev; + + fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff; + enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff; + enc_major = fw_check; + dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf; + vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf; + DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n", + enc_major, enc_minor, dec_ver, vep, fw_rev); + } else { + unsigned int version_major, version_minor, family_id; + + family_id = le32_to_cpu(hdr->ucode_version) & 0xff; + version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; + version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; + DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n", + version_major, version_minor, family_id); + } bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8) + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b0eb2f537392..fdcb498f6d19 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -107,6 +107,9 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, return; list_add_tail(&base->bo_list, &bo->va); + if (bo->tbo.type == ttm_bo_type_kernel) + list_move(&base->vm_status, &vm->relocated); + if (bo->tbo.resv != vm->root.base.bo->tbo.resv) return; @@ -468,7 +471,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, pt->parent = amdgpu_bo_ref(parent->base.bo); amdgpu_vm_bo_base_init(&entry->base, vm, pt); - list_move(&entry->base.vm_status, &vm->relocated); } if (level < AMDGPU_VM_PTB) { @@ -1463,7 +1465,9 @@ static int 
amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, uint64_t count; max_entries = min(max_entries, 16ull * 1024ull); - for (count = 1; count < max_entries; ++count) { + for (count = 1; + count < max_entries / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); + ++count) { uint64_t idx = pfn + count; if (pages_addr[idx] != @@ -1476,7 +1480,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, dma_addr = pages_addr; } else { addr = pages_addr[pfn]; - max_entries = count; + max_entries = count * (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); } } else if (flags & AMDGPU_PTE_VALID) { @@ -1491,7 +1495,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, if (r) return r; - pfn += last - start + 1; + pfn += (last - start + 1) / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); if (nodes && nodes->size == pfn) { pfn = 0; ++nodes; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 0999c843f623..a71b97519cc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -900,7 +900,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { .emit_frame_size = 4 + /* vce_v3_0_emit_pipeline_sync */ 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */ - .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */ + .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ .emit_ib = amdgpu_vce_ring_emit_ib, .emit_fence = amdgpu_vce_ring_emit_fence, .test_ring = amdgpu_vce_ring_test_ring, @@ -924,7 +924,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = { 6 + /* vce_v3_0_emit_vm_flush */ 4 + /* vce_v3_0_emit_pipeline_sync */ 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */ - .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ + .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */ .emit_ib = vce_v3_0_ring_emit_ib, .emit_vm_flush = vce_v3_0_emit_vm_flush, .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f9add85157e7..770c6b24be0b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2175,6 +2175,46 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) return color_space; } +static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out) +{ + if (timing_out->display_color_depth <= COLOR_DEPTH_888) + return; + + timing_out->display_color_depth--; +} + +static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out, + const struct drm_display_info *info) +{ + int normalized_clk; + if (timing_out->display_color_depth <= COLOR_DEPTH_888) + return; + do { + normalized_clk = timing_out->pix_clk_khz; + /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ + if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) + normalized_clk /= 2; + /* Adjusting pix clock following on HDMI spec based on colour depth */ + switch (timing_out->display_color_depth) { + case COLOR_DEPTH_101010: + normalized_clk = (normalized_clk * 30) / 24; + break; + case COLOR_DEPTH_121212: + normalized_clk = (normalized_clk * 36) / 24; + break; + case COLOR_DEPTH_161616: + normalized_clk = (normalized_clk * 48) / 24; + break; + default: + return; + } + if (normalized_clk <= info->max_tmds_clock) + return; + reduce_mode_colour_depth(timing_out); + + } while (timing_out->display_color_depth > COLOR_DEPTH_888); + +} /*****************************************************************************/ static void @@ -2183,6 +2223,7 @@ 
fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, const struct drm_connector *connector) { struct dc_crtc_timing *timing_out = &stream->timing; + const struct drm_display_info *info = &connector->display_info; memset(timing_out, 0, sizeof(struct dc_crtc_timing)); @@ -2191,8 +2232,10 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, timing_out->v_border_top = 0; timing_out->v_border_bottom = 0; /* TODO: un-hardcode */ - - if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) + if (drm_mode_is_420_only(info, mode_in) + && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; + else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; else @@ -2228,6 +2271,8 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, stream->out_transfer_func->type = TF_TYPE_PREDEFINED; stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; + if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) + adjust_colour_depth_from_display_info(timing_out, info); } static void fill_audio_info(struct audio_info *audio_info, @@ -3928,10 +3973,11 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, if (acrtc->base.state->event) prepare_flip_isr(acrtc); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0]; surface_updates->flip_addr = &addr; - dc_commit_updates_for_stream(adev->dm.dc, surface_updates, 1, @@ -3944,9 +3990,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, __func__, addr.address.grph.addr.high_part, addr.address.grph.addr.low_part); - - - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); } /* @@ -4206,6 +4249,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct drm_connector *connector; struct drm_connector_state *old_con_state, *new_con_state; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; + int crtc_disable_count = 0; drm_atomic_helper_update_legacy_modeset_state(dev, state); @@ -4410,6 +4454,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); bool modeset_needed; + if (old_crtc_state->active && !new_crtc_state->active) + crtc_disable_count++; + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); modeset_needed = modeset_required( @@ -4463,11 +4510,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * so we can put the GPU into runtime suspend if we're not driving any * displays anymore */ + for (i = 0; i < crtc_disable_count; i++) + pm_runtime_put_autosuspend(dev->dev); pm_runtime_mark_last_busy(dev->dev); - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (old_crtc_state->active && !new_crtc_state->active) - pm_runtime_put_autosuspend(dev->dev); - } } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 4304d9e408b8..ace9ad578ca0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -83,22 +83,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) 
? I2C_MOT_TRUE : I2C_MOT_FALSE; enum ddc_result res; - uint32_t read_bytes = msg->size; + ssize_t read_bytes; if (WARN_ON(msg->size > 16)) return -E2BIG; switch (msg->request & ~DP_AUX_I2C_MOT) { case DP_AUX_NATIVE_READ: - res = dal_ddc_service_read_dpcd_data( + read_bytes = dal_ddc_service_read_dpcd_data( TO_DM_AUX(aux)->ddc_service, false, I2C_MOT_UNDEF, msg->address, msg->buffer, - msg->size, - &read_bytes); - break; + msg->size); + return read_bytes; case DP_AUX_NATIVE_WRITE: res = dal_ddc_service_write_dpcd_data( TO_DM_AUX(aux)->ddc_service, @@ -109,15 +108,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, msg->size); break; case DP_AUX_I2C_READ: - res = dal_ddc_service_read_dpcd_data( + read_bytes = dal_ddc_service_read_dpcd_data( TO_DM_AUX(aux)->ddc_service, true, mot, msg->address, msg->buffer, - msg->size, - &read_bytes); - break; + msg->size); + return read_bytes; case DP_AUX_I2C_WRITE: res = dal_ddc_service_write_dpcd_data( TO_DM_AUX(aux)->ddc_service, @@ -139,9 +137,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, r == DDC_RESULT_SUCESSFULL); #endif - if (res != DDC_RESULT_SUCESSFULL) - return -EIO; - return read_bytes; + return msg->size; } static enum drm_connector_status diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c index 5a3346124a01..5a2e952c5bea 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c @@ -255,8 +255,9 @@ static void pp_to_dc_clock_levels_with_latency( DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); for (i = 0; i < clk_level_info->num_levels; i++) { - DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz); - clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz; + DRM_DEBUG("DM_PPLIB:\t %d in 10kHz\n", pp_clks->data[i].clocks_in_khz); + /* translate 10kHz to kHz */ + clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz * 10; clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us; } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index ae48d603ebd6..49c2face1e7a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -629,14 +629,13 @@ bool dal_ddc_service_query_ddc_data( return ret; } -enum ddc_result dal_ddc_service_read_dpcd_data( +ssize_t dal_ddc_service_read_dpcd_data( struct ddc_service *ddc, bool i2c, enum i2c_mot_mode mot, uint32_t address, uint8_t *data, - uint32_t len, - uint32_t *read) + uint32_t len) { struct aux_payload read_payload = { .i2c_over_aux = i2c, @@ -653,8 +652,6 @@ enum ddc_result dal_ddc_service_read_dpcd_data( .mot = mot }; - *read = 0; - if (len > DEFAULT_AUX_MAX_DATA_SIZE) { BREAK_TO_DEBUGGER(); return DDC_RESULT_FAILED_INVALID_OPERATION; @@ -664,8 +661,7 @@ enum ddc_result dal_ddc_service_read_dpcd_data( ddc->ctx->i2caux, ddc->ddc_pin, &command)) { - *read = command.payloads->length; - return DDC_RESULT_SUCESSFULL; + return (ssize_t)command.payloads->length; } return DDC_RESULT_FAILED_OPERATION; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 7857cb42b3e6..bdd121485cbc 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1767,12 +1767,10 @@ static void dp_test_send_link_training(struct dc_link *link) 
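/*
 * Illustrative sketch only, not part of the patch; all names below are
 * invented. It shows the calling convention the dm_dp_aux_transfer and
 * dal_ddc_service_read_dpcd_data hunks above move to: instead of a status
 * enum plus an out-parameter byte count, the read path returns the number
 * of bytes transferred on success and a negative errno on failure, which
 * is the convention drm_dp_aux transfer hooks generally follow.
 */
#include <errno.h>
#include <stddef.h>
#include <string.h>
#include <sys/types.h>	/* ssize_t */

#define DEMO_MAX_XFER 16	/* mirrors the 16-byte cap in the hunk above */

static ssize_t demo_aux_read(const unsigned char *dev_regs, size_t dev_len,
			     unsigned int address, void *buf, size_t len)
{
	if (len > DEMO_MAX_XFER)
		return -E2BIG;		/* reject oversized requests up front */
	if (address >= dev_len)
		return -EIO;		/* nothing readable at this address */
	if (len > dev_len - address)
		len = dev_len - address;	/* short read is not an error */

	memcpy(buf, dev_regs + address, len);
	return (ssize_t)len;		/* bytes read, not a status code */
}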
dp_retrain_link_dp_test(link, &link_settings, false); } -/* TODO hbr2 compliance eye output is unstable +/* TODO Raven hbr2 compliance eye output is unstable * (toggling on and off) with debugger break * This caueses intermittent PHY automation failure * Need to look into the root cause */ -static uint8_t force_tps4_for_cp2520 = 1; - static void dp_test_send_phy_test_pattern(struct dc_link *link) { union phy_test_pattern dpcd_test_pattern; @@ -1832,13 +1830,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) break; case PHY_TEST_PATTERN_CP2520_1: /* CP2520 pattern is unstable, temporarily use TPS4 instead */ - test_pattern = (force_tps4_for_cp2520 == 1) ? + test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ? DP_TEST_PATTERN_TRAINING_PATTERN4 : DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; break; case PHY_TEST_PATTERN_CP2520_2: /* CP2520 pattern is unstable, temporarily use TPS4 instead */ - test_pattern = (force_tps4_for_cp2520 == 1) ? + test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ? DP_TEST_PATTERN_TRAINING_PATTERN4 : DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; break; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 9cfde0ccf4e9..53c71296f3dd 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -76,6 +76,7 @@ struct dc_caps { bool is_apu; bool dual_link_dvi; bool post_blend_color_processing; + bool force_dp_tps4_for_cp2520; }; struct dc_dcc_surface_param { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c index b235a75355b8..bae752332a9f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c @@ -741,6 +741,29 @@ static struct mem_input_funcs dce_mi_funcs = { .mem_input_is_flip_pending = dce_mi_is_flip_pending }; +static struct mem_input_funcs dce112_mi_funcs = { + .mem_input_program_display_marks = dce112_mi_program_display_marks, + .allocate_mem_input = dce_mi_allocate_dmif, + .free_mem_input = dce_mi_free_dmif, + .mem_input_program_surface_flip_and_addr = + dce_mi_program_surface_flip_and_addr, + .mem_input_program_pte_vm = dce_mi_program_pte_vm, + .mem_input_program_surface_config = + dce_mi_program_surface_config, + .mem_input_is_flip_pending = dce_mi_is_flip_pending +}; + +static struct mem_input_funcs dce120_mi_funcs = { + .mem_input_program_display_marks = dce120_mi_program_display_marks, + .allocate_mem_input = dce_mi_allocate_dmif, + .free_mem_input = dce_mi_free_dmif, + .mem_input_program_surface_flip_and_addr = + dce_mi_program_surface_flip_and_addr, + .mem_input_program_pte_vm = dce_mi_program_pte_vm, + .mem_input_program_surface_config = + dce_mi_program_surface_config, + .mem_input_is_flip_pending = dce_mi_is_flip_pending +}; void dce_mem_input_construct( struct dce_mem_input *dce_mi, @@ -769,7 +792,7 @@ void dce112_mem_input_construct( const struct dce_mem_input_mask *mi_mask) { dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask); - dce_mi->base.funcs->mem_input_program_display_marks = dce112_mi_program_display_marks; + dce_mi->base.funcs = &dce112_mi_funcs; } void dce120_mem_input_construct( @@ -781,5 +804,5 @@ void dce120_mem_input_construct( const struct dce_mem_input_mask *mi_mask) { dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask); - dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks; + dce_mi->base.funcs = &dce120_mi_funcs; } diff --git 
a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 38ec0d609297..344dd2e69e7c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -678,9 +678,22 @@ bool dce100_validate_bandwidth( struct dc *dc, struct dc_state *context) { - /* TODO implement when needed but for now hardcode max value*/ - context->bw.dce.dispclk_khz = 681000; - context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER; + int i; + bool at_least_one_pipe = false; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].stream) + at_least_one_pipe = true; + } + + if (at_least_one_pipe) { + /* TODO implement when needed but for now hardcode max value*/ + context->bw.dce.dispclk_khz = 681000; + context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER; + } else { + context->bw.dce.dispclk_khz = 0; + context->bw.dce.yclk_khz = 0; + } return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index df5cb2d1d164..34dac84066a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -1027,6 +1027,8 @@ static bool construct( dc->caps.max_slave_planes = 1; dc->caps.is_apu = true; dc->caps.post_blend_color_processing = false; + /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */ + dc->caps.force_dp_tps4_for_cp2520 = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h index 30b3a08b91be..090b7a8dd67b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h @@ -102,14 +102,13 @@ bool dal_ddc_service_query_ddc_data( uint8_t *read_buf, uint32_t read_size); -enum ddc_result dal_ddc_service_read_dpcd_data( +ssize_t dal_ddc_service_read_dpcd_data( struct ddc_service *ddc, bool i2c, enum i2c_mot_mode mot, uint32_t address, uint8_t *data, - uint32_t len, - uint32_t *read); + uint32_t len); enum ddc_result dal_ddc_service_write_dpcd_data( struct ddc_service *ddc, diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 092d800b703a..33b4de4ad66e 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -1433,7 +1433,10 @@ struct atom_smc_dpm_info_v4_1 uint8_t acggfxclkspreadpercent; uint16_t acggfxclkspreadfreq; - uint32_t boardreserved[10]; + uint8_t Vr2_I2C_address; + uint8_t padding_vr2[3]; + + uint32_t boardreserved[9]; }; /* diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c index 5325661fedff..d27c1c9df286 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c @@ -512,14 +512,82 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI return 0; } +static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr, + struct pp_atomfwctrl_bios_boot_up_values *boot_values, + struct atom_firmware_info_v3_2 *fw_info) +{ + uint32_t frequency = 0; + + boot_values->ulRevision = fw_info->firmware_revision; + boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz; + boot_values->ulUClk = fw_info->bootup_mclk_in10khz; + 
boot_values->usVddc = fw_info->bootup_vddc_mv; + boot_values->usVddci = fw_info->bootup_vddci_mv; + boot_values->usMvddc = fw_info->bootup_mvddc_mv; + boot_values->usVddGfx = fw_info->bootup_vddgfx_mv; + boot_values->ucCoolingID = fw_info->coolingsolution_id; + boot_values->ulSocClk = 0; + boot_values->ulDCEFClk = 0; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency)) + boot_values->ulSocClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency)) + boot_values->ulDCEFClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency)) + boot_values->ulEClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency)) + boot_values->ulVClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency)) + boot_values->ulDClk = frequency; +} + +static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr, + struct pp_atomfwctrl_bios_boot_up_values *boot_values, + struct atom_firmware_info_v3_1 *fw_info) +{ + uint32_t frequency = 0; + + boot_values->ulRevision = fw_info->firmware_revision; + boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz; + boot_values->ulUClk = fw_info->bootup_mclk_in10khz; + boot_values->usVddc = fw_info->bootup_vddc_mv; + boot_values->usVddci = fw_info->bootup_vddci_mv; + boot_values->usMvddc = fw_info->bootup_mvddc_mv; + boot_values->usVddGfx = fw_info->bootup_vddgfx_mv; + boot_values->ucCoolingID = fw_info->coolingsolution_id; + boot_values->ulSocClk = 0; + boot_values->ulDCEFClk = 0; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency)) + boot_values->ulSocClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency)) + boot_values->ulDCEFClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency)) + boot_values->ulEClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency)) + boot_values->ulVClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency)) + boot_values->ulDClk = frequency; +} + int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, struct pp_atomfwctrl_bios_boot_up_values *boot_values) { - struct atom_firmware_info_v3_1 *info = NULL; + struct atom_firmware_info_v3_2 *fwinfo_3_2; + struct atom_firmware_info_v3_1 *fwinfo_3_1; + struct atom_common_table_header *info = NULL; uint16_t ix; ix = GetIndexIntoMasterDataTable(firmwareinfo); - info = (struct atom_firmware_info_v3_1 *) + info = (struct atom_common_table_header *) smu_atom_get_data_table(hwmgr->adev, ix, NULL, NULL, NULL); @@ -528,16 +596,18 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, return -EINVAL; } - boot_values->ulRevision = info->firmware_revision; - boot_values->ulGfxClk = info->bootup_sclk_in10khz; - boot_values->ulUClk = info->bootup_mclk_in10khz; - boot_values->usVddc = info->bootup_vddc_mv; - boot_values->usVddci = info->bootup_vddci_mv; - boot_values->usMvddc = info->bootup_mvddc_mv; - boot_values->usVddGfx = info->bootup_vddgfx_mv; - boot_values->ucCoolingID = info->coolingsolution_id; - boot_values->ulSocClk = 0; - boot_values->ulDCEFClk = 0; + if ((info->format_revision == 3) && (info->content_revision == 2)) { + fwinfo_3_2 = (struct atom_firmware_info_v3_2 
*)info; + pp_atomfwctrl_copy_vbios_bootup_values_3_2(hwmgr, + boot_values, fwinfo_3_2); + } else if ((info->format_revision == 3) && (info->content_revision == 1)) { + fwinfo_3_1 = (struct atom_firmware_info_v3_1 *)info; + pp_atomfwctrl_copy_vbios_bootup_values_3_1(hwmgr, + boot_values, fwinfo_3_1); + } else { + pr_info("Fw info table revision does not match!"); + return -EINVAL; + } return 0; } @@ -629,5 +699,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr, param->acggfxclkspreadpercent = info->acggfxclkspreadpercent; param->acggfxclkspreadfreq = info->acggfxclkspreadfreq; + param->Vr2_I2C_address = info->Vr2_I2C_address; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h index fe10aa4db5e6..22e21668c93a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h @@ -136,6 +136,9 @@ struct pp_atomfwctrl_bios_boot_up_values { uint32_t ulUClk; uint32_t ulSocClk; uint32_t ulDCEFClk; + uint32_t ulEClk; + uint32_t ulVClk; + uint32_t ulDClk; uint16_t usVddc; uint16_t usVddci; uint16_t usMvddc; @@ -207,6 +210,8 @@ struct pp_atomfwctrl_smc_dpm_parameters uint8_t acggfxclkspreadenabled; uint8_t acggfxclkspreadpercent; uint16_t acggfxclkspreadfreq; + + uint8_t Vr2_I2C_address; }; int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 782e2098824d..c98e5de777cd 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -81,6 +81,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr) data->registry_data.disallowed_features = 0x0; data->registry_data.od_state_in_dc_support = 0; + data->registry_data.thermal_support = 1; data->registry_data.skip_baco_hardware = 0; data->registry_data.log_avfs_param = 0; @@ -803,6 +804,9 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr) data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID; + data->vbios_boot_state.eclock = boot_up_values.ulEClk; + data->vbios_boot_state.dclock = boot_up_values.ulDClk; + data->vbios_boot_state.vclock = boot_up_values.ulVClk; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, (uint32_t)(data->vbios_boot_state.dcef_clock / 100)); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h index e81ded1ec198..49b38df8c7f2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h @@ -167,6 +167,9 @@ struct vega12_vbios_boot_state { uint32_t mem_clock; uint32_t soc_clock; uint32_t dcef_clock; + uint32_t eclock; + uint32_t dclock; + uint32_t vclock; }; #define DPMTABLE_OD_UPDATE_SCLK 0x00000001 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c index 888ddca902d8..29914700ee82 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c @@ -230,6 +230,8 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF; } + ppsmc_pptable->Vr2_I2C_address = 
smc_dpm_table.Vr2_I2C_address; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h index 2f8a3b983cce..b08526fd1619 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h @@ -499,7 +499,10 @@ typedef struct { uint8_t AcgGfxclkSpreadPercent; uint16_t AcgGfxclkSpreadFreq; - uint32_t BoardReserved[10]; + uint8_t Vr2_I2C_address; + uint8_t padding_vr2[3]; + + uint32_t BoardReserved[9]; uint32_t MmHubPadding[7]; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index d644a9bb9078..9f407c48d4f0 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -381,6 +381,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) uint32_t fw_to_load; int result = 0; struct SMU_DRAMData_TOC *toc; + uint32_t num_entries = 0; if (!hwmgr->reload_fw) { pr_info("skip reloading...\n"); @@ -422,41 +423,41 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) } toc = (struct SMU_DRAMData_TOC *)smu_data->header; - toc->num_entries = 0; toc->structure_version = 1; PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), + UCODE_ID_RLC_G, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), + UCODE_ID_CP_CE, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), + UCODE_ID_CP_PFP, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), + UCODE_ID_CP_ME, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), + UCODE_ID_CP_MEC, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), + UCODE_ID_CP_MEC_JT1, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), + UCODE_ID_CP_MEC_JT2, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), + UCODE_ID_SDMA0, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), + UCODE_ID_SDMA1, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); if (!hwmgr->not_vf) PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]), + UCODE_ID_MEC_STORAGE, &toc->entry[num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); + toc->num_entries = num_entries; 
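/*
 * Minimal sketch of the pattern used in the smu7_request_smu_load_fw hunk
 * above; the names here are invented for illustration. The change counts
 * entries in a plain local variable and stores the total into the shared,
 * device-visible TOC exactly once at the end, so the CPU no longer does a
 * read-modify-write on the mapped buffer for every entry it populates.
 */
#include <stdint.h>

struct demo_toc {
	uint32_t structure_version;
	uint32_t num_entries;
	uint32_t entry[16];
};

static void demo_fill_toc(struct demo_toc *toc /* device-visible mapping */)
{
	uint32_t n = 0;			/* local index, never read back from *toc */

	toc->structure_version = 1;
	toc->entry[n++] = 0xa0;		/* stand-ins for real firmware IDs */
	toc->entry[n++] = 0xa1;
	toc->entry[n++] = 0xa2;

	toc->num_entries = n;		/* publish the final count once */
}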
smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr)); smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr)); diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 8d20faa198cf..0a788d76ed5f 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -278,7 +278,6 @@ static int malidp_init(struct drm_device *drm) static void malidp_fini(struct drm_device *drm) { - drm_atomic_helper_shutdown(drm); drm_mode_config_cleanup(drm); } @@ -646,6 +645,7 @@ vblank_fail: malidp_de_irq_fini(drm); drm->irq_enabled = false; irq_init_fail: + drm_atomic_helper_shutdown(drm); component_unbind_all(dev, drm); bind_fail: of_node_put(malidp->crtc.port); @@ -681,6 +681,7 @@ static void malidp_unbind(struct device *dev) malidp_se_irq_fini(drm); malidp_de_irq_fini(drm); drm->irq_enabled = false; + drm_atomic_helper_shutdown(drm); component_unbind_all(dev, drm); of_node_put(malidp->crtc.port); malidp->crtc.port = NULL; diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c index d789b46dc817..069783e715f1 100644 --- a/drivers/gpu/drm/arm/malidp_hw.c +++ b/drivers/gpu/drm/arm/malidp_hw.c @@ -634,7 +634,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = { .vsync_irq = MALIDP500_DE_IRQ_VSYNC, }, .se_irq_map = { - .irq_mask = MALIDP500_SE_IRQ_CONF_MODE, + .irq_mask = MALIDP500_SE_IRQ_CONF_MODE | + MALIDP500_SE_IRQ_GLOBAL, .vsync_irq = 0, }, .dc_irq_map = { diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 7a44897c50fe..29409a65d864 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -23,6 +23,7 @@ /* Layer specific register offsets */ #define MALIDP_LAYER_FORMAT 0x000 +#define LAYER_FORMAT_MASK 0x3f #define MALIDP_LAYER_CONTROL 0x004 #define LAYER_ENABLE (1 << 0) #define LAYER_FLOWCFG_MASK 7 @@ -235,8 +236,8 @@ static int malidp_de_plane_check(struct drm_plane *plane, if (state->rotation & MALIDP_ROTATED_MASK) { int val; - val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_h, - state->crtc_w, + val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w, + state->crtc_h, fb->format->format); if (val < 0) return val; @@ -337,7 +338,9 @@ static void malidp_de_plane_update(struct drm_plane *plane, dest_w = plane->state->crtc_w; dest_h = plane->state->crtc_h; - malidp_hw_write(mp->hwdev, ms->format, mp->layer->base); + val = malidp_hw_read(mp->hwdev, mp->layer->base); + val = (val & ~LAYER_FORMAT_MASK) | ms->format; + malidp_hw_write(mp->hwdev, val, mp->layer->base); for (i = 0; i < ms->n_planes; i++) { /* calculate the offset for the layer's plane registers */ diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 03eeee11dd5b..42a40daff132 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -519,8 +519,9 @@ static irqreturn_t armada_drm_irq(int irq, void *arg) u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR); /* - * This is rediculous - rather than writing bits to clear, we - * have to set the actual status register value. This is racy. + * Reading the ISR appears to clear bits provided CLEAN_SPU_IRQ_ISR + * is set. Writing has some other effect to acknowledge the IRQ - + * without this, we only get a single IRQ. 
*/ writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR); @@ -1116,16 +1117,22 @@ armada_drm_crtc_set_property(struct drm_crtc *crtc, static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc) { struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + unsigned long flags; + spin_lock_irqsave(&dcrtc->irq_lock, flags); armada_drm_crtc_enable_irq(dcrtc, VSYNC_IRQ_ENA); + spin_unlock_irqrestore(&dcrtc->irq_lock, flags); return 0; } static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc) { struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + unsigned long flags; + spin_lock_irqsave(&dcrtc->irq_lock, flags); armada_drm_crtc_disable_irq(dcrtc, VSYNC_IRQ_ENA); + spin_unlock_irqrestore(&dcrtc->irq_lock, flags); } static const struct drm_crtc_funcs armada_crtc_funcs = { @@ -1415,6 +1422,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1); writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1); writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA); + readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR); writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR); ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc", diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h index 27319a8335e2..345dc4d0851e 100644 --- a/drivers/gpu/drm/armada/armada_hw.h +++ b/drivers/gpu/drm/armada/armada_hw.h @@ -160,6 +160,7 @@ enum { CFG_ALPHAM_GRA = 0x1 << 16, CFG_ALPHAM_CFG = 0x2 << 16, CFG_ALPHA_MASK = 0xff << 8, +#define CFG_ALPHA(x) ((x) << 8) CFG_PIXCMD_MASK = 0xff, }; diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index c391955009d6..afa7ded3ae31 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -28,6 +28,7 @@ struct armada_ovl_plane_properties { uint16_t contrast; uint16_t saturation; uint32_t colorkey_mode; + uint32_t colorkey_enable; }; struct armada_ovl_plane { @@ -54,11 +55,13 @@ armada_ovl_update_attr(struct armada_ovl_plane_properties *prop, writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE); spin_lock_irq(&dcrtc->irq_lock); - armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA, - CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK, - dcrtc->base + LCD_SPU_DMA_CTRL1); - - armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG); + armada_updatel(prop->colorkey_mode, + CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK, + dcrtc->base + LCD_SPU_DMA_CTRL1); + if (dcrtc->variant->has_spu_adv_reg) + armada_updatel(prop->colorkey_enable, + ADV_GRACOLORKEY | ADV_VIDCOLORKEY, + dcrtc->base + LCD_SPU_ADV_REG); spin_unlock_irq(&dcrtc->irq_lock); } @@ -321,8 +324,17 @@ static int armada_ovl_plane_set_property(struct drm_plane *plane, dplane->prop.colorkey_vb |= K2B(val); update_attr = true; } else if (property == priv->colorkey_mode_prop) { - dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK; - dplane->prop.colorkey_mode |= CFG_CKMODE(val); + if (val == CKMODE_DISABLE) { + dplane->prop.colorkey_mode = + CFG_CKMODE(CKMODE_DISABLE) | + CFG_ALPHAM_CFG | CFG_ALPHA(255); + dplane->prop.colorkey_enable = 0; + } else { + dplane->prop.colorkey_mode = + CFG_CKMODE(val) | + CFG_ALPHAM_GRA | CFG_ALPHA(0); + dplane->prop.colorkey_enable = ADV_GRACOLORKEY; + } update_attr = true; } else if (property == priv->brightness_prop) { dplane->prop.brightness = val - 256; @@ -453,7 +465,9 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs) dplane->prop.colorkey_yr = 
0xfefefe00; dplane->prop.colorkey_ug = 0x01010100; dplane->prop.colorkey_vb = 0x01010100; - dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB); + dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB) | + CFG_ALPHAM_GRA | CFG_ALPHA(0); + dplane->prop.colorkey_enable = ADV_GRACOLORKEY; dplane->prop.brightness = 0; dplane->prop.contrast = 0x4000; dplane->prop.saturation = 0x4000; diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 250effa0e6b8..a6e8f4591e63 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -14,6 +14,7 @@ #include <drm/bridge/mhl.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> +#include <drm/drm_encoder.h> #include <linux/clk.h> #include <linux/delay.h> @@ -72,9 +73,7 @@ struct sii8620 { struct regulator_bulk_data supplies[2]; struct mutex lock; /* context lock, protects fields below */ int error; - int pixel_clock; unsigned int use_packed_pixel:1; - int video_code; enum sii8620_mode mode; enum sii8620_sink_type sink_type; u8 cbus_status; @@ -82,7 +81,6 @@ struct sii8620 { u8 xstat[MHL_XDS_SIZE]; u8 devcap[MHL_DCAP_SIZE]; u8 xdevcap[MHL_XDC_SIZE]; - u8 avif[HDMI_INFOFRAME_SIZE(AVI)]; bool feature_complete; bool devcap_read; bool sink_detected; @@ -1017,21 +1015,36 @@ static void sii8620_stop_video(struct sii8620 *ctx) static void sii8620_set_format(struct sii8620 *ctx) { + u8 out_fmt; + if (sii8620_is_mhl3(ctx)) { sii8620_setbits(ctx, REG_M3_P0CTRL, BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED, ctx->use_packed_pixel ? ~0 : 0); } else { + if (ctx->use_packed_pixel) { + sii8620_write_seq_static(ctx, + REG_VID_MODE, BIT_VID_MODE_M1080P, + REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1, + REG_MHLTX_CTL6, 0x60 + ); + } else { sii8620_write_seq_static(ctx, REG_VID_MODE, 0, REG_MHL_TOP_CTL, 1, REG_MHLTX_CTL6, 0xa0 ); + } } + if (ctx->use_packed_pixel) + out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL); + else + out_fmt = VAL_TPI_FORMAT(RGB, FULL); + sii8620_write_seq(ctx, REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL), - REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL), + REG_TPI_OUTPUT, out_fmt, ); } @@ -1082,18 +1095,28 @@ static ssize_t mhl3_infoframe_pack(struct mhl3_infoframe *frame, return frm_len; } -static void sii8620_set_infoframes(struct sii8620 *ctx) +static void sii8620_set_infoframes(struct sii8620 *ctx, + struct drm_display_mode *mode) { struct mhl3_infoframe mhl_frm; union hdmi_infoframe frm; u8 buf[31]; int ret; + ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi, + mode, + true); + if (ctx->use_packed_pixel) + frm.avi.colorspace = HDMI_COLORSPACE_YUV422; + + if (!ret) + ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf)); + if (ret > 0) + sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3); + if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) { sii8620_write(ctx, REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI); - sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif + 3, - ARRAY_SIZE(ctx->avif) - 3); sii8620_write(ctx, REG_PKT_FILTER_0, BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT | BIT_PKT_FILTER_0_DROP_MPEG_PKT | @@ -1102,16 +1125,6 @@ static void sii8620_set_infoframes(struct sii8620 *ctx) return; } - ret = hdmi_avi_infoframe_init(&frm.avi); - frm.avi.colorspace = HDMI_COLORSPACE_YUV422; - frm.avi.active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; - frm.avi.picture_aspect = HDMI_PICTURE_ASPECT_16_9; - frm.avi.colorimetry = HDMI_COLORIMETRY_ITU_709; - frm.avi.video_code = ctx->video_code; - if (!ret) - ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf)); - if (ret > 0) - 
sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3); sii8620_write(ctx, REG_PKT_FILTER_0, BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT | BIT_PKT_FILTER_0_DROP_MPEG_PKT | @@ -1131,6 +1144,9 @@ static void sii8620_set_infoframes(struct sii8620 *ctx) static void sii8620_start_video(struct sii8620 *ctx) { + struct drm_display_mode *mode = + &ctx->bridge.encoder->crtc->state->adjusted_mode; + if (!sii8620_is_mhl3(ctx)) sii8620_stop_video(ctx); @@ -1149,8 +1165,14 @@ static void sii8620_start_video(struct sii8620 *ctx) sii8620_set_format(ctx); if (!sii8620_is_mhl3(ctx)) { - sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), - MHL_DST_LM_CLK_MODE_NORMAL | MHL_DST_LM_PATH_ENABLED); + u8 link_mode = MHL_DST_LM_PATH_ENABLED; + + if (ctx->use_packed_pixel) + link_mode |= MHL_DST_LM_CLK_MODE_PACKED_PIXEL; + else + link_mode |= MHL_DST_LM_CLK_MODE_NORMAL; + + sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), link_mode); sii8620_set_auto_zone(ctx); } else { static const struct { @@ -1167,7 +1189,7 @@ static void sii8620_start_video(struct sii8620 *ctx) MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 }, }; u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN; - int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3); + int clk = mode->clock * (ctx->use_packed_pixel ? 2 : 3); int i; for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i) @@ -1196,7 +1218,7 @@ static void sii8620_start_video(struct sii8620 *ctx) clk_spec[i].link_rate); } - sii8620_set_infoframes(ctx); + sii8620_set_infoframes(ctx, mode); } static void sii8620_disable_hpd(struct sii8620 *ctx) @@ -1661,14 +1683,18 @@ static void sii8620_status_dcap_ready(struct sii8620 *ctx) static void sii8620_status_changed_path(struct sii8620 *ctx) { - if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) { - sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), - MHL_DST_LM_CLK_MODE_NORMAL - | MHL_DST_LM_PATH_ENABLED); - } else { - sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), - MHL_DST_LM_CLK_MODE_NORMAL); - } + u8 link_mode; + + if (ctx->use_packed_pixel) + link_mode = MHL_DST_LM_CLK_MODE_PACKED_PIXEL; + else + link_mode = MHL_DST_LM_CLK_MODE_NORMAL; + + if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) + link_mode |= MHL_DST_LM_PATH_ENABLED; + + sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), + link_mode); } static void sii8620_msc_mr_write_stat(struct sii8620 *ctx) @@ -2242,8 +2268,6 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge, mutex_lock(&ctx->lock); ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode); - ctx->video_code = drm_match_cea_mode(adjusted_mode); - ctx->pixel_clock = adjusted_mode->clock; mutex_unlock(&ctx->lock); diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index 50c73c0a20b9..d638c0fb3418 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c @@ -553,24 +553,13 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, /* Clone the lessor file to create a new file for us */ DRM_DEBUG_LEASE("Allocating lease file\n"); - path_get(&lessor_file->f_path); - lessee_file = alloc_file(&lessor_file->f_path, - lessor_file->f_mode, - fops_get(lessor_file->f_inode->i_fop)); - + lessee_file = filp_clone_open(lessor_file); if (IS_ERR(lessee_file)) { ret = PTR_ERR(lessee_file); goto out_lessee; } - /* Initialize the new file for DRM */ - DRM_DEBUG_LEASE("Initializing the file with %p\n", lessee_file->f_op->open); - ret = lessee_file->f_op->open(lessee_file->f_inode, lessee_file); - if (ret) - goto out_lessee_file; - lessee_priv = lessee_file->private_data; - /* Change the file to 
a master one */ drm_master_put(&lessee_priv->master); lessee_priv->master = lessee; @@ -588,9 +577,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n"); return 0; -out_lessee_file: - fput(lessee_file); - out_lessee: drm_master_put(&lessee); diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c index 1f8031e30f53..cdb10f885a4f 100644 --- a/drivers/gpu/drm/drm_property.c +++ b/drivers/gpu/drm/drm_property.c @@ -532,7 +532,7 @@ static void drm_property_free_blob(struct kref *kref) drm_mode_object_unregister(blob->dev, &blob->base); - kfree(blob); + kvfree(blob); } /** @@ -559,7 +559,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob)) return ERR_PTR(-EINVAL); - blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); + blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); if (!blob) return ERR_PTR(-ENOMEM); @@ -576,7 +576,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB, true, drm_property_free_blob); if (ret) { - kfree(blob); + kvfree(blob); return ERR_PTR(-EINVAL); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index e5013a999147..540b59fb4103 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -631,8 +631,11 @@ static struct platform_driver etnaviv_platform_driver = { }, }; +static struct platform_device *etnaviv_drm; + static int __init etnaviv_init(void) { + struct platform_device *pdev; int ret; struct device_node *np; @@ -644,7 +647,7 @@ static int __init etnaviv_init(void) ret = platform_driver_register(&etnaviv_platform_driver); if (ret != 0) - platform_driver_unregister(&etnaviv_gpu_driver); + goto unregister_gpu_driver; /* * If the DT contains at least one available GPU device, instantiate @@ -653,20 +656,33 @@ static int __init etnaviv_init(void) for_each_compatible_node(np, NULL, "vivante,gc") { if (!of_device_is_available(np)) continue; - - platform_device_register_simple("etnaviv", -1, NULL, 0); + pdev = platform_device_register_simple("etnaviv", -1, + NULL, 0); + if (IS_ERR(pdev)) { + ret = PTR_ERR(pdev); + of_node_put(np); + goto unregister_platform_driver; + } + etnaviv_drm = pdev; of_node_put(np); break; } + return 0; + +unregister_platform_driver: + platform_driver_unregister(&etnaviv_platform_driver); +unregister_gpu_driver: + platform_driver_unregister(&etnaviv_gpu_driver); return ret; } module_init(etnaviv_init); static void __exit etnaviv_exit(void) { - platform_driver_unregister(&etnaviv_gpu_driver); + platform_device_unregister(etnaviv_drm); platform_driver_unregister(&etnaviv_platform_driver); + platform_driver_unregister(&etnaviv_gpu_driver); } module_exit(etnaviv_exit); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index dd430f0f8ff5..90f17ff7888e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -131,6 +131,9 @@ struct etnaviv_gpu { struct work_struct sync_point_work; int sync_point_event; + /* hang detection */ + u32 hangcheck_dma_addr; + void __iomem *mmio; int irq; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index a74eb57af15b..50d6b88cb7aa 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ 
-10,6 +10,7 @@ #include "etnaviv_gem.h" #include "etnaviv_gpu.h" #include "etnaviv_sched.h" +#include "state.xml.h" static int etnaviv_job_hang_limit = 0; module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444); @@ -85,6 +86,29 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) { struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); struct etnaviv_gpu *gpu = submit->gpu; + u32 dma_addr; + int change; + + /* + * If the GPU managed to complete this jobs fence, the timout is + * spurious. Bail out. + */ + if (fence_completed(gpu, submit->out_fence->seqno)) + return; + + /* + * If the GPU is still making forward progress on the front-end (which + * should never loop) we shift out the timeout to give it a chance to + * finish the job. + */ + dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); + change = dma_addr - gpu->hangcheck_dma_addr; + if (change < 0 || change > 16) { + gpu->hangcheck_dma_addr = dma_addr; + schedule_delayed_work(&sched_job->work_tdr, + sched_job->sched->timeout); + return; + } /* block scheduler */ kthread_park(gpu->sched.thread); diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 82c95c34447f..e868773ea509 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -265,7 +265,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, unsigned long val; val = readl(ctx->addr + DECON_WINCONx(win)); - val &= ~WINCONx_BPPMODE_MASK; + val &= WINCONx_ENWIN_F; switch (fb->format->format) { case DRM_FORMAT_XRGB1555: @@ -356,8 +356,8 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, writel(val, ctx->addr + DECON_VIDOSDxB(win)); } - val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | - VIDOSD_Wx_ALPHA_B_F(0x0); + val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) | + VIDOSD_Wx_ALPHA_B_F(0xff); writel(val, ctx->addr + DECON_VIDOSDxC(win)); val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index a81b4a5e24a7..ed3cc2989f93 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -420,7 +420,7 @@ err_mode_config_cleanup: err_free_private: kfree(private); err_free_drm: - drm_dev_unref(drm); + drm_dev_put(drm); return ret; } @@ -444,7 +444,7 @@ static void exynos_drm_unbind(struct device *dev) drm->dev_private = NULL; dev_set_drvdata(dev, NULL); - drm_dev_unref(drm); + drm_dev_put(drm); } static const struct component_master_ops exynos_drm_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 7fcc1a7ab1a0..27b7d34d776c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -138,7 +138,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, err: while (i--) - drm_gem_object_unreference_unlocked(&exynos_gem[i]->base); + drm_gem_object_put_unlocked(&exynos_gem[i]->base); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 6127ef25acd6..e8d0670bb5f8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -470,17 +470,18 @@ static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation) static void fimc_set_window(struct fimc_context *ctx, struct exynos_drm_ipp_buffer *buf) { 
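/*
 * Illustrative sketch (invented names) of the progress check added to
 * etnaviv_sched_timedout_job in the hunk above: a scheduler timeout is
 * treated as a real hang only if the job's fence has not signalled and
 * the front-end DMA address has not moved since the last check; otherwise
 * the watchdog is re-armed and the GPU gets more time. This is simplified;
 * the driver additionally ignores movement within a small 16-byte window.
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_watchdog {
	uint32_t last_dma_addr;	/* front-end address seen at the last timeout */
};

/* Returns true if the job should be treated as hung and recovered. */
static bool demo_timeout_is_real_hang(struct demo_watchdog *wd,
				      bool fence_done, uint32_t dma_addr)
{
	if (fence_done)
		return false;		/* spurious timeout: work already finished */

	if (dma_addr != wd->last_dma_addr) {
		wd->last_dma_addr = dma_addr;
		return false;		/* still making forward progress */
	}

	return true;			/* no progress since last check: real hang */
}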
+ unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0]; u32 cfg, h1, h2, v1, v2; /* cropped image */ h1 = buf->rect.x; - h2 = buf->buf.width - buf->rect.w - buf->rect.x; + h2 = real_width - buf->rect.w - buf->rect.x; v1 = buf->rect.y; v2 = buf->buf.height - buf->rect.h - buf->rect.y; DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n", buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h, - buf->buf.width, buf->buf.height); + real_width, buf->buf.height); DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2); /* @@ -503,12 +504,13 @@ static void fimc_set_window(struct fimc_context *ctx, static void fimc_src_set_size(struct fimc_context *ctx, struct exynos_drm_ipp_buffer *buf) { + unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0]; u32 cfg; - DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height); + DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height); /* original size */ - cfg = (EXYNOS_ORGISIZE_HORIZONTAL(buf->buf.width) | + cfg = (EXYNOS_ORGISIZE_HORIZONTAL(real_width) | EXYNOS_ORGISIZE_VERTICAL(buf->buf.height)); fimc_write(ctx, cfg, EXYNOS_ORGISIZE); @@ -529,7 +531,7 @@ static void fimc_src_set_size(struct fimc_context *ctx, * for now, we support only ITU601 8 bit mode */ cfg = (EXYNOS_CISRCFMT_ITU601_8BIT | - EXYNOS_CISRCFMT_SOURCEHSIZE(buf->buf.width) | + EXYNOS_CISRCFMT_SOURCEHSIZE(real_width) | EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height)); fimc_write(ctx, cfg, EXYNOS_CISRCFMT); @@ -842,12 +844,13 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc) static void fimc_dst_set_size(struct fimc_context *ctx, struct exynos_drm_ipp_buffer *buf) { + unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0]; u32 cfg, cfg_ext; - DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height); + DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height); /* original size */ - cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(buf->buf.width) | + cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(real_width) | EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height)); fimc_write(ctx, cfg, EXYNOS_ORGOSIZE); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 6e1494fa71b4..bdf5a7655228 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -143,7 +143,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle); /* drop reference from allocate - handle holds it now. */ - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return 0; } @@ -186,7 +186,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev, exynos_gem = to_exynos_gem(obj); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return exynos_gem->size; } @@ -329,13 +329,13 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, return; } - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); /* * decrease obj->refcount one more time because we has already * increased it at exynos_drm_gem_get_dma_addr(). 
*/ - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); } static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem, @@ -383,7 +383,7 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, args->flags = exynos_gem->flags; args->size = exynos_gem->size; - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 35ac66730563..7ba414b52faa 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -492,21 +492,25 @@ static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt) GSC_IN_CHROMA_ORDER_CRCB); break; case DRM_FORMAT_NV21: + cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P); + break; case DRM_FORMAT_NV61: - cfg |= (GSC_IN_CHROMA_ORDER_CRCB | - GSC_IN_YUV420_2P); + cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P); break; case DRM_FORMAT_YUV422: cfg |= GSC_IN_YUV422_3P; break; case DRM_FORMAT_YUV420: + cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P); + break; case DRM_FORMAT_YVU420: - cfg |= GSC_IN_YUV420_3P; + cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P); break; case DRM_FORMAT_NV12: + cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P); + break; case DRM_FORMAT_NV16: - cfg |= (GSC_IN_CHROMA_ORDER_CBCR | - GSC_IN_YUV420_2P); + cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P); break; } @@ -523,30 +527,30 @@ static void gsc_src_set_transf(struct gsc_context *ctx, unsigned int rotation) switch (degree) { case DRM_MODE_ROTATE_0: - if (rotation & DRM_MODE_REFLECT_Y) - cfg |= GSC_IN_ROT_XFLIP; if (rotation & DRM_MODE_REFLECT_X) + cfg |= GSC_IN_ROT_XFLIP; + if (rotation & DRM_MODE_REFLECT_Y) cfg |= GSC_IN_ROT_YFLIP; break; case DRM_MODE_ROTATE_90: cfg |= GSC_IN_ROT_90; - if (rotation & DRM_MODE_REFLECT_Y) - cfg |= GSC_IN_ROT_XFLIP; if (rotation & DRM_MODE_REFLECT_X) + cfg |= GSC_IN_ROT_XFLIP; + if (rotation & DRM_MODE_REFLECT_Y) cfg |= GSC_IN_ROT_YFLIP; break; case DRM_MODE_ROTATE_180: cfg |= GSC_IN_ROT_180; - if (rotation & DRM_MODE_REFLECT_Y) - cfg &= ~GSC_IN_ROT_XFLIP; if (rotation & DRM_MODE_REFLECT_X) + cfg &= ~GSC_IN_ROT_XFLIP; + if (rotation & DRM_MODE_REFLECT_Y) cfg &= ~GSC_IN_ROT_YFLIP; break; case DRM_MODE_ROTATE_270: cfg |= GSC_IN_ROT_270; - if (rotation & DRM_MODE_REFLECT_Y) - cfg &= ~GSC_IN_ROT_XFLIP; if (rotation & DRM_MODE_REFLECT_X) + cfg &= ~GSC_IN_ROT_XFLIP; + if (rotation & DRM_MODE_REFLECT_Y) cfg &= ~GSC_IN_ROT_YFLIP; break; } @@ -577,7 +581,7 @@ static void gsc_src_set_size(struct gsc_context *ctx, cfg &= ~(GSC_SRCIMG_HEIGHT_MASK | GSC_SRCIMG_WIDTH_MASK); - cfg |= (GSC_SRCIMG_WIDTH(buf->buf.width) | + cfg |= (GSC_SRCIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) | GSC_SRCIMG_HEIGHT(buf->buf.height)); gsc_write(cfg, GSC_SRCIMG_SIZE); @@ -672,18 +676,25 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt) GSC_OUT_CHROMA_ORDER_CRCB); break; case DRM_FORMAT_NV21: - case DRM_FORMAT_NV61: cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P); break; + case DRM_FORMAT_NV61: + cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P); + break; case DRM_FORMAT_YUV422: + cfg |= GSC_OUT_YUV422_3P; + break; case DRM_FORMAT_YUV420: + cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P); + break; case DRM_FORMAT_YVU420: - cfg |= GSC_OUT_YUV420_3P; + cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P); break; case DRM_FORMAT_NV12: + cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P); + break; case 
DRM_FORMAT_NV16: - cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | - GSC_OUT_YUV420_2P); + cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P); break; } @@ -868,7 +879,7 @@ static void gsc_dst_set_size(struct gsc_context *ctx, /* original size */ cfg = gsc_read(GSC_DSTIMG_SIZE); cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK); - cfg |= GSC_DSTIMG_WIDTH(buf->buf.width) | + cfg |= GSC_DSTIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) | GSC_DSTIMG_HEIGHT(buf->buf.height); gsc_write(cfg, GSC_DSTIMG_SIZE); @@ -1341,7 +1352,7 @@ static const struct drm_exynos_ipp_limit gsc_5420_limits[] = { }; static const struct drm_exynos_ipp_limit gsc_5433_limits[] = { - { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 2 }, .v = { 16, 8191, 2 }) }, + { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 16 }, .v = { 16, 8191, 2 }) }, { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) }, { IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) }, { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 }, diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index 26374e58c557..b435db8fc916 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -345,27 +345,6 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf, int ret = 0; int i; - /* basic checks */ - if (buf->buf.width == 0 || buf->buf.height == 0) - return -EINVAL; - buf->format = drm_format_info(buf->buf.fourcc); - for (i = 0; i < buf->format->num_planes; i++) { - unsigned int width = (i == 0) ? buf->buf.width : - DIV_ROUND_UP(buf->buf.width, buf->format->hsub); - - if (buf->buf.pitch[i] == 0) - buf->buf.pitch[i] = width * buf->format->cpp[i]; - if (buf->buf.pitch[i] < width * buf->format->cpp[i]) - return -EINVAL; - if (!buf->buf.gem_id[i]) - return -ENOENT; - } - - /* pitch for additional planes must match */ - if (buf->format->num_planes > 2 && - buf->buf.pitch[1] != buf->buf.pitch[2]) - return -EINVAL; - /* get GEM buffers and check their size */ for (i = 0; i < buf->format->num_planes; i++) { unsigned int height = (i == 0) ? buf->buf.height : @@ -428,7 +407,7 @@ enum drm_ipp_size_id { IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX }; -static const enum drm_ipp_size_id limit_id_fallback[IPP_LIMIT_MAX][4] = { +static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = { [IPP_LIMIT_BUFFER] = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER }, [IPP_LIMIT_AREA] = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA, DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER }, @@ -495,12 +474,13 @@ static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf, enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA; struct drm_ipp_limit l; struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v; + int real_width = buf->buf.pitch[0] / buf->format->cpp[0]; if (!limits) return 0; __get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l); - if (!__size_limit_check(buf->buf.width, &l.h) || + if (!__size_limit_check(real_width, &l.h) || !__size_limit_check(buf->buf.height, &l.v)) return -EINVAL; @@ -560,10 +540,62 @@ static int exynos_drm_ipp_check_scale_limits( return 0; } +static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task, + struct exynos_drm_ipp_buffer *buf, + struct exynos_drm_ipp_buffer *src, + struct exynos_drm_ipp_buffer *dst, + bool rotate, bool swap) +{ + const struct exynos_drm_ipp_formats *fmt; + int ret, i; + + fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier, + buf == src ? 
DRM_EXYNOS_IPP_FORMAT_SOURCE : + DRM_EXYNOS_IPP_FORMAT_DESTINATION); + if (!fmt) { + DRM_DEBUG_DRIVER("Task %pK: %s format not supported\n", task, + buf == src ? "src" : "dst"); + return -EINVAL; + } + + /* basic checks */ + if (buf->buf.width == 0 || buf->buf.height == 0) + return -EINVAL; + + buf->format = drm_format_info(buf->buf.fourcc); + for (i = 0; i < buf->format->num_planes; i++) { + unsigned int width = (i == 0) ? buf->buf.width : + DIV_ROUND_UP(buf->buf.width, buf->format->hsub); + + if (buf->buf.pitch[i] == 0) + buf->buf.pitch[i] = width * buf->format->cpp[i]; + if (buf->buf.pitch[i] < width * buf->format->cpp[i]) + return -EINVAL; + if (!buf->buf.gem_id[i]) + return -ENOENT; + } + + /* pitch for additional planes must match */ + if (buf->format->num_planes > 2 && + buf->buf.pitch[1] != buf->buf.pitch[2]) + return -EINVAL; + + /* check driver limits */ + ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits, + fmt->num_limits, + rotate, + buf == dst ? swap : false); + if (ret) + return ret; + ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect, + fmt->limits, + fmt->num_limits, swap); + return ret; +} + static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task) { struct exynos_drm_ipp *ipp = task->ipp; - const struct exynos_drm_ipp_formats *src_fmt, *dst_fmt; struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst; unsigned int rotation = task->transform.rotation; int ret = 0; @@ -607,37 +639,11 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task) return -EINVAL; } - src_fmt = __ipp_format_get(ipp, src->buf.fourcc, src->buf.modifier, - DRM_EXYNOS_IPP_FORMAT_SOURCE); - if (!src_fmt) { - DRM_DEBUG_DRIVER("Task %pK: src format not supported\n", task); - return -EINVAL; - } - ret = exynos_drm_ipp_check_size_limits(src, src_fmt->limits, - src_fmt->num_limits, - rotate, false); - if (ret) - return ret; - ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect, - src_fmt->limits, - src_fmt->num_limits, swap); + ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap); if (ret) return ret; - dst_fmt = __ipp_format_get(ipp, dst->buf.fourcc, dst->buf.modifier, - DRM_EXYNOS_IPP_FORMAT_DESTINATION); - if (!dst_fmt) { - DRM_DEBUG_DRIVER("Task %pK: dst format not supported\n", task); - return -EINVAL; - } - ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits, - dst_fmt->num_limits, - false, swap); - if (ret) - return ret; - ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect, - dst_fmt->limits, - dst_fmt->num_limits, swap); + ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap); if (ret) return ret; diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 38a2a7f1204b..7098c6d35266 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -132,7 +132,7 @@ static void exynos_drm_plane_reset(struct drm_plane *plane) if (plane->state) { exynos_state = to_exynos_plane_state(plane->state); if (exynos_state->base.fb) - drm_framebuffer_unreference(exynos_state->base.fb); + drm_framebuffer_put(exynos_state->base.fb); kfree(exynos_state); plane->state = NULL; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 1a76dd3d52e1..a820a68429b9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -168,9 +168,9 @@ static void rotator_dst_set_transf(struct rot_context *rot, val &= 
~ROT_CONTROL_FLIP_MASK; if (rotation & DRM_MODE_REFLECT_X) - val |= ROT_CONTROL_FLIP_HORIZONTAL; - if (rotation & DRM_MODE_REFLECT_Y) val |= ROT_CONTROL_FLIP_VERTICAL; + if (rotation & DRM_MODE_REFLECT_Y) + val |= ROT_CONTROL_FLIP_HORIZONTAL; val &= ~ROT_CONTROL_ROT_MASK; diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index 91d4382343d0..0ddb6eec7b11 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c @@ -30,6 +30,7 @@ #define scaler_write(cfg, offset) writel(cfg, scaler->regs + (offset)) #define SCALER_MAX_CLK 4 #define SCALER_AUTOSUSPEND_DELAY 2000 +#define SCALER_RESET_WAIT_RETRIES 100 struct scaler_data { const char *clk_name[SCALER_MAX_CLK]; @@ -51,9 +52,9 @@ struct scaler_context { static u32 scaler_get_format(u32 drm_fmt) { switch (drm_fmt) { - case DRM_FORMAT_NV21: - return SCALER_YUV420_2P_UV; case DRM_FORMAT_NV12: + return SCALER_YUV420_2P_UV; + case DRM_FORMAT_NV21: return SCALER_YUV420_2P_VU; case DRM_FORMAT_YUV420: return SCALER_YUV420_3P; @@ -63,15 +64,15 @@ static u32 scaler_get_format(u32 drm_fmt) return SCALER_YUV422_1P_UYVY; case DRM_FORMAT_YVYU: return SCALER_YUV422_1P_YVYU; - case DRM_FORMAT_NV61: - return SCALER_YUV422_2P_UV; case DRM_FORMAT_NV16: + return SCALER_YUV422_2P_UV; + case DRM_FORMAT_NV61: return SCALER_YUV422_2P_VU; case DRM_FORMAT_YUV422: return SCALER_YUV422_3P; - case DRM_FORMAT_NV42: - return SCALER_YUV444_2P_UV; case DRM_FORMAT_NV24: + return SCALER_YUV444_2P_UV; + case DRM_FORMAT_NV42: return SCALER_YUV444_2P_VU; case DRM_FORMAT_YUV444: return SCALER_YUV444_3P; @@ -100,6 +101,23 @@ static u32 scaler_get_format(u32 drm_fmt) return 0; } +static inline int scaler_reset(struct scaler_context *scaler) +{ + int retry = SCALER_RESET_WAIT_RETRIES; + + scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG); + do { + cpu_relax(); + } while (retry > 1 && + scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET); + do { + cpu_relax(); + scaler_write(1, SCALER_INT_EN); + } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1); + + return retry ? 
0 : -EIO; +} + static inline void scaler_enable_int(struct scaler_context *scaler) { u32 val; @@ -354,9 +372,13 @@ static int scaler_commit(struct exynos_drm_ipp *ipp, u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc); struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect; - scaler->task = task; - pm_runtime_get_sync(scaler->dev); + if (scaler_reset(scaler)) { + pm_runtime_put(scaler->dev); + return -EIO; + } + + scaler->task = task; scaler_set_src_fmt(scaler, src_fmt); scaler_set_src_base(scaler, &task->src); @@ -394,7 +416,11 @@ static inline void scaler_disable_int(struct scaler_context *scaler) static inline u32 scaler_get_int_status(struct scaler_context *scaler) { - return scaler_read(SCALER_INT_STATUS); + u32 val = scaler_read(SCALER_INT_STATUS); + + scaler_write(val, SCALER_INT_STATUS); + + return val; } static inline int scaler_task_done(u32 val) diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h index 4704a993cbb7..16b39734115c 100644 --- a/drivers/gpu/drm/exynos/regs-gsc.h +++ b/drivers/gpu/drm/exynos/regs-gsc.h @@ -138,6 +138,7 @@ #define GSC_OUT_YUV420_3P (3 << 4) #define GSC_OUT_YUV422_1P (4 << 4) #define GSC_OUT_YUV422_2P (5 << 4) +#define GSC_OUT_YUV422_3P (6 << 4) #define GSC_OUT_YUV444 (7 << 4) #define GSC_OUT_TILE_TYPE_MASK (1 << 2) #define GSC_OUT_TILE_C_16x8 (0 << 2) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index b51c05d03f14..7f562410f9cf 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -862,6 +862,7 @@ static int cmd_reg_handler(struct parser_exec_state *s, { struct intel_vgpu *vgpu = s->vgpu; struct intel_gvt *gvt = vgpu->gvt; + u32 ctx_sr_ctl; if (offset + 4 > gvt->device_info.mmio_size) { gvt_vgpu_err("%s access to (%x) outside of MMIO range\n", @@ -894,6 +895,28 @@ static int cmd_reg_handler(struct parser_exec_state *s, patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE); } + /* TODO + * Right now only scan LRI command on KBL and in inhibit context. + * It's good enough to support initializing mmio by lri command in + * vgpu inhibit context on KBL. 
+ */ + if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) && + intel_gvt_mmio_is_in_ctx(gvt, offset) && + !strncmp(cmd, "lri", 3)) { + intel_gvt_hypervisor_read_gpa(s->vgpu, + s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4); + /* check inhibit context */ + if (ctx_sr_ctl & 1) { + u32 data = cmd_val(s, index + 1); + + if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset)) + intel_vgpu_mask_mmio_write(vgpu, + offset, &data, 4); + else + vgpu_vreg(vgpu, offset) = data; + } + } + /* TODO: Update the global mask if this MMIO is a masked-MMIO */ intel_gvt_mmio_set_cmd_accessed(gvt, offset); return 0; diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 6d8180e8d1e2..4b072ade8c38 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -196,7 +196,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= - (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | (PORT_B << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { @@ -216,7 +216,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= - (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | (PORT_C << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { @@ -236,7 +236,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= - (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | (PORT_D << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 23296547da95..4efec8fa6c1d 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1592,6 +1592,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu) vgpu_free_mm(mm); return ERR_PTR(-ENOMEM); } + mm->ggtt_mm.last_partial_off = -1UL; return mm; } @@ -1616,6 +1617,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref) invalidate_ppgtt_mm(mm); } else { vfree(mm->ggtt_mm.virtual_ggtt); + mm->ggtt_mm.last_partial_off = -1UL; } vgpu_free_mm(mm); @@ -1868,6 +1870,62 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, bytes); + /* If ggtt entry size is 8 bytes, and it's split into two 4 bytes + * write, we assume the two 4 bytes writes are consecutive. 
+ * Otherwise, we abort and report error + */ + if (bytes < info->gtt_entry_size) { + if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) { + /* the first partial part*/ + ggtt_mm->ggtt_mm.last_partial_off = off; + ggtt_mm->ggtt_mm.last_partial_data = e.val64; + return 0; + } else if ((g_gtt_index == + (ggtt_mm->ggtt_mm.last_partial_off >> + info->gtt_entry_size_shift)) && + (off != ggtt_mm->ggtt_mm.last_partial_off)) { + /* the second partial part */ + + int last_off = ggtt_mm->ggtt_mm.last_partial_off & + (info->gtt_entry_size - 1); + + memcpy((void *)&e.val64 + last_off, + (void *)&ggtt_mm->ggtt_mm.last_partial_data + + last_off, bytes); + + ggtt_mm->ggtt_mm.last_partial_off = -1UL; + } else { + int last_offset; + + gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n", + ggtt_mm->ggtt_mm.last_partial_off, off, + bytes, info->gtt_entry_size); + + /* set host ggtt entry to scratch page and clear + * virtual ggtt entry as not present for last + * partially write offset + */ + last_offset = ggtt_mm->ggtt_mm.last_partial_off & + (~(info->gtt_entry_size - 1)); + + ggtt_get_host_entry(ggtt_mm, &m, last_offset); + ggtt_invalidate_pte(vgpu, &m); + ops->set_pfn(&m, gvt->gtt.scratch_mfn); + ops->clear_present(&m); + ggtt_set_host_entry(ggtt_mm, &m, last_offset); + ggtt_invalidate(gvt->dev_priv); + + ggtt_get_guest_entry(ggtt_mm, &e, last_offset); + ops->clear_present(&e); + ggtt_set_guest_entry(ggtt_mm, &e, last_offset); + + ggtt_mm->ggtt_mm.last_partial_off = off; + ggtt_mm->ggtt_mm.last_partial_data = e.val64; + + return 0; + } + } + if (ops->test_present(&e)) { gfn = ops->get_pfn(&e); m = e; diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 3792f2b7f4ff..97e62647418a 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -150,6 +150,8 @@ struct intel_vgpu_mm { } ppgtt_mm; struct { void *virtual_ggtt; + unsigned long last_partial_off; + u64 last_partial_data; } ggtt_mm; }; }; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 05d15a095310..858967daf04b 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -268,6 +268,8 @@ struct intel_gvt_mmio { #define F_CMD_ACCESSED (1 << 5) /* This reg could be accessed by unaligned address */ #define F_UNALIGN (1 << 6) +/* This reg is saved/restored in context */ +#define F_IN_CTX (1 << 7) struct gvt_mmio_block *mmio_block; unsigned int num_mmio_block; @@ -639,6 +641,33 @@ static inline bool intel_gvt_mmio_has_mode_mask( return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK; } +/** + * intel_gvt_mmio_is_in_ctx - check if a MMIO has in-ctx mask + * @gvt: a GVT device + * @offset: register offset + * + * Returns: + * True if a MMIO has a in-context mask, false if it isn't. 
+ * + */ +static inline bool intel_gvt_mmio_is_in_ctx( + struct intel_gvt *gvt, unsigned int offset) +{ + return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX; +} + +/** + * intel_gvt_mmio_set_in_ctx - mask a MMIO in logical context + * @gvt: a GVT device + * @offset: register offset + * + */ +static inline void intel_gvt_mmio_set_in_ctx( + struct intel_gvt *gvt, unsigned int offset) +{ + gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX; +} + int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu); void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); int intel_gvt_debugfs_init(struct intel_gvt *gvt); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index bcbc47a88a70..8f1caacdc78a 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -3046,6 +3046,30 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, } /** + * intel_vgpu_mask_mmio_write - write mask register + * @vgpu: a vGPU + * @offset: access offset + * @p_data: write data buffer + * @bytes: access data length + * + * Returns: + * Zero on success, negative error code if failed. + */ +int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u32 mask, old_vreg; + + old_vreg = vgpu_vreg(vgpu, offset); + write_vreg(vgpu, offset, p_data, bytes); + mask = vgpu_vreg(vgpu, offset) >> 16; + vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) | + (vgpu_vreg(vgpu, offset) & mask); + + return 0; +} + +/** * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be * force-nopriv register * diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 71b620875943..dac8c6401e26 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -98,4 +98,6 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, void *pdata, unsigned int bytes, bool is_read); +int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes); #endif diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 0f949554d118..5ca9caf7552a 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -581,7 +581,9 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) for (mmio = gvt->engine_mmio_list.mmio; i915_mmio_reg_valid(mmio->reg); mmio++) { - if (mmio->in_context) + if (mmio->in_context) { gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++; + intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg); + } } } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7014a96546f4..52f3b91d14fd 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2245,9 +2245,6 @@ static inline struct scatterlist *____sg_next(struct scatterlist *sg) **/ static inline struct scatterlist *__sg_next(struct scatterlist *sg) { -#ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); -#endif return sg_is_last(sg) ? 
NULL : ____sg_next(sg); } diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d44ad7bc1e94..17c5097721e8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2002,7 +2002,6 @@ int i915_gem_fault(struct vm_fault *vmf) bool write = !!(vmf->flags & FAULT_FLAG_WRITE); struct i915_vma *vma; pgoff_t page_offset; - unsigned int flags; int ret; /* We don't use vmf->pgoff since that has the fake offset */ @@ -2038,27 +2037,34 @@ int i915_gem_fault(struct vm_fault *vmf) goto err_unlock; } - /* If the object is smaller than a couple of partial vma, it is - * not worth only creating a single partial vma - we may as well - * clear enough space for the full object. - */ - flags = PIN_MAPPABLE; - if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT) - flags |= PIN_NONBLOCK | PIN_NONFAULT; /* Now pin it into the GTT as needed */ - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags); + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, + PIN_MAPPABLE | + PIN_NONBLOCK | + PIN_NONFAULT); if (IS_ERR(vma)) { /* Use a partial view if it is bigger than available space */ struct i915_ggtt_view view = compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); + unsigned int flags; - /* Userspace is now writing through an untracked VMA, abandon + flags = PIN_MAPPABLE; + if (view.type == I915_GGTT_VIEW_NORMAL) + flags |= PIN_NONBLOCK; /* avoid warnings for pinned */ + + /* + * Userspace is now writing through an untracked VMA, abandon * all hope that the hardware is able to track future writes. */ obj->frontbuffer_ggtt_origin = ORIGIN_CPU; - vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); + vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); + if (IS_ERR(vma) && !view.type) { + flags = PIN_MAPPABLE; + view.type = I915_GGTT_VIEW_PARTIAL; + vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); + } } if (IS_ERR(vma)) { ret = PTR_ERR(vma); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4a02747ac658..c16cb025755e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1998,10 +1998,38 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) { - u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); + u32 hotplug_status = 0, hotplug_status_mask; + int i; + + if (IS_G4X(dev_priv) || + IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | + DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; + else + hotplug_status_mask = HOTPLUG_INT_STATUS_I915; - if (hotplug_status) + /* + * We absolutely have to clear all the pending interrupt + * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port + * interrupt bit won't have an edge, and the i965/g4x + * edge triggered IIR will not notice that an interrupt + * is still pending. 
We can't use PORT_HOTPLUG_EN to + * guarantee the edge as the act of toggling the enable + * bits can itself generate a new hotplug interrupt :( + */ + for (i = 0; i < 10; i++) { + u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; + + if (tmp == 0) + return hotplug_status; + + hotplug_status |= tmp; I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + } + + WARN_ONCE(1, + "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", + I915_READ(PORT_HOTPLUG_STAT)); return hotplug_status; } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 9324d476e0a7..0531c01c3604 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -109,7 +109,7 @@ vma_create(struct drm_i915_gem_object *obj, obj->base.size >> PAGE_SHIFT)); vma->size = view->partial.size; vma->size <<= PAGE_SHIFT; - GEM_BUG_ON(vma->size >= obj->base.size); + GEM_BUG_ON(vma->size > obj->base.size); } else if (view->type == I915_GGTT_VIEW_ROTATED) { vma->size = intel_rotation_info_size(&view->rotated); vma->size <<= PAGE_SHIFT; diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 32b1a6cdecfc..d3443125e661 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -197,8 +197,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) priv->io_base = regs; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi"); - if (!res) - return -EINVAL; + if (!res) { + ret = -EINVAL; + goto free_drm; + } /* Simply ioremap since it may be a shared register zone */ regs = devm_ioremap(dev, res->start, resource_size(res)); if (!regs) { @@ -215,8 +217,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc"); - if (!res) - return -EINVAL; + if (!res) { + ret = -EINVAL; + goto free_drm; + } /* Simply ioremap since it may be a shared register zone */ regs = devm_ioremap(dev, res->start, resource_size(res)); if (!regs) { diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 501d2d290e9c..70dce544984e 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -55,6 +55,9 @@ nv04_display_create(struct drm_device *dev) nouveau_display(dev)->init = nv04_display_init; nouveau_display(dev)->fini = nv04_display_fini; + /* Pre-nv50 doesn't support atomic, so don't expose the ioctls */ + dev->driver->driver_features &= ~DRIVER_ATOMIC; + nouveau_hw_save_vga_fonts(dev, 1); nv04_crtc_create(dev, 0); diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index b83465ae7c1b..9bae4db84cfb 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -1585,8 +1585,9 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe) *****************************************************************************/ static void -nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock) +nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock) { + struct nouveau_drm *drm = nouveau_drm(state->dev); struct nv50_disp *disp = nv50_disp(drm->dev); struct nv50_core *core = disp->core; struct nv50_mstm *mstm; @@ -1618,6 +1619,22 @@ nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock) } static void +nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock) +{ + struct drm_plane_state *new_plane_state; + 
struct drm_plane *plane; + int i; + + for_each_new_plane_in_state(state, plane, new_plane_state, i) { + struct nv50_wndw *wndw = nv50_wndw(plane); + if (interlock[wndw->interlock.type] & wndw->interlock.data) { + if (wndw->func->update) + wndw->func->update(wndw, interlock); + } + } +} + +static void nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; @@ -1684,7 +1701,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) help->disable(encoder); interlock[NV50_DISP_INTERLOCK_CORE] |= 1; if (outp->flush_disable) { - nv50_disp_atomic_commit_core(drm, interlock); + nv50_disp_atomic_commit_wndw(state, interlock); + nv50_disp_atomic_commit_core(state, interlock); memset(interlock, 0x00, sizeof(interlock)); } } @@ -1693,15 +1711,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) /* Flush disable. */ if (interlock[NV50_DISP_INTERLOCK_CORE]) { if (atom->flush_disable) { - for_each_new_plane_in_state(state, plane, new_plane_state, i) { - struct nv50_wndw *wndw = nv50_wndw(plane); - if (interlock[wndw->interlock.type] & wndw->interlock.data) { - if (wndw->func->update) - wndw->func->update(wndw, interlock); - } - } - - nv50_disp_atomic_commit_core(drm, interlock); + nv50_disp_atomic_commit_wndw(state, interlock); + nv50_disp_atomic_commit_core(state, interlock); memset(interlock, 0x00, sizeof(interlock)); } } @@ -1762,18 +1773,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) } /* Flush update. */ - for_each_new_plane_in_state(state, plane, new_plane_state, i) { - struct nv50_wndw *wndw = nv50_wndw(plane); - if (interlock[wndw->interlock.type] & wndw->interlock.data) { - if (wndw->func->update) - wndw->func->update(wndw, interlock); - } - } + nv50_disp_atomic_commit_wndw(state, interlock); if (interlock[NV50_DISP_INTERLOCK_CORE]) { if (interlock[NV50_DISP_INTERLOCK_BASE] || + interlock[NV50_DISP_INTERLOCK_OVLY] || + interlock[NV50_DISP_INTERLOCK_WNDW] || !atom->state.legacy_cursor_update) - nv50_disp_atomic_commit_core(drm, interlock); + nv50_disp_atomic_commit_core(state, interlock); else disp->core->func->update(disp->core, interlock, false); } @@ -1871,7 +1878,7 @@ nv50_disp_atomic_commit(struct drm_device *dev, nv50_disp_atomic_commit_tail(state); drm_for_each_crtc(crtc, dev) { - if (crtc->state->enable) { + if (crtc->state->active) { if (!drm->have_disp_power_ref) { drm->have_disp_power_ref = true; return 0; @@ -2119,10 +2126,6 @@ nv50_display_destroy(struct drm_device *dev) kfree(disp); } -MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)"); -static int nouveau_atomic = 0; -module_param_named(atomic, nouveau_atomic, int, 0400); - int nv50_display_create(struct drm_device *dev) { @@ -2147,8 +2150,6 @@ nv50_display_create(struct drm_device *dev) disp->disp = &nouveau_display(dev)->disp; dev->mode_config.funcs = &nv50_disp_func; dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP; - if (nouveau_atomic) - dev->driver->driver_features |= DRIVER_ATOMIC; /* small shared memory area we use for notifiers and semaphores */ ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index debbbf0fd4bd..408b955e5c39 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -267,6 +267,7 @@ nouveau_backlight_init(struct drm_device *dev) struct nouveau_drm *drm = nouveau_drm(dev); struct nvif_device *device = &drm->client.device; struct 
drm_connector *connector; + struct drm_connector_list_iter conn_iter; INIT_LIST_HEAD(&drm->bl_connectors); @@ -275,7 +276,8 @@ nouveau_backlight_init(struct drm_device *dev) return 0; } - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && connector->connector_type != DRM_MODE_CONNECTOR_eDP) continue; @@ -292,7 +294,7 @@ nouveau_backlight_init(struct drm_device *dev) break; } } - + drm_connector_list_iter_end(&conn_iter); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 7b557c354307..af68eae4c626 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -1208,14 +1208,19 @@ nouveau_connector_create(struct drm_device *dev, int index) struct nouveau_display *disp = nouveau_display(dev); struct nouveau_connector *nv_connector = NULL; struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; int type, ret = 0; bool dummy; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { nv_connector = nouveau_connector(connector); - if (nv_connector->index == index) + if (nv_connector->index == index) { + drm_connector_list_iter_end(&conn_iter); return connector; + } } + drm_connector_list_iter_end(&conn_iter); nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); if (!nv_connector) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index a4d1a059bd3d..dc7454e7f19a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -33,6 +33,7 @@ #include <drm/drm_encoder.h> #include <drm/drm_dp_helper.h> #include "nouveau_crtc.h" +#include "nouveau_encoder.h" struct nvkm_i2c_port; @@ -60,19 +61,46 @@ static inline struct nouveau_connector *nouveau_connector( return container_of(con, struct nouveau_connector, base); } +static inline bool +nouveau_connector_is_mst(struct drm_connector *connector) +{ + const struct nouveau_encoder *nv_encoder; + const struct drm_encoder *encoder; + + if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) + return false; + + nv_encoder = find_encoder(connector, DCB_OUTPUT_ANY); + if (!nv_encoder) + return false; + + encoder = &nv_encoder->base.base; + return encoder->encoder_type == DRM_MODE_ENCODER_DPMST; +} + +#define nouveau_for_each_non_mst_connector_iter(connector, iter) \ + drm_for_each_connector_iter(connector, iter) \ + for_each_if(!nouveau_connector_is_mst(connector)) + static inline struct nouveau_connector * nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc) { struct drm_device *dev = nv_crtc->base.dev; struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct nouveau_connector *nv_connector = NULL; struct drm_crtc *crtc = to_drm_crtc(nv_crtc); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->encoder && connector->encoder->crtc == crtc) - return nouveau_connector(connector); + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { + if (connector->encoder && connector->encoder->crtc == crtc) { + nv_connector = nouveau_connector(connector); + break; + } } + 
drm_connector_list_iter_end(&conn_iter); - return NULL; + return nv_connector; } struct drm_connector * diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 774b429142bc..ec7861457b84 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -404,6 +404,7 @@ nouveau_display_init(struct drm_device *dev) struct nouveau_display *disp = nouveau_display(dev); struct nouveau_drm *drm = nouveau_drm(dev); struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; int ret; ret = disp->init(dev); @@ -411,10 +412,12 @@ nouveau_display_init(struct drm_device *dev) return ret; /* enable hotplug interrupts */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { struct nouveau_connector *conn = nouveau_connector(connector); nvif_notify_get(&conn->hpd); } + drm_connector_list_iter_end(&conn_iter); /* enable flip completion events */ nvif_notify_get(&drm->flip); @@ -427,6 +430,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) struct nouveau_display *disp = nouveau_display(dev); struct nouveau_drm *drm = nouveau_drm(dev); struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; if (!suspend) { if (drm_drv_uses_atomic_modeset(dev)) @@ -439,10 +443,12 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) nvif_notify_put(&drm->flip); /* disable hotplug interrupts */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { struct nouveau_connector *conn = nouveau_connector(connector); nvif_notify_put(&conn->hpd); } + drm_connector_list_iter_end(&conn_iter); drm_kms_helper_poll_disable(dev); disp->fini(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 775443c9af94..f5d3158f0378 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -81,6 +81,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, " int nouveau_modeset = -1; module_param_named(modeset, nouveau_modeset, int, 0400); +MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)"); +static int nouveau_atomic = 0; +module_param_named(atomic, nouveau_atomic, int, 0400); + MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)"); static int nouveau_runtime_pm = -1; module_param_named(runpm, nouveau_runtime_pm, int, 0400); @@ -509,6 +513,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev, pci_set_master(pdev); + if (nouveau_atomic) + driver_pci.driver_features |= DRIVER_ATOMIC; + ret = drm_get_pci_dev(pdev, pent, &driver_pci); if (ret) { nvkm_device_del(&device); @@ -874,22 +881,11 @@ nouveau_pmops_runtime_resume(struct device *dev) static int nouveau_pmops_runtime_idle(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); - struct nouveau_drm *drm = nouveau_drm(drm_dev); - struct drm_crtc *crtc; - if (!nouveau_pmops_runtime()) { pm_runtime_forbid(dev); return -EBUSY; } - list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) { - if (crtc->enabled) { - DRM_DEBUG_DRIVER("failing to power off - crtc active\n"); - return -EBUSY; - } - } pm_runtime_mark_last_busy(dev); pm_runtime_autosuspend(dev); /* we don't want the main 
rpm_idle to call suspend - we want to autosuspend */ diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 300daee74209..e6ccafcb9c41 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -616,7 +616,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, struct nouveau_bo *nvbo; uint32_t data; - if (unlikely(r->bo_index > req->nr_buffers)) { + if (unlikely(r->bo_index >= req->nr_buffers)) { NV_PRINTK(err, cli, "reloc bo index invalid\n"); ret = -EINVAL; break; @@ -626,7 +626,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, if (b->presumed.valid) continue; - if (unlikely(r->reloc_bo_index > req->nr_buffers)) { + if (unlikely(r->reloc_bo_index >= req->nr_buffers)) { NV_PRINTK(err, cli, "reloc container bo index invalid\n"); ret = -EINVAL; break; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c index 73b5d46104bd..434d2fc5bb1c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c @@ -140,6 +140,9 @@ nvkm_fb_init(struct nvkm_subdev *subdev) if (fb->func->init) fb->func->init(fb); + if (fb->func->init_remapper) + fb->func->init_remapper(fb); + if (fb->func->init_page) { ret = fb->func->init_page(fb); if (WARN_ON(ret)) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c index dffe1f5e1071..8205ce436b3e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c @@ -37,6 +37,14 @@ gp100_fb_init_unkn(struct nvkm_fb *base) } void +gp100_fb_init_remapper(struct nvkm_fb *fb) +{ + struct nvkm_device *device = fb->subdev.device; + /* Disable address remapper. 
*/ + nvkm_mask(device, 0x100c14, 0x00040000, 0x00000000); +} + +void gp100_fb_init(struct nvkm_fb *base) { struct gf100_fb *fb = gf100_fb(base); @@ -56,6 +64,7 @@ gp100_fb = { .dtor = gf100_fb_dtor, .oneinit = gf100_fb_oneinit, .init = gp100_fb_init, + .init_remapper = gp100_fb_init_remapper, .init_page = gm200_fb_init_page, .init_unkn = gp100_fb_init_unkn, .ram_new = gp100_ram_new, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c index b84b9861ef26..b4d74e815674 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c @@ -31,6 +31,7 @@ gp102_fb = { .dtor = gf100_fb_dtor, .oneinit = gf100_fb_oneinit, .init = gp100_fb_init, + .init_remapper = gp100_fb_init_remapper, .init_page = gm200_fb_init_page, .ram_new = gp100_ram_new, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h index 2857f31466bf..1e4ad61c19e1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h @@ -11,6 +11,7 @@ struct nvkm_fb_func { u32 (*tags)(struct nvkm_fb *); int (*oneinit)(struct nvkm_fb *); void (*init)(struct nvkm_fb *); + void (*init_remapper)(struct nvkm_fb *); int (*init_page)(struct nvkm_fb *); void (*init_unkn)(struct nvkm_fb *); void (*intr)(struct nvkm_fb *); @@ -69,5 +70,6 @@ int gf100_fb_init_page(struct nvkm_fb *); int gm200_fb_init_page(struct nvkm_fb *); +void gp100_fb_init_remapper(struct nvkm_fb *); void gp100_fb_init_unkn(struct nvkm_fb *); #endif diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile index 2589f4acd5ae..9c81301d0eed 100644 --- a/drivers/gpu/drm/sun4i/Makefile +++ b/drivers/gpu/drm/sun4i/Makefile @@ -32,7 +32,10 @@ obj-$(CONFIG_DRM_SUN4I) += sun4i-tcon.o obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o -obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o sun4i-frontend.o +obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o +ifdef CONFIG_DRM_SUN4I_BACKEND +obj-$(CONFIG_DRM_SUN4I) += sun4i-frontend.o +endif obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o obj-$(CONFIG_DRM_SUN6I_DSI) += sun6i-dsi.o obj-$(CONFIG_DRM_SUN8I_DW_HDMI) += sun8i-drm-hdmi.o diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 776c1513e582..a2bd5876c633 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -398,7 +398,7 @@ int tegra_drm_submit(struct tegra_drm_context *context, * unaligned offset is malformed and cause commands stream * corruption on the buffer address relocation. 
*/ - if (offset & 3 || offset >= obj->gem.size) { + if (offset & 3 || offset > obj->gem.size) { err = -EINVAL; goto fail; } diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 2ebdc6d5a76e..d5583190f3e4 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -137,7 +137,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, if (cmd > (char *) urb->transfer_buffer) { /* Send partial buffer remaining before exiting */ - int len = cmd - (char *) urb->transfer_buffer; + int len; + if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length) + *cmd++ = 0xAF; + len = cmd - (char *) urb->transfer_buffer; ret = udl_submit_urb(dev, urb, len); bytes_sent += len; } else diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c index 0c87b1ac6b68..b992644c17e6 100644 --- a/drivers/gpu/drm/udl/udl_transfer.c +++ b/drivers/gpu/drm/udl/udl_transfer.c @@ -153,11 +153,11 @@ static void udl_compress_hline16( raw_pixels_count_byte = cmd++; /* we'll know this later */ raw_pixel_start = pixel; - cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1, - min((int)(pixel_end - pixel) / bpp, - (int)(cmd_buffer_end - cmd) / 2))) * bpp; + cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL, + (unsigned long)(pixel_end - pixel) / bpp, + (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp; - prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp); + prefetch_range((void *) pixel, cmd_pixel_end - pixel); pixel_val16 = get_pixel_val16(pixel, bpp); while (pixel < cmd_pixel_end) { @@ -193,6 +193,9 @@ static void udl_compress_hline16( if (pixel > raw_pixel_start) { /* finalize last RAW span */ *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF; + } else { + /* undo unused byte */ + cmd--; } *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF; diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index f1d5f76e9c33..d88073e7d22d 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -218,6 +218,9 @@ static int host1x_probe(struct platform_device *pdev) return err; } + if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) + goto skip_iommu; + host->group = iommu_group_get(&pdev->dev); if (host->group) { struct iommu_domain_geometry *geometry; diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index e2f4a4d93d20..527a1cddb14f 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c @@ -569,7 +569,8 @@ void host1x_job_unpin(struct host1x_job *job) for (i = 0; i < job->num_unpins; i++) { struct host1x_job_unpin_data *unpin = &job->unpins[i]; - if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) { + if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && + unpin->size && host->domain) { iommu_unmap(host->domain, job->addr_phys[i], unpin->size); free_iova(&host->iova, diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index f858cc72011d..3942ee61bd1c 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1952,6 +1952,8 @@ static int hid_device_probe(struct device *dev) } hdev->io_started = false; + clear_bit(ffs(HID_STAT_REPROBED), &hdev->status); + if (!hdev->driver) { id = hid_match_device(hdev, hdrv); if (id == NULL) { @@ -2215,7 +2217,8 @@ static int __hid_bus_reprobe_drivers(struct device *dev, void *data) struct hid_device *hdev = to_hid_device(dev); if (hdev->driver == hdrv && - !hdrv->match(hdev, hid_ignore_special_drivers)) + !hdrv->match(hdev, hid_ignore_special_drivers) && + !test_and_set_bit(ffs(HID_STAT_REPROBED), 
&hdev->status)) return device_reprobe(dev); return 0; diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 8469b6964ff6..b48100236df8 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -1154,6 +1154,8 @@ copy_rest: goto out; if (list->tail > list->head) { len = list->tail - list->head; + if (len > count) + len = count; if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) { ret = -EFAULT; @@ -1163,6 +1165,8 @@ copy_rest: list->head += len; } else { len = HID_DEBUG_BUFSIZE - list->head; + if (len > count) + len = count; if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) { ret = -EFAULT; @@ -1170,7 +1174,9 @@ copy_rest: } list->head = 0; ret += len; - goto copy_rest; + count -= len; + if (count > 0) + goto copy_rest; } } diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index c1652bb7bd15..eae0cb3ddec6 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -484,7 +484,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid) return; } - if ((ret_size > size) || (ret_size <= 2)) { + if ((ret_size > size) || (ret_size < 2)) { dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", __func__, size, ret_size); return; diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index e3ce233f8bdc..23872d08308c 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -36,6 +36,7 @@ #include <linux/hiddev.h> #include <linux/compat.h> #include <linux/vmalloc.h> +#include <linux/nospec.h> #include "usbhid.h" #ifdef CONFIG_USB_DYNAMIC_MINORS @@ -469,10 +470,14 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, if (uref->field_index >= report->maxfield) goto inval; + uref->field_index = array_index_nospec(uref->field_index, + report->maxfield); field = report->field[uref->field_index]; if (uref->usage_index >= field->maxusage) goto inval; + uref->usage_index = array_index_nospec(uref->usage_index, + field->maxusage); uref->usage_code = field->usage[uref->usage_index].hid; @@ -499,6 +504,8 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, if (uref->field_index >= report->maxfield) goto inval; + uref->field_index = array_index_nospec(uref->field_index, + report->maxfield); field = report->field[uref->field_index]; @@ -753,6 +760,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (finfo.field_index >= report->maxfield) break; + finfo.field_index = array_index_nospec(finfo.field_index, + report->maxfield); field = report->field[finfo.field_index]; memset(&finfo, 0, sizeof(finfo)); @@ -797,6 +806,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (cinfo.index >= hid->maxcollection) break; + cinfo.index = array_index_nospec(cinfo.index, + hid->maxcollection); cinfo.type = hid->collection[cinfo.index].type; cinfo.usage = hid->collection[cinfo.index].usage; diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 0bb44d0088ed..ad7afa74d365 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -3365,8 +3365,14 @@ void wacom_setup_device_quirks(struct wacom *wacom) if (features->type >= INTUOSHT && features->type <= BAMBOO_PT) features->device_type |= WACOM_DEVICETYPE_PAD; - features->x_max = 4096; - features->y_max = 4096; + if (features->type == INTUOSHT2) { + features->x_max = features->x_max / 10; + features->y_max = features->y_max / 10; + } + else { + features->x_max = 4096; + 
features->y_max = 4096; + } } else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) { features->device_type |= WACOM_DEVICETYPE_PAD; diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index 4a34f311e1ff..6ec65adaba49 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c @@ -647,10 +647,10 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap, if (bit_adap->getscl == NULL) adap->quirks = &i2c_bit_quirk_no_clk_stretch; - /* Bring bus to a known state. Looks like STOP if bus is not free yet */ - setscl(bit_adap, 1); - udelay(bit_adap->udelay); - setsda(bit_adap, 1); + /* + * We tried forcing SCL/SDA to an initial state here. But that caused a + * regression, sadly. Check Bugzilla #200045 for details. + */ ret = add_adapter(adap); if (ret < 0) diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c index 44cffad43701..c4d176f5ed79 100644 --- a/drivers/i2c/busses/i2c-cht-wc.c +++ b/drivers/i2c/busses/i2c-cht-wc.c @@ -234,7 +234,8 @@ static const struct irq_chip cht_wc_i2c_irq_chip = { .name = "cht_wc_ext_chrg_irq_chip", }; -static const char * const bq24190_suppliers[] = { "fusb302-typec-source" }; +static const char * const bq24190_suppliers[] = { + "tcpm-source-psy-i2c-fusb302" }; static const struct property_entry bq24190_props[] = { PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_suppliers), diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c index 005e6e0330c2..66f85bbf3591 100644 --- a/drivers/i2c/busses/i2c-gpio.c +++ b/drivers/i2c/busses/i2c-gpio.c @@ -279,9 +279,9 @@ static int i2c_gpio_probe(struct platform_device *pdev) * required for an I2C bus. */ if (pdata->scl_is_open_drain) - gflags = GPIOD_OUT_LOW; + gflags = GPIOD_OUT_HIGH; else - gflags = GPIOD_OUT_LOW_OPEN_DRAIN; + gflags = GPIOD_OUT_HIGH_OPEN_DRAIN; priv->scl = i2c_gpio_get_desc(dev, "scl", 1, gflags); if (IS_ERR(priv->scl)) return PTR_ERR(priv->scl); diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c index e866c481bfc3..fce52bdab2b7 100644 --- a/drivers/i2c/busses/i2c-stu300.c +++ b/drivers/i2c/busses/i2c-stu300.c @@ -127,7 +127,7 @@ enum stu300_error { /* * The number of address send athemps tried before giving up. - * If the first one failes it seems like 5 to 8 attempts are required. + * If the first one fails it seems like 5 to 8 attempts are required. */ #define NUM_ADDR_RESEND_ATTEMPTS 12 diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 5fccd1f1bca8..797def5319f1 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -545,6 +545,14 @@ static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev) { u32 cnfg; + /* + * NACK interrupt is generated before the I2C controller generates + * the STOP condition on the bus. So wait for 2 clock periods + * before disabling the controller so that the STOP condition has + * been delivered properly. + */ + udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate)); + cnfg = i2c_readl(i2c_dev, I2C_CNFG); if (cnfg & I2C_CNFG_PACKET_MODE_EN) i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG); @@ -706,15 +714,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, if (likely(i2c_dev->msg_err == I2C_ERR_NONE)) return 0; - /* - * NACK interrupt is generated before the I2C controller generates - * the STOP condition on the bus. So wait for 2 clock periods - * before resetting the controller so that the STOP condition has - * been delivered properly. 
- */ - if (i2c_dev->msg_err == I2C_ERR_NO_ACK) - udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate)); - tegra_i2c_init(i2c_dev); if (i2c_dev->msg_err == I2C_ERR_NO_ACK) { if (msg->flags & I2C_M_IGNORE_NAK) diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 31d16ada6e7d..301285c54603 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -198,7 +198,16 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) val = !val; bri->set_scl(adap, val); - ndelay(RECOVERY_NDELAY); + + /* + * If we can set SDA, we will always create STOP here to ensure + * the additional pulses will do no harm. This is achieved by + * letting SDA follow SCL half a cycle later. + */ + ndelay(RECOVERY_NDELAY / 2); + if (bri->set_sda) + bri->set_sda(adap, val); + ndelay(RECOVERY_NDELAY / 2); } /* check if recovery actually succeeded */ diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c index f3f683041e7f..51970bae3c4a 100644 --- a/drivers/i2c/i2c-core-smbus.c +++ b/drivers/i2c/i2c-core-smbus.c @@ -465,15 +465,18 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, status = i2c_transfer(adapter, msg, num); if (status < 0) - return status; - if (status != num) - return -EIO; + goto cleanup; + if (status != num) { + status = -EIO; + goto cleanup; + } + status = 0; /* Check PEC if last message is a read */ if (i && (msg[num-1].flags & I2C_M_RD)) { status = i2c_smbus_check_pec(partial_pec, &msg[num-1]); if (status < 0) - return status; + goto cleanup; } if (read_write == I2C_SMBUS_READ) @@ -499,12 +502,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, break; } +cleanup: if (msg[0].flags & I2C_M_DMA_SAFE) kfree(msg[0].buf); if (msg[1].flags & I2C_M_DMA_SAFE) kfree(msg[1].buf); - return 0; + return status; } /** diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 7e3d82cff3d5..c149c9c360fc 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -1053,7 +1053,7 @@ static irqreturn_t mma8452_interrupt(int irq, void *p) if (src < 0) return IRQ_NONE; - if (!(src & data->chip_info->enabled_events)) + if (!(src & (data->chip_info->enabled_events | MMA8452_INT_DRDY))) return IRQ_NONE; if (src & MMA8452_INT_DRDY) { diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index f9c0624505a2..42618fe4f83e 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -959,6 +959,8 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name, } irq_type = irqd_get_trigger_type(desc); + if (!irq_type) + irq_type = IRQF_TRIGGER_RISING; if (irq_type == IRQF_TRIGGER_RISING) st->irq_mask = INV_MPU6050_ACTIVE_HIGH; else if (irq_type == IRQF_TRIGGER_FALLING) diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c index 34d42a2504c9..df5b2a0da96c 100644 --- a/drivers/iio/light/tsl2772.c +++ b/drivers/iio/light/tsl2772.c @@ -582,6 +582,8 @@ static int tsl2772_als_calibrate(struct iio_dev *indio_dev) "%s: failed to get lux\n", __func__); return lux_val; } + if (lux_val == 0) + return -ERANGE; ret = (chip->settings.als_cal_target * chip->settings.als_gain_trim) / lux_val; diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index 5ec3e41b65f2..fe87d27779d9 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -415,10 +415,9 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, 
int *val2) } comp_humidity = bmp280_compensate_humidity(data, adc_humidity); - *val = comp_humidity; - *val2 = 1024; + *val = comp_humidity * 1000 / 1024; - return IIO_VAL_FRACTIONAL; + return IIO_VAL_INT; } static int bmp280_read_raw(struct iio_dev *indio_dev, diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 3e90b6a1d9d2..cc06e8404e9b 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -3488,8 +3488,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, struct ib_flow_attr *flow_attr; struct ib_qp *qp; struct ib_uflow_resources *uflow_res; + struct ib_uverbs_flow_spec_hdr *kern_spec; int err = 0; - void *kern_spec; void *ib_spec; int i; @@ -3538,8 +3538,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, if (!kern_flow_attr) return -ENOMEM; - memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr)); - err = ib_copy_from_udata(kern_flow_attr + 1, ucore, + *kern_flow_attr = cmd.flow_attr; + err = ib_copy_from_udata(&kern_flow_attr->flow_specs, ucore, cmd.flow_attr.size); if (err) goto err_free_attr; @@ -3559,6 +3559,11 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, goto err_uobj; } + if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) { + err = -EINVAL; + goto err_put; + } + flow_attr = kzalloc(struct_size(flow_attr, flows, cmd.flow_attr.num_of_specs), GFP_KERNEL); if (!flow_attr) { @@ -3578,21 +3583,22 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, flow_attr->flags = kern_flow_attr->flags; flow_attr->size = sizeof(*flow_attr); - kern_spec = kern_flow_attr + 1; + kern_spec = kern_flow_attr->flow_specs; ib_spec = flow_attr + 1; for (i = 0; i < flow_attr->num_of_specs && - cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) && - cmd.flow_attr.size >= - ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) { - err = kern_spec_to_ib_spec(file->ucontext, kern_spec, ib_spec, - uflow_res); + cmd.flow_attr.size >= sizeof(*kern_spec) && + cmd.flow_attr.size >= kern_spec->size; + i++) { + err = kern_spec_to_ib_spec( + file->ucontext, (struct ib_uverbs_flow_spec *)kern_spec, + ib_spec, uflow_res); if (err) goto err_free; flow_attr->size += ((union ib_flow_spec *) ib_spec)->size; - cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size; - kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size; + cmd.flow_attr.size -= kern_spec->size; + kern_spec = ((void *)kern_spec) + kern_spec->size; ib_spec += ((union ib_flow_spec *) ib_spec)->size; } if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 1445918e3239..7b76e6f81aeb 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -774,7 +774,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr) { struct c4iw_mr *mhp = to_c4iw_mr(ibmr); - if (unlikely(mhp->mpl_len == mhp->max_mpl_len)) + if (unlikely(mhp->mpl_len == mhp->attr.pbl_size)) return -ENOMEM; mhp->mpl[mhp->mpl_len++] = addr; diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 1a1a47ac53c6..f15c93102081 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -271,7 +271,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) lockdep_assert_held(&qp->s_lock); ps->s_txreq = get_txreq(ps->dev, qp); - if (IS_ERR(ps->s_txreq)) + if (!ps->s_txreq) goto bail_no_tx; if (priv->hdr_type == HFI1_PKT_TYPE_9B) { 
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c index b7b671017e59..e254dcec6f64 100644 --- a/drivers/infiniband/hw/hfi1/uc.c +++ b/drivers/infiniband/hw/hfi1/uc.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2015, 2016 Intel Corporation. + * Copyright(c) 2015 - 2018 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -72,7 +72,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) int middle = 0; ps->s_txreq = get_txreq(ps->dev, qp); - if (IS_ERR(ps->s_txreq)) + if (!ps->s_txreq) goto bail_no_tx; if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index 1ab332f1866e..70d39fc450a1 100644 --- a/drivers/infiniband/hw/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2015, 2016 Intel Corporation. + * Copyright(c) 2015 - 2018 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -503,7 +503,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) u32 lid; ps->s_txreq = get_txreq(ps->dev, qp); - if (IS_ERR(ps->s_txreq)) + if (!ps->s_txreq) goto bail_no_tx; if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) { diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c index 873e48ea923f..c4ab2d5b4502 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.c +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2016 - 2017 Intel Corporation. + * Copyright(c) 2016 - 2018 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -94,7 +94,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, struct rvt_qp *qp) __must_hold(&qp->s_lock) { - struct verbs_txreq *tx = ERR_PTR(-EBUSY); + struct verbs_txreq *tx = NULL; write_seqlock(&dev->txwait_lock); if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h index 729244c3086c..1c19bbc764b2 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.h +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2016 Intel Corporation. + * Copyright(c) 2016 - 2018 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. 
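Editorial note: the hfi1 hunks above and below unify the txreq allocator's failure convention. __get_txreq() now returns NULL rather than ERR_PTR(-EBUSY), and rc.c, uc.c and ud.c test the pointer directly instead of with IS_ERR(). Mixing the two conventions is risky because each check misses the other kind of failure value. A stand-alone illustration, with simplified stand-ins for the kernel helpers (these are not the real definitions):

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    /* Simplified stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers. */
    #define MAX_ERRNO 4095
    static void *ERR_PTR(long error) { return (void *)error; }
    static int IS_ERR(const void *ptr)
    {
            return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    int main(void)
    {
            void *oom  = NULL;              /* allocation-failure value     */
            void *busy = ERR_PTR(-EBUSY);   /* old slow-path failure value  */

            printf("IS_ERR(NULL)  = %d, !NULL  = %d\n", IS_ERR(oom), !oom);
            printf("IS_ERR(EBUSY) = %d, !EBUSY = %d\n", IS_ERR(busy), !busy);
            /*
             * IS_ERR() misses NULL and a plain NULL test misses an ERR_PTR,
             * so the allocator and its callers must agree on one convention,
             * which is what the hfi1 change does by returning NULL everywhere.
             */
            return 0;
    }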
@@ -83,7 +83,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, if (unlikely(!tx)) { /* call slow path to get the lock */ tx = __get_txreq(dev, qp); - if (IS_ERR(tx)) + if (!tx) return tx; } tx->qp = qp; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index e3e330f59c2c..b3ba9a222550 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -6113,7 +6113,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports), MLX5_CAP_GEN(mdev, num_vhca_ports)); - if (MLX5_VPORT_MANAGER(mdev) && + if (MLX5_ESWITCH_MANAGER(mdev) && mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) { dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0); diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 0af7b7905550..f5de5adc9b1a 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -266,18 +266,24 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, desc_size = sizeof(struct mlx5_wqe_srq_next_seg) + srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); - if (desc_size == 0 || srq->msrq.max_gs > desc_size) - return ERR_PTR(-EINVAL); + if (desc_size == 0 || srq->msrq.max_gs > desc_size) { + err = -EINVAL; + goto err_srq; + } desc_size = roundup_pow_of_two(desc_size); desc_size = max_t(size_t, 32, desc_size); - if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) - return ERR_PTR(-EINVAL); + if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) { + err = -EINVAL; + goto err_srq; + } srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / sizeof(struct mlx5_wqe_data_seg); srq->msrq.wqe_shift = ilog2(desc_size); buf_size = srq->msrq.max * desc_size; - if (buf_size < desc_size) - return ERR_PTR(-EINVAL); + if (buf_size < desc_size) { + err = -EINVAL; + goto err_srq; + } in.type = init_attr->srq_type; if (pd->uobject) diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c index cf30523c6ef6..6c7326c93721 100644 --- a/drivers/input/input-mt.c +++ b/drivers/input/input-mt.c @@ -131,8 +131,10 @@ EXPORT_SYMBOL(input_mt_destroy_slots); * inactive, or if the tool type is changed, a new tracking id is * assigned to the slot. The tool type is only reported if the * corresponding absbit field is set. + * + * Returns true if contact is active. 
*/ -void input_mt_report_slot_state(struct input_dev *dev, +bool input_mt_report_slot_state(struct input_dev *dev, unsigned int tool_type, bool active) { struct input_mt *mt = dev->mt; @@ -140,22 +142,24 @@ void input_mt_report_slot_state(struct input_dev *dev, int id; if (!mt) - return; + return false; slot = &mt->slots[mt->slot]; slot->frame = mt->frame; if (!active) { input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1); - return; + return false; } id = input_mt_get_value(slot, ABS_MT_TRACKING_ID); - if (id < 0 || input_mt_get_value(slot, ABS_MT_TOOL_TYPE) != tool_type) + if (id < 0) id = input_mt_new_trkid(mt); input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, id); input_event(dev, EV_ABS, ABS_MT_TOOL_TYPE, tool_type); + + return true; } EXPORT_SYMBOL(input_mt_report_slot_state); diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 48e36acbeb49..cd620e009bad 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -125,7 +125,7 @@ static const struct xpad_device { u8 mapping; u8 xtype; } xpad_device[] = { - { 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 }, + { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 }, { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX }, diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c index f6e643b589b6..e8dae6195b30 100644 --- a/drivers/input/keyboard/goldfish_events.c +++ b/drivers/input/keyboard/goldfish_events.c @@ -45,7 +45,7 @@ struct event_dev { static irqreturn_t events_interrupt(int irq, void *dev_id) { struct event_dev *edev = dev_id; - unsigned type, code, value; + unsigned int type, code, value; type = __raw_readl(edev->addr + REG_READ); code = __raw_readl(edev->addr + REG_READ); @@ -57,7 +57,7 @@ static irqreturn_t events_interrupt(int irq, void *dev_id) } static void events_import_bits(struct event_dev *edev, - unsigned long bits[], unsigned type, size_t count) + unsigned long bits[], unsigned int type, size_t count) { void __iomem *addr = edev->addr; int i, j; @@ -99,6 +99,7 @@ static void events_import_abs_params(struct event_dev *edev) for (j = 0; j < ARRAY_SIZE(val); j++) { int offset = (i * ARRAY_SIZE(val) + j) * sizeof(u32); + val[j] = __raw_readl(edev->addr + REG_DATA + offset); } @@ -112,7 +113,7 @@ static int events_probe(struct platform_device *pdev) struct input_dev *input_dev; struct event_dev *edev; struct resource *res; - unsigned keymapnamelen; + unsigned int keymapnamelen; void __iomem *addr; int irq; int i; @@ -150,7 +151,7 @@ static int events_probe(struct platform_device *pdev) for (i = 0; i < keymapnamelen; i++) edev->name[i] = __raw_readb(edev->addr + REG_DATA + i); - pr_debug("events_probe() keymap=%s\n", edev->name); + pr_debug("%s: keymap=%s\n", __func__, edev->name); input_dev->name = edev->name; input_dev->id.bustype = BUS_HOST; diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index c25606e00693..ca59a2be9bc5 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -841,4 +841,14 @@ config INPUT_RAVE_SP_PWRBUTTON To compile this driver as a module, choose M here: the module will be called rave-sp-pwrbutton. +config INPUT_SC27XX_VIBRA + tristate "Spreadtrum sc27xx vibrator support" + depends on MFD_SC27XX_PMIC || COMPILE_TEST + select INPUT_FF_MEMLESS + help + This option enables support for Spreadtrum sc27xx vibrator driver. 
+ + To compile this driver as a module, choose M here. The module will + be called sc27xx_vibra. + endif diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 72cde28649e2..9d0f9d1ff68f 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -66,6 +66,7 @@ obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o obj-$(CONFIG_INPUT_AXP20X_PEK) += axp20x-pek.o obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o obj-$(CONFIG_INPUT_RK805_PWRKEY) += rk805-pwrkey.o +obj-$(CONFIG_INPUT_SC27XX_VIBRA) += sc27xx-vibra.o obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o obj-$(CONFIG_INPUT_SIRFSOC_ONKEY) += sirfsoc-onkey.o obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY) += soc_button_array.o diff --git a/drivers/input/misc/sc27xx-vibra.c b/drivers/input/misc/sc27xx-vibra.c new file mode 100644 index 000000000000..295251abbdac --- /dev/null +++ b/drivers/input/misc/sc27xx-vibra.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Spreadtrum Communications Inc. + */ + +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/input.h> +#include <linux/workqueue.h> + +#define CUR_DRV_CAL_SEL GENMASK(13, 12) +#define SLP_LDOVIBR_PD_EN BIT(9) +#define LDO_VIBR_PD BIT(8) + +struct vibra_info { + struct input_dev *input_dev; + struct work_struct play_work; + struct regmap *regmap; + u32 base; + u32 strength; + bool enabled; +}; + +static void sc27xx_vibra_set(struct vibra_info *info, bool on) +{ + if (on) { + regmap_update_bits(info->regmap, info->base, LDO_VIBR_PD, 0); + regmap_update_bits(info->regmap, info->base, + SLP_LDOVIBR_PD_EN, 0); + info->enabled = true; + } else { + regmap_update_bits(info->regmap, info->base, LDO_VIBR_PD, + LDO_VIBR_PD); + regmap_update_bits(info->regmap, info->base, + SLP_LDOVIBR_PD_EN, SLP_LDOVIBR_PD_EN); + info->enabled = false; + } +} + +static int sc27xx_vibra_hw_init(struct vibra_info *info) +{ + return regmap_update_bits(info->regmap, info->base, CUR_DRV_CAL_SEL, 0); +} + +static void sc27xx_vibra_play_work(struct work_struct *work) +{ + struct vibra_info *info = container_of(work, struct vibra_info, + play_work); + + if (info->strength && !info->enabled) + sc27xx_vibra_set(info, true); + else if (info->strength == 0 && info->enabled) + sc27xx_vibra_set(info, false); +} + +static int sc27xx_vibra_play(struct input_dev *input, void *data, + struct ff_effect *effect) +{ + struct vibra_info *info = input_get_drvdata(input); + + info->strength = effect->u.rumble.weak_magnitude; + schedule_work(&info->play_work); + + return 0; +} + +static void sc27xx_vibra_close(struct input_dev *input) +{ + struct vibra_info *info = input_get_drvdata(input); + + cancel_work_sync(&info->play_work); + if (info->enabled) + sc27xx_vibra_set(info, false); +} + +static int sc27xx_vibra_probe(struct platform_device *pdev) +{ + struct vibra_info *info; + int error; + + info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!info->regmap) { + dev_err(&pdev->dev, "failed to get vibrator regmap.\n"); + return -ENODEV; + } + + error = device_property_read_u32(&pdev->dev, "reg", &info->base); + if (error) { + dev_err(&pdev->dev, "failed to get vibrator base address.\n"); + return error; + } + + info->input_dev = devm_input_allocate_device(&pdev->dev); + if (!info->input_dev) { + dev_err(&pdev->dev, "failed to allocate input device.\n"); + return -ENOMEM; + } + + 
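Editorial note: the new sc27xx-vibra driver wires the vibrator into the input layer's memless force-feedback support. The FF_RUMBLE weak magnitude is cached and applied from a work item, presumably because the play callback can run in a context where the regmap accesses cannot be issued directly. Once the device is bound, it can be exercised from userspace with the standard force-feedback interface; a minimal sketch, where the event-node path is only a placeholder:

    #include <fcntl.h>
    #include <linux/input.h>
    #include <string.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            /* Placeholder path: the actual event node depends on the system. */
            int fd = open("/dev/input/event0", O_RDWR);
            struct ff_effect effect;
            struct input_event play;

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            memset(&effect, 0, sizeof(effect));
            effect.type = FF_RUMBLE;
            effect.id = -1;                           /* let the kernel pick an id   */
            effect.u.rumble.weak_magnitude = 0x8000;  /* the only field this driver reads */
            effect.replay.length = 500;               /* milliseconds */

            if (ioctl(fd, EVIOCSFF, &effect) < 0) {   /* upload the effect */
                    perror("EVIOCSFF");
                    return 1;
            }

            memset(&play, 0, sizeof(play));
            play.type = EV_FF;
            play.code = effect.id;
            play.value = 1;                           /* start playback */
            if (write(fd, &play, sizeof(play)) != sizeof(play))
                    perror("write");

            close(fd);
            return 0;
    }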
info->input_dev->name = "sc27xx:vibrator"; + info->input_dev->id.version = 0; + info->input_dev->close = sc27xx_vibra_close; + + input_set_drvdata(info->input_dev, info); + input_set_capability(info->input_dev, EV_FF, FF_RUMBLE); + INIT_WORK(&info->play_work, sc27xx_vibra_play_work); + info->enabled = false; + + error = sc27xx_vibra_hw_init(info); + if (error) { + dev_err(&pdev->dev, "failed to initialize the vibrator.\n"); + return error; + } + + error = input_ff_create_memless(info->input_dev, NULL, + sc27xx_vibra_play); + if (error) { + dev_err(&pdev->dev, "failed to register vibrator to FF.\n"); + return error; + } + + error = input_register_device(info->input_dev); + if (error) { + dev_err(&pdev->dev, "failed to register input device.\n"); + return error; + } + + return 0; +} + +static const struct of_device_id sc27xx_vibra_of_match[] = { + { .compatible = "sprd,sc2731-vibrator", }, + {} +}; +MODULE_DEVICE_TABLE(of, sc27xx_vibra_of_match); + +static struct platform_driver sc27xx_vibra_driver = { + .driver = { + .name = "sc27xx-vibrator", + .of_match_table = sc27xx_vibra_of_match, + }, + .probe = sc27xx_vibra_probe, +}; + +module_platform_driver(sc27xx_vibra_driver); + +MODULE_DESCRIPTION("Spreadtrum SC27xx Vibrator Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Xiaotong Lu <xiaotong.lu@spreadtrum.com>"); diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h index 599544c1a91c..243e0fa6e3e3 100644 --- a/drivers/input/mouse/elan_i2c.h +++ b/drivers/input/mouse/elan_i2c.h @@ -27,6 +27,8 @@ #define ETP_DISABLE_POWER 0x0001 #define ETP_PRESSURE_OFFSET 25 +#define ETP_CALIBRATE_MAX_LEN 3 + /* IAP Firmware handling */ #define ETP_PRODUCT_ID_FORMAT_STRING "%d.0" #define ETP_FW_NAME "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin" diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 8ff75114e762..1f9cd7d8b7ad 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -613,7 +613,7 @@ static ssize_t calibrate_store(struct device *dev, int tries = 20; int retval; int error; - u8 val[3]; + u8 val[ETP_CALIBRATE_MAX_LEN]; retval = mutex_lock_interruptible(&data->sysfs_mutex); if (retval) @@ -1345,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN060C", 0 }, { "ELAN0611", 0 }, { "ELAN0612", 0 }, + { "ELAN0618", 0 }, { "ELAN1000", 0 }, { } }; diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c index cfcb32559925..c060d270bc4d 100644 --- a/drivers/input/mouse/elan_i2c_smbus.c +++ b/drivers/input/mouse/elan_i2c_smbus.c @@ -56,7 +56,7 @@ static int elan_smbus_initialize(struct i2c_client *client) { u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 }; - u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 }; + u8 values[I2C_SMBUS_BLOCK_MAX] = {0}; int len, error; /* Get hello packet */ @@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client) static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val) { int error; + u8 buf[I2C_SMBUS_BLOCK_MAX] = {0}; + + BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf)); error = i2c_smbus_read_block_data(client, - ETP_SMBUS_CALIBRATE_QUERY, val); + ETP_SMBUS_CALIBRATE_QUERY, buf); if (error < 0) return error; + memcpy(val, buf, ETP_CALIBRATE_MAX_LEN); return 0; } @@ -472,6 +476,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report) { int len; + BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN); + len = 
i2c_smbus_read_block_data(client, ETP_SMBUS_PACKET_QUERY, &report[ETP_SMBUS_REPORT_OFFSET]); diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index fb4d902c4403..dd85b16dc6f8 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -799,7 +799,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse) else if (ic_version == 7 && etd->info.samples[1] == 0x2A) sanity_check = ((packet[3] & 0x1c) == 0x10); else - sanity_check = ((packet[0] & 0x0c) == 0x04 && + sanity_check = ((packet[0] & 0x08) == 0x00 && (packet[3] & 0x1c) == 0x10); if (!sanity_check) @@ -1175,6 +1175,12 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { { } }; +static const char * const middle_button_pnp_ids[] = { + "LEN2131", /* ThinkPad P52 w/ NFC */ + "LEN2132", /* ThinkPad P52 */ + NULL +}; + /* * Set the appropriate event bits for the input subsystem */ @@ -1194,7 +1200,8 @@ static int elantech_set_input_params(struct psmouse *psmouse) __clear_bit(EV_REL, dev->evbit); __set_bit(BTN_LEFT, dev->keybit); - if (dmi_check_system(elantech_dmi_has_middle_button)) + if (dmi_check_system(elantech_dmi_has_middle_button) || + psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids)) __set_bit(BTN_MIDDLE, dev->keybit); __set_bit(BTN_RIGHT, dev->keybit); diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 5ff5b1952be0..d3ff1fc09af7 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -192,8 +192,8 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse) else input_report_rel(dev, REL_WHEEL, -wheel); - input_report_key(dev, BTN_SIDE, BIT(4)); - input_report_key(dev, BTN_EXTRA, BIT(5)); + input_report_key(dev, BTN_SIDE, packet[3] & BIT(4)); + input_report_key(dev, BTN_EXTRA, packet[3] & BIT(5)); break; } break; @@ -203,13 +203,13 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse) input_report_rel(dev, REL_WHEEL, -(s8) packet[3]); /* Extra buttons on Genius NewNet 3D */ - input_report_key(dev, BTN_SIDE, BIT(6)); - input_report_key(dev, BTN_EXTRA, BIT(7)); + input_report_key(dev, BTN_SIDE, packet[0] & BIT(6)); + input_report_key(dev, BTN_EXTRA, packet[0] & BIT(7)); break; case PSMOUSE_THINKPS: /* Extra button on ThinkingMouse */ - input_report_key(dev, BTN_EXTRA, BIT(3)); + input_report_key(dev, BTN_EXTRA, packet[0] & BIT(3)); /* * Without this bit of weirdness moving up gives wildly @@ -223,7 +223,7 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse) * Cortron PS2 Trackball reports SIDE button in the * 4th bit of the first byte. */ - input_report_key(dev, BTN_SIDE, BIT(3)); + input_report_key(dev, BTN_SIDE, packet[0] & BIT(3)); packet[0] |= BIT(3); break; diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig index 7172b88cd064..fad2eae4a118 100644 --- a/drivers/input/rmi4/Kconfig +++ b/drivers/input/rmi4/Kconfig @@ -3,6 +3,7 @@ # config RMI4_CORE tristate "Synaptics RMI4 bus support" + select IRQ_DOMAIN help Say Y here if you want to support the Synaptics RMI4 bus. This is required for all RMI4 device support. 
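Editorial note: the "select IRQ_DOMAIN" just added to RMI4_CORE supports the rework in the rmi4 hunks that follow. Instead of walking the function list and handing each handler a bitmap, the core now creates a linear irq_domain, maps one virq per function interrupt source, and runs each attention handler as a nested threaded IRQ. A condensed sketch of that pattern, using only calls that appear in the diff (the demo_* names and structure layout are simplifications, not the driver's own):

    #include <linux/bitops.h>
    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/irqdomain.h>

    static struct irq_chip demo_irq_chip = { .name = "demo" };

    /* One virq per interrupt source, each served by a nested threaded handler. */
    static int demo_setup_irqs(struct device *dev, struct fwnode_handle *fwnode,
                               struct irq_domain **dom, unsigned int nr_sources,
                               irq_handler_t thread_fn, void *ctx)
    {
            unsigned int i;
            int virq, error;

            *dom = irq_domain_create_linear(fwnode, nr_sources,
                                            &irq_domain_simple_ops, NULL);
            if (!*dom)
                    return -ENOMEM;

            for (i = 0; i < nr_sources; i++) {
                    virq = irq_create_mapping(*dom, i);
                    if (!virq)
                            return -EINVAL;
                    irq_set_chip_and_handler(virq, &demo_irq_chip, handle_simple_irq);
                    irq_set_nested_thread(virq, 1);
                    error = devm_request_threaded_irq(dev, virq, NULL, thread_fn,
                                                      IRQF_ONESHOT, dev_name(dev), ctx);
                    if (error)
                            return error;
            }
            return 0;
    }

    /* Called once the transport has read the interrupt status bits. */
    static void demo_dispatch(struct irq_domain *dom, unsigned long *status,
                              unsigned int nr_sources)
    {
            unsigned int i;

            for_each_set_bit(i, status, nr_sources)
                    handle_nested_irq(irq_find_mapping(dom, i));
    }

The teardown side mirrors the rmi4 hunks below: mappings are released with irq_dispose_mapping() in rmi_unregister_function() and the domain itself with irq_domain_remove() in rmi_driver_remove().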
diff --git a/drivers/input/rmi4/rmi_2d_sensor.c b/drivers/input/rmi4/rmi_2d_sensor.c index 8bb866c7b985..8eeffa066022 100644 --- a/drivers/input/rmi4/rmi_2d_sensor.c +++ b/drivers/input/rmi4/rmi_2d_sensor.c @@ -32,15 +32,15 @@ void rmi_2d_sensor_abs_process(struct rmi_2d_sensor *sensor, if (obj->type == RMI_2D_OBJECT_NONE) return; - if (axis_align->swap_axes) - swap(obj->x, obj->y); - if (axis_align->flip_x) obj->x = sensor->max_x - obj->x; if (axis_align->flip_y) obj->y = sensor->max_y - obj->y; + if (axis_align->swap_axes) + swap(obj->x, obj->y); + /* * Here checking if X offset or y offset are specified is * redundant. We just add the offsets or clip the values. @@ -120,15 +120,15 @@ void rmi_2d_sensor_rel_report(struct rmi_2d_sensor *sensor, int x, int y) x = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)x)); y = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)y)); - if (axis_align->swap_axes) - swap(x, y); - if (axis_align->flip_x) x = min(RMI_2D_REL_POS_MAX, -x); if (axis_align->flip_y) y = min(RMI_2D_REL_POS_MAX, -y); + if (axis_align->swap_axes) + swap(x, y); + if (x || y) { input_report_rel(sensor->input, REL_X, x); input_report_rel(sensor->input, REL_Y, y); @@ -141,17 +141,10 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor) struct input_dev *input = sensor->input; int res_x; int res_y; + int max_x, max_y; int input_flags = 0; if (sensor->report_abs) { - if (sensor->axis_align.swap_axes) { - swap(sensor->max_x, sensor->max_y); - swap(sensor->axis_align.clip_x_low, - sensor->axis_align.clip_y_low); - swap(sensor->axis_align.clip_x_high, - sensor->axis_align.clip_y_high); - } - sensor->min_x = sensor->axis_align.clip_x_low; if (sensor->axis_align.clip_x_high) sensor->max_x = min(sensor->max_x, @@ -163,14 +156,19 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor) sensor->axis_align.clip_y_high); set_bit(EV_ABS, input->evbit); - input_set_abs_params(input, ABS_MT_POSITION_X, 0, sensor->max_x, - 0, 0); - input_set_abs_params(input, ABS_MT_POSITION_Y, 0, sensor->max_y, - 0, 0); + + max_x = sensor->max_x; + max_y = sensor->max_y; + if (sensor->axis_align.swap_axes) + swap(max_x, max_y); + input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_x, 0, 0); + input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_y, 0, 0); if (sensor->x_mm && sensor->y_mm) { res_x = (sensor->max_x - sensor->min_x) / sensor->x_mm; res_y = (sensor->max_y - sensor->min_y) / sensor->y_mm; + if (sensor->axis_align.swap_axes) + swap(res_x, res_y); input_abs_set_res(input, ABS_X, res_x); input_abs_set_res(input, ABS_Y, res_y); diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c index c5fa53adba8d..bd0d5ff01b08 100644 --- a/drivers/input/rmi4/rmi_bus.c +++ b/drivers/input/rmi4/rmi_bus.c @@ -9,6 +9,8 @@ #include <linux/kernel.h> #include <linux/device.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/list.h> #include <linux/pm.h> #include <linux/rmi.h> @@ -167,6 +169,39 @@ static inline void rmi_function_of_probe(struct rmi_function *fn) {} #endif +static struct irq_chip rmi_irq_chip = { + .name = "rmi4", +}; + +static int rmi_create_function_irq(struct rmi_function *fn, + struct rmi_function_handler *handler) +{ + struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev); + int i, error; + + for (i = 0; i < fn->num_of_irqs; i++) { + set_bit(fn->irq_pos + i, fn->irq_mask); + + fn->irq[i] = irq_create_mapping(drvdata->irqdomain, + fn->irq_pos + i); + + irq_set_chip_data(fn->irq[i], fn); + 
irq_set_chip_and_handler(fn->irq[i], &rmi_irq_chip, + handle_simple_irq); + irq_set_nested_thread(fn->irq[i], 1); + + error = devm_request_threaded_irq(&fn->dev, fn->irq[i], NULL, + handler->attention, IRQF_ONESHOT, + dev_name(&fn->dev), fn); + if (error) { + dev_err(&fn->dev, "Error %d registering IRQ\n", error); + return error; + } + } + + return 0; +} + static int rmi_function_probe(struct device *dev) { struct rmi_function *fn = to_rmi_function(dev); @@ -178,7 +213,14 @@ static int rmi_function_probe(struct device *dev) if (handler->probe) { error = handler->probe(fn); - return error; + if (error) + return error; + } + + if (fn->num_of_irqs && handler->attention) { + error = rmi_create_function_irq(fn, handler); + if (error) + return error; } return 0; @@ -230,12 +272,18 @@ err_put_device: void rmi_unregister_function(struct rmi_function *fn) { + int i; + rmi_dbg(RMI_DEBUG_CORE, &fn->dev, "Unregistering F%02X.\n", fn->fd.function_number); device_del(&fn->dev); of_node_put(fn->dev.of_node); put_device(&fn->dev); + + for (i = 0; i < fn->num_of_irqs; i++) + irq_dispose_mapping(fn->irq[i]); + } /** diff --git a/drivers/input/rmi4/rmi_bus.h b/drivers/input/rmi4/rmi_bus.h index b7625a9ac66a..96383eab41ba 100644 --- a/drivers/input/rmi4/rmi_bus.h +++ b/drivers/input/rmi4/rmi_bus.h @@ -14,6 +14,12 @@ struct rmi_device; +/* + * The interrupt source count in the function descriptor can represent up to + * 6 interrupt sources in the normal manner. + */ +#define RMI_FN_MAX_IRQS 6 + /** * struct rmi_function - represents the implementation of an RMI4 * function for a particular device (basically, a driver for that RMI4 function) @@ -26,6 +32,7 @@ struct rmi_device; * @irq_pos: The position in the irq bitfield this function holds * @irq_mask: For convenience, can be used to mask IRQ bits off during ATTN * interrupt handling. 
+ * @irqs: assigned virq numbers (up to num_of_irqs) * * @node: entry in device's list of functions */ @@ -36,6 +43,7 @@ struct rmi_function { struct list_head node; unsigned int num_of_irqs; + int irq[RMI_FN_MAX_IRQS]; unsigned int irq_pos; unsigned long irq_mask[]; }; @@ -76,7 +84,7 @@ struct rmi_function_handler { void (*remove)(struct rmi_function *fn); int (*config)(struct rmi_function *fn); int (*reset)(struct rmi_function *fn); - int (*attention)(struct rmi_function *fn, unsigned long *irq_bits); + irqreturn_t (*attention)(int irq, void *ctx); int (*suspend)(struct rmi_function *fn); int (*resume)(struct rmi_function *fn); }; diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index 7d29053dfb0f..fc3ab93b7aea 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c @@ -21,6 +21,7 @@ #include <linux/pm.h> #include <linux/slab.h> #include <linux/of.h> +#include <linux/irqdomain.h> #include <uapi/linux/input.h> #include <linux/rmi.h> #include "rmi_bus.h" @@ -127,28 +128,11 @@ static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev) return 0; } -static void process_one_interrupt(struct rmi_driver_data *data, - struct rmi_function *fn) -{ - struct rmi_function_handler *fh; - - if (!fn || !fn->dev.driver) - return; - - fh = to_rmi_function_handler(fn->dev.driver); - if (fh->attention) { - bitmap_and(data->fn_irq_bits, data->irq_status, fn->irq_mask, - data->irq_count); - if (!bitmap_empty(data->fn_irq_bits, data->irq_count)) - fh->attention(fn, data->fn_irq_bits); - } -} - static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev) { struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); struct device *dev = &rmi_dev->dev; - struct rmi_function *entry; + int i; int error; if (!data) @@ -173,16 +157,8 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev) */ mutex_unlock(&data->irq_mutex); - /* - * It would be nice to be able to use irq_chip to handle these - * nested IRQs. Unfortunately, most of the current customers for - * this driver are using older kernels (3.0.x) that don't support - * the features required for that. Once they've shifted to more - * recent kernels (say, 3.3 and higher), this should be switched to - * use irq_chip. - */ - list_for_each_entry(entry, &data->function_list, node) - process_one_interrupt(data, entry); + for_each_set_bit(i, data->irq_status, data->irq_count) + handle_nested_irq(irq_find_mapping(data->irqdomain, i)); if (data->input) input_sync(data->input); @@ -1001,9 +977,13 @@ EXPORT_SYMBOL_GPL(rmi_driver_resume); static int rmi_driver_remove(struct device *dev) { struct rmi_device *rmi_dev = to_rmi_device(dev); + struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); rmi_disable_irq(rmi_dev, false); + irq_domain_remove(data->irqdomain); + data->irqdomain = NULL; + rmi_f34_remove_sysfs(rmi_dev); rmi_free_function_list(rmi_dev); @@ -1035,7 +1015,8 @@ int rmi_probe_interrupts(struct rmi_driver_data *data) { struct rmi_device *rmi_dev = data->rmi_dev; struct device *dev = &rmi_dev->dev; - int irq_count; + struct fwnode_handle *fwnode = rmi_dev->xport->dev->fwnode; + int irq_count = 0; size_t size; int retval; @@ -1046,7 +1027,6 @@ int rmi_probe_interrupts(struct rmi_driver_data *data) * being accessed. 
*/ rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__); - irq_count = 0; data->bootloader_mode = false; retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs); @@ -1058,6 +1038,15 @@ int rmi_probe_interrupts(struct rmi_driver_data *data) if (data->bootloader_mode) dev_warn(dev, "Device in bootloader mode.\n"); + /* Allocate and register a linear revmap irq_domain */ + data->irqdomain = irq_domain_create_linear(fwnode, irq_count, + &irq_domain_simple_ops, + data); + if (!data->irqdomain) { + dev_err(&rmi_dev->dev, "Failed to create IRQ domain\n"); + return -ENOMEM; + } + data->irq_count = irq_count; data->num_of_irq_regs = (data->irq_count + 7) / 8; @@ -1080,10 +1069,9 @@ int rmi_init_functions(struct rmi_driver_data *data) { struct rmi_device *rmi_dev = data->rmi_dev; struct device *dev = &rmi_dev->dev; - int irq_count; + int irq_count = 0; int retval; - irq_count = 0; rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__); retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function); if (retval < 0) { diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c index 8a07ae147df6..4edaa14fe878 100644 --- a/drivers/input/rmi4/rmi_f01.c +++ b/drivers/input/rmi4/rmi_f01.c @@ -681,9 +681,9 @@ static int rmi_f01_resume(struct rmi_function *fn) return 0; } -static int rmi_f01_attention(struct rmi_function *fn, - unsigned long *irq_bits) +static irqreturn_t rmi_f01_attention(int irq, void *ctx) { + struct rmi_function *fn = ctx; struct rmi_device *rmi_dev = fn->rmi_dev; int error; u8 device_status; @@ -692,7 +692,7 @@ static int rmi_f01_attention(struct rmi_function *fn, if (error) { dev_err(&fn->dev, "Failed to read device status: %d.\n", error); - return error; + return IRQ_RETVAL(error); } if (RMI_F01_STATUS_BOOTLOADER(device_status)) @@ -704,11 +704,11 @@ static int rmi_f01_attention(struct rmi_function *fn, error = rmi_dev->driver->reset_handler(rmi_dev); if (error) { dev_err(&fn->dev, "Device reset failed: %d\n", error); - return error; + return IRQ_RETVAL(error); } } - return 0; + return IRQ_HANDLED; } struct rmi_function_handler rmi_f01_handler = { diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c index 88822196d6b7..aaa1edc95522 100644 --- a/drivers/input/rmi4/rmi_f03.c +++ b/drivers/input/rmi4/rmi_f03.c @@ -244,8 +244,9 @@ static int rmi_f03_config(struct rmi_function *fn) return 0; } -static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits) +static irqreturn_t rmi_f03_attention(int irq, void *ctx) { + struct rmi_function *fn = ctx; struct rmi_device *rmi_dev = fn->rmi_dev; struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev); struct f03_data *f03 = dev_get_drvdata(&fn->dev); @@ -262,7 +263,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits) /* First grab the data passed by the transport device */ if (drvdata->attn_data.size < ob_len) { dev_warn(&fn->dev, "F03 interrupted, but data is missing!\n"); - return 0; + return IRQ_HANDLED; } memcpy(obs, drvdata->attn_data.data, ob_len); @@ -277,7 +278,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits) "%s: Failed to read F03 output buffers: %d\n", __func__, error); serio_interrupt(f03->serio, 0, SERIO_TIMEOUT); - return error; + return IRQ_RETVAL(error); } } @@ -303,7 +304,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits) serio_interrupt(f03->serio, ob_data, serio_flags); } - return 0; + return IRQ_HANDLED; } static void rmi_f03_remove(struct 
rmi_function *fn) diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c index 12a233251793..df64d6aed4f7 100644 --- a/drivers/input/rmi4/rmi_f11.c +++ b/drivers/input/rmi4/rmi_f11.c @@ -570,9 +570,7 @@ static inline u8 rmi_f11_parse_finger_state(const u8 *f_state, u8 n_finger) } static void rmi_f11_finger_handler(struct f11_data *f11, - struct rmi_2d_sensor *sensor, - unsigned long *irq_bits, int num_irq_regs, - int size) + struct rmi_2d_sensor *sensor, int size) { const u8 *f_state = f11->data.f_state; u8 finger_state; @@ -581,12 +579,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11, int rel_fingers; int abs_size = sensor->nbr_fingers * RMI_F11_ABS_BYTES; - int abs_bits = bitmap_and(f11->result_bits, irq_bits, f11->abs_mask, - num_irq_regs * 8); - int rel_bits = bitmap_and(f11->result_bits, irq_bits, f11->rel_mask, - num_irq_regs * 8); - - if (abs_bits) { + if (sensor->report_abs) { if (abs_size > size) abs_fingers = size / RMI_F11_ABS_BYTES; else @@ -604,19 +597,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11, rmi_f11_abs_pos_process(f11, sensor, &sensor->objs[i], finger_state, i); } - } - if (rel_bits) { - if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size) - rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES; - else - rel_fingers = sensor->nbr_fingers; - - for (i = 0; i < rel_fingers; i++) - rmi_f11_rel_pos_report(f11, i); - } - - if (abs_bits) { /* * the absolute part is made in 2 parts to allow the kernel * tracking to take place. @@ -638,7 +619,16 @@ static void rmi_f11_finger_handler(struct f11_data *f11, } input_mt_sync_frame(sensor->input); + } else if (sensor->report_rel) { + if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size) + rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES; + else + rel_fingers = sensor->nbr_fingers; + + for (i = 0; i < rel_fingers; i++) + rmi_f11_rel_pos_report(f11, i); } + } static int f11_2d_construct_data(struct f11_data *f11) @@ -1276,8 +1266,9 @@ static int rmi_f11_config(struct rmi_function *fn) return 0; } -static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits) +static irqreturn_t rmi_f11_attention(int irq, void *ctx) { + struct rmi_function *fn = ctx; struct rmi_device *rmi_dev = fn->rmi_dev; struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev); struct f11_data *f11 = dev_get_drvdata(&fn->dev); @@ -1303,13 +1294,12 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits) data_base_addr, f11->sensor.data_pkt, f11->sensor.pkt_size); if (error < 0) - return error; + return IRQ_RETVAL(error); } - rmi_f11_finger_handler(f11, &f11->sensor, irq_bits, - drvdata->num_of_irq_regs, valid_bytes); + rmi_f11_finger_handler(f11, &f11->sensor, valid_bytes); - return 0; + return IRQ_HANDLED; } static int rmi_f11_resume(struct rmi_function *fn) diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c index a3d1aa88f2a9..5c7f48915779 100644 --- a/drivers/input/rmi4/rmi_f12.c +++ b/drivers/input/rmi4/rmi_f12.c @@ -197,10 +197,10 @@ static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1, int size) rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i); } -static int rmi_f12_attention(struct rmi_function *fn, - unsigned long *irq_nr_regs) +static irqreturn_t rmi_f12_attention(int irq, void *ctx) { int retval; + struct rmi_function *fn = ctx; struct rmi_device *rmi_dev = fn->rmi_dev; struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev); struct f12_data *f12 = dev_get_drvdata(&fn->dev); @@ -222,7 
+222,7 @@ static int rmi_f12_attention(struct rmi_function *fn, if (retval < 0) { dev_err(&fn->dev, "Failed to read object data. Code: %d.\n", retval); - return retval; + return IRQ_RETVAL(retval); } } @@ -232,7 +232,7 @@ static int rmi_f12_attention(struct rmi_function *fn, input_mt_sync_frame(sensor->input); - return 0; + return IRQ_HANDLED; } static int rmi_f12_write_control_regs(struct rmi_function *fn) diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c index 82e0f0d43d55..5e3ed5ac0c3e 100644 --- a/drivers/input/rmi4/rmi_f30.c +++ b/drivers/input/rmi4/rmi_f30.c @@ -122,8 +122,9 @@ static void rmi_f30_report_button(struct rmi_function *fn, } } -static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits) +static irqreturn_t rmi_f30_attention(int irq, void *ctx) { + struct rmi_function *fn = ctx; struct f30_data *f30 = dev_get_drvdata(&fn->dev); struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev); int error; @@ -134,7 +135,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits) if (drvdata->attn_data.size < f30->register_count) { dev_warn(&fn->dev, "F30 interrupted, but data is missing\n"); - return 0; + return IRQ_HANDLED; } memcpy(f30->data_regs, drvdata->attn_data.data, f30->register_count); @@ -147,7 +148,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits) dev_err(&fn->dev, "%s: Failed to read F30 data registers: %d\n", __func__, error); - return error; + return IRQ_RETVAL(error); } } @@ -159,7 +160,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits) rmi_f03_commit_buttons(f30->f03); } - return 0; + return IRQ_HANDLED; } static int rmi_f30_config(struct rmi_function *fn) diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c index f1f5ac539d5d..87a7d4ba382d 100644 --- a/drivers/input/rmi4/rmi_f34.c +++ b/drivers/input/rmi4/rmi_f34.c @@ -100,8 +100,9 @@ static int rmi_f34_command(struct f34_data *f34, u8 command, return 0; } -static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits) +static irqreturn_t rmi_f34_attention(int irq, void *ctx) { + struct rmi_function *fn = ctx; struct f34_data *f34 = dev_get_drvdata(&fn->dev); int ret; u8 status; @@ -126,7 +127,7 @@ static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits) complete(&f34->v7.cmd_done); } - return 0; + return IRQ_HANDLED; } static int rmi_f34_write_blocks(struct f34_data *f34, const void *data, diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c index e8a59d164019..a6f515bcab22 100644 --- a/drivers/input/rmi4/rmi_f54.c +++ b/drivers/input/rmi4/rmi_f54.c @@ -610,11 +610,6 @@ error: mutex_unlock(&f54->data_mutex); } -static int rmi_f54_attention(struct rmi_function *fn, unsigned long *irqbits) -{ - return 0; -} - static int rmi_f54_config(struct rmi_function *fn) { struct rmi_driver *drv = fn->rmi_dev->driver; @@ -756,6 +751,5 @@ struct rmi_function_handler rmi_f54_handler = { .func = 0x54, .probe = rmi_f54_probe, .config = rmi_f54_config, - .attention = rmi_f54_attention, .remove = rmi_f54_remove, }; diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c index ff7043f74a3d..d196ac3d8b8c 100644 --- a/drivers/input/touchscreen/silead.c +++ b/drivers/input/touchscreen/silead.c @@ -603,6 +603,7 @@ static const struct acpi_device_id silead_ts_acpi_match[] = { { "GSL3692", 0 }, { "MSSL1680", 0 }, { "MSSL0001", 0 }, + { "MSSL0002", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, 
silead_ts_acpi_match); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index e055d228bfb9..689ffe538370 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -142,7 +142,6 @@ config DMAR_TABLE config INTEL_IOMMU bool "Support for Intel IOMMU using DMA Remapping Devices" depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC) - select DMA_DIRECT_OPS select IOMMU_API select IOMMU_IOVA select NEED_DMA_MAP_STATE diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 14e4b3722428..115ff26e9ced 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -31,7 +31,6 @@ #include <linux/pci.h> #include <linux/dmar.h> #include <linux/dma-mapping.h> -#include <linux/dma-direct.h> #include <linux/mempool.h> #include <linux/memory.h> #include <linux/cpu.h> @@ -485,14 +484,37 @@ static int dmar_forcedac; static int intel_iommu_strict; static int intel_iommu_superpage = 1; static int intel_iommu_ecs = 1; +static int intel_iommu_pasid28; static int iommu_identity_mapping; #define IDENTMAP_ALL 1 #define IDENTMAP_GFX 2 #define IDENTMAP_AZALIA 4 -#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap)) -#define pasid_enabled(iommu) (ecs_enabled(iommu) && ecap_pasid(iommu->ecap)) +/* Broadwell and Skylake have broken ECS support — normal so-called "second + * level" translation of DMA requests-without-PASID doesn't actually happen + * unless you also set the NESTE bit in an extended context-entry. Which of + * course means that SVM doesn't work because it's trying to do nested + * translation of the physical addresses it finds in the process page tables, + * through the IOVA->phys mapping found in the "second level" page tables. + * + * The VT-d specification was retroactively changed to change the definition + * of the capability bits and pretend that Broadwell/Skylake never happened... + * but unfortunately the wrong bit was changed. It's ECS which is broken, but + * for some reason it was the PASID capability bit which was redefined (from + * bit 28 on BDW/SKL to bit 40 in future). + * + * So our test for ECS needs to eschew those implementations which set the old + * PASID capabiity bit 28, since those are the ones on which ECS is broken. + * Unless we are working around the 'pasid28' limitations, that is, by putting + * the device into passthrough mode for normal DMA and thus masking the bug. + */ +#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \ + (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap))) +/* PASID support is thus enabled if ECS is enabled and *either* of the old + * or new capability bits are set. */ +#define pasid_enabled(iommu) (ecs_enabled(iommu) && \ + (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap))) int intel_iommu_gfx_mapped; EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); @@ -555,6 +577,11 @@ static int __init intel_iommu_setup(char *str) printk(KERN_INFO "Intel-IOMMU: disable extended context table support\n"); intel_iommu_ecs = 0; + } else if (!strncmp(str, "pasid28", 7)) { + printk(KERN_INFO + "Intel-IOMMU: enable pre-production PASID support\n"); + intel_iommu_pasid28 = 1; + iommu_identity_mapping |= IDENTMAP_GFX; } else if (!strncmp(str, "tboot_noforce", 13)) { printk(KERN_INFO "Intel-IOMMU: not forcing on after tboot. 
This could expose security risk for tboot\n"); @@ -3713,30 +3740,61 @@ static void *intel_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) { - void *vaddr; + struct page *page = NULL; + int order; - vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs); - if (iommu_no_mapping(dev) || !vaddr) - return vaddr; + size = PAGE_ALIGN(size); + order = get_order(size); - *dma_handle = __intel_map_single(dev, virt_to_phys(vaddr), - PAGE_ALIGN(size), DMA_BIDIRECTIONAL, - dev->coherent_dma_mask); - if (!*dma_handle) - goto out_free_pages; - return vaddr; + if (!iommu_no_mapping(dev)) + flags &= ~(GFP_DMA | GFP_DMA32); + else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) { + if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) + flags |= GFP_DMA; + else + flags |= GFP_DMA32; + } + + if (gfpflags_allow_blocking(flags)) { + unsigned int count = size >> PAGE_SHIFT; + + page = dma_alloc_from_contiguous(dev, count, order, flags); + if (page && iommu_no_mapping(dev) && + page_to_phys(page) + size > dev->coherent_dma_mask) { + dma_release_from_contiguous(dev, page, count); + page = NULL; + } + } + + if (!page) + page = alloc_pages(flags, order); + if (!page) + return NULL; + memset(page_address(page), 0, size); + + *dma_handle = __intel_map_single(dev, page_to_phys(page), size, + DMA_BIDIRECTIONAL, + dev->coherent_dma_mask); + if (*dma_handle) + return page_address(page); + if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) + __free_pages(page, order); -out_free_pages: - dma_direct_free(dev, size, vaddr, *dma_handle, attrs); return NULL; } static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { - if (!iommu_no_mapping(dev)) - intel_unmap(dev, dma_handle, PAGE_ALIGN(size)); - dma_direct_free(dev, size, vaddr, dma_handle, attrs); + int order; + struct page *page = virt_to_page(vaddr); + + size = PAGE_ALIGN(size); + order = get_order(size); + + intel_unmap(dev, dma_handle, size); + if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) + __free_pages(page, order); } static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index 98f90aadd141..18c0a1281914 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -588,7 +588,7 @@ static const struct proto_ops data_sock_ops = { .getname = data_sock_getname, .sendmsg = mISDN_sock_sendmsg, .recvmsg = mISDN_sock_recvmsg, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = data_sock_setsockopt, diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index ab13fcec3fca..75df4c9d8b54 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -588,7 +588,7 @@ static const char *raid10_md_layout_to_format(int layout) } /* Return md raid10 algorithm for @name */ -static const int raid10_name_to_format(const char *name) +static int raid10_name_to_format(const char *name) { if (!strcasecmp(name, "near")) return ALGORITHM_RAID10_NEAR; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 938766794c2e..3d0e2c198f06 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -885,9 +885,7 @@ EXPORT_SYMBOL_GPL(dm_table_set_type); static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { - struct request_queue *q = bdev_get_queue(dev->bdev); - - return q && 
blk_queue_dax(q); + return bdev_dax_supported(dev->bdev, PAGE_SIZE); } static bool dm_table_supports_dax(struct dm_table *t) @@ -1907,6 +1905,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, if (dm_table_supports_dax(t)) blk_queue_flag_set(QUEUE_FLAG_DAX, q); + else + blk_queue_flag_clear(QUEUE_FLAG_DAX, q); + if (dm_table_supports_dax_write_cache(t)) dax_write_cache(t->md->dax_dev, true); diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 36ef284ad086..72142021b5c9 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -776,7 +776,6 @@ static int __write_changed_details(struct dm_pool_metadata *pmd) static int __commit_transaction(struct dm_pool_metadata *pmd) { int r; - size_t metadata_len, data_len; struct thin_disk_superblock *disk_super; struct dm_block *sblock; @@ -797,14 +796,6 @@ static int __commit_transaction(struct dm_pool_metadata *pmd) if (r < 0) return r; - r = dm_sm_root_size(pmd->metadata_sm, &metadata_len); - if (r < 0) - return r; - - r = dm_sm_root_size(pmd->data_sm, &data_len); - if (r < 0) - return r; - r = save_sm_roots(pmd); if (r < 0) return r; diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 7945238df1c0..b900723bbd0f 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -1386,6 +1386,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block, static void set_pool_mode(struct pool *pool, enum pool_mode new_mode); +static void requeue_bios(struct pool *pool); + static void check_for_space(struct pool *pool) { int r; @@ -1398,8 +1400,10 @@ static void check_for_space(struct pool *pool) if (r) return; - if (nr_free) + if (nr_free) { set_pool_mode(pool, PM_WRITE); + requeue_bios(pool); + } } /* @@ -1476,7 +1480,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) r = dm_pool_alloc_data_block(pool->pmd, result); if (r) { - metadata_operation_failed(pool, "dm_pool_alloc_data_block", r); + if (r == -ENOSPC) + set_pool_mode(pool, PM_OUT_OF_DATA_SPACE); + else + metadata_operation_failed(pool, "dm_pool_alloc_data_block", r); return r; } diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 5961c7794ef3..87107c995cb5 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -136,6 +136,7 @@ struct dm_writecache { struct dm_target *ti; struct dm_dev *dev; struct dm_dev *ssd_dev; + sector_t start_sector; void *memory_map; uint64_t memory_map_size; size_t metadata_sectors; @@ -259,7 +260,7 @@ static int persistent_memory_claim(struct dm_writecache *wc) if (da != p) { long i; wc->memory_map = NULL; - pages = kvmalloc(p * sizeof(struct page *), GFP_KERNEL); + pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL); if (!pages) { r = -ENOMEM; goto err2; @@ -293,6 +294,10 @@ static int persistent_memory_claim(struct dm_writecache *wc) } dax_read_unlock(id); + + wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT; + wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT; + return 0; err3: kvfree(pages); @@ -311,7 +316,7 @@ static int persistent_memory_claim(struct dm_writecache *wc) static void persistent_memory_release(struct dm_writecache *wc) { if (wc->memory_vmapped) - vunmap(wc->memory_map); + vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT)); } static struct page *persistent_memory_page(void *addr) @@ -359,7 +364,7 @@ static void *memory_data(struct dm_writecache *wc, struct wc_entry *e) static sector_t cache_sector(struct dm_writecache *wc, struct 
wc_entry *e) { - return wc->metadata_sectors + + return wc->start_sector + wc->metadata_sectors + ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT)); } @@ -471,6 +476,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc) if (unlikely(region.sector + region.count > wc->metadata_sectors)) region.count = wc->metadata_sectors - region.sector; + region.sector += wc->start_sector; atomic_inc(&endio.count); req.bi_op = REQ_OP_WRITE; req.bi_op_flags = REQ_SYNC; @@ -859,7 +865,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc) if (wc->entries) return 0; - wc->entries = vmalloc(sizeof(struct wc_entry) * wc->n_blocks); + wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks)); if (!wc->entries) return -ENOMEM; for (b = 0; b < wc->n_blocks; b++) { @@ -1481,9 +1487,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba wb->bio.bi_iter.bi_sector = read_original_sector(wc, e); wb->page_offset = PAGE_SIZE; if (max_pages <= WB_LIST_INLINE || - unlikely(!(wb->wc_list = kmalloc(max_pages * sizeof(struct wc_entry *), - GFP_NOIO | __GFP_NORETRY | - __GFP_NOMEMALLOC | __GFP_NOWARN)))) { + unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *), + GFP_NOIO | __GFP_NORETRY | + __GFP_NOMEMALLOC | __GFP_NOWARN)))) { wb->wc_list = wb->wc_list_inline; max_pages = WB_LIST_INLINE; } @@ -1946,14 +1952,6 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) } wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode); - if (WC_MODE_PMEM(wc)) { - r = persistent_memory_claim(wc); - if (r) { - ti->error = "Unable to map persistent memory for cache"; - goto bad; - } - } - /* * Parse the cache block size */ @@ -1982,7 +1980,16 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) while (opt_params) { string = dm_shift_arg(&as), opt_params--; - if (!strcasecmp(string, "high_watermark") && opt_params >= 1) { + if (!strcasecmp(string, "start_sector") && opt_params >= 1) { + unsigned long long start_sector; + string = dm_shift_arg(&as), opt_params--; + if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1) + goto invalid_optional; + wc->start_sector = start_sector; + if (wc->start_sector != start_sector || + wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT) + goto invalid_optional; + } else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) { string = dm_shift_arg(&as), opt_params--; if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1) goto invalid_optional; @@ -2039,12 +2046,20 @@ invalid_optional: goto bad; } - if (!WC_MODE_PMEM(wc)) { + if (WC_MODE_PMEM(wc)) { + r = persistent_memory_claim(wc); + if (r) { + ti->error = "Unable to map persistent memory for cache"; + goto bad; + } + } else { struct dm_io_region region; struct dm_io_request req; size_t n_blocks, n_metadata_blocks; uint64_t n_bitmap_bits; + wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT; + bio_list_init(&wc->flush_list); wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush"); if (IS_ERR(wc->flush_thread)) { @@ -2097,7 +2112,7 @@ invalid_optional: } region.bdev = wc->ssd_dev->bdev; - region.sector = 0; + region.sector = wc->start_sector; region.count = wc->metadata_sectors; req.bi_op = REQ_OP_READ; req.bi_op_flags = REQ_SYNC; @@ -2265,7 +2280,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type, static struct target_type writecache_target = { .name = "writecache", - .version = {1, 0, 0}, + .version = {1, 1, 0}, 
.module = THIS_MODULE, .ctr = writecache_ctr, .dtr = writecache_dtr, diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 3c0e45f4dcf5..a44183ff4be0 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -787,7 +787,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv) /* Chunk BIO work */ mutex_init(&dmz->chunk_lock); - INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL); + INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO); dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 0, dev->name); if (!dmz->chunk_wq) { diff --git a/drivers/md/dm.c b/drivers/md/dm.c index e65429a29c06..b0dd7027848b 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1056,8 +1056,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, if (len < 1) goto out; nr_pages = min(len, nr_pages); - if (ti->type->direct_access) - ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); + ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); out: dm_put_live_table(md, srcu_idx); @@ -1606,10 +1605,9 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, * the usage of io->orig_bio in dm_remap_zone_report() * won't be affected by this reassignment. */ - struct bio *b = bio_clone_bioset(bio, GFP_NOIO, - &md->queue->bio_split); + struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, + GFP_NOIO, &md->queue->bio_split); ci.io->orig_bio = b; - bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9); bio_chain(b, bio); ret = generic_make_request(bio); break; diff --git a/drivers/md/md.c b/drivers/md/md.c index 29b0cd9ec951..994aed2f9dff 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5547,7 +5547,8 @@ int md_run(struct mddev *mddev) else pr_warn("md: personality for level %s is not loaded!\n", mddev->clevel); - return -EINVAL; + err = -EINVAL; + goto abort; } spin_unlock(&pers_lock); if (mddev->level != pers->level) { @@ -5560,7 +5561,8 @@ int md_run(struct mddev *mddev) pers->start_reshape == NULL) { /* This personality cannot handle reshaping... 
*/ module_put(pers->owner); - return -EINVAL; + err = -EINVAL; + goto abort; } if (pers->sync_request) { @@ -5629,7 +5631,7 @@ int md_run(struct mddev *mddev) mddev->private = NULL; module_put(pers->owner); bitmap_destroy(mddev); - return err; + goto abort; } if (mddev->queue) { bool nonrot = true; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 478cf446827f..35bd3a62451b 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3893,6 +3893,13 @@ static int raid10_run(struct mddev *mddev) disk->rdev->saved_raid_disk < 0) conf->fullsync = 1; } + + if (disk->replacement && + !test_bit(In_sync, &disk->replacement->flags) && + disk->replacement->saved_raid_disk < 0) { + conf->fullsync = 1; + } + disk->recovery_disabled = mddev->recovery_disabled - 1; } diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index 40826bba06b6..fcfab6635f9c 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -207,29 +207,19 @@ void lirc_bpf_free(struct rc_dev *rcdev) bpf_prog_array_free(rcdev->raw->progs); } -int lirc_prog_attach(const union bpf_attr *attr) +int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) { - struct bpf_prog *prog; struct rc_dev *rcdev; int ret; if (attr->attach_flags) return -EINVAL; - prog = bpf_prog_get_type(attr->attach_bpf_fd, - BPF_PROG_TYPE_LIRC_MODE2); - if (IS_ERR(prog)) - return PTR_ERR(prog); - rcdev = rc_dev_get_from_fd(attr->target_fd); - if (IS_ERR(rcdev)) { - bpf_prog_put(prog); + if (IS_ERR(rcdev)) return PTR_ERR(rcdev); - } ret = lirc_bpf_attach(rcdev, prog); - if (ret) - bpf_prog_put(prog); put_device(&rcdev->dev); diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index 753b1a698fc4..6b16946f9b05 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -103,15 +103,15 @@ static struct file *cxl_getfile(const char *name, d_instantiate(path.dentry, inode); file = alloc_file(&path, OPEN_FMODE(flags), fops); - if (IS_ERR(file)) - goto err_dput; + if (IS_ERR(file)) { + path_put(&path); + goto err_fs; + } file->f_flags = flags & (O_ACCMODE | O_NONBLOCK); file->private_data = priv; return file; -err_dput: - path_put(&path); err_inode: iput(inode); err_fs: diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c index e05c3245930a..fa840666bdd1 100644 --- a/drivers/misc/ibmasm/ibmasmfs.c +++ b/drivers/misc/ibmasm/ibmasmfs.c @@ -507,35 +507,14 @@ static int remote_settings_file_close(struct inode *inode, struct file *file) static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { void __iomem *address = (void __iomem *)file->private_data; - unsigned char *page; - int retval; int len = 0; unsigned int value; - - if (*offset < 0) - return -EINVAL; - if (count == 0 || count > 1024) - return 0; - if (*offset != 0) - return 0; - - page = (unsigned char *)__get_free_page(GFP_KERNEL); - if (!page) - return -ENOMEM; + char lbuf[20]; value = readl(address); - len = sprintf(page, "%d\n", value); - - if (copy_to_user(buf, page, len)) { - retval = -EFAULT; - goto exit; - } - *offset += len; - retval = len; + len = snprintf(lbuf, sizeof(lbuf), "%d\n", value); -exit: - free_page((unsigned long)page); - return retval; + return simple_read_from_buffer(buf, count, offset, lbuf, len); } static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset) diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index b0b8f18a85e3..6649f0d56d2f 100644 --- 
a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -310,8 +310,11 @@ int mei_irq_read_handler(struct mei_device *dev, if (&cl->link == &dev->file_list) { /* A message for not connected fixed address clients * should be silently discarded + * On power down client may be force cleaned, + * silently discard such messages */ - if (hdr_is_fixed(mei_hdr)) { + if (hdr_is_fixed(mei_hdr) || + dev->dev_state == MEI_DEV_POWER_DOWN) { mei_irq_discard_msg(dev, mei_hdr); ret = 0; goto reset_slots; diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index efd733472a35..56c6f79a5c5a 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -467,7 +467,7 @@ static int vmballoon_send_batched_lock(struct vmballoon *b, unsigned int num_pages, bool is_2m_pages, unsigned int *target) { unsigned long status; - unsigned long pfn = page_to_pfn(b->page); + unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page)); STATS_INC(b->stats.lock[is_2m_pages]); @@ -515,7 +515,7 @@ static bool vmballoon_send_batched_unlock(struct vmballoon *b, unsigned int num_pages, bool is_2m_pages, unsigned int *target) { unsigned long status; - unsigned long pfn = page_to_pfn(b->page); + unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page)); STATS_INC(b->stats.unlock[is_2m_pages]); diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index ef05e0039378..2a833686784b 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c @@ -27,8 +27,8 @@ struct mmc_gpio { bool override_cd_active_level; irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id); char *ro_label; - char cd_label[0]; u32 cd_debounce_delay_ms; + char cd_label[]; }; static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id) diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 623f4d27fa01..80dc2fd6576c 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -1065,8 +1065,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data) * It's used when HS400 mode is enabled. 
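The slot-gpio hunk above turns cd_label from a GNU zero-length array into a C99 flexible array member, and a flexible array member must be the last field of its struct, which is why cd_debounce_delay_ms moves ahead of it. A minimal standalone illustration of the declaration and the single-allocation pattern it enables (all names here are invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct labeled_gpio {
    unsigned int debounce_ms;
    char label[];                 /* flexible array member: must come last */
};

static struct labeled_gpio *labeled_gpio_alloc(const char *name)
{
    /* allocate the struct plus room for the trailing string in one block */
    struct labeled_gpio *g = malloc(sizeof(*g) + strlen(name) + 1);

    if (!g)
        return NULL;
    g->debounce_ms = 0;
    strcpy(g->label, name);
    return g;
}

int main(void)
{
    struct labeled_gpio *g = labeled_gpio_alloc("cd");

    if (g) {
        printf("%s\n", g->label);
        free(g);
    }
    return 0;
}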
*/ if (data->flags & MMC_DATA_WRITE && - !(host->timing != MMC_TIMING_MMC_HS400)) - return; + host->timing != MMC_TIMING_MMC_HS400) + goto disable; if (data->flags & MMC_DATA_WRITE) enable = SDMMC_CARD_WR_THR_EN; @@ -1074,7 +1074,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data) enable = SDMMC_CARD_RD_THR_EN; if (host->timing != MMC_TIMING_MMC_HS200 && - host->timing != MMC_TIMING_UHS_SDR104) + host->timing != MMC_TIMING_UHS_SDR104 && + host->timing != MMC_TIMING_MMC_HS400) goto disable; blksz_depth = blksz / (1 << host->data_shift); diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index f7f9773d161f..d032bd63444d 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -139,8 +139,7 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) { renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, RST_RESERVED_BITS | val); - if (host->data && host->data->flags & MMC_DATA_READ) - clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); + clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); renesas_sdhi_internal_dmac_enable_dma(host, true); } @@ -164,17 +163,14 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host, goto force_pio; /* This DMAC cannot handle if buffer is not 8-bytes alignment */ - if (!IS_ALIGNED(sg_dma_address(sg), 8)) { - dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, - mmc_get_dma_dir(data)); - goto force_pio; - } + if (!IS_ALIGNED(sg_dma_address(sg), 8)) + goto force_pio_with_unmap; if (data->flags & MMC_DATA_READ) { dtran_mode |= DTRAN_MODE_CH_NUM_CH1; if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) && test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags)) - goto force_pio; + goto force_pio_with_unmap; } else { dtran_mode |= DTRAN_MODE_CH_NUM_CH0; } @@ -189,6 +185,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host, return; +force_pio_with_unmap: + dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, mmc_get_dma_dir(data)); + force_pio: host->force_pio = true; renesas_sdhi_internal_dmac_enable_dma(host, false); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index d6aef70d34fa..4eb3d29ecde1 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -312,6 +312,15 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) if (imx_data->socdata->flags & ESDHC_FLAG_HS400) val |= SDHCI_SUPPORT_HS400; + + /* + * Do not advertise faster UHS modes if there are no + * pinctrl states for 100MHz/200MHz. 
+ */ + if (IS_ERR_OR_NULL(imx_data->pins_100mhz) || + IS_ERR_OR_NULL(imx_data->pins_200mhz)) + val &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50 + | SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_HS400); } } @@ -1158,18 +1167,6 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, ESDHC_PINCTRL_STATE_100MHZ); imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, ESDHC_PINCTRL_STATE_200MHZ); - if (IS_ERR(imx_data->pins_100mhz) || - IS_ERR(imx_data->pins_200mhz)) { - dev_warn(mmc_dev(host->mmc), - "could not get ultra high speed state, work on normal mode\n"); - /* - * fall back to not supporting uhs by specifying no - * 1.8v quirk - */ - host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; - } - } else { - host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; } /* call to generic mmc_of_parse to support additional capabilities */ diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index e7472590f2ed..8e7f3e35ee3d 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1446,6 +1446,7 @@ static int sunxi_mmc_runtime_resume(struct device *dev) sunxi_mmc_init_host(host); sunxi_mmc_set_bus_width(host, mmc->ios.bus_width); sunxi_mmc_set_clk(host, &mmc->ios); + enable_irq(host->irq); return 0; } @@ -1455,6 +1456,12 @@ static int sunxi_mmc_runtime_suspend(struct device *dev) struct mmc_host *mmc = dev_get_drvdata(dev); struct sunxi_mmc_host *host = mmc_priv(mmc); + /* + * When clocks are off, it's possible receiving + * fake interrupts, which will stall the system. + * Disabling the irq will prevent this. + */ + disable_irq(host->irq); sunxi_mmc_reset_host(host); sunxi_mmc_disable(host); diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index a0c655628d6d..1b64ac8c5bc8 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -2526,7 +2526,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) struct ppb_lock { struct flchip *chip; - loff_t offset; + unsigned long adr; int locked; }; @@ -2544,8 +2544,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map, unsigned long timeo; int ret; + adr += chip->start; mutex_lock(&chip->mutex); - ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); + ret = get_chip(map, chip, adr, FL_LOCKING); if (ret) { mutex_unlock(&chip->mutex); return ret; @@ -2563,8 +2564,8 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map, if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { chip->state = FL_LOCKING; - map_write(map, CMD(0xA0), chip->start + adr); - map_write(map, CMD(0x00), chip->start + adr); + map_write(map, CMD(0xA0), adr); + map_write(map, CMD(0x00), adr); } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) { /* * Unlocking of one specific sector is not supported, so we @@ -2602,7 +2603,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map, map_write(map, CMD(0x00), chip->start); chip->state = FL_READY; - put_chip(map, chip, adr + chip->start); + put_chip(map, chip, adr); mutex_unlock(&chip->mutex); return ret; @@ -2659,9 +2660,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, * sectors shall be unlocked, so lets keep their locking * status at "unlocked" (locked=0) for the final re-locking. 
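The sdhci-esdhc-imx hunk above stops advertising SDR50/DDR50/SDR104/HS400 when the 100MHz/200MHz pinctrl states are missing, instead of setting the old NO_1_8_V quirk at probe time. A tiny standalone sketch of that "mask capability bits when a prerequisite is absent" pattern, with made-up bit values rather than the real SDHCI_SUPPORT_* ones:

#include <stdio.h>

#define CAP_SDR50  (1u << 0)      /* stand-ins for the SDHCI_SUPPORT_* bits */
#define CAP_DDR50  (1u << 1)
#define CAP_SDR104 (1u << 2)
#define CAP_HS400  (1u << 3)

static unsigned int trim_caps(unsigned int caps, int have_fast_pinctrl)
{
    /* without the high-speed pad configuration, do not claim the fast modes */
    if (!have_fast_pinctrl)
        caps &= ~(CAP_SDR50 | CAP_DDR50 | CAP_SDR104 | CAP_HS400);
    return caps;
}

int main(void)
{
    unsigned int caps = CAP_SDR50 | CAP_SDR104 | CAP_HS400;

    printf("with pinctrl:    %#x\n", trim_caps(caps, 1));
    printf("without pinctrl: %#x\n", trim_caps(caps, 0));
    return 0;
}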
*/ - if ((adr < ofs) || (adr >= (ofs + len))) { + if ((offset < ofs) || (offset >= (ofs + len))) { sect[sectors].chip = &cfi->chips[chipnum]; - sect[sectors].offset = offset; + sect[sectors].adr = adr; sect[sectors].locked = do_ppb_xxlock( map, &cfi->chips[chipnum], adr, 0, DO_XXLOCK_ONEBLOCK_GETLOCK); @@ -2675,6 +2676,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, i++; if (adr >> cfi->chipshift) { + if (offset >= (ofs + len)) + break; adr = 0; chipnum++; @@ -2705,7 +2708,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, */ for (i = 0; i < sectors; i++) { if (sect[i].locked) - do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0, + do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0, DO_XXLOCK_ONEBLOCK_LOCK); } diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 3a6f450d1093..53febe8a68c3 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -733,8 +733,8 @@ static struct flash_info dataflash_data[] = { { "AT45DB642x", 0x1f2800, 8192, 1056, 11, SUP_POW2PS}, { "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS}, - { "AT45DB641E", 0x1f28000100, 32768, 264, 9, SUP_EXTID | SUP_POW2PS}, - { "at45db641e", 0x1f28000100, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS}, + { "AT45DB641E", 0x1f28000100ULL, 32768, 264, 9, SUP_EXTID | SUP_POW2PS}, + { "at45db641e", 0x1f28000100ULL, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS}, }; static struct flash_info *jedec_lookup(struct spi_device *spi, diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c index cfd33e6ca77f..5869e90cc14b 100644 --- a/drivers/mtd/nand/raw/denali_dt.c +++ b/drivers/mtd/nand/raw/denali_dt.c @@ -123,7 +123,11 @@ static int denali_dt_probe(struct platform_device *pdev) if (ret) return ret; - denali->clk_x_rate = clk_get_rate(dt->clk); + /* + * Hardcode the clock rate for the backward compatibility. + * This works for both SOCFPGA and UniPhier. 
+ */ + denali->clk_x_rate = 200000000; ret = denali_init(denali); if (ret) diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c index 45786e707b7b..26cef218bb43 100644 --- a/drivers/mtd/nand/raw/mxc_nand.c +++ b/drivers/mtd/nand/raw/mxc_nand.c @@ -48,7 +48,7 @@ #define NFC_V1_V2_CONFIG (host->regs + 0x0a) #define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c) #define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e) -#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10) +#define NFC_V21_RSLTSPARE_AREA (host->regs + 0x10) #define NFC_V1_V2_WRPROT (host->regs + 0x12) #define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14) #define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16) @@ -1274,6 +1274,9 @@ static void preset_v2(struct mtd_info *mtd) writew(config1, NFC_V1_V2_CONFIG1); /* preset operation */ + /* spare area size in 16-bit half-words */ + writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA); + /* Unlock the internal RAM Buffer */ writew(0x2, NFC_V1_V2_CONFIG); diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 10c4f9919850..b01d15ec4c56 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -440,7 +440,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs) for (; page < page_end; page++) { res = chip->ecc.read_oob(mtd, chip, page); - if (res) + if (res < 0) return res; bad = chip->oob_poi[chip->badblockpos]; diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c index 7ed1f87e742a..49c546c97c6f 100644 --- a/drivers/mtd/nand/raw/nand_macronix.c +++ b/drivers/mtd/nand/raw/nand_macronix.c @@ -17,23 +17,47 @@ #include <linux/mtd/rawnand.h> +/* + * Macronix AC series does not support using SET/GET_FEATURES to change + * the timings unlike what is declared in the parameter page. Unflag + * this feature to avoid unnecessary downturns. + */ +static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip) +{ + unsigned int i; + static const char * const broken_get_timings[] = { + "MX30LF1G18AC", + "MX30LF1G28AC", + "MX30LF2G18AC", + "MX30LF2G28AC", + "MX30LF4G18AC", + "MX30LF4G28AC", + "MX60LF8G18AC", + }; + + if (!chip->parameters.supports_set_get_features) + return; + + for (i = 0; i < ARRAY_SIZE(broken_get_timings); i++) { + if (!strcmp(broken_get_timings[i], chip->parameters.model)) + break; + } + + if (i == ARRAY_SIZE(broken_get_timings)) + return; + + bitmap_clear(chip->parameters.get_feature_list, + ONFI_FEATURE_ADDR_TIMING_MODE, 1); + bitmap_clear(chip->parameters.set_feature_list, + ONFI_FEATURE_ADDR_TIMING_MODE, 1); +} + static int macronix_nand_init(struct nand_chip *chip) { if (nand_is_slc(chip)) chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; - /* - * MX30LF2G18AC chip does not support using SET/GET_FEATURES to change - * the timings unlike what is declared in the parameter page. Unflag - * this feature to avoid unnecessary downturns. 
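The nand_macronix hunk above grows the single MX30LF2G18AC check into a scan over a static table of affected AC-series parts before clearing the timing-mode feature bits. A standalone sketch of that table-lookup pattern (model strings copied from the hunk, everything else illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool model_has_broken_get_timings(const char *model)
{
    static const char * const broken[] = {
        "MX30LF1G18AC", "MX30LF1G28AC", "MX30LF2G18AC", "MX30LF2G28AC",
        "MX30LF4G18AC", "MX30LF4G28AC", "MX60LF8G18AC",
    };
    size_t i;

    for (i = 0; i < sizeof(broken) / sizeof(broken[0]); i++)
        if (!strcmp(broken[i], model))
            return true;        /* quirk applies: drop the timing feature */
    return false;
}

int main(void)
{
    printf("%d\n", model_has_broken_get_timings("MX30LF2G18AC"));  /* 1 */
    printf("%d\n", model_has_broken_get_timings("MX30LF1G08AA"));  /* 0 */
    return 0;
}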
- */ - if (chip->parameters.supports_set_get_features && - !strcmp("MX30LF2G18AC", chip->parameters.model)) { - bitmap_clear(chip->parameters.get_feature_list, - ONFI_FEATURE_ADDR_TIMING_MODE, 1); - bitmap_clear(chip->parameters.set_feature_list, - ONFI_FEATURE_ADDR_TIMING_MODE, 1); - } + macronix_nand_fix_broken_get_timings(chip); return 0; } diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c index 0af45b134c0c..5ec4c90a637d 100644 --- a/drivers/mtd/nand/raw/nand_micron.c +++ b/drivers/mtd/nand/raw/nand_micron.c @@ -66,7 +66,9 @@ static int micron_nand_onfi_init(struct nand_chip *chip) if (p->supports_set_get_features) { set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->set_feature_list); + set_bit(ONFI_FEATURE_ON_DIE_ECC, p->set_feature_list); set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->get_feature_list); + set_bit(ONFI_FEATURE_ON_DIE_ECC, p->get_feature_list); } return 0; diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index c3f7aaa5d18f..d7e10b36a0b9 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c @@ -926,10 +926,12 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to, if (ret) return ret; - if (f_pdata->use_direct_mode) + if (f_pdata->use_direct_mode) { memcpy_toio(cqspi->ahb_base + to, buf, len); - else + ret = cqspi_wait_idle(cqspi); + } else { ret = cqspi_indirect_write_execute(nor, to, buf, len); + } if (ret) return ret; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index fc7383106946..91eb8910b1c9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h @@ -63,8 +63,6 @@ #define AQ_CFG_NAPI_WEIGHT 64U -#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U - /*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/ #define AQ_NIC_FC_OFF 0U diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index a2d416b24ffc..2c6ebd91a9f2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -98,6 +98,8 @@ struct aq_stats_s { #define AQ_HW_MEDIA_TYPE_TP 1U #define AQ_HW_MEDIA_TYPE_FIBRE 2U +#define AQ_HW_MULTICAST_ADDRESS_MAX 32U + struct aq_hw_s { atomic_t flags; u8 rbl_enabled:1; @@ -177,7 +179,7 @@ struct aq_hw_ops { unsigned int packet_filter); int (*hw_multicast_list_set)(struct aq_hw_s *self, - u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX] + u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX] [ETH_ALEN], u32 count); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index ba5fe8c4125d..e3ae29e523f0 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -135,17 +135,10 @@ err_exit: static void aq_ndev_set_multicast_settings(struct net_device *ndev) { struct aq_nic_s *aq_nic = netdev_priv(ndev); - int err = 0; - err = aq_nic_set_packet_filter(aq_nic, ndev->flags); - if (err < 0) - return; + aq_nic_set_packet_filter(aq_nic, ndev->flags); - if (netdev_mc_count(ndev)) { - err = aq_nic_set_multicast_list(aq_nic, ndev); - if (err < 0) - return; - } + aq_nic_set_multicast_list(aq_nic, ndev); } static const struct net_device_ops aq_ndev_ops = { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 1a1a6380c128..7a22d0257e04 100644 --- 
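The cadence-quadspi hunk above makes the direct-mode write path wait for the controller to report idle (cqspi_wait_idle()) after memcpy_toio(), rather than returning as soon as the data has been pushed out. A generic, self-contained sketch of a bounded poll-until-idle loop; in the driver the predicate would read a status register and the bound would be a timeout rather than an iteration count:

#include <stdbool.h>
#include <stdio.h>

static int wait_idle(bool (*is_idle)(void *ctx), void *ctx, int max_polls)
{
    while (max_polls--) {
        if (is_idle(ctx))
            return 0;           /* controller quiesced, write really done */
    }
    return -1;                  /* the driver would return -ETIMEDOUT */
}

/* toy predicate: pretend the hardware becomes idle after a few polls */
static bool demo_idle(void *ctx)
{
    int *countdown = ctx;
    return --(*countdown) <= 0;
}

int main(void)
{
    int countdown = 3;

    printf("%d\n", wait_idle(demo_idle, &countdown, 10));   /* 0: became idle */
    return 0;
}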
a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -563,34 +563,41 @@ err_exit: int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) { + unsigned int packet_filter = self->packet_filter; struct netdev_hw_addr *ha = NULL; unsigned int i = 0U; - self->mc_list.count = 0U; - - netdev_for_each_mc_addr(ha, ndev) { - ether_addr_copy(self->mc_list.ar[i++], ha->addr); - ++self->mc_list.count; + self->mc_list.count = 0; + if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) { + packet_filter |= IFF_PROMISC; + } else { + netdev_for_each_uc_addr(ha, ndev) { + ether_addr_copy(self->mc_list.ar[i++], ha->addr); - if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) - break; + if (i >= AQ_HW_MULTICAST_ADDRESS_MAX) + break; + } } - if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) { - /* Number of filters is too big: atlantic does not support this. - * Force all multi filter to support this. - * With this we disable all UC filters and setup "all pass" - * multicast mask - */ - self->packet_filter |= IFF_ALLMULTI; - self->aq_nic_cfg.mc_list_count = 0; - return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, - self->packet_filter); + if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) { + packet_filter |= IFF_ALLMULTI; } else { - return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, - self->mc_list.ar, - self->mc_list.count); + netdev_for_each_mc_addr(ha, ndev) { + ether_addr_copy(self->mc_list.ar[i++], ha->addr); + + if (i >= AQ_HW_MULTICAST_ADDRESS_MAX) + break; + } + } + + if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) { + packet_filter |= IFF_MULTICAST; + self->mc_list.count = i; + self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, + self->mc_list.ar, + self->mc_list.count); } + return aq_nic_set_packet_filter(self, packet_filter); } int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index faa533a0ec47..fecfc401f95d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -75,7 +75,7 @@ struct aq_nic_s { struct aq_hw_link_status_s link_status; struct { u32 count; - u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN]; + u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN]; } mc_list; struct pci_dev *pdev; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 67e2f9fb9402..8cc6abadc03b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -765,7 +765,7 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self, static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, u8 ar_mac - [AQ_CFG_MULTICAST_ADDRESS_MAX] + [AQ_HW_MULTICAST_ADDRESS_MAX] [ETH_ALEN], u32 count) { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 819f6bcf9b4e..956860a69797 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -784,7 +784,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self, static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self, u8 ar_mac - [AQ_CFG_MULTICAST_ADDRESS_MAX] + [AQ_HW_MULTICAST_ADDRESS_MAX] [ETH_ALEN], u32 count) { @@ -812,7 +812,7 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self, 
hw_atl_rpfl2_uc_flr_en_set(self, (self->aq_nic_cfg->is_mc_list_enabled), - HW_ATL_B0_MAC_MIN + i); + HW_ATL_B0_MAC_MIN + i); } err = aq_hw_err_from_flags(self); diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 567ee54504bc..5e5022fa1d04 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); struct alx_priv *alx = pci_get_drvdata(pdev); struct alx_hw *hw = &alx->hw; + int err; alx_reset_phy(hw); if (!netif_running(alx->dev)) return 0; netif_device_attach(alx->dev); - return __alx_open(alx, true); + + rtnl_lock(); + err = __alx_open(alx, true); + rtnl_unlock(); + + return err; } static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index d5fca2e5a9bc..a1f60f89e059 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1946,8 +1946,8 @@ static int bcm_sysport_open(struct net_device *dev) if (!priv->is_lite) priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); else - priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) & - GIB_FCS_STRIP); + priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) & + GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT); phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, 0, priv->phy_interface); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index d6e5d0cbf3a3..cf440b91fd04 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -278,7 +278,8 @@ struct bcm_rsb { #define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT) #define GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT) #define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT) -#define GIB_FCS_STRIP (1 << 6) +#define GIB_FCS_STRIP_SHIFT 6 +#define GIB_FCS_STRIP (1 << GIB_FCS_STRIP_SHIFT) #define GIB_LCL_LOOP_EN (1 << 7) #define GIB_LCL_LOOP_TXEN (1 << 8) #define GIB_RMT_LOOP_EN (1 << 9) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index d847e1b9c37b..be1506169076 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1533,6 +1533,7 @@ struct bnx2x { struct link_vars link_vars; u32 link_cnt; struct bnx2x_link_report_data last_reported_link; + bool force_link_down; struct mdio_if_info mdio; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 8cd73ff5debc..af7b5a4d8ba0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1261,6 +1261,11 @@ void __bnx2x_link_report(struct bnx2x *bp) { struct bnx2x_link_report_data cur_data; + if (bp->force_link_down) { + bp->link_vars.link_up = 0; + return; + } + /* reread mf_cfg */ if (IS_PF(bp) && !CHIP_IS_E1(bp)) bnx2x_read_mf_cfg(bp); @@ -2817,6 +2822,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bp->pending_max = 0; } + bp->force_link_down = false; if (bp->port.pmf) { rc = bnx2x_initial_phy_init(bp, load_mode); if (rc) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 5b1ed240bf18..57348f2b49a3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ 
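The aq_nic_set_multicast_list() rewrite above programs unicast and multicast addresses into the same exact-match table and falls back to IFF_PROMISC or IFF_ALLMULTI when the 32-entry limit would be exceeded. A compressed standalone sketch of that decision logic, with invented flag values standing in for the IFF_* constants:

#include <stdio.h>

#define F_PROMISC   (1u << 0)     /* stand-ins for IFF_PROMISC and friends */
#define F_ALLMULTI  (1u << 1)
#define F_MULTICAST (1u << 2)
#define MAX_FILTERS 32            /* mirrors AQ_HW_MULTICAST_ADDRESS_MAX */

/* Decide which fallback flags to set when the hardware filter table cannot
 * hold every unicast/multicast address individually. */
static unsigned int pick_rx_mode(unsigned int flags, unsigned int n_uc,
                                 unsigned int n_mc)
{
    unsigned int used = 0;

    if (n_uc > MAX_FILTERS)
        flags |= F_PROMISC;       /* too many unicast entries for the table */
    else
        used = n_uc;

    if (used + n_mc > MAX_FILTERS)
        flags |= F_ALLMULTI;      /* not enough room left for multicast */
    else
        used += n_mc;

    if (used > 0 && used < MAX_FILTERS)
        flags |= F_MULTICAST;     /* program the exact-match entries */
    return flags;
}

int main(void)
{
    printf("%#x\n", pick_rx_mode(0, 2, 4));    /* exact filters only */
    printf("%#x\n", pick_rx_mode(0, 40, 4));   /* unicast overflow: promisc */
    printf("%#x\n", pick_rx_mode(0, 10, 40));  /* multicast overflow: allmulti */
    return 0;
}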
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) bp->sp_rtnl_state = 0; smp_mb(); + /* Immediately indicate link as down */ + bp->link_vars.link_up = 0; + bp->force_link_down = true; + netif_carrier_off(bp->dev); + BNX2X_ERR("Indicating link is down due to Tx-timeout\n"); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); /* When ret value shows failure of allocation failure, * the nic is rebooted again. If open still fails, a error diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 176fc9f4d7de..4394c1162be4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5712,7 +5712,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) } vnic->uc_filter_count = 1; - vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; + vnic->rx_mask = 0; + if (bp->dev->flags & IFF_BROADCAST) + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; @@ -5917,7 +5919,7 @@ unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); } -void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) +static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) { bp->hw_resc.max_irqs = max_irqs; } @@ -6888,7 +6890,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) rc = bnxt_request_irq(bp); if (rc) { netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); - goto open_err; + goto open_err_irq; } } @@ -6928,6 +6930,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) open_err: bnxt_debug_dev_exit(bp); bnxt_disable_napi(bp); + +open_err_irq: bnxt_del_napi(bp); open_err_free_mem: @@ -7214,13 +7218,16 @@ static void bnxt_set_rx_mode(struct net_device *dev) mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | - CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST); + CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | + CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; uc_update = bnxt_uc_list_updated(bp); + if (dev->flags & IFF_BROADCAST) + mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; if (dev->flags & IFF_ALLMULTI) { mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; vnic->mc_list_count = 0; @@ -8502,11 +8509,11 @@ int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) int rx, tx, cp; _bnxt_get_max_rings(bp, &rx, &tx, &cp); + *max_rx = rx; + *max_tx = tx; if (!rx || !tx || !cp) return -ENOMEM; - *max_rx = rx; - *max_tx = tx; return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); } @@ -8520,8 +8527,11 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, /* Not enough rings, try disabling agg rings. 
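The __bnxt_open_nic() hunk above adds a dedicated open_err_irq label so that a failed bnxt_request_irq() unwinds only what has actually been set up at that point (NAPI contexts are deleted but not disabled, since they were never enabled). A generic sketch of that staged goto-unwind idiom using plain malloc()/free() resources:

#include <stdlib.h>

static int demo_open(void)
{
    char *ring = NULL, *irq_cookie = NULL;

    ring = malloc(64);                 /* step 1 */
    if (!ring)
        goto out;

    irq_cookie = malloc(16);           /* step 2: "request irq" */
    if (!irq_cookie)
        goto err_free_ring;            /* undo step 1 only */

    /* further setup would continue here; each later failure jumps to a
     * label that releases exactly the resources acquired so far */
    free(irq_cookie);
    free(ring);
    return 0;

err_free_ring:
    free(ring);
out:
    return -1;
}

int main(void)
{
    return demo_open() ? 1 : 0;
}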
*/ bp->flags &= ~BNXT_FLAG_AGG_RINGS; rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); - if (rc) + if (rc) { + /* set BNXT_FLAG_AGG_RINGS back for consistency */ + bp->flags |= BNXT_FLAG_AGG_RINGS; return rc; + } bp->flags |= BNXT_FLAG_NO_AGG_RINGS; bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 9b14eb610b9f..91575ef97c8c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1470,7 +1470,6 @@ void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max); unsigned int bnxt_get_max_func_irqs(struct bnxt *bp); -void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max); int bnxt_get_avail_msix(struct bnxt *bp, int num); int bnxt_reserve_rings(struct bnxt *bp); void bnxt_tx_disable(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 795f45024c20..491bd40a254d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -27,6 +27,15 @@ #define BNXT_FID_INVALID 0xffff #define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT)) +#define is_vlan_pcp_wildcarded(vlan_tci_mask) \ + ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000) +#define is_vlan_pcp_exactmatch(vlan_tci_mask) \ + ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK) +#define is_vlan_pcp_zero(vlan_tci) \ + ((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000) +#define is_vid_exactmatch(vlan_tci_mask) \ + ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK) + /* Return the dst fid of the func for flow forwarding * For PFs: src_fid is the fid of the PF * For VF-reps: src_fid the fid of the VF @@ -389,6 +398,21 @@ static bool is_exactmatch(void *mask, int len) return true; } +static bool is_vlan_tci_allowed(__be16 vlan_tci_mask, + __be16 vlan_tci) +{ + /* VLAN priority must be either exactly zero or fully wildcarded and + * VLAN id must be exact match. 
+ */ + if (is_vid_exactmatch(vlan_tci_mask) && + ((is_vlan_pcp_exactmatch(vlan_tci_mask) && + is_vlan_pcp_zero(vlan_tci)) || + is_vlan_pcp_wildcarded(vlan_tci_mask))) + return true; + + return false; +} + static bool bits_set(void *key, int len) { const u8 *p = key; @@ -803,9 +827,9 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow) /* Currently VLAN fields cannot be partial wildcard */ if (bits_set(&flow->l2_key.inner_vlan_tci, sizeof(flow->l2_key.inner_vlan_tci)) && - !is_exactmatch(&flow->l2_mask.inner_vlan_tci, - sizeof(flow->l2_mask.inner_vlan_tci))) { - netdev_info(bp->dev, "Wildcard match unsupported for VLAN TCI\n"); + !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci, + flow->l2_key.inner_vlan_tci)) { + netdev_info(bp->dev, "Unsupported VLAN TCI\n"); return false; } if (bits_set(&flow->l2_key.inner_vlan_tpid, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 347e4f946eb2..840f6e505f73 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, edev->ulp_tbl[ulp_id].msix_requested = avail_msix; } bnxt_fill_msix_vecs(bp, ent); - bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix); bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix); edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; return avail_msix; @@ -192,7 +191,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id) msix_requested = edev->ulp_tbl[ulp_id].msix_requested; bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested); edev->ulp_tbl[ulp_id].msix_requested = 0; - bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) + msix_requested); edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; if (netif_running(dev)) { bnxt_close_nic(bp, true, false); diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 30273a7717e2..4fd829b5e65d 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id, id_tbl->max = size; id_tbl->next = next; spin_lock_init(&id_tbl->lock); - id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL); + id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL); if (!id_tbl->table) return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3be87efdc93d..aa1374d0af93 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6,11 +6,15 @@ * Copyright (C) 2004 Sun Microsystems Inc. * Copyright (C) 2005-2016 Broadcom Corporation. * Copyright (C) 2016-2017 Broadcom Limited. + * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom" + * refers to Broadcom Inc. and/or its subsidiaries. * * Firmware is: * Derived from proprietary unpublished source code, * Copyright (C) 2000-2016 Broadcom Corporation. * Copyright (C) 2016-2017 Broadcom Ltd. + * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom" + * refers to Broadcom Inc. and/or its subsidiaries. 
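The bnxt_tc hunk above tightens the VLAN TCI offload check to the rule spelled out in the comment: the VLAN id must be an exact match, and the priority bits must be either fully wildcarded or an exact match on the value zero. A standalone, host-byte-order sketch of that rule using the standard TCI masks:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK  0x0fff     /* same values as linux/if_vlan.h */
#define VLAN_PRIO_MASK 0xe000

static bool vlan_tci_allowed(uint16_t tci, uint16_t tci_mask)
{
    bool vid_exact = (tci_mask & VLAN_VID_MASK) == VLAN_VID_MASK;
    bool pcp_wild  = (tci_mask & VLAN_PRIO_MASK) == 0;
    bool pcp_exact = (tci_mask & VLAN_PRIO_MASK) == VLAN_PRIO_MASK;
    bool pcp_zero  = (tci & VLAN_PRIO_MASK) == 0;

    return vid_exact && (pcp_wild || (pcp_exact && pcp_zero));
}

int main(void)
{
    printf("%d\n", vlan_tci_allowed(0x0064, 0x0fff));  /* vid 100, pcp wildcarded: 1 */
    printf("%d\n", vlan_tci_allowed(0x2064, 0xffff));  /* pcp 1 exact-matched:     0 */
    return 0;
}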
* * Permission is hereby granted for the distribution of this firmware * data in hexadecimal or equivalent format, provided this copyright @@ -9290,6 +9294,15 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_restore_clk(tp); + /* Increase the core clock speed to fix tx timeout issue for 5762 + * with 100Mbps link speed. + */ + if (tg3_asic_rev(tp) == ASIC_REV_5762) { + val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); + tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | + TG3_CPMU_MAC_ORIDE_ENABLE); + } + /* Reprobe ASF enable state. */ tg3_flag_clear(tp, ENABLE_ASF); tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 1d61aa3efda1..a772a33b685c 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -7,6 +7,8 @@ * Copyright (C) 2004 Sun Microsystems Inc. * Copyright (C) 2007-2016 Broadcom Corporation. * Copyright (C) 2016-2017 Broadcom Limited. + * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom" + * refers to Broadcom Inc. and/or its subsidiaries. */ #ifndef _T3_H diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 86659823b259..3d45f4c92cf6 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -166,6 +166,7 @@ #define GEM_DCFG6 0x0294 /* Design Config 6 */ #define GEM_DCFG7 0x0298 /* Design Config 7 */ #define GEM_DCFG8 0x029C /* Design Config 8 */ +#define GEM_DCFG10 0x02A4 /* Design Config 10 */ #define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */ #define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */ @@ -490,6 +491,12 @@ #define GEM_SCR2CMP_OFFSET 0 #define GEM_SCR2CMP_SIZE 8 +/* Bitfields in DCFG10 */ +#define GEM_TXBD_RDBUFF_OFFSET 12 +#define GEM_TXBD_RDBUFF_SIZE 4 +#define GEM_RXBD_RDBUFF_OFFSET 8 +#define GEM_RXBD_RDBUFF_SIZE 4 + /* Bitfields in TISUBN */ #define GEM_SUBNSINCR_OFFSET 0 #define GEM_SUBNSINCR_SIZE 16 @@ -635,6 +642,7 @@ #define MACB_CAPS_USRIO_DISABLED 0x00000010 #define MACB_CAPS_JUMBO 0x00000020 #define MACB_CAPS_GEM_HAS_PTP 0x00000040 +#define MACB_CAPS_BD_RD_PREFETCH 0x00000080 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 @@ -1203,6 +1211,9 @@ struct macb { unsigned int max_tuples; struct tasklet_struct hresp_err_tasklet; + + int rx_bd_rd_prefetch; + int tx_bd_rd_prefetch; }; #ifdef CONFIG_MACB_USE_HWSTAMP diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 3e93df5d4e3b..a6c911bb5ce2 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1811,23 +1811,25 @@ static void macb_free_consistent(struct macb *bp) { struct macb_queue *queue; unsigned int q; + int size; - queue = &bp->queues[0]; bp->macbgem_ops.mog_free_rx_buffers(bp); - if (queue->rx_ring) { - dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp), - queue->rx_ring, queue->rx_ring_dma); - queue->rx_ring = NULL; - } for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { kfree(queue->tx_skb); queue->tx_skb = NULL; if (queue->tx_ring) { - dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp), + size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; + dma_free_coherent(&bp->pdev->dev, size, queue->tx_ring, queue->tx_ring_dma); queue->tx_ring = NULL; } + if (queue->rx_ring) { + size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; + dma_free_coherent(&bp->pdev->dev, size, + 
queue->rx_ring, queue->rx_ring_dma); + queue->rx_ring = NULL; + } } } @@ -1874,7 +1876,7 @@ static int macb_alloc_consistent(struct macb *bp) int size; for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { - size = TX_RING_BYTES(bp); + size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, &queue->tx_ring_dma, GFP_KERNEL); @@ -1890,7 +1892,7 @@ static int macb_alloc_consistent(struct macb *bp) if (!queue->tx_skb) goto out_err; - size = RX_RING_BYTES(bp); + size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, &queue->rx_ring_dma, GFP_KERNEL); if (!queue->rx_ring) @@ -3726,6 +3728,8 @@ static int at91ether_init(struct platform_device *pdev) int err; u32 reg; + bp->queues[0].bp = bp; + dev->netdev_ops = &at91ether_netdev_ops; dev->ethtool_ops = &macb_ethtool_ops; @@ -3795,7 +3799,7 @@ static const struct macb_config np4_config = { static const struct macb_config zynqmp_config = { .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO | - MACB_CAPS_GEM_HAS_PTP, + MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, @@ -3856,7 +3860,7 @@ static int macb_probe(struct platform_device *pdev) void __iomem *mem; const char *mac; struct macb *bp; - int err; + int err, val; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); mem = devm_ioremap_resource(&pdev->dev, regs); @@ -3945,6 +3949,18 @@ static int macb_probe(struct platform_device *pdev) else dev->max_mtu = ETH_DATA_LEN; + if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) { + val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10)); + if (val) + bp->rx_bd_rd_prefetch = (2 << (val - 1)) * + macb_dma_desc_get_size(bp); + + val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10)); + if (val) + bp->tx_bd_rd_prefetch = (2 << (val - 1)) * + macb_dma_desc_get_size(bp); + } + mac = of_get_mac_address(np); if (mac) { ether_addr_copy(bp->dev->dev_addr, mac); diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 043e3c11c42b..92d88c5f76fb 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -15,7 +15,7 @@ if NET_VENDOR_CAVIUM config THUNDER_NIC_PF tristate "Thunder Physical function driver" - depends on 64BIT + depends on 64BIT && PCI select THUNDER_NIC_BGX ---help--- This driver supports Thunder's NIC physical function. 
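The macb changes above read the DCFG10 RDBUFF fields and reserve extra bytes at the end of each descriptor ring: a field value of v encodes 2^v prefetched descriptors, written as (2 << (v - 1)) in the patch, and the prefetcher must not read past the allocation. A tiny standalone calculation of those sizes, with an invented descriptor size:

#include <stdio.h>

int main(void)
{
    const unsigned int desc_size = 16;    /* illustrative descriptor size */

    for (unsigned int v = 1; v <= 4; v++)
        printf("field=%u -> %u descriptors -> %u extra bytes\n",
               v, 2u << (v - 1), (2u << (v - 1)) * desc_size);
    return 0;
}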
@@ -28,13 +28,13 @@ config THUNDER_NIC_PF config THUNDER_NIC_VF tristate "Thunder Virtual function driver" imply CAVIUM_PTP - depends on 64BIT + depends on 64BIT && PCI ---help--- This driver supports Thunder's NIC virtual function config THUNDER_NIC_BGX tristate "Thunder MAC interface driver (BGX)" - depends on 64BIT + depends on 64BIT && PCI select PHYLIB select MDIO_THUNDER select THUNDER_NIC_RGX @@ -44,7 +44,7 @@ config THUNDER_NIC_BGX config THUNDER_NIC_RGX tristate "Thunder MAC interface driver (RGX)" - depends on 64BIT + depends on 64BIT && PCI select PHYLIB select MDIO_THUNDER ---help--- @@ -53,7 +53,7 @@ config THUNDER_NIC_RGX config CAVIUM_PTP tristate "Cavium PTP coprocessor as PTP clock" - depends on 64BIT + depends on 64BIT && PCI imply PTP_1588_CLOCK default y ---help--- @@ -65,7 +65,7 @@ config CAVIUM_PTP config LIQUIDIO tristate "Cavium LiquidIO support" - depends on 64BIT + depends on 64BIT && PCI depends on MAY_USE_DEVLINK imply PTP_1588_CLOCK select FW_LOADER diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 8a815bb57177..7e8454d3b1ad 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -91,6 +91,9 @@ static int octeon_console_debug_enabled(u32 console) */ #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000 +/* time to wait for possible in-flight requests in milliseconds */ +#define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000) + struct lio_trusted_vf_ctx { struct completion complete; int status; @@ -259,7 +262,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct) force_io_queues_off(oct); /* To allow for in-flight requests */ - schedule_timeout_uninterruptible(100); + schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST); if (wait_for_pending_requests(oct)) dev_err(&oct->pci_dev->dev, "There were pending requests\n"); diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 3f6afb54a5eb..bb43ddb7539e 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -643,13 +643,21 @@ static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr) static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) { struct octeon_mgmt *p = netdev_priv(netdev); - int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; + int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN; netdev->mtu = new_mtu; - cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs); + /* HW lifts the limit if the frame is VLAN tagged + * (+4 bytes per each tag, up to two tags) + */ + cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet); + /* Set the hardware to truncate packets larger than the MTU. The jabber + * register must be set to a multiple of 8 bytes, so round up. JABBER is + * an unconditional limit, so we need to account for two possible VLAN + * tags. 
+ */ cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER, - (size_without_fcs + 7) & 0xfff8); + (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 7b795edd9d3a..a19172dbe6be 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -51,6 +51,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/uaccess.h> +#include <linux/nospec.h> #include "common.h" #include "cxgb3_ioctl.h" @@ -2268,6 +2269,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) if (t.qset_idx >= nqsets) return -EINVAL; + t.qset_idx = array_index_nospec(t.qset_idx, nqsets); q = &adapter->params.sge.qset[q1 + t.qset_idx]; t.rspq_size = q->rspq_size; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 974a868a4824..3720c3e11ebb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -8702,7 +8702,7 @@ static int t4_get_flash_params(struct adapter *adap) }; unsigned int part, manufacturer; - unsigned int density, size; + unsigned int density, size = 0; u32 flashid = 0; int ret; @@ -8772,11 +8772,6 @@ static int t4_get_flash_params(struct adapter *adap) case 0x22: /* 256MB */ size = 1 << 28; break; - - default: - dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n", - flashid, density); - return -EINVAL; } break; } @@ -8792,10 +8787,6 @@ static int t4_get_flash_params(struct adapter *adap) case 0x17: /* 64MB */ size = 1 << 26; break; - default: - dev_err(adap->pdev_dev, "ISSI Flash Part has bad size, ID = %#x, Density code = %#x\n", - flashid, density); - return -EINVAL; } break; } @@ -8811,10 +8802,6 @@ static int t4_get_flash_params(struct adapter *adap) case 0x18: /* 16MB */ size = 1 << 24; break; - default: - dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n", - flashid, density); - return -EINVAL; } break; } @@ -8830,17 +8817,21 @@ static int t4_get_flash_params(struct adapter *adap) case 0x18: /* 16MB */ size = 1 << 24; break; - default: - dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n", - flashid, density); - return -EINVAL; } break; } - default: - dev_err(adap->pdev_dev, "Unsupported Flash Part, ID = %#x\n", - flashid); - return -EINVAL; + } + + /* If we didn't recognize the FLASH part, that's no real issue: the + * Hardware/Software contract says that Hardware will _*ALWAYS*_ + * use a FLASH part which is at least 4MB in size and has 64KB + * sectors. The unrecognized FLASH part is likely to be much larger + * than 4MB, but that's all we really need. + */ + if (size == 0) { + dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n", + flashid); + size = 1 << 22; } /* Store decoded Flash size and fall through into vetting code. 
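The t4_get_flash_params() hunk above drops the hard -EINVAL paths: size starts at 0 and, if no density code matched, the driver warns and assumes the 4MB minimum that the comment says the hardware/software contract guarantees. A compact standalone sketch of that decode-with-fallback shape (density codes as in the Micron branch above):

#include <stdio.h>

/* Decode a vendor density code into a byte size; 0 means "unrecognized". */
static unsigned int decode_density(unsigned int density)
{
    switch (density) {
    case 0x14: return 1u << 20;   /* 1MB */
    case 0x15: return 1u << 21;   /* 2MB */
    case 0x16: return 1u << 22;   /* 4MB */
    case 0x17: return 1u << 23;   /* 8MB */
    default:   return 0;
    }
}

int main(void)
{
    unsigned int size = decode_density(0x99);   /* unknown part */

    if (size == 0) {
        fprintf(stderr, "unknown flash part, assuming 4MB\n");
        size = 1u << 22;                        /* guaranteed minimum */
    }
    printf("flash size: %u bytes\n", size);
    return 0;
}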
*/ diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 5f4e1ffa7b95..ab02057ac730 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); /* Default alignment for start of data in an Rx FD */ #define DPAA_FD_DATA_ALIGNMENT 16 +/* The DPAA requires 256 bytes reserved and mapped for the SGT */ +#define DPAA_SGT_SIZE 256 + /* Values for the L3R field of the FM Parse Results */ /* L3 Type field: First IP Present IPv4 */ @@ -1617,8 +1620,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { nr_frags = skb_shinfo(skb)->nr_frags; - dma_unmap_single(dev, addr, qm_fd_get_offset(fd) + - sizeof(struct qm_sg_entry) * (1 + nr_frags), + dma_unmap_single(dev, addr, + qm_fd_get_offset(fd) + DPAA_SGT_SIZE, dma_dir); /* The sgt buffer has been allocated with netdev_alloc_frag(), @@ -1903,8 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, void *sgt_buf; /* get a page frag to store the SGTable */ - sz = SKB_DATA_ALIGN(priv->tx_headroom + - sizeof(struct qm_sg_entry) * (1 + nr_frags)); + sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE); sgt_buf = netdev_alloc_frag(sz); if (unlikely(!sgt_buf)) { netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n", @@ -1972,9 +1974,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, skbh = (struct sk_buff **)buffer_start; *skbh = skb; - addr = dma_map_single(dev, buffer_start, priv->tx_headroom + - sizeof(struct qm_sg_entry) * (1 + nr_frags), - dma_dir); + addr = dma_map_single(dev, buffer_start, + priv->tx_headroom + DPAA_SGT_SIZE, dma_dir); if (unlikely(dma_mapping_error(dev, addr))) { dev_err(dev, "DMA mapping failed"); err = -EINVAL; diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index ce6e24c74978..ecbf6187e13a 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -324,6 +324,10 @@ struct fman_port_qmi_regs { #define HWP_HXS_PHE_REPORT 0x00000800 #define HWP_HXS_PCAC_PSTAT 0x00000100 #define HWP_HXS_PCAC_PSTOP 0x00000001 +#define HWP_HXS_TCP_OFFSET 0xA +#define HWP_HXS_UDP_OFFSET 0xB +#define HWP_HXS_SH_PAD_REM 0x80000000 + struct fman_port_hwp_regs { struct { u32 ssa; /* Soft Sequence Attachment */ @@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port) iowrite32be(0xffffffff, &regs->pmda[i].lcv); } + /* Short packet padding removal from checksum calculation */ + iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_TCP_OFFSET].ssa); + iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_UDP_OFFSET].ssa); + start_port_hwp(port); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index e2e5cdc7119c..4c0f7eda1166 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -439,6 +439,7 @@ static void rx_free_irq(struct hinic_rxq *rxq) { struct hinic_rq *rq = rxq->rq; + irq_set_affinity_hint(rq->irq, NULL); free_irq(rq->irq, rxq); rx_del_napi(rxq); } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index d0e196bff081..ffe7acbeaa22 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -329,7 +329,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, return; failure: -
dev_info(dev, "replenish pools failure\n"); + if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED) + dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n"); pool->free_map[pool->next_free] = index; pool->rx_buff[index].skb = NULL; @@ -1617,7 +1618,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) &tx_crq); } if (lpar_rc != H_SUCCESS) { - dev_err(dev, "tx failed with code %ld\n", lpar_rc); + if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER) + dev_err_ratelimited(dev, "tx: send failed\n"); dev_kfree_skb_any(skb); tx_buff->skb = NULL; @@ -1825,8 +1827,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, rc = ibmvnic_login(netdev); if (rc) { - adapter->state = VNIC_PROBED; - return 0; + adapter->state = reset_state; + return rc; } if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM || @@ -3204,6 +3206,25 @@ static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter) return crq; } +static void print_subcrq_error(struct device *dev, int rc, const char *func) +{ + switch (rc) { + case H_PARAMETER: + dev_warn_ratelimited(dev, + "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n", + func, rc); + break; + case H_CLOSED: + dev_warn_ratelimited(dev, + "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n", + func, rc); + break; + default: + dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc); + break; + } +} + static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, union sub_crq *sub_crq) { @@ -3230,11 +3251,8 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, cpu_to_be64(u64_crq[2]), cpu_to_be64(u64_crq[3])); - if (rc) { - if (rc == H_CLOSED) - dev_warn(dev, "CRQ Queue closed\n"); - dev_err(dev, "Send error (rc=%d)\n", rc); - } + if (rc) + print_subcrq_error(dev, rc, __func__); return rc; } @@ -3252,11 +3270,8 @@ static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, cpu_to_be64(remote_handle), ioba, num_entries); - if (rc) { - if (rc == H_CLOSED) - dev_warn(dev, "CRQ Queue closed\n"); - dev_err(dev, "Send (indirect) error (rc=%d)\n", rc); - } + if (rc) + print_subcrq_error(dev, rc, __func__); return rc; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index ed6dbcfd4e96..b151ae316546 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2199,9 +2199,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, return true; } -#define I40E_XDP_PASS 0 -#define I40E_XDP_CONSUMED 1 -#define I40E_XDP_TX 2 +#define I40E_XDP_PASS 0 +#define I40E_XDP_CONSUMED BIT(0) +#define I40E_XDP_TX BIT(1) +#define I40E_XDP_REDIR BIT(2) static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, struct i40e_ring *xdp_ring); @@ -2248,7 +2249,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, break; case XDP_REDIRECT: err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED; + result = !err ? 
I40E_XDP_REDIR : I40E_XDP_CONSUMED; break; default: bpf_warn_invalid_xdp_action(act); @@ -2311,7 +2312,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) unsigned int total_rx_bytes = 0, total_rx_packets = 0; struct sk_buff *skb = rx_ring->skb; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - bool failure = false, xdp_xmit = false; + unsigned int xdp_xmit = 0; + bool failure = false; struct xdp_buff xdp; xdp.rxq = &rx_ring->xdp_rxq; @@ -2372,8 +2374,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) } if (IS_ERR(skb)) { - if (PTR_ERR(skb) == -I40E_XDP_TX) { - xdp_xmit = true; + unsigned int xdp_res = -PTR_ERR(skb); + + if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) { + xdp_xmit |= xdp_res; i40e_rx_buffer_flip(rx_ring, rx_buffer, size); } else { rx_buffer->pagecnt_bias++; @@ -2427,12 +2431,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) total_rx_packets++; } - if (xdp_xmit) { + if (xdp_xmit & I40E_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_xmit & I40E_XDP_TX) { struct i40e_ring *xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; i40e_xdp_ring_update_tail(xdp_ring); - xdp_do_flush_map(); } rx_ring->skb = skb; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 3f5c350716bb..0bd1294ba517 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1871,7 +1871,12 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, if (enable_addr != 0) rar_high |= IXGBE_RAH_AV; + /* Record lower 32 bits of MAC address and then make + * sure that write is flushed to hardware before writing + * the upper 16 bits and setting the valid bit. + */ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); + IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); return 0; @@ -1903,8 +1908,13 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); - IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); + /* Clear the address valid bit and upper 16 bits of the address + * before clearing the lower bits. This way we aren't updating + * a live filter. 
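The i40e hunk above (and the matching ixgbe change that follows) turns the per-packet XDP verdicts into independent bit flags so one NAPI poll can record both locally transmitted and redirected frames, then performs xdp_do_flush_map() only when redirects happened and the TX tail bump only when frames were queued on the XDP ring. A standalone sketch of accumulating and acting on such verdict bits, mirroring the new bit values:

#include <stdio.h>

#define XDP_RES_CONSUMED (1u << 0)    /* mirrors I40E_XDP_CONSUMED */
#define XDP_RES_TX       (1u << 1)    /* mirrors I40E_XDP_TX       */
#define XDP_RES_REDIR    (1u << 2)    /* mirrors I40E_XDP_REDIR    */

int main(void)
{
    unsigned int verdicts[] = { XDP_RES_TX, XDP_RES_REDIR, XDP_RES_CONSUMED };
    unsigned int xdp_xmit = 0;

    /* accumulate per-packet verdicts over one polling pass */
    for (unsigned int i = 0; i < sizeof(verdicts) / sizeof(verdicts[0]); i++)
        if (verdicts[i] & (XDP_RES_TX | XDP_RES_REDIR))
            xdp_xmit |= verdicts[i];

    if (xdp_xmit & XDP_RES_REDIR)
        printf("flush redirect maps\n");
    if (xdp_xmit & XDP_RES_TX)
        printf("bump XDP TX ring tail\n");
    return 0;
}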
+ */ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); /* clear VMDq pool/queue selection for this RAR */ hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index c116f459945d..da4322e4daed 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -839,7 +839,7 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, } itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; - if (unlikely(itd->sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) { + if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) { netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n", __func__, itd->sa_idx, xs->xso.offload_handle); return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 3e87dbbc9024..62e57b05a0ae 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2186,9 +2186,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, return skb; } -#define IXGBE_XDP_PASS 0 -#define IXGBE_XDP_CONSUMED 1 -#define IXGBE_XDP_TX 2 +#define IXGBE_XDP_PASS 0 +#define IXGBE_XDP_CONSUMED BIT(0) +#define IXGBE_XDP_TX BIT(1) +#define IXGBE_XDP_REDIR BIT(2) static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, struct xdp_frame *xdpf); @@ -2225,7 +2226,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, case XDP_REDIRECT: err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); if (!err) - result = IXGBE_XDP_TX; + result = IXGBE_XDP_REDIR; else result = IXGBE_XDP_CONSUMED; break; @@ -2285,7 +2286,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, unsigned int mss = 0; #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); - bool xdp_xmit = false; + unsigned int xdp_xmit = 0; struct xdp_buff xdp; xdp.rxq = &rx_ring->xdp_rxq; @@ -2328,8 +2329,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } if (IS_ERR(skb)) { - if (PTR_ERR(skb) == -IXGBE_XDP_TX) { - xdp_xmit = true; + unsigned int xdp_res = -PTR_ERR(skb); + + if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) { + xdp_xmit |= xdp_res; ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size); } else { rx_buffer->pagecnt_bias++; @@ -2401,7 +2404,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, total_rx_packets++; } - if (xdp_xmit) { + if (xdp_xmit & IXGBE_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_xmit & IXGBE_XDP_TX) { struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; /* Force memory writes to complete before letting h/w @@ -2409,8 +2415,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, */ wmb(); writel(ring->next_to_use, ring->tail); - - xdp_do_flush_map(); } u64_stats_update_begin(&rx_ring->syncp); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 9f54ccbddea7..3360f7b9ee73 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -474,10 +474,10 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, { const struct mlx4_en_frag_info *frag_info = priv->frag_info; unsigned int truesize = 0; + bool release = true; int nr, frag_size; struct page *page; dma_addr_t dma; - bool release; /* Collect used fragments while replacing them in the HW descriptors */ for (nr = 0;; 
frags++) { @@ -500,7 +500,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, release = page_count(page) != 1 || page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id(); - } else { + } else if (!priv->rx_headroom) { + /* rx_headroom for non XDP setup is always 0. + * When XDP is set, the above condition will + * guarantee page is always released. + */ u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES); frags->page_offset += sz_align; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 487388aed98f..384c1fa49081 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -807,6 +807,7 @@ static void cmd_work_handler(struct work_struct *work) unsigned long flags; bool poll_cmd = ent->polling; int alloc_ret; + int cmd_mode; sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; down(sem); @@ -853,6 +854,7 @@ static void cmd_work_handler(struct work_struct *work) set_signature(ent, !cmd->checksum_disabled); dump_command(dev, ent, 1); ent->ts1 = ktime_get_ns(); + cmd_mode = cmd->mode; if (ent->callback) schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); @@ -877,7 +879,7 @@ static void cmd_work_handler(struct work_struct *work) iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); mmiowb(); /* if not in polling don't use ent after this point */ - if (cmd->mode == CMD_MODE_POLLING || poll_cmd) { + if (cmd_mode == CMD_MODE_POLLING || poll_cmd) { poll_timeout(ent); /* make sure we read the descriptor after ownership is SW */ rmb(); @@ -1276,7 +1278,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf, { struct mlx5_core_dev *dev = filp->private_data; struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; - char outlen_str[8]; + char outlen_str[8] = {0}; int outlen; void *ptr; int err; @@ -1291,8 +1293,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf, if (copy_from_user(outlen_str, buf, count)) return -EFAULT; - outlen_str[7] = 0; - err = sscanf(outlen_str, "%d", &outlen); if (err < 0) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 56c1b6f5593e..dae4156a710d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2846,7 +2846,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) mlx5e_activate_channels(&priv->channels); netif_tx_start_all_queues(priv->netdev); - if (MLX5_VPORT_MANAGER(priv->mdev)) + if (MLX5_ESWITCH_MANAGER(priv->mdev)) mlx5e_add_sqs_fwd_rules(priv); mlx5e_wait_channels_min_rx_wqes(&priv->channels); @@ -2857,7 +2857,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) { mlx5e_redirect_rqts_to_drop(priv); - if (MLX5_VPORT_MANAGER(priv->mdev)) + if (MLX5_ESWITCH_MANAGER(priv->mdev)) mlx5e_remove_sqs_fwd_rules(priv); /* FIXME: This is a W/A only for tx timeout watch dog false alarm when @@ -4597,7 +4597,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_set_netdev_dev_addr(netdev); #if IS_ENABLED(CONFIG_MLX5_ESWITCH) - if (MLX5_VPORT_MANAGER(mdev)) + if (MLX5_ESWITCH_MANAGER(mdev)) netdev->switchdev_ops = &mlx5e_switchdev_ops; #endif @@ -4753,7 +4753,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) mlx5e_enable_async_events(priv); - if (MLX5_VPORT_MANAGER(priv->mdev)) + if (MLX5_ESWITCH_MANAGER(priv->mdev)) mlx5e_register_vport_reps(priv); if (netdev->reg_state != NETREG_REGISTERED) @@ -4788,7 +4788,7 @@ 
static void mlx5e_nic_disable(struct mlx5e_priv *priv) queue_work(priv->wq, &priv->set_rx_mode_work); - if (MLX5_VPORT_MANAGER(priv->mdev)) + if (MLX5_ESWITCH_MANAGER(priv->mdev)) mlx5e_unregister_vport_reps(priv); mlx5e_disable_async_events(priv); @@ -4972,7 +4972,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) return NULL; #ifdef CONFIG_MLX5_ESWITCH - if (MLX5_VPORT_MANAGER(mdev)) { + if (MLX5_ESWITCH_MANAGER(mdev)) { rpriv = mlx5e_alloc_nic_rep_priv(mdev); if (!rpriv) { mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 57987f6546e8..2b8040a3cdbd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -823,7 +823,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep; - if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) + if (!MLX5_ESWITCH_MANAGER(priv->mdev)) return false; rep = rpriv->rep; @@ -837,8 +837,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; - struct mlx5_eswitch_rep *rep = rpriv->rep; + struct mlx5_eswitch_rep *rep; + if (!MLX5_ESWITCH_MANAGER(priv->mdev)) + return false; + + rep = rpriv->rep; if (rep && rep->vport != FDB_UPLINK_VPORT) return true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index f63dfbcd29fe..b79d74860a30 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1594,17 +1594,15 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) } /* Public E-Switch API */ -#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev)) +#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev)) + int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { int err; int i, enabled_events; - if (!ESW_ALLOWED(esw)) - return 0; - - if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) || + if (!ESW_ALLOWED(esw) || !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); return -EOPNOTSUPP; @@ -1806,7 +1804,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, u64 node_guid; int err = 0; - if (!ESW_ALLOWED(esw)) + if (!MLX5_CAP_GEN(esw->dev, vport_group_manager)) return -EPERM; if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac)) return -EINVAL; @@ -1883,7 +1881,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, { struct mlx5_vport *evport; - if (!ESW_ALLOWED(esw)) + if (!MLX5_CAP_GEN(esw->dev, vport_group_manager)) return -EPERM; if (!LEGAL_VPORT(esw, vport)) return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index cecd201f0b73..91f1209886ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1079,8 +1079,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink) if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) return -EOPNOTSUPP; - if (!MLX5_CAP_GEN(dev, vport_group_manager)) - return -EOPNOTSUPP; + if(!MLX5_ESWITCH_MANAGER(dev)) + return -EPERM; if (dev->priv.eswitch->mode == SRIOV_NONE) return -EOPNOTSUPP; diff 
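
/*
 * Sketch of the ordering fix in the mlx5 en_rep hunk above: bail out on the
 * eswitch-manager check before touching rpriv->rep, since the representor
 * private data is only fully populated when the device actually manages the
 * e-switch.  The struct layout and the capability flag are simplified
 * stand-ins, not the driver's real types.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct rep      { int vport; };
struct rep_priv { struct rep *rep; };
struct priv     { bool esw_manager; struct rep_priv *ppriv; };

static bool is_vf_vport_rep(const struct priv *priv)
{
	const struct rep_priv *rpriv = priv->ppriv;
	const struct rep *rep;

	if (!priv->esw_manager)        /* guard first ... */
		return false;

	rep = rpriv->rep;              /* ... dereference second */
	return rep && rep->vport != 0; /* 0 standing in for the uplink vport */
}

int main(void)
{
	struct priv vf_only = { .esw_manager = false, .ppriv = NULL };

	printf("%d\n", is_vf_vport_rep(&vf_only)); /* safe: prints 0 */
	return 0;
}
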
--git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 49a75d31185e..f1a86cea86a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -32,6 +32,7 @@ #include <linux/mutex.h> #include <linux/mlx5/driver.h> +#include <linux/mlx5/eswitch.h> #include "mlx5_core.h" #include "fs_core.h" @@ -2652,7 +2653,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) goto err; } - if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { + if (MLX5_ESWITCH_MANAGER(dev)) { if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) { err = init_fdb_root_ns(steering); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index afd9f4fa22f4..41ad24f0de2c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -32,6 +32,7 @@ #include <linux/mlx5/driver.h> #include <linux/mlx5/cmd.h> +#include <linux/mlx5/eswitch.h> #include <linux/module.h> #include "mlx5_core.h" #include "../../mlxfw/mlxfw.h" @@ -159,13 +160,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) } if (MLX5_CAP_GEN(dev, vport_group_manager) && - MLX5_CAP_GEN(dev, eswitch_flow_table)) { + MLX5_ESWITCH_MANAGER(dev)) { err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE); if (err) return err; } - if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { + if (MLX5_ESWITCH_MANAGER(dev)) { err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c index 7cb67122e8b5..98359559c77e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c @@ -33,6 +33,7 @@ #include <linux/etherdevice.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/mlx5_ifc.h> +#include <linux/mlx5/eswitch.h> #include "mlx5_core.h" #include "lib/mpfs.h" @@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev) int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); struct mlx5_mpfs *mpfs; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL); @@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) { struct mlx5_mpfs *mpfs = dev->priv.mpfs; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return; WARN_ON(!hlist_empty(mpfs->hash)); @@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) u32 index; int err; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; mutex_lock(&mpfs->lock); @@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) int err = 0; u32 index; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; mutex_lock(&mpfs->lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index fa9d0760dd36..31a9cbd85689 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc); static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(qtct_reg)]; + u32 out[MLX5_ST_SZ_DW(qetc_reg)]; if (!MLX5_CAP_GEN(mdev, ets)) return -EOPNOTSUPP; @@ -713,7 +713,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, static int mlx5_query_port_qetcr_reg(struct 
mlx5_core_dev *mdev, u32 *out, int outlen) { - u32 in[MLX5_ST_SZ_DW(qtct_reg)]; + u32 in[MLX5_ST_SZ_DW(qetc_reg)]; if (!MLX5_CAP_GEN(mdev, ets)) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 2a8b529ce6dd..a0674962f02c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) return -EBUSY; } + if (!MLX5_ESWITCH_MANAGER(dev)) + goto enable_vfs_hca; + err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY); if (err) { mlx5_core_warn(dev, @@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) return err; } +enable_vfs_hca: for (vf = 0; vf < num_vfs; vf++) { err = mlx5_core_enable_hca(dev, vf + 1); if (err) { @@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev) } out: - mlx5_eswitch_disable_sriov(dev->priv.eswitch); + if (MLX5_ESWITCH_MANAGER(dev)) + mlx5_eswitch_disable_sriov(dev->priv.eswitch); if (mlx5_wait_for_vf_pages(dev)) mlx5_core_warn(dev, "timeout reclaiming VFs pages\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 719cecb182c6..7eecd5b07bb1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -549,8 +549,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, return -EINVAL; if (!MLX5_CAP_GEN(mdev, vport_group_manager)) return -EACCES; - if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify)) - return -EOPNOTSUPP; in = kvzalloc(inlen, GFP_KERNEL); if (!in) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 6aaaf3d9ba31..77b2adb29341 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -4756,6 +4756,12 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6) kfree(mlxsw_sp_rt6); } +static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt) +{ + /* RTF_CACHE routes are ignored */ + return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY; +} + static struct fib6_info * mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) { @@ -4765,11 +4771,11 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) static struct mlxsw_sp_fib6_entry * mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, - const struct fib6_info *nrt, bool append) + const struct fib6_info *nrt, bool replace) { struct mlxsw_sp_fib6_entry *fib6_entry; - if (!append) + if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace) return NULL; list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { @@ -4784,7 +4790,8 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, break; if (rt->fib6_metric < nrt->fib6_metric) continue; - if (rt->fib6_metric == nrt->fib6_metric) + if (rt->fib6_metric == nrt->fib6_metric && + mlxsw_sp_fib6_rt_can_mp(rt)) return fib6_entry; if (rt->fib6_metric > nrt->fib6_metric) break; @@ -5163,7 +5170,7 @@ static struct mlxsw_sp_fib6_entry * mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, const struct fib6_info *nrt, bool replace) { - struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL; 
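
/*
 * Sketch of the "remember a fallback while scanning a sorted list" shape used
 * by the mlxsw fib6 lookup rework above: prefer an exact match, but keep the
 * first acceptable candidate so it can be returned if nothing better appears
 * before the metric ordering says to stop.  The entry type and the match
 * predicate are illustrative, not the driver's fib6 structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct entry { int metric; bool multipath; };

static const struct entry *
find_entry(const struct entry *tbl, int n, int metric, bool want_mp)
{
	const struct entry *fallback = NULL;

	for (int i = 0; i < n; i++) {
		const struct entry *e = &tbl[i];

		if (e->metric == metric) {
			if (e->multipath == want_mp)
				return e;              /* exact match wins */
			if (!fallback)
				fallback = e;          /* first usable stand-in */
		}
		if (e->metric > metric)                /* list is sorted: stop */
			return fallback ? fallback : e;
	}
	return fallback;
}

int main(void)
{
	const struct entry tbl[] = { {5, false}, {10, false}, {10, true}, {20, true} };
	const struct entry *e = find_entry(tbl, 4, 10, true);

	printf("metric=%d mp=%d\n", e->metric, e->multipath);   /* 10 1 */
	return 0;
}
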
list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); @@ -5172,13 +5179,18 @@ mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, continue; if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id) break; - if (replace && rt->fib6_metric == nrt->fib6_metric) - return fib6_entry; + if (replace && rt->fib6_metric == nrt->fib6_metric) { + if (mlxsw_sp_fib6_rt_can_mp(rt) == + mlxsw_sp_fib6_rt_can_mp(nrt)) + return fib6_entry; + if (mlxsw_sp_fib6_rt_can_mp(nrt)) + fallback = fallback ?: fib6_entry; + } if (rt->fib6_metric > nrt->fib6_metric) - return fib6_entry; + return fallback ?: fib6_entry; } - return NULL; + return fallback; } static int @@ -5304,8 +5316,7 @@ static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, - struct fib6_info *rt, bool replace, - bool append) + struct fib6_info *rt, bool replace) { struct mlxsw_sp_fib6_entry *fib6_entry; struct mlxsw_sp_fib_node *fib_node; @@ -5331,7 +5342,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, /* Before creating a new entry, try to append route to an existing * multipath entry. */ - fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, append); + fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace); if (fib6_entry) { err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt); if (err) @@ -5339,14 +5350,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, return 0; } - /* We received an append event, yet did not find any route to - * append to. - */ - if (WARN_ON(append)) { - err = -EINVAL; - goto err_fib6_entry_append; - } - fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt); if (IS_ERR(fib6_entry)) { err = PTR_ERR(fib6_entry); @@ -5364,7 +5367,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, err_fib6_node_entry_link: mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); err_fib6_entry_create: -err_fib6_entry_append: err_fib6_entry_nexthop_add: mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); return err; @@ -5715,7 +5717,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) struct mlxsw_sp_fib_event_work *fib_work = container_of(work, struct mlxsw_sp_fib_event_work, work); struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; - bool replace, append; + bool replace; int err; rtnl_lock(); @@ -5726,10 +5728,8 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) case FIB_EVENT_ENTRY_APPEND: /* fall through */ case FIB_EVENT_ENTRY_ADD: replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; - append = fib_work->event == FIB_EVENT_ENTRY_APPEND; err = mlxsw_sp_router_fib6_add(mlxsw_sp, - fib_work->fen6_info.rt, replace, - append); + fib_work->fen6_info.rt, replace); if (err) mlxsw_sp_router_fib_abort(mlxsw_sp); mlxsw_sp_rt6_release(fib_work->fen6_info.rt); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index fcdfb8e7fdea..40216d56dddc 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -81,10 +81,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn, ret = nfp_net_bpf_offload(nn, prog, running, extack); /* Stop offload if replace not possible */ - if (ret && prog) - nfp_bpf_xdp_offload(app, nn, NULL, extack); + if (ret) + return ret; - nn->dp.bpf_offload_xdp = prog && !ret; + nn->dp.bpf_offload_xdp = !!prog; return ret; } @@ -202,6 
+202,9 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev, if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; + if (tcf_block_shared(f->block)) + return -EOPNOTSUPP; + switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 91935405f586..84f7a5dbea9d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -123,6 +123,20 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame, NFP_FLOWER_MASK_MPLS_Q; frame->mpls_lse = cpu_to_be32(t_mpls); + } else if (dissector_uses_key(flow->dissector, + FLOW_DISSECTOR_KEY_BASIC)) { + /* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q + * bit, which indicates an mpls ether type but without any + * mpls fields. + */ + struct flow_dissector_key_basic *key_basic; + + key_basic = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_BASIC, + flow->key); + if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) || + key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) + frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); } } diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index c42e64f32333..525057bee0ed 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -264,6 +264,14 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, case cpu_to_be16(ETH_P_ARP): return -EOPNOTSUPP; + case cpu_to_be16(ETH_P_MPLS_UC): + case cpu_to_be16(ETH_P_MPLS_MC): + if (!(key_layer & NFP_FLOWER_LAYER_MAC)) { + key_layer |= NFP_FLOWER_LAYER_MAC; + key_size += sizeof(struct nfp_flower_mac_mpls); + } + break; + /* Will be included in layer 2. 
*/ case cpu_to_be16(ETH_P_8021Q): break; @@ -623,6 +631,9 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; + if (tcf_block_shared(f->block)) + return -EOPNOTSUPP; + switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 46b76d5a726c..152283d7e59c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -240,7 +240,6 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) return pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs); pf->limit_vfs = ~0; - pci_sriov_set_totalvfs(pf->pdev, 0); /* 0 is unset */ /* Allow any setting for backwards compatibility if symbol not found */ if (err == -ENOENT) return 0; @@ -668,7 +667,7 @@ static int nfp_pci_probe(struct pci_dev *pdev, err = nfp_net_pci_probe(pf); if (err) - goto err_sriov_unlimit; + goto err_fw_unload; err = nfp_hwmon_register(pf); if (err) { @@ -680,8 +679,6 @@ static int nfp_pci_probe(struct pci_dev *pdev, err_net_remove: nfp_net_pci_remove(pf); -err_sriov_unlimit: - pci_sriov_set_totalvfs(pf->pdev, 0); err_fw_unload: kfree(pf->rtbl); nfp_mip_close(pf->mip); @@ -715,7 +712,6 @@ static void nfp_pci_remove(struct pci_dev *pdev) nfp_hwmon_unregister(pf); nfp_pcie_sriov_disable(pdev); - pci_sriov_set_totalvfs(pf->pdev, 0); nfp_net_pci_remove(pf); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c index cd34097b79f1..37a6d7822a38 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c @@ -232,7 +232,7 @@ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp) err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res), nfp_resource_address(state->res), fwinf, sizeof(*fwinf)); - if (err < sizeof(*fwinf)) + if (err < (int)sizeof(*fwinf)) goto err_release; if (!nffw_res_flg_init_get(fwinf)) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 00db3401b898..1dfaccd151f0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -502,6 +502,7 @@ enum BAR_ID { struct qed_nvm_image_info { u32 num_images; struct bist_nvm_image_att *image_att; + bool valid; }; #define DRV_MODULE_VERSION \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index f0b01385d5cb..e0680ce91328 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -709,9 +709,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn, p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE]; memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id, - ARRAY_SIZE(p_local->local_chassis_id)); + sizeof(p_local->local_chassis_id)); memcpy(params->lldp_local.local_port_id, p_local->local_port_id, - ARRAY_SIZE(p_local->local_port_id)); + sizeof(p_local->local_port_id)); } static void @@ -723,9 +723,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn, p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE]; memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id, - ARRAY_SIZE(p_remote->peer_chassis_id)); + sizeof(p_remote->peer_chassis_id)); memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id, - 
ARRAY_SIZE(p_remote->peer_port_id)); + sizeof(p_remote->peer_port_id)); } static int diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index a14e48489029..4340c4c90bcb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -6723,7 +6723,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, format_idx = header & MFW_TRACE_EVENTID_MASK; /* Skip message if its index doesn't exist in the meta data */ - if (format_idx > s_mcp_trace_meta.formats_num) { + if (format_idx >= s_mcp_trace_meta.formats_num) { u8 format_size = (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >> MFW_TRACE_PRM_SIZE_SHIFT); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 329781cda77f..e5249b4741d0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1804,7 +1804,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) DP_INFO(p_hwfn, "Failed to update driver state\n"); rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, - QED_OV_ESWITCH_VEB); + QED_OV_ESWITCH_NONE); if (rc) DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 5c10fd7210c3..758a9a5127fa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -371,7 +371,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev, goto err2; } - DP_INFO(cdev, "qed_probe completed successffuly\n"); + DP_INFO(cdev, "qed_probe completed successfully\n"); return cdev; @@ -789,6 +789,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, /* We want a minimum of one slowpath and one fastpath vector per hwfn */ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; + if (is_kdump_kernel()) { + DP_INFO(cdev, + "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n", + cdev->int_params.in.min_msix_cnt); + cdev->int_params.in.num_vectors = + cdev->int_params.in.min_msix_cnt; + } + rc = qed_set_int_mode(cdev, false); if (rc) { DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 4e0b443c9519..9d9e533bccdc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -592,6 +592,9 @@ int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn, *o_mcp_resp = mb_params.mcp_resp; *o_mcp_param = mb_params.mcp_param; + /* nvm_info needs to be updated */ + p_hwfn->nvm_info.valid = false; + return 0; } @@ -2555,11 +2558,14 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn) { - struct qed_nvm_image_info *nvm_info = &p_hwfn->nvm_info; + struct qed_nvm_image_info nvm_info; struct qed_ptt *p_ptt; int rc; u32 i; + if (p_hwfn->nvm_info.valid) + return 0; + p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) { DP_ERR(p_hwfn, "failed to acquire ptt\n"); @@ -2567,29 +2573,29 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn) } /* Acquire from MFW the amount of available images */ - nvm_info->num_images = 0; + nvm_info.num_images = 0; rc = qed_mcp_bist_nvm_get_num_images(p_hwfn, - p_ptt, &nvm_info->num_images); + p_ptt, &nvm_info.num_images); if (rc == -EOPNOTSUPP) { DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n"); goto out; - } else if (rc || !nvm_info->num_images) { 
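
/*
 * The qed_dcbx hunk above replaces ARRAY_SIZE() with sizeof() as the memcpy
 * length.  For any array whose elements are wider than one byte the two
 * differ, and passing the element count silently truncates the copy.  A
 * minimal demonstration with a stand-in chassis-id field:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	uint32_t src[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
	uint32_t dst[4] = { 0 };

	memcpy(dst, src, ARRAY_SIZE(src));   /* BUG: copies 4 bytes, not 16 */
	printf("dst[3] after ARRAY_SIZE copy: 0x%08x\n", dst[3]);

	memcpy(dst, src, sizeof(src));       /* correct: copies all 16 bytes */
	printf("dst[3] after sizeof copy:     0x%08x\n", dst[3]);
	return 0;
}
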
+ } else if (rc || !nvm_info.num_images) { DP_ERR(p_hwfn, "Failed getting number of images\n"); goto err0; } - nvm_info->image_att = kmalloc_array(nvm_info->num_images, - sizeof(struct bist_nvm_image_att), - GFP_KERNEL); - if (!nvm_info->image_att) { + nvm_info.image_att = kmalloc_array(nvm_info.num_images, + sizeof(struct bist_nvm_image_att), + GFP_KERNEL); + if (!nvm_info.image_att) { rc = -ENOMEM; goto err0; } /* Iterate over images and get their attributes */ - for (i = 0; i < nvm_info->num_images; i++) { + for (i = 0; i < nvm_info.num_images; i++) { rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt, - &nvm_info->image_att[i], i); + &nvm_info.image_att[i], i); if (rc) { DP_ERR(p_hwfn, "Failed getting image index %d attributes\n", i); @@ -2597,14 +2603,22 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn) } DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i, - nvm_info->image_att[i].len); + nvm_info.image_att[i].len); } out: + /* Update hwfn's nvm_info */ + if (nvm_info.num_images) { + p_hwfn->nvm_info.num_images = nvm_info.num_images; + kfree(p_hwfn->nvm_info.image_att); + p_hwfn->nvm_info.image_att = nvm_info.image_att; + p_hwfn->nvm_info.valid = true; + } + qed_ptt_release(p_hwfn, p_ptt); return 0; err1: - kfree(nvm_info->image_att); + kfree(nvm_info.image_att); err0: qed_ptt_release(p_hwfn, p_ptt); return rc; @@ -2641,6 +2655,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, return -EINVAL; } + qed_mcp_nvm_info_populate(p_hwfn); for (i = 0; i < p_hwfn->nvm_info.num_images; i++) if (type == p_hwfn->nvm_info.image_att[i].image_type) break; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index f01bf52bc381..fd59cf45f4be 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -4513,6 +4513,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn, static int qed_sriov_enable(struct qed_dev *cdev, int num) { struct qed_iov_vf_init_params params; + struct qed_hwfn *hwfn; + struct qed_ptt *ptt; int i, j, rc; if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { @@ -4525,8 +4527,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num) /* Initialize HW for VF access */ for_each_hwfn(cdev, j) { - struct qed_hwfn *hwfn = &cdev->hwfns[j]; - struct qed_ptt *ptt = qed_ptt_acquire(hwfn); + hwfn = &cdev->hwfns[j]; + ptt = qed_ptt_acquire(hwfn); /* Make sure not to use more than 16 queues per VF */ params.num_queues = min_t(int, @@ -4562,6 +4564,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num) goto err; } + hwfn = QED_LEADING_HWFN(cdev); + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_ERR(hwfn, "Failed to acquire ptt\n"); + rc = -EBUSY; + goto err; + } + + rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB); + if (rc) + DP_INFO(cdev, "Failed to update eswitch mode\n"); + qed_ptt_release(hwfn, ptt); + return num; err: diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 02adb513f475..013ff567283c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info) { struct qede_ptp *ptp = edev->ptp; - if (!ptp) - return -EIO; + if (!ptp) { + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + + return 0; + } info->so_timestamping = 
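
/*
 * Sketch of the "fill a scratch copy, publish only on success" rework in the
 * qed_mcp nvm_info hunk above: the image-attribute array is built in a local
 * structure, and the cached copy is replaced (and the old allocation freed)
 * only once everything succeeded, so a mid-way failure leaves the previous
 * cache intact.  The query step below is a fake stand-in for the MFW calls.
 */
#include <stdio.h>
#include <stdlib.h>

struct nvm_info { unsigned int num_images; int *image_att; int valid; };

static int query_images(struct nvm_info *tmp)
{
	tmp->num_images = 3;
	tmp->image_att = calloc(tmp->num_images, sizeof(*tmp->image_att));
	return tmp->image_att ? 0 : -1;
}

static int populate(struct nvm_info *cache)
{
	struct nvm_info tmp = { 0 };

	if (cache->valid)                 /* already populated: nothing to do */
		return 0;
	if (query_images(&tmp))
		return -1;                /* cache left untouched on failure */

	free(cache->image_att);           /* commit: swap in the fresh data */
	*cache = tmp;
	cache->valid = 1;
	return 0;
}

int main(void)
{
	struct nvm_info cache = { 0 };

	printf("populate: %d, num_images=%u\n", populate(&cache), cache.num_images);
	free(cache.image_att);
	return 0;
}
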
SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 891f03a7a33d..8d7b9bb910f2 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -1128,6 +1128,8 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp, struct qlcnic_adapter *adapter = dev_get_drvdata(dev); ret = kstrtoul(buf, 16, &data); + if (ret) + return ret; switch (data) { case QLC_83XX_FLASH_SECTOR_ERASE_CMD: diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 5803cd6db406..206f0266463e 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -658,7 +658,7 @@ qcaspi_netdev_open(struct net_device *dev) return ret; } - netif_start_queue(qca->net_dev); + /* SPI thread takes care of TX queue */ return 0; } @@ -760,6 +760,9 @@ qcaspi_netdev_tx_timeout(struct net_device *dev) qca->net_dev->stats.tx_errors++; /* Trigger tx queue flush and QCA7000 reset */ qca->sync = QCASPI_SYNC_UNKNOWN; + + if (qca->spi_thread) + wake_up_process(qca->spi_thread); } static int @@ -878,22 +881,22 @@ qca_spi_probe(struct spi_device *spi) if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) || (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) { - dev_info(&spi->dev, "Invalid clkspeed: %d\n", - qcaspi_clkspeed); + dev_err(&spi->dev, "Invalid clkspeed: %d\n", + qcaspi_clkspeed); return -EINVAL; } if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) || (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) { - dev_info(&spi->dev, "Invalid burst len: %d\n", - qcaspi_burst_len); + dev_err(&spi->dev, "Invalid burst len: %d\n", + qcaspi_burst_len); return -EINVAL; } if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) || (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) { - dev_info(&spi->dev, "Invalid pluggable: %d\n", - qcaspi_pluggable); + dev_err(&spi->dev, "Invalid pluggable: %d\n", + qcaspi_pluggable); return -EINVAL; } @@ -955,8 +958,8 @@ qca_spi_probe(struct spi_device *spi) } if (register_netdev(qcaspi_devs)) { - dev_info(&spi->dev, "Unable to register net device %s\n", - qcaspi_devs->name); + dev_err(&spi->dev, "Unable to register net device %s\n", + qcaspi_devs->name); free_netdev(qcaspi_devs); return -EFAULT; } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index f4cae2be0fda..a3f69901ac87 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -7789,6 +7789,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_HW_VLAN_CTAG_RX; dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_HIGHDMA; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; tp->cp_cmd |= RxChkSum | RxVlan; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 68f122140966..0d811c02ff34 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -980,6 +980,13 @@ static void ravb_adjust_link(struct net_device *ndev) struct ravb_private *priv = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; bool new_state = false; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* Disable TX and RX right over here, if E-MAC change is ignored */ + if (priv->no_avb_link) + ravb_rcv_snd_disable(ndev); if (phydev->link) { if (phydev->duplex != priv->duplex) { @@ -997,18 +1004,21 @@ static void 
ravb_adjust_link(struct net_device *ndev) ravb_modify(ndev, ECMR, ECMR_TXF, 0); new_state = true; priv->link = phydev->link; - if (priv->no_avb_link) - ravb_rcv_snd_enable(ndev); } } else if (priv->link) { new_state = true; priv->link = 0; priv->speed = 0; priv->duplex = -1; - if (priv->no_avb_link) - ravb_rcv_snd_disable(ndev); } + /* Enable TX and RX right over here, if E-MAC change is ignored */ + if (priv->no_avb_link && phydev->link) + ravb_rcv_snd_enable(ndev); + + mmiowb(); + spin_unlock_irqrestore(&priv->lock, flags); + if (new_state && netif_msg_link(priv)) phy_print_status(phydev); } @@ -1096,75 +1106,6 @@ static int ravb_phy_start(struct net_device *ndev) return 0; } -static int ravb_get_link_ksettings(struct net_device *ndev, - struct ethtool_link_ksettings *cmd) -{ - struct ravb_private *priv = netdev_priv(ndev); - unsigned long flags; - - if (!ndev->phydev) - return -ENODEV; - - spin_lock_irqsave(&priv->lock, flags); - phy_ethtool_ksettings_get(ndev->phydev, cmd); - spin_unlock_irqrestore(&priv->lock, flags); - - return 0; -} - -static int ravb_set_link_ksettings(struct net_device *ndev, - const struct ethtool_link_ksettings *cmd) -{ - struct ravb_private *priv = netdev_priv(ndev); - unsigned long flags; - int error; - - if (!ndev->phydev) - return -ENODEV; - - spin_lock_irqsave(&priv->lock, flags); - - /* Disable TX and RX */ - ravb_rcv_snd_disable(ndev); - - error = phy_ethtool_ksettings_set(ndev->phydev, cmd); - if (error) - goto error_exit; - - if (cmd->base.duplex == DUPLEX_FULL) - priv->duplex = 1; - else - priv->duplex = 0; - - ravb_set_duplex(ndev); - -error_exit: - mdelay(1); - - /* Enable TX and RX */ - ravb_rcv_snd_enable(ndev); - - mmiowb(); - spin_unlock_irqrestore(&priv->lock, flags); - - return error; -} - -static int ravb_nway_reset(struct net_device *ndev) -{ - struct ravb_private *priv = netdev_priv(ndev); - int error = -ENODEV; - unsigned long flags; - - if (ndev->phydev) { - spin_lock_irqsave(&priv->lock, flags); - error = phy_start_aneg(ndev->phydev); - spin_unlock_irqrestore(&priv->lock, flags); - } - - return error; -} - static u32 ravb_get_msglevel(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); @@ -1377,7 +1318,7 @@ static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) } static const struct ethtool_ops ravb_ethtool_ops = { - .nway_reset = ravb_nway_reset, + .nway_reset = phy_ethtool_nway_reset, .get_msglevel = ravb_get_msglevel, .set_msglevel = ravb_set_msglevel, .get_link = ethtool_op_get_link, @@ -1387,8 +1328,8 @@ static const struct ethtool_ops ravb_ethtool_ops = { .get_ringparam = ravb_get_ringparam, .set_ringparam = ravb_set_ringparam, .get_ts_info = ravb_get_ts_info, - .get_link_ksettings = ravb_get_link_ksettings, - .set_link_ksettings = ravb_set_link_ksettings, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, .get_wol = ravb_get_wol, .set_wol = ravb_set_wol, }; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index e9007b613f17..5614fd231bbe 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1927,8 +1927,15 @@ static void sh_eth_adjust_link(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; + unsigned long flags; int new_state = 0; + spin_lock_irqsave(&mdp->lock, flags); + + /* Disable TX and RX right over here, if E-MAC change is ignored */ + if (mdp->cd->no_psr || 
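
/*
 * Sketch of the reshuffled adjust_link flow in the ravb/sh_eth hunks above:
 * the whole update now runs under the driver lock, RX/TX are force-disabled
 * up front when the MAC's own link signal is ignored, and they are re-enabled
 * at the end only if the PHY still reports link up.  The lock and the
 * enable/disable helpers are illustrative userspace stand-ins (build with
 * -pthread), not the drivers' register accessors.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void rcv_snd_disable(void) { puts("RX/TX off"); }
static void rcv_snd_enable(void)  { puts("RX/TX on"); }

static void adjust_link(bool ignore_mac_link, bool phy_link_up,
			int *cur_speed, int new_speed)
{
	pthread_mutex_lock(&lock);

	if (ignore_mac_link)              /* quiesce before touching config */
		rcv_snd_disable();

	if (phy_link_up && *cur_speed != new_speed)
		*cur_speed = new_speed;   /* reprogram speed/duplex here */
	else if (!phy_link_up)
		*cur_speed = 0;

	if (ignore_mac_link && phy_link_up)
		rcv_snd_enable();         /* only back on if link survived */

	pthread_mutex_unlock(&lock);
}

int main(void)
{
	int speed = 0;

	adjust_link(true, true, &speed, 1000);
	printf("speed=%d\n", speed);
	return 0;
}
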
mdp->no_ether_link) + sh_eth_rcv_snd_disable(ndev); + if (phydev->link) { if (phydev->duplex != mdp->duplex) { new_state = 1; @@ -1947,18 +1954,21 @@ static void sh_eth_adjust_link(struct net_device *ndev) sh_eth_modify(ndev, ECMR, ECMR_TXF, 0); new_state = 1; mdp->link = phydev->link; - if (mdp->cd->no_psr || mdp->no_ether_link) - sh_eth_rcv_snd_enable(ndev); } } else if (mdp->link) { new_state = 1; mdp->link = 0; mdp->speed = 0; mdp->duplex = -1; - if (mdp->cd->no_psr || mdp->no_ether_link) - sh_eth_rcv_snd_disable(ndev); } + /* Enable TX and RX right over here, if E-MAC change is ignored */ + if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link) + sh_eth_rcv_snd_enable(ndev); + + mmiowb(); + spin_unlock_irqrestore(&mdp->lock, flags); + if (new_state && netif_msg_link(mdp)) phy_print_status(phydev); } @@ -2030,60 +2040,6 @@ static int sh_eth_phy_start(struct net_device *ndev) return 0; } -static int sh_eth_get_link_ksettings(struct net_device *ndev, - struct ethtool_link_ksettings *cmd) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - unsigned long flags; - - if (!ndev->phydev) - return -ENODEV; - - spin_lock_irqsave(&mdp->lock, flags); - phy_ethtool_ksettings_get(ndev->phydev, cmd); - spin_unlock_irqrestore(&mdp->lock, flags); - - return 0; -} - -static int sh_eth_set_link_ksettings(struct net_device *ndev, - const struct ethtool_link_ksettings *cmd) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - unsigned long flags; - int ret; - - if (!ndev->phydev) - return -ENODEV; - - spin_lock_irqsave(&mdp->lock, flags); - - /* disable tx and rx */ - sh_eth_rcv_snd_disable(ndev); - - ret = phy_ethtool_ksettings_set(ndev->phydev, cmd); - if (ret) - goto error_exit; - - if (cmd->base.duplex == DUPLEX_FULL) - mdp->duplex = 1; - else - mdp->duplex = 0; - - if (mdp->cd->set_duplex) - mdp->cd->set_duplex(ndev); - -error_exit: - mdelay(1); - - /* enable tx and rx */ - sh_eth_rcv_snd_enable(ndev); - - spin_unlock_irqrestore(&mdp->lock, flags); - - return ret; -} - /* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the * version must be bumped as well. 
Just adding registers up to that * limit is fine, as long as the existing register indices don't @@ -2263,22 +2219,6 @@ static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs, pm_runtime_put_sync(&mdp->pdev->dev); } -static int sh_eth_nway_reset(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - unsigned long flags; - int ret; - - if (!ndev->phydev) - return -ENODEV; - - spin_lock_irqsave(&mdp->lock, flags); - ret = phy_start_aneg(ndev->phydev); - spin_unlock_irqrestore(&mdp->lock, flags); - - return ret; -} - static u32 sh_eth_get_msglevel(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -2429,7 +2369,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) static const struct ethtool_ops sh_eth_ethtool_ops = { .get_regs_len = sh_eth_get_regs_len, .get_regs = sh_eth_get_regs, - .nway_reset = sh_eth_nway_reset, + .nway_reset = phy_ethtool_nway_reset, .get_msglevel = sh_eth_get_msglevel, .set_msglevel = sh_eth_set_msglevel, .get_link = ethtool_op_get_link, @@ -2438,8 +2378,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = { .get_sset_count = sh_eth_get_sset_count, .get_ringparam = sh_eth_get_ringparam, .set_ringparam = sh_eth_set_ringparam, - .get_link_ksettings = sh_eth_get_link_ksettings, - .set_link_ksettings = sh_eth_set_link_ksettings, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, .get_wol = sh_eth_get_wol, .set_wol = sh_eth_set_wol, }; diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 23f0785c0573..7eeac3d6cfe8 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -4288,9 +4288,9 @@ static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table, return -EPROTONOSUPPORT; } -static s32 efx_ef10_filter_insert(struct efx_nic *efx, - struct efx_filter_spec *spec, - bool replace_equal) +static s32 efx_ef10_filter_insert_locked(struct efx_nic *efx, + struct efx_filter_spec *spec, + bool replace_equal) { DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); struct efx_ef10_nic_data *nic_data = efx->nic_data; @@ -4307,7 +4307,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx, bool is_mc_recip; s32 rc; - down_read(&efx->filter_sem); + WARN_ON(!rwsem_is_locked(&efx->filter_sem)); table = efx->filter_state; down_write(&table->lock); @@ -4498,10 +4498,22 @@ out_unlock: if (rss_locked) mutex_unlock(&efx->rss_lock); up_write(&table->lock); - up_read(&efx->filter_sem); return rc; } +static s32 efx_ef10_filter_insert(struct efx_nic *efx, + struct efx_filter_spec *spec, + bool replace_equal) +{ + s32 ret; + + down_read(&efx->filter_sem); + ret = efx_ef10_filter_insert_locked(efx, spec, replace_equal); + up_read(&efx->filter_sem); + + return ret; +} + static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) { /* no need to do anything here on EF10 */ @@ -5285,7 +5297,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID); efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); - rc = efx_ef10_filter_insert(efx, &spec, true); + rc = efx_ef10_filter_insert_locked(efx, &spec, true); if (rc < 0) { if (rollback) { netif_info(efx, drv, efx->net_dev, @@ -5314,7 +5326,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 
filter_flags, 0); eth_broadcast_addr(baddr); efx_filter_set_eth_local(&spec, vlan->vid, baddr); - rc = efx_ef10_filter_insert(efx, &spec, true); + rc = efx_ef10_filter_insert_locked(efx, &spec, true); if (rc < 0) { netif_warn(efx, drv, efx->net_dev, "Broadcast filter insert failed rc=%d\n", rc); @@ -5370,7 +5382,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, if (vlan->vid != EFX_FILTER_VID_UNSPEC) efx_filter_set_eth_local(&spec, vlan->vid, NULL); - rc = efx_ef10_filter_insert(efx, &spec, true); + rc = efx_ef10_filter_insert_locked(efx, &spec, true); if (rc < 0) { const char *um = multicast ? "Multicast" : "Unicast"; const char *encap_name = ""; @@ -5430,7 +5442,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, filter_flags, 0); eth_broadcast_addr(baddr); efx_filter_set_eth_local(&spec, vlan->vid, baddr); - rc = efx_ef10_filter_insert(efx, &spec, true); + rc = efx_ef10_filter_insert_locked(efx, &spec, true); if (rc < 0) { netif_warn(efx, drv, efx->net_dev, "Broadcast filter insert failed rc=%d\n", diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 570ec72266f3..ce3a177081a8 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1871,12 +1871,6 @@ static void efx_remove_filters(struct efx_nic *efx) up_write(&efx->filter_sem); } -static void efx_restore_filters(struct efx_nic *efx) -{ - down_read(&efx->filter_sem); - efx->type->filter_table_restore(efx); - up_read(&efx->filter_sem); -} /************************************************************************** * @@ -2688,6 +2682,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method) efx_disable_interrupts(efx); mutex_lock(&efx->mac_lock); + down_write(&efx->filter_sem); mutex_lock(&efx->rss_lock); if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && method != RESET_TYPE_DATAPATH) @@ -2745,9 +2740,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) if (efx->type->rx_restore_rss_contexts) efx->type->rx_restore_rss_contexts(efx); mutex_unlock(&efx->rss_lock); - down_read(&efx->filter_sem); - efx_restore_filters(efx); - up_read(&efx->filter_sem); + efx->type->filter_table_restore(efx); + up_write(&efx->filter_sem); if (efx->type->sriov_reset) efx->type->sriov_reset(efx); @@ -2764,6 +2758,7 @@ fail: efx->port_initialized = false; mutex_unlock(&efx->rss_lock); + up_write(&efx->filter_sem); mutex_unlock(&efx->mac_lock); return rc; @@ -3473,7 +3468,9 @@ static int efx_pci_probe_main(struct efx_nic *efx) efx_init_napi(efx); + down_write(&efx->filter_sem); rc = efx->type->init(efx); + up_write(&efx->filter_sem); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to initialise NIC\n"); @@ -3765,7 +3762,9 @@ static int efx_pm_resume(struct device *dev) rc = efx->type->reset(efx, RESET_TYPE_ALL); if (rc) return rc; + down_write(&efx->filter_sem); rc = efx->type->init(efx); + up_write(&efx->filter_sem); if (rc) return rc; rc = efx_pm_thaw(dev); diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 8edf20967c82..e045a5d6b938 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -2794,6 +2794,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx) if (!state) return -ENOMEM; efx->filter_state = state; + init_rwsem(&state->lock); table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; table->id = EFX_FARCH_FILTER_TABLE_RX_IP; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 
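
/*
 * Sketch of the lock-discipline split in the sfc/ef10 hunks above: the body
 * of the operation becomes a *_locked() helper that assumes the semaphore is
 * already held, and the old entry point turns into a thin wrapper that takes
 * the read lock itself; internal callers that already hold the lock switch
 * to the _locked variant.  The rwlock and the integer "filter spec" are
 * userspace stand-ins (build with -pthread).
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t filter_sem = PTHREAD_RWLOCK_INITIALIZER;

static int filter_insert_locked(int spec)
{
	/* caller must hold filter_sem (shared or exclusive) */
	printf("inserting filter %d\n", spec);
	return spec;
}

static int filter_insert(int spec)
{
	int rc;

	pthread_rwlock_rdlock(&filter_sem);
	rc = filter_insert_locked(spec);
	pthread_rwlock_unlock(&filter_sem);
	return rc;
}

int main(void)
{
	/* external path: the wrapper takes the lock */
	filter_insert(1);

	/* internal path: lock already held, call the _locked variant */
	pthread_rwlock_wrlock(&filter_sem);
	filter_insert_locked(2);
	pthread_rwlock_unlock(&filter_sem);
	return 0;
}
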
2e6e2a96b4f2..f9a61f90cfbc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -37,7 +37,7 @@ * is done in the "stmmac files" */ -/* struct emac_variant - Descrive dwmac-sun8i hardware variant +/* struct emac_variant - Describe dwmac-sun8i hardware variant * @default_syscon_value: The default value of the EMAC register in syscon * This value is used for disabling properly EMAC * and used as a good starting value in case of the diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index d37f17ca62fe..65bc3556bd8f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -407,6 +407,16 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan) } } +static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); + + value &= ~DMA_RBSZ_MASK; + value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK; + + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); +} + const struct stmmac_dma_ops dwmac4_dma_ops = { .reset = dwmac4_dma_reset, .init = dwmac4_dma_init, @@ -431,6 +441,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = { .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, .enable_tso = dwmac4_enable_tso, + .set_bfsize = dwmac4_set_bfsize, }; const struct stmmac_dma_ops dwmac410_dma_ops = { @@ -457,4 +468,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = { .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, .enable_tso = dwmac4_enable_tso, + .set_bfsize = dwmac4_set_bfsize, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h index c63c1fe3f26b..22a4a6dbb1a4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h @@ -120,6 +120,8 @@ /* DMA Rx Channel X Control register defines */ #define DMA_CONTROL_SR BIT(0) +#define DMA_RBSZ_MASK GENMASK(14, 1) +#define DMA_RBSZ_SHIFT 1 /* Interrupt status per channel */ #define DMA_CHAN_STATUS_REB GENMASK(21, 19) diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index e44e7b26ce82..fe8b536b13f8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -183,6 +183,7 @@ struct stmmac_dma_ops { void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); + void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan); }; #define stmmac_reset(__priv, __args...) \ @@ -235,6 +236,8 @@ struct stmmac_dma_ops { stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args) #define stmmac_enable_tso(__priv, __args...) \ stmmac_do_void_callback(__priv, dma, enable_tso, __args) +#define stmmac_set_dma_bfsize(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, dma, set_bfsize, __args) struct mac_device_info; struct net_device; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index cba46b62a1cd..60f59abab009 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1804,6 +1804,8 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, qmode); + stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz, + chan); } for (chan = 0; chan < tx_channels_count; chan++) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 6d141f3931eb..72da77b94ecd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -94,7 +94,6 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries) /** * stmmac_axi_setup - parse DT parameters for programming the AXI register * @pdev: platform device - * @priv: driver private struct. * Description: * if required, from device-tree the AXI internal register can be tuned * by using platform parameters. diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 750eaa53bf0c..ada33c2d9ac2 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -476,7 +476,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk, out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 1a924b867b07..4b6e308199d2 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -210,7 +210,7 @@ int netvsc_recv_callback(struct net_device *net, void netvsc_channel_cb(void *context); int netvsc_poll(struct napi_struct *napi, int budget); -void rndis_set_subchannel(struct work_struct *w); +int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 5d5bd513847f..31c3d77b4733 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -65,6 +65,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf) VM_PKT_DATA_INBAND, 0); } +/* Worker to setup sub channels on initial setup + * Initial hotplug event occurs in softirq context + * and can't wait for channels. 
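
/*
 * Sketch of the masked read-modify-write used by the new dwmac4_set_bfsize()
 * callback above: clear the buffer-size field of the RX channel control
 * register, then OR in the new value shifted into place, leaving every other
 * bit untouched.  The register is simulated with a plain 32-bit variable and
 * the GENMASK macro assumes a 32-bit unsigned int.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))
#define RBSZ_MASK       GENMASK(14, 1)
#define RBSZ_SHIFT      1

int main(void)
{
	uint32_t reg = 0xA0000001;                  /* pretend RX control reg */
	uint32_t bfsize = 1536;

	reg &= ~RBSZ_MASK;                          /* clear the old size     */
	reg |= (bfsize << RBSZ_SHIFT) & RBSZ_MASK;  /* insert the new one     */

	printf("reg=0x%08x field=%u\n", reg, (reg & RBSZ_MASK) >> RBSZ_SHIFT);
	return 0;
}
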
+ */ +static void netvsc_subchan_work(struct work_struct *w) +{ + struct netvsc_device *nvdev = + container_of(w, struct netvsc_device, subchan_work); + struct rndis_device *rdev; + int i, ret; + + /* Avoid deadlock with device removal already under RTNL */ + if (!rtnl_trylock()) { + schedule_work(w); + return; + } + + rdev = nvdev->extension; + if (rdev) { + ret = rndis_set_subchannel(rdev->ndev, nvdev); + if (ret == 0) { + netif_device_attach(rdev->ndev); + } else { + /* fallback to only primary channel */ + for (i = 1; i < nvdev->num_chn; i++) + netif_napi_del(&nvdev->chan_table[i].napi); + + nvdev->max_chn = 1; + nvdev->num_chn = 1; + } + } + + rtnl_unlock(); +} + static struct netvsc_device *alloc_net_device(void) { struct netvsc_device *net_device; @@ -81,7 +116,7 @@ static struct netvsc_device *alloc_net_device(void) init_completion(&net_device->channel_init_wait); init_waitqueue_head(&net_device->subchan_open); - INIT_WORK(&net_device->subchan_work, rndis_set_subchannel); + INIT_WORK(&net_device->subchan_work, netvsc_subchan_work); return net_device; } @@ -1239,6 +1274,7 @@ int netvsc_poll(struct napi_struct *napi, int budget) struct hv_device *device = netvsc_channel_to_device(channel); struct net_device *ndev = hv_get_drvdata(device); int work_done = 0; + int ret; /* If starting a new interval */ if (!nvchan->desc) @@ -1250,16 +1286,18 @@ int netvsc_poll(struct napi_struct *napi, int budget) nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); } - /* If send of pending receive completions suceeded - * and did not exhaust NAPI budget this time - * and not doing busy poll + /* Send any pending receive completions */ + ret = send_recv_completions(ndev, net_device, nvchan); + + /* If it did not exhaust NAPI budget this time + * and not doing busy poll * then re-enable host interrupts - * and reschedule if ring is not empty. + * and reschedule if ring is not empty + * or sending receive completion failed. 
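
/*
 * Sketch of the trylock-or-reschedule shape in netvsc_subchan_work() above:
 * the worker must not block on the RTNL lock, because device removal can hold
 * it while flushing this very work item, so on contention the work simply
 * re-queues itself and returns.  The mutex and the requeue call below are
 * userspace stand-ins (build with -pthread), not the RTNL or workqueue APIs.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

static void schedule_work(void (*fn)(void)) { (void)fn; puts("work re-queued"); }

static void subchan_work(void)
{
	if (pthread_mutex_trylock(&rtnl)) {     /* nonzero: lock is contended */
		schedule_work(subchan_work);    /* try again later, don't block */
		return;
	}

	puts("configuring sub-channels under rtnl");
	pthread_mutex_unlock(&rtnl);
}

int main(void)
{
	subchan_work();                         /* uncontended: does the work */

	pthread_mutex_lock(&rtnl);              /* simulate removal holding it */
	subchan_work();                         /* contended: re-queues itself */
	pthread_mutex_unlock(&rtnl);
	return 0;
}
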
*/ - if (send_recv_completions(ndev, net_device, nvchan) == 0 && - work_done < budget && + if (work_done < budget && napi_complete_done(napi, work_done) && - hv_end_read(&channel->inbound) && + (ret || hv_end_read(&channel->inbound)) && napi_schedule_prep(napi)) { hv_begin_read(&channel->inbound); __napi_schedule(napi); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index fe2256bf1d13..dd1d6e115145 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -905,8 +905,20 @@ static int netvsc_attach(struct net_device *ndev, if (IS_ERR(nvdev)) return PTR_ERR(nvdev); - /* Note: enable and attach happen when sub-channels setup */ + if (nvdev->num_chn > 1) { + ret = rndis_set_subchannel(ndev, nvdev); + + /* if unavailable, just proceed with one queue */ + if (ret) { + nvdev->max_chn = 1; + nvdev->num_chn = 1; + } + } + + /* In any case device is now ready */ + netif_device_attach(ndev); + /* Note: enable and attach happen when sub-channels setup */ netif_carrier_off(ndev); if (netif_running(ndev)) { @@ -2089,6 +2101,9 @@ static int netvsc_probe(struct hv_device *dev, memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); + if (nvdev->num_chn > 1) + schedule_work(&nvdev->subchan_work); + /* hw_features computed in rndis_netdev_set_hwcaps() */ net->features = net->hw_features | NETIF_F_HIGHDMA | NETIF_F_SG | diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 5428bb261102..408ece27131c 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1062,29 +1062,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) * This breaks overlap of processing the host message for the * new primary channel with the initialization of sub-channels. 
*/ -void rndis_set_subchannel(struct work_struct *w) +int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) { - struct netvsc_device *nvdev - = container_of(w, struct netvsc_device, subchan_work); struct nvsp_message *init_packet = &nvdev->channel_init_pkt; - struct net_device_context *ndev_ctx; - struct rndis_device *rdev; - struct net_device *ndev; - struct hv_device *hv_dev; + struct net_device_context *ndev_ctx = netdev_priv(ndev); + struct hv_device *hv_dev = ndev_ctx->device_ctx; + struct rndis_device *rdev = nvdev->extension; int i, ret; - if (!rtnl_trylock()) { - schedule_work(w); - return; - } - - rdev = nvdev->extension; - if (!rdev) - goto unlock; /* device was removed */ - - ndev = rdev->ndev; - ndev_ctx = netdev_priv(ndev); - hv_dev = ndev_ctx->device_ctx; + ASSERT_RTNL(); memset(init_packet, 0, sizeof(struct nvsp_message)); init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL; @@ -1100,13 +1086,13 @@ void rndis_set_subchannel(struct work_struct *w) VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) { netdev_err(ndev, "sub channel allocate send failed: %d\n", ret); - goto failed; + return ret; } wait_for_completion(&nvdev->channel_init_wait); if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { netdev_err(ndev, "sub channel request failed\n"); - goto failed; + return -EIO; } nvdev->num_chn = 1 + @@ -1125,21 +1111,7 @@ void rndis_set_subchannel(struct work_struct *w) for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) ndev_ctx->tx_table[i] = i % nvdev->num_chn; - netif_device_attach(ndev); - rtnl_unlock(); - return; - -failed: - /* fallback to only primary channel */ - for (i = 1; i < nvdev->num_chn; i++) - netif_napi_del(&nvdev->chan_table[i].napi); - - nvdev->max_chn = 1; - nvdev->num_chn = 1; - - netif_device_attach(ndev); -unlock: - rtnl_unlock(); + return 0; } static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, @@ -1360,21 +1332,13 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, netif_napi_add(net, &net_device->chan_table[i].napi, netvsc_poll, NAPI_POLL_WEIGHT); - if (net_device->num_chn > 1) - schedule_work(&net_device->subchan_work); + return net_device; out: - /* if unavailable, just proceed with one queue */ - if (ret) { - net_device->max_chn = 1; - net_device->num_chn = 1; - } - - /* No sub channels, device is ready */ - if (net_device->num_chn == 1) - netif_device_attach(net); - - return net_device; + /* setting up multiple channels failed */ + net_device->max_chn = 1; + net_device->num_chn = 1; + return 0; err_dev_remv: rndis_filter_device_remove(dev, net_device); diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 64f1b1e77bc0..23a52b9293f3 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -275,6 +275,8 @@ struct adf7242_local { struct spi_message stat_msg; struct spi_transfer stat_xfer; struct dentry *debugfs_root; + struct delayed_work work; + struct workqueue_struct *wqueue; unsigned long flags; int tx_stat; bool promiscuous; @@ -575,10 +577,26 @@ static int adf7242_cmd_rx(struct adf7242_local *lp) /* Wait until the ACK is sent */ adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__); adf7242_clear_irqstat(lp); + mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400)); return adf7242_cmd(lp, CMD_RC_RX); } +static void adf7242_rx_cal_work(struct work_struct *work) +{ + struct adf7242_local *lp = + container_of(work, struct adf7242_local, work.work); + + /* Reissuing RC_RX every 400ms - to 
adjust for offset + * drift in receiver (datasheet page 61, OCL section) + */ + + if (!test_bit(FLAG_XMIT, &lp->flags)) { + adf7242_cmd(lp, CMD_RC_PHY_RDY); + adf7242_cmd_rx(lp); + } +} + static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm) { struct adf7242_local *lp = hw->priv; @@ -686,7 +704,7 @@ static int adf7242_start(struct ieee802154_hw *hw) enable_irq(lp->spi->irq); set_bit(FLAG_START, &lp->flags); - return adf7242_cmd(lp, CMD_RC_RX); + return adf7242_cmd_rx(lp); } static void adf7242_stop(struct ieee802154_hw *hw) @@ -694,6 +712,7 @@ static void adf7242_stop(struct ieee802154_hw *hw) struct adf7242_local *lp = hw->priv; disable_irq(lp->spi->irq); + cancel_delayed_work_sync(&lp->work); adf7242_cmd(lp, CMD_RC_IDLE); clear_bit(FLAG_START, &lp->flags); adf7242_clear_irqstat(lp); @@ -719,7 +738,10 @@ static int adf7242_channel(struct ieee802154_hw *hw, u8 page, u8 channel) adf7242_write_reg(lp, REG_CH_FREQ1, freq >> 8); adf7242_write_reg(lp, REG_CH_FREQ2, freq >> 16); - return adf7242_cmd(lp, CMD_RC_RX); + if (test_bit(FLAG_START, &lp->flags)) + return adf7242_cmd_rx(lp); + else + return adf7242_cmd(lp, CMD_RC_PHY_RDY); } static int adf7242_set_hw_addr_filt(struct ieee802154_hw *hw, @@ -814,6 +836,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) /* ensure existing instances of the IRQ handler have completed */ disable_irq(lp->spi->irq); set_bit(FLAG_XMIT, &lp->flags); + cancel_delayed_work_sync(&lp->work); reinit_completion(&lp->tx_complete); adf7242_cmd(lp, CMD_RC_PHY_RDY); adf7242_clear_irqstat(lp); @@ -952,6 +975,7 @@ static irqreturn_t adf7242_isr(int irq, void *data) unsigned int xmit; u8 irq1; + mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400)); adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1); if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA))) @@ -1241,6 +1265,9 @@ static int adf7242_probe(struct spi_device *spi) spi_message_add_tail(&lp->stat_xfer, &lp->stat_msg); spi_set_drvdata(spi, lp); + INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work); + lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev), + WQ_MEM_RECLAIM); ret = adf7242_hw_init(lp); if (ret) @@ -1284,6 +1311,9 @@ static int adf7242_remove(struct spi_device *spi) if (!IS_ERR_OR_NULL(lp->debugfs_root)) debugfs_remove_recursive(lp->debugfs_root); + cancel_delayed_work_sync(&lp->work); + destroy_workqueue(lp->wqueue); + ieee802154_unregister_hw(lp->hw); mutex_destroy(&lp->bmux); ieee802154_free_hw(lp->hw); diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 77abedf0b524..3d9e91579866 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -940,7 +940,7 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) static int at86rf230_ed(struct ieee802154_hw *hw, u8 *level) { - BUG_ON(!level); + WARN_ON(!level); *level = 0xbe; return 0; } @@ -1121,8 +1121,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw, if (changed & IEEE802154_AFILT_SADDR_CHANGED) { u16 addr = le16_to_cpu(filt->short_addr); - dev_vdbg(&lp->spi->dev, - "at86rf230_set_hw_addr_filt called for saddr\n"); + dev_vdbg(&lp->spi->dev, "%s called for saddr\n", __func__); __at86rf230_write(lp, RG_SHORT_ADDR_0, addr); __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8); } @@ -1130,8 +1129,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw, if (changed & IEEE802154_AFILT_PANID_CHANGED) { u16 pan = le16_to_cpu(filt->pan_id); - dev_vdbg(&lp->spi->dev, - "at86rf230_set_hw_addr_filt called for pan id\n"); + dev_vdbg(&lp->spi->dev, "%s 
called for pan id\n", __func__); __at86rf230_write(lp, RG_PAN_ID_0, pan); __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8); } @@ -1140,15 +1138,13 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw, u8 i, addr[8]; memcpy(addr, &filt->ieee_addr, 8); - dev_vdbg(&lp->spi->dev, - "at86rf230_set_hw_addr_filt called for IEEE addr\n"); + dev_vdbg(&lp->spi->dev, "%s called for IEEE addr\n", __func__); for (i = 0; i < 8; i++) __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]); } if (changed & IEEE802154_AFILT_PANC_CHANGED) { - dev_vdbg(&lp->spi->dev, - "at86rf230_set_hw_addr_filt called for panc change\n"); + dev_vdbg(&lp->spi->dev, "%s called for panc change\n", __func__); if (filt->pan_coord) at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1); else @@ -1252,7 +1248,6 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw, return at86rf230_write_subreg(lp, SR_CCA_MODE, val); } - static int at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) { diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index 0d673f7682ee..176395e4b7bb 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -49,7 +49,7 @@ struct fakelb_phy { static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level) { - BUG_ON(!level); + WARN_ON(!level); *level = 0xbe; return 0; diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index de0d7f28a181..e428277781ac 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -15,10 +15,11 @@ */ #include <linux/kernel.h> #include <linux/module.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/spi/spi.h> #include <linux/workqueue.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/skbuff.h> #include <linux/of_gpio.h> #include <linux/regmap.h> diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 23c1d6600241..4a949569ec4c 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -75,10 +75,23 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) { struct ipvl_dev *ipvlan; struct net_device *mdev = port->dev; - int err = 0; + unsigned int flags; + int err; ASSERT_RTNL(); if (port->mode != nval) { + list_for_each_entry(ipvlan, &port->ipvlans, pnode) { + flags = ipvlan->dev->flags; + if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) { + err = dev_change_flags(ipvlan->dev, + flags | IFF_NOARP); + } else { + err = dev_change_flags(ipvlan->dev, + flags & ~IFF_NOARP); + } + if (unlikely(err)) + goto fail; + } if (nval == IPVLAN_MODE_L3S) { /* New mode is L3S */ err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); @@ -86,21 +99,28 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) mdev->l3mdev_ops = &ipvl_l3mdev_ops; mdev->priv_flags |= IFF_L3MDEV_MASTER; } else - return err; + goto fail; } else if (port->mode == IPVLAN_MODE_L3S) { /* Old mode was L3S */ mdev->priv_flags &= ~IFF_L3MDEV_MASTER; ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); mdev->l3mdev_ops = NULL; } - list_for_each_entry(ipvlan, &port->ipvlans, pnode) { - if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) - ipvlan->dev->flags |= IFF_NOARP; - else - ipvlan->dev->flags &= ~IFF_NOARP; - } port->mode = nval; } + return 0; + +fail: + /* Undo the flags changes that have been done so far. 
*/ + list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) { + flags = ipvlan->dev->flags; + if (port->mode == IPVLAN_MODE_L3 || + port->mode == IPVLAN_MODE_L3S) + dev_change_flags(ipvlan->dev, flags | IFF_NOARP); + else + dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP); + } + return err; } diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c index 081d99aa3985..49ac678eb2dc 100644 --- a/drivers/net/phy/dp83tc811.c +++ b/drivers/net/phy/dp83tc811.c @@ -222,7 +222,7 @@ static int dp83811_config_intr(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, MII_DP83811_INT_STAT1, 0); + err = phy_write(phydev, MII_DP83811_INT_STAT2, 0); } return err; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index b8f57e9b9379..1cd439bdf608 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -130,8 +130,9 @@ #define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12) #define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14) -#define MII_88E1121_PHY_LED_CTRL 16 +#define MII_PHY_LED_CTRL 16 #define MII_88E1121_PHY_LED_DEF 0x0030 +#define MII_88E1510_PHY_LED_DEF 0x1177 #define MII_M1011_PHY_STATUS 0x11 #define MII_M1011_PHY_STATUS_1000 0x8000 @@ -632,8 +633,40 @@ error: return err; } +static void marvell_config_led(struct phy_device *phydev) +{ + u16 def_config; + int err; + + switch (MARVELL_PHY_FAMILY_ID(phydev->phy_id)) { + /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */ + case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1121R): + case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1318S): + def_config = MII_88E1121_PHY_LED_DEF; + break; + /* Default PHY LED config: + * LED[0] .. 1000Mbps Link + * LED[1] .. 100Mbps Link + * LED[2] .. Blink, Activity + */ + case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1510): + def_config = MII_88E1510_PHY_LED_DEF; + break; + default: + return; + } + + err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, MII_PHY_LED_CTRL, + def_config); + if (err < 0) + pr_warn("Fail to config marvell phy LED.\n"); +} + static int marvell_config_init(struct phy_device *phydev) { + /* Set defalut LED */ + marvell_config_led(phydev); + /* Set registers from marvell,reg-init DT property */ return marvell_of_reg_init(phydev); } @@ -813,21 +846,6 @@ static int m88e1111_config_init(struct phy_device *phydev) return genphy_soft_reset(phydev); } -static int m88e1121_config_init(struct phy_device *phydev) -{ - int err; - - /* Default PHY LED config: LED[0] .. Link, LED[1] .. 
Activity */ - err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, - MII_88E1121_PHY_LED_CTRL, - MII_88E1121_PHY_LED_DEF); - if (err < 0) - return err; - - /* Set marvell,reg-init configuration from device tree */ - return marvell_config_init(phydev); -} - static int m88e1318_config_init(struct phy_device *phydev) { if (phy_interrupt_is_valid(phydev)) { @@ -841,7 +859,7 @@ static int m88e1318_config_init(struct phy_device *phydev) return err; } - return m88e1121_config_init(phydev); + return marvell_config_init(phydev); } static int m88e1510_config_init(struct phy_device *phydev) @@ -2087,7 +2105,7 @@ static struct phy_driver marvell_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .probe = &m88e1121_probe, - .config_init = &m88e1121_config_init, + .config_init = &marvell_config_init, .config_aneg = &m88e1121_config_aneg, .read_status = &marvell_read_status, .ack_interrupt = &marvell_ack_interrupt, diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index bd0f339f69fd..b9f5f40a7ac1 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1724,11 +1724,8 @@ EXPORT_SYMBOL(genphy_loopback); static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) { - /* The default values for phydev->supported are provided by the PHY - * driver "features" member, we want to reset to sane defaults first - * before supporting higher speeds. - */ - phydev->supported &= PHY_DEFAULT_FEATURES; + phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES | + PHY_10BT_FEATURES); switch (max_speed) { default: diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index d437f4f5ed52..740655261e5b 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -349,7 +349,6 @@ static int sfp_register_bus(struct sfp_bus *bus) } if (bus->started) bus->socket_ops->start(bus->sfp); - bus->netdev->sfp_bus = bus; bus->registered = true; return 0; } @@ -364,7 +363,6 @@ static void sfp_unregister_bus(struct sfp_bus *bus) if (bus->phydev && ops && ops->disconnect_phy) ops->disconnect_phy(bus->upstream); } - bus->netdev->sfp_bus = NULL; bus->registered = false; } @@ -436,6 +434,14 @@ void sfp_upstream_stop(struct sfp_bus *bus) } EXPORT_SYMBOL_GPL(sfp_upstream_stop); +static void sfp_upstream_clear(struct sfp_bus *bus) +{ + bus->upstream_ops = NULL; + bus->upstream = NULL; + bus->netdev->sfp_bus = NULL; + bus->netdev = NULL; +} + /** * sfp_register_upstream() - Register the neighbouring device * @fwnode: firmware node for the SFP bus @@ -461,9 +467,13 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode, bus->upstream_ops = ops; bus->upstream = upstream; bus->netdev = ndev; + ndev->sfp_bus = bus; - if (bus->sfp) + if (bus->sfp) { ret = sfp_register_bus(bus); + if (ret) + sfp_upstream_clear(bus); + } rtnl_unlock(); } @@ -488,8 +498,7 @@ void sfp_unregister_upstream(struct sfp_bus *bus) rtnl_lock(); if (bus->sfp) sfp_unregister_bus(bus); - bus->upstream = NULL; - bus->netdev = NULL; + sfp_upstream_clear(bus); rtnl_unlock(); sfp_bus_put(bus); @@ -561,6 +570,13 @@ void sfp_module_remove(struct sfp_bus *bus) } EXPORT_SYMBOL_GPL(sfp_module_remove); +static void sfp_socket_clear(struct sfp_bus *bus) +{ + bus->sfp_dev = NULL; + bus->sfp = NULL; + bus->socket_ops = NULL; +} + struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp, const struct sfp_socket_ops *ops) { @@ -573,8 +589,11 @@ struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp, bus->sfp = sfp; bus->socket_ops = ops; 
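/*
 * The surrounding sfp-bus hunks pair each pointer assignment with a new
 * clearing helper (sfp_upstream_clear(), sfp_socket_clear()) so that a
 * failed sfp_register_bus() no longer leaves half-initialized bus state
 * behind.  Below is a minimal stand-alone sketch of that set/try/undo
 * pattern; struct demo_bus and the demo_* helpers are hypothetical names
 * used only for illustration, not part of the sfp-bus API.
 */
#include <stddef.h>
#include <errno.h>

struct demo_bus { void *socket; void *netdev; };

static int demo_register_bus(struct demo_bus *b)
{
	/* registration only makes sense once both ends are present */
	return (b->socket && b->netdev) ? 0 : -ENODEV;
}

static void demo_socket_clear(struct demo_bus *b)
{
	b->socket = NULL;	/* analogous to the sfp_socket_clear() added above/below */
}

static int demo_register_socket(struct demo_bus *b, void *sock)
{
	int ret = 0;

	b->socket = sock;			/* publish the state first ... */
	if (b->netdev) {
		ret = demo_register_bus(b);
		if (ret)
			demo_socket_clear(b);	/* ... and undo it on failure */
	}
	return ret;
}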
- if (bus->netdev) + if (bus->netdev) { ret = sfp_register_bus(bus); + if (ret) + sfp_socket_clear(bus); + } rtnl_unlock(); } @@ -592,9 +611,7 @@ void sfp_unregister_socket(struct sfp_bus *bus) rtnl_lock(); if (bus->netdev) sfp_unregister_bus(bus); - bus->sfp_dev = NULL; - bus->sfp = NULL; - bus->socket_ops = NULL; + sfp_socket_clear(bus); rtnl_unlock(); sfp_bus_put(bus); diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index de51e8f70f44..ce61231e96ea 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -1107,7 +1107,7 @@ static const struct proto_ops pppoe_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = pppoe_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, diff --git a/drivers/net/tun.c b/drivers/net/tun.c index a192a017cc68..f5727baac84a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1688,7 +1688,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, case XDP_TX: get_page(alloc_frag->page); alloc_frag->offset += buflen; - if (tun_xdp_tx(tun->dev, &xdp)) + if (tun_xdp_tx(tun->dev, &xdp) < 0) goto err_redirect; rcu_read_unlock(); local_bh_enable(); diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 3d4f7959dabb..b1b3d8f7e67d 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -642,10 +642,12 @@ static void ax88772_restore_phy(struct usbnet *dev) priv->presvd_phy_advertise); /* Restore BMCR */ + if (priv->presvd_phy_bmcr & BMCR_ANENABLE) + priv->presvd_phy_bmcr |= BMCR_ANRESTART; + asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR, priv->presvd_phy_bmcr); - mii_nway_restart(&dev->mii); priv->presvd_phy_advertise = 0; priv->presvd_phy_bmcr = 0; } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 8dff87ec6d99..ed10d49eb5e0 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -64,6 +64,7 @@ #define DEFAULT_RX_CSUM_ENABLE (true) #define DEFAULT_TSO_CSUM_ENABLE (true) #define DEFAULT_VLAN_FILTER_ENABLE (true) +#define DEFAULT_VLAN_RX_OFFLOAD (true) #define TX_OVERHEAD (8) #define RXW_PADDING 2 @@ -2298,7 +2299,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu) if ((ll_mtu % dev->maxpacket) == 0) return -EDOM; - ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); + ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN); netdev->mtu = new_mtu; @@ -2364,6 +2365,11 @@ static int lan78xx_set_features(struct net_device *netdev, } if (features & NETIF_F_HW_VLAN_CTAG_RX) + pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_; + else + pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_; + + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_; else pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_; @@ -2587,7 +2593,8 @@ static int lan78xx_reset(struct lan78xx_net *dev) buf |= FCT_TX_CTL_EN_; ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf); - ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); + ret = lan78xx_set_rx_max_frame_length(dev, + dev->net->mtu + VLAN_ETH_HLEN); ret = lan78xx_read_reg(dev, MAC_RX, &buf); buf |= MAC_RX_RXEN_; @@ -2975,6 +2982,12 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) if (DEFAULT_TSO_CSUM_ENABLE) dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG; + if (DEFAULT_VLAN_RX_OFFLOAD) + dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX; + + if (DEFAULT_VLAN_FILTER_ENABLE) + 
dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + dev->net->hw_features = dev->net->features; ret = lan78xx_setup_irq_domain(dev); @@ -3039,8 +3052,13 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev, struct sk_buff *skb, u32 rx_cmd_a, u32 rx_cmd_b) { + /* HW Checksum offload appears to be flawed if used when not stripping + * VLAN headers. Drop back to S/W checksums under these conditions. + */ if (!(dev->net->features & NETIF_F_RXCSUM) || - unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) { + unlikely(rx_cmd_a & RX_CMD_A_ICSM_) || + ((rx_cmd_a & RX_CMD_A_FVTG_) && + !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) { skb->ip_summed = CHECKSUM_NONE; } else { skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_)); @@ -3048,6 +3066,16 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev, } } +static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev, + struct sk_buff *skb, + u32 rx_cmd_a, u32 rx_cmd_b) +{ + if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) && + (rx_cmd_a & RX_CMD_A_FVTG_)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + (rx_cmd_b & 0xffff)); +} + static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) { int status; @@ -3112,6 +3140,8 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb) if (skb->len == size) { lan78xx_rx_csum_offload(dev, skb, rx_cmd_a, rx_cmd_b); + lan78xx_rx_vlan_offload(dev, skb, + rx_cmd_a, rx_cmd_b); skb_trim(skb, skb->len - 4); /* remove fcs */ skb->truesize = size + sizeof(struct sk_buff); @@ -3130,6 +3160,7 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb) skb_set_tail_pointer(skb2, size); lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b); + lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b); skb_trim(skb2, skb2->len - 4); /* remove fcs */ skb2->truesize = size + sizeof(struct sk_buff); @@ -3313,6 +3344,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) pkt_cnt = 0; count = 0; length = 0; + spin_lock_irqsave(&tqp->lock, flags); for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) { if (skb_is_gso(skb)) { if (pkt_cnt) { @@ -3321,7 +3353,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) } count = 1; length = skb->len - TX_OVERHEAD; - skb2 = skb_dequeue(tqp); + __skb_unlink(skb, tqp); + spin_unlock_irqrestore(&tqp->lock, flags); goto gso_skb; } @@ -3330,6 +3363,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32)); pkt_cnt++; } + spin_unlock_irqrestore(&tqp->lock, flags); /* copy to a single skb */ skb = alloc_skb(skb_totallen, GFP_ATOMIC); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 8fac8e132c5b..38502809420b 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1253,6 +1253,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 86f7196f9d91..2a58607a6aea 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -3962,7 +3962,8 @@ static int rtl8152_close(struct net_device *netdev) #ifdef CONFIG_PM_SLEEP unregister_pm_notifier(&tp->pm_notifier); 
#endif - napi_disable(&tp->napi); + if (!test_bit(RTL8152_UNPLUG, &tp->flags)) + napi_disable(&tp->napi); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 5f565bd574da..48ba80a8ca5c 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -681,7 +681,7 @@ static void rtl8150_set_multicast(struct net_device *netdev) (netdev->flags & IFF_ALLMULTI)) { rx_creg &= 0xfffe; rx_creg |= 0x0002; - dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name); + dev_dbg(&netdev->dev, "%s: allmulti set\n", netdev->name); } else { /* ~RX_MULTICAST, ~RX_PROMISCUOUS */ rx_creg &= 0x00fc; diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 7a6a1fe79309..05553d252446 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -82,6 +82,9 @@ static bool turbo_mode = true; module_param(turbo_mode, bool, 0644); MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); +static int smsc75xx_link_ok_nopm(struct usbnet *dev); +static int smsc75xx_phy_gig_workaround(struct usbnet *dev); + static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index, u32 *data, int in_pm) { @@ -852,6 +855,9 @@ static int smsc75xx_phy_initialize(struct usbnet *dev) return -EIO; } + /* phy workaround for gig link */ + smsc75xx_phy_gig_workaround(dev); + smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); @@ -987,6 +993,62 @@ static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm) return -EIO; } +static int smsc75xx_phy_gig_workaround(struct usbnet *dev) +{ + struct mii_if_info *mii = &dev->mii; + int ret = 0, timeout = 0; + u32 buf, link_up = 0; + + /* Set the phy in Gig loopback */ + smsc75xx_mdio_write(dev->net, mii->phy_id, MII_BMCR, 0x4040); + + /* Wait for the link up */ + do { + link_up = smsc75xx_link_ok_nopm(dev); + usleep_range(10000, 20000); + timeout++; + } while ((!link_up) && (timeout < 1000)); + + if (timeout >= 1000) { + netdev_warn(dev->net, "Timeout waiting for PHY link up\n"); + return -EIO; + } + + /* phy reset */ + ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); + if (ret < 0) { + netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); + return ret; + } + + buf |= PMT_CTL_PHY_RST; + + ret = smsc75xx_write_reg(dev, PMT_CTL, buf); + if (ret < 0) { + netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret); + return ret; + } + + timeout = 0; + do { + usleep_range(10000, 20000); + ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); + if (ret < 0) { + netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", + ret); + return ret; + } + timeout++; + } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100)); + + if (timeout >= 100) { + netdev_warn(dev->net, "timeout waiting for PHY Reset\n"); + return -EIO; + } + + return 0; +} + static int smsc75xx_reset(struct usbnet *dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index b6c9a2af3732..53085c63277b 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -53,6 +53,10 @@ module_param(napi_tx, bool, 0644); /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ #define VIRTIO_XDP_HEADROOM 256 +/* Separating two types of XDP xmit */ +#define VIRTIO_XDP_TX BIT(0) +#define VIRTIO_XDP_REDIR BIT(1) + /* RX packet size EWMA. 
The average packet size is used to determine the packet * buffer size when refilling RX rings. As the entire RX ring may be refilled * at once, the weight is chosen so that the EWMA will be insensitive to short- @@ -582,7 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev, struct receive_queue *rq, void *buf, void *ctx, unsigned int len, - bool *xdp_xmit) + unsigned int *xdp_xmit) { struct sk_buff *skb; struct bpf_prog *xdp_prog; @@ -654,14 +658,14 @@ static struct sk_buff *receive_small(struct net_device *dev, trace_xdp_exception(vi->dev, xdp_prog, act); goto err_xdp; } - *xdp_xmit = true; + *xdp_xmit |= VIRTIO_XDP_TX; rcu_read_unlock(); goto xdp_xmit; case XDP_REDIRECT: err = xdp_do_redirect(dev, &xdp, xdp_prog); if (err) goto err_xdp; - *xdp_xmit = true; + *xdp_xmit |= VIRTIO_XDP_REDIR; rcu_read_unlock(); goto xdp_xmit; default: @@ -723,7 +727,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, void *buf, void *ctx, unsigned int len, - bool *xdp_xmit) + unsigned int *xdp_xmit) { struct virtio_net_hdr_mrg_rxbuf *hdr = buf; u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); @@ -818,7 +822,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, put_page(xdp_page); goto err_xdp; } - *xdp_xmit = true; + *xdp_xmit |= VIRTIO_XDP_TX; if (unlikely(xdp_page != page)) put_page(page); rcu_read_unlock(); @@ -830,7 +834,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, put_page(xdp_page); goto err_xdp; } - *xdp_xmit = true; + *xdp_xmit |= VIRTIO_XDP_REDIR; if (unlikely(xdp_page != page)) put_page(page); rcu_read_unlock(); @@ -939,7 +943,8 @@ xdp_xmit: } static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, - void *buf, unsigned int len, void **ctx, bool *xdp_xmit) + void *buf, unsigned int len, void **ctx, + unsigned int *xdp_xmit) { struct net_device *dev = vi->dev; struct sk_buff *skb; @@ -1232,7 +1237,8 @@ static void refill_work(struct work_struct *work) } } -static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit) +static int virtnet_receive(struct receive_queue *rq, int budget, + unsigned int *xdp_xmit) { struct virtnet_info *vi = rq->vq->vdev->priv; unsigned int len, received = 0, bytes = 0; @@ -1321,7 +1327,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) struct virtnet_info *vi = rq->vq->vdev->priv; struct send_queue *sq; unsigned int received, qp; - bool xdp_xmit = false; + unsigned int xdp_xmit = 0; virtnet_poll_cleantx(rq); @@ -1331,12 +1337,14 @@ static int virtnet_poll(struct napi_struct *napi, int budget) if (received < budget) virtqueue_napi_complete(napi, rq->vq, received); - if (xdp_xmit) { + if (xdp_xmit & VIRTIO_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_xmit & VIRTIO_XDP_TX) { qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); sq = &vi->sq[qp]; virtqueue_kick(sq->vq); - xdp_do_flush_map(); } return received; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index aee0e60471f1..f6bb1d54d4bd 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -623,9 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk, flush = 0; out: - skb_gro_remcsum_cleanup(skb, &grc); - skb->remcsum_offload = 0; - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final_remcsum(skb, pp, flush, &grc); return pp; } diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index e9c2fb318c03..836e0a47b94a 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ 
b/drivers/net/wireless/ath/ath10k/mac.c @@ -6058,8 +6058,19 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk) ath10k_mac_max_vht_nss(vht_mcs_mask))); if (changed & IEEE80211_RC_BW_CHANGED) { - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n", - sta->addr, bw); + enum wmi_phy_mode mode; + + mode = chan_to_phymode(&def); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n", + sta->addr, bw, mode); + + err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, + WMI_PEER_PHYMODE, mode); + if (err) { + ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n", + sta->addr, mode, err); + goto exit; + } err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, WMI_PEER_CHAN_WIDTH, bw); @@ -6100,6 +6111,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk) sta->addr); } +exit: mutex_unlock(&ar->conf_mutex); } diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index b48db54e9865..d68afb65402a 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -6144,6 +6144,7 @@ enum wmi_peer_param { WMI_PEER_NSS = 0x5, WMI_PEER_USE_4ADDR = 0x6, WMI_PEER_DEBUG = 0xa, + WMI_PEER_PHYMODE = 0xd, WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */ }; diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.c b/drivers/net/wireless/ath/wcn36xx/testmode.c index 1279064a3b71..51a038022c8b 100644 --- a/drivers/net/wireless/ath/wcn36xx/testmode.c +++ b/drivers/net/wireless/ath/wcn36xx/testmode.c @@ -1,4 +1,4 @@ -/* +/* * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index c99a191e8d69..a907d7b065fa 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -4296,6 +4296,13 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus) brcmf_dbg(TRACE, "Enter\n"); if (bus) { + /* Stop watchdog task */ + if (bus->watchdog_tsk) { + send_sig(SIGTERM, bus->watchdog_tsk, 1); + kthread_stop(bus->watchdog_tsk); + bus->watchdog_tsk = NULL; + } + /* De-register interrupt handler */ brcmf_sdiod_intr_unregister(bus->sdiodev); diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 6e3cf9817730..88f4c89f89ba 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -644,11 +644,6 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) MWIFIEX_FUNC_SHUTDOWN); } - if (adapter->workqueue) - flush_workqueue(adapter->workqueue); - - mwifiex_usb_free(card); - mwifiex_dbg(adapter, FATAL, "%s: removing card\n", __func__); mwifiex_remove_card(adapter); @@ -1356,6 +1351,8 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter) { struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; + mwifiex_usb_free(card); + mwifiex_usb_cleanup_tx_aggr(adapter); card->adapter = NULL; diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c index 9d2f9a776ef1..b804abd464ae 100644 --- a/drivers/net/wireless/mediatek/mt7601u/phy.c +++ b/drivers/net/wireless/mediatek/mt7601u/phy.c @@ -986,13 +986,15 @@ static void mt7601u_agc_tune(struct mt7601u_dev *dev) */ spin_lock_bh(&dev->con_mon_lock); avg_rssi = 
ewma_rssi_read(&dev->avg_rssi); - WARN_ON_ONCE(avg_rssi == 0); + spin_unlock_bh(&dev->con_mon_lock); + if (avg_rssi == 0) + return; + avg_rssi = -avg_rssi; if (avg_rssi <= -70) val -= 0x20; else if (avg_rssi <= -60) val -= 0x10; - spin_unlock_bh(&dev->con_mon_lock); if (val != mt7601u_bbp_rr(dev, 66)) mt7601u_bbp_wr(dev, 66, val); diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 220e2b710208..ae0ca8006849 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -654,8 +654,7 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev, vif = qtnf_mac_get_base_vif(mac); if (!vif) { pr_err("MAC%u: primary VIF is not configured\n", mac->macid); - ret = -EFAULT; - goto out; + return -EFAULT; } if (vif->wdev.iftype != NL80211_IFTYPE_STATION) { diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 39c817eddd78..54c9f6ab0c8c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -484,18 +484,21 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw) } -void rtl_deinit_deferred_work(struct ieee80211_hw *hw) +void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq) { struct rtl_priv *rtlpriv = rtl_priv(hw); del_timer_sync(&rtlpriv->works.watchdog_timer); - cancel_delayed_work(&rtlpriv->works.watchdog_wq); - cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); - cancel_delayed_work(&rtlpriv->works.ps_work); - cancel_delayed_work(&rtlpriv->works.ps_rfon_wq); - cancel_delayed_work(&rtlpriv->works.fwevt_wq); - cancel_delayed_work(&rtlpriv->works.c2hcmd_wq); + cancel_delayed_work_sync(&rtlpriv->works.watchdog_wq); + if (ips_wq) + cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); + else + cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq); + cancel_delayed_work_sync(&rtlpriv->works.ps_work); + cancel_delayed_work_sync(&rtlpriv->works.ps_rfon_wq); + cancel_delayed_work_sync(&rtlpriv->works.fwevt_wq); + cancel_delayed_work_sync(&rtlpriv->works.c2hcmd_wq); } EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work); diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index 912f205779c3..a7ae40eaa3cd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h @@ -121,7 +121,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw); void rtl_deinit_rfkill(struct ieee80211_hw *hw); void rtl_watch_dog_timer_callback(struct timer_list *t); -void rtl_deinit_deferred_work(struct ieee80211_hw *hw); +void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq); bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx); int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht, diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index cfea57efa7f4..4bf7967590ca 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -130,7 +130,6 @@ found_alt: firmware->size); rtlpriv->rtlhal.wowlan_fwsize = firmware->size; } - rtlpriv->rtlhal.fwsize = firmware->size; release_firmware(firmware); } @@ -196,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw) /* reset sec info */ rtl_cam_reset_sec_info(hw); - rtl_deinit_deferred_work(hw); + rtl_deinit_deferred_work(hw, false); } rtlpriv->intf_ops->adapter_stop(hw); diff --git 
a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index ae13bcfb3bf0..5d1fda16fc8c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -2377,7 +2377,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev) ieee80211_unregister_hw(hw); rtlmac->mac80211_registered = 0; } else { - rtl_deinit_deferred_work(hw); + rtl_deinit_deferred_work(hw, false); rtlpriv->intf_ops->adapter_stop(hw); } rtlpriv->cfg->ops->disable_interrupt(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index 71af24e2e051..479a4cfc245d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c @@ -71,7 +71,7 @@ bool rtl_ps_disable_nic(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); /*<1> Stop all timer */ - rtl_deinit_deferred_work(hw); + rtl_deinit_deferred_work(hw, true); /*<2> Disable Interrupt */ rtlpriv->cfg->ops->disable_interrupt(hw); @@ -292,7 +292,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw) struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); enum rf_pwrstate rtstate; - cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); + cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq); mutex_lock(&rtlpriv->locks.ips_mutex); if (ppsc->inactiveps) { diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index f9faffc498bc..2ac5004d7a40 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -1132,7 +1132,7 @@ void rtl_usb_disconnect(struct usb_interface *intf) ieee80211_unregister_hw(hw); rtlmac->mac80211_registered = 0; } else { - rtl_deinit_deferred_work(hw); + rtl_deinit_deferred_work(hw, false); rtlpriv->intf_ops->adapter_stop(hw); } /*deinit rfkill */ diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index d5553c47014f..5d823e965883 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -74,7 +74,7 @@ static void pn533_recv_response(struct urb *urb) struct sk_buff *skb = NULL; if (!urb->status) { - skb = alloc_skb(urb->actual_length, GFP_KERNEL); + skb = alloc_skb(urb->actual_length, GFP_ATOMIC); if (!skb) { nfc_err(&phy->udev->dev, "failed to alloc memory\n"); } else { @@ -186,7 +186,7 @@ static int pn533_usb_send_frame(struct pn533 *dev, if (dev->protocol_type == PN533_PROTO_REQ_RESP) { /* request for response for sent packet directly */ - rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC); + rc = pn533_submit_urb_for_response(phy, GFP_KERNEL); if (rc) goto error; } else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) { diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c index 2e96b34bc936..fb667bf469c7 100644 --- a/drivers/nvdimm/claim.c +++ b/drivers/nvdimm/claim.c @@ -278,6 +278,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, return -EIO; if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0) return -EIO; + return 0; } if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) { diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 68940356cad3..8b1fd7f1a224 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -414,7 +414,8 @@ static int pmem_attach_disk(struct device *dev, blk_queue_logical_block_size(q, pmem_sector_size(ndns)); blk_queue_max_hw_sectors(q, UINT_MAX); blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - blk_queue_flag_set(QUEUE_FLAG_DAX, q); + if (pmem->pfn_flags & PFN_MAP) + blk_queue_flag_set(QUEUE_FLAG_DAX, q); 
q->queuedata = pmem; disk = alloc_disk_node(0, nid); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 46df030b2c3f..bf65501e6ed6 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -100,6 +100,22 @@ static struct class *nvme_subsys_class; static void nvme_ns_remove(struct nvme_ns *ns); static int nvme_revalidate_disk(struct gendisk *disk); static void nvme_put_subsystem(struct nvme_subsystem *subsys); +static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, + unsigned nsid); + +static void nvme_set_queue_dying(struct nvme_ns *ns) +{ + /* + * Revalidating a dead namespace sets capacity to 0. This will end + * buffered writers dirtying pages that can't be synced. + */ + if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags)) + return; + revalidate_disk(ns->disk); + blk_set_queue_dying(ns->queue); + /* Forcibly unquiesce queues to avoid blocking dispatch */ + blk_mq_unquiesce_queue(ns->queue); +} static void nvme_queue_scan(struct nvme_ctrl *ctrl) { @@ -1044,14 +1060,17 @@ EXPORT_SYMBOL_GPL(nvme_set_queue_count); static void nvme_enable_aen(struct nvme_ctrl *ctrl) { - u32 result; + u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED; int status; - status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, - ctrl->oaes & NVME_AEN_SUPPORTED, NULL, 0, &result); + if (!supported_aens) + return; + + status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens, + NULL, 0, &result); if (status) dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n", - ctrl->oaes & NVME_AEN_SUPPORTED); + supported_aens); } static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) @@ -1151,19 +1170,15 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, static void nvme_update_formats(struct nvme_ctrl *ctrl) { - struct nvme_ns *ns, *next; - LIST_HEAD(rm_list); + struct nvme_ns *ns; - down_write(&ctrl->namespaces_rwsem); - list_for_each_entry(ns, &ctrl->namespaces, list) { - if (ns->disk && nvme_revalidate_disk(ns->disk)) { - list_move_tail(&ns->list, &rm_list); - } - } - up_write(&ctrl->namespaces_rwsem); + down_read(&ctrl->namespaces_rwsem); + list_for_each_entry(ns, &ctrl->namespaces, list) + if (ns->disk && nvme_revalidate_disk(ns->disk)) + nvme_set_queue_dying(ns); + up_read(&ctrl->namespaces_rwsem); - list_for_each_entry_safe(ns, next, &rm_list, list) - nvme_ns_remove(ns); + nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL); } static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) @@ -1218,7 +1233,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, effects = nvme_passthru_start(ctrl, ns, cmd.opcode); status = nvme_submit_user_cmd(ns ? 
ns->queue : ctrl->admin_q, &c, (void __user *)(uintptr_t)cmd.addr, cmd.data_len, - (void __user *)(uintptr_t)cmd.metadata, cmd.metadata, + (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len, 0, &cmd.result, timeout); nvme_passthru_end(ctrl, effects); @@ -3138,7 +3153,7 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, down_write(&ctrl->namespaces_rwsem); list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { - if (ns->head->ns_id > nsid) + if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags)) list_move_tail(&ns->list, &rm_list); } up_write(&ctrl->namespaces_rwsem); @@ -3542,19 +3557,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) if (ctrl->admin_q) blk_mq_unquiesce_queue(ctrl->admin_q); - list_for_each_entry(ns, &ctrl->namespaces, list) { - /* - * Revalidating a dead namespace sets capacity to 0. This will - * end buffered writers dirtying pages that can't be synced. - */ - if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags)) - continue; - revalidate_disk(ns->disk); - blk_set_queue_dying(ns->queue); + list_for_each_entry(ns, &ctrl->namespaces, list) + nvme_set_queue_dying(ns); - /* Forcibly unquiesce queues to avoid blocking dispatch */ - blk_mq_unquiesce_queue(ns->queue); - } up_read(&ctrl->namespaces_rwsem); } EXPORT_SYMBOL_GPL(nvme_kill_queues); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index ba943f211687..ddd441b1516a 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2556,11 +2556,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) quirks |= check_vendor_combination_bug(pdev); - result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, - quirks); - if (result) - goto release_pools; - /* * Double check that our mempool alloc size will cover the biggest * command we support. 
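The two nvme_probe() hunks above and below move the nvme_init_ctrl() call after the mempool sizing check and add a matching release_mempool: label, i.e. the usual probe pattern of acquiring resources in order and unwinding them in reverse on failure. A minimal, generic sketch of that pattern; probe_sketch() and the alloc_*/free_* helpers below are placeholder names for illustration, not kernel APIs:

#include <stdio.h>

static int alloc_prp_pools(void)   { return 0; }
static void free_prp_pools(void)   { puts("prp pools released"); }
static int alloc_iod_mempool(void) { return 0; }
static void free_iod_mempool(void) { puts("iod mempool destroyed"); }
static int init_ctrl(void)         { return -1; /* pretend controller init fails */ }

static int probe_sketch(void)
{
	int ret;

	ret = alloc_prp_pools();
	if (ret)
		return ret;

	ret = alloc_iod_mempool();
	if (ret)
		goto release_pools;

	/* register the controller last, once everything it depends on exists */
	ret = init_ctrl();
	if (ret)
		goto release_mempool;

	return 0;

release_mempool:
	free_iod_mempool();	/* unwind in reverse order of acquisition */
release_pools:
	free_prp_pools();
	return ret;
}

int main(void)
{
	return probe_sketch() ? 1 : 0;
}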
@@ -2578,6 +2573,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto release_pools; } + result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, + quirks); + if (result) + goto release_mempool; + dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); nvme_get_ctrl(&dev->ctrl); @@ -2585,6 +2585,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; + release_mempool: + mempool_destroy(dev->iod_mempool); release_pools: nvme_release_prp_pools(dev); unmap: diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 9544625c0b7d..518c5b09038c 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -732,8 +732,11 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, blk_cleanup_queue(ctrl->ctrl.admin_q); nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); } - nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, - sizeof(struct nvme_command), DMA_TO_DEVICE); + if (ctrl->async_event_sqe.data) { + nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, + sizeof(struct nvme_command), DMA_TO_DEVICE); + ctrl->async_event_sqe.data = NULL; + } nvme_rdma_free_queue(&ctrl->queues[0]); } diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index b5b0cdc21d01..514d1dfc5630 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -936,6 +936,10 @@ struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id) return cell; } + /* NULL cell_id only allowed for device tree; invalid otherwise */ + if (!cell_id) + return ERR_PTR(-EINVAL); + return nvmem_cell_get_from_list(cell_id); } EXPORT_SYMBOL_GPL(nvmem_cell_get); diff --git a/drivers/of/base.c b/drivers/of/base.c index 848f549164cd..466e3c8582f0 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -102,7 +102,7 @@ static u32 phandle_cache_mask; * - the phandle lookup overhead reduction provided by the cache * will likely be less */ -static void of_populate_phandle_cache(void) +void of_populate_phandle_cache(void) { unsigned long flags; u32 cache_entries; @@ -134,8 +134,7 @@ out: raw_spin_unlock_irqrestore(&devtree_lock, flags); } -#ifndef CONFIG_MODULES -static int __init of_free_phandle_cache(void) +int of_free_phandle_cache(void) { unsigned long flags; @@ -148,6 +147,7 @@ static int __init of_free_phandle_cache(void) return 0; } +#if !defined(CONFIG_MODULES) late_initcall_sync(of_free_phandle_cache); #endif diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h index 891d780c076a..216175d11d3d 100644 --- a/drivers/of/of_private.h +++ b/drivers/of/of_private.h @@ -79,6 +79,8 @@ int of_resolve_phandles(struct device_node *tree); #if defined(CONFIG_OF_OVERLAY) void of_overlay_mutex_lock(void); void of_overlay_mutex_unlock(void); +int of_free_phandle_cache(void); +void of_populate_phandle_cache(void); #else static inline void of_overlay_mutex_lock(void) {}; static inline void of_overlay_mutex_unlock(void) {}; diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 7baa53e5b1d7..eda57ef12fd0 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -804,6 +804,8 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree, goto err_free_overlay_changeset; } + of_populate_phandle_cache(); + ret = __of_changeset_apply_notify(&ovcs->cset); if (ret) pr_err("overlay changeset entry notify error %d\n", ret); @@ -1046,8 +1048,17 @@ int of_overlay_remove(int *ovcs_id) list_del(&ovcs->ovcs_list); + /* + * Disable phandle cache. 
Avoids race condition that would arise + * from removing cache entry when the associated node is deleted. + */ + of_free_phandle_cache(); + ret_apply = 0; ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply); + + of_populate_phandle_cache(); + if (ret) { if (ret_apply) devicetree_state_flags |= DTSF_REVERT_FAIL; diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 535201984b8b..1b2cfe51e8d7 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -28,10 +28,10 @@ obj-$(CONFIG_PCI_PF_STUB) += pci-pf-stub.o obj-$(CONFIG_PCI_ECAM) += ecam.o obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o -obj-y += controller/ -obj-y += switch/ - # Endpoint library must be initialized before its users obj-$(CONFIG_PCI_ENDPOINT) += endpoint/ +obj-y += controller/ +obj-y += switch/ + ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index 18fa09b3ac8f..cc9fa02d32a0 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -96,7 +96,6 @@ config PCI_HOST_GENERIC depends on OF select PCI_HOST_COMMON select IRQ_DOMAIN - select PCI_DOMAINS help Say Y here if you want to support a simple generic PCI host controller, such as the one emulated by kvmtool. @@ -138,7 +137,6 @@ config PCI_VERSATILE config PCIE_IPROC tristate - select PCI_DOMAINS help This enables the iProc PCIe core controller support for Broadcom's iProc family of SoCs. An appropriate bus interface driver needs @@ -176,7 +174,6 @@ config PCIE_IPROC_MSI config PCIE_ALTERA bool "Altera PCIe controller" depends on ARM || NIOS2 || COMPILE_TEST - select PCI_DOMAINS help Say Y here if you want to enable PCIe controller support on Altera FPGA. diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 16f52c626b4b..91b0194240a5 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -58,7 +58,6 @@ config PCIE_DW_PLAT_HOST depends on PCI && PCI_MSI_IRQ_DOMAIN select PCIE_DW_HOST select PCIE_DW_PLAT - default y help Enables support for the PCIe controller in the Designware IP to work in host mode. 
There are two instances of PCIe controller in diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 781aa03aeede..29a05759a294 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -363,7 +363,8 @@ int dw_pcie_host_init(struct pcie_port *pp) resource_list_for_each_entry_safe(win, tmp, &bridge->windows) { switch (resource_type(win->res)) { case IORESOURCE_IO: - ret = pci_remap_iospace(win->res, pp->io_base); + ret = devm_pci_remap_iospace(dev, win->res, + pp->io_base); if (ret) { dev_warn(dev, "Error %d: failed to map resource %pR\n", ret, win->res); diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index d3172d5d3d35..0fae816fba39 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -849,7 +849,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie) 0, 0xF8000000, 0, lower_32_bits(res->start), OB_PCIE_IO); - err = pci_remap_iospace(res, iobase); + err = devm_pci_remap_iospace(dev, res, iobase); if (err) { dev_warn(dev, "error %d: failed to map resource %pR\n", err, res); diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c index a1ebe9ed441f..bf5ece5d9291 100644 --- a/drivers/pci/controller/pci-ftpci100.c +++ b/drivers/pci/controller/pci-ftpci100.c @@ -355,11 +355,13 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p) irq = of_irq_get(intc, 0); if (irq <= 0) { dev_err(p->dev, "failed to get parent IRQ\n"); + of_node_put(intc); return irq ?: -EINVAL; } p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX, &faraday_pci_irqdomain_ops, p); + of_node_put(intc); if (!p->irqdomain) { dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n"); return -EINVAL; @@ -501,7 +503,7 @@ static int faraday_pci_probe(struct platform_device *pdev) dev_err(dev, "illegal IO mem size\n"); return -EINVAL; } - ret = pci_remap_iospace(io, io_base); + ret = devm_pci_remap_iospace(dev, io, io_base); if (ret) { dev_warn(dev, "error %d: failed to map resource %pR\n", ret, io); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 6cc5036ac83c..f6325f1a89e8 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -1073,6 +1073,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct pci_bus *pbus; struct pci_dev *pdev; struct cpumask *dest; + unsigned long flags; struct compose_comp_ctxt comp; struct tran_int_desc *int_desc; struct { @@ -1164,14 +1165,15 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) * the channel callback directly when channel->target_cpu is * the current CPU. When the higher level interrupt code * calls us with interrupt enabled, let's add the - * local_bh_disable()/enable() to avoid race. + * local_irq_save()/restore() to avoid race: + * hv_pci_onchannelcallback() can also run in tasklet. 
*/ - local_bh_disable(); + local_irq_save(flags); if (hbus->hdev->channel->target_cpu == smp_processor_id()) hv_pci_onchannelcallback(hbus); - local_bh_enable(); + local_irq_restore(flags); if (hpdev->state == hv_pcichild_ejecting) { dev_err_once(&hbus->hdev->device, diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c index 68b8bfbdb867..d219404bad92 100644 --- a/drivers/pci/controller/pci-v3-semi.c +++ b/drivers/pci/controller/pci-v3-semi.c @@ -537,7 +537,7 @@ static int v3_pci_setup_resource(struct v3_pci *v3, v3->io_bus_addr = io->start - win->offset; dev_dbg(dev, "I/O window %pR, bus addr %pap\n", io, &v3->io_bus_addr); - ret = pci_remap_iospace(io, io_base); + ret = devm_pci_remap_iospace(dev, io, io_base); if (ret) { dev_warn(dev, "error %d: failed to map resource %pR\n", diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c index 994f32061b32..f59ad2728c0b 100644 --- a/drivers/pci/controller/pci-versatile.c +++ b/drivers/pci/controller/pci-versatile.c @@ -82,7 +82,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev, switch (resource_type(res)) { case IORESOURCE_IO: - err = pci_remap_iospace(res, iobase); + err = devm_pci_remap_iospace(dev, res, iobase); if (err) { dev_warn(dev, "error %d: failed to map resource %pR\n", err, res); diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index d854d67e873c..ffda3e8b4742 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c @@ -423,7 +423,7 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port, case IORESOURCE_IO: xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base, res->start - window->offset); - ret = pci_remap_iospace(res, io_base); + ret = devm_pci_remap_iospace(dev, res, io_base); if (ret < 0) return ret; break; diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 0baabe30858f..861dda69f366 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -1109,7 +1109,7 @@ static int mtk_pcie_request_resources(struct mtk_pcie *pcie) if (err < 0) return err; - pci_remap_iospace(&pcie->pio, pcie->io.start); + devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start); return 0; } diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c index 874d75c9ee4a..c8febb009454 100644 --- a/drivers/pci/controller/pcie-rcar.c +++ b/drivers/pci/controller/pcie-rcar.c @@ -680,7 +680,11 @@ static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie) if (err) return err; - return phy_power_on(pcie->phy); + err = phy_power_on(pcie->phy); + if (err) + phy_exit(pcie->phy); + + return err; } static int rcar_msi_alloc(struct rcar_msi *chip) @@ -1165,7 +1169,7 @@ static int rcar_pcie_probe(struct platform_device *pdev) if (rcar_pcie_hw_init(pcie)) { dev_info(dev, "PCIe link down\n"); err = -ENODEV; - goto err_clk_disable; + goto err_phy_shutdown; } data = rcar_pci_read_reg(pcie, MACSR); @@ -1177,7 +1181,7 @@ static int rcar_pcie_probe(struct platform_device *pdev) dev_err(dev, "failed to enable MSI support: %d\n", err); - goto err_clk_disable; + goto err_phy_shutdown; } } @@ -1191,6 +1195,12 @@ err_msi_teardown: if (IS_ENABLED(CONFIG_PCI_MSI)) rcar_pcie_teardown_msi(pcie); +err_phy_shutdown: + if (pcie->phy) { + phy_power_off(pcie->phy); + phy_exit(pcie->phy); + } + err_clk_disable: clk_disable_unprepare(pcie->bus_clk); diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c 
b/drivers/pci/controller/pcie-xilinx-nwl.c index 6a4bbb5b3de0..fb32840ce8e6 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -559,7 +559,7 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) PCI_NUM_INTX, &legacy_domain_ops, pcie); - + of_node_put(legacy_intc_node); if (!pcie->legacy_irq_domain) { dev_err(dev, "failed to create IRQ domain\n"); return -ENOMEM; diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index b110a3a814e3..7b1389d8e2a5 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ b/drivers/pci/controller/pcie-xilinx.c @@ -509,6 +509,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, &intx_domain_ops, port); + of_node_put(pcie_intc_node); if (!port->leg_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); return -ENODEV; diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index 523a8cab3bfb..825fa24427a3 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c @@ -137,6 +137,20 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar) } EXPORT_SYMBOL_GPL(pci_epf_alloc_space); +static void pci_epf_remove_cfs(struct pci_epf_driver *driver) +{ + struct config_group *group, *tmp; + + if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS)) + return; + + mutex_lock(&pci_epf_mutex); + list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry) + pci_ep_cfs_remove_epf_group(group); + list_del(&driver->epf_group); + mutex_unlock(&pci_epf_mutex); +} + /** * pci_epf_unregister_driver() - unregister the PCI EPF driver * @driver: the PCI EPF driver that has to be unregistered @@ -145,17 +159,38 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space); */ void pci_epf_unregister_driver(struct pci_epf_driver *driver) { - struct config_group *group; - - mutex_lock(&pci_epf_mutex); - list_for_each_entry(group, &driver->epf_group, group_entry) - pci_ep_cfs_remove_epf_group(group); - list_del(&driver->epf_group); - mutex_unlock(&pci_epf_mutex); + pci_epf_remove_cfs(driver); driver_unregister(&driver->driver); } EXPORT_SYMBOL_GPL(pci_epf_unregister_driver); +static int pci_epf_add_cfs(struct pci_epf_driver *driver) +{ + struct config_group *group; + const struct pci_epf_device_id *id; + + if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS)) + return 0; + + INIT_LIST_HEAD(&driver->epf_group); + + id = driver->id_table; + while (id->name[0]) { + group = pci_ep_cfs_add_epf_group(id->name); + if (IS_ERR(group)) { + pci_epf_remove_cfs(driver); + return PTR_ERR(group); + } + + mutex_lock(&pci_epf_mutex); + list_add_tail(&group->group_entry, &driver->epf_group); + mutex_unlock(&pci_epf_mutex); + id++; + } + + return 0; +} + /** * __pci_epf_register_driver() - register a new PCI EPF driver * @driver: structure representing PCI EPF driver @@ -167,8 +202,6 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver, struct module *owner) { int ret; - struct config_group *group; - const struct pci_epf_device_id *id; if (!driver->ops) return -EINVAL; @@ -183,16 +216,7 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver, if (ret) return ret; - INIT_LIST_HEAD(&driver->epf_group); - - id = driver->id_table; - while (id->name[0]) { - group = pci_ep_cfs_add_epf_group(id->name); - mutex_lock(&pci_epf_mutex); - list_add_tail(&group->group_entry, &driver->epf_group); - mutex_unlock(&pci_epf_mutex); - id++; - } + 
pci_epf_add_cfs(driver); return 0; } diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index 3979f89b250a..5bd6c1573295 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c @@ -7,7 +7,6 @@ * All rights reserved. * * Send feedback to <kristen.c.accardi@intel.com> - * */ #include <linux/module.h> @@ -87,8 +86,17 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev) return 0; /* If _OSC exists, we should not evaluate OSHP */ + + /* + * If there's no ACPI host bridge (i.e., ACPI support is compiled + * into the kernel but the hardware platform doesn't support ACPI), + * there's nothing to do here. + */ host = pci_find_host_bridge(pdev->bus); root = acpi_pci_find_root(ACPI_HANDLE(&host->dev)); + if (!root) + return 0; + if (root->osc_support_set) goto no_control; diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index d0d73dbbd5ca..0f04ae648cf1 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -575,6 +575,22 @@ void pci_iov_release(struct pci_dev *dev) } /** + * pci_iov_remove - clean up SR-IOV state after PF driver is detached + * @dev: the PCI device + */ +void pci_iov_remove(struct pci_dev *dev) +{ + struct pci_sriov *iov = dev->sriov; + + if (!dev->is_physfn) + return; + + iov->driver_max_VFs = iov->total_VFs; + if (iov->num_VFs) + pci_warn(dev, "driver left SR-IOV enabled after remove\n"); +} + +/** * pci_iov_update_resource - update a VF BAR * @dev: the PCI device * @resno: the resource number diff --git a/drivers/pci/of.c b/drivers/pci/of.c index d088c9147f10..69a60d6ebd73 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -612,7 +612,7 @@ int pci_parse_request_of_pci_ranges(struct device *dev, switch (resource_type(res)) { case IORESOURCE_IO: - err = pci_remap_iospace(res, iobase); + err = devm_pci_remap_iospace(dev, res, iobase); if (err) { dev_warn(dev, "error %d: failed to map resource %pR\n", err, res); diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 65113b6eed14..89ee6a2b6eb8 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -629,6 +629,18 @@ static bool acpi_pci_need_resume(struct pci_dev *dev) { struct acpi_device *adev = ACPI_COMPANION(&dev->dev); + /* + * In some cases (eg. Samsung 305V4A) leaving a bridge in suspend over + * system-wide suspend/resume confuses the platform firmware, so avoid + * doing that, unless the bridge has a driver that should take care of + * the PM handling. According to Section 16.1.6 of ACPI 6.2, endpoint + * devices are expected to be in D3 before invoking the S3 entry path + * from the firmware, so they should not be affected by this issue. 
+ */ + if (pci_is_bridge(dev) && !dev->driver && + acpi_target_system_state() != ACPI_STATE_S0) + return true; + if (!adev || !acpi_device_power_manageable(adev)) return false; diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index c125d53033c6..6792292b5fc7 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -445,6 +445,7 @@ static int pci_device_remove(struct device *dev) } pcibios_free_irq(pci_dev); pci_dev->driver = NULL; + pci_iov_remove(pci_dev); } /* Undo the runtime PM settings in local_pci_probe() */ diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 97acba712e4e..316496e99da9 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -3579,6 +3579,44 @@ void pci_unmap_iospace(struct resource *res) } EXPORT_SYMBOL(pci_unmap_iospace); +static void devm_pci_unmap_iospace(struct device *dev, void *ptr) +{ + struct resource **res = ptr; + + pci_unmap_iospace(*res); +} + +/** + * devm_pci_remap_iospace - Managed pci_remap_iospace() + * @dev: Generic device to remap IO address for + * @res: Resource describing the I/O space + * @phys_addr: physical address of range to be mapped + * + * Managed pci_remap_iospace(). Map is automatically unmapped on driver + * detach. + */ +int devm_pci_remap_iospace(struct device *dev, const struct resource *res, + phys_addr_t phys_addr) +{ + const struct resource **ptr; + int error; + + ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + error = pci_remap_iospace(res, phys_addr); + if (error) { + devres_free(ptr); + } else { + *ptr = res; + devres_add(dev, ptr); + } + + return error; +} +EXPORT_SYMBOL(devm_pci_remap_iospace); + /** * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace() * @dev: Generic device to remap IO address for diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index c358e7a07f3f..882f1f9596df 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -311,6 +311,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev) #ifdef CONFIG_PCI_IOV int pci_iov_init(struct pci_dev *dev); void pci_iov_release(struct pci_dev *dev); +void pci_iov_remove(struct pci_dev *dev); void pci_iov_update_resource(struct pci_dev *dev, int resno); resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno); void pci_restore_iov_state(struct pci_dev *dev); @@ -325,6 +326,9 @@ static inline void pci_iov_release(struct pci_dev *dev) { } +static inline void pci_iov_remove(struct pci_dev *dev) +{ +} static inline void pci_restore_iov_state(struct pci_dev *dev) { } diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c index 6bdb1dad805f..0e31f1392a53 100644 --- a/drivers/perf/xgene_pmu.c +++ b/drivers/perf/xgene_pmu.c @@ -1463,7 +1463,7 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id) case PMU_TYPE_IOB: return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id); case PMU_TYPE_IOB_SLOW: - return devm_kasprintf(dev, GFP_KERNEL, "iob-slow%d", id); + return devm_kasprintf(dev, GFP_KERNEL, "iob_slow%d", id); case PMU_TYPE_MCB: return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id); case PMU_TYPE_MC: diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c index 35c17653c694..87618a4e90e4 100644 --- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c +++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c @@ -460,8 +460,8 @@ static int nsp_pinmux_enable(struct pinctrl_dev *pctrl_dev, const struct nsp_pin_function *func; const struct nsp_pin_group *grp; - if (grp_select > pinctrl->num_groups || - func_select > 
pinctrl->num_functions) + if (grp_select >= pinctrl->num_groups || + func_select >= pinctrl->num_functions) return -EINVAL; func = &pinctrl->functions[func_select]; @@ -577,6 +577,8 @@ static int nsp_pinmux_probe(struct platform_device *pdev) return PTR_ERR(pinctrl->base0); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) + return -EINVAL; pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); if (!pinctrl->base1) { diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c index e3f1ab2290fc..4c4740ffeb9c 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c @@ -1424,7 +1424,7 @@ static struct pinctrl_desc mtk_desc = { static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio) { - struct mtk_pinctrl *hw = dev_get_drvdata(chip->parent); + struct mtk_pinctrl *hw = gpiochip_get_data(chip); int value, err; err = mtk_hw_get_value(hw, gpio, PINCTRL_PIN_REG_DI, &value); @@ -1436,7 +1436,7 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio) static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value) { - struct mtk_pinctrl *hw = dev_get_drvdata(chip->parent); + struct mtk_pinctrl *hw = gpiochip_get_data(chip); mtk_hw_set_value(hw, gpio, PINCTRL_PIN_REG_DO, !!value); } @@ -1508,11 +1508,20 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np) if (ret < 0) return ret; - ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0, - chip->ngpio); - if (ret < 0) { - gpiochip_remove(chip); - return ret; + /* Just for backward compatible for these old pinctrl nodes without + * "gpio-ranges" property. Otherwise, called directly from a + * DeviceTree-supported pinctrl driver is DEPRECATED. + * Please see Section 2.1 of + * Documentation/devicetree/bindings/gpio/gpio.txt on how to + * bind pinctrl and gpio drivers via the "gpio-ranges" property. + */ + if (!of_find_property(np, "gpio-ranges", NULL)) { + ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0, + chip->ngpio); + if (ret < 0) { + gpiochip_remove(chip); + return ret; + } } return 0; @@ -1695,15 +1704,16 @@ static int mtk_pinctrl_probe(struct platform_device *pdev) mtk_desc.custom_conf_items = mtk_conf_items; #endif - hw->pctrl = devm_pinctrl_register(&pdev->dev, &mtk_desc, hw); - if (IS_ERR(hw->pctrl)) - return PTR_ERR(hw->pctrl); + err = devm_pinctrl_register_and_init(&pdev->dev, &mtk_desc, hw, + &hw->pctrl); + if (err) + return err; /* Setup groups descriptions per SoC types */ err = mtk_build_groups(hw); if (err) { dev_err(&pdev->dev, "Failed to build groups\n"); - return 0; + return err; } /* Setup functions descriptions per SoC types */ @@ -1713,17 +1723,25 @@ static int mtk_pinctrl_probe(struct platform_device *pdev) return err; } - err = mtk_build_gpiochip(hw, pdev->dev.of_node); - if (err) { - dev_err(&pdev->dev, "Failed to add gpio_chip\n"); + /* For able to make pinctrl_claim_hogs, we must not enable pinctrl + * until all groups and functions are being added one. 
+ */ + err = pinctrl_enable(hw->pctrl); + if (err) return err; - } err = mtk_build_eint(hw, pdev); if (err) dev_warn(&pdev->dev, "Failed to add EINT, but pinctrl still can work\n"); + /* Build gpiochip should be after pinctrl_enable is done */ + err = mtk_build_gpiochip(hw, pdev->dev.of_node); + if (err) { + dev_err(&pdev->dev, "Failed to add gpio_chip\n"); + return err; + } + platform_set_drvdata(pdev, hw); return 0; diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index a1d7156d0a43..6a1b6058b991 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c @@ -536,7 +536,7 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev, ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input); } else { ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false); - ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, input); + ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input); ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, false); } diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c index b02caf316711..eeb58b3bbc9a 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c @@ -21,15 +21,13 @@ #include "core.h" #include "sh_pfc.h" -#define CFG_FLAGS SH_PFC_PIN_CFG_DRIVE_STRENGTH - #define CPU_ALL_PORT(fn, sfx) \ - PORT_GP_CFG_22(0, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \ - PORT_GP_CFG_28(1, fn, sfx, CFG_FLAGS), \ - PORT_GP_CFG_17(2, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \ - PORT_GP_CFG_17(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \ - PORT_GP_CFG_6(4, fn, sfx, CFG_FLAGS), \ - PORT_GP_CFG_15(5, fn, sfx, CFG_FLAGS) + PORT_GP_CFG_22(0, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \ + PORT_GP_28(1, fn, sfx), \ + PORT_GP_CFG_17(2, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \ + PORT_GP_CFG_17(3, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \ + PORT_GP_6(4, fn, sfx), \ + PORT_GP_15(5, fn, sfx) /* * F_() : just information * FM() : macro for FN_xxx / xxx_MARK diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index f1fa8612db40..06978c14c83b 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -2185,7 +2185,7 @@ static int __init dell_init(void) dell_fill_request(&buffer, token->location, 0, 0, 0); ret = dell_send_request(&buffer, CLASS_TOKEN_READ, SELECT_TOKEN_AC); - if (ret) + if (ret == 0) max_intensity = buffer.output[3]; } diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 547dbdac9d54..01b0e2bb3319 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -89,6 +89,7 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin, case PTP_PF_PHYSYNC: if (chan != 0) return -EINVAL; + break; default: return -EINVAL; } diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 6d4012dd6922..bac1eeb3d312 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -265,8 +265,10 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) return err; /* full-function RTCs won't have such missing fields */ - if (rtc_valid_tm(&alarm->time) == 0) + if (rtc_valid_tm(&alarm->time) == 0) { + rtc_add_offset(rtc, &alarm->time); return 0; + } /* get the "after" timestamp, to detect wrapped fields */ err = rtc_read_time(rtc, &now); @@ -409,7 +411,6 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) if (err) return err; - rtc_subtract_offset(rtc, &alarm->time); scheduled = 
rtc_tm_to_time64(&alarm->time); /* Make sure we're not setting alarms in the past */ @@ -426,6 +427,8 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) * over right here, before we set the alarm. */ + rtc_subtract_offset(rtc, &alarm->time); + if (!rtc->ops) err = -ENODEV; else if (!rtc->ops->set_alarm) @@ -467,7 +470,6 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) mutex_unlock(&rtc->ops_lock); - rtc_add_offset(rtc, &alarm->time); return err; } EXPORT_SYMBOL_GPL(rtc_set_alarm); diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c index 097a4d4e2aba..1925aaf09093 100644 --- a/drivers/rtc/rtc-mrst.c +++ b/drivers/rtc/rtc-mrst.c @@ -367,10 +367,8 @@ static int vrtc_mrst_do_probe(struct device *dev, struct resource *iomem, } retval = rtc_register_device(mrst_rtc.rtc); - if (retval) { - retval = PTR_ERR(mrst_rtc.rtc); + if (retval) goto cleanup0; - } dev_dbg(dev, "initialised\n"); return 0; diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index d3a38c421503..a9f60d0ee02e 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -41,6 +41,15 @@ #define DASD_DIAG_MOD "dasd_diag_mod" +static unsigned int queue_depth = 32; +static unsigned int nr_hw_queues = 4; + +module_param(queue_depth, uint, 0444); +MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices"); + +module_param(nr_hw_queues, uint, 0444); +MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices"); + /* * SECTION: exported variables of dasd.c */ @@ -3115,8 +3124,8 @@ static int dasd_alloc_queue(struct dasd_block *block) block->tag_set.ops = &dasd_mq_ops; block->tag_set.cmd_size = sizeof(struct dasd_ccw_req); - block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES; - block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV; + block->tag_set.nr_hw_queues = nr_hw_queues; + block->tag_set.queue_depth = queue_depth; block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; rc = blk_mq_alloc_tag_set(&block->tag_set); diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 976b6bd4fb05..de6b96036aa4 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -228,14 +228,6 @@ struct dasd_ccw_req { #define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */ #define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */ -/* - * There is no reliable way to determine the number of available CPUs on - * LPAR but there is no big performance difference between 1 and the - * maximum CPU number. - * 64 is a good trade off performance wise. - */ -#define DASD_NR_HW_QUEUES 64 -#define DASD_MAX_LCU_DEV 256 #define DASD_REQ_PER_DEV 4 /* Signature for error recovery functions. */ diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 2a5fec55bf60..a246a618f9a4 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -829,6 +829,17 @@ struct qeth_trap_id { /*some helper functions*/ #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") +static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf, + unsigned int elements) +{ + unsigned int i; + + for (i = 0; i < elements; i++) + memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element)); + buf->element[14].sflags = 0; + buf->element[15].sflags = 0; +} + /** * qeth_get_elements_for_range() - find number of SBALEs to cover range. * @start: Start of the address range. 
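
Editor's aside: a few hunks above, the drivers/s390/block/dasd.c change replaces the compile-time DASD_NR_HW_QUEUES / DASD_MAX_LCU_DEV sizing with two read-only module parameters that are copied into the blk-mq tag set when a queue is allocated. The following is a minimal sketch of that pattern only, not part of the patch; the module and parameter names are hypothetical.

/* Sketch: read-only (0444) module parameters consumed at setup time. */
#include <linux/module.h>
#include <linux/init.h>

static unsigned int queue_depth = 32;	/* illustrative defaults */
static unsigned int nr_hw_queues = 4;

module_param(queue_depth, uint, 0444);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new devices");

module_param(nr_hw_queues, uint, 0444);
MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new devices");

static int __init param_sketch_init(void)
{
	/*
	 * A real driver copies these into its tag set, as the
	 * dasd_alloc_queue() hunk above does.
	 */
	pr_info("param_sketch: queue_depth=%u nr_hw_queues=%u\n",
		queue_depth, nr_hw_queues);
	return 0;
}

static void __exit param_sketch_exit(void)
{
}

module_init(param_sketch_init);
module_exit(param_sketch_exit);
MODULE_LICENSE("GPL");

With 0444 permissions the values appear read-only under /sys/module/<module>/parameters/ and can only be set at load time (module command line or modprobe configuration), which matches the "for new DASD devices" wording of the descriptions in the hunk above.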
@@ -1029,7 +1040,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *, __u16, __u16, enum qeth_prot_versions); int qeth_set_features(struct net_device *, netdev_features_t); -void qeth_recover_features(struct net_device *dev); +void qeth_enable_hw_features(struct net_device *dev); netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); netdev_features_t qeth_features_check(struct sk_buff *skb, struct net_device *dev, diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 8e1474f1ffac..d01ac29fd986 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -73,9 +73,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, enum iucv_tx_notify notification); static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); -static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf, - enum qeth_qdio_buffer_states newbufstate); static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); struct workqueue_struct *qeth_wq; @@ -489,6 +486,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob; struct qeth_qdio_out_buffer *buffer; enum iucv_tx_notify notification; + unsigned int i; aob = (struct qaob *) phys_to_virt(phys_aob_addr); QETH_CARD_TEXT(card, 5, "haob"); @@ -513,10 +511,18 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, qeth_notify_skbs(buffer->q, buffer, notification); buffer->aob = NULL; - qeth_clear_output_buffer(buffer->q, buffer, - QETH_QDIO_BUF_HANDLED_DELAYED); + /* Free dangling allocations. The attached skbs are handled by + * qeth_cleanup_handled_pending(). + */ + for (i = 0; + i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card); + i++) { + if (aob->sba[i] && buffer->is_header[i]) + kmem_cache_free(qeth_core_header_cache, + (void *) aob->sba[i]); + } + atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED); - /* from here on: do not touch buffer anymore */ qdio_release_aob(aob); } @@ -3759,6 +3765,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, QETH_CARD_TEXT(queue->card, 5, "aob"); QETH_CARD_TEXT_(queue->card, 5, "%lx", virt_to_phys(buffer->aob)); + + /* prepare the queue slot for re-use: */ + qeth_scrub_qdio_buffer(buffer->buffer, + QETH_MAX_BUFFER_ELEMENTS(card)); if (qeth_init_qdio_out_buf(queue, bidx)) { QETH_CARD_TEXT(card, 2, "outofbuf"); qeth_schedule_recovery(card); @@ -4834,7 +4844,7 @@ int qeth_vm_request_mac(struct qeth_card *card) goto out; } - ccw_device_get_id(CARD_RDEV(card), &id); + ccw_device_get_id(CARD_DDEV(card), &id); request->resp_buf_len = sizeof(*response); request->resp_version = DIAG26C_VERSION2; request->op_code = DIAG26C_GET_MAC; @@ -6459,28 +6469,27 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) #define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \ NETIF_F_IPV6_CSUM) /** - * qeth_recover_features() - Restore device features after recovery - * @dev: the recovering net_device - * - * Caller must hold rtnl lock. + * qeth_enable_hw_features() - (Re-)Enable HW functions for device features + * @dev: a net_device */ -void qeth_recover_features(struct net_device *dev) +void qeth_enable_hw_features(struct net_device *dev) { - netdev_features_t features = dev->features; struct qeth_card *card = dev->ml_priv; + netdev_features_t features; + rtnl_lock(); + features = dev->features; /* force-off any feature that needs an IPA sequence. 
* netdev_update_features() will restart them. */ dev->features &= ~QETH_HW_FEATURES; netdev_update_features(dev); - - if (features == dev->features) - return; - dev_warn(&card->gdev->dev, - "Device recovery failed to restore all offload features\n"); + if (features != dev->features) + dev_warn(&card->gdev->dev, + "Device recovery failed to restore all offload features\n"); + rtnl_unlock(); } -EXPORT_SYMBOL_GPL(qeth_recover_features); +EXPORT_SYMBOL_GPL(qeth_enable_hw_features); int qeth_set_features(struct net_device *dev, netdev_features_t features) { diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index a7cb37da6a21..2487f0aeb165 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -140,7 +140,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) { - enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? + enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ? IPA_CMD_SETGMAC : IPA_CMD_SETVMAC; int rc; @@ -157,7 +157,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac) { - enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? + enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ? IPA_CMD_DELGMAC : IPA_CMD_DELVMAC; int rc; @@ -501,27 +501,34 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) return -ERESTARTSYS; } + /* avoid racing against concurrent state change: */ + if (!mutex_trylock(&card->conf_mutex)) + return -EAGAIN; + if (!qeth_card_hw_is_reachable(card)) { ether_addr_copy(dev->dev_addr, addr->sa_data); - return 0; + goto out_unlock; } /* don't register the same address twice */ if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) && (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) - return 0; + goto out_unlock; /* add the new address, switch over, drop the old */ rc = qeth_l2_send_setmac(card, addr->sa_data); if (rc) - return rc; + goto out_unlock; ether_addr_copy(old_addr, dev->dev_addr); ether_addr_copy(dev->dev_addr, addr->sa_data); if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED) qeth_l2_remove_mac(card, old_addr); card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; - return 0; + +out_unlock: + mutex_unlock(&card->conf_mutex); + return rc; } static void qeth_promisc_to_bridge(struct qeth_card *card) @@ -1112,6 +1119,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) netif_carrier_off(card->dev); qeth_set_allowed_threads(card, 0xffffffff, 0); + + qeth_enable_hw_features(card->dev); if (recover_flag == CARD_STATE_RECOVER) { if (recovery_mode && card->info.type != QETH_CARD_TYPE_OSN) { @@ -1123,9 +1132,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) } /* this also sets saved unicast addresses */ qeth_l2_set_rx_mode(card->dev); - rtnl_lock(); - qeth_recover_features(card->dev); - rtnl_unlock(); } /* let user_space know that device is online */ kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index e7fa479adf47..5905dc63e256 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2662,6 +2662,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) netif_carrier_on(card->dev); else netif_carrier_off(card->dev); + + qeth_enable_hw_features(card->dev); if (recover_flag == CARD_STATE_RECOVER) { rtnl_lock(); if 
(recovery_mode) @@ -2669,7 +2671,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) else dev_open(card->dev); qeth_l3_set_rx_mode(card->dev); - qeth_recover_features(card->dev); rtnl_unlock(); } qeth_trace_features(card); diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index a9831bd37a73..a57f3a7d4748 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -1974,7 +1974,6 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev) u32 lun_count, nexus; u32 i, bus, target; u8 expose_flag, attribs; - u8 devtype; lun_count = aac_get_safw_phys_lun_count(dev); @@ -1992,23 +1991,23 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev) continue; if (expose_flag != 0) { - devtype = AAC_DEVTYPE_RAID_MEMBER; - goto update_devtype; + dev->hba_map[bus][target].devtype = + AAC_DEVTYPE_RAID_MEMBER; + continue; } if (nexus != 0 && (attribs & 8)) { - devtype = AAC_DEVTYPE_NATIVE_RAW; + dev->hba_map[bus][target].devtype = + AAC_DEVTYPE_NATIVE_RAW; dev->hba_map[bus][target].rmw_nexus = nexus; } else - devtype = AAC_DEVTYPE_ARC_RAW; + dev->hba_map[bus][target].devtype = + AAC_DEVTYPE_ARC_RAW; dev->hba_map[bus][target].scan_counter = dev->scan_counter; aac_set_safw_target_qd(dev, bus, target); - -update_devtype: - dev->hba_map[bus][target].devtype = devtype; } } diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h index 2a3977823812..a39be94d110c 100644 --- a/drivers/scsi/cxlflash/main.h +++ b/drivers/scsi/cxlflash/main.h @@ -107,12 +107,12 @@ cxlflash_assign_ops(struct dev_dependent_vals *ddv) { const struct cxlflash_backend_ops *ops = NULL; -#ifdef CONFIG_OCXL +#ifdef CONFIG_OCXL_BASE if (ddv->flags & CXLFLASH_OCXL_DEV) ops = &cxlflash_ocxl_ops; #endif -#ifdef CONFIG_CXL +#ifdef CONFIG_CXL_BASE if (!(ddv->flags & CXLFLASH_OCXL_DEV)) ops = &cxlflash_cxl_ops; #endif diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c index 0a95b5f25380..497a68389461 100644 --- a/drivers/scsi/cxlflash/ocxl_hw.c +++ b/drivers/scsi/cxlflash/ocxl_hw.c @@ -134,15 +134,14 @@ static struct file *ocxlflash_getfile(struct device *dev, const char *name, rc = PTR_ERR(file); dev_err(dev, "%s: alloc_file failed rc=%d\n", __func__, rc); - goto err5; + path_put(&path); + goto err3; } file->f_flags = flags & (O_ACCMODE | O_NONBLOCK); file->private_data = priv; out: return file; -err5: - path_put(&path); err4: iput(inode); err3: diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 15c7f3b6f35e..58bb70b886d7 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -3440,11 +3440,11 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h, struct ext_report_lun_entry *rle = &rlep->LUN[rle_index]; u16 bmic_device_index = 0; - bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]); - - encl_dev->sas_address = + encl_dev->eli = hpsa_get_enclosure_logical_identifier(h, scsi3addr); + bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]); + if (encl_dev->target == -1 || encl_dev->lun == -1) { rc = IO_OK; goto out; @@ -9697,7 +9697,24 @@ hpsa_sas_get_linkerrors(struct sas_phy *phy) static int hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) { - *identifier = rphy->identify.sas_address; + struct Scsi_Host *shost = phy_to_shost(rphy); + struct ctlr_info *h; + struct hpsa_scsi_dev_t *sd; + + if (!shost) + return -ENXIO; + + h = shost_to_hba(shost); + + if (!h) + return -ENXIO; + + sd = hpsa_find_device_by_sas_rphy(h, rphy); + if (!sd) + return -ENXIO; + + 
*identifier = sd->eli; + return 0; } diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index fb9f5e7f8209..59e023696fff 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -68,6 +68,7 @@ struct hpsa_scsi_dev_t { #define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0" unsigned char device_id[16]; /* from inquiry pg. 0x83 */ u64 sas_address; + u64 eli; /* from report diags. */ unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ unsigned char model[16]; /* bytes 16-31 of inquiry data */ unsigned char rev; /* byte 2 of inquiry data */ diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 0a9b8b387bd2..02d65dce74e5 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -760,7 +760,6 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg, ioa_cfg->hrrq[i].allow_interrupts = 0; spin_unlock(&ioa_cfg->hrrq[i]._lock); } - wmb(); /* Set interrupt mask to stop all new interrupts */ if (ioa_cfg->sis64) @@ -8403,7 +8402,6 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) ioa_cfg->hrrq[i].allow_interrupts = 1; spin_unlock(&ioa_cfg->hrrq[i]._lock); } - wmb(); if (ioa_cfg->sis64) { /* Set the adapter to the correct endian mode. */ writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 90394cef0f41..0a5dd5595dd3 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -3295,6 +3295,11 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) init_completion(&qedf->flogi_compl); + status = qed_ops->common->update_drv_state(qedf->cdev, true); + if (status) + QEDF_ERR(&(qedf->dbg_ctx), + "Failed to send drv state to MFW.\n"); + memset(&link_params, 0, sizeof(struct qed_link_params)); link_params.link_up = true; status = qed_ops->common->set_link(qedf->cdev, &link_params); @@ -3343,6 +3348,7 @@ static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id) static void __qedf_remove(struct pci_dev *pdev, int mode) { struct qedf_ctx *qedf; + int rc; if (!pdev) { QEDF_ERR(NULL, "pdev is NULL.\n"); @@ -3437,6 +3443,12 @@ static void __qedf_remove(struct pci_dev *pdev, int mode) qed_ops->common->set_power_state(qedf->cdev, PCI_D0); pci_set_drvdata(pdev, NULL); } + + rc = qed_ops->common->update_drv_state(qedf->cdev, false); + if (rc) + QEDF_ERR(&(qedf->dbg_ctx), + "Failed to send drv state to MFW.\n"); + qed_ops->common->slowpath_stop(qedf->cdev); qed_ops->common->remove(qedf->cdev); diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index cf274a79e77a..091ec1207bea 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -2273,6 +2273,7 @@ kset_free: static void __qedi_remove(struct pci_dev *pdev, int mode) { struct qedi_ctx *qedi = pci_get_drvdata(pdev); + int rval; if (qedi->tmf_thread) { flush_workqueue(qedi->tmf_thread); @@ -2302,6 +2303,10 @@ static void __qedi_remove(struct pci_dev *pdev, int mode) if (mode == QEDI_MODE_NORMAL) qedi_free_iscsi_pf_param(qedi); + rval = qedi_ops->common->update_drv_state(qedi->cdev, false); + if (rval) + QEDI_ERR(&qedi->dbg_ctx, "Failed to send drv state to MFW\n"); + if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) { qedi_ops->common->slowpath_stop(qedi->cdev); qedi_ops->common->remove(qedi->cdev); @@ -2576,6 +2581,12 @@ static int __qedi_probe(struct pci_dev *pdev, int mode) if (qedi_setup_boot_info(qedi)) QEDI_ERR(&qedi->dbg_ctx, "No iSCSI boot target configured\n"); + + rc = qedi_ops->common->update_drv_state(qedi->cdev, true); + if (rc) + 
QEDI_ERR(&qedi->dbg_ctx, + "Failed to send drv state to MFW\n"); + } return 0; diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 9442e18aef6f..0f94b1d62d3f 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -361,6 +361,8 @@ struct ct_arg { dma_addr_t rsp_dma; u32 req_size; u32 rsp_size; + u32 req_allocated_size; + u32 rsp_allocated_size; void *req; void *rsp; port_id_t id; diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 4bc2b66b299f..2c35b0b2baa0 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -556,7 +556,7 @@ err2: /* please ignore kernel warning. otherwise, we have mem leak. */ if (sp->u.iocb_cmd.u.ctarg.req) { dma_free_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.req_allocated_size, sp->u.iocb_cmd.u.ctarg.req, sp->u.iocb_cmd.u.ctarg.req_dma); sp->u.iocb_cmd.u.ctarg.req = NULL; @@ -564,7 +564,7 @@ err2: if (sp->u.iocb_cmd.u.ctarg.rsp) { dma_free_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, sp->u.iocb_cmd.u.ctarg.rsp, sp->u.iocb_cmd.u.ctarg.rsp_dma); sp->u.iocb_cmd.u.ctarg.rsp = NULL; @@ -617,6 +617,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id) sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.req) { ql_log(ql_log_warn, vha, 0xd041, "%s: Failed to allocate ct_sns request.\n", @@ -627,6 +628,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id) sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.rsp) { ql_log(ql_log_warn, vha, 0xd042, "%s: Failed to allocate ct_sns request.\n", @@ -712,6 +714,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.req) { ql_log(ql_log_warn, vha, 0xd041, "%s: Failed to allocate ct_sns request.\n", @@ -722,6 +725,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.rsp) { ql_log(ql_log_warn, vha, 0xd042, "%s: Failed to allocate ct_sns request.\n", @@ -802,6 +806,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id, sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.req) { ql_log(ql_log_warn, vha, 0xd041, "%s: Failed to allocate ct_sns request.\n", @@ -812,6 +817,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id, sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); + 
sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.rsp) { ql_log(ql_log_warn, vha, 0xd042, "%s: Failed to allocate ct_sns request.\n", @@ -909,6 +915,7 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha) sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.req) { ql_log(ql_log_warn, vha, 0xd041, "%s: Failed to allocate ct_sns request.\n", @@ -919,6 +926,7 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha) sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.rsp) { ql_log(ql_log_warn, vha, 0xd042, "%s: Failed to allocate ct_sns request.\n", @@ -3388,14 +3396,14 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp) { if (sp->u.iocb_cmd.u.ctarg.req) { dma_free_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.req_allocated_size, sp->u.iocb_cmd.u.ctarg.req, sp->u.iocb_cmd.u.ctarg.req_dma); sp->u.iocb_cmd.u.ctarg.req = NULL; } if (sp->u.iocb_cmd.u.ctarg.rsp) { dma_free_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, sp->u.iocb_cmd.u.ctarg.rsp, sp->u.iocb_cmd.u.ctarg.rsp_dma); sp->u.iocb_cmd.u.ctarg.rsp = NULL; @@ -3596,14 +3604,14 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res) /* please ignore kernel warning. otherwise, we have mem leak. */ if (sp->u.iocb_cmd.u.ctarg.req) { dma_free_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.req_allocated_size, sp->u.iocb_cmd.u.ctarg.req, sp->u.iocb_cmd.u.ctarg.req_dma); sp->u.iocb_cmd.u.ctarg.req = NULL; } if (sp->u.iocb_cmd.u.ctarg.rsp) { dma_free_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, sp->u.iocb_cmd.u.ctarg.rsp, sp->u.iocb_cmd.u.ctarg.rsp_dma); sp->u.iocb_cmd.u.ctarg.rsp = NULL; @@ -3654,6 +3662,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.req) { ql_log(ql_log_warn, vha, 0xd041, "Failed to allocate ct_sns request.\n"); @@ -3663,6 +3672,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.rsp) { ql_log(ql_log_warn, vha, 0xd042, "Failed to allocate ct_sns request.\n"); @@ -4142,14 +4152,14 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res) */ if (sp->u.iocb_cmd.u.ctarg.req) { dma_free_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.req_allocated_size, sp->u.iocb_cmd.u.ctarg.req, sp->u.iocb_cmd.u.ctarg.req_dma); sp->u.iocb_cmd.u.ctarg.req = NULL; } if (sp->u.iocb_cmd.u.ctarg.rsp) { dma_free_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, sp->u.iocb_cmd.u.ctarg.rsp, sp->u.iocb_cmd.u.ctarg.rsp_dma); 
sp->u.iocb_cmd.u.ctarg.rsp = NULL; @@ -4179,14 +4189,14 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res) /* please ignore kernel warning. Otherwise, we have mem leak. */ if (sp->u.iocb_cmd.u.ctarg.req) { dma_free_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.req_allocated_size, sp->u.iocb_cmd.u.ctarg.req, sp->u.iocb_cmd.u.ctarg.req_dma); sp->u.iocb_cmd.u.ctarg.req = NULL; } if (sp->u.iocb_cmd.u.ctarg.rsp) { dma_free_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, sp->u.iocb_cmd.u.ctarg.rsp, sp->u.iocb_cmd.u.ctarg.rsp_dma); sp->u.iocb_cmd.u.ctarg.rsp = NULL; @@ -4281,14 +4291,14 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, done_free_sp: if (sp->u.iocb_cmd.u.ctarg.req) { dma_free_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.req_allocated_size, sp->u.iocb_cmd.u.ctarg.req, sp->u.iocb_cmd.u.ctarg.req_dma); sp->u.iocb_cmd.u.ctarg.req = NULL; } if (sp->u.iocb_cmd.u.ctarg.rsp) { dma_free_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, sp->u.iocb_cmd.u.ctarg.rsp, sp->u.iocb_cmd.u.ctarg.rsp_dma); sp->u.iocb_cmd.u.ctarg.rsp = NULL; @@ -4349,6 +4359,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent( &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.req) { ql_log(ql_log_warn, vha, 0xffff, "Failed to allocate ct_sns request.\n"); @@ -4366,6 +4377,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent( &vha->hw->pdev->dev, rspsz, &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.rsp) { ql_log(ql_log_warn, vha, 0xffff, "Failed to allocate ct_sns request.\n"); @@ -4425,14 +4437,14 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) done_free_sp: if (sp->u.iocb_cmd.u.ctarg.req) { dma_free_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.req_allocated_size, sp->u.iocb_cmd.u.ctarg.req, sp->u.iocb_cmd.u.ctarg.req_dma); sp->u.iocb_cmd.u.ctarg.req = NULL; } if (sp->u.iocb_cmd.u.ctarg.rsp) { dma_free_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, sp->u.iocb_cmd.u.ctarg.rsp, sp->u.iocb_cmd.u.ctarg.rsp_dma); sp->u.iocb_cmd.u.ctarg.rsp = NULL; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 7b675243bd16..db0e3279e07a 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -591,12 +591,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, conflict_fcport = qla2x00_find_fcport_by_wwpn(vha, e->port_name, 0); - ql_dbg(ql_dbg_disc, vha, 0x20e6, - "%s %d %8phC post del sess\n", - __func__, __LINE__, - conflict_fcport->port_name); - qlt_schedule_sess_for_deletion - (conflict_fcport); + if (conflict_fcport) { + qlt_schedule_sess_for_deletion + (conflict_fcport); + ql_dbg(ql_dbg_disc, vha, 0x20e6, + "%s %d %8phC post del sess\n", + __func__, __LINE__, + conflict_fcport->port_name); + } } /* FW already picked this loop id for another fcport */ diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 
e881fce7477a..9f309e572be4 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3180,6 +3180,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); + ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0); + if (ha->isp_ops->initialize_adapter(base_vha)) { ql_log(ql_log_fatal, base_vha, 0x00d6, "Failed to initialize adapter - Adapter flags %x.\n", @@ -3216,8 +3218,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) host->can_queue, base_vha->req, base_vha->mgmt_svr_loop_id, host->sg_tablesize); - ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0); - if (ha->mqenable) { bool mq = false; bool startit = false; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 0fea2e2326be..1027b0cb7fa3 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1224,7 +1224,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess) void qlt_schedule_sess_for_deletion(struct fc_port *sess) { struct qla_tgt *tgt = sess->tgt; - struct qla_hw_data *ha = sess->vha->hw; unsigned long flags; if (sess->disc_state == DSC_DELETE_PEND) @@ -1241,16 +1240,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) return; } - spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (sess->deleted == QLA_SESS_DELETED) sess->logout_on_delete = 0; + spin_lock_irqsave(&sess->vha->work_lock, flags); if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + spin_unlock_irqrestore(&sess->vha->work_lock, flags); return; } sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + spin_unlock_irqrestore(&sess->vha->work_lock, flags); sess->disc_state = DSC_DELETE_PEND; diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 24d7496cd9e2..364e71861bfd 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -5507,9 +5507,9 @@ static void __exit scsi_debug_exit(void) int k = sdebug_add_host; stop_all_queued(); - free_all_queued(); for (; k; k--) sdebug_remove_adapter(); + free_all_queued(); driver_unregister(&sdebug_driverfs_driver); bus_unregister(&pseudo_lld_bus); root_device_unregister(pseudo_primary); diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index a14fef11776e..2bf3bf73886e 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -391,7 +391,8 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf) * Check that all zones of the device are equal. The last zone can however * be smaller. The zone size must also be a power of two number of LBAs. * - * Returns the zone size in bytes upon success or an error code upon failure. + * Returns the zone size in number of blocks upon success or an error code + * upon failure. 
*/ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp) { @@ -401,7 +402,7 @@ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp) unsigned char *rec; unsigned int buf_len; unsigned int list_length; - int ret; + s64 ret; u8 same; /* Get a buffer */ diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 53ae52dbff84..cd2fdac000c9 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -51,6 +51,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */ #include <linux/atomic.h> #include <linux/ratelimit.h> #include <linux/uio.h> +#include <linux/cred.h> /* for sg_check_file_access() */ #include "scsi.h" #include <scsi/scsi_dbg.h> @@ -209,6 +210,33 @@ static void sg_device_destroy(struct kref *kref); sdev_prefix_printk(prefix, (sdp)->device, \ (sdp)->disk->disk_name, fmt, ##a) +/* + * The SCSI interfaces that use read() and write() as an asynchronous variant of + * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways + * to trigger read() and write() calls from various contexts with elevated + * privileges. This can lead to kernel memory corruption (e.g. if these + * interfaces are called through splice()) and privilege escalation inside + * userspace (e.g. if a process with access to such a device passes a file + * descriptor to a SUID binary as stdin/stdout/stderr). + * + * This function provides protection for the legacy API by restricting the + * calling context. + */ +static int sg_check_file_access(struct file *filp, const char *caller) +{ + if (filp->f_cred != current_real_cred()) { + pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", + caller, task_tgid_vnr(current), current->comm); + return -EPERM; + } + if (uaccess_kernel()) { + pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n", + caller, task_tgid_vnr(current), current->comm); + return -EACCES; + } + return 0; +} + static int sg_allow_access(struct file *filp, unsigned char *cmd) { struct sg_fd *sfp = filp->private_data; @@ -393,6 +421,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) struct sg_header *old_hdr = NULL; int retval = 0; + /* + * This could cause a response to be stranded. Close the associated + * file descriptor to free up any resources being held. 
+ */ + retval = sg_check_file_access(filp, __func__); + if (retval) + return retval; + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, @@ -580,9 +616,11 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) struct sg_header old_hdr; sg_io_hdr_t *hp; unsigned char cmnd[SG_MAX_CDB_SIZE]; + int retval; - if (unlikely(uaccess_kernel())) - return -EINVAL; + retval = sg_check_file_access(filp, __func__); + if (retval) + return retval; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c index 32f0748fd067..0097a939487f 100644 --- a/drivers/soc/imx/gpc.c +++ b/drivers/soc/imx/gpc.c @@ -27,9 +27,16 @@ #define GPC_PGC_SW2ISO_SHIFT 0x8 #define GPC_PGC_SW_SHIFT 0x0 +#define GPC_PGC_PCI_PDN 0x200 +#define GPC_PGC_PCI_SR 0x20c + #define GPC_PGC_GPU_PDN 0x260 #define GPC_PGC_GPU_PUPSCR 0x264 #define GPC_PGC_GPU_PDNSCR 0x268 +#define GPC_PGC_GPU_SR 0x26c + +#define GPC_PGC_DISP_PDN 0x240 +#define GPC_PGC_DISP_SR 0x24c #define GPU_VPU_PUP_REQ BIT(1) #define GPU_VPU_PDN_REQ BIT(0) @@ -318,10 +325,24 @@ static const struct of_device_id imx_gpc_dt_ids[] = { { } }; +static const struct regmap_range yes_ranges[] = { + regmap_reg_range(GPC_CNTR, GPC_CNTR), + regmap_reg_range(GPC_PGC_PCI_PDN, GPC_PGC_PCI_SR), + regmap_reg_range(GPC_PGC_GPU_PDN, GPC_PGC_GPU_SR), + regmap_reg_range(GPC_PGC_DISP_PDN, GPC_PGC_DISP_SR), +}; + +static const struct regmap_access_table access_table = { + .yes_ranges = yes_ranges, + .n_yes_ranges = ARRAY_SIZE(yes_ranges), +}; + static const struct regmap_config imx_gpc_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, + .rd_table = &access_table, + .wr_table = &access_table, .max_register = 0x2ac, }; diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c index f4e3bd40c72e..6ef18cf8f243 100644 --- a/drivers/soc/imx/gpcv2.c +++ b/drivers/soc/imx/gpcv2.c @@ -39,10 +39,15 @@ #define GPC_M4_PU_PDN_FLG 0x1bc - -#define PGC_MIPI 4 -#define PGC_PCIE 5 -#define PGC_USB_HSIC 8 +/* + * The PGC offset values in Reference Manual + * (Rev. 1, 01/2018 and the older ones) GPC chapter's + * GPC_PGC memory map are incorrect, below offset + * values are from design RTL. + */ +#define PGC_MIPI 16 +#define PGC_PCIE 17 +#define PGC_USB_HSIC 20 #define GPC_PGC_CTRL(n) (0x800 + (n) * 0x40) #define GPC_PGC_SR(n) (GPC_PGC_CTRL(n) + 0xc) diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 9dc02f390ba3..5856e792d09c 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -5,7 +5,8 @@ menu "Qualcomm SoC drivers" config QCOM_COMMAND_DB bool "Qualcomm Command DB" - depends on (ARCH_QCOM && OF) || COMPILE_TEST + depends on ARCH_QCOM || COMPILE_TEST + depends on OF_RESERVED_MEM help Command DB queries shared memory by key string for shared system resources. 
Platform drivers that require to set state of a shared diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c index 95120acc4d80..50d03d8b4f9a 100644 --- a/drivers/soc/renesas/rcar-sysc.c +++ b/drivers/soc/renesas/rcar-sysc.c @@ -194,11 +194,12 @@ static int rcar_sysc_pd_power_on(struct generic_pm_domain *genpd) static bool has_cpg_mstp; -static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd) +static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd) { struct generic_pm_domain *genpd = &pd->genpd; const char *name = pd->genpd.name; struct dev_power_governor *gov = &simple_qos_governor; + int error; if (pd->flags & PD_CPU) { /* @@ -251,7 +252,11 @@ static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd) rcar_sysc_power_up(&pd->ch); finalize: - pm_genpd_init(genpd, gov, false); + error = pm_genpd_init(genpd, gov, false); + if (error) + pr_err("Failed to init PM domain %s: %d\n", name, error); + + return error; } static const struct of_device_id rcar_sysc_matches[] __initconst = { @@ -375,6 +380,9 @@ static int __init rcar_sysc_pd_init(void) pr_debug("%pOF: syscier = 0x%08x\n", np, syscier); iowrite32(syscier, base + SYSCIER); + /* + * First, create all PM domains + */ for (i = 0; i < info->num_areas; i++) { const struct rcar_sysc_area *area = &info->areas[i]; struct rcar_sysc_pd *pd; @@ -397,14 +405,29 @@ static int __init rcar_sysc_pd_init(void) pd->ch.isr_bit = area->isr_bit; pd->flags = area->flags; - rcar_sysc_pd_setup(pd); - if (area->parent >= 0) - pm_genpd_add_subdomain(domains->domains[area->parent], - &pd->genpd); + error = rcar_sysc_pd_setup(pd); + if (error) + goto out_put; domains->domains[area->isr_bit] = &pd->genpd; } + /* + * Second, link all PM domains to their parents + */ + for (i = 0; i < info->num_areas; i++) { + const struct rcar_sysc_area *area = &info->areas[i]; + + if (!area->name || area->parent < 0) + continue; + + error = pm_genpd_add_subdomain(domains->domains[area->parent], + domains->domains[area->isr_bit]); + if (error) + pr_warn("Failed to add PM subdomain %s to parent %u\n", + area->name, area->parent); + } + error = of_genpd_add_provider_onecell(np, &domains->onecell_data); out_put: diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index e8c440329708..31db510018a9 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -30,7 +30,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap, struct page **tmp = pages; if (!pages) - return NULL; + return ERR_PTR(-ENOMEM); if (buffer->flags & ION_FLAG_CACHED) pgprot = PAGE_KERNEL; diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c index ea194aa01a64..257b0daff01f 100644 --- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c +++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c @@ -642,7 +642,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev, /* Make sure D/A update mode is direct update */ outb(0, dev->iobase + DAQP_AUX_REG); - for (i = 0; i > insn->n; i++) { + for (i = 0; i < insn->n; i++) { unsigned int val = data[i]; int ret; diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c index 45c05527a57a..faf4b4158cfa 100644 --- a/drivers/staging/rtl8723bs/core/rtw_ap.c +++ b/drivers/staging/rtl8723bs/core/rtw_ap.c @@ -1051,7 +1051,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) return _FAIL; - if (len > MAX_IE_SZ) + if (len < 0 || len > MAX_IE_SZ) return 
_FAIL; pbss_network->IELength = len; diff --git a/drivers/staging/rtlwifi/rtl8822be/hw.c b/drivers/staging/rtlwifi/rtl8822be/hw.c index 7947edb239a1..88ba5b2fea6a 100644 --- a/drivers/staging/rtlwifi/rtl8822be/hw.c +++ b/drivers/staging/rtlwifi/rtl8822be/hw.c @@ -803,7 +803,7 @@ static void _rtl8822be_enable_aspm_back_door(struct ieee80211_hw *hw) return; pci_read_config_byte(rtlpci->pdev, 0x70f, &tmp); - pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | BIT(7)); + pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | ASPM_L1_LATENCY << 3); pci_read_config_byte(rtlpci->pdev, 0x719, &tmp); pci_write_config_byte(rtlpci->pdev, 0x719, tmp | BIT(3) | BIT(4)); diff --git a/drivers/staging/rtlwifi/wifi.h b/drivers/staging/rtlwifi/wifi.h index 012fb618840b..a45f0eb69d3f 100644 --- a/drivers/staging/rtlwifi/wifi.h +++ b/drivers/staging/rtlwifi/wifi.h @@ -88,6 +88,7 @@ #define RTL_USB_MAX_RX_COUNT 100 #define QBSS_LOAD_SIZE 5 #define MAX_WMMELE_LENGTH 64 +#define ASPM_L1_LATENCY 7 #define TOTAL_CAM_ENTRY 32 diff --git a/drivers/staging/typec/Kconfig b/drivers/staging/typec/Kconfig index 3aa981fbc8f5..e45ed08a5166 100644 --- a/drivers/staging/typec/Kconfig +++ b/drivers/staging/typec/Kconfig @@ -11,6 +11,7 @@ config TYPEC_TCPCI config TYPEC_RT1711H tristate "Richtek RT1711H Type-C chip driver" + depends on I2C select TYPEC_TCPCI help Richtek RT1711H Type-C chip driver that works with diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 01ac306131c1..10db5656fd5d 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -3727,11 +3727,16 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd) * Check for overflow of 8byte PRI READ_KEYS payload and * next reservation key list descriptor. */ - if ((add_len + 8) > (cmd->data_length - 8)) - break; - - put_unaligned_be64(pr_reg->pr_res_key, &buf[off]); - off += 8; + if (off + 8 <= cmd->data_length) { + put_unaligned_be64(pr_reg->pr_res_key, &buf[off]); + off += 8; + } + /* + * SPC5r17: 6.16.2 READ KEYS service action + * The ADDITIONAL LENGTH field indicates the number of bytes in + * the Reservation key list. 
The contents of the ADDITIONAL + * LENGTH field are not altered based on the allocation length + */ add_len += 8; } spin_unlock(&dev->t10_pr.registration_lock); diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 7f96dfa32b9c..d8dc3d22051f 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -656,7 +656,7 @@ static void scatter_data_area(struct tcmu_dev *udev, } static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, - bool bidi) + bool bidi, uint32_t read_len) { struct se_cmd *se_cmd = cmd->se_cmd; int i, dbi; @@ -689,7 +689,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, for_each_sg(data_sg, sg, data_nents, i) { int sg_remaining = sg->length; to = kmap_atomic(sg_page(sg)) + sg->offset; - while (sg_remaining > 0) { + while (sg_remaining > 0 && read_len > 0) { if (block_remaining == 0) { if (from) kunmap_atomic(from); @@ -701,6 +701,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, } copy_bytes = min_t(size_t, sg_remaining, block_remaining); + if (read_len < copy_bytes) + copy_bytes = read_len; offset = DATA_BLOCK_SIZE - block_remaining; tcmu_flush_dcache_range(from, copy_bytes); memcpy(to + sg->length - sg_remaining, from + offset, @@ -708,8 +710,11 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, sg_remaining -= copy_bytes; block_remaining -= copy_bytes; + read_len -= copy_bytes; } kunmap_atomic(to - sg->offset); + if (read_len == 0) + break; } if (from) kunmap_atomic(from); @@ -1042,6 +1047,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * { struct se_cmd *se_cmd = cmd->se_cmd; struct tcmu_dev *udev = cmd->tcmu_dev; + bool read_len_valid = false; + uint32_t read_len = se_cmd->data_length; /* * cmd has been completed already from timeout, just reclaim @@ -1056,13 +1063,28 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n", cmd->se_cmd); entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION; - } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { + goto done; + } + + if (se_cmd->data_direction == DMA_FROM_DEVICE && + (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) { + read_len_valid = true; + if (entry->rsp.read_len < read_len) + read_len = entry->rsp.read_len; + } + + if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer); - } else if (se_cmd->se_cmd_flags & SCF_BIDI) { + if (!read_len_valid ) + goto done; + else + se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL; + } + if (se_cmd->se_cmd_flags & SCF_BIDI) { /* Get Data-In buffer before clean up */ - gather_data_area(udev, cmd, true); + gather_data_area(udev, cmd, true, read_len); } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { - gather_data_area(udev, cmd, false); + gather_data_area(udev, cmd, false, read_len); } else if (se_cmd->data_direction == DMA_TO_DEVICE) { /* TODO: */ } else if (se_cmd->data_direction != DMA_NONE) { @@ -1070,7 +1092,13 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * se_cmd->data_direction); } - target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); +done: + if (read_len_valid) { + pr_debug("read_len = %d\n", read_len); + target_complete_cmd_with_length(cmd->se_cmd, + entry->rsp.scsi_status, read_len); + } else + target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); out: 
cmd->se_cmd = NULL; @@ -1740,7 +1768,7 @@ static int tcmu_configure_device(struct se_device *dev) /* Initialise the mailbox of the ring buffer */ mb = udev->mb_addr; mb->version = TCMU_MAILBOX_VERSION; - mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC; + mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN; mb->cmdr_off = CMDR_OFF; mb->cmdr_size = udev->cmdr_size; diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c index 6281266b8ec0..a923ebdeb73c 100644 --- a/drivers/thunderbolt/domain.c +++ b/drivers/thunderbolt/domain.c @@ -213,6 +213,10 @@ static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr, goto err_free_acl; } ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl); + if (!ret) { + /* Notify userspace about the change */ + kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE); + } mutex_unlock(&tb->lock); err_free_acl: diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index cbe98bc2b998..431742201709 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -124,6 +124,8 @@ struct n_tty_data { struct mutex output_lock; }; +#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1)) + static inline size_t read_cnt(struct n_tty_data *ldata) { return ldata->read_head - ldata->read_tail; @@ -141,6 +143,7 @@ static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i) static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i) { + smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */ return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)]; } @@ -316,9 +319,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata) static void reset_buffer_flags(struct n_tty_data *ldata) { ldata->read_head = ldata->canon_head = ldata->read_tail = 0; - ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0; ldata->commit_head = 0; - ldata->echo_mark = 0; ldata->line_start = 0; ldata->erasing = 0; @@ -617,13 +618,20 @@ static size_t __process_echoes(struct tty_struct *tty) old_space = space = tty_write_room(tty); tail = ldata->echo_tail; - while (ldata->echo_commit != tail) { + while (MASK(ldata->echo_commit) != MASK(tail)) { c = echo_buf(ldata, tail); if (c == ECHO_OP_START) { unsigned char op; int no_space_left = 0; /* + * Since add_echo_byte() is called without holding + * output_lock, we might see only portion of multi-byte + * operation. + */ + if (MASK(ldata->echo_commit) == MASK(tail + 1)) + goto not_yet_stored; + /* * If the buffer byte is the start of a multi-byte * operation, get the next byte, which is either the * op code or a control character value. 
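
Editor's aside: the surrounding n_tty.c hunks make two related changes. Comparisons of the free-running buffer indices now go through the new MASK() helper (the buffer size is a power of two), and add_echo_byte() gains an smp_wmb() that pairs with the smp_rmb() in echo_buf(), so a reader never observes the advanced echo_head before the stored byte. Below is an editor's sketch of the masked-index idiom in plain userspace C; RING_SIZE, ring_put() and ring_get() are illustrative names, not taken from the kernel.

#include <stdio.h>
#include <stddef.h>

#define RING_SIZE 64			/* must be a power of two */
#define MASK(x)   ((x) & (RING_SIZE - 1))

static unsigned char ring[RING_SIZE];
static size_t head, tail;		/* free-running counters, never reset */

static void ring_put(unsigned char c)
{
	ring[MASK(head)] = c;		/* store the byte at the masked slot */
	/*
	 * ... then advance the counter. The kernel version places an
	 * smp_wmb() between the two, paired with the smp_rmb() in
	 * echo_buf(), so another CPU sees the byte before the index.
	 */
	head++;
}

static int ring_get(unsigned char *c)
{
	/*
	 * Empty check. The n_tty hunks compare MASK()ed values here,
	 * since their head/tail counters are not all reset together;
	 * for this single-threaded sketch the raw compare suffices.
	 */
	if (head == tail)
		return 0;
	*c = ring[MASK(tail)];
	tail++;
	return 1;
}

int main(void)
{
	unsigned char c;

	ring_put('h');
	ring_put('i');
	while (ring_get(&c))
		putchar(c);
	putchar('\n');
	return 0;
}

The key point the sketch tries to show is that element access always uses the masked value while the counters themselves keep running, which is the shape the n_tty comparisons take after this patch.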
@@ -634,6 +642,8 @@ static size_t __process_echoes(struct tty_struct *tty) unsigned int num_chars, num_bs; case ECHO_OP_ERASE_TAB: + if (MASK(ldata->echo_commit) == MASK(tail + 2)) + goto not_yet_stored; num_chars = echo_buf(ldata, tail + 2); /* @@ -728,7 +738,8 @@ static size_t __process_echoes(struct tty_struct *tty) /* If the echo buffer is nearly full (so that the possibility exists * of echo overrun before the next commit), then discard enough * data at the tail to prevent a subsequent overrun */ - while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) { + while (ldata->echo_commit > tail && + ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) { if (echo_buf(ldata, tail) == ECHO_OP_START) { if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB) tail += 3; @@ -738,6 +749,7 @@ static size_t __process_echoes(struct tty_struct *tty) tail++; } + not_yet_stored: ldata->echo_tail = tail; return old_space - space; } @@ -748,6 +760,7 @@ static void commit_echoes(struct tty_struct *tty) size_t nr, old, echoed; size_t head; + mutex_lock(&ldata->output_lock); head = ldata->echo_head; ldata->echo_mark = head; old = ldata->echo_commit - ldata->echo_tail; @@ -756,10 +769,12 @@ static void commit_echoes(struct tty_struct *tty) * is over the threshold (and try again each time another * block is accumulated) */ nr = head - ldata->echo_tail; - if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK)) + if (nr < ECHO_COMMIT_WATERMARK || + (nr % ECHO_BLOCK > old % ECHO_BLOCK)) { + mutex_unlock(&ldata->output_lock); return; + } - mutex_lock(&ldata->output_lock); ldata->echo_commit = head; echoed = __process_echoes(tty); mutex_unlock(&ldata->output_lock); @@ -810,7 +825,9 @@ static void flush_echoes(struct tty_struct *tty) static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata) { - *echo_buf_addr(ldata, ldata->echo_head++) = c; + *echo_buf_addr(ldata, ldata->echo_head) = c; + smp_wmb(); /* Matches smp_rmb() in echo_buf(). */ + ldata->echo_head++; } /** @@ -978,14 +995,15 @@ static void eraser(unsigned char c, struct tty_struct *tty) } seen_alnums = 0; - while (ldata->read_head != ldata->canon_head) { + while (MASK(ldata->read_head) != MASK(ldata->canon_head)) { head = ldata->read_head; /* erase a single possibly multibyte character */ do { head--; c = read_buf(ldata, head); - } while (is_continuation(c, tty) && head != ldata->canon_head); + } while (is_continuation(c, tty) && + MASK(head) != MASK(ldata->canon_head)); /* do not partially erase */ if (is_continuation(c, tty)) @@ -1027,7 +1045,7 @@ static void eraser(unsigned char c, struct tty_struct *tty) * This info is used to go back the correct * number of columns. 
*/ - while (tail != ldata->canon_head) { + while (MASK(tail) != MASK(ldata->canon_head)) { tail--; c = read_buf(ldata, tail); if (c == '\t') { @@ -1302,7 +1320,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c) finish_erasing(ldata); echo_char(c, tty); echo_char_raw('\n', ldata); - while (tail != ldata->read_head) { + while (MASK(tail) != MASK(ldata->read_head)) { echo_char(read_buf(ldata, tail), tty); tail++; } @@ -1878,30 +1896,21 @@ static int n_tty_open(struct tty_struct *tty) struct n_tty_data *ldata; /* Currently a malloc failure here can panic */ - ldata = vmalloc(sizeof(*ldata)); + ldata = vzalloc(sizeof(*ldata)); if (!ldata) - goto err; + return -ENOMEM; ldata->overrun_time = jiffies; mutex_init(&ldata->atomic_read_lock); mutex_init(&ldata->output_lock); tty->disc_data = ldata; - reset_buffer_flags(tty->disc_data); - ldata->column = 0; - ldata->canon_column = 0; - ldata->num_overrun = 0; - ldata->no_room = 0; - ldata->lnext = 0; tty->closing = 0; /* indicate buffer work may resume */ clear_bit(TTY_LDISC_HALTED, &tty->flags); n_tty_set_termios(tty, NULL); tty_unthrottle(tty); - return 0; -err: - return -ENOMEM; } static inline int input_available_p(struct tty_struct *tty, int poll) @@ -2411,7 +2420,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata) tail = ldata->read_tail; nr = head - tail; /* Skip EOF-chars.. */ - while (head != tail) { + while (MASK(head) != MASK(tail)) { if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) && read_buf(ldata, tail) == __DISABLED_CHAR) nr--; diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index df93b727e984..9e59f4788589 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c @@ -617,6 +617,7 @@ EXPORT_SYMBOL_GPL(__serdev_device_driver_register); static void __exit serdev_exit(void) { bus_unregister(&serdev_bus_type); + ida_destroy(&ctrl_ida); } module_exit(serdev_exit); diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 3296a05cda2d..f80a300b5d68 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -3339,9 +3339,7 @@ static const struct pci_device_id blacklist[] = { /* multi-io cards handled by parport_serial */ { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */ { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */ - { PCI_DEVICE(0x4348, 0x7173), }, /* WCH CH355 4S */ { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */ - { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */ /* Moxa Smartio MUE boards handled by 8250_moxa */ { PCI_VDEVICE(MOXA, 0x1024), }, diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 1eb1a376a041..15eb6c829d39 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -784,7 +784,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ if (!*vc->vc_uni_pagedir_loc) con_set_default_unimap(vc); - vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL); + vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL); if (!vc->vc_screenbuf) goto err_free; @@ -871,7 +871,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_screen_size > (4 << 20)) return -EINVAL; - newscreen = kmalloc(new_screen_size, GFP_USER); + newscreen = kzalloc(new_screen_size, GFP_USER); if (!newscreen) return -ENOMEM; diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index e8f4ac9400ea..5d421d7e8904 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -215,7 +215,20 @@ static ssize_t name_show(struct device *dev, struct device_attribute 
*attr, char *buf) { struct uio_device *idev = dev_get_drvdata(dev); - return sprintf(buf, "%s\n", idev->info->name); + int ret; + + mutex_lock(&idev->info_lock); + if (!idev->info) { + ret = -EINVAL; + dev_err(dev, "the device has been unregistered\n"); + goto out; + } + + ret = sprintf(buf, "%s\n", idev->info->name); + +out: + mutex_unlock(&idev->info_lock); + return ret; } static DEVICE_ATTR_RO(name); @@ -223,7 +236,20 @@ static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uio_device *idev = dev_get_drvdata(dev); - return sprintf(buf, "%s\n", idev->info->version); + int ret; + + mutex_lock(&idev->info_lock); + if (!idev->info) { + ret = -EINVAL; + dev_err(dev, "the device has been unregistered\n"); + goto out; + } + + ret = sprintf(buf, "%s\n", idev->info->version); + +out: + mutex_unlock(&idev->info_lock); + return ret; } static DEVICE_ATTR_RO(version); @@ -415,11 +441,15 @@ EXPORT_SYMBOL_GPL(uio_event_notify); static irqreturn_t uio_interrupt(int irq, void *dev_id) { struct uio_device *idev = (struct uio_device *)dev_id; - irqreturn_t ret = idev->info->handler(irq, idev->info); + irqreturn_t ret; + mutex_lock(&idev->info_lock); + + ret = idev->info->handler(irq, idev->info); if (ret == IRQ_HANDLED) uio_event_notify(idev->info); + mutex_unlock(&idev->info_lock); return ret; } @@ -433,7 +463,6 @@ static int uio_open(struct inode *inode, struct file *filep) struct uio_device *idev; struct uio_listener *listener; int ret = 0; - unsigned long flags; mutex_lock(&minor_lock); idev = idr_find(&uio_idr, iminor(inode)); @@ -460,10 +489,16 @@ static int uio_open(struct inode *inode, struct file *filep) listener->event_count = atomic_read(&idev->event); filep->private_data = listener; - spin_lock_irqsave(&idev->info_lock, flags); + mutex_lock(&idev->info_lock); + if (!idev->info) { + mutex_unlock(&idev->info_lock); + ret = -EINVAL; + goto err_alloc_listener; + } + if (idev->info && idev->info->open) ret = idev->info->open(idev->info, inode); - spin_unlock_irqrestore(&idev->info_lock, flags); + mutex_unlock(&idev->info_lock); if (ret) goto err_infoopen; @@ -495,12 +530,11 @@ static int uio_release(struct inode *inode, struct file *filep) int ret = 0; struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; - unsigned long flags; - spin_lock_irqsave(&idev->info_lock, flags); + mutex_lock(&idev->info_lock); if (idev->info && idev->info->release) ret = idev->info->release(idev->info, inode); - spin_unlock_irqrestore(&idev->info_lock, flags); + mutex_unlock(&idev->info_lock); module_put(idev->owner); kfree(listener); @@ -513,12 +547,11 @@ static __poll_t uio_poll(struct file *filep, poll_table *wait) struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; __poll_t ret = 0; - unsigned long flags; - spin_lock_irqsave(&idev->info_lock, flags); + mutex_lock(&idev->info_lock); if (!idev->info || !idev->info->irq) ret = -EIO; - spin_unlock_irqrestore(&idev->info_lock, flags); + mutex_unlock(&idev->info_lock); if (ret) return ret; @@ -537,12 +570,11 @@ static ssize_t uio_read(struct file *filep, char __user *buf, DECLARE_WAITQUEUE(wait, current); ssize_t retval = 0; s32 event_count; - unsigned long flags; - spin_lock_irqsave(&idev->info_lock, flags); + mutex_lock(&idev->info_lock); if (!idev->info || !idev->info->irq) retval = -EIO; - spin_unlock_irqrestore(&idev->info_lock, flags); + mutex_unlock(&idev->info_lock); if (retval) return retval; @@ -592,9 +624,13 @@ static ssize_t 
uio_write(struct file *filep, const char __user *buf, struct uio_device *idev = listener->dev; ssize_t retval; s32 irq_on; - unsigned long flags; - spin_lock_irqsave(&idev->info_lock, flags); + mutex_lock(&idev->info_lock); + if (!idev->info) { + retval = -EINVAL; + goto out; + } + if (!idev->info || !idev->info->irq) { retval = -EIO; goto out; @@ -618,7 +654,7 @@ static ssize_t uio_write(struct file *filep, const char __user *buf, retval = idev->info->irqcontrol(idev->info, irq_on); out: - spin_unlock_irqrestore(&idev->info_lock, flags); + mutex_unlock(&idev->info_lock); return retval ? retval : sizeof(s32); } @@ -640,10 +676,20 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf) struct page *page; unsigned long offset; void *addr; + int ret = 0; + int mi; - int mi = uio_find_mem_index(vmf->vma); - if (mi < 0) - return VM_FAULT_SIGBUS; + mutex_lock(&idev->info_lock); + if (!idev->info) { + ret = VM_FAULT_SIGBUS; + goto out; + } + + mi = uio_find_mem_index(vmf->vma); + if (mi < 0) { + ret = VM_FAULT_SIGBUS; + goto out; + } /* * We need to subtract mi because userspace uses offset = N*PAGE_SIZE @@ -658,7 +704,11 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf) page = vmalloc_to_page(addr); get_page(page); vmf->page = page; - return 0; + +out: + mutex_unlock(&idev->info_lock); + + return ret; } static const struct vm_operations_struct uio_logical_vm_ops = { @@ -683,6 +733,7 @@ static int uio_mmap_physical(struct vm_area_struct *vma) struct uio_device *idev = vma->vm_private_data; int mi = uio_find_mem_index(vma); struct uio_mem *mem; + if (mi < 0) return -EINVAL; mem = idev->info->mem + mi; @@ -724,30 +775,46 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma) vma->vm_private_data = idev; + mutex_lock(&idev->info_lock); + if (!idev->info) { + ret = -EINVAL; + goto out; + } + mi = uio_find_mem_index(vma); - if (mi < 0) - return -EINVAL; + if (mi < 0) { + ret = -EINVAL; + goto out; + } requested_pages = vma_pages(vma); actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK) + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT; - if (requested_pages > actual_pages) - return -EINVAL; + if (requested_pages > actual_pages) { + ret = -EINVAL; + goto out; + } if (idev->info->mmap) { ret = idev->info->mmap(idev->info, vma); - return ret; + goto out; } switch (idev->info->mem[mi].memtype) { case UIO_MEM_PHYS: - return uio_mmap_physical(vma); + ret = uio_mmap_physical(vma); + break; case UIO_MEM_LOGICAL: case UIO_MEM_VIRTUAL: - return uio_mmap_logical(vma); + ret = uio_mmap_logical(vma); + break; default: - return -EINVAL; + ret = -EINVAL; } + +out: + mutex_unlock(&idev->info_lock); + return 0; } static const struct file_operations uio_fops = { @@ -865,7 +932,7 @@ int __uio_register_device(struct module *owner, idev->owner = owner; idev->info = info; - spin_lock_init(&idev->info_lock); + mutex_init(&idev->info_lock); init_waitqueue_head(&idev->wait); atomic_set(&idev->event, 0); @@ -902,8 +969,9 @@ int __uio_register_device(struct module *owner, * FDs at the time of unregister and therefore may not be * freed until they are released. 
*/ - ret = request_irq(info->irq, uio_interrupt, - info->irq_flags, info->name, idev); + ret = request_threaded_irq(info->irq, NULL, uio_interrupt, + info->irq_flags, info->name, idev); + if (ret) goto err_request_irq; } @@ -928,7 +996,6 @@ EXPORT_SYMBOL_GPL(__uio_register_device); void uio_unregister_device(struct uio_info *info) { struct uio_device *idev; - unsigned long flags; if (!info || !info->uio_dev) return; @@ -937,14 +1004,14 @@ void uio_unregister_device(struct uio_info *info) uio_free_minor(idev); + mutex_lock(&idev->info_lock); uio_dev_del_attributes(idev); if (info->irq && info->irq != UIO_IRQ_CUSTOM) free_irq(info->irq, idev); - spin_lock_irqsave(&idev->info_lock, flags); idev->info = NULL; - spin_unlock_irqrestore(&idev->info_lock, flags); + mutex_unlock(&idev->info_lock); device_unregister(&idev->dev); diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index af45aa3222b5..4638d9b066be 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c @@ -124,8 +124,11 @@ static int host_start(struct ci_hdrc *ci) hcd->power_budget = ci->platdata->power_budget; hcd->tpl_support = ci->platdata->tpl_support; - if (ci->phy || ci->usb_phy) + if (ci->phy || ci->usb_phy) { hcd->skip_phy_initialization = 1; + if (ci->usb_phy) + hcd->usb_phy = ci->usb_phy; + } ehci = hcd_to_ehci(hcd); ehci->caps = ci->hw_bank.cap; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 7b366a6c0b49..998b32d0167e 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1758,6 +1758,9 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */ .driver_info = SINGLE_RX_URB, }, + { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */ + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ + }, { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ }, diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index c55def2f1320..097057d2eacf 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -378,6 +378,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* Corsair K70 RGB */ { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Corsair Strafe */ + { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT | + USB_QUIRK_DELAY_CTRL_MSG }, + /* Corsair Strafe RGB */ { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | USB_QUIRK_DELAY_CTRL_MSG }, diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 4a56ac772a3c..71b3b08ad516 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -1004,6 +1004,7 @@ struct dwc2_hregs_backup { * @frame_list_sz: Frame list size * @desc_gen_cache: Kmem cache for generic descriptors * @desc_hsisoc_cache: Kmem cache for hs isochronous descriptors + * @unaligned_cache: Kmem cache for DMA mode to handle non-aligned buf * * These are for peripheral mode: * @@ -1177,6 +1178,8 @@ struct dwc2_hsotg { u32 frame_list_sz; struct kmem_cache *desc_gen_cache; struct kmem_cache *desc_hsisoc_cache; + struct kmem_cache *unaligned_cache; +#define DWC2_KMEM_UNALIGNED_BUF_SIZE 1024 #endif /* CONFIG_USB_DWC2_HOST || CONFIG_USB_DWC2_DUAL_ROLE */ diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index f0d9ccf1d665..a0f82cca2d9a 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -812,6 +812,7 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep, 
u32 index; u32 maxsize = 0; u32 mask = 0; + u8 pid = 0; maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask); @@ -840,7 +841,11 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep, ((len << DEV_DMA_NBYTES_SHIFT) & mask)); if (hs_ep->dir_in) { - desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) & + if (len) + pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket); + else + pid = 1; + desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) & DEV_DMA_ISOC_PID_MASK) | ((len % hs_ep->ep.maxpacket) ? DEV_DMA_SHORT : 0) | @@ -884,6 +889,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep) struct dwc2_dma_desc *desc; if (list_empty(&hs_ep->queue)) { + hs_ep->target_frame = TARGET_FRAME_INITIAL; dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__); return; } @@ -2755,8 +2761,6 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep) */ tmp = dwc2_hsotg_read_frameno(hsotg); - dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), 0); - if (using_desc_dma(hsotg)) { if (ep->target_frame == TARGET_FRAME_INITIAL) { /* Start first ISO Out */ @@ -2817,9 +2821,6 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep) tmp = dwc2_hsotg_read_frameno(hsotg); if (using_desc_dma(hsotg)) { - dwc2_hsotg_complete_request(hsotg, hs_ep, - get_ep_head(hs_ep), 0); - hs_ep->target_frame = tmp; dwc2_gadget_incr_frame_num(hs_ep); dwc2_gadget_start_isoc_ddma(hs_ep); @@ -4739,9 +4740,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg) } ret = usb_add_gadget_udc(dev, &hsotg->gadget); - if (ret) + if (ret) { + dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, + hsotg->ctrl_req); return ret; - + } dwc2_hsotg_dump(hsotg); return 0; @@ -4755,6 +4758,7 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg) int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg) { usb_del_gadget_udc(&hsotg->gadget); + dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req); return 0; } diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index edaf0b6af4f0..b1104be3429c 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -1567,11 +1567,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, } if (hsotg->params.host_dma) { - dwc2_writel((u32)chan->xfer_dma, - hsotg->regs + HCDMA(chan->hc_num)); + dma_addr_t dma_addr; + + if (chan->align_buf) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "align_buf\n"); + dma_addr = chan->align_buf; + } else { + dma_addr = chan->xfer_dma; + } + dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num)); + if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n", - (unsigned long)chan->xfer_dma, chan->hc_num); + (unsigned long)dma_addr, chan->hc_num); } /* Start the split */ @@ -2625,6 +2634,35 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, } } +static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg, + struct dwc2_qh *qh, + struct dwc2_host_chan *chan) +{ + if (!hsotg->unaligned_cache || + chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE) + return -ENOMEM; + + if (!qh->dw_align_buf) { + qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache, + GFP_ATOMIC | GFP_DMA); + if (!qh->dw_align_buf) + return -ENOMEM; + } + + qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf, + DWC2_KMEM_UNALIGNED_BUF_SIZE, + DMA_FROM_DEVICE); + + if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) { + dev_err(hsotg->dev, "can't map align_buf\n"); + chan->align_buf = 0; + return -EINVAL; + } + + chan->align_buf = qh->dw_align_buf_dma; + return 0; +} + #define DWC2_USB_DMA_ALIGN 
4 struct dma_aligned_buffer { @@ -2802,6 +2840,32 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) /* Set the transfer attributes */ dwc2_hc_init_xfer(hsotg, chan, qtd); + /* For non-dword aligned buffers */ + if (hsotg->params.host_dma && qh->do_split && + chan->ep_is_in && (chan->xfer_dma & 0x3)) { + dev_vdbg(hsotg->dev, "Non-aligned buffer\n"); + if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) { + dev_err(hsotg->dev, + "Failed to allocate memory to handle non-aligned buffer\n"); + /* Add channel back to free list */ + chan->align_buf = 0; + chan->multi_count = 0; + list_add_tail(&chan->hc_list_entry, + &hsotg->free_hc_list); + qtd->in_process = 0; + qh->channel = NULL; + return -ENOMEM; + } + } else { + /* + * We assume that DMA is always aligned in non-split + * case or split out case. Warn if not. + */ + WARN_ON_ONCE(hsotg->params.host_dma && + (chan->xfer_dma & 0x3)); + chan->align_buf = 0; + } + if (chan->ep_type == USB_ENDPOINT_XFER_INT || chan->ep_type == USB_ENDPOINT_XFER_ISOC) /* @@ -5246,6 +5310,19 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg) } } + if (hsotg->params.host_dma) { + /* + * Create kmem caches to handle non-aligned buffer + * in Buffer DMA mode. + */ + hsotg->unaligned_cache = kmem_cache_create("dwc2-unaligned-dma", + DWC2_KMEM_UNALIGNED_BUF_SIZE, 4, + SLAB_CACHE_DMA, NULL); + if (!hsotg->unaligned_cache) + dev_err(hsotg->dev, + "unable to create dwc2 unaligned cache\n"); + } + hsotg->otg_port = 1; hsotg->frame_list = NULL; hsotg->frame_list_dma = 0; @@ -5280,8 +5357,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg) return 0; error4: - kmem_cache_destroy(hsotg->desc_gen_cache); + kmem_cache_destroy(hsotg->unaligned_cache); kmem_cache_destroy(hsotg->desc_hsisoc_cache); + kmem_cache_destroy(hsotg->desc_gen_cache); error3: dwc2_hcd_release(hsotg); error2: @@ -5322,8 +5400,9 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) usb_remove_hcd(hcd); hsotg->priv = NULL; - kmem_cache_destroy(hsotg->desc_gen_cache); + kmem_cache_destroy(hsotg->unaligned_cache); kmem_cache_destroy(hsotg->desc_hsisoc_cache); + kmem_cache_destroy(hsotg->desc_gen_cache); dwc2_hcd_release(hsotg); usb_put_hcd(hcd); @@ -5435,7 +5514,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg) dwc2_writel(hprt0, hsotg->regs + HPRT0); /* Wait for the HPRT0.PrtSusp register field to be set */ - if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 300)) + if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000)) dev_warn(hsotg->dev, "Suspend wasn't generated\n"); /* @@ -5616,6 +5695,8 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup, return ret; } + dwc2_hcd_rem_wakeup(hsotg); + hsotg->hibernated = 0; hsotg->bus_suspended = 0; hsotg->lx_state = DWC2_L0; diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h index 7db1ee7e7a77..5502a501f516 100644 --- a/drivers/usb/dwc2/hcd.h +++ b/drivers/usb/dwc2/hcd.h @@ -76,6 +76,8 @@ struct dwc2_qh; * (micro)frame * @xfer_buf: Pointer to current transfer buffer position * @xfer_dma: DMA address of xfer_buf + * @align_buf: In Buffer DMA mode this will be used if xfer_buf is not + * DWORD aligned * @xfer_len: Total number of bytes to transfer * @xfer_count: Number of bytes transferred so far * @start_pkt_count: Packet count at start of transfer @@ -133,6 +135,7 @@ struct dwc2_host_chan { u8 *xfer_buf; dma_addr_t xfer_dma; + dma_addr_t align_buf; u32 xfer_len; u32 xfer_count; u16 start_pkt_count; @@ -302,6 +305,9 @@ struct dwc2_hs_transfer_time { * speed. 
Note that this is in "schedule slice" which * is tightly packed. * @ntd: Actual number of transfer descriptors in a list + * @dw_align_buf: Used instead of original buffer if its physical address + * is not dword-aligned + * @dw_align_buf_dma: DMA address for dw_align_buf * @qtd_list: List of QTDs for this QH * @channel: Host channel currently processing transfers for this QH * @qh_list_entry: Entry for QH in either the periodic or non-periodic @@ -350,6 +356,8 @@ struct dwc2_qh { struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES]; u32 ls_start_schedule_slice; u16 ntd; + u8 *dw_align_buf; + dma_addr_t dw_align_buf_dma; struct list_head qtd_list; struct dwc2_host_chan *channel; struct list_head qh_list_entry; diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c index fbea5e3fb947..ed7f05cf4906 100644 --- a/drivers/usb/dwc2/hcd_intr.c +++ b/drivers/usb/dwc2/hcd_intr.c @@ -942,14 +942,21 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg, frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index]; len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd, DWC2_HC_XFER_COMPLETE, NULL); - if (!len) { + if (!len && !qtd->isoc_split_offset) { qtd->complete_split = 0; - qtd->isoc_split_offset = 0; return 0; } frame_desc->actual_length += len; + if (chan->align_buf) { + dev_vdbg(hsotg->dev, "non-aligned buffer\n"); + dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma, + DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE); + memcpy(qtd->urb->buf + (chan->xfer_dma - qtd->urb->dma), + chan->qh->dw_align_buf, len); + } + qtd->isoc_split_offset += len; hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum)); diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c index d7c3d6c776d8..301ced1618f8 100644 --- a/drivers/usb/dwc2/hcd_queue.c +++ b/drivers/usb/dwc2/hcd_queue.c @@ -383,7 +383,7 @@ static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg, /* Get the map and adjust if this is a multi_tt hub */ map = qh->dwc_tt->periodic_bitmaps; if (qh->dwc_tt->usb_tt->multi) - map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport; + map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1); return map; } @@ -1696,6 +1696,9 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) if (qh->desc_list) dwc2_hcd_qh_free_ddma(hsotg, qh); + else if (hsotg->unaligned_cache && qh->dw_align_buf) + kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf); + kfree(qh); } diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index ea91310113b9..103807587dc6 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1272,7 +1272,6 @@ static int dwc3_probe(struct platform_device *pdev) if (!dwc->clks) return -ENOMEM; - dwc->num_clks = ARRAY_SIZE(dwc3_core_clks); dwc->dev = dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1307,15 +1306,19 @@ static int dwc3_probe(struct platform_device *pdev) if (IS_ERR(dwc->reset)) return PTR_ERR(dwc->reset); - ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks); - if (ret == -EPROBE_DEFER) - return ret; - /* - * Clocks are optional, but new DT platforms should support all clocks - * as required by the DT-binding. - */ - if (ret) - dwc->num_clks = 0; + if (dev->of_node) { + dwc->num_clks = ARRAY_SIZE(dwc3_core_clks); + + ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks); + if (ret == -EPROBE_DEFER) + return ret; + /* + * Clocks are optional, but new DT platforms should support all + * clocks as required by the DT-binding. 
+ */ + if (ret) + dwc->num_clks = 0; + } ret = reset_control_deassert(dwc->reset); if (ret) diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index 6b3ccd542bd7..dbeff5e6ad14 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c @@ -165,8 +165,9 @@ static int dwc3_of_simple_remove(struct platform_device *pdev) reset_control_put(simple->resets); - pm_runtime_put_sync(dev); pm_runtime_disable(dev); + pm_runtime_put_noidle(dev); + pm_runtime_set_suspended(dev); return 0; } diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index c961a94d136b..f57e7c94b8e5 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -34,6 +34,7 @@ #define PCI_DEVICE_ID_INTEL_GLK 0x31aa #define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee #define PCI_DEVICE_ID_INTEL_CNPH 0xa36e +#define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee #define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" #define PCI_INTEL_BXT_FUNC_PMU_PWR 4 @@ -289,6 +290,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPLP), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPH), }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICLLP), }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, { } /* Terminating Entry */ }; diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index b0e67ab2f98c..a6d0203e40b6 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -490,6 +490,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev) qcom->dwc3 = of_find_device_by_node(dwc3_np); if (!qcom->dwc3) { dev_err(&pdev->dev, "failed to get dwc3 platform device\n"); + ret = -ENODEV; goto depopulate; } @@ -547,8 +548,7 @@ static int dwc3_qcom_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int dwc3_qcom_pm_suspend(struct device *dev) +static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev) { struct dwc3_qcom *qcom = dev_get_drvdata(dev); int ret = 0; @@ -560,7 +560,7 @@ static int dwc3_qcom_pm_suspend(struct device *dev) return ret; } -static int dwc3_qcom_pm_resume(struct device *dev) +static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev) { struct dwc3_qcom *qcom = dev_get_drvdata(dev); int ret; @@ -571,23 +571,20 @@ static int dwc3_qcom_pm_resume(struct device *dev) return ret; } -#endif -#ifdef CONFIG_PM -static int dwc3_qcom_runtime_suspend(struct device *dev) +static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev) { struct dwc3_qcom *qcom = dev_get_drvdata(dev); return dwc3_qcom_suspend(qcom); } -static int dwc3_qcom_runtime_resume(struct device *dev) +static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev) { struct dwc3_qcom *qcom = dev_get_drvdata(dev); return dwc3_qcom_resume(qcom); } -#endif static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume) diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index f242c2bcea81..d2fa071c21b1 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1719,6 +1719,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) */ if (w_value && !f->get_alt) break; + + spin_lock(&cdev->lock); value = f->set_alt(f, w_index, w_value); if (value == USB_GADGET_DELAYED_STATUS) { DBG(cdev, @@ -1728,6 
+1730,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) DBG(cdev, "delayed_status count %d\n", cdev->delayed_status); } + spin_unlock(&cdev->lock); break; case USB_REQ_GET_INTERFACE: if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE)) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index dce9d12c7981..33e2030503fa 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -215,6 +215,7 @@ struct ffs_io_data { struct mm_struct *mm; struct work_struct work; + struct work_struct cancellation_work; struct usb_ep *ep; struct usb_request *req; @@ -1072,22 +1073,31 @@ ffs_epfile_open(struct inode *inode, struct file *file) return 0; } +static void ffs_aio_cancel_worker(struct work_struct *work) +{ + struct ffs_io_data *io_data = container_of(work, struct ffs_io_data, + cancellation_work); + + ENTER(); + + usb_ep_dequeue(io_data->ep, io_data->req); +} + static int ffs_aio_cancel(struct kiocb *kiocb) { struct ffs_io_data *io_data = kiocb->private; - struct ffs_epfile *epfile = kiocb->ki_filp->private_data; + struct ffs_data *ffs = io_data->ffs; int value; ENTER(); - spin_lock_irq(&epfile->ffs->eps_lock); - - if (likely(io_data && io_data->ep && io_data->req)) - value = usb_ep_dequeue(io_data->ep, io_data->req); - else + if (likely(io_data && io_data->ep && io_data->req)) { + INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker); + queue_work(ffs->io_completion_wq, &io_data->cancellation_work); + value = -EINPROGRESS; + } else { value = -EINVAL; - - spin_unlock_irq(&epfile->ffs->eps_lock); + } return value; } diff --git a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig index f0cdf89b8503..83ba8a2eb6af 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig +++ b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig @@ -2,6 +2,7 @@ config USB_ASPEED_VHUB tristate "Aspeed vHub UDC driver" depends on ARCH_ASPEED || COMPILE_TEST + depends on USB_LIBCOMPOSITE help USB peripheral controller for the Aspeed AST2500 family SoCs supporting the "vHub" functionality and USB2.0 diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index 1fbfd89d0a0f..387f124a8334 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -508,16 +508,18 @@ static int xhci_do_dbc_start(struct xhci_hcd *xhci) return 0; } -static void xhci_do_dbc_stop(struct xhci_hcd *xhci) +static int xhci_do_dbc_stop(struct xhci_hcd *xhci) { struct xhci_dbc *dbc = xhci->dbc; if (dbc->state == DS_DISABLED) - return; + return -1; writel(0, &dbc->regs->control); xhci_dbc_mem_cleanup(xhci); dbc->state = DS_DISABLED; + + return 0; } static int xhci_dbc_start(struct xhci_hcd *xhci) @@ -544,6 +546,7 @@ static int xhci_dbc_start(struct xhci_hcd *xhci) static void xhci_dbc_stop(struct xhci_hcd *xhci) { + int ret; unsigned long flags; struct xhci_dbc *dbc = xhci->dbc; struct dbc_port *port = &dbc->port; @@ -556,10 +559,11 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci) xhci_dbc_tty_unregister_device(xhci); spin_lock_irqsave(&dbc->lock, flags); - xhci_do_dbc_stop(xhci); + ret = xhci_do_dbc_stop(xhci); spin_unlock_irqrestore(&dbc->lock, flags); - pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); + if (!ret) + pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); } static void diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index acbd3d7b8828..ef350c33dc4a 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c 
@@ -595,7 +595,7 @@ struct xhci_ring *xhci_stream_id_to_ring( if (!ep->stream_info) return NULL; - if (stream_id > ep->stream_info->num_streams) + if (stream_id >= ep->stream_info->num_streams) return NULL; return ep->stream_info->stream_rings[stream_id]; } @@ -886,12 +886,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) dev = xhci->devs[slot_id]; - trace_xhci_free_virt_device(dev); - xhci->dcbaa->dev_context_ptrs[slot_id] = 0; if (!dev) return; + trace_xhci_free_virt_device(dev); + if (dev->tt_info) old_active_eps = dev->tt_info->active_eps; diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index a8c1d073cba0..4b463e5202a4 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -481,7 +481,7 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra, unsigned long mask; unsigned int port; bool idle, enable; - int err; + int err = 0; memset(&rsp, 0, sizeof(rsp)); @@ -1223,10 +1223,10 @@ disable_rpm: pm_runtime_disable(&pdev->dev); usb_put_hcd(tegra->hcd); disable_xusbc: - if (!&pdev->dev.pm_domain) + if (!pdev->dev.pm_domain) tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC); disable_xusba: - if (!&pdev->dev.pm_domain) + if (!pdev->dev.pm_domain) tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA); put_padctl: tegra_xusb_padctl_put(tegra->padctl); diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h index 410544ffe78f..88b427434bd8 100644 --- a/drivers/usb/host/xhci-trace.h +++ b/drivers/usb/host/xhci-trace.h @@ -171,6 +171,37 @@ DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue, TP_ARGS(ring, trb) ); +DECLARE_EVENT_CLASS(xhci_log_free_virt_dev, + TP_PROTO(struct xhci_virt_device *vdev), + TP_ARGS(vdev), + TP_STRUCT__entry( + __field(void *, vdev) + __field(unsigned long long, out_ctx) + __field(unsigned long long, in_ctx) + __field(u8, fake_port) + __field(u8, real_port) + __field(u16, current_mel) + + ), + TP_fast_assign( + __entry->vdev = vdev; + __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma; + __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma; + __entry->fake_port = (u8) vdev->fake_port; + __entry->real_port = (u8) vdev->real_port; + __entry->current_mel = (u16) vdev->current_mel; + ), + TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d", + __entry->vdev, __entry->in_ctx, __entry->out_ctx, + __entry->fake_port, __entry->real_port, __entry->current_mel + ) +); + +DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device, + TP_PROTO(struct xhci_virt_device *vdev), + TP_ARGS(vdev) +); + DECLARE_EVENT_CLASS(xhci_log_virt_dev, TP_PROTO(struct xhci_virt_device *vdev), TP_ARGS(vdev), @@ -208,11 +239,6 @@ DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device, TP_ARGS(vdev) ); -DEFINE_EVENT(xhci_log_virt_dev, xhci_free_virt_device, - TP_PROTO(struct xhci_virt_device *vdev), - TP_ARGS(vdev) -); - DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device, TP_PROTO(struct xhci_virt_device *vdev), TP_ARGS(vdev) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 8c8da2d657fa..2f4850f25e82 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -908,6 +908,41 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) spin_unlock_irqrestore(&xhci->lock, flags); } +static bool xhci_pending_portevent(struct xhci_hcd *xhci) +{ + struct xhci_port **ports; + int port_index; + u32 status; + u32 portsc; + + status = readl(&xhci->op_regs->status); + if (status & STS_EINT) + return true; + /* + * Checking STS_EINT is not enough as there 
is a lag between a change + * bit being set and the Port Status Change Event that it generated + * being written to the Event Ring. See note in xhci 1.1 section 4.19.2. + */ + + port_index = xhci->usb2_rhub.num_ports; + ports = xhci->usb2_rhub.ports; + while (port_index--) { + portsc = readl(ports[port_index]->addr); + if (portsc & PORT_CHANGE_MASK || + (portsc & PORT_PLS_MASK) == XDEV_RESUME) + return true; + } + port_index = xhci->usb3_rhub.num_ports; + ports = xhci->usb3_rhub.ports; + while (port_index--) { + portsc = readl(ports[port_index]->addr); + if (portsc & PORT_CHANGE_MASK || + (portsc & PORT_PLS_MASK) == XDEV_RESUME) + return true; + } + return false; +} + /* * Stop HC (not bus-specific) * @@ -1009,7 +1044,7 @@ EXPORT_SYMBOL_GPL(xhci_suspend); */ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) { - u32 command, temp = 0, status; + u32 command, temp = 0; struct usb_hcd *hcd = xhci_to_hcd(xhci); struct usb_hcd *secondary_hcd; int retval = 0; @@ -1043,8 +1078,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) command = readl(&xhci->op_regs->command); command |= CMD_CRS; writel(command, &xhci->op_regs->command); + /* + * Some controllers take up to 55+ ms to complete the controller + * restore so setting the timeout to 100ms. Xhci specification + * doesn't mention any timeout value. + */ if (xhci_handshake(&xhci->op_regs->status, - STS_RESTORE, 0, 10 * 1000)) { + STS_RESTORE, 0, 100 * 1000)) { xhci_warn(xhci, "WARN: xHC restore state timeout\n"); spin_unlock_irq(&xhci->lock); return -ETIMEDOUT; @@ -1134,8 +1174,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) done: if (retval == 0) { /* Resume root hubs only when have pending events. */ - status = readl(&xhci->op_regs->status); - if (status & STS_EINT) { + if (xhci_pending_portevent(xhci)) { usb_hcd_resume_root_hub(xhci->shared_hcd); usb_hcd_resume_root_hub(hcd); } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 939e2f86b595..841e89ffe2e9 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -382,6 +382,10 @@ struct xhci_op_regs { #define PORT_PLC (1 << 22) /* port configure error change - port failed to configure its link partner */ #define PORT_CEC (1 << 23) +#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \ + PORT_RC | PORT_PLC | PORT_CEC) + + /* Cold Attach Status - xHC can set this bit to report device attached during * Sx state. Warm port reset should be perfomed to clear this bit and move port * to connected state. 
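The yurex read() rework just below drops the open-coded *ppos and copy_to_user() handling in favour of simple_read_from_buffer(), which performs the offset bookkeeping itself. A minimal sketch of a read() method built on that helper, with a hypothetical foo_read() rather than the yurex code:

#include <linux/kernel.h>
#include <linux/fs.h>

/* Format a small snapshot into a stack buffer and let
 * simple_read_from_buffer() handle *ppos and the copy to user space. */
static ssize_t foo_read(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        char tmp[32];
        int len;

        len = scnprintf(tmp, sizeof(tmp), "%d\n", 42);
        return simple_read_from_buffer(buf, count, ppos, tmp, len);
}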
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 8abb6cbbd98a..3be40eaa1ac9 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -396,8 +396,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct usb_yurex *dev; - int retval = 0; - int bytes_read = 0; + int len = 0; char in_buffer[20]; unsigned long flags; @@ -405,26 +404,16 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, mutex_lock(&dev->io_mutex); if (!dev->interface) { /* already disconnected */ - retval = -ENODEV; - goto exit; + mutex_unlock(&dev->io_mutex); + return -ENODEV; } spin_lock_irqsave(&dev->lock, flags); - bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu); + len = snprintf(in_buffer, 20, "%lld\n", dev->bbu); spin_unlock_irqrestore(&dev->lock, flags); - - if (*ppos < bytes_read) { - if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos)) - retval = -EFAULT; - else { - retval = bytes_read - *ppos; - *ppos += bytes_read; - } - } - -exit: mutex_unlock(&dev->io_mutex); - return retval; + + return simple_read_from_buffer(buffer, count, ppos, in_buffer, len); } static ssize_t yurex_write(struct file *file, const char __user *user_buffer, diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index bdd7a5ad3bf1..3bb1fff02bed 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -128,7 +128,7 @@ static int ch341_control_in(struct usb_device *dev, r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, value, index, buf, bufsize, DEFAULT_TIMEOUT); - if (r < bufsize) { + if (r < (int)bufsize) { if (r >= 0) { dev_err(&dev->dev, "short control message received (%d < %u)\n", diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index eb6c26cbe579..626a29d9aa58 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -95,6 +95,9 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */ { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */ + { USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */ + { USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */ + { USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */ { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */ @@ -112,6 +115,9 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */ { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */ { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */ + { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */ + { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */ + { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */ { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */ { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ @@ -124,7 +130,9 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ { USB_DEVICE(0x10C4, 0x84B6) 
}, /* Starizona Hyperion */ + { USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */ { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */ + { USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */ { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ @@ -134,17 +142,24 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */ + { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */ { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ + { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */ + { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ + { USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */ + { USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */ { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */ { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */ diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index 5169624d8b11..38d43c4b7ce5 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c @@ -369,8 +369,10 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial, 3, /* get pins */ USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN, 0, 0, data, 1, 2000); - if (rc >= 0) + if (rc == 1) *value = *data; + else if (rc >= 0) + rc = -EIO; kfree(data); return rc; diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index fdceb46d9fc6..b580b4c7fa48 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -468,6 +468,9 @@ static void mos7840_control_callback(struct urb *urb) } dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length); + if (urb->actual_length < 1) + goto out; + dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__, mos7840_port->MsrLsr, mos7840_port->port_num); data = urb->transfer_buffer; diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c index 8a201dd53d36..150f43668bec 100644 --- a/drivers/usb/typec/tcpm.c +++ b/drivers/usb/typec/tcpm.c @@ -418,17 +418,18 @@ static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args) u64 ts_nsec = local_clock(); unsigned long rem_nsec; + 
mutex_lock(&port->logbuffer_lock); if (!port->logbuffer[port->logbuffer_head]) { port->logbuffer[port->logbuffer_head] = kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL); - if (!port->logbuffer[port->logbuffer_head]) + if (!port->logbuffer[port->logbuffer_head]) { + mutex_unlock(&port->logbuffer_lock); return; + } } vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args); - mutex_lock(&port->logbuffer_lock); - if (tcpm_log_full(port)) { port->logbuffer_head = max(port->logbuffer_head - 1, 0); strcpy(tmpbuffer, "overflow"); @@ -724,6 +725,9 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv) tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma); + port->supply_voltage = mv; + port->current_limit = max_ma; + if (port->tcpc->set_current_limit) ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv); @@ -2594,8 +2598,6 @@ static void tcpm_reset_port(struct tcpm_port *port) tcpm_set_attached_state(port, false); port->try_src_count = 0; port->try_snk_count = 0; - port->supply_voltage = 0; - port->current_limit = 0; port->usb_type = POWER_SUPPLY_USB_TYPE_C; power_supply_changed(port->psy); @@ -3043,7 +3045,8 @@ static void run_state_machine(struct tcpm_port *port) tcpm_port_is_sink(port) && time_is_after_jiffies(port->delayed_runtime)) { tcpm_set_state(port, SNK_DISCOVERY, - port->delayed_runtime - jiffies); + jiffies_to_msecs(port->delayed_runtime - + jiffies)); break; } tcpm_set_state(port, unattached_state(port), 0); diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index bd5cca5632b3..8d0a6fe748bd 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -350,6 +350,19 @@ static void ucsi_connector_change(struct work_struct *work) } if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) { + typec_set_pwr_role(con->port, con->status.pwr_dir); + + switch (con->status.partner_type) { + case UCSI_CONSTAT_PARTNER_TYPE_UFP: + typec_set_data_role(con->port, TYPEC_HOST); + break; + case UCSI_CONSTAT_PARTNER_TYPE_DFP: + typec_set_data_role(con->port, TYPEC_DEVICE); + break; + default: + break; + } + if (con->status.connected) ucsi_register_partner(con); else diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c index 44eb4e1ea817..a18112a83fae 100644 --- a/drivers/usb/typec/ucsi/ucsi_acpi.c +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c @@ -79,6 +79,11 @@ static int ucsi_acpi_probe(struct platform_device *pdev) return -ENODEV; } + /* This will make sure we can use ioremap_nocache() */ + status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1); + if (ACPI_FAILURE(status)) + return -ENOMEM; + /* * NOTE: The memory region for the data structures is used also in an * operation region, which means ACPI has already reserved it. Therefore diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig index 24ee2605b9f0..42dc1d3d71cf 100644 --- a/drivers/vfio/pci/Kconfig +++ b/drivers/vfio/pci/Kconfig @@ -28,5 +28,13 @@ config VFIO_PCI_INTX def_bool y if !S390 config VFIO_PCI_IGD - depends on VFIO_PCI - def_bool y if X86 + bool "VFIO PCI extensions for Intel graphics (GVT-d)" + depends on VFIO_PCI && X86 + default y + help + Support for Intel IGD specific extensions to enable direct + assignment to virtual machines. This includes exposing an IGD + specific firmware table and read-only copies of the host bridge + and LPC bridge config space. + + To enable Intel IGD assignment through vfio-pci, say Y. 
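The vfio-pci hunk that follows clamps a user-supplied region index with array_index_nospec() before it is used to index per-device state, so a mispredicted bounds check cannot be used to read out of bounds under speculation. A minimal sketch of that pattern with hypothetical names, not the vfio code itself:

#include <linux/nospec.h>

struct foo_region {
        void *base;
};

struct foo_device {
        unsigned int num_regions;
        struct foo_region *regions;
};

/* Bound the index first, then clamp it so speculative execution
 * cannot run ahead with an out-of-range value. */
static struct foo_region *foo_get_region(struct foo_device *fdev,
                                         unsigned int index)
{
        if (index >= fdev->num_regions)
                return NULL;

        index = array_index_nospec(index, fdev->num_regions);
        return &fdev->regions[index];
}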
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index b423a309a6e0..125b58eff936 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -28,6 +28,7 @@ #include <linux/uaccess.h> #include <linux/vfio.h> #include <linux/vgaarb.h> +#include <linux/nospec.h> #include "vfio_pci_private.h" @@ -727,6 +728,9 @@ static long vfio_pci_ioctl(void *device_data, if (info.index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) return -EINVAL; + info.index = array_index_nospec(info.index, + VFIO_PCI_NUM_REGIONS + + vdev->num_regions); i = info.index - VFIO_PCI_NUM_REGIONS; diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 759a5bdd40e1..7cd63b0c1a46 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -457,17 +457,17 @@ static void tce_iommu_unuse_page(struct tce_container *container, } static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container, - unsigned long tce, unsigned long size, + unsigned long tce, unsigned long shift, unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem) { long ret = 0; struct mm_iommu_table_group_mem_t *mem; - mem = mm_iommu_lookup(container->mm, tce, size); + mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift); if (!mem) return -EINVAL; - ret = mm_iommu_ua_to_hpa(mem, tce, phpa); + ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa); if (ret) return -EINVAL; @@ -487,7 +487,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container, if (!pua) return; - ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl), + ret = tce_iommu_prereg_ua_to_hpa(container, *pua, tbl->it_page_shift, &hpa, &mem); if (ret) pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n", @@ -611,7 +611,7 @@ static long tce_iommu_build_v2(struct tce_container *container, entry + i); ret = tce_iommu_prereg_ua_to_hpa(container, - tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem); + tce, tbl->it_page_shift, &hpa, &mem); if (ret) break; diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 2c75b33db4ac..3e5b17710a4f 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -343,18 +343,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, struct page *page[1]; struct vm_area_struct *vma; struct vm_area_struct *vmas[1]; + unsigned int flags = 0; int ret; + if (prot & IOMMU_WRITE) + flags |= FOLL_WRITE; + + down_read(&mm->mmap_sem); if (mm == current->mm) { - ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE), - page, vmas); + ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas); } else { - unsigned int flags = 0; - - if (prot & IOMMU_WRITE) - flags |= FOLL_WRITE; - - down_read(&mm->mmap_sem); ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page, vmas, NULL); /* @@ -368,8 +366,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, ret = -EOPNOTSUPP; put_page(page[0]); } - up_read(&mm->mmap_sem); } + up_read(&mm->mmap_sem); if (ret == 1) { *pfn = page_to_pfn(page[0]); @@ -5,7 +5,6 @@ * Implements an efficient asynchronous io interface. * * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved. - * Copyright 2018 Christoph Hellwig. * * See ../COPYING for licensing terms. 
*/ @@ -165,22 +164,10 @@ struct fsync_iocb { bool datasync; }; -struct poll_iocb { - struct file *file; - __poll_t events; - struct wait_queue_head *head; - - union { - struct wait_queue_entry wait; - struct work_struct work; - }; -}; - struct aio_kiocb { union { struct kiocb rw; struct fsync_iocb fsync; - struct poll_iocb poll; }; struct kioctx *ki_ctx; @@ -1590,6 +1577,7 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync) if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)) return -EINVAL; + req->file = fget(iocb->aio_fildes); if (unlikely(!req->file)) return -EBADF; @@ -1604,137 +1592,6 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync) return 0; } -/* need to use list_del_init so we can check if item was present */ -static inline bool __aio_poll_remove(struct poll_iocb *req) -{ - if (list_empty(&req->wait.entry)) - return false; - list_del_init(&req->wait.entry); - return true; -} - -static inline void __aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask) -{ - fput(iocb->poll.file); - aio_complete(iocb, mangle_poll(mask), 0); -} - -static void aio_poll_work(struct work_struct *work) -{ - struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, poll.work); - - if (!list_empty_careful(&iocb->ki_list)) - aio_remove_iocb(iocb); - __aio_poll_complete(iocb, iocb->poll.events); -} - -static int aio_poll_cancel(struct kiocb *iocb) -{ - struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw); - struct poll_iocb *req = &aiocb->poll; - struct wait_queue_head *head = req->head; - bool found = false; - - spin_lock(&head->lock); - found = __aio_poll_remove(req); - spin_unlock(&head->lock); - - if (found) { - req->events = 0; - INIT_WORK(&req->work, aio_poll_work); - schedule_work(&req->work); - } - return 0; -} - -static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, - void *key) -{ - struct poll_iocb *req = container_of(wait, struct poll_iocb, wait); - struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); - struct file *file = req->file; - __poll_t mask = key_to_poll(key); - - assert_spin_locked(&req->head->lock); - - /* for instances that support it check for an event match first: */ - if (mask && !(mask & req->events)) - return 0; - - mask = file->f_op->poll_mask(file, req->events) & req->events; - if (!mask) - return 0; - - __aio_poll_remove(req); - - /* - * Try completing without a context switch if we can acquire ctx_lock - * without spinning. Otherwise we need to defer to a workqueue to - * avoid a deadlock due to the lock order. - */ - if (spin_trylock(&iocb->ki_ctx->ctx_lock)) { - list_del_init(&iocb->ki_list); - spin_unlock(&iocb->ki_ctx->ctx_lock); - - __aio_poll_complete(iocb, mask); - } else { - req->events = mask; - INIT_WORK(&req->work, aio_poll_work); - schedule_work(&req->work); - } - - return 1; -} - -static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb) -{ - struct kioctx *ctx = aiocb->ki_ctx; - struct poll_iocb *req = &aiocb->poll; - __poll_t mask; - - /* reject any unknown events outside the normal event mask. 
*/ - if ((u16)iocb->aio_buf != iocb->aio_buf) - return -EINVAL; - /* reject fields that are not defined for poll */ - if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags) - return -EINVAL; - - req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; - req->file = fget(iocb->aio_fildes); - if (unlikely(!req->file)) - return -EBADF; - if (!file_has_poll_mask(req->file)) - goto out_fail; - - req->head = req->file->f_op->get_poll_head(req->file, req->events); - if (!req->head) - goto out_fail; - if (IS_ERR(req->head)) { - mask = EPOLLERR; - goto done; - } - - init_waitqueue_func_entry(&req->wait, aio_poll_wake); - aiocb->ki_cancel = aio_poll_cancel; - - spin_lock_irq(&ctx->ctx_lock); - spin_lock(&req->head->lock); - mask = req->file->f_op->poll_mask(req->file, req->events) & req->events; - if (!mask) { - __add_wait_queue(req->head, &req->wait); - list_add_tail(&aiocb->ki_list, &ctx->active_reqs); - } - spin_unlock(&req->head->lock); - spin_unlock_irq(&ctx->ctx_lock); -done: - if (mask) - __aio_poll_complete(aiocb, mask); - return 0; -out_fail: - fput(req->file); - return -EINVAL; /* same as no support for IOCB_CMD_POLL */ -} - static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, bool compat) { @@ -1808,9 +1665,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, case IOCB_CMD_FDSYNC: ret = aio_fsync(&req->fsync, &iocb, true); break; - case IOCB_CMD_POLL: - ret = aio_poll(req, &iocb); - break; default: pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode); ret = -EINVAL; @@ -2042,6 +1896,11 @@ SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, return ret; } +struct __aio_sigset { + const sigset_t __user *sigmask; + size_t sigsetsize; +}; + SYSCALL_DEFINE6(io_pgetevents, aio_context_t, ctx_id, long, min_nr, diff --git a/fs/autofs/Makefile b/fs/autofs/Makefile index 43fedde15c26..1f85d35ec8b7 100644 --- a/fs/autofs/Makefile +++ b/fs/autofs/Makefile @@ -2,6 +2,6 @@ # Makefile for the linux autofs-filesystem routines. 
# -obj-$(CONFIG_AUTOFS_FS) += autofs.o +obj-$(CONFIG_AUTOFS_FS) += autofs4.o -autofs-objs := init.o inode.o root.o symlink.o waitq.o expire.o dev-ioctl.o +autofs4-objs := init.o inode.o root.o symlink.o waitq.o expire.o dev-ioctl.o diff --git a/fs/autofs/dev-ioctl.c b/fs/autofs/dev-ioctl.c index ea4ca1445ab7..86eafda4a652 100644 --- a/fs/autofs/dev-ioctl.c +++ b/fs/autofs/dev-ioctl.c @@ -135,6 +135,15 @@ static int validate_dev_ioctl(int cmd, struct autofs_dev_ioctl *param) cmd); goto out; } + } else { + unsigned int inr = _IOC_NR(cmd); + + if (inr == AUTOFS_DEV_IOCTL_OPENMOUNT_CMD || + inr == AUTOFS_DEV_IOCTL_REQUESTER_CMD || + inr == AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD) { + err = -EINVAL; + goto out; + } } err = 0; @@ -271,7 +280,8 @@ static int autofs_dev_ioctl_openmount(struct file *fp, dev_t devid; int err, fd; - /* param->path has already been checked */ + /* param->path has been checked in validate_dev_ioctl() */ + if (!param->openmount.devid) return -EINVAL; @@ -433,10 +443,7 @@ static int autofs_dev_ioctl_requester(struct file *fp, dev_t devid; int err = -ENOENT; - if (param->size <= AUTOFS_DEV_IOCTL_SIZE) { - err = -EINVAL; - goto out; - } + /* param->path has been checked in validate_dev_ioctl() */ devid = sbi->sb->s_dev; @@ -521,10 +528,7 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp, unsigned int devid, magic; int err = -ENOENT; - if (param->size <= AUTOFS_DEV_IOCTL_SIZE) { - err = -EINVAL; - goto out; - } + /* param->path has been checked in validate_dev_ioctl() */ name = param->path; type = param->ismountpoint.in.type; diff --git a/fs/autofs/init.c b/fs/autofs/init.c index cc9447e1903f..79ae07d9592f 100644 --- a/fs/autofs/init.c +++ b/fs/autofs/init.c @@ -23,7 +23,7 @@ static struct file_system_type autofs_fs_type = { .kill_sb = autofs_kill_sb, }; MODULE_ALIAS_FS("autofs"); -MODULE_ALIAS("autofs4"); +MODULE_ALIAS("autofs"); static int __init init_autofs_fs(void) { diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 0ac456b52bdd..816cc921cf36 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1259,9 +1259,8 @@ static int load_elf_library(struct file *file) goto out_free_ph; } - len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + - ELF_MIN_ALIGN - 1); - bss = eppnt->p_memsz + eppnt->p_vaddr; + len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr); + bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr); if (bss > len) { error = vm_brk(len, bss - len); if (error) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index cce6087d6880..b3e45714d28f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4238,8 +4238,9 @@ int try_release_extent_mapping(struct page *page, gfp_t mask) struct extent_map *em; u64 start = page_offset(page); u64 end = start + PAGE_SIZE - 1; - struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree; - struct extent_map_tree *map = &BTRFS_I(page->mapping->host)->extent_tree; + struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host); + struct extent_io_tree *tree = &btrfs_inode->io_tree; + struct extent_map_tree *map = &btrfs_inode->extent_tree; if (gfpflags_allow_blocking(mask) && page->mapping->host->i_size > SZ_16M) { @@ -4262,6 +4263,8 @@ int try_release_extent_mapping(struct page *page, gfp_t mask) extent_map_end(em) - 1, EXTENT_LOCKED | EXTENT_WRITEBACK, 0, NULL)) { + set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, + &btrfs_inode->runtime_flags); remove_extent_mapping(map, em); /* once for the rb tree */ free_extent_map(em); @@ -4542,8 +4545,11 @@ int extent_fiemap(struct inode *inode, struct 
fiemap_extent_info *fieinfo, offset_in_extent = em_start - em->start; em_end = extent_map_end(em); em_len = em_end - em_start; - disko = em->block_start + offset_in_extent; flags = 0; + if (em->block_start < EXTENT_MAP_LAST_BYTE) + disko = em->block_start + offset_in_extent; + else + disko = 0; /* * bump off for our next call to get_extent diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e9482f0db9d0..eba61bcb9bb3 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -9005,13 +9005,14 @@ again: unlock_extent_cached(io_tree, page_start, page_end, &cached_state); -out_unlock: if (!ret2) { btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, true); sb_end_pagefault(inode->i_sb); extent_changeset_free(data_reserved); return VM_FAULT_LOCKED; } + +out_unlock: unlock_page(page); out: btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, (ret != 0)); @@ -9443,6 +9444,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, u64 new_idx = 0; u64 root_objectid; int ret; + int ret2; bool root_log_pinned = false; bool dest_log_pinned = false; @@ -9639,7 +9641,8 @@ out_fail: dest_log_pinned = false; } } - ret = btrfs_end_transaction(trans); + ret2 = btrfs_end_transaction(trans); + ret = ret ? ret : ret2; out_notrans: if (new_ino == BTRFS_FIRST_FREE_OBJECTID) up_read(&fs_info->subvol_sem); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index c2837a32d689..b077544b5232 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3327,11 +3327,13 @@ static void btrfs_cmp_data_free(struct cmp_pages *cmp) if (pg) { unlock_page(pg); put_page(pg); + cmp->src_pages[i] = NULL; } pg = cmp->dst_pages[i]; if (pg) { unlock_page(pg); put_page(pg); + cmp->dst_pages[i] = NULL; } } } @@ -3577,7 +3579,7 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN, dst, dst_loff, &cmp); if (ret) - goto out_unlock; + goto out_free; loff += BTRFS_MAX_DEDUPE_LEN; dst_loff += BTRFS_MAX_DEDUPE_LEN; @@ -3587,16 +3589,16 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff, &cmp); +out_free: + kvfree(cmp.src_pages); + kvfree(cmp.dst_pages); + out_unlock: if (same_inode) inode_unlock(src); else btrfs_double_inode_unlock(src, dst); -out_free: - kvfree(cmp.src_pages); - kvfree(cmp.dst_pages); - return ret; } diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 1874a6d2e6f5..c25dc47210a3 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -2680,8 +2680,10 @@ out: free_extent_buffer(scratch_leaf); } - if (done && !ret) + if (done && !ret) { ret = 1; + fs_info->qgroup_rescan_progress.objectid = (u64)-1; + } return ret; } @@ -2784,13 +2786,20 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, if (!init_flags) { /* we're resuming qgroup rescan at mount time */ - if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)) + if (!(fs_info->qgroup_flags & + BTRFS_QGROUP_STATUS_FLAG_RESCAN)) { btrfs_warn(fs_info, "qgroup rescan init failed, qgroup is not enabled"); - else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) + ret = -EINVAL; + } else if (!(fs_info->qgroup_flags & + BTRFS_QGROUP_STATUS_FLAG_ON)) { btrfs_warn(fs_info, "qgroup rescan init failed, qgroup rescan is not queued"); - return -EINVAL; + ret = -EINVAL; + } + + if (ret) + return ret; } mutex_lock(&fs_info->qgroup_rescan_lock); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 572306036477..6702896cdb8f 100644 --- a/fs/btrfs/scrub.c +++ 
b/fs/btrfs/scrub.c @@ -1151,11 +1151,6 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) return ret; } - if (sctx->is_dev_replace && !is_metadata && !have_csum) { - sblocks_for_recheck = NULL; - goto nodatasum_case; - } - /* * read all mirrors one after the other. This includes to * re-read the extent or metadata block that failed (that was @@ -1268,13 +1263,19 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) goto out; } - if (!is_metadata && !have_csum) { + /* + * NOTE: Even for nodatasum case, it's still possible that it's a + * compressed data extent, thus scrub_fixup_nodatasum(), which write + * inode page cache onto disk, could cause serious data corruption. + * + * So here we could only read from disk, and hope our recovery could + * reach disk before the newer write. + */ + if (0 && !is_metadata && !have_csum) { struct scrub_fixup_nodatasum *fixup_nodatasum; WARN_ON(sctx->is_dev_replace); -nodatasum_case: - /* * !is_metadata and !have_csum, this means that the data * might not be COWed, that it might be modified diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e034ad9e23b4..1da162928d1a 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1146,6 +1146,7 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, { int ret; + mutex_lock(&uuid_mutex); mutex_lock(&fs_devices->device_list_mutex); if (fs_devices->opened) { fs_devices->opened++; @@ -1155,6 +1156,7 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, ret = open_fs_devices(fs_devices, flags, holder); } mutex_unlock(&fs_devices->device_list_mutex); + mutex_unlock(&uuid_mutex); return ret; } diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index ee764ac352ab..a866be999216 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1135,6 +1135,7 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in) if (IS_ERR(realdn)) { pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n", PTR_ERR(realdn), dn, in, ceph_vinop(in)); + dput(dn); dn = realdn; /* note realdn contains the error */ goto out; } else if (realdn) { diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index bd78da59a4fd..c923c7854027 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -423,7 +423,7 @@ struct smb_version_operations { void (*set_oplock_level)(struct cifsInodeInfo *, __u32, unsigned int, bool *); /* create lease context buffer for CREATE request */ - char * (*create_lease_buf)(u8 *, u8); + char * (*create_lease_buf)(u8 *lease_key, u8 oplock); /* parse lease context buffer and return oplock/epoch info */ __u8 (*parse_lease_buf)(void *buf, unsigned int *epoch, char *lkey); ssize_t (*copychunk_range)(const unsigned int, @@ -1416,6 +1416,7 @@ typedef int (mid_handle_t)(struct TCP_Server_Info *server, /* one of these for every pending CIFS request to the server */ struct mid_q_entry { struct list_head qhead; /* mids waiting on reply from this server */ + struct kref refcount; struct TCP_Server_Info *server; /* server corresponding to this mid */ __u64 mid; /* multiplex id */ __u32 pid; /* process id */ diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 03018be17283..1890f534c88b 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -82,6 +82,7 @@ extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server); extern void DeleteMidQEntry(struct mid_q_entry *midEntry); extern void cifs_delete_mid(struct mid_q_entry *mid); +extern void cifs_mid_q_entry_release(struct mid_q_entry 
*midEntry); extern void cifs_wake_up_task(struct mid_q_entry *mid); extern int cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid); diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index d352da325de3..93408eab92e7 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -157,8 +157,14 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) * greater than cifs socket timeout which is 7 seconds */ while (server->tcpStatus == CifsNeedReconnect) { - wait_event_interruptible_timeout(server->response_q, - (server->tcpStatus != CifsNeedReconnect), 10 * HZ); + rc = wait_event_interruptible_timeout(server->response_q, + (server->tcpStatus != CifsNeedReconnect), + 10 * HZ); + if (rc < 0) { + cifs_dbg(FYI, "%s: aborting reconnect due to a received" + " signal by the process\n", __func__); + return -ERESTARTSYS; + } /* are we still trying to reconnect? */ if (server->tcpStatus != CifsNeedReconnect) diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a57da1b88bdf..5df2c0698cda 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -924,6 +924,7 @@ next_pdu: server->pdu_size = next_offset; } + mid_entry = NULL; if (server->ops->is_transform_hdr && server->ops->receive_transform && server->ops->is_transform_hdr(buf)) { @@ -938,8 +939,11 @@ next_pdu: length = mid_entry->receive(server, mid_entry); } - if (length < 0) + if (length < 0) { + if (mid_entry) + cifs_mid_q_entry_release(mid_entry); continue; + } if (server->large_buf) buf = server->bigbuf; @@ -956,6 +960,8 @@ next_pdu: if (!mid_entry->multiRsp || mid_entry->multiEnd) mid_entry->callback(mid_entry); + + cifs_mid_q_entry_release(mid_entry); } else if (server->ops->is_oplock_break && server->ops->is_oplock_break(buf, server)) { cifs_dbg(FYI, "Received oplock break\n"); diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index aff8ce8ba34d..646dcd149de1 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -107,6 +107,7 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer) if (compare_mid(mid->mid, buf) && mid->mid_state == MID_REQUEST_SUBMITTED && le16_to_cpu(mid->command) == buf->Command) { + kref_get(&mid->refcount); spin_unlock(&GlobalMid_Lock); return mid; } diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index 788412675723..4ed10dd086e6 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c @@ -41,7 +41,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, int rc; __le16 *smb2_path; struct smb2_file_all_info *smb2_data = NULL; - __u8 smb2_oplock[17]; + __u8 smb2_oplock; struct cifs_fid *fid = oparms->fid; struct network_resiliency_req nr_ioctl_req; @@ -59,12 +59,9 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, } oparms->desired_access |= FILE_READ_ATTRIBUTES; - *smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH; + smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH; - if (oparms->tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING) - memcpy(smb2_oplock + 1, fid->lease_key, SMB2_LEASE_KEY_SIZE); - - rc = SMB2_open(xid, oparms, smb2_path, smb2_oplock, smb2_data, NULL, + rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, NULL); if (rc) goto out; @@ -101,7 +98,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, move_smb2_info_to_cifs(buf, smb2_data); } - *oplock = *smb2_oplock; + *oplock = smb2_oplock; out: kfree(smb2_data); kfree(smb2_path); diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 0356b5559c71..ea92a38b2f08 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -203,6 +203,7 @@ 
smb2_find_mid(struct TCP_Server_Info *server, char *buf) if ((mid->mid == wire_mid) && (mid->mid_state == MID_REQUEST_SUBMITTED) && (mid->command == shdr->Command)) { + kref_get(&mid->refcount); spin_unlock(&GlobalMid_Lock); return mid; } @@ -855,6 +856,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_set_ea(xid, tcon, fid.persistent_fid, fid.volatile_fid, ea, len); + kfree(ea); + SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); return rc; @@ -2219,8 +2222,7 @@ smb2_create_lease_buf(u8 *lease_key, u8 oplock) if (!buf) return NULL; - buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key)); - buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8))); + memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE); buf->lcontext.LeaseState = map_oplock_to_lease(oplock); buf->ccontext.DataOffset = cpu_to_le16(offsetof @@ -2246,8 +2248,7 @@ smb3_create_lease_buf(u8 *lease_key, u8 oplock) if (!buf) return NULL; - buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key)); - buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8))); + memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE); buf->lcontext.LeaseState = map_oplock_to_lease(oplock); buf->ccontext.DataOffset = cpu_to_le16(offsetof @@ -2284,8 +2285,7 @@ smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key) if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS) return SMB2_OPLOCK_LEVEL_NOCHANGE; if (lease_key) - memcpy(lease_key, &lc->lcontext.LeaseKeyLow, - SMB2_LEASE_KEY_SIZE); + memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); return le32_to_cpu(lc->lcontext.LeaseState); } @@ -2521,7 +2521,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq, if (!tr_hdr) goto err_free_iov; - orig_len = smb2_rqst_len(old_rq, false); + orig_len = smb_rqst_len(server, old_rq); /* fill the 2nd iov with a transform header */ fill_transform_hdr(tr_hdr, orig_len, old_rq); diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 810b85787c91..3c92678cb45b 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -155,7 +155,7 @@ out: static int smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) { - int rc = 0; + int rc; struct nls_table *nls_codepage; struct cifs_ses *ses; struct TCP_Server_Info *server; @@ -166,10 +166,10 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) * for those three - in the calling routine. */ if (tcon == NULL) - return rc; + return 0; if (smb2_command == SMB2_TREE_CONNECT) - return rc; + return 0; if (tcon->tidStatus == CifsExiting) { /* @@ -212,8 +212,14 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) return -EAGAIN; } - wait_event_interruptible_timeout(server->response_q, - (server->tcpStatus != CifsNeedReconnect), 10 * HZ); + rc = wait_event_interruptible_timeout(server->response_q, + (server->tcpStatus != CifsNeedReconnect), + 10 * HZ); + if (rc < 0) { + cifs_dbg(FYI, "%s: aborting reconnect due to a received" + " signal by the process\n", __func__); + return -ERESTARTSYS; + } /* are we still trying to reconnect? 
*/ if (server->tcpStatus != CifsNeedReconnect) @@ -231,7 +237,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) } if (!tcon->ses->need_reconnect && !tcon->need_reconnect) - return rc; + return 0; nls_codepage = load_nls_default(); @@ -340,7 +346,10 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, return rc; /* BB eventually switch this to SMB2 specific small buf size */ - *request_buf = cifs_small_buf_get(); + if (smb2_command == SMB2_SET_INFO) + *request_buf = cifs_buf_get(); + else + *request_buf = cifs_small_buf_get(); if (*request_buf == NULL) { /* BB should we add a retry in here if not a writepage? */ return -ENOMEM; @@ -1707,12 +1716,12 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp, static int add_lease_context(struct TCP_Server_Info *server, struct kvec *iov, - unsigned int *num_iovec, __u8 *oplock) + unsigned int *num_iovec, u8 *lease_key, __u8 *oplock) { struct smb2_create_req *req = iov[0].iov_base; unsigned int num = *num_iovec; - iov[num].iov_base = server->ops->create_lease_buf(oplock+1, *oplock); + iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = server->vals->create_lease_size; @@ -2172,7 +2181,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, *oplock == SMB2_OPLOCK_LEVEL_NONE) req->RequestedOplockLevel = *oplock; else { - rc = add_lease_context(server, iov, &n_iov, oplock); + rc = add_lease_context(server, iov, &n_iov, + oparms->fid->lease_key, oplock); if (rc) { cifs_small_buf_release(req); kfree(copy_path); @@ -3720,7 +3730,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon, rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov); - cifs_small_buf_release(req); + cifs_buf_release(req); rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base; if (rc != 0) { diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 824dddeee3f2..a671adcc44a6 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -678,16 +678,14 @@ struct create_context { #define SMB2_LEASE_KEY_SIZE 16 struct lease_context { - __le64 LeaseKeyLow; - __le64 LeaseKeyHigh; + u8 LeaseKey[SMB2_LEASE_KEY_SIZE]; __le32 LeaseState; __le32 LeaseFlags; __le64 LeaseDuration; } __packed; struct lease_context_v2 { - __le64 LeaseKeyLow; - __le64 LeaseKeyHigh; + u8 LeaseKey[SMB2_LEASE_KEY_SIZE]; __le32 LeaseState; __le32 LeaseFlags; __le64 LeaseDuration; diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 3ae208ac2a77..6e6a4f2ec890 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -113,8 +113,8 @@ extern int smb2_unlock_range(struct cifsFileInfo *cfile, extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile); extern void smb2_reconnect_server(struct work_struct *work); extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server); -extern unsigned long -smb2_rqst_len(struct smb_rqst *rqst, bool skip_rfc1002_marker); +extern unsigned long smb_rqst_len(struct TCP_Server_Info *server, + struct smb_rqst *rqst); /* * SMB2 Worker functions - most of protocol specific implementation details diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c index 51b9437c3c7b..719d55e63d88 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c @@ -173,6 +173,8 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) struct kvec *iov = rqst->rq_iov; struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base; struct cifs_ses *ses; + struct 
shash_desc *shash = &server->secmech.sdeschmacsha256->shash; + struct smb_rqst drqst; ses = smb2_find_smb_ses(server, shdr->SessionId); if (!ses) { @@ -190,21 +192,39 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) } rc = crypto_shash_setkey(server->secmech.hmacsha256, - ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE); + ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE); if (rc) { cifs_dbg(VFS, "%s: Could not update with response\n", __func__); return rc; } - rc = crypto_shash_init(&server->secmech.sdeschmacsha256->shash); + rc = crypto_shash_init(shash); if (rc) { cifs_dbg(VFS, "%s: Could not init sha256", __func__); return rc; } - rc = __cifs_calc_signature(rqst, server, sigptr, - &server->secmech.sdeschmacsha256->shash); + /* + * For SMB2+, __cifs_calc_signature() expects to sign only the actual + * data, that is, iov[0] should not contain a rfc1002 length. + * + * Sign the rfc1002 length prior to passing the data (iov[1-N]) down to + * __cifs_calc_signature(). + */ + drqst = *rqst; + if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) { + rc = crypto_shash_update(shash, iov[0].iov_base, + iov[0].iov_len); + if (rc) { + cifs_dbg(VFS, "%s: Could not update with payload\n", + __func__); + return rc; + } + drqst.rq_iov++; + drqst.rq_nvec--; + } + rc = __cifs_calc_signature(&drqst, server, sigptr, shash); if (!rc) memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE); @@ -408,12 +428,14 @@ generate_smb311signingkey(struct cifs_ses *ses) int smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) { - int rc = 0; + int rc; unsigned char smb3_signature[SMB2_CMACAES_SIZE]; unsigned char *sigptr = smb3_signature; struct kvec *iov = rqst->rq_iov; struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base; struct cifs_ses *ses; + struct shash_desc *shash = &server->secmech.sdesccmacaes->shash; + struct smb_rqst drqst; ses = smb2_find_smb_ses(server, shdr->SessionId); if (!ses) { @@ -425,8 +447,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE); rc = crypto_shash_setkey(server->secmech.cmacaes, - ses->smb3signingkey, SMB2_CMACAES_SIZE); - + ses->smb3signingkey, SMB2_CMACAES_SIZE); if (rc) { cifs_dbg(VFS, "%s: Could not set key for cmac aes\n", __func__); return rc; @@ -437,15 +458,33 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) * so unlike smb2 case we do not have to check here if secmech are * initialized */ - rc = crypto_shash_init(&server->secmech.sdesccmacaes->shash); + rc = crypto_shash_init(shash); if (rc) { cifs_dbg(VFS, "%s: Could not init cmac aes\n", __func__); return rc; } - rc = __cifs_calc_signature(rqst, server, sigptr, - &server->secmech.sdesccmacaes->shash); + /* + * For SMB2+, __cifs_calc_signature() expects to sign only the actual + * data, that is, iov[0] should not contain a rfc1002 length. + * + * Sign the rfc1002 length prior to passing the data (iov[1-N]) down to + * __cifs_calc_signature(). 
+ */ + drqst = *rqst; + if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) { + rc = crypto_shash_update(shash, iov[0].iov_base, + iov[0].iov_len); + if (rc) { + cifs_dbg(VFS, "%s: Could not update with payload\n", + __func__); + return rc; + } + drqst.rq_iov++; + drqst.rq_nvec--; + } + rc = __cifs_calc_signature(&drqst, server, sigptr, shash); if (!rc) memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE); @@ -548,6 +587,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr, temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); memset(temp, 0, sizeof(struct mid_q_entry)); + kref_init(&temp->refcount); temp->mid = le64_to_cpu(shdr->MessageId); temp->pid = current->pid; temp->command = shdr->Command; /* Always LE */ diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c index 6fd94d9ffac2..c55ea4e6201b 100644 --- a/fs/cifs/smbdirect.c +++ b/fs/cifs/smbdirect.c @@ -2083,8 +2083,9 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg) * rqst: the data to write * return value: 0 if successfully write, otherwise error code */ -int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) +int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst) { + struct smbd_connection *info = server->smbd_conn; struct kvec vec; int nvecs; int size; @@ -2118,7 +2119,7 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and * ends at page boundary */ - buflen = smb2_rqst_len(rqst, true); + buflen = smb_rqst_len(server, rqst); if (buflen + sizeof(struct smbd_data_transfer) > info->max_fragmented_send_size) { diff --git a/fs/cifs/smbdirect.h b/fs/cifs/smbdirect.h index 1e419c21dc60..a11096254f29 100644 --- a/fs/cifs/smbdirect.h +++ b/fs/cifs/smbdirect.h @@ -292,7 +292,7 @@ void smbd_destroy(struct smbd_connection *info); /* Interface for carrying upper layer I/O through send/recv */ int smbd_recv(struct smbd_connection *info, struct msghdr *msg); -int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst); +int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst); enum mr_state { MR_READY, @@ -332,7 +332,7 @@ static inline void *smbd_get_connection( static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; } static inline void smbd_destroy(struct smbd_connection *info) {} static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; } -static inline int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) {return -1; } +static inline int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst) {return -1; } #endif #endif diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index fb57dfbfb749..a341ec839c83 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -61,6 +61,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); memset(temp, 0, sizeof(struct mid_q_entry)); + kref_init(&temp->refcount); temp->mid = get_mid(smb_buffer); temp->pid = current->pid; temp->command = cpu_to_le16(smb_buffer->Command); @@ -82,6 +83,21 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) return temp; } +static void _cifs_mid_q_entry_release(struct kref *refcount) +{ + struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry, + refcount); + + mempool_free(mid, cifs_mid_poolp); +} + +void cifs_mid_q_entry_release(struct mid_q_entry *midEntry) +{ + spin_lock(&GlobalMid_Lock); + kref_put(&midEntry->refcount, 
_cifs_mid_q_entry_release); + spin_unlock(&GlobalMid_Lock); +} + void DeleteMidQEntry(struct mid_q_entry *midEntry) { @@ -110,7 +126,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry) } } #endif - mempool_free(midEntry, cifs_mid_poolp); + cifs_mid_q_entry_release(midEntry); } void @@ -202,14 +218,15 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg, } unsigned long -smb2_rqst_len(struct smb_rqst *rqst, bool skip_rfc1002_marker) +smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst) { unsigned int i; struct kvec *iov; int nvec; unsigned long buflen = 0; - if (skip_rfc1002_marker && rqst->rq_iov[0].iov_len == 4) { + if (server->vals->header_preamble_size == 0 && + rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) { iov = &rqst->rq_iov[1]; nvec = rqst->rq_nvec - 1; } else { @@ -260,7 +277,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, __be32 rfc1002_marker; if (cifs_rdma_enabled(server) && server->smbd_conn) { - rc = smbd_send(server->smbd_conn, rqst); + rc = smbd_send(server, rqst); goto smbd_done; } if (ssocket == NULL) @@ -271,7 +288,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, (char *)&val, sizeof(val)); for (j = 0; j < num_rqst; j++) - send_length += smb2_rqst_len(&rqst[j], true); + send_length += smb_rqst_len(server, &rqst[j]); rfc1002_marker = cpu_to_be32(send_length); /* Generate a rfc1002 marker for SMB2+ */ diff --git a/fs/eventfd.c b/fs/eventfd.c index ceb1031f1cac..08d3bd602f73 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -101,20 +101,14 @@ static int eventfd_release(struct inode *inode, struct file *file) return 0; } -static struct wait_queue_head * -eventfd_get_poll_head(struct file *file, __poll_t events) -{ - struct eventfd_ctx *ctx = file->private_data; - - return &ctx->wqh; -} - -static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask) +static __poll_t eventfd_poll(struct file *file, poll_table *wait) { struct eventfd_ctx *ctx = file->private_data; __poll_t events = 0; u64 count; + poll_wait(file, &ctx->wqh, wait); + /* * All writes to ctx->count occur within ctx->wqh.lock. 
This read * can be done outside ctx->wqh.lock because we know that poll_wait @@ -156,11 +150,11 @@ static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask) count = READ_ONCE(ctx->count); if (count > 0) - events |= (EPOLLIN & eventmask); + events |= EPOLLIN; if (count == ULLONG_MAX) events |= EPOLLERR; if (ULLONG_MAX - 1 > count) - events |= (EPOLLOUT & eventmask); + events |= EPOLLOUT; return events; } @@ -311,8 +305,7 @@ static const struct file_operations eventfd_fops = { .show_fdinfo = eventfd_show_fdinfo, #endif .release = eventfd_release, - .get_poll_head = eventfd_get_poll_head, - .poll_mask = eventfd_poll_mask, + .poll = eventfd_poll, .read = eventfd_read, .write = eventfd_write, .llseek = noop_llseek, diff --git a/fs/eventpoll.c b/fs/eventpoll.c index ea4436f409fb..67db22fe99c5 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -922,18 +922,14 @@ static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head return 0; } -static struct wait_queue_head *ep_eventpoll_get_poll_head(struct file *file, - __poll_t eventmask) -{ - struct eventpoll *ep = file->private_data; - return &ep->poll_wait; -} - -static __poll_t ep_eventpoll_poll_mask(struct file *file, __poll_t eventmask) +static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait) { struct eventpoll *ep = file->private_data; int depth = 0; + /* Insert inside our poll wait queue */ + poll_wait(file, &ep->poll_wait, wait); + /* * Proceed to find out if wanted events are really available inside * the ready list. @@ -972,8 +968,7 @@ static const struct file_operations eventpoll_fops = { .show_fdinfo = ep_show_fdinfo, #endif .release = ep_eventpoll_release, - .get_poll_head = ep_eventpoll_get_poll_head, - .poll_mask = ep_eventpoll_poll_mask, + .poll = ep_eventpoll_poll, .llseek = noop_llseek, }; diff --git a/fs/exec.c b/fs/exec.c index 2d4e0075bd24..72e961a62adb 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -290,7 +290,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm) struct vm_area_struct *vma = NULL; struct mm_struct *mm = bprm->mm; - bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + bprm->vma = vma = vm_area_alloc(mm); if (!vma) return -ENOMEM; @@ -298,7 +298,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm) err = -EINTR; goto err_free; } - vma->vm_mm = mm; /* * Place the stack at the largest stack address the architecture @@ -311,7 +310,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm) vma->vm_start = vma->vm_end - PAGE_SIZE; vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - INIT_LIST_HEAD(&vma->anon_vma_chain); err = insert_vm_struct(mm, vma); if (err) @@ -326,7 +324,7 @@ err: up_write(&mm->mmap_sem); err_free: bprm->vma = NULL; - kmem_cache_free(vm_area_cachep, vma); + vm_area_free(vma); return err; } diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index b00481c475cb..e68cefe08261 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -184,7 +184,6 @@ static int ext4_init_block_bitmap(struct super_block *sb, unsigned int bit, bit_max; struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_fsblk_t start, tmp; - int flex_bg = 0; J_ASSERT_BH(bh, buffer_locked(bh)); @@ -207,22 +206,19 @@ static int ext4_init_block_bitmap(struct super_block *sb, start = ext4_group_first_block_no(sb, block_group); - if (ext4_has_feature_flex_bg(sb)) - flex_bg = 1; - /* Set bits for block and inode bitmaps, and inode table */ tmp = ext4_block_bitmap(sb, gdp); - if (!flex_bg || ext4_block_in_group(sb, 
tmp, block_group)) + if (ext4_block_in_group(sb, tmp, block_group)) ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); tmp = ext4_inode_bitmap(sb, gdp); - if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) + if (ext4_block_in_group(sb, tmp, block_group)) ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); tmp = ext4_inode_table(sb, gdp); for (; tmp < ext4_inode_table(sb, gdp) + sbi->s_itb_per_group; tmp++) { - if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) + if (ext4_block_in_group(sb, tmp, block_group)) ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); } @@ -442,7 +438,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) goto verify; } ext4_lock_group(sb, block_group); - if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { + if (ext4_has_group_desc_csum(sb) && + (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { + if (block_group == 0) { + ext4_unlock_group(sb, block_group); + unlock_buffer(bh); + ext4_error(sb, "Block bitmap for bg 0 marked " + "uninitialized"); + err = -EFSCORRUPTED; + goto out; + } err = ext4_init_block_bitmap(sb, bh, block_group, desc); set_bitmap_uptodate(bh); set_buffer_uptodate(bh); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 0b127853c584..7c7123f265c2 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1114,6 +1114,7 @@ struct ext4_inode_info { #define EXT4_MOUNT_DIOREAD_NOLOCK 0x400000 /* Enable support for dio read nolocking */ #define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ +#define EXT4_MOUNT_WARN_ON_ERROR 0x2000000 /* Trigger WARN_ON on error */ #define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */ #define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */ #define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */ @@ -1507,11 +1508,6 @@ static inline struct ext4_inode_info *EXT4_I(struct inode *inode) static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) { return ino == EXT4_ROOT_INO || - ino == EXT4_USR_QUOTA_INO || - ino == EXT4_GRP_QUOTA_INO || - ino == EXT4_BOOT_LOADER_INO || - ino == EXT4_JOURNAL_INO || - ino == EXT4_RESIZE_INO || (ino >= EXT4_FIRST_INO(sb) && ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)); } @@ -3018,9 +3014,6 @@ extern int ext4_inline_data_fiemap(struct inode *inode, struct iomap; extern int ext4_inline_data_iomap(struct inode *inode, struct iomap *iomap); -extern int ext4_try_to_evict_inline_data(handle_t *handle, - struct inode *inode, - int needed); extern int ext4_inline_data_truncate(struct inode *inode, int *has_inline); extern int ext4_convert_inline_data(struct inode *inode); diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h index 98fb0c119c68..adf6668b596f 100644 --- a/fs/ext4/ext4_extents.h +++ b/fs/ext4/ext4_extents.h @@ -91,6 +91,7 @@ struct ext4_extent_header { }; #define EXT4_EXT_MAGIC cpu_to_le16(0xf30a) +#define EXT4_MAX_EXTENT_DEPTH 5 #define EXT4_EXTENT_TAIL_OFFSET(hdr) \ (sizeof(struct ext4_extent_header) + \ diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 0057fe3f248d..8ce6fd5b10dd 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -869,6 +869,12 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block, eh = ext_inode_hdr(inode); depth = ext_depth(inode); + if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) { + EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d", + depth); + ret = -EFSCORRUPTED; + goto err; + } if (path) { 
ext4_ext_drop_refs(path); diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index f525f909b559..fb83750c1a14 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -150,7 +150,16 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) } ext4_lock_group(sb, block_group); - if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { + if (ext4_has_group_desc_csum(sb) && + (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) { + if (block_group == 0) { + ext4_unlock_group(sb, block_group); + unlock_buffer(bh); + ext4_error(sb, "Inode bitmap for bg 0 marked " + "uninitialized"); + err = -EFSCORRUPTED; + goto out; + } memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8); ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8, bh->b_data); @@ -994,7 +1003,8 @@ got: /* recheck and clear flag under lock if we still need to */ ext4_lock_group(sb, group); - if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { + if (ext4_has_group_desc_csum(sb) && + (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); ext4_free_group_clusters_set(sb, gdp, ext4_free_clusters_after_init(sb, group, gdp)); diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 285ed1588730..e55a8bc870bd 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -437,6 +437,7 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle, memset((void *)ext4_raw_inode(&is.iloc)->i_block, 0, EXT4_MIN_INLINE_DATA_SIZE); + memset(ei->i_data, 0, EXT4_MIN_INLINE_DATA_SIZE); if (ext4_has_feature_extents(inode->i_sb)) { if (S_ISDIR(inode->i_mode) || @@ -886,11 +887,11 @@ retry_journal: flags |= AOP_FLAG_NOFS; if (ret == -ENOSPC) { + ext4_journal_stop(handle); ret = ext4_da_convert_inline_data_to_extent(mapping, inode, flags, fsdata); - ext4_journal_stop(handle); if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry_journal; @@ -1890,42 +1891,6 @@ out: return (error < 0 ? error : 0); } -/* - * Called during xattr set, and if we can sparse space 'needed', - * just create the extent tree evict the data to the outer block. - * - * We use jbd2 instead of page cache to move data to the 1st block - * so that the whole transaction can be committed as a whole and - * the data isn't lost because of the delayed page cache write. 
- */ -int ext4_try_to_evict_inline_data(handle_t *handle, - struct inode *inode, - int needed) -{ - int error; - struct ext4_xattr_entry *entry; - struct ext4_inode *raw_inode; - struct ext4_iloc iloc; - - error = ext4_get_inode_loc(inode, &iloc); - if (error) - return error; - - raw_inode = ext4_raw_inode(&iloc); - entry = (struct ext4_xattr_entry *)((void *)raw_inode + - EXT4_I(inode)->i_inline_off); - if (EXT4_XATTR_LEN(entry->e_name_len) + - EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)) < needed) { - error = -ENOSPC; - goto out; - } - - error = ext4_convert_inline_data_nolock(handle, inode, &iloc); -out: - brelse(iloc.bh); - return error; -} - int ext4_inline_data_truncate(struct inode *inode, int *has_inline) { handle_t *handle; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 2ea07efbe016..7d6c10017bdf 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -402,9 +402,9 @@ static int __check_block_validity(struct inode *inode, const char *func, if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, map->m_len)) { ext4_error_inode(inode, func, line, map->m_pblk, - "lblock %lu mapped to illegal pblock " + "lblock %lu mapped to illegal pblock %llu " "(length %d)", (unsigned long) map->m_lblk, - map->m_len); + map->m_pblk, map->m_len); return -EFSCORRUPTED; } return 0; @@ -4506,7 +4506,8 @@ static int __ext4_get_inode_loc(struct inode *inode, int inodes_per_block, inode_offset; iloc->bh = NULL; - if (!ext4_valid_inum(sb, inode->i_ino)) + if (inode->i_ino < EXT4_ROOT_INO || + inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)) return -EFSCORRUPTED; iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 6eae2b91aafa..f7ab34088162 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2423,7 +2423,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, * initialize bb_free to be able to skip * empty groups without initialization */ - if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { + if (ext4_has_group_desc_csum(sb) && + (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { meta_group_info[i]->bb_free = ext4_free_clusters_after_init(sb, group, desc); } else { @@ -2989,7 +2990,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, #endif ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len); - if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { + if (ext4_has_group_desc_csum(sb) && + (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); ext4_free_group_clusters_set(sb, gdp, ext4_free_clusters_after_init(sb, diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 0c4c2201b3aa..ba2396a7bd04 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -405,6 +405,9 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn) static void ext4_handle_error(struct super_block *sb) { + if (test_opt(sb, WARN_ON_ERROR)) + WARN_ON_ONCE(1); + if (sb_rdonly(sb)) return; @@ -740,6 +743,9 @@ __acquires(bitlock) va_end(args); } + if (test_opt(sb, WARN_ON_ERROR)) + WARN_ON_ONCE(1); + if (test_opt(sb, ERRORS_CONT)) { ext4_commit_super(sb, 0); return; @@ -1371,7 +1377,8 @@ enum { Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax, - Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit, + Opt_stripe, Opt_delalloc, Opt_nodelalloc, 
Opt_warn_on_error, + Opt_nowarn_on_error, Opt_mblk_io_submit, Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize, Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity, Opt_inode_readahead_blks, Opt_journal_ioprio, @@ -1438,6 +1445,8 @@ static const match_table_t tokens = { {Opt_dax, "dax"}, {Opt_stripe, "stripe=%u"}, {Opt_delalloc, "delalloc"}, + {Opt_warn_on_error, "warn_on_error"}, + {Opt_nowarn_on_error, "nowarn_on_error"}, {Opt_lazytime, "lazytime"}, {Opt_nolazytime, "nolazytime"}, {Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"}, @@ -1602,6 +1611,8 @@ static const struct mount_opts { MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, MOPT_EXT4_ONLY | MOPT_CLEAR}, + {Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET}, + {Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR}, {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, MOPT_EXT4_ONLY | MOPT_CLEAR}, {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, @@ -2331,6 +2342,7 @@ static int ext4_check_descriptors(struct super_block *sb, struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); ext4_fsblk_t last_block; + ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1; ext4_fsblk_t block_bitmap; ext4_fsblk_t inode_bitmap; ext4_fsblk_t inode_table; @@ -2363,6 +2375,14 @@ static int ext4_check_descriptors(struct super_block *sb, if (!sb_rdonly(sb)) return 0; } + if (block_bitmap >= sb_block + 1 && + block_bitmap <= last_bg_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " + "Block bitmap for group %u overlaps " + "block group descriptors", i); + if (!sb_rdonly(sb)) + return 0; + } if (block_bitmap < first_block || block_bitmap > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Block bitmap for group %u not in group " @@ -2377,6 +2397,14 @@ static int ext4_check_descriptors(struct super_block *sb, if (!sb_rdonly(sb)) return 0; } + if (inode_bitmap >= sb_block + 1 && + inode_bitmap <= last_bg_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " + "Inode bitmap for group %u overlaps " + "block group descriptors", i); + if (!sb_rdonly(sb)) + return 0; + } if (inode_bitmap < first_block || inode_bitmap > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Inode bitmap for group %u not in group " @@ -2391,6 +2419,14 @@ static int ext4_check_descriptors(struct super_block *sb, if (!sb_rdonly(sb)) return 0; } + if (inode_table >= sb_block + 1 && + inode_table <= last_bg_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " + "Inode table for group %u overlaps " + "block group descriptors", i); + if (!sb_rdonly(sb)) + return 0; + } if (inode_table < first_block || inode_table + sbi->s_itb_per_group - 1 > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " @@ -3097,13 +3133,22 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb) ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count; struct ext4_group_desc *gdp = NULL; + if (!ext4_has_group_desc_csum(sb)) + return ngroups; + for (group = 0; group < ngroups; group++) { gdp = ext4_get_group_desc(sb, group, NULL); if (!gdp) continue; - if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) + if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)) + continue; + if (group != 0) break; + ext4_error(sb, "Inode table for bg 0 marked as " + "needing zeroing"); + if (sb_rdonly(sb)) + return ngroups; } return group; @@ -3742,6 +3787,13 @@ static int ext4_fill_super(struct super_block 
*sb, void *data, int silent) le32_to_cpu(es->s_log_block_size)); goto failed_mount; } + if (le32_to_cpu(es->s_log_cluster_size) > + (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { + ext4_msg(sb, KERN_ERR, + "Invalid log cluster size: %u", + le32_to_cpu(es->s_log_cluster_size)); + goto failed_mount; + } if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) { ext4_msg(sb, KERN_ERR, @@ -3806,6 +3858,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } else { sbi->s_inode_size = le16_to_cpu(es->s_inode_size); sbi->s_first_ino = le32_to_cpu(es->s_first_ino); + if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) { + ext4_msg(sb, KERN_ERR, "invalid first ino: %u", + sbi->s_first_ino); + goto failed_mount; + } if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || (!is_power_of_2(sbi->s_inode_size)) || (sbi->s_inode_size > blocksize)) { @@ -3882,13 +3939,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) "block size (%d)", clustersize, blocksize); goto failed_mount; } - if (le32_to_cpu(es->s_log_cluster_size) > - (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { - ext4_msg(sb, KERN_ERR, - "Invalid log cluster size: %u", - le32_to_cpu(es->s_log_cluster_size)); - goto failed_mount; - } sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - le32_to_cpu(es->s_log_block_size); sbi->s_clusters_per_group = @@ -3909,10 +3959,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } } else { if (clustersize != blocksize) { - ext4_warning(sb, "fragment/cluster size (%d) != " - "block size (%d)", clustersize, - blocksize); - clustersize = blocksize; + ext4_msg(sb, KERN_ERR, + "fragment/cluster size (%d) != " + "block size (%d)", clustersize, blocksize); + goto failed_mount; } if (sbi->s_blocks_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, @@ -3966,6 +4016,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) ext4_blocks_count(es)); goto failed_mount; } + if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) && + (sbi->s_cluster_ratio == 1)) { + ext4_msg(sb, KERN_WARNING, "bad geometry: first data " + "block is 0 with a 1k block and cluster size"); + goto failed_mount; + } + blocks_count = (ext4_blocks_count(es) - le32_to_cpu(es->s_first_data_block) + EXT4_BLOCKS_PER_GROUP(sb) - 1); @@ -4001,6 +4058,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) ret = -ENOMEM; goto failed_mount; } + if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) != + le32_to_cpu(es->s_inodes_count)) { + ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu", + le32_to_cpu(es->s_inodes_count), + ((u64)sbi->s_groups_count * sbi->s_inodes_per_group)); + ret = -EINVAL; + goto failed_mount; + } bgl_lock_init(sbi->s_blockgroup_lock); @@ -4736,6 +4801,14 @@ static int ext4_commit_super(struct super_block *sb, int sync) if (!sbh || block_device_ejected(sb)) return error; + + /* + * The superblock bh should be mapped, but it might not be if the + * device was hot-removed. Not much we can do but fail the I/O. + */ + if (!buffer_mapped(sbh)) + return error; + /* * If the file system is mounted read-only, don't update the * superblock write time. 
This avoids updating the superblock diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index fc4ced59c565..723df14f4084 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -230,12 +230,12 @@ __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh, { int error = -EFSCORRUPTED; - if (buffer_verified(bh)) - return 0; - if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || BHDR(bh)->h_blocks != cpu_to_le32(1)) goto errout; + if (buffer_verified(bh)) + return 0; + error = -EFSBADCRC; if (!ext4_xattr_block_csum_verify(inode, bh)) goto errout; @@ -1560,7 +1560,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, handle_t *handle, struct inode *inode, bool is_block) { - struct ext4_xattr_entry *last; + struct ext4_xattr_entry *last, *next; struct ext4_xattr_entry *here = s->here; size_t min_offs = s->end - s->base, name_len = strlen(i->name); int in_inode = i->in_inode; @@ -1595,7 +1595,13 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, /* Compute min_offs and last. */ last = s->first; - for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { + for (; !IS_LAST_ENTRY(last); last = next) { + next = EXT4_XATTR_NEXT(last); + if ((void *)next >= s->end) { + EXT4_ERROR_INODE(inode, "corrupted xattr entries"); + ret = -EFSCORRUPTED; + goto out; + } if (!last->e_value_inum && last->e_value_size) { size_t offs = le16_to_cpu(last->e_value_offs); if (offs < min_offs) @@ -2206,23 +2212,8 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode, if (EXT4_I(inode)->i_extra_isize == 0) return -ENOSPC; error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */); - if (error) { - if (error == -ENOSPC && - ext4_has_inline_data(inode)) { - error = ext4_try_to_evict_inline_data(handle, inode, - EXT4_XATTR_LEN(strlen(i->name) + - EXT4_XATTR_SIZE(i->value_len))); - if (error) - return error; - error = ext4_xattr_ibody_find(inode, i, is); - if (error) - return error; - error = ext4_xattr_set_entry(i, s, handle, inode, - false /* is_block */); - } - if (error) - return error; - } + if (error) + return error; header = IHDR(inode, ext4_raw_inode(&is->iloc)); if (!IS_LAST_ENTRY(s->first)) { header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); @@ -2651,6 +2642,11 @@ static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode, last = IFIRST(header); /* Find the entry best suited to be pushed into EA block */ for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { + /* never move system.data out of the inode */ + if ((last->e_name_len == 4) && + (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) && + !memcmp(last->e_name, "data", 4)) + continue; total_size = EXT4_XATTR_LEN(last->e_name_len); if (!last->e_value_inum) total_size += EXT4_XATTR_SIZE( diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 065dc919a0ce..bfd589ea74c0 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -707,13 +707,21 @@ static void fat_set_state(struct super_block *sb, brelse(bh); } +static void fat_reset_iocharset(struct fat_mount_options *opts) +{ + if (opts->iocharset != fat_default_iocharset) { + /* Note: opts->iocharset can be NULL here */ + kfree(opts->iocharset); + opts->iocharset = fat_default_iocharset; + } +} + static void delayed_free(struct rcu_head *p) { struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu); unload_nls(sbi->nls_disk); unload_nls(sbi->nls_io); - if (sbi->options.iocharset != fat_default_iocharset) - kfree(sbi->options.iocharset); + fat_reset_iocharset(&sbi->options); kfree(sbi); } @@ -1132,7 +1140,7 @@ static int 
parse_options(struct super_block *sb, char *options, int is_vfat, opts->fs_fmask = opts->fs_dmask = current_umask(); opts->allow_utime = -1; opts->codepage = fat_default_codepage; - opts->iocharset = fat_default_iocharset; + fat_reset_iocharset(opts); if (is_vfat) { opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95; opts->rodir = 0; @@ -1289,8 +1297,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat, /* vfat specific */ case Opt_charset: - if (opts->iocharset != fat_default_iocharset) - kfree(opts->iocharset); + fat_reset_iocharset(opts); iocharset = match_strdup(&args[0]); if (!iocharset) return -ENOMEM; @@ -1881,8 +1888,7 @@ out_fail: iput(fat_inode); unload_nls(sbi->nls_io); unload_nls(sbi->nls_disk); - if (sbi->options.iocharset != fat_default_iocharset) - kfree(sbi->options.iocharset); + fat_reset_iocharset(&sbi->options); sb->s_fs_info = NULL; kfree(sbi); return error; diff --git a/fs/inode.c b/fs/inode.c index 2c300e981796..8c86c809ca17 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1999,8 +1999,14 @@ void inode_init_owner(struct inode *inode, const struct inode *dir, inode->i_uid = current_fsuid(); if (dir && dir->i_mode & S_ISGID) { inode->i_gid = dir->i_gid; + + /* Directories are special, and always inherit S_ISGID */ if (S_ISDIR(mode)) mode |= S_ISGID; + else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) && + !in_group_p(inode->i_gid) && + !capable_wrt_inode_uidgid(dir, CAP_FSETID)) + mode &= ~S_ISGID; } else inode->i_gid = current_fsgid(); inode->i_mode = mode; diff --git a/fs/internal.h b/fs/internal.h index 980d005b21b4..5645b4ebf494 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -127,7 +127,6 @@ int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group, extern int open_check_o_direct(struct file *f); extern int vfs_open(const struct path *, struct file *, const struct cred *); -extern struct file *filp_clone_open(struct file *); /* * inode.c diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 51dd68e67b0f..c0b66a7a795b 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1361,6 +1361,13 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) if (jh->b_transaction == transaction && jh->b_jlist != BJ_Metadata) { jbd_lock_bh_state(bh); + if (jh->b_transaction == transaction && + jh->b_jlist != BJ_Metadata) + pr_err("JBD2: assertion failure: h_type=%u " + "h_line_no=%u block_no=%llu jlist=%u\n", + handle->h_type, handle->h_line_no, + (unsigned long long) bh->b_blocknr, + jh->b_jlist); J_ASSERT_JH(jh, jh->b_transaction != transaction || jh->b_jlist == BJ_Metadata); jbd_unlock_bh_state(bh); @@ -1380,11 +1387,11 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) * of the transaction. 
This needs to be done * once a transaction -bzzz */ - jh->b_modified = 1; if (handle->h_buffer_credits <= 0) { ret = -ENOSPC; goto out_unlock_bh; } + jh->b_modified = 1; handle->h_buffer_credits--; } diff --git a/fs/pipe.c b/fs/pipe.c index bb0840e234f3..39d6f431da83 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -509,22 +509,19 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } } -static struct wait_queue_head * -pipe_get_poll_head(struct file *filp, __poll_t events) -{ - struct pipe_inode_info *pipe = filp->private_data; - - return &pipe->wait; -} - /* No kernel lock held - fine */ -static __poll_t pipe_poll_mask(struct file *filp, __poll_t events) +static __poll_t +pipe_poll(struct file *filp, poll_table *wait) { + __poll_t mask; struct pipe_inode_info *pipe = filp->private_data; - int nrbufs = pipe->nrbufs; - __poll_t mask = 0; + int nrbufs; + + poll_wait(filp, &pipe->wait, wait); /* Reading only -- no need for acquiring the semaphore. */ + nrbufs = pipe->nrbufs; + mask = 0; if (filp->f_mode & FMODE_READ) { mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0; if (!pipe->writers && filp->f_version != pipe->w_counter) @@ -1023,8 +1020,7 @@ const struct file_operations pipefifo_fops = { .llseek = no_llseek, .read_iter = pipe_read, .write_iter = pipe_write, - .get_poll_head = pipe_get_poll_head, - .poll_mask = pipe_poll_mask, + .poll = pipe_poll, .unlocked_ioctl = pipe_ioctl, .release = pipe_release, .fasync = pipe_fasync, diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 6ac1c92997ea..bb1c1625b158 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -564,11 +564,20 @@ static int proc_seq_open(struct inode *inode, struct file *file) return seq_open(file, de->seq_ops); } +static int proc_seq_release(struct inode *inode, struct file *file) +{ + struct proc_dir_entry *de = PDE(inode); + + if (de->state_size) + return seq_release_private(inode, file); + return seq_release(inode, file); +} + static const struct file_operations proc_seq_fops = { .open = proc_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = proc_seq_release, }; struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode, diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index e9679016271f..dfd73a4616ce 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -831,7 +831,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) SEQ_PUT_DEC(" kB\nSwap: ", mss->swap); SEQ_PUT_DEC(" kB\nSwapPss: ", mss->swap_pss >> PSS_SHIFT); - SEQ_PUT_DEC(" kB\nLocked: ", mss->pss >> PSS_SHIFT); + SEQ_PUT_DEC(" kB\nLocked: ", + mss->pss_locked >> PSS_SHIFT); seq_puts(m, " kB\n"); } if (!rollup_mode) { diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c index 7e288d97adcb..9fed1c05f1f4 100644 --- a/fs/reiserfs/prints.c +++ b/fs/reiserfs/prints.c @@ -76,83 +76,99 @@ static char *le_type(struct reiserfs_key *key) } /* %k */ -static void sprintf_le_key(char *buf, struct reiserfs_key *key) +static int scnprintf_le_key(char *buf, size_t size, struct reiserfs_key *key) { if (key) - sprintf(buf, "[%d %d %s %s]", le32_to_cpu(key->k_dir_id), - le32_to_cpu(key->k_objectid), le_offset(key), - le_type(key)); + return scnprintf(buf, size, "[%d %d %s %s]", + le32_to_cpu(key->k_dir_id), + le32_to_cpu(key->k_objectid), le_offset(key), + le_type(key)); else - sprintf(buf, "[NULL]"); + return scnprintf(buf, size, "[NULL]"); } /* %K */ -static void sprintf_cpu_key(char *buf, struct cpu_key *key) +static int scnprintf_cpu_key(char *buf, size_t size, struct 
cpu_key *key) { if (key) - sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id, - key->on_disk_key.k_objectid, reiserfs_cpu_offset(key), - cpu_type(key)); + return scnprintf(buf, size, "[%d %d %s %s]", + key->on_disk_key.k_dir_id, + key->on_disk_key.k_objectid, + reiserfs_cpu_offset(key), cpu_type(key)); else - sprintf(buf, "[NULL]"); + return scnprintf(buf, size, "[NULL]"); } -static void sprintf_de_head(char *buf, struct reiserfs_de_head *deh) +static int scnprintf_de_head(char *buf, size_t size, + struct reiserfs_de_head *deh) { if (deh) - sprintf(buf, - "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]", - deh_offset(deh), deh_dir_id(deh), deh_objectid(deh), - deh_location(deh), deh_state(deh)); + return scnprintf(buf, size, + "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]", + deh_offset(deh), deh_dir_id(deh), + deh_objectid(deh), deh_location(deh), + deh_state(deh)); else - sprintf(buf, "[NULL]"); + return scnprintf(buf, size, "[NULL]"); } -static void sprintf_item_head(char *buf, struct item_head *ih) +static int scnprintf_item_head(char *buf, size_t size, struct item_head *ih) { if (ih) { - strcpy(buf, - (ih_version(ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*"); - sprintf_le_key(buf + strlen(buf), &(ih->ih_key)); - sprintf(buf + strlen(buf), ", item_len %d, item_location %d, " - "free_space(entry_count) %d", - ih_item_len(ih), ih_location(ih), ih_free_space(ih)); + char *p = buf; + char * const end = buf + size; + + p += scnprintf(p, end - p, "%s", + (ih_version(ih) == KEY_FORMAT_3_6) ? + "*3.6* " : "*3.5*"); + + p += scnprintf_le_key(p, end - p, &ih->ih_key); + + p += scnprintf(p, end - p, + ", item_len %d, item_location %d, free_space(entry_count) %d", + ih_item_len(ih), ih_location(ih), + ih_free_space(ih)); + return p - buf; } else - sprintf(buf, "[NULL]"); + return scnprintf(buf, size, "[NULL]"); } -static void sprintf_direntry(char *buf, struct reiserfs_dir_entry *de) +static int scnprintf_direntry(char *buf, size_t size, + struct reiserfs_dir_entry *de) { char name[20]; memcpy(name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen); name[de->de_namelen > 19 ? 19 : de->de_namelen] = 0; - sprintf(buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid); + return scnprintf(buf, size, "\"%s\"==>[%d %d]", + name, de->de_dir_id, de->de_objectid); } -static void sprintf_block_head(char *buf, struct buffer_head *bh) +static int scnprintf_block_head(char *buf, size_t size, struct buffer_head *bh) { - sprintf(buf, "level=%d, nr_items=%d, free_space=%d rdkey ", - B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh)); + return scnprintf(buf, size, + "level=%d, nr_items=%d, free_space=%d rdkey ", + B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh)); } -static void sprintf_buffer_head(char *buf, struct buffer_head *bh) +static int scnprintf_buffer_head(char *buf, size_t size, struct buffer_head *bh) { - sprintf(buf, - "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)", - bh->b_bdev, bh->b_size, - (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)), - bh->b_state, bh->b_page, - buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE", - buffer_dirty(bh) ? "DIRTY" : "CLEAN", - buffer_locked(bh) ? "LOCKED" : "UNLOCKED"); + return scnprintf(buf, size, + "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)", + bh->b_bdev, bh->b_size, + (unsigned long long)bh->b_blocknr, + atomic_read(&(bh->b_count)), + bh->b_state, bh->b_page, + buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE", + buffer_dirty(bh) ? 
"DIRTY" : "CLEAN", + buffer_locked(bh) ? "LOCKED" : "UNLOCKED"); } -static void sprintf_disk_child(char *buf, struct disk_child *dc) +static int scnprintf_disk_child(char *buf, size_t size, struct disk_child *dc) { - sprintf(buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc), - dc_size(dc)); + return scnprintf(buf, size, "[dc_number=%d, dc_size=%u]", + dc_block_number(dc), dc_size(dc)); } static char *is_there_reiserfs_struct(char *fmt, int *what) @@ -189,55 +205,60 @@ static void prepare_error_buf(const char *fmt, va_list args) char *fmt1 = fmt_buf; char *k; char *p = error_buf; + char * const end = &error_buf[sizeof(error_buf)]; int what; spin_lock(&error_lock); - strcpy(fmt1, fmt); + if (WARN_ON(strscpy(fmt_buf, fmt, sizeof(fmt_buf)) < 0)) { + strscpy(error_buf, "format string too long", end - error_buf); + goto out_unlock; + } while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) { *k = 0; - p += vsprintf(p, fmt1, args); + p += vscnprintf(p, end - p, fmt1, args); switch (what) { case 'k': - sprintf_le_key(p, va_arg(args, struct reiserfs_key *)); + p += scnprintf_le_key(p, end - p, + va_arg(args, struct reiserfs_key *)); break; case 'K': - sprintf_cpu_key(p, va_arg(args, struct cpu_key *)); + p += scnprintf_cpu_key(p, end - p, + va_arg(args, struct cpu_key *)); break; case 'h': - sprintf_item_head(p, va_arg(args, struct item_head *)); + p += scnprintf_item_head(p, end - p, + va_arg(args, struct item_head *)); break; case 't': - sprintf_direntry(p, - va_arg(args, - struct reiserfs_dir_entry *)); + p += scnprintf_direntry(p, end - p, + va_arg(args, struct reiserfs_dir_entry *)); break; case 'y': - sprintf_disk_child(p, - va_arg(args, struct disk_child *)); + p += scnprintf_disk_child(p, end - p, + va_arg(args, struct disk_child *)); break; case 'z': - sprintf_block_head(p, - va_arg(args, struct buffer_head *)); + p += scnprintf_block_head(p, end - p, + va_arg(args, struct buffer_head *)); break; case 'b': - sprintf_buffer_head(p, - va_arg(args, struct buffer_head *)); + p += scnprintf_buffer_head(p, end - p, + va_arg(args, struct buffer_head *)); break; case 'a': - sprintf_de_head(p, - va_arg(args, - struct reiserfs_de_head *)); + p += scnprintf_de_head(p, end - p, + va_arg(args, struct reiserfs_de_head *)); break; } - p += strlen(p); fmt1 = k + 2; } - vsprintf(p, fmt1, args); + p += vscnprintf(p, end - p, fmt1, args); +out_unlock: spin_unlock(&error_lock); } diff --git a/fs/select.c b/fs/select.c index 317891ff8165..4a6b6e4b21cb 100644 --- a/fs/select.c +++ b/fs/select.c @@ -34,29 +34,6 @@ #include <linux/uaccess.h> -__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt) -{ - if (file->f_op->poll) { - return file->f_op->poll(file, pt); - } else if (file_has_poll_mask(file)) { - unsigned int events = poll_requested_events(pt); - struct wait_queue_head *head; - - if (pt && pt->_qproc) { - head = file->f_op->get_poll_head(file, events); - if (!head) - return DEFAULT_POLLMASK; - if (IS_ERR(head)) - return EPOLLERR; - pt->_qproc(file, head, pt); - } - - return file->f_op->poll_mask(file, events); - } else { - return DEFAULT_POLLMASK; - } -} -EXPORT_SYMBOL_GPL(vfs_poll); /* * Estimate expected accuracy in ns from a timeval. 
diff --git a/fs/timerfd.c b/fs/timerfd.c index d84a2bee4f82..cdad49da3ff7 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c @@ -226,20 +226,21 @@ static int timerfd_release(struct inode *inode, struct file *file) kfree_rcu(ctx, rcu); return 0; } - -static struct wait_queue_head *timerfd_get_poll_head(struct file *file, - __poll_t eventmask) + +static __poll_t timerfd_poll(struct file *file, poll_table *wait) { struct timerfd_ctx *ctx = file->private_data; + __poll_t events = 0; + unsigned long flags; - return &ctx->wqh; -} + poll_wait(file, &ctx->wqh, wait); -static __poll_t timerfd_poll_mask(struct file *file, __poll_t eventmask) -{ - struct timerfd_ctx *ctx = file->private_data; + spin_lock_irqsave(&ctx->wqh.lock, flags); + if (ctx->ticks) + events |= EPOLLIN; + spin_unlock_irqrestore(&ctx->wqh.lock, flags); - return ctx->ticks ? EPOLLIN : 0; + return events; } static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count, @@ -363,8 +364,7 @@ static long timerfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg static const struct file_operations timerfd_fops = { .release = timerfd_release, - .get_poll_head = timerfd_get_poll_head, - .poll_mask = timerfd_poll_mask, + .poll = timerfd_poll, .read = timerfd_read, .llseek = noop_llseek, .show_fdinfo = timerfd_show, diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 123bf7d516fc..594d192b2331 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -222,24 +222,26 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, unsigned long reason) { struct mm_struct *mm = ctx->mm; - pte_t *pte; + pte_t *ptep, pte; bool ret = true; VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); - pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma)); - if (!pte) + ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma)); + + if (!ptep) goto out; ret = false; + pte = huge_ptep_get(ptep); /* * Lockless access: we're in a wait_event so it's ok if it * changes under us. */ - if (huge_pte_none(*pte)) + if (huge_pte_none(pte)) ret = true; - if (!huge_pte_write(*pte) && (reason & VM_UFFD_WP)) + if (!huge_pte_write(pte) && (reason & VM_UFFD_WP)) ret = true; out: return ret; diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c index 84db76e0e3e3..fecd187fcf2c 100644 --- a/fs/xfs/libxfs/xfs_ag_resv.c +++ b/fs/xfs/libxfs/xfs_ag_resv.c @@ -157,6 +157,7 @@ __xfs_ag_resv_free( error = xfs_mod_fdblocks(pag->pag_mount, oldresv, true); resv->ar_reserved = 0; resv->ar_asked = 0; + resv->ar_orig_reserved = 0; if (error) trace_xfs_ag_resv_free_error(pag->pag_mount, pag->pag_agno, @@ -189,13 +190,34 @@ __xfs_ag_resv_init( struct xfs_mount *mp = pag->pag_mount; struct xfs_ag_resv *resv; int error; - xfs_extlen_t reserved; + xfs_extlen_t hidden_space; if (used > ask) ask = used; - reserved = ask - used; - error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true); + switch (type) { + case XFS_AG_RESV_RMAPBT: + /* + * Space taken by the rmapbt is not subtracted from fdblocks + * because the rmapbt lives in the free space. Here we must + * subtract the entire reservation from fdblocks so that we + * always have blocks available for rmapbt expansion. + */ + hidden_space = ask; + break; + case XFS_AG_RESV_METADATA: + /* + * Space taken by all other metadata btrees are accounted + * on-disk as used space. We therefore only hide the space + * that is reserved but not used by the trees. 
+ */ + hidden_space = ask - used; + break; + default: + ASSERT(0); + return -EINVAL; + } + error = xfs_mod_fdblocks(mp, -(int64_t)hidden_space, true); if (error) { trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno, error, _RET_IP_); @@ -216,7 +238,8 @@ __xfs_ag_resv_init( resv = xfs_perag_resv(pag, type); resv->ar_asked = ask; - resv->ar_reserved = resv->ar_orig_reserved = reserved; + resv->ar_orig_reserved = hidden_space; + resv->ar_reserved = ask - used; trace_xfs_ag_resv_init(pag, type, ask); return 0; diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 01628f0c9a0c..7205268b30bc 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -5780,6 +5780,32 @@ del_cursor: return error; } +/* Make sure we won't be right-shifting an extent past the maximum bound. */ +int +xfs_bmap_can_insert_extents( + struct xfs_inode *ip, + xfs_fileoff_t off, + xfs_fileoff_t shift) +{ + struct xfs_bmbt_irec got; + int is_empty; + int error = 0; + + ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); + + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) + return -EIO; + + xfs_ilock(ip, XFS_ILOCK_EXCL); + error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty); + if (!error && !is_empty && got.br_startoff >= off && + ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff) + error = -EINVAL; + xfs_iunlock(ip, XFS_ILOCK_EXCL); + + return error; +} + int xfs_bmap_insert_extents( struct xfs_trans *tp, diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h index 99dddbd0fcc6..9b49ddf99c41 100644 --- a/fs/xfs/libxfs/xfs_bmap.h +++ b/fs/xfs/libxfs/xfs_bmap.h @@ -227,6 +227,8 @@ int xfs_bmap_collapse_extents(struct xfs_trans *tp, struct xfs_inode *ip, xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb, bool *done, xfs_fsblock_t *firstblock, struct xfs_defer_ops *dfops); +int xfs_bmap_can_insert_extents(struct xfs_inode *ip, xfs_fileoff_t off, + xfs_fileoff_t shift); int xfs_bmap_insert_extents(struct xfs_trans *tp, struct xfs_inode *ip, xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb, bool *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock, diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h index 1c5a8aaf2bfc..059bc44c27e8 100644 --- a/fs/xfs/libxfs/xfs_format.h +++ b/fs/xfs/libxfs/xfs_format.h @@ -962,6 +962,9 @@ typedef enum xfs_dinode_fmt { XFS_DFORK_DSIZE(dip, mp) : \ XFS_DFORK_ASIZE(dip, mp)) +#define XFS_DFORK_MAXEXT(dip, mp, w) \ + (XFS_DFORK_SIZE(dip, mp, w) / sizeof(struct xfs_bmbt_rec)) + /* * Return pointers to the data or attribute forks. 
*/ @@ -1526,6 +1529,8 @@ typedef struct xfs_bmdr_block { #define BMBT_STARTBLOCK_BITLEN 52 #define BMBT_BLOCKCOUNT_BITLEN 21 +#define BMBT_STARTOFF_MASK ((1ULL << BMBT_STARTOFF_BITLEN) - 1) + typedef struct xfs_bmbt_rec { __be64 l0, l1; } xfs_bmbt_rec_t; diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index d38d724534c4..33dc34655ac3 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c @@ -374,6 +374,47 @@ xfs_log_dinode_to_disk( } } +static xfs_failaddr_t +xfs_dinode_verify_fork( + struct xfs_dinode *dip, + struct xfs_mount *mp, + int whichfork) +{ + uint32_t di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork); + + switch (XFS_DFORK_FORMAT(dip, whichfork)) { + case XFS_DINODE_FMT_LOCAL: + /* + * no local regular files yet + */ + if (whichfork == XFS_DATA_FORK) { + if (S_ISREG(be16_to_cpu(dip->di_mode))) + return __this_address; + if (be64_to_cpu(dip->di_size) > + XFS_DFORK_SIZE(dip, mp, whichfork)) + return __this_address; + } + if (di_nextents) + return __this_address; + break; + case XFS_DINODE_FMT_EXTENTS: + if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork)) + return __this_address; + break; + case XFS_DINODE_FMT_BTREE: + if (whichfork == XFS_ATTR_FORK) { + if (di_nextents > MAXAEXTNUM) + return __this_address; + } else if (di_nextents > MAXEXTNUM) { + return __this_address; + } + break; + default: + return __this_address; + } + return NULL; +} + xfs_failaddr_t xfs_dinode_verify( struct xfs_mount *mp, @@ -441,24 +482,9 @@ xfs_dinode_verify( case S_IFREG: case S_IFLNK: case S_IFDIR: - switch (dip->di_format) { - case XFS_DINODE_FMT_LOCAL: - /* - * no local regular files yet - */ - if (S_ISREG(mode)) - return __this_address; - if (di_size > XFS_DFORK_DSIZE(dip, mp)) - return __this_address; - if (dip->di_nextents) - return __this_address; - /* fall through */ - case XFS_DINODE_FMT_EXTENTS: - case XFS_DINODE_FMT_BTREE: - break; - default: - return __this_address; - } + fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK); + if (fa) + return fa; break; case 0: /* Uninitialized inode ok. */ @@ -468,17 +494,9 @@ xfs_dinode_verify( } if (XFS_DFORK_Q(dip)) { - switch (dip->di_aformat) { - case XFS_DINODE_FMT_LOCAL: - if (dip->di_anextents) - return __this_address; - /* fall through */ - case XFS_DINODE_FMT_EXTENTS: - case XFS_DINODE_FMT_BTREE: - break; - default: - return __this_address; - } + fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK); + if (fa) + return fa; } else { /* * If there is no fork offset, this may be a freshly-made inode diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c index 65fc4ed2e9a1..b228c821bae6 100644 --- a/fs/xfs/libxfs/xfs_rtbitmap.c +++ b/fs/xfs/libxfs/xfs_rtbitmap.c @@ -1029,8 +1029,8 @@ xfs_rtalloc_query_range( if (low_rec->ar_startext >= mp->m_sb.sb_rextents || low_rec->ar_startext == high_rec->ar_startext) return 0; - if (high_rec->ar_startext >= mp->m_sb.sb_rextents) - high_rec->ar_startext = mp->m_sb.sb_rextents - 1; + if (high_rec->ar_startext > mp->m_sb.sb_rextents) + high_rec->ar_startext = mp->m_sb.sb_rextents; /* Iterate the bitmap, looking for discrepancies. */ rtstart = low_rec->ar_startext; diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index c35009a86699..83b1e8c6c18f 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -685,12 +685,10 @@ out_unlock_iolock: } /* - * dead simple method of punching delalyed allocation blocks from a range in - * the inode. 
Walks a block at a time so will be slow, but is only executed in - * rare error cases so the overhead is not critical. This will always punch out - * both the start and end blocks, even if the ranges only partially overlap - * them, so it is up to the caller to ensure that partial blocks are not - * passed in. + * Dead simple method of punching delalyed allocation blocks from a range in + * the inode. This will always punch out both the start and end blocks, even + * if the ranges only partially overlap them, so it is up to the caller to + * ensure that partial blocks are not passed in. */ int xfs_bmap_punch_delalloc_range( @@ -698,63 +696,44 @@ xfs_bmap_punch_delalloc_range( xfs_fileoff_t start_fsb, xfs_fileoff_t length) { - xfs_fileoff_t remaining = length; + struct xfs_ifork *ifp = &ip->i_df; + xfs_fileoff_t end_fsb = start_fsb + length; + struct xfs_bmbt_irec got, del; + struct xfs_iext_cursor icur; int error = 0; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - do { - int done; - xfs_bmbt_irec_t imap; - int nimaps = 1; - xfs_fsblock_t firstblock; - struct xfs_defer_ops dfops; + if (!(ifp->if_flags & XFS_IFEXTENTS)) { + error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK); + if (error) + return error; + } - /* - * Map the range first and check that it is a delalloc extent - * before trying to unmap the range. Otherwise we will be - * trying to remove a real extent (which requires a - * transaction) or a hole, which is probably a bad idea... - */ - error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps, - XFS_BMAPI_ENTIRE); + if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got)) + return 0; - if (error) { - /* something screwed, just bail */ - if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { - xfs_alert(ip->i_mount, - "Failed delalloc mapping lookup ino %lld fsb %lld.", - ip->i_ino, start_fsb); - } - break; - } - if (!nimaps) { - /* nothing there */ - goto next_block; - } - if (imap.br_startblock != DELAYSTARTBLOCK) { - /* been converted, ignore */ - goto next_block; - } - WARN_ON(imap.br_blockcount == 0); + while (got.br_startoff + got.br_blockcount > start_fsb) { + del = got; + xfs_trim_extent(&del, start_fsb, length); /* - * Note: while we initialise the firstblock/dfops pair, they - * should never be used because blocks should never be - * allocated or freed for a delalloc extent and hence we need - * don't cancel or finish them after the xfs_bunmapi() call. + * A delete can push the cursor forward. Step back to the + * previous extent on non-delalloc or extents outside the + * target range. 
*/ - xfs_defer_init(&dfops, &firstblock); - error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock, - &dfops, &done); - if (error) - break; + if (!del.br_blockcount || + !isnullstartblock(del.br_startblock)) { + if (!xfs_iext_prev_extent(ifp, &icur, &got)) + break; + continue; + } - ASSERT(!xfs_defer_has_unfinished_work(&dfops)); -next_block: - start_fsb++; - remaining--; - } while(remaining > 0); + error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur, + &got, &del); + if (error || !xfs_iext_get_extent(ifp, &icur, &got)) + break; + } return error; } @@ -1208,7 +1187,22 @@ xfs_free_file_space( return 0; if (offset + len > XFS_ISIZE(ip)) len = XFS_ISIZE(ip) - offset; - return iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops); + error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops); + if (error) + return error; + + /* + * If we zeroed right up to EOF and EOF straddles a page boundary we + * must make sure that the post-EOF area is also zeroed because the + * page could be mmap'd and iomap_zero_range doesn't do that for us. + * Writeback of the eof page will do this, albeit clumsily. + */ + if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) { + error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, + (offset + len) & ~PAGE_MASK, LLONG_MAX); + } + + return error; } /* @@ -1404,6 +1398,10 @@ xfs_insert_file_space( trace_xfs_insert_file_space(ip); + error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb); + if (error) + return error; + error = xfs_prepare_shift(ip, offset); if (error) return error; diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c index c34fa9c342f2..c7157bc48bd1 100644 --- a/fs/xfs/xfs_fsmap.c +++ b/fs/xfs/xfs_fsmap.c @@ -513,8 +513,8 @@ xfs_getfsmap_rtdev_rtbitmap_query( struct xfs_trans *tp, struct xfs_getfsmap_info *info) { - struct xfs_rtalloc_rec alow; - struct xfs_rtalloc_rec ahigh; + struct xfs_rtalloc_rec alow = { 0 }; + struct xfs_rtalloc_rec ahigh = { 0 }; int error; xfs_ilock(tp->t_mountp->m_rbmip, XFS_ILOCK_SHARED); diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index a7afcad6b711..3f2bd6032cf8 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -387,7 +387,7 @@ xfs_reserve_blocks( do { free = percpu_counter_sum(&mp->m_fdblocks) - mp->m_alloc_set_aside; - if (!free) + if (free <= 0) break; delta = request - mp->m_resblks; diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 7a96c4e0ab5c..5df4de666cc1 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -3236,7 +3236,6 @@ xfs_iflush_cluster( struct xfs_inode *cip; int nr_found; int clcount = 0; - int bufwasdelwri; int i; pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); @@ -3360,37 +3359,22 @@ cluster_corrupt_out: * inode buffer and shut down the filesystem. */ rcu_read_unlock(); - /* - * Clean up the buffer. If it was delwri, just release it -- - * brelse can handle it with no problems. If not, shut down the - * filesystem before releasing the buffer. - */ - bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q); - if (bufwasdelwri) - xfs_buf_relse(bp); - xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); - if (!bufwasdelwri) { - /* - * Just like incore_relse: if we have b_iodone functions, - * mark the buffer as an error and call them. Otherwise - * mark it as stale and brelse. 
- */ - if (bp->b_iodone) { - bp->b_flags &= ~XBF_DONE; - xfs_buf_stale(bp); - xfs_buf_ioerror(bp, -EIO); - xfs_buf_ioend(bp); - } else { - xfs_buf_stale(bp); - xfs_buf_relse(bp); - } - } - /* - * Unlocks the flush lock + * We'll always have an inode attached to the buffer for completion + * process by the time we are called from xfs_iflush(). Hence we have + * always need to do IO completion processing to abort the inodes + * attached to the buffer. handle them just like the shutdown case in + * xfs_buf_submit(). */ + ASSERT(bp->b_iodone); + bp->b_flags &= ~XBF_DONE; + xfs_buf_stale(bp); + xfs_buf_ioerror(bp, -EIO); + xfs_buf_ioend(bp); + + /* abort the corrupt inode, as it was not attached to the buffer */ xfs_iflush_abort(cip, false); kmem_free(cilist); xfs_perag_put(pag); @@ -3486,12 +3470,17 @@ xfs_iflush( xfs_log_force(mp, 0); /* - * inode clustering: - * see if other inodes can be gathered into this write + * inode clustering: try to gather other inodes into this write + * + * Note: Any error during clustering will result in the filesystem + * being shut down and completion callbacks run on the cluster buffer. + * As we have already flushed and attached this inode to the buffer, + * it has already been aborted and released by xfs_iflush_cluster() and + * so we have no further error handling to do here. */ error = xfs_iflush_cluster(ip, bp); if (error) - goto cluster_corrupt_out; + return error; *bpp = bp; return 0; @@ -3500,12 +3489,8 @@ corrupt_out: if (bp) xfs_buf_relse(bp); xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); -cluster_corrupt_out: - error = -EFSCORRUPTED; abort_out: - /* - * Unlocks the flush lock - */ + /* abort the corrupt inode, as it was not attached to the buffer */ xfs_iflush_abort(ip, false); return error; } diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 49f5492eed3b..55876dd02f0c 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -963,12 +963,13 @@ xfs_ilock_for_iomap( unsigned *lockmode) { unsigned mode = XFS_ILOCK_SHARED; + bool is_write = flags & (IOMAP_WRITE | IOMAP_ZERO); /* * COW writes may allocate delalloc space or convert unwritten COW * extents, so we need to make sure to take the lock exclusively here. */ - if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO))) { + if (xfs_is_reflink_inode(ip) && is_write) { /* * FIXME: It could still overwrite on unshared extents and not * need allocation. @@ -989,6 +990,7 @@ xfs_ilock_for_iomap( mode = XFS_ILOCK_EXCL; } +relock: if (flags & IOMAP_NOWAIT) { if (!xfs_ilock_nowait(ip, mode)) return -EAGAIN; @@ -996,6 +998,17 @@ xfs_ilock_for_iomap( xfs_ilock(ip, mode); } + /* + * The reflink iflag could have changed since the earlier unlocked + * check, so if we got ILOCK_SHARED for a write and but we're now a + * reflink inode we have to switch to ILOCK_EXCL and relock. + */ + if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_reflink_inode(ip)) { + xfs_iunlock(ip, mode); + mode = XFS_ILOCK_EXCL; + goto relock; + } + *lockmode = mode; return 0; } diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index e040af120b69..524f543c5b82 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -258,7 +258,12 @@ xfs_trans_alloc( if (!(flags & XFS_TRANS_NO_WRITECOUNT)) sb_start_intwrite(mp->m_super); - WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE); + /* + * Zero-reservation ("empty") transactions can't modify anything, so + * they're allowed to run while we're frozen. 
+ */ + WARN_ON(resp->tr_logres > 0 && + mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE); atomic_inc(&mp->m_active_trans); tp = kmem_zone_zalloc(xfs_trans_zone, diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index faddde44de8c..3063125197ad 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -265,33 +265,41 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, * For now w.r.t page table cache, mark the range_size as PAGE_SIZE */ +#ifndef pte_free_tlb #define pte_free_tlb(tlb, ptep, address) \ do { \ __tlb_adjust_range(tlb, address, PAGE_SIZE); \ __pte_free_tlb(tlb, ptep, address); \ } while (0) +#endif +#ifndef pmd_free_tlb #define pmd_free_tlb(tlb, pmdp, address) \ do { \ __tlb_adjust_range(tlb, address, PAGE_SIZE); \ __pmd_free_tlb(tlb, pmdp, address); \ } while (0) +#endif #ifndef __ARCH_HAS_4LEVEL_HACK +#ifndef pud_free_tlb #define pud_free_tlb(tlb, pudp, address) \ do { \ __tlb_adjust_range(tlb, address, PAGE_SIZE); \ __pud_free_tlb(tlb, pudp, address); \ } while (0) #endif +#endif #ifndef __ARCH_HAS_5LEVEL_HACK +#ifndef p4d_free_tlb #define p4d_free_tlb(tlb, pudp, address) \ do { \ __tlb_adjust_range(tlb, address, PAGE_SIZE); \ __p4d_free_tlb(tlb, pudp, address); \ } while (0) #endif +#endif #define tlb_migrate_finish(mm) do {} while (0) diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index cc414db9da0a..482461d8931d 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -245,7 +245,8 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); void af_alg_free_resources(struct af_alg_async_req *areq); void af_alg_async_cb(struct crypto_async_request *_req, int err); -__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events); +__poll_t af_alg_poll(struct file *file, struct socket *sock, + poll_table *wait); struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, unsigned int areqlen); int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h index 9564597cbfac..0aa1d9c3e0b9 100644 --- a/include/dt-bindings/clock/imx6ul-clock.h +++ b/include/dt-bindings/clock/imx6ul-clock.h @@ -235,27 +235,25 @@ #define IMX6UL_CLK_CSI_PODF 222 #define IMX6UL_CLK_PLL3_120M 223 #define IMX6UL_CLK_KPP 224 -#define IMX6UL_CLK_CKO1_SEL 225 -#define IMX6UL_CLK_CKO1_PODF 226 -#define IMX6UL_CLK_CKO1 227 -#define IMX6UL_CLK_CKO2_SEL 228 -#define IMX6UL_CLK_CKO2_PODF 229 -#define IMX6UL_CLK_CKO2 230 -#define IMX6UL_CLK_CKO 231 - -/* For i.MX6ULL */ -#define IMX6ULL_CLK_ESAI_PRED 232 -#define IMX6ULL_CLK_ESAI_PODF 233 -#define IMX6ULL_CLK_ESAI_EXTAL 234 -#define IMX6ULL_CLK_ESAI_MEM 235 -#define IMX6ULL_CLK_ESAI_IPG 236 -#define IMX6ULL_CLK_DCP_CLK 237 -#define IMX6ULL_CLK_EPDC_PRE_SEL 238 -#define IMX6ULL_CLK_EPDC_SEL 239 -#define IMX6ULL_CLK_EPDC_PODF 240 -#define IMX6ULL_CLK_EPDC_ACLK 241 -#define IMX6ULL_CLK_EPDC_PIX 242 -#define IMX6ULL_CLK_ESAI_SEL 243 +#define IMX6ULL_CLK_ESAI_PRED 225 +#define IMX6ULL_CLK_ESAI_PODF 226 +#define IMX6ULL_CLK_ESAI_EXTAL 227 +#define IMX6ULL_CLK_ESAI_MEM 228 +#define IMX6ULL_CLK_ESAI_IPG 229 +#define IMX6ULL_CLK_DCP_CLK 230 +#define IMX6ULL_CLK_EPDC_PRE_SEL 231 +#define IMX6ULL_CLK_EPDC_SEL 232 +#define IMX6ULL_CLK_EPDC_PODF 233 +#define IMX6ULL_CLK_EPDC_ACLK 234 +#define IMX6ULL_CLK_EPDC_PIX 235 +#define IMX6ULL_CLK_ESAI_SEL 236 +#define IMX6UL_CLK_CKO1_SEL 237 +#define IMX6UL_CLK_CKO1_PODF 238 +#define 
IMX6UL_CLK_CKO1 239 +#define IMX6UL_CLK_CKO2_SEL 240 +#define IMX6UL_CLK_CKO2_PODF 241 +#define IMX6UL_CLK_CKO2 242 +#define IMX6UL_CLK_CKO 243 #define IMX6UL_CLK_END 244 #endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 4b35a66383f9..e54f40974eb0 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -443,6 +443,9 @@ int acpi_check_resource_conflict(const struct resource *res); int acpi_check_region(resource_size_t start, resource_size_t n, const char *name); +acpi_status acpi_release_memory(acpi_handle handle, struct resource *res, + u32 level); + int acpi_resources_are_enforced(void); #ifdef CONFIG_HIBERNATION diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 9154570edf29..79226ca8f80f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1119,8 +1119,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q, if (!q->limits.chunk_sectors) return q->limits.max_sectors; - return q->limits.chunk_sectors - - (offset & (q->limits.chunk_sectors - 1)); + return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors - + (offset & (q->limits.chunk_sectors - 1)))); } static inline unsigned int blk_rq_get_max_sectors(struct request *rq, diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 975fb4cf1bb7..d50c2f0a655a 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -2,6 +2,7 @@ #ifndef _BPF_CGROUP_H #define _BPF_CGROUP_H +#include <linux/errno.h> #include <linux/jump_label.h> #include <uapi/linux/bpf.h> @@ -188,12 +189,38 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, \ __ret; \ }) +int cgroup_bpf_prog_attach(const union bpf_attr *attr, + enum bpf_prog_type ptype, struct bpf_prog *prog); +int cgroup_bpf_prog_detach(const union bpf_attr *attr, + enum bpf_prog_type ptype); +int cgroup_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr); #else +struct bpf_prog; struct cgroup_bpf {}; static inline void cgroup_bpf_put(struct cgroup *cgrp) {} static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } +static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr, + enum bpf_prog_type ptype, + struct bpf_prog *prog) +{ + return -EINVAL; +} + +static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr, + enum bpf_prog_type ptype) +{ + return -EINVAL; +} + +static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + return -EINVAL; +} + #define cgroup_bpf_enabled (0) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 7df32a3200f7..8827e797ff97 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -696,6 +696,8 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map) struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key); int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); +int sockmap_get_from_fd(const union bpf_attr *attr, int type, + struct bpf_prog *prog); #else static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) { @@ -714,6 +716,12 @@ static inline int sock_map_prog(struct bpf_map *map, { return -EOPNOTSUPP; } + +static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type, + struct bpf_prog *prog) +{ + return -EINVAL; +} #endif #if 
defined(CONFIG_XDP_SOCKETS) diff --git a/include/linux/bpf_lirc.h b/include/linux/bpf_lirc.h index 5f8a4283092d..9d9ff755ec29 100644 --- a/include/linux/bpf_lirc.h +++ b/include/linux/bpf_lirc.h @@ -5,11 +5,12 @@ #include <uapi/linux/bpf.h> #ifdef CONFIG_BPF_LIRC_MODE2 -int lirc_prog_attach(const union bpf_attr *attr); +int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog); int lirc_prog_detach(const union bpf_attr *attr); int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); #else -static inline int lirc_prog_attach(const union bpf_attr *attr) +static inline int lirc_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog) { return -EINVAL; } diff --git a/include/linux/compat.h b/include/linux/compat.h index b1a5562b3215..c68acc47da57 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -72,6 +72,9 @@ */ #ifndef COMPAT_SYSCALL_DEFINEx #define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ + __diag_push(); \ + __diag_ignore(GCC, 8, "-Wattribute-alias", \ + "Type aliasing is used to sanitize syscall arguments");\ asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ __attribute__((alias(__stringify(__se_compat_sys##name)))); \ @@ -80,8 +83,11 @@ asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ { \ - return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\ + long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\ + __MAP(x,__SC_TEST,__VA_ARGS__); \ + return ret; \ } \ + __diag_pop(); \ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) #endif /* COMPAT_SYSCALL_DEFINEx */ diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index f1a7492a5cc8..573f5a7d42d4 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -66,25 +66,40 @@ #endif /* + * Feature detection for gnu_inline (gnu89 extern inline semantics). Either + * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics, + * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not + * defined so the gnu89 semantics are the default. + */ +#ifdef __GNUC_STDC_INLINE__ +# define __gnu_inline __attribute__((gnu_inline)) +#else +# define __gnu_inline +#endif + +/* * Force always-inline if the user requests it so via the .config, * or if gcc is too old. * GCC does not warn about unused static inline functions for * -Wunused-function. This turns out to avoid the need for complex #ifdef * directives. Suppress the warning in clang as well by using "unused" * function attribute, which is redundant but not harmful for gcc. + * Prefer gnu_inline, so that extern inline functions do not emit an + * externally visible function. This makes extern inline behave as per gnu89 + * semantics rather than c99. This prevents multiple symbol definition errors + * of extern inline functions at link time. + * A lot of inline functions can cause havoc with function tracing. 
*/ #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) -#define inline inline __attribute__((always_inline,unused)) notrace -#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace -#define __inline __inline __attribute__((always_inline,unused)) notrace +#define inline \ + inline __attribute__((always_inline, unused)) notrace __gnu_inline #else -/* A lot of inline functions can cause havoc with function tracing */ -#define inline inline __attribute__((unused)) notrace -#define __inline__ __inline__ __attribute__((unused)) notrace -#define __inline __inline __attribute__((unused)) notrace +#define inline inline __attribute__((unused)) notrace __gnu_inline #endif +#define __inline__ inline +#define __inline inline #define __always_inline inline __attribute__((always_inline)) #define noinline __attribute__((noinline)) @@ -347,3 +362,28 @@ #if GCC_VERSION >= 50100 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif + +/* + * Turn individual warnings and errors on and off locally, depending + * on version. + */ +#define __diag_GCC(version, severity, s) \ + __diag_GCC_ ## version(__diag_GCC_ ## severity s) + +/* Severity used in pragma directives */ +#define __diag_GCC_ignore ignored +#define __diag_GCC_warn warning +#define __diag_GCC_error error + +/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */ +#if GCC_VERSION >= 40600 +#define __diag_str1(s) #s +#define __diag_str(s) __diag_str1(s) +#define __diag(s) _Pragma(__diag_str(GCC diagnostic s)) +#endif + +#if GCC_VERSION >= 80000 +#define __diag_GCC_8(s) __diag(s) +#else +#define __diag_GCC_8(s) +#endif diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 6b79a9bba9a7..a8ba6b04152c 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -271,4 +271,22 @@ struct ftrace_likely_data { # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) #endif +#ifndef __diag +#define __diag(string) +#endif + +#ifndef __diag_GCC +#define __diag_GCC(version, severity, string) +#endif + +#define __diag_push() __diag(push) +#define __diag_pop() __diag(pop) + +#define __diag_ignore(compiler, version, option, comment) \ + __diag_ ## compiler(version, ignore, option) +#define __diag_warn(compiler, version, option, comment) \ + __diag_ ## compiler(version, warn, option) +#define __diag_error(compiler, version, option, comment) \ + __diag_ ## compiler(version, error, option) + #endif /* __LINUX_COMPILER_TYPES_H */ diff --git a/include/linux/dax.h b/include/linux/dax.h index 3855e3800f48..deb0f663252f 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -135,7 +135,7 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size); ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops); -int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, +vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, pfn_t *pfnp, int *errp, const struct iomap_ops *ops); vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size, pfn_t pfn); diff --git a/include/linux/filter.h b/include/linux/filter.h index 20f2659dd829..c73dd7396886 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -470,9 +470,7 @@ struct sock_fprog_kern { }; struct bpf_binary_header { - u16 pages; - u16 locked:1; - + u32 pages; /* Some 
arches need word alignment for their instructions */ u8 image[] __aligned(4); }; @@ -481,7 +479,7 @@ struct bpf_prog { u16 pages; /* Number of allocated pages */ u16 jited:1, /* Is our filter JIT'ed? */ jit_requested:1,/* archs need to JIT the prog */ - locked:1, /* Program image locked? */ + undo_set_mem:1, /* Passed set_memory_ro() checkpoint */ gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ dst_needed:1, /* Do we need dst entry? */ @@ -677,46 +675,24 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) static inline void bpf_prog_lock_ro(struct bpf_prog *fp) { -#ifdef CONFIG_ARCH_HAS_SET_MEMORY - fp->locked = 1; - if (set_memory_ro((unsigned long)fp, fp->pages)) - fp->locked = 0; -#endif + fp->undo_set_mem = 1; + set_memory_ro((unsigned long)fp, fp->pages); } static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) { -#ifdef CONFIG_ARCH_HAS_SET_MEMORY - if (fp->locked) { - WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages)); - /* In case set_memory_rw() fails, we want to be the first - * to crash here instead of some random place later on. - */ - fp->locked = 0; - } -#endif + if (fp->undo_set_mem) + set_memory_rw((unsigned long)fp, fp->pages); } static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) { -#ifdef CONFIG_ARCH_HAS_SET_MEMORY - hdr->locked = 1; - if (set_memory_ro((unsigned long)hdr, hdr->pages)) - hdr->locked = 0; -#endif + set_memory_ro((unsigned long)hdr, hdr->pages); } static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) { -#ifdef CONFIG_ARCH_HAS_SET_MEMORY - if (hdr->locked) { - WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages)); - /* In case set_memory_rw() fails, we want to be the first - * to crash here instead of some random place later on. 
- */ - hdr->locked = 0; - } -#endif + set_memory_rw((unsigned long)hdr, hdr->pages); } static inline struct bpf_binary_header * @@ -728,22 +704,6 @@ bpf_jit_binary_hdr(const struct bpf_prog *fp) return (void *)addr; } -#ifdef CONFIG_ARCH_HAS_SET_MEMORY -static inline int bpf_prog_check_pages_ro_single(const struct bpf_prog *fp) -{ - if (!fp->locked) - return -ENOLCK; - if (fp->jited) { - const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp); - - if (!hdr->locked) - return -ENOLCK; - } - - return 0; -} -#endif - int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); static inline int sk_filter(struct sock *sk, struct sk_buff *skb) { @@ -805,8 +765,8 @@ static inline bool bpf_dump_raw_ok(void) struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); -static inline int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, - struct net_device *fwd) +static inline int xdp_ok_fwd_dev(const struct net_device *fwd, + unsigned int pktlen) { unsigned int len; @@ -814,7 +774,7 @@ static inline int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, return -ENETDOWN; len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN; - if (skb->len > len) + if (pktlen > len) return -EMSGSIZE; return 0; diff --git a/include/linux/fs.h b/include/linux/fs.h index 5c91108846db..805bf22898cf 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1720,8 +1720,6 @@ struct file_operations { int (*iterate) (struct file *, struct dir_context *); int (*iterate_shared) (struct file *, struct dir_context *); __poll_t (*poll) (struct file *, struct poll_table_struct *); - struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t); - __poll_t (*poll_mask) (struct file *, __poll_t); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); int (*mmap) (struct file *, struct vm_area_struct *); @@ -2422,6 +2420,7 @@ extern struct file *filp_open(const char *, int, umode_t); extern struct file *file_open_root(struct dentry *, struct vfsmount *, const char *, int, umode_t); extern struct file * dentry_open(const struct path *, int, const struct cred *); +extern struct file *filp_clone_open(struct file *); extern int filp_close(struct file *, fl_owner_t id); extern struct filename *getname_flags(const char __user *, int, int *); diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h index 3efa3b861d44..941b11811f85 100644 --- a/include/linux/fsl/guts.h +++ b/include/linux/fsl/guts.h @@ -16,6 +16,7 @@ #define __FSL_GUTS_H__ #include <linux/types.h> +#include <linux/io.h> /** * Global Utility Registers. 
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 8154f4920fcb..ebb77674be90 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -223,7 +223,6 @@ extern enum ftrace_tracing_type_t ftrace_tracing_type; */ int register_ftrace_function(struct ftrace_ops *ops); int unregister_ftrace_function(struct ftrace_ops *ops); -void clear_ftrace_function(void); extern void ftrace_stub(unsigned long a0, unsigned long a1, struct ftrace_ops *op, struct pt_regs *regs); @@ -239,7 +238,6 @@ static inline int ftrace_nr_registered_ops(void) { return 0; } -static inline void clear_ftrace_function(void) { } static inline void ftrace_kill(void) { } static inline void ftrace_free_init_mem(void) { } static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { } diff --git a/include/linux/hid.h b/include/linux/hid.h index 41a3d5775394..773bcb1d4044 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -511,6 +511,7 @@ struct hid_output_fifo { #define HID_STAT_ADDED BIT(0) #define HID_STAT_PARSED BIT(1) #define HID_STAT_DUP_DETECTED BIT(2) +#define HID_STAT_REPROBED BIT(3) struct hid_input { struct list_head list; @@ -579,7 +580,7 @@ struct hid_device { /* device report descriptor */ bool battery_avoid_query; #endif - unsigned int status; /* see STAT flags above */ + unsigned long status; /* see STAT flags above */ unsigned claimed; /* Claimed by hidinput, hiddev? */ unsigned quirks; /* Various quirks the device can pull on us */ bool io_started; /* If IO has started */ diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 7843b98e1c6e..c20c7e197d07 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -105,13 +105,13 @@ static inline bool br_vlan_enabled(const struct net_device *dev) static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid) { - return -1; + return -EINVAL; } static inline int br_vlan_get_info(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo) { - return -1; + return -EINVAL; } #endif diff --git a/include/linux/igmp.h b/include/linux/igmp.h index f8231854b5d6..119f53941c12 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -109,6 +109,8 @@ struct ip_mc_list { extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto); extern int igmp_rcv(struct sk_buff *); extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); +extern int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr, + unsigned int mode); extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr); extern void ip_mc_drop_socket(struct sock *sk); extern int ip_mc_source(int add, int omode, struct sock *sk, diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h index 767467d886de..67c75372b691 100644 --- a/include/linux/iio/buffer-dma.h +++ b/include/linux/iio/buffer-dma.h @@ -141,7 +141,7 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, char __user *user_buffer); size_t iio_dma_buffer_data_available(struct iio_buffer *buffer); int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd); -int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length); +int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length); int iio_dma_buffer_request_update(struct iio_buffer *buffer); int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h index d7188de4db96..3f4bf60b0bb5 100644 --- 
a/include/linux/input/mt.h +++ b/include/linux/input/mt.h @@ -100,7 +100,7 @@ static inline bool input_is_mt_axis(int axis) return axis == ABS_MT_SLOT || input_is_mt_value(axis); } -void input_mt_report_slot_state(struct input_dev *dev, +bool input_mt_report_slot_state(struct input_dev *dev, unsigned int tool_type, bool active); void input_mt_report_finger_count(struct input_dev *dev, int count); diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 1df940196ab2..ef169d67df92 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -121,6 +121,7 @@ #define ecap_srs(e) ((e >> 31) & 0x1) #define ecap_ers(e) ((e >> 30) & 0x1) #define ecap_prs(e) ((e >> 29) & 0x1) +#define ecap_broken_pasid(e) ((e >> 28) & 0x1) #define ecap_dis(e) ((e >> 27) & 0x1) #define ecap_nest(e) ((e >> 26) & 0x1) #define ecap_mts(e) ((e >> 25) & 0x1) diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 2803264c512f..c1961761311d 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -62,7 +62,6 @@ void *kthread_probe_data(struct task_struct *k); int kthread_park(struct task_struct *k); void kthread_unpark(struct task_struct *k); void kthread_parkme(void); -void kthread_park_complete(struct task_struct *k); int kthreadd(void *unused); extern struct task_struct *kthreadd_task; diff --git a/include/linux/libata.h b/include/linux/libata.h index 8b8946dd63b9..32f247cb5e9e 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -210,6 +210,7 @@ enum { ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ /* (doesn't imply presence) */ ATA_FLAG_SATA = (1 << 1), + ATA_FLAG_NO_LPM = (1 << 2), /* host not happy with LPM */ ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */ ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */ ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ @@ -1495,6 +1496,29 @@ static inline bool ata_tag_valid(unsigned int tag) return tag < ATA_MAX_QUEUE || ata_tag_internal(tag); } +#define __ata_qc_for_each(ap, qc, tag, max_tag, fn) \ + for ((tag) = 0; (tag) < (max_tag) && \ + ({ qc = fn((ap), (tag)); 1; }); (tag)++) \ + +/* + * Internal use only, iterate commands ignoring error handling and + * status of 'qc'. 
+ */ +#define ata_qc_for_each_raw(ap, qc, tag) \ + __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag) + +/* + * Iterate all potential commands that can be queued + */ +#define ata_qc_for_each(ap, qc, tag) \ + __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, ata_qc_from_tag) + +/* + * Like ata_qc_for_each, but with the internal tag included + */ +#define ata_qc_for_each_with_internal(ap, qc, tag) \ + __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE + 1, ata_qc_from_tag) + /* * device helpers */ diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 4f5f8c21e283..1eb6f244588d 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -27,6 +27,8 @@ */ #define MARVELL_PHY_ID_88E6390 0x01410f90 +#define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4) + /* struct phy_device dev_flags definitions */ #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 #define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002 diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index d3c9db492b30..fab5121ffb8f 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -8,6 +8,8 @@ #include <linux/mlx5/driver.h> +#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager) + enum { SRIOV_NONE, SRIOV_LEGACY, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 27134c4fcb76..ac281f5ec9b8 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -922,7 +922,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 vnic_env_queue_counters[0x1]; u8 ets[0x1]; u8 nic_flow_table[0x1]; - u8 eswitch_flow_table[0x1]; + u8 eswitch_manager[0x1]; u8 device_memory[0x1]; u8 mcam_reg[0x1]; u8 pcam_reg[0x1]; diff --git a/include/linux/mm.h b/include/linux/mm.h index a0fbb9ffe380..d3a3842316b8 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -155,7 +155,9 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *, * mmap() functions). 
*/ -extern struct kmem_cache *vm_area_cachep; +struct vm_area_struct *vm_area_alloc(struct mm_struct *); +struct vm_area_struct *vm_area_dup(struct vm_area_struct *); +void vm_area_free(struct vm_area_struct *); #ifndef CONFIG_MMU extern struct rb_root nommu_region_tree; @@ -2132,7 +2134,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn, struct mminit_pfnnid_cache *state); #endif -#ifdef CONFIG_HAVE_MEMBLOCK +#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP) void zero_resv_unavail(void); #else static inline void zero_resv_unavail(void) {} diff --git a/include/linux/net.h b/include/linux/net.h index 08b6eb964dd6..6554d3ba4396 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -147,7 +147,6 @@ struct proto_ops { int (*getname) (struct socket *sock, struct sockaddr *addr, int peer); - __poll_t (*poll_mask) (struct socket *sock, __poll_t events); __poll_t (*poll) (struct file *file, struct socket *sock, struct poll_table_struct *wait); int (*ioctl) (struct socket *sock, unsigned int cmd, diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3ec9850c7936..3d0cc0b5cec2 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2789,11 +2789,31 @@ static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, if (PTR_ERR(pp) != -EINPROGRESS) NAPI_GRO_CB(skb)->flush |= flush; } +static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, + struct sk_buff **pp, + int flush, + struct gro_remcsum *grc) +{ + if (PTR_ERR(pp) != -EINPROGRESS) { + NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_remcsum_cleanup(skb, grc); + skb->remcsum_offload = 0; + } +} #else static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) { NAPI_GRO_CB(skb)->flush |= flush; } +static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, + struct sk_buff **pp, + int flush, + struct gro_remcsum *grc) +{ + NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_remcsum_cleanup(skb, grc); + skb->remcsum_offload = 0; +} #endif static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, diff --git a/include/linux/pci.h b/include/linux/pci.h index 340029b2fb38..abd5d5e17aee 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1240,6 +1240,8 @@ int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr, unsigned long pci_address_to_pio(phys_addr_t addr); phys_addr_t pci_pio_to_address(unsigned long pio); int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); +int devm_pci_remap_iospace(struct device *dev, const struct resource *res, + phys_addr_t phys_addr); void pci_unmap_iospace(struct resource *res); void __iomem *devm_pci_remap_cfgspace(struct device *dev, resource_size_t offset, diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 9206a4fef9ac..cb8d84090cfb 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -234,7 +234,7 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); int of_genpd_parse_idle_states(struct device_node *dn, struct genpd_power_state **states, int *n); unsigned int of_genpd_opp_to_performance_state(struct device *dev, - struct device_node *opp_node); + struct device_node *np); int genpd_dev_pm_attach(struct device *dev); struct device *genpd_dev_pm_attach_by_id(struct device *dev, @@ -274,9 +274,9 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn, static inline unsigned int of_genpd_opp_to_performance_state(struct device *dev, - struct 
device_node *opp_node) + struct device_node *np) { - return -ENODEV; + return 0; } static inline int genpd_dev_pm_attach(struct device *dev) diff --git a/include/linux/poll.h b/include/linux/poll.h index fdf86b4cbc71..7e0fdcf905d2 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -74,18 +74,18 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) pt->_key = ~(__poll_t)0; /* all events enabled */ } -static inline bool file_has_poll_mask(struct file *file) +static inline bool file_can_poll(struct file *file) { - return file->f_op->get_poll_head && file->f_op->poll_mask; + return file->f_op->poll; } -static inline bool file_can_poll(struct file *file) +static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt) { - return file->f_op->poll || file_has_poll_mask(file); + if (unlikely(!file->f_op->poll)) + return DEFAULT_POLLMASK; + return file->f_op->poll(file, pt); } -__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt); - struct poll_table_entry { struct file *filp; __poll_t key; diff --git a/include/linux/rmi.h b/include/linux/rmi.h index 64125443f8a6..5ef5c7c412a7 100644 --- a/include/linux/rmi.h +++ b/include/linux/rmi.h @@ -354,6 +354,8 @@ struct rmi_driver_data { struct mutex irq_mutex; struct input_dev *input; + struct irq_domain *irqdomain; + u8 pdt_props; u8 num_rx_electrodes; diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 51f52020ad5f..093aa57120b0 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -9,9 +9,6 @@ #include <asm/io.h> struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif unsigned long page_link; unsigned int offset; unsigned int length; @@ -64,7 +61,6 @@ struct sg_table { * */ -#define SG_MAGIC 0x87654321 #define SG_CHAIN 0x01UL #define SG_END 0x02UL @@ -98,7 +94,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page) */ BUG_ON((unsigned long) page & (SG_CHAIN | SG_END)); #ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); BUG_ON(sg_is_chain(sg)); #endif sg->page_link = page_link | (unsigned long) page; @@ -129,7 +124,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page, static inline struct page *sg_page(struct scatterlist *sg) { #ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); BUG_ON(sg_is_chain(sg)); #endif return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END)); @@ -195,9 +189,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, **/ static inline void sg_mark_end(struct scatterlist *sg) { -#ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); -#endif /* * Set termination bit, clear potential chain bit */ @@ -215,9 +206,6 @@ static inline void sg_mark_end(struct scatterlist *sg) **/ static inline void sg_unmark_end(struct scatterlist *sg) { -#ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); -#endif sg->page_link &= ~SG_END; } @@ -260,12 +248,6 @@ static inline void *sg_virt(struct scatterlist *sg) static inline void sg_init_marker(struct scatterlist *sgl, unsigned int nents) { -#ifdef CONFIG_DEBUG_SG - unsigned int i; - - for (i = 0; i < nents; i++) - sgl[i].sg_magic = SG_MAGIC; -#endif sg_mark_end(&sgl[nents - 1]); } diff --git a/include/linux/sched.h b/include/linux/sched.h index 9256118bd40c..43731fe51c97 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -118,7 +118,7 @@ struct task_group; * the comment with set_special_state(). 
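Editor's aside: with ->get_poll_head and ->poll_mask removed above, file_can_poll() simply tests f_op->poll and vfs_poll() becomes a trivial inline wrapper. A minimal sketch of a caller, assuming the poll_table has already been initialised elsewhere (example_query_events is illustrative, not part of the patch):

#include <linux/poll.h>
#include <linux/fs.h>

/* Illustrative only: query readiness of a struct file we already hold.
 * vfs_poll() itself falls back to DEFAULT_POLLMASK when the file has
 * no ->poll method, so the file_can_poll() check is only needed by
 * callers that want a plain yes/no answer up front.
 */
static __poll_t example_query_events(struct file *file, poll_table *pt)
{
	if (!file_can_poll(file))
		return DEFAULT_POLLMASK;

	return vfs_poll(file, pt);	/* inline: file->f_op->poll(file, pt) */
}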
*/ #define is_special_task_state(state) \ - ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD)) + ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD)) #define __set_current_state(state_value) \ do { \ diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 5be31eb7b266..108ede99e533 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -75,7 +75,7 @@ extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); struct task_struct *fork_idle(int); extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); -extern long kernel_wait4(pid_t, int *, int, struct rusage *); +extern long kernel_wait4(pid_t, int __user *, int, struct rusage *); extern void free_task(struct task_struct *tsk); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index c86885954994..610a201126ee 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -630,6 +630,7 @@ typedef unsigned char *sk_buff_data_t; * @hash: the packet hash * @queue_mapping: Queue mapping for multiqueue devices * @xmit_more: More SKBs are pending for this queue + * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves * @ndisc_nodetype: router type (from link layer) * @ooo_okay: allow the mapping of a socket to a queue to be changed * @l4_hash: indicate hash is a canonical 4-tuple hash over transport @@ -735,7 +736,7 @@ struct sk_buff { peeked:1, head_frag:1, xmit_more:1, - __unused:1; /* one bit hole */ + pfmemalloc:1; /* fields enclosed in headers_start/headers_end are copied * using a single memcpy() in __copy_skb_header() @@ -754,31 +755,30 @@ struct sk_buff { __u8 __pkt_type_offset[0]; __u8 pkt_type:3; - __u8 pfmemalloc:1; __u8 ignore_df:1; - __u8 nf_trace:1; __u8 ip_summed:2; __u8 ooo_okay:1; + __u8 l4_hash:1; __u8 sw_hash:1; __u8 wifi_acked_valid:1; __u8 wifi_acked:1; - __u8 no_fcs:1; /* Indicates the inner headers are valid in the skbuff. 
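Editor's aside: adding TASK_PARKED to is_special_task_state() means a parking path is expected to set that state through set_special_state(), which takes pi_lock so the write cannot be clobbered by a concurrent wakeup storing TASK_RUNNING. A hedged sketch of the idiom (should_park() is a hypothetical condition standing in for the real flag test):

#include <linux/sched.h>

/* Sketch of a park loop under the new rule: the TASK_PARKED transition
 * goes through set_special_state(); the loop re-checks the condition
 * after every wakeup and only leaves once parking is no longer wanted.
 */
static void example_parkme(void)
{
	for (;;) {
		set_special_state(TASK_PARKED);
		if (!should_park())		/* hypothetical condition */
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}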
*/ __u8 encapsulation:1; __u8 encap_hdr_csum:1; __u8 csum_valid:1; + __u8 csum_complete_sw:1; __u8 csum_level:2; __u8 csum_not_inet:1; - __u8 dst_pending_confirm:1; #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif __u8 ipvs_property:1; + __u8 inner_protocol_type:1; __u8 remcsum_offload:1; #ifdef CONFIG_NET_SWITCHDEV @@ -3252,7 +3252,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, int *peeked, int *off, int *err); struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err); -__poll_t datagram_poll_mask(struct socket *sock, __poll_t events); +__poll_t datagram_poll(struct file *file, struct socket *sock, + struct poll_table_struct *wait); int skb_copy_datagram_iter(const struct sk_buff *from, int offset, struct iov_iter *to, int size); static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset, diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 09fa2c6f0e68..3a1a1dbc6f49 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -155,8 +155,12 @@ struct kmem_cache { #ifdef CONFIG_SYSFS #define SLAB_SUPPORTS_SYSFS +void sysfs_slab_unlink(struct kmem_cache *); void sysfs_slab_release(struct kmem_cache *); #else +static inline void sysfs_slab_unlink(struct kmem_cache *s) +{ +} static inline void sysfs_slab_release(struct kmem_cache *s) { } diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 73810808cdf2..5c1a0933768e 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -11,6 +11,7 @@ #ifndef _LINUX_SYSCALLS_H #define _LINUX_SYSCALLS_H +struct __aio_sigset; struct epoll_event; struct iattr; struct inode; @@ -231,6 +232,9 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event) */ #ifndef __SYSCALL_DEFINEx #define __SYSCALL_DEFINEx(x, name, ...) 
\ + __diag_push(); \ + __diag_ignore(GCC, 8, "-Wattribute-alias", \ + "Type aliasing is used to sanitize syscall arguments");\ asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ __attribute__((alias(__stringify(__se_sys##name)))); \ ALLOW_ERROR_INJECTION(sys##name, ERRNO); \ @@ -243,6 +247,7 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event) __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \ return ret; \ } \ + __diag_pop(); \ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) #endif /* __SYSCALL_DEFINEx */ diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 6c5f2074e14f..6f8b68cd460f 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -75,7 +75,7 @@ struct uio_device { struct fasync_struct *async_queue; wait_queue_head_t wait; struct uio_info *info; - spinlock_t info_lock; + struct mutex info_lock; struct kobject *map_dir; struct kobject *portio_dir; }; diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 53ce8176c313..ec9d6bc65855 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -271,7 +271,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags); int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags); -__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events); +__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait); int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo); int bt_sock_wait_ready(struct sock *sk, unsigned long flags); diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 59656fc580df..7b9c82de11cc 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -66,6 +66,12 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr) (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); } +static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i) +{ + return (f6i->fib6_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) == + RTF_GATEWAY; +} + void ip6_route_input(struct sk_buff *skb); struct dst_entry *ip6_route_input_lookup(struct net *net, struct net_device *dev, diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 16475c269749..8f73be494503 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -355,14 +355,7 @@ struct ipv6_txoptions *ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, int newtype, - struct ipv6_opt_hdr __user *newopt, - int newoptlen); -struct ipv6_txoptions * -ipv6_renew_options_kern(struct sock *sk, - struct ipv6_txoptions *opt, - int newtype, - struct ipv6_opt_hdr *newopt, - int newoptlen); + struct ipv6_opt_hdr *newopt); struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt); @@ -830,7 +823,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, * to minimize possbility that any useful information to an * attacker is leaked. Only lower 20 bits are relevant. 
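Editor's aside: the __diag_push()/__diag_ignore()/__diag_pop() calls added to __SYSCALL_DEFINEx scope a GCC 8 diagnostic override to just the macro expansion, because the syscall alias pairs prototypes whose argument types differ in signedness. A rough standalone sketch of the same pattern (example_entry/__example_impl are illustrative names):

#include <linux/compiler.h>

__diag_push();
__diag_ignore(GCC, 8, "-Wattribute-alias",
	      "Type aliasing is used to sanitize syscall arguments");

/* The alias target takes unsigned arguments while the visible symbol
 * is declared with signed ones -- exactly the mismatch GCC 8's
 * -Wattribute-alias would otherwise warn about.
 */
static long __example_impl(unsigned long a, unsigned long b)
{
	return a + b;
}

long example_entry(long a, long b)
	__attribute__((alias("__example_impl")));

__diag_pop();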
*/ - rol32(hash, 16); + hash = rol32(hash, 16); flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; @@ -1107,6 +1100,8 @@ void ipv6_sysctl_unregister(void); int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr); +int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex, + const struct in6_addr *addr, unsigned int mode); int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); #endif /* _NET_IPV6_H */ diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h index b0eaeb02d46d..f4c21b5a1242 100644 --- a/include/net/iucv/af_iucv.h +++ b/include/net/iucv/af_iucv.h @@ -153,6 +153,8 @@ struct iucv_sock_list { atomic_t autobind_name; }; +__poll_t iucv_sock_poll(struct file *file, struct socket *sock, + poll_table *wait); void iucv_sock_link(struct iucv_sock_list *l, struct sock *s); void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s); void iucv_accept_enqueue(struct sock *parent, struct sock *sk); diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 47e35cce3b64..a71264d75d7f 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -128,6 +128,7 @@ struct net { #endif #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) struct netns_nf_frag nf_frag; + struct ctl_table_header *nf_frag_frags_hdr; #endif struct sock *nfnl; struct sock *nfnl_stash; diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h index e0c0c2558ec4..a05134507e7b 100644 --- a/include/net/netfilter/nf_tables_core.h +++ b/include/net/netfilter/nf_tables_core.h @@ -65,4 +65,10 @@ extern const struct nft_expr_ops nft_payload_fast_ops; extern struct static_key_false nft_counters_enabled; extern struct static_key_false nft_trace_enabled; +extern struct nft_set_type nft_set_rhash_type; +extern struct nft_set_type nft_set_hash_type; +extern struct nft_set_type nft_set_hash_fast_type; +extern struct nft_set_type nft_set_rbtree_type; +extern struct nft_set_type nft_set_bitmap_type; + #endif /* _NET_NF_TABLES_CORE_H */ diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h index 9754a50ecde9..4cc64c8446eb 100644 --- a/include/net/netfilter/nf_tproxy.h +++ b/include/net/netfilter/nf_tproxy.h @@ -64,7 +64,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb, * belonging to established connections going through that one. 
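Editor's aside on the ip6_make_flowlabel hunk just above: rol32() is a pure function, so its result must be assigned back; the old bare call was a no-op and the flow label was derived from the unrotated hash. A minimal sketch of the fixed pattern:

#include <linux/bitops.h>

static u32 example_fold_hash(u32 hash)
{
	/* Old code wrote "rol32(hash, 16);" on its own line and discarded
	 * the return value, leaving hash unchanged. The rotation only
	 * takes effect when assigned back, e.g. 0x12345678 -> 0x56781234.
	 */
	hash = rol32(hash, 16);
	return hash;
}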
*/ struct sock * -nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp, +nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, const u8 protocol, const __be32 saddr, const __be32 daddr, const __be16 sport, const __be16 dport, @@ -103,7 +103,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff, struct sock *sk); struct sock * -nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp, +nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, const u8 protocol, const struct in6_addr *saddr, const struct in6_addr *daddr, const __be16 sport, const __be16 dport, diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index c978a31b0f84..762ac9931b62 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -109,7 +109,6 @@ struct netns_ipv6 { #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) struct netns_nf_frag { - struct netns_sysctl_ipv6 sysctl; struct netns_frags frags; }; #endif diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index a3c1a2c47cd4..20b059574e60 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -111,6 +111,11 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, { } +static inline bool tcf_block_shared(struct tcf_block *block) +{ + return false; +} + static inline struct Qdisc *tcf_block_q(struct tcf_block *block) { return NULL; diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 30b3e2fe240a..8c2caa370e0f 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -109,7 +109,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); int sctp_inet_listen(struct socket *sock, int backlog); void sctp_write_space(struct sock *sk); void sctp_data_ready(struct sock *sk); -__poll_t sctp_poll_mask(struct socket *sock, __poll_t events); +__poll_t sctp_poll(struct file *file, struct socket *sock, + poll_table *wait); void sctp_sock_rfree(struct sk_buff *skb); void sctp_copy_sock(struct sock *newsk, struct sock *sk, struct sctp_association *asoc); diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h index 9470fd7e4350..32d2454c0479 100644 --- a/include/net/tc_act/tc_csum.h +++ b/include/net/tc_act/tc_csum.h @@ -7,7 +7,6 @@ #include <linux/tc_act/tc_csum.h> struct tcf_csum_params { - int action; u32 update_flags; struct rcu_head rcu; }; diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h index efef0b4b1b2b..46b8c7f1c8d5 100644 --- a/include/net/tc_act/tc_tunnel_key.h +++ b/include/net/tc_act/tc_tunnel_key.h @@ -18,7 +18,6 @@ struct tcf_tunnel_key_params { struct rcu_head rcu; int tcft_action; - int action; struct metadata_dst *tcft_enc_metadata; }; diff --git a/include/net/tcp.h b/include/net/tcp.h index 0448e7c5d2b4..3482d13d655b 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -388,7 +388,8 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst); void tcp_close(struct sock *sk, long timeout); void tcp_init_sock(struct sock *sk); void tcp_init_transfer(struct sock *sk, int bpf_op); -__poll_t tcp_poll_mask(struct socket *sock, __poll_t events); +__poll_t tcp_poll(struct file *file, struct socket *sock, + struct poll_table_struct *wait); int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int tcp_setsockopt(struct sock *sk, int level, int optname, @@ -827,6 +828,10 @@ struct tcp_skb_cb { #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) +static inline void 
bpf_compute_data_end_sk_skb(struct sk_buff *skb) +{ + TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb); +} #if IS_ENABLED(CONFIG_IPV6) /* This is the variant of inet6_iif() that must be used by TCP, @@ -907,8 +912,6 @@ enum tcp_ca_event { CA_EVENT_LOSS, /* loss timeout */ CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */ CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */ - CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */ - CA_EVENT_NON_DELAYED_ACK, }; /* Information about inbound ACK, passed to cong_ops->in_ack_event() */ diff --git a/include/net/tls.h b/include/net/tls.h index 7f84ea3e217c..70c273777fe9 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -109,7 +109,8 @@ struct tls_sw_context_rx { struct strparser strp; void (*saved_data_ready)(struct sock *sk); - __poll_t (*sk_poll_mask)(struct socket *sock, __poll_t events); + unsigned int (*sk_poll)(struct file *file, struct socket *sock, + struct poll_table_struct *wait); struct sk_buff *recv_pkt; u8 control; bool decrypted; @@ -224,7 +225,8 @@ void tls_sw_free_resources_tx(struct sock *sk); void tls_sw_free_resources_rx(struct sock *sk); int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len); -__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events); +unsigned int tls_sw_poll(struct file *file, struct socket *sock, + struct poll_table_struct *wait); ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); diff --git a/include/net/udp.h b/include/net/udp.h index b1ea8b0f5e6a..81afdacd4fff 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -285,7 +285,7 @@ int udp_init_sock(struct sock *sk); int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); int __udp_disconnect(struct sock *sk, int flags); int udp_disconnect(struct sock *sk, int flags); -__poll_t udp_poll_mask(struct socket *sock, __poll_t events); +__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait); struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, netdev_features_t features, bool is_ipv6); diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index 9fe472f2ac95..7161856bcf9c 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -60,6 +60,10 @@ struct xdp_sock { bool zc; /* Protects multiple processes in the control path */ struct mutex mutex; + /* Mutual exclusion of NAPI TX thread and sendmsg error paths + * in the SKB destructor callback. + */ + spinlock_t tx_completion_lock; u64 rx_dropped; }; diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h index d00221345c19..d4593a6062ef 100644 --- a/include/uapi/linux/aio_abi.h +++ b/include/uapi/linux/aio_abi.h @@ -29,7 +29,6 @@ #include <linux/types.h> #include <linux/fs.h> -#include <linux/signal.h> #include <asm/byteorder.h> typedef __kernel_ulong_t aio_context_t; @@ -39,8 +38,10 @@ enum { IOCB_CMD_PWRITE = 1, IOCB_CMD_FSYNC = 2, IOCB_CMD_FDSYNC = 3, - /* 4 was the experimental IOCB_CMD_PREADX */ - IOCB_CMD_POLL = 5, + /* These two are experimental. 
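Editor's aside: bpf_compute_data_end_sk_skb() stores the end of the linear data in the TCP_SKB_CB scratch area, so SK_SKB programs see a data_end limited to skb_headlen(); the sockmap verdict and strparser paths later in this diff call it right before running the program. A hedged wrapper showing the usage pattern (example_run_sk_skb_prog is illustrative):

#include <linux/skbuff.h>
#include <linux/filter.h>
#include <net/tcp.h>

/* Illustrative only: run an SK_SKB program against an skb temporarily
 * associated with @sk, with data_end recomputed from the linear headlen
 * as bpf_compute_data_end_sk_skb() arranges.
 */
static int example_run_sk_skb_prog(struct bpf_prog *prog,
				   struct sock *sk, struct sk_buff *skb)
{
	int rc;

	skb->sk = sk;
	bpf_compute_data_end_sk_skb(skb);
	rc = BPF_PROG_RUN(prog, skb);
	skb->sk = NULL;
	return rc;
}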
+ * IOCB_CMD_PREADX = 4, + * IOCB_CMD_POLL = 5, + */ IOCB_CMD_NOOP = 6, IOCB_CMD_PREADV = 7, IOCB_CMD_PWRITEV = 8, @@ -108,10 +109,5 @@ struct iocb { #undef IFBIG #undef IFLITTLE -struct __aio_sigset { - const sigset_t __user *sigmask; - size_t sigsetsize; -}; - #endif /* __LINUX__AIO_ABI_H */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 59b19b6a40d7..b7db3261c62d 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1857,7 +1857,8 @@ union bpf_attr { * is resolved), the nexthop address is returned in ipv4_dst * or ipv6_dst based on family, smac is set to mac address of * egress device, dmac is set to nexthop mac address, rt_metric - * is set to metric from route (IPv4/IPv6 only). + * is set to metric from route (IPv4/IPv6 only), and ifindex + * is set to the device index of the nexthop from the FIB lookup. * * *plen* argument is the size of the passed in struct. * *flags* argument can be a combination of one or more of the @@ -1873,9 +1874,10 @@ union bpf_attr { * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** tc cls_act programs. * Return - * Egress device index on success, 0 if packet needs to continue - * up the stack for further processing or a negative error in case - * of failure. + * * < 0 if any input argument is invalid + * * 0 on success (packet is forwarded, nexthop neighbor exists) + * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the + * * packet is not forwarded or needs assist from full stack * * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) * Description @@ -2612,6 +2614,18 @@ struct bpf_raw_tracepoint_args { #define BPF_FIB_LOOKUP_DIRECT BIT(0) #define BPF_FIB_LOOKUP_OUTPUT BIT(1) +enum { + BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ + BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ + BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ + BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ + BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ + BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ + BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ + BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ + BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ +}; + struct bpf_fib_lookup { /* input: network family for lookup (AF_INET, AF_INET6) * output: network family of egress nexthop @@ -2625,7 +2639,11 @@ struct bpf_fib_lookup { /* total length of packet from network header - used for MTU check */ __u16 tot_len; - __u32 ifindex; /* L3 device index for lookup */ + + /* input: L3 device index for lookup + * output: device index from FIB lookup + */ + __u32 ifindex; union { /* inputs to lookup */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 4ca65b56084f..7363f18e65a5 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -226,7 +226,7 @@ enum tunable_id { ETHTOOL_TX_COPYBREAK, ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */ /* - * Add your fresh new tubale attribute above and remember to update + * Add your fresh new tunable attribute above and remember to update * tunable_strings[] in net/core/ethtool.c */ __ETHTOOL_TUNABLE_COUNT, diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h index d620fa43756c..9a402fdb60e9 100644 --- a/include/uapi/linux/rseq.h +++ b/include/uapi/linux/rseq.h @@ -10,13 +10,8 @@ * Copyright (c) 2015-2018 Mathieu Desnoyers 
<mathieu.desnoyers@efficios.com> */ -#ifdef __KERNEL__ -# include <linux/types.h> -#else -# include <stdint.h> -#endif - -#include <linux/types_32_64.h> +#include <linux/types.h> +#include <asm/byteorder.h> enum rseq_cpu_id_state { RSEQ_CPU_ID_UNINITIALIZED = -1, @@ -52,10 +47,10 @@ struct rseq_cs { __u32 version; /* enum rseq_cs_flags */ __u32 flags; - LINUX_FIELD_u32_u64(start_ip); + __u64 start_ip; /* Offset from start_ip. */ - LINUX_FIELD_u32_u64(post_commit_offset); - LINUX_FIELD_u32_u64(abort_ip); + __u64 post_commit_offset; + __u64 abort_ip; } __attribute__((aligned(4 * sizeof(__u64)))); /* @@ -67,28 +62,30 @@ struct rseq_cs { struct rseq { /* * Restartable sequences cpu_id_start field. Updated by the - * kernel, and read by user-space with single-copy atomicity - * semantics. Aligned on 32-bit. Always contains a value in the - * range of possible CPUs, although the value may not be the - * actual current CPU (e.g. if rseq is not initialized). This - * CPU number value should always be compared against the value - * of the cpu_id field before performing a rseq commit or - * returning a value read from a data structure indexed using - * the cpu_id_start value. + * kernel. Read by user-space with single-copy atomicity + * semantics. This field should only be read by the thread which + * registered this data structure. Aligned on 32-bit. Always + * contains a value in the range of possible CPUs, although the + * value may not be the actual current CPU (e.g. if rseq is not + * initialized). This CPU number value should always be compared + * against the value of the cpu_id field before performing a rseq + * commit or returning a value read from a data structure indexed + * using the cpu_id_start value. */ __u32 cpu_id_start; /* - * Restartable sequences cpu_id field. Updated by the kernel, - * and read by user-space with single-copy atomicity semantics. - * Aligned on 32-bit. Values RSEQ_CPU_ID_UNINITIALIZED and - * RSEQ_CPU_ID_REGISTRATION_FAILED have a special semantic: the - * former means "rseq uninitialized", and latter means "rseq - * initialization failed". This value is meant to be read within - * rseq critical sections and compared with the cpu_id_start - * value previously read, before performing the commit instruction, - * or read and compared with the cpu_id_start value before returning - * a value loaded from a data structure indexed using the - * cpu_id_start value. + * Restartable sequences cpu_id field. Updated by the kernel. + * Read by user-space with single-copy atomicity semantics. This + * field should only be read by the thread which registered this + * data structure. Aligned on 32-bit. Values + * RSEQ_CPU_ID_UNINITIALIZED and RSEQ_CPU_ID_REGISTRATION_FAILED + * have a special semantic: the former means "rseq uninitialized", + * and latter means "rseq initialization failed". This value is + * meant to be read within rseq critical sections and compared + * with the cpu_id_start value previously read, before performing + * the commit instruction, or read and compared with the + * cpu_id_start value before returning a value loaded from a data + * structure indexed using the cpu_id_start value. */ __u32 cpu_id; /* @@ -105,27 +102,44 @@ struct rseq { * targeted by the rseq_cs. Also needs to be set to NULL by user-space * before reclaiming memory that contains the targeted struct rseq_cs. * - * Read and set by the kernel with single-copy atomicity semantics. - * Set by user-space with single-copy atomicity semantics. Aligned - * on 64-bit. 
+ * Read and set by the kernel. Set by user-space with single-copy + * atomicity semantics. This field should only be updated by the + * thread which registered this data structure. Aligned on 64-bit. */ - LINUX_FIELD_u32_u64(rseq_cs); + union { + __u64 ptr64; +#ifdef __LP64__ + __u64 ptr; +#else + struct { +#if (defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)) || defined(__BIG_ENDIAN) + __u32 padding; /* Initialized to zero. */ + __u32 ptr32; +#else /* LITTLE */ + __u32 ptr32; + __u32 padding; /* Initialized to zero. */ +#endif /* ENDIAN */ + } ptr; +#endif + } rseq_cs; + /* - * - RSEQ_DISABLE flag: + * Restartable sequences flags field. + * + * This field should only be updated by the thread which + * registered this data structure. Read by the kernel. + * Mainly used for single-stepping through rseq critical sections + * with debuggers. * - * Fallback fast-track flag for single-stepping. - * Set by user-space if lack of progress is detected. - * Cleared by user-space after rseq finish. - * Read by the kernel. * - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT - * Inhibit instruction sequence block restart and event - * counter increment on preemption for this thread. + * Inhibit instruction sequence block restart on preemption + * for this thread. * - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL - * Inhibit instruction sequence block restart and event - * counter increment on signal delivery for this thread. + * Inhibit instruction sequence block restart on signal + * delivery for this thread. * - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE - * Inhibit instruction sequence block restart and event - * counter increment on migration for this thread. + * Inhibit instruction sequence block restart on migration for + * this thread. */ __u32 flags; } __attribute__((aligned(4 * sizeof(__u64)))); diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h index 6e299349b158..b7b57967d90f 100644 --- a/include/uapi/linux/target_core_user.h +++ b/include/uapi/linux/target_core_user.h @@ -44,6 +44,7 @@ #define TCMU_MAILBOX_VERSION 2 #define ALIGN_SIZE 64 /* Should be enough for most CPUs */ #define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */ +#define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */ struct tcmu_mailbox { __u16 version; @@ -71,6 +72,7 @@ struct tcmu_cmd_entry_hdr { __u16 cmd_id; __u8 kflags; #define TCMU_UFLAG_UNKNOWN_OP 0x1 +#define TCMU_UFLAG_READ_LEN 0x2 __u8 uflags; } __packed; @@ -119,7 +121,7 @@ struct tcmu_cmd_entry { __u8 scsi_status; __u8 __pad1; __u16 __pad2; - __u32 __pad3; + __u32 read_len; char sense_buffer[TCMU_SENSE_BUFFERSIZE]; } rsp; }; diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index 29eb659aa77a..e3f6ed8a7064 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -127,6 +127,10 @@ enum { #define TCP_CM_INQ TCP_INQ +#define TCP_REPAIR_ON 1 +#define TCP_REPAIR_OFF 0 +#define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */ + struct tcp_repair_opt { __u32 opt_code; __u32 opt_val; diff --git a/include/uapi/linux/types_32_64.h b/include/uapi/linux/types_32_64.h deleted file mode 100644 index 0a87ace34a57..000000000000 --- a/include/uapi/linux/types_32_64.h +++ /dev/null @@ -1,50 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ -#ifndef _UAPI_LINUX_TYPES_32_64_H -#define _UAPI_LINUX_TYPES_32_64_H - -/* - * linux/types_32_64.h - * - * Integer type declaration for pointers across 32-bit and 64-bit systems. 
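Editor's aside: with LINUX_FIELD_u32_u64() gone from the rseq ABI, user-space publishes the critical-section descriptor through the union shown above. A hedged user-space helper (it assumes the registered struct rseq is reachable through a TLS symbol named __rseq_abi, which is a common convention but not mandated by the header):

#include <stdint.h>
#include <linux/rseq.h>

extern __thread volatile struct rseq __rseq_abi;	/* assumed TLS symbol */

/* Publish the rseq_cs pointer. On 64-bit, ptr64 is written directly;
 * on 32-bit the ptr32/padding pair keeps the kernel-visible field a
 * zero-extended 64-bit value regardless of endianness.
 */
static inline void example_set_rseq_cs(struct rseq_cs *cs)
{
#ifdef __LP64__
	__rseq_abi.rseq_cs.ptr64 = (uint64_t)(uintptr_t)cs;
#else
	__rseq_abi.rseq_cs.ptr.padding = 0;
	__rseq_abi.rseq_cs.ptr.ptr32 = (uint32_t)(uintptr_t)cs;
#endif
}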
- * - * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> - */ - -#ifdef __KERNEL__ -# include <linux/types.h> -#else -# include <stdint.h> -#endif - -#include <asm/byteorder.h> - -#ifdef __BYTE_ORDER -# if (__BYTE_ORDER == __BIG_ENDIAN) -# define LINUX_BYTE_ORDER_BIG_ENDIAN -# else -# define LINUX_BYTE_ORDER_LITTLE_ENDIAN -# endif -#else -# ifdef __BIG_ENDIAN -# define LINUX_BYTE_ORDER_BIG_ENDIAN -# else -# define LINUX_BYTE_ORDER_LITTLE_ENDIAN -# endif -#endif - -#ifdef __LP64__ -# define LINUX_FIELD_u32_u64(field) __u64 field -# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) field = (intptr_t)v -#else -# ifdef LINUX_BYTE_ORDER_BIG_ENDIAN -# define LINUX_FIELD_u32_u64(field) __u32 field ## _padding, field -# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \ - field ## _padding = 0, field = (intptr_t)v -# else -# define LINUX_FIELD_u32_u64(field) __u32 field, field ## _padding -# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \ - field = (intptr_t)v, field ## _padding = 0 -# endif -#endif - -#endif /* _UAPI_LINUX_TYPES_32_64_H */ diff --git a/init/Kconfig b/init/Kconfig index fde3d09e8b27..041f3a022122 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1051,10 +1051,9 @@ config LD_DEAD_CODE_DATA_ELIMINATION depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION depends on EXPERT help - Select this if the architecture wants to do dead code and - data elimination with the linker by compiling with - -ffunction-sections -fdata-sections, and linking with - --gc-sections. + Enable this if you want to do dead code and data elimination with + the linker by compiling with -ffunction-sections -fdata-sections, + and linking with --gc-sections. This can reduce on disk and in-memory size of the kernel code and static data, particularly for small configs and diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 2d49d18b793a..e016ac3afa24 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -991,16 +991,13 @@ static void btf_int_bits_seq_show(const struct btf *btf, void *data, u8 bits_offset, struct seq_file *m) { + u16 left_shift_bits, right_shift_bits; u32 int_data = btf_type_int(t); u16 nr_bits = BTF_INT_BITS(int_data); u16 total_bits_offset; u16 nr_copy_bytes; u16 nr_copy_bits; - u8 nr_upper_bits; - union { - u64 u64_num; - u8 u8_nums[8]; - } print_num; + u64 print_num; total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); data += BITS_ROUNDDOWN_BYTES(total_bits_offset); @@ -1008,21 +1005,20 @@ static void btf_int_bits_seq_show(const struct btf *btf, nr_copy_bits = nr_bits + bits_offset; nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); - print_num.u64_num = 0; - memcpy(&print_num.u64_num, data, nr_copy_bytes); + print_num = 0; + memcpy(&print_num, data, nr_copy_bytes); - /* Ditch the higher order bits */ - nr_upper_bits = BITS_PER_BYTE_MASKED(nr_copy_bits); - if (nr_upper_bits) { - /* We need to mask out some bits of the upper byte. 
*/ - u8 mask = (1 << nr_upper_bits) - 1; +#ifdef __BIG_ENDIAN_BITFIELD + left_shift_bits = bits_offset; +#else + left_shift_bits = BITS_PER_U64 - nr_copy_bits; +#endif + right_shift_bits = BITS_PER_U64 - nr_bits; - print_num.u8_nums[nr_copy_bytes - 1] &= mask; - } - - print_num.u64_num >>= bits_offset; + print_num <<= left_shift_bits; + print_num >>= right_shift_bits; - seq_printf(m, "0x%llx", print_num.u64_num); + seq_printf(m, "0x%llx", print_num); } static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t, diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index f7c00bd6f8e4..3d83ee7df381 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -428,6 +428,60 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, return ret; } +int cgroup_bpf_prog_attach(const union bpf_attr *attr, + enum bpf_prog_type ptype, struct bpf_prog *prog) +{ + struct cgroup *cgrp; + int ret; + + cgrp = cgroup_get_from_fd(attr->target_fd); + if (IS_ERR(cgrp)) + return PTR_ERR(cgrp); + + ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type, + attr->attach_flags); + cgroup_put(cgrp); + return ret; +} + +int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) +{ + struct bpf_prog *prog; + struct cgroup *cgrp; + int ret; + + cgrp = cgroup_get_from_fd(attr->target_fd); + if (IS_ERR(cgrp)) + return PTR_ERR(cgrp); + + prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); + if (IS_ERR(prog)) + prog = NULL; + + ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0); + if (prog) + bpf_prog_put(prog); + + cgroup_put(cgrp); + return ret; +} + +int cgroup_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + struct cgroup *cgrp; + int ret; + + cgrp = cgroup_get_from_fd(attr->query.target_fd); + if (IS_ERR(cgrp)) + return PTR_ERR(cgrp); + + ret = cgroup_bpf_query(cgrp, attr, uattr); + + cgroup_put(cgrp); + return ret; +} + /** * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering * @sk: The socket sending or receiving traffic diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index a9e6c04d0f4a..1e5625d46414 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -598,8 +598,6 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, bpf_fill_ill_insns(hdr, size); hdr->pages = size / PAGE_SIZE; - hdr->locked = 0; - hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), PAGE_SIZE - sizeof(*hdr)); start = (get_random_int() % hole) & ~(alignment - 1); @@ -1450,22 +1448,6 @@ static int bpf_check_tail_call(const struct bpf_prog *fp) return 0; } -static int bpf_prog_check_pages_ro_locked(const struct bpf_prog *fp) -{ -#ifdef CONFIG_ARCH_HAS_SET_MEMORY - int i, err; - - for (i = 0; i < fp->aux->func_cnt; i++) { - err = bpf_prog_check_pages_ro_single(fp->aux->func[i]); - if (err) - return err; - } - - return bpf_prog_check_pages_ro_single(fp); -#endif - return 0; -} - static void bpf_prog_select_func(struct bpf_prog *fp) { #ifndef CONFIG_BPF_JIT_ALWAYS_ON @@ -1524,17 +1506,7 @@ finalize: * all eBPF JITs might immediately support all features. */ *err = bpf_check_tail_call(fp); - if (*err) - return fp; - - /* Checkpoint: at this point onwards any cBPF -> eBPF or - * native eBPF program is read-only. If we failed to change - * the page attributes (e.g. allocation failure from - * splitting large pages), then reject the whole program - * in order to guarantee not ending up with any W+X pages - * from BPF side in kernel. 
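Editor's aside: the btf_int_bits_seq_show() rewrite above replaces the byte-masking union with two shifts, which also copes with big-endian bitfield layouts. The underlying trick, shown standalone for the little-endian case (names are illustrative):

#include <linux/types.h>

#define EXAMPLE_BITS_PER_U64	64

/* Extract an nr_bits-wide field that starts bits_offset bits into the
 * copied 64-bit value: shift left to discard everything above the
 * field, then shift right to discard everything below it and
 * right-align the result. Big-endian layouts only change the left
 * shift amount, as in the patch.
 */
static u64 example_extract_bits(u64 val, u8 bits_offset, u8 nr_bits)
{
	u8 nr_copy_bits = nr_bits + bits_offset;
	u8 left = EXAMPLE_BITS_PER_U64 - nr_copy_bits;
	u8 right = EXAMPLE_BITS_PER_U64 - nr_bits;

	return (val << left) >> right;
}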
- */ - *err = bpf_prog_check_pages_ro_locked(fp); + return fp; } EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 642c97f6d1b8..d361fc1e3bf3 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -334,10 +334,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, { struct net_device *dev = dst->dev; struct xdp_frame *xdpf; + int err; if (!dev->netdev_ops->ndo_xdp_xmit) return -EOPNOTSUPP; + err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data); + if (unlikely(err)) + return err; + xdpf = convert_to_xdp_frame(xdp); if (unlikely(!xdpf)) return -EOVERFLOW; @@ -350,7 +355,7 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, { int err; - err = __xdp_generic_ok_fwd_dev(skb, dst->dev); + err = xdp_ok_fwd_dev(dst->dev, skb->len); if (unlikely(err)) return err; skb->dev = dst->dev; diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 3ca2198a6d22..513d9dfcf4ee 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -747,13 +747,15 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, * old element will be freed immediately. * Otherwise return an error */ - atomic_dec(&htab->count); - return ERR_PTR(-E2BIG); + l_new = ERR_PTR(-E2BIG); + goto dec_count; } l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN, htab->map.numa_node); - if (!l_new) - return ERR_PTR(-ENOMEM); + if (!l_new) { + l_new = ERR_PTR(-ENOMEM); + goto dec_count; + } } memcpy(l_new->key, key, key_size); @@ -766,7 +768,8 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, GFP_ATOMIC | __GFP_NOWARN); if (!pptr) { kfree(l_new); - return ERR_PTR(-ENOMEM); + l_new = ERR_PTR(-ENOMEM); + goto dec_count; } } @@ -780,6 +783,9 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, l_new->hash = hash; return l_new; +dec_count: + atomic_dec(&htab->count); + return l_new; } static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old, diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 52a91d816c0e..98fb7938beea 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -72,6 +72,7 @@ struct bpf_htab { u32 n_buckets; u32 elem_size; struct bpf_sock_progs progs; + struct rcu_head rcu; }; struct htab_elem { @@ -89,8 +90,8 @@ enum smap_psock_state { struct smap_psock_map_entry { struct list_head list; struct sock **entry; - struct htab_elem *hash_link; - struct bpf_htab *htab; + struct htab_elem __rcu *hash_link; + struct bpf_htab __rcu *htab; }; struct smap_psock { @@ -120,6 +121,7 @@ struct smap_psock { struct bpf_prog *bpf_parse; struct bpf_prog *bpf_verdict; struct list_head maps; + spinlock_t maps_lock; /* Back reference used when sock callback trigger sockmap operations */ struct sock *sock; @@ -140,6 +142,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); static int bpf_tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); +static void bpf_tcp_close(struct sock *sk, long timeout); static inline struct smap_psock *smap_psock_sk(const struct sock *sk) { @@ -161,7 +164,42 @@ out: return !empty; } -static struct proto tcp_bpf_proto; +enum { + SOCKMAP_IPV4, + SOCKMAP_IPV6, + SOCKMAP_NUM_PROTS, +}; + +enum { + SOCKMAP_BASE, + SOCKMAP_TX, + SOCKMAP_NUM_CONFIGS, +}; + +static struct proto *saved_tcpv6_prot __read_mostly; +static DEFINE_SPINLOCK(tcpv6_prot_lock); +static 
struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS]; +static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS], + struct proto *base) +{ + prot[SOCKMAP_BASE] = *base; + prot[SOCKMAP_BASE].close = bpf_tcp_close; + prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg; + prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read; + + prot[SOCKMAP_TX] = prot[SOCKMAP_BASE]; + prot[SOCKMAP_TX].sendmsg = bpf_tcp_sendmsg; + prot[SOCKMAP_TX].sendpage = bpf_tcp_sendpage; +} + +static void update_sk_prot(struct sock *sk, struct smap_psock *psock) +{ + int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4; + int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE; + + sk->sk_prot = &bpf_tcp_prots[family][conf]; +} + static int bpf_tcp_init(struct sock *sk) { struct smap_psock *psock; @@ -181,14 +219,17 @@ static int bpf_tcp_init(struct sock *sk) psock->save_close = sk->sk_prot->close; psock->sk_proto = sk->sk_prot; - if (psock->bpf_tx_msg) { - tcp_bpf_proto.sendmsg = bpf_tcp_sendmsg; - tcp_bpf_proto.sendpage = bpf_tcp_sendpage; - tcp_bpf_proto.recvmsg = bpf_tcp_recvmsg; - tcp_bpf_proto.stream_memory_read = bpf_tcp_stream_read; + /* Build IPv6 sockmap whenever the address of tcpv6_prot changes */ + if (sk->sk_family == AF_INET6 && + unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) { + spin_lock_bh(&tcpv6_prot_lock); + if (likely(sk->sk_prot != saved_tcpv6_prot)) { + build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot); + smp_store_release(&saved_tcpv6_prot, sk->sk_prot); + } + spin_unlock_bh(&tcpv6_prot_lock); } - - sk->sk_prot = &tcp_bpf_proto; + update_sk_prot(sk, psock); rcu_read_unlock(); return 0; } @@ -219,24 +260,64 @@ out: rcu_read_unlock(); } +static struct htab_elem *lookup_elem_raw(struct hlist_head *head, + u32 hash, void *key, u32 key_size) +{ + struct htab_elem *l; + + hlist_for_each_entry_rcu(l, head, hash_node) { + if (l->hash == hash && !memcmp(&l->key, key, key_size)) + return l; + } + + return NULL; +} + +static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) +{ + return &htab->buckets[hash & (htab->n_buckets - 1)]; +} + +static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash) +{ + return &__select_bucket(htab, hash)->head; +} + static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) { atomic_dec(&htab->count); kfree_rcu(l, rcu); } +static struct smap_psock_map_entry *psock_map_pop(struct sock *sk, + struct smap_psock *psock) +{ + struct smap_psock_map_entry *e; + + spin_lock_bh(&psock->maps_lock); + e = list_first_entry_or_null(&psock->maps, + struct smap_psock_map_entry, + list); + if (e) + list_del(&e->list); + spin_unlock_bh(&psock->maps_lock); + return e; +} + static void bpf_tcp_close(struct sock *sk, long timeout) { void (*close_fun)(struct sock *sk, long timeout); - struct smap_psock_map_entry *e, *tmp; + struct smap_psock_map_entry *e; struct sk_msg_buff *md, *mtmp; struct smap_psock *psock; struct sock *osk; + lock_sock(sk); rcu_read_lock(); psock = smap_psock_sk(sk); if (unlikely(!psock)) { rcu_read_unlock(); + release_sock(sk); return sk->sk_prot->close(sk, timeout); } @@ -247,7 +328,6 @@ static void bpf_tcp_close(struct sock *sk, long timeout) */ close_fun = psock->save_close; - write_lock_bh(&sk->sk_callback_lock); if (psock->cork) { free_start_sg(psock->sock, psock->cork); kfree(psock->cork); @@ -260,21 +340,40 @@ static void bpf_tcp_close(struct sock *sk, long timeout) kfree(md); } - list_for_each_entry_safe(e, tmp, &psock->maps, list) { + e = psock_map_pop(sk, 
psock); + while (e) { if (e->entry) { osk = cmpxchg(e->entry, sk, NULL); if (osk == sk) { - list_del(&e->list); smap_release_sock(psock, sk); } } else { - hlist_del_rcu(&e->hash_link->hash_node); - smap_release_sock(psock, e->hash_link->sk); - free_htab_elem(e->htab, e->hash_link); + struct htab_elem *link = rcu_dereference(e->hash_link); + struct bpf_htab *htab = rcu_dereference(e->htab); + struct hlist_head *head; + struct htab_elem *l; + struct bucket *b; + + b = __select_bucket(htab, link->hash); + head = &b->head; + raw_spin_lock_bh(&b->lock); + l = lookup_elem_raw(head, + link->hash, link->key, + htab->map.key_size); + /* If another thread deleted this object skip deletion. + * The refcnt on psock may or may not be zero. + */ + if (l) { + hlist_del_rcu(&link->hash_node); + smap_release_sock(psock, link->sk); + free_htab_elem(htab, link); + } + raw_spin_unlock_bh(&b->lock); } + e = psock_map_pop(sk, psock); } - write_unlock_bh(&sk->sk_callback_lock); rcu_read_unlock(); + release_sock(sk); close_fun(sk, timeout); } @@ -472,7 +571,8 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md) while (sg[i].length) { free += sg[i].length; sk_mem_uncharge(sk, sg[i].length); - put_page(sg_page(&sg[i])); + if (!md->skb) + put_page(sg_page(&sg[i])); sg[i].length = 0; sg[i].page_link = 0; sg[i].offset = 0; @@ -481,6 +581,8 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md) if (i == MAX_SKB_FRAGS) i = 0; } + if (md->skb) + consume_skb(md->skb); return free; } @@ -1111,8 +1213,7 @@ static void bpf_tcp_msg_add(struct smap_psock *psock, static int bpf_tcp_ulp_register(void) { - tcp_bpf_proto = tcp_prot; - tcp_bpf_proto.close = bpf_tcp_close; + build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot); /* Once BPF TX ULP is registered it is never unregistered. It * will be in the ULP list for the lifetime of the system. Doing * duplicate registers is not a problem. @@ -1135,7 +1236,7 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb) */ TCP_SKB_CB(skb)->bpf.sk_redir = NULL; skb->sk = psock->sock; - bpf_compute_data_pointers(skb); + bpf_compute_data_end_sk_skb(skb); preempt_disable(); rc = (*prog->bpf_func)(skb, prog->insnsi); preempt_enable(); @@ -1357,7 +1458,9 @@ static void smap_release_sock(struct smap_psock *psock, struct sock *sock) { if (refcount_dec_and_test(&psock->refcnt)) { tcp_cleanup_ulp(sock); + write_lock_bh(&sock->sk_callback_lock); smap_stop_sock(psock, sock); + write_unlock_bh(&sock->sk_callback_lock); clear_bit(SMAP_TX_RUNNING, &psock->state); rcu_assign_sk_user_data(sock, NULL); call_rcu_sched(&psock->rcu, smap_destroy_psock); @@ -1388,7 +1491,7 @@ static int smap_parse_func_strparser(struct strparser *strp, * any socket yet. 
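Editor's aside: bpf_tcp_close() above stops walking psock->maps with list_for_each_entry_safe() under sk_callback_lock and instead pops one entry at a time under the new psock->maps_lock, so each entry can be processed (including taking bucket locks) without the list lock held. The idiom in isolation, with simplified placeholder types:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct example_entry {
	struct list_head list;
	/* ... payload ... */
};

/* Pop the first entry off a spinlock-protected list, or NULL if empty. */
static struct example_entry *example_pop(spinlock_t *lock,
					 struct list_head *head)
{
	struct example_entry *e;

	spin_lock_bh(lock);
	e = list_first_entry_or_null(head, struct example_entry, list);
	if (e)
		list_del(&e->list);
	spin_unlock_bh(lock);
	return e;
}

/* Drain loop: the list lock is never held while an entry is processed. */
static void example_drain(spinlock_t *lock, struct list_head *head)
{
	struct example_entry *e;

	while ((e = example_pop(lock, head)) != NULL) {
		/* release whatever e references, then free it */
		kfree(e);
	}
}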
*/ skb->sk = psock->sock; - bpf_compute_data_pointers(skb); + bpf_compute_data_end_sk_skb(skb); rc = (*prog->bpf_func)(skb, prog->insnsi); skb->sk = NULL; rcu_read_unlock(); @@ -1508,6 +1611,7 @@ static struct smap_psock *smap_init_psock(struct sock *sock, int node) INIT_LIST_HEAD(&psock->maps); INIT_LIST_HEAD(&psock->ingress); refcount_set(&psock->refcnt, 1); + spin_lock_init(&psock->maps_lock); rcu_assign_sk_user_data(sock, psock); sock_hold(sock); @@ -1564,18 +1668,32 @@ free_stab: return ERR_PTR(err); } -static void smap_list_remove(struct smap_psock *psock, - struct sock **entry, - struct htab_elem *hash_link) +static void smap_list_map_remove(struct smap_psock *psock, + struct sock **entry) { struct smap_psock_map_entry *e, *tmp; + spin_lock_bh(&psock->maps_lock); list_for_each_entry_safe(e, tmp, &psock->maps, list) { - if (e->entry == entry || e->hash_link == hash_link) { + if (e->entry == entry) + list_del(&e->list); + } + spin_unlock_bh(&psock->maps_lock); +} + +static void smap_list_hash_remove(struct smap_psock *psock, + struct htab_elem *hash_link) +{ + struct smap_psock_map_entry *e, *tmp; + + spin_lock_bh(&psock->maps_lock); + list_for_each_entry_safe(e, tmp, &psock->maps, list) { + struct htab_elem *c = rcu_dereference(e->hash_link); + + if (c == hash_link) list_del(&e->list); - break; - } } + spin_unlock_bh(&psock->maps_lock); } static void sock_map_free(struct bpf_map *map) @@ -1601,7 +1719,6 @@ static void sock_map_free(struct bpf_map *map) if (!sock) continue; - write_lock_bh(&sock->sk_callback_lock); psock = smap_psock_sk(sock); /* This check handles a racing sock event that can get the * sk_callback_lock before this case but after xchg happens @@ -1609,10 +1726,9 @@ static void sock_map_free(struct bpf_map *map) * to be null and queued for garbage collection. */ if (likely(psock)) { - smap_list_remove(psock, &stab->sock_map[i], NULL); + smap_list_map_remove(psock, &stab->sock_map[i]); smap_release_sock(psock, sock); } - write_unlock_bh(&sock->sk_callback_lock); } rcu_read_unlock(); @@ -1661,17 +1777,15 @@ static int sock_map_delete_elem(struct bpf_map *map, void *key) if (!sock) return -EINVAL; - write_lock_bh(&sock->sk_callback_lock); psock = smap_psock_sk(sock); if (!psock) goto out; if (psock->bpf_parse) smap_stop_sock(psock, sock); - smap_list_remove(psock, &stab->sock_map[k], NULL); + smap_list_map_remove(psock, &stab->sock_map[k]); smap_release_sock(psock, sock); out: - write_unlock_bh(&sock->sk_callback_lock); return 0; } @@ -1752,7 +1866,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, } } - write_lock_bh(&sock->sk_callback_lock); psock = smap_psock_sk(sock); /* 2. Do not allow inheriting programs if psock exists and has @@ -1789,7 +1902,7 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN); if (!e) { err = -ENOMEM; - goto out_progs; + goto out_free; } } @@ -1809,7 +1922,9 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, if (err) goto out_free; smap_init_progs(psock, verdict, parse); + write_lock_bh(&sock->sk_callback_lock); smap_start_sock(psock, sock); + write_unlock_bh(&sock->sk_callback_lock); } /* 4. 
Place psock in sockmap for use and stop any programs on @@ -1819,9 +1934,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, */ if (map_link) { e->entry = map_link; + spin_lock_bh(&psock->maps_lock); list_add_tail(&e->list, &psock->maps); + spin_unlock_bh(&psock->maps_lock); } - write_unlock_bh(&sock->sk_callback_lock); return err; out_free: smap_release_sock(psock, sock); @@ -1832,7 +1948,6 @@ out_progs: } if (tx_msg) bpf_prog_put(tx_msg); - write_unlock_bh(&sock->sk_callback_lock); kfree(e); return err; } @@ -1869,10 +1984,8 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops, if (osock) { struct smap_psock *opsock = smap_psock_sk(osock); - write_lock_bh(&osock->sk_callback_lock); - smap_list_remove(opsock, &stab->sock_map[i], NULL); + smap_list_map_remove(opsock, &stab->sock_map[i]); smap_release_sock(opsock, osock); - write_unlock_bh(&osock->sk_callback_lock); } out: return err; @@ -1915,6 +2028,24 @@ int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type) return 0; } +int sockmap_get_from_fd(const union bpf_attr *attr, int type, + struct bpf_prog *prog) +{ + int ufd = attr->target_fd; + struct bpf_map *map; + struct fd f; + int err; + + f = fdget(ufd); + map = __bpf_map_get(f); + if (IS_ERR(map)) + return PTR_ERR(map); + + err = sock_map_prog(map, prog, attr->attach_type); + fdput(f); + return err; +} + static void *sock_map_lookup(struct bpf_map *map, void *key) { return NULL; @@ -1944,7 +2075,13 @@ static int sock_map_update_elem(struct bpf_map *map, return -EOPNOTSUPP; } + lock_sock(skops.sk); + preempt_disable(); + rcu_read_lock(); err = sock_map_ctx_update_elem(&skops, map, key, flags); + rcu_read_unlock(); + preempt_enable(); + release_sock(skops.sk); fput(socket->file); return err; } @@ -2043,14 +2180,13 @@ free_htab: return ERR_PTR(err); } -static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) +static void __bpf_htab_free(struct rcu_head *rcu) { - return &htab->buckets[hash & (htab->n_buckets - 1)]; -} + struct bpf_htab *htab; -static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash) -{ - return &__select_bucket(htab, hash)->head; + htab = container_of(rcu, struct bpf_htab, rcu); + bpf_map_area_free(htab->buckets); + kfree(htab); } static void sock_hash_free(struct bpf_map *map) @@ -2069,16 +2205,18 @@ static void sock_hash_free(struct bpf_map *map) */ rcu_read_lock(); for (i = 0; i < htab->n_buckets; i++) { - struct hlist_head *head = select_bucket(htab, i); + struct bucket *b = __select_bucket(htab, i); + struct hlist_head *head; struct hlist_node *n; struct htab_elem *l; + raw_spin_lock_bh(&b->lock); + head = &b->head; hlist_for_each_entry_safe(l, n, head, hash_node) { struct sock *sock = l->sk; struct smap_psock *psock; hlist_del_rcu(&l->hash_node); - write_lock_bh(&sock->sk_callback_lock); psock = smap_psock_sk(sock); /* This check handles a racing sock event that can get * the sk_callback_lock before this case but after xchg @@ -2086,16 +2224,15 @@ static void sock_hash_free(struct bpf_map *map) * (psock) to be null and queued for garbage collection. 
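Editor's aside: sock_map_update_elem() and sock_hash_update_elem() in this diff wrap the context update in lock_sock()/preempt_disable()/rcu_read_lock(), taken in that order and released in reverse, because lock_sock() may sleep and therefore has to come before the non-sleeping contexts are entered. A hedged sketch of the ordering (example_locked_update and the callback are illustrative):

#include <net/sock.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>

/* Illustrative ordering only: the sleeping lock first, then the
 * non-sleeping contexts, unwinding in reverse. The update callback
 * must not sleep once preemption is disabled.
 */
static int example_locked_update(struct sock *sk,
				 int (*update)(struct sock *sk))
{
	int err;

	lock_sock(sk);
	preempt_disable();
	rcu_read_lock();
	err = update(sk);
	rcu_read_unlock();
	preempt_enable();
	release_sock(sk);
	return err;
}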
*/ if (likely(psock)) { - smap_list_remove(psock, NULL, l); + smap_list_hash_remove(psock, l); smap_release_sock(psock, sock); } - write_unlock_bh(&sock->sk_callback_lock); - kfree(l); + free_htab_elem(htab, l); } + raw_spin_unlock_bh(&b->lock); } rcu_read_unlock(); - bpf_map_area_free(htab->buckets); - kfree(htab); + call_rcu(&htab->rcu, __bpf_htab_free); } static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab, @@ -2122,19 +2259,6 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab, return l_new; } -static struct htab_elem *lookup_elem_raw(struct hlist_head *head, - u32 hash, void *key, u32 key_size) -{ - struct htab_elem *l; - - hlist_for_each_entry_rcu(l, head, hash_node) { - if (l->hash == hash && !memcmp(&l->key, key, key_size)) - return l; - } - - return NULL; -} - static inline u32 htab_map_hash(const void *key, u32 key_len) { return jhash(key, key_len, 0); @@ -2230,7 +2354,10 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops, if (err) goto err; - /* bpf_map_update_elem() can be called in_irq() */ + /* psock is valid here because otherwise above *ctx_update_elem would + * have thrown an error. It is safe to skip error check. + */ + psock = smap_psock_sk(sock); raw_spin_lock_bh(&b->lock); l_old = lookup_elem_raw(head, hash, key, key_size); if (l_old && map_flags == BPF_NOEXIST) { @@ -2248,15 +2375,12 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops, goto bucket_err; } - psock = smap_psock_sk(sock); - if (unlikely(!psock)) { - err = -EINVAL; - goto bucket_err; - } - - e->hash_link = l_new; - e->htab = container_of(map, struct bpf_htab, map); + rcu_assign_pointer(e->hash_link, l_new); + rcu_assign_pointer(e->htab, + container_of(map, struct bpf_htab, map)); + spin_lock_bh(&psock->maps_lock); list_add_tail(&e->list, &psock->maps); + spin_unlock_bh(&psock->maps_lock); /* add new element to the head of the list, so that * concurrent search will find it before old elem @@ -2266,19 +2390,17 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops, psock = smap_psock_sk(l_old->sk); hlist_del_rcu(&l_old->hash_node); - smap_list_remove(psock, NULL, l_old); + smap_list_hash_remove(psock, l_old); smap_release_sock(psock, l_old->sk); free_htab_elem(htab, l_old); } raw_spin_unlock_bh(&b->lock); return 0; bucket_err: + smap_release_sock(psock, sock); raw_spin_unlock_bh(&b->lock); err: kfree(e); - psock = smap_psock_sk(sock); - if (psock) - smap_release_sock(psock, sock); return err; } @@ -2300,7 +2422,13 @@ static int sock_hash_update_elem(struct bpf_map *map, return -EINVAL; } + lock_sock(skops.sk); + preempt_disable(); + rcu_read_lock(); err = sock_hash_ctx_update_elem(&skops, map, key, flags); + rcu_read_unlock(); + preempt_enable(); + release_sock(skops.sk); fput(socket->file); return err; } @@ -2326,7 +2454,6 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key) struct smap_psock *psock; hlist_del_rcu(&l->hash_node); - write_lock_bh(&sock->sk_callback_lock); psock = smap_psock_sk(sock); /* This check handles a racing sock event that can get the * sk_callback_lock before this case but after xchg happens @@ -2334,10 +2461,9 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key) * to be null and queued for garbage collection. 
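Editor's aside: sock_hash_free() no longer frees the bucket array synchronously; it defers the whole bpf_htab through the rcu_head added earlier in this file, so lookups done under rcu_read_lock() (note that __sock_hash_lookup_elem() drops its bucket lock in this diff) cannot touch freed memory. The general shape, with illustrative names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_table {
	void *buckets;
	struct rcu_head rcu;	/* embedded so call_rcu() can find us */
};

static void example_table_free_rcu(struct rcu_head *rcu)
{
	struct example_table *t = container_of(rcu, struct example_table, rcu);

	kfree(t->buckets);
	kfree(t);
}

static void example_table_destroy(struct example_table *t)
{
	/* Unlink the table from everything that can reach it first, then
	 * let a grace period elapse before the memory is reclaimed.
	 */
	call_rcu(&t->rcu, example_table_free_rcu);
}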
*/ if (likely(psock)) { - smap_list_remove(psock, NULL, l); + smap_list_hash_remove(psock, l); smap_release_sock(psock, sock); } - write_unlock_bh(&sock->sk_callback_lock); free_htab_elem(htab, l); ret = 0; } @@ -2359,10 +2485,8 @@ struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key) b = __select_bucket(htab, hash); head = &b->head; - raw_spin_lock_bh(&b->lock); l = lookup_elem_raw(head, hash, key, key_size); sk = l ? l->sk : NULL; - raw_spin_unlock_bh(&b->lock); return sk; } @@ -2383,6 +2507,7 @@ const struct bpf_map_ops sock_hash_ops = { .map_get_next_key = sock_hash_get_next_key, .map_update_elem = sock_hash_update_elem, .map_delete_elem = sock_hash_delete_elem, + .map_release_uref = sock_map_release, }; BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 35dc466641f2..a31a1ba0f8ea 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -735,7 +735,9 @@ static int map_update_elem(union bpf_attr *attr) if (bpf_map_is_dev_bound(map)) { err = bpf_map_offload_update_elem(map, key, value, attr->flags); goto out; - } else if (map->map_type == BPF_MAP_TYPE_CPUMAP) { + } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || + map->map_type == BPF_MAP_TYPE_SOCKHASH || + map->map_type == BPF_MAP_TYPE_SOCKMAP) { err = map->ops->map_update_elem(map, key, value, attr->flags); goto out; } @@ -1483,8 +1485,6 @@ out_free_tp: return err; } -#ifdef CONFIG_CGROUP_BPF - static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, enum bpf_attach_type attach_type) { @@ -1499,40 +1499,6 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, #define BPF_PROG_ATTACH_LAST_FIELD attach_flags -static int sockmap_get_from_fd(const union bpf_attr *attr, - int type, bool attach) -{ - struct bpf_prog *prog = NULL; - int ufd = attr->target_fd; - struct bpf_map *map; - struct fd f; - int err; - - f = fdget(ufd); - map = __bpf_map_get(f); - if (IS_ERR(map)) - return PTR_ERR(map); - - if (attach) { - prog = bpf_prog_get_type(attr->attach_bpf_fd, type); - if (IS_ERR(prog)) { - fdput(f); - return PTR_ERR(prog); - } - } - - err = sock_map_prog(map, prog, attr->attach_type); - if (err) { - fdput(f); - if (prog) - bpf_prog_put(prog); - return err; - } - - fdput(f); - return 0; -} - #define BPF_F_ATTACH_MASK \ (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI) @@ -1540,7 +1506,6 @@ static int bpf_prog_attach(const union bpf_attr *attr) { enum bpf_prog_type ptype; struct bpf_prog *prog; - struct cgroup *cgrp; int ret; if (!capable(CAP_NET_ADMIN)) @@ -1577,12 +1542,15 @@ static int bpf_prog_attach(const union bpf_attr *attr) ptype = BPF_PROG_TYPE_CGROUP_DEVICE; break; case BPF_SK_MSG_VERDICT: - return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, true); + ptype = BPF_PROG_TYPE_SK_MSG; + break; case BPF_SK_SKB_STREAM_PARSER: case BPF_SK_SKB_STREAM_VERDICT: - return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, true); + ptype = BPF_PROG_TYPE_SK_SKB; + break; case BPF_LIRC_MODE2: - return lirc_prog_attach(attr); + ptype = BPF_PROG_TYPE_LIRC_MODE2; + break; default: return -EINVAL; } @@ -1596,18 +1564,20 @@ static int bpf_prog_attach(const union bpf_attr *attr) return -EINVAL; } - cgrp = cgroup_get_from_fd(attr->target_fd); - if (IS_ERR(cgrp)) { - bpf_prog_put(prog); - return PTR_ERR(cgrp); + switch (ptype) { + case BPF_PROG_TYPE_SK_SKB: + case BPF_PROG_TYPE_SK_MSG: + ret = sockmap_get_from_fd(attr, ptype, prog); + break; + case BPF_PROG_TYPE_LIRC_MODE2: + ret = lirc_prog_attach(attr, prog); + break; + 
default: + ret = cgroup_bpf_prog_attach(attr, ptype, prog); } - ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type, - attr->attach_flags); if (ret) bpf_prog_put(prog); - cgroup_put(cgrp); - return ret; } @@ -1616,9 +1586,6 @@ static int bpf_prog_attach(const union bpf_attr *attr) static int bpf_prog_detach(const union bpf_attr *attr) { enum bpf_prog_type ptype; - struct bpf_prog *prog; - struct cgroup *cgrp; - int ret; if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -1651,29 +1618,17 @@ static int bpf_prog_detach(const union bpf_attr *attr) ptype = BPF_PROG_TYPE_CGROUP_DEVICE; break; case BPF_SK_MSG_VERDICT: - return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, false); + return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, NULL); case BPF_SK_SKB_STREAM_PARSER: case BPF_SK_SKB_STREAM_VERDICT: - return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, false); + return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, NULL); case BPF_LIRC_MODE2: return lirc_prog_detach(attr); default: return -EINVAL; } - cgrp = cgroup_get_from_fd(attr->target_fd); - if (IS_ERR(cgrp)) - return PTR_ERR(cgrp); - - prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); - if (IS_ERR(prog)) - prog = NULL; - - ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0); - if (prog) - bpf_prog_put(prog); - cgroup_put(cgrp); - return ret; + return cgroup_bpf_prog_detach(attr, ptype); } #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt @@ -1681,9 +1636,6 @@ static int bpf_prog_detach(const union bpf_attr *attr) static int bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { - struct cgroup *cgrp; - int ret; - if (!capable(CAP_NET_ADMIN)) return -EPERM; if (CHECK_ATTR(BPF_PROG_QUERY)) @@ -1711,14 +1663,9 @@ static int bpf_prog_query(const union bpf_attr *attr, default: return -EINVAL; } - cgrp = cgroup_get_from_fd(attr->query.target_fd); - if (IS_ERR(cgrp)) - return PTR_ERR(cgrp); - ret = cgroup_bpf_query(cgrp, attr, uattr); - cgroup_put(cgrp); - return ret; + + return cgroup_bpf_prog_query(attr, uattr); } -#endif /* CONFIG_CGROUP_BPF */ #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration @@ -2365,7 +2312,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz case BPF_OBJ_GET: err = bpf_obj_get(&attr); break; -#ifdef CONFIG_CGROUP_BPF case BPF_PROG_ATTACH: err = bpf_prog_attach(&attr); break; @@ -2375,7 +2321,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz case BPF_PROG_QUERY: err = bpf_prog_query(&attr, uattr); break; -#endif case BPF_PROG_TEST_RUN: err = bpf_prog_test_run(&attr, uattr); break; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 9e2bf834f13a..63aaac52a265 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5430,6 +5430,10 @@ static int jit_subprogs(struct bpf_verifier_env *env) if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; + /* Upon error here we cannot fall back to interpreter but + * need a hard reject of the program. Thus -EFAULT is + * propagated in any case. + */ subprog = find_subprog(env, i + insn->imm + 1); if (subprog < 0) { WARN_ONCE(1, "verifier bug. 
No program starts at insn %d\n", @@ -5450,7 +5454,7 @@ static int jit_subprogs(struct bpf_verifier_env *env) func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); if (!func) - return -ENOMEM; + goto out_undo_insn; for (i = 0; i < env->subprog_cnt; i++) { subprog_start = subprog_end; @@ -5515,7 +5519,7 @@ static int jit_subprogs(struct bpf_verifier_env *env) tmp = bpf_int_jit_compile(func[i]); if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); - err = -EFAULT; + err = -ENOTSUPP; goto out_free; } cond_resched(); @@ -5552,6 +5556,7 @@ out_free: if (func[i]) bpf_jit_free(func[i]); kfree(func); +out_undo_insn: /* cleanup main prog to be interpreted */ prog->jit_requested = 0; for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { @@ -5578,6 +5583,8 @@ static int fixup_call_args(struct bpf_verifier_env *env) err = jit_subprogs(env); if (err == 0) return 0; + if (err == -EFAULT) + return err; } #ifndef CONFIG_BPF_JIT_ALWAYS_ON for (i = 0; i < prog->len; i++, insn++) { diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 04b68d9dffac..904541055792 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -1085,3 +1085,4 @@ const struct dma_map_ops swiotlb_dma_ops = { .unmap_page = swiotlb_unmap_page, .dma_supported = dma_direct_supported, }; +EXPORT_SYMBOL(swiotlb_dma_ops); diff --git a/kernel/events/core.c b/kernel/events/core.c index 80cca2b30c4f..8f0434a9951a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6482,7 +6482,7 @@ void perf_prepare_sample(struct perf_event_header *header, data->phys_addr = perf_virt_to_phys(data->addr); } -static void __always_inline +static __always_inline void __perf_event_output(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs, diff --git a/kernel/fork.c b/kernel/fork.c index 9440d61b925c..a191c05e757d 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -303,11 +303,38 @@ struct kmem_cache *files_cachep; struct kmem_cache *fs_cachep; /* SLAB cache for vm_area_struct structures */ -struct kmem_cache *vm_area_cachep; +static struct kmem_cache *vm_area_cachep; /* SLAB cache for mm_struct structures (tsk->mm) */ static struct kmem_cache *mm_cachep; +struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) +{ + struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + + if (vma) { + vma->vm_mm = mm; + INIT_LIST_HEAD(&vma->anon_vma_chain); + } + return vma; +} + +struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) +{ + struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + + if (new) { + *new = *orig; + INIT_LIST_HEAD(&new->anon_vma_chain); + } + return new; +} + +void vm_area_free(struct vm_area_struct *vma) +{ + kmem_cache_free(vm_area_cachep, vma); +} + static void account_kernel_stack(struct task_struct *tsk, int account) { void *stack = task_stack_page(tsk); @@ -455,11 +482,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, goto fail_nomem; charge = len; } - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + tmp = vm_area_dup(mpnt); if (!tmp) goto fail_nomem; - *tmp = *mpnt; - INIT_LIST_HEAD(&tmp->anon_vma_chain); retval = vma_dup_policy(mpnt, tmp); if (retval) goto fail_nomem_policy; @@ -539,7 +564,7 @@ fail_uprobe_end: fail_nomem_anon_vma_fork: mpol_put(vma_policy(tmp)); fail_nomem_policy: - kmem_cache_free(vm_area_cachep, tmp); + vm_area_free(tmp); fail_nomem: retval = -ENOMEM; vm_unacct_memory(charge); diff --git a/kernel/kthread.c b/kernel/kthread.c index 
481951bf091d..750cb8082694 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -177,9 +177,20 @@ void *kthread_probe_data(struct task_struct *task) static void __kthread_parkme(struct kthread *self) { for (;;) { - set_current_state(TASK_PARKED); + /* + * TASK_PARKED is a special state; we must serialize against + * possible pending wakeups to avoid store-store collisions on + * task->state. + * + * Such a collision might possibly result in the task state + * changin from TASK_PARKED and us failing the + * wait_task_inactive() in kthread_park(). + */ + set_special_state(TASK_PARKED); if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags)) break; + + complete_all(&self->parked); schedule(); } __set_current_state(TASK_RUNNING); @@ -191,11 +202,6 @@ void kthread_parkme(void) } EXPORT_SYMBOL_GPL(kthread_parkme); -void kthread_park_complete(struct task_struct *k) -{ - complete_all(&to_kthread(k)->parked); -} - static int kthread(void *_create) { /* Copy data: it's on kthread's stack */ @@ -461,6 +467,9 @@ void kthread_unpark(struct task_struct *k) reinit_completion(&kthread->parked); clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); + /* + * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup. + */ wake_up_state(k, TASK_PARKED); } EXPORT_SYMBOL_GPL(kthread_unpark); @@ -487,7 +496,16 @@ int kthread_park(struct task_struct *k) set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); if (k != current) { wake_up_process(k); + /* + * Wait for __kthread_parkme() to complete(), this means we + * _will_ have TASK_PARKED and are about to call schedule(). + */ wait_for_completion(&kthread->parked); + /* + * Now wait for that schedule() to complete and the task to + * get scheduled out. + */ + WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED)); } return 0; diff --git a/kernel/rseq.c b/kernel/rseq.c index 22b6acf1ad63..c6242d8594dc 100644 --- a/kernel/rseq.c +++ b/kernel/rseq.c @@ -85,9 +85,9 @@ static int rseq_update_cpu_id(struct task_struct *t) { u32 cpu_id = raw_smp_processor_id(); - if (__put_user(cpu_id, &t->rseq->cpu_id_start)) + if (put_user(cpu_id, &t->rseq->cpu_id_start)) return -EFAULT; - if (__put_user(cpu_id, &t->rseq->cpu_id)) + if (put_user(cpu_id, &t->rseq->cpu_id)) return -EFAULT; trace_rseq_update(t); return 0; @@ -100,14 +100,14 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t) /* * Reset cpu_id_start to its initial state (0). */ - if (__put_user(cpu_id_start, &t->rseq->cpu_id_start)) + if (put_user(cpu_id_start, &t->rseq->cpu_id_start)) return -EFAULT; /* * Reset cpu_id to RSEQ_CPU_ID_UNINITIALIZED, so any user coming * in after unregistration can figure out that rseq needs to be * registered again. 
*/ - if (__put_user(cpu_id, &t->rseq->cpu_id)) + if (put_user(cpu_id, &t->rseq->cpu_id)) return -EFAULT; return 0; } @@ -115,29 +115,36 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t) static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) { struct rseq_cs __user *urseq_cs; - unsigned long ptr; + u64 ptr; u32 __user *usig; u32 sig; int ret; - ret = __get_user(ptr, &t->rseq->rseq_cs); - if (ret) - return ret; + if (copy_from_user(&ptr, &t->rseq->rseq_cs.ptr64, sizeof(ptr))) + return -EFAULT; if (!ptr) { memset(rseq_cs, 0, sizeof(*rseq_cs)); return 0; } - urseq_cs = (struct rseq_cs __user *)ptr; + if (ptr >= TASK_SIZE) + return -EINVAL; + urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr; if (copy_from_user(rseq_cs, urseq_cs, sizeof(*rseq_cs))) return -EFAULT; - if (rseq_cs->version > 0) - return -EINVAL; + if (rseq_cs->start_ip >= TASK_SIZE || + rseq_cs->start_ip + rseq_cs->post_commit_offset >= TASK_SIZE || + rseq_cs->abort_ip >= TASK_SIZE || + rseq_cs->version > 0) + return -EINVAL; + /* Check for overflow. */ + if (rseq_cs->start_ip + rseq_cs->post_commit_offset < rseq_cs->start_ip) + return -EINVAL; /* Ensure that abort_ip is not in the critical section. */ if (rseq_cs->abort_ip - rseq_cs->start_ip < rseq_cs->post_commit_offset) return -EINVAL; - usig = (u32 __user *)(rseq_cs->abort_ip - sizeof(u32)); + usig = (u32 __user *)(unsigned long)(rseq_cs->abort_ip - sizeof(u32)); ret = get_user(sig, usig); if (ret) return ret; @@ -146,7 +153,7 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) printk_ratelimited(KERN_WARNING "Possible attack attempt. Unexpected rseq signature 0x%x, expecting 0x%x (pid=%d, addr=%p).\n", sig, current->rseq_sig, current->pid, usig); - return -EPERM; + return -EINVAL; } return 0; } @@ -157,7 +164,7 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags) int ret; /* Get thread flags. */ - ret = __get_user(flags, &t->rseq->flags); + ret = get_user(flags, &t->rseq->flags); if (ret) return ret; @@ -195,9 +202,11 @@ static int clear_rseq_cs(struct task_struct *t) * of code outside of the rseq assembly block. This performs * a lazy clear of the rseq_cs field. * - * Set rseq_cs to NULL with single-copy atomicity. + * Set rseq_cs to NULL. */ - return __put_user(0UL, &t->rseq->rseq_cs); + if (clear_user(&t->rseq->rseq_cs.ptr64, sizeof(t->rseq->rseq_cs.ptr64))) + return -EFAULT; + return 0; } /* diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 78d8facba456..fe365c9a08e9 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7,7 +7,6 @@ */ #include "sched.h" -#include <linux/kthread.h> #include <linux/nospec.h> #include <linux/kcov.h> @@ -2724,28 +2723,20 @@ static struct rq *finish_task_switch(struct task_struct *prev) membarrier_mm_sync_core_before_usermode(mm); mmdrop(mm); } - if (unlikely(prev_state & (TASK_DEAD|TASK_PARKED))) { - switch (prev_state) { - case TASK_DEAD: - if (prev->sched_class->task_dead) - prev->sched_class->task_dead(prev); + if (unlikely(prev_state == TASK_DEAD)) { + if (prev->sched_class->task_dead) + prev->sched_class->task_dead(prev); - /* - * Remove function-return probe instances associated with this - * task and put them back on the free list. - */ - kprobe_flush_task(prev); - - /* Task is done with its stack. */ - put_task_stack(prev); + /* + * Remove function-return probe instances associated with this + * task and put them back on the free list. + */ + kprobe_flush_task(prev); - put_task_struct(prev); - break; + /* Task is done with its stack. 
*/ + put_task_stack(prev); - case TASK_PARKED: - kthread_park_complete(prev); - break; - } + put_task_struct(prev); } tick_nohz_task_switch(); @@ -3113,7 +3104,9 @@ static void sched_tick_remote(struct work_struct *work) struct tick_work *twork = container_of(dwork, struct tick_work, work); int cpu = twork->cpu; struct rq *rq = cpu_rq(cpu); + struct task_struct *curr; struct rq_flags rf; + u64 delta; /* * Handle the tick only if it appears the remote CPU is running in full @@ -3122,24 +3115,28 @@ static void sched_tick_remote(struct work_struct *work) * statistics and checks timeslices in a time-independent way, regardless * of when exactly it is running. */ - if (!idle_cpu(cpu) && tick_nohz_tick_stopped_cpu(cpu)) { - struct task_struct *curr; - u64 delta; + if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu)) + goto out_requeue; - rq_lock_irq(rq, &rf); - update_rq_clock(rq); - curr = rq->curr; - delta = rq_clock_task(rq) - curr->se.exec_start; + rq_lock_irq(rq, &rf); + curr = rq->curr; + if (is_idle_task(curr)) + goto out_unlock; - /* - * Make sure the next tick runs within a reasonable - * amount of time. - */ - WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); - curr->sched_class->task_tick(rq, curr, 0); - rq_unlock_irq(rq, &rf); - } + update_rq_clock(rq); + delta = rq_clock_task(rq) - curr->se.exec_start; + + /* + * Make sure the next tick runs within a reasonable + * amount of time. + */ + WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); + curr->sched_class->task_tick(rq, curr, 0); + +out_unlock: + rq_unlock_irq(rq, &rf); +out_requeue: /* * Run the remote tick once per second (1Hz). This arbitrary * frequency is large enough to avoid overload but short enough diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 3cde46483f0a..c907fde01eaa 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -192,7 +192,7 @@ static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu) { struct rq *rq = cpu_rq(sg_cpu->cpu); - if (rq->rt.rt_nr_running) + if (rt_rq_is_runnable(&rq->rt)) return sg_cpu->max; /* diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index fbfc3f1d368a..10c7b51c0d1f 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2290,8 +2290,17 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p) if (task_on_rq_queued(p) && p->dl.dl_runtime) task_non_contending(p); - if (!task_on_rq_queued(p)) + if (!task_on_rq_queued(p)) { + /* + * Inactive timer is armed. However, p is leaving DEADLINE and + * might migrate away from this rq while continuing to run on + * some other class. We need to remove its contribution from + * this rq running_bw now, or sub_rq_bw (below) will complain. + */ + if (p->dl.dl_non_contending) + sub_running_bw(&p->dl, &rq->dl); sub_rq_bw(&p->dl, &rq->dl); + } /* * We cannot use inactive_task_timer() to invoke sub_running_bw() diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 1866e64792a7..2f0a0be4d344 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3982,18 +3982,10 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) if (!sched_feat(UTIL_EST)) return; - /* - * Update root cfs_rq's estimated utilization - * - * If *p is the last task then the root cfs_rq's estimated utilization - * of a CPU is 0 by definition. 
- */ - ue.enqueued = 0; - if (cfs_rq->nr_running) { - ue.enqueued = cfs_rq->avg.util_est.enqueued; - ue.enqueued -= min_t(unsigned int, ue.enqueued, - (_task_util_est(p) | UTIL_AVG_UNCHANGED)); - } + /* Update root cfs_rq's estimated utilization */ + ue.enqueued = cfs_rq->avg.util_est.enqueued; + ue.enqueued -= min_t(unsigned int, ue.enqueued, + (_task_util_est(p) | UTIL_AVG_UNCHANGED)); WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued); /* @@ -4590,6 +4582,7 @@ void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) now = sched_clock_cpu(smp_processor_id()); cfs_b->runtime = cfs_b->quota; cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period); + cfs_b->expires_seq++; } static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) @@ -4612,6 +4605,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) struct task_group *tg = cfs_rq->tg; struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); u64 amount = 0, min_amount, expires; + int expires_seq; /* note: this is a positive sum as runtime_remaining <= 0 */ min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; @@ -4628,6 +4622,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) cfs_b->idle = 0; } } + expires_seq = cfs_b->expires_seq; expires = cfs_b->runtime_expires; raw_spin_unlock(&cfs_b->lock); @@ -4637,8 +4632,10 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) * spread between our sched_clock and the one on which runtime was * issued. */ - if ((s64)(expires - cfs_rq->runtime_expires) > 0) + if (cfs_rq->expires_seq != expires_seq) { + cfs_rq->expires_seq = expires_seq; cfs_rq->runtime_expires = expires; + } return cfs_rq->runtime_remaining > 0; } @@ -4664,12 +4661,9 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) * has not truly expired. * * Fortunately we can check determine whether this the case by checking - * whether the global deadline has advanced. It is valid to compare - * cfs_b->runtime_expires without any locks since we only care about - * exact equality, so a partial write will still work. + * whether the global deadline(cfs_b->expires_seq) has advanced. */ - - if (cfs_rq->runtime_expires != cfs_b->runtime_expires) { + if (cfs_rq->expires_seq == cfs_b->expires_seq) { /* extend local deadline, drift is bounded above by 2 ticks */ cfs_rq->runtime_expires += TICK_NSEC; } else { @@ -5202,13 +5196,18 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) { + u64 overrun; + lockdep_assert_held(&cfs_b->lock); - if (!cfs_b->period_active) { - cfs_b->period_active = 1; - hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); - hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); - } + if (cfs_b->period_active) + return; + + cfs_b->period_active = 1; + overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); + cfs_b->runtime_expires += (overrun + 1) * ktime_to_ns(cfs_b->period); + cfs_b->expires_seq++; + hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); } static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 47556b0c9a95..572567078b60 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -508,8 +508,11 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) rt_se = rt_rq->tg->rt_se[cpu]; - if (!rt_se) + if (!rt_se) { dequeue_top_rt_rq(rt_rq); + /* Kick cpufreq (see the comment in kernel/sched/sched.h). 
*/ + cpufreq_update_util(rq_of_rt_rq(rt_rq), 0); + } else if (on_rt_rq(rt_se)) dequeue_rt_entity(rt_se, 0); } @@ -1001,8 +1004,6 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq) sub_nr_running(rq, rt_rq->rt_nr_running); rt_rq->rt_queued = 0; - /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ - cpufreq_update_util(rq, 0); } static void @@ -1014,11 +1015,14 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq) if (rt_rq->rt_queued) return; - if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running) + + if (rt_rq_throttled(rt_rq)) return; - add_nr_running(rq, rt_rq->rt_nr_running); - rt_rq->rt_queued = 1; + if (rt_rq->rt_nr_running) { + add_nr_running(rq, rt_rq->rt_nr_running); + rt_rq->rt_queued = 1; + } /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ cpufreq_update_util(rq, 0); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 6601baf2361c..c7742dcc136c 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -334,9 +334,10 @@ struct cfs_bandwidth { u64 runtime; s64 hierarchical_quota; u64 runtime_expires; + int expires_seq; - int idle; - int period_active; + short idle; + short period_active; struct hrtimer period_timer; struct hrtimer slack_timer; struct list_head throttled_cfs_rq; @@ -551,6 +552,7 @@ struct cfs_rq { #ifdef CONFIG_CFS_BANDWIDTH int runtime_enabled; + int expires_seq; u64 runtime_expires; s64 runtime_remaining; @@ -609,6 +611,11 @@ struct rt_rq { #endif }; +static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) +{ + return rt_rq->rt_queued && rt_rq->rt_nr_running; +} + /* Deadline class' related fields in a runqueue */ struct dl_rq { /* runqueue is an rbtree, ordered by deadline */ diff --git a/kernel/softirq.c b/kernel/softirq.c index 900dcfee542c..75ffc1d1a2e0 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -79,12 +79,16 @@ static void wakeup_softirqd(void) /* * If ksoftirqd is scheduled, we do not want to process pending softirqs - * right now. Let ksoftirqd handle this at its own rate, to get fairness. + * right now. Let ksoftirqd handle this at its own rate, to get fairness, + * unless we're doing some of the synchronous softirqs. 
*/ -static bool ksoftirqd_running(void) +#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ)) +static bool ksoftirqd_running(unsigned long pending) { struct task_struct *tsk = __this_cpu_read(ksoftirqd); + if (pending & SOFTIRQ_NOW_MASK) + return false; return tsk && (tsk->state == TASK_RUNNING); } @@ -328,7 +332,7 @@ asmlinkage __visible void do_softirq(void) pending = local_softirq_pending(); - if (pending && !ksoftirqd_running()) + if (pending && !ksoftirqd_running(pending)) do_softirq_own_stack(); local_irq_restore(flags); @@ -355,7 +359,7 @@ void irq_enter(void) static inline void invoke_softirq(void) { - if (ksoftirqd_running()) + if (ksoftirqd_running(local_softirq_pending())) return; if (!force_irqthreads) { diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index f89014a2c238..1ff523dae6e2 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -270,7 +270,11 @@ unlock: goto retry; } - wake_up_q(&wakeq); + if (!err) { + preempt_disable(); + wake_up_q(&wakeq); + preempt_enable(); + } return err; } diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index b7005dd21ec1..14de3727b18e 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -277,8 +277,7 @@ static bool tick_check_preferred(struct clock_event_device *curdev, */ return !curdev || newdev->rating > curdev->rating || - (!cpumask_equal(curdev->cpumask, newdev->cpumask) && - !tick_check_percpu(curdev, newdev, smp_processor_id())); + !cpumask_equal(curdev->cpumask, newdev->cpumask); } /* diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index efed9c1cfb7e..caf9cbf35816 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -192,17 +192,6 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, op->saved_func(ip, parent_ip, op, regs); } -/** - * clear_ftrace_function - reset the ftrace function - * - * This NULLs the ftrace function and in essence stops - * tracing. There may be lag - */ -void clear_ftrace_function(void) -{ - ftrace_trace_function = ftrace_stub; -} - static void ftrace_sync(struct work_struct *work) { /* @@ -6689,7 +6678,7 @@ void ftrace_kill(void) { ftrace_disabled = 1; ftrace_enabled = 0; - clear_ftrace_function(); + ftrace_trace_function = ftrace_stub; } /** diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index a0079b4c7a49..87cf25171fb8 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2953,6 +2953,7 @@ out_nobuffer: } EXPORT_SYMBOL_GPL(trace_vbprintk); +__printf(3, 0) static int __trace_array_vprintk(struct ring_buffer *buffer, unsigned long ip, const char *fmt, va_list args) @@ -3007,12 +3008,14 @@ out_nobuffer: return len; } +__printf(3, 0) int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); } +__printf(3, 0) int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...) { @@ -3028,6 +3031,7 @@ int trace_array_printk(struct trace_array *tr, return ret; } +__printf(3, 4) int trace_array_printk_buf(struct ring_buffer *buffer, unsigned long ip, const char *fmt, ...) 
{ @@ -3043,6 +3047,7 @@ int trace_array_printk_buf(struct ring_buffer *buffer, return ret; } +__printf(2, 0) int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { return trace_array_vprintk(&global_trace, ip, fmt, args); @@ -3360,8 +3365,8 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m, print_event_info(buf, m); - seq_printf(m, "# TASK-PID CPU# %s TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); - seq_printf(m, "# | | | %s | |\n", tgid ? " | " : ""); + seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); + seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); } static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m, @@ -3381,9 +3386,9 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file tgid ? tgid_space : space); seq_printf(m, "# %s||| / delay\n", tgid ? tgid_space : space); - seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n", + seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n", tgid ? " TGID " : space); - seq_printf(m, "# | | | %s|||| | |\n", + seq_printf(m, "# | | %s | |||| | |\n", tgid ? " | " : space); } diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 630c5a24b2b2..f8f86231ad90 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -583,9 +583,7 @@ static __always_inline void trace_clear_recursion(int bit) static inline struct ring_buffer_iter * trace_buffer_iter(struct trace_iterator *iter, int cpu) { - if (iter->buffer_iter && iter->buffer_iter[cpu]) - return iter->buffer_iter[cpu]; - return NULL; + return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL; } int tracer_init(struct tracer *t, struct trace_array *tr); diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 0dceb77d1d42..893a206bcba4 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -1701,6 +1701,7 @@ static void create_filter_finish(struct filter_parse_error *pe) * @filter_str: filter string * @set_str: remember @filter_str and enable detailed error in filter * @filterp: out param for created filter (always updated on return) + * Must be a pointer that references a NULL pointer. * * Creates a filter for @call with @filter_str. If @set_str is %true, * @filter_str is copied and recorded in the new filter. 
@@ -1718,6 +1719,10 @@ static int create_filter(struct trace_event_call *call, struct filter_parse_error *pe = NULL; int err; + /* filterp must point to NULL */ + if (WARN_ON(*filterp)) + *filterp = NULL; + err = create_filter_start(filter_string, set_str, &pe, filterp); if (err) return err; diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 046c716a6536..aae18af94c94 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -393,7 +393,7 @@ static void hist_err_event(char *str, char *system, char *event, char *var) else if (system) snprintf(err, MAX_FILTER_STR_VAL, "%s.%s", system, event); else - strncpy(err, var, MAX_FILTER_STR_VAL); + strscpy(err, var, MAX_FILTER_STR_VAL); hist_err(str, err); } diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 23c0b0cb5fb9..169b3c44ee97 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -831,6 +831,7 @@ print_graph_entry_leaf(struct trace_iterator *iter, struct ftrace_graph_ret *graph_ret; struct ftrace_graph_ent *call; unsigned long long duration; + int cpu = iter->cpu; int i; graph_ret = &ret_entry->ret; @@ -839,7 +840,6 @@ print_graph_entry_leaf(struct trace_iterator *iter, if (data) { struct fgraph_cpu_data *cpu_data; - int cpu = iter->cpu; cpu_data = per_cpu_ptr(data->cpu_data, cpu); @@ -869,6 +869,9 @@ print_graph_entry_leaf(struct trace_iterator *iter, trace_seq_printf(s, "%ps();\n", (void *)call->func); + print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET, + cpu, iter->ent->pid, flags); + return trace_handle_return(s); } diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index daa81571b22a..21f718472942 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1480,8 +1480,10 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs, } ret = __register_trace_kprobe(tk); - if (ret < 0) + if (ret < 0) { + kfree(tk->tp.call.print_fmt); goto error; + } return &tk->tp.call; error: @@ -1501,6 +1503,8 @@ void destroy_local_trace_kprobe(struct trace_event_call *event_call) } __unregister_trace_kprobe(tk); + + kfree(tk->tp.call.print_fmt); free_trace_kprobe(tk); } #endif /* CONFIG_PERF_EVENTS */ diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 90db994ac900..1c8e30fda46a 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -594,8 +594,7 @@ int trace_print_context(struct trace_iterator *iter) trace_find_cmdline(entry->pid, comm); - trace_seq_printf(s, "%16s-%-5d [%03d] ", - comm, entry->pid, iter->cpu); + trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); if (tr->trace_flags & TRACE_ITER_RECORD_TGID) { unsigned int tgid = trace_find_tgid(entry->pid); @@ -606,6 +605,8 @@ int trace_print_context(struct trace_iterator *iter) trace_seq_printf(s, "(%5d) ", tgid); } + trace_seq_printf(s, "[%03d] ", iter->cpu); + if (tr->trace_flags & TRACE_ITER_IRQ_INFO) trace_print_lat_fmt(s, entry); diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index 3d35d062970d..c253c1b46c6b 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -6,6 +6,7 @@ if HAVE_ARCH_KASAN config KASAN bool "KASan: runtime memory debugger" depends on SLUB || (SLAB && !DEBUG_SLAB) + select SLUB_DEBUG if SLUB select CONSTRUCTORS select STACKDEPOT help diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 7e43cd54c84c..8be175df3075 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -596,15 +596,70 @@ static unsigned long 
memcpy_mcsafe_to_page(struct page *page, size_t offset, return ret; } +static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes, + struct iov_iter *i) +{ + struct pipe_inode_info *pipe = i->pipe; + size_t n, off, xfer = 0; + int idx; + + if (!sanity(i)) + return 0; + + bytes = n = push_pipe(i, bytes, &idx, &off); + if (unlikely(!n)) + return 0; + for ( ; n; idx = next_idx(idx, pipe), off = 0) { + size_t chunk = min_t(size_t, n, PAGE_SIZE - off); + unsigned long rem; + + rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr, + chunk); + i->idx = idx; + i->iov_offset = off + chunk - rem; + xfer += chunk - rem; + if (rem) + break; + n -= chunk; + addr += chunk; + } + i->count -= xfer; + return xfer; +} + +/** + * _copy_to_iter_mcsafe - copy to user with source-read error exception handling + * @addr: source kernel address + * @bytes: total transfer length + * @iter: destination iterator + * + * The pmem driver arranges for filesystem-dax to use this facility via + * dax_copy_to_iter() for protecting read/write to persistent memory. + * Unless / until an architecture can guarantee identical performance + * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a + * performance regression to switch more users to the mcsafe version. + * + * Otherwise, the main differences between this and typical _copy_to_iter(). + * + * * Typical tail/residue handling after a fault retries the copy + * byte-by-byte until the fault happens again. Re-triggering machine + * checks is potentially fatal so the implementation uses source + * alignment and poison alignment assumptions to avoid re-triggering + * hardware exceptions. + * + * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies. + * Compare to copy_to_iter() where only ITER_IOVEC attempts might return + * a short copy. + * + * See MCSAFE_TEST for self-test. + */ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i) { const char *from = addr; unsigned long rem, curr_addr, s_addr = (unsigned long) addr; - if (unlikely(i->type & ITER_PIPE)) { - WARN_ON(1); - return 0; - } + if (unlikely(i->type & ITER_PIPE)) + return copy_pipe_to_iter_mcsafe(addr, bytes, i); if (iter_is_iovec(i)) might_fault(); iterate_and_advance(i, bytes, v, @@ -701,6 +756,20 @@ size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) EXPORT_SYMBOL(_copy_from_iter_nocache); #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE +/** + * _copy_from_iter_flushcache - write destination through cpu cache + * @addr: destination kernel address + * @bytes: total transfer length + * @iter: source iterator + * + * The pmem driver arranges for filesystem-dax to use this facility via + * dax_copy_from_iter() for ensuring that writes to persistent memory + * are flushed through the CPU cache. It is differentiated from + * _copy_from_iter_nocache() in that guarantees all data is flushed for + * all iterator types. The _copy_from_iter_nocache() only attempts to + * bypass the cache for the ITER_IOVEC case, and on some archs may use + * instructions that strand dirty-data in the cache. 
+ */ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c index 9bbd9c5d375a..beb14839b41a 100644 --- a/lib/percpu_ida.c +++ b/lib/percpu_ida.c @@ -141,7 +141,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state) spin_lock_irqsave(&tags->lock, flags); /* Fastpath */ - if (likely(tags->nr_free >= 0)) { + if (likely(tags->nr_free)) { tag = tags->freelist[--tags->nr_free]; spin_unlock_irqrestore(&tags->lock, flags); return tag; diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 9427b5766134..e5c8586cf717 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -774,7 +774,7 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter) skip++; if (list == iter->list) { iter->p = p; - skip = skip; + iter->skip = skip; goto found; } } @@ -964,8 +964,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop); static size_t rounded_hashtable_size(const struct rhashtable_params *params) { - return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), - (unsigned long)params->min_size); + size_t retsize; + + if (params->nelem_hint) + retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3), + (unsigned long)params->min_size); + else + retsize = max(HASH_DEFAULT_SIZE, + (unsigned long)params->min_size); + + return retsize; } static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) @@ -1022,8 +1030,6 @@ int rhashtable_init(struct rhashtable *ht, struct bucket_table *tbl; size_t size; - size = HASH_DEFAULT_SIZE; - if ((!params->key_len && !params->obj_hashfn) || (params->obj_hashfn && !params->obj_cmpfn)) return -EINVAL; @@ -1050,8 +1056,7 @@ int rhashtable_init(struct rhashtable *ht, ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE); - if (params->nelem_hint) - size = rounded_hashtable_size(&ht->p); + size = rounded_hashtable_size(&ht->p); if (params->locks_mul) ht->p.locks_mul = roundup_pow_of_two(params->locks_mul); @@ -1143,13 +1148,14 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, void (*free_fn)(void *ptr, void *arg), void *arg) { - struct bucket_table *tbl; + struct bucket_table *tbl, *next_tbl; unsigned int i; cancel_work_sync(&ht->run_work); mutex_lock(&ht->mutex); tbl = rht_dereference(ht->tbl, ht); +restart: if (free_fn) { for (i = 0; i < tbl->size; i++) { struct rhash_head *pos, *next; @@ -1166,7 +1172,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, } } + next_tbl = rht_dereference(tbl->future_tbl, ht); bucket_table_free(tbl); + if (next_tbl) { + tbl = next_tbl; + goto restart; + } mutex_unlock(&ht->mutex); } EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy); diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 1642fd507a96..7c6096a71704 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -24,9 +24,6 @@ **/ struct scatterlist *sg_next(struct scatterlist *sg) { -#ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); -#endif if (sg_is_last(sg)) return NULL; @@ -111,10 +108,7 @@ struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents) for_each_sg(sgl, sg, nents, i) ret = sg; -#ifdef CONFIG_DEBUG_SG - BUG_ON(sgl[0].sg_magic != SG_MAGIC); BUG_ON(!sg_is_last(ret)); -#endif return ret; } EXPORT_SYMBOL(sg_last); diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 60aedc879361..08d3d59dca17 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -5282,21 +5282,31 @@ static struct bpf_test tests[] = { { /* Mainly checking JIT here. 
*/ "BPF_MAXINSNS: Ctx heavy transformations", { }, +#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) + CLASSIC | FLAG_EXPECTED_FAIL, +#else CLASSIC, +#endif { }, { { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }, { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) } }, .fill_helper = bpf_fill_maxinsns6, + .expected_errcode = -ENOTSUPP, }, { /* Mainly checking JIT here. */ "BPF_MAXINSNS: Call heavy transformations", { }, +#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, +#else CLASSIC | FLAG_NO_DATA, +#endif { }, { { 1, 0 }, { 10, 0 } }, .fill_helper = bpf_fill_maxinsns7, + .expected_errcode = -ENOTSUPP, }, { /* Mainly checking JIT here. */ "BPF_MAXINSNS: Jump heavy test", @@ -5347,18 +5357,28 @@ static struct bpf_test tests[] = { { "BPF_MAXINSNS: exec all MSH", { }, +#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) + CLASSIC | FLAG_EXPECTED_FAIL, +#else CLASSIC, +#endif { 0xfa, 0xfb, 0xfc, 0xfd, }, { { 4, 0xababab83 } }, .fill_helper = bpf_fill_maxinsns13, + .expected_errcode = -ENOTSUPP, }, { "BPF_MAXINSNS: ld_abs+get_processor_id", { }, +#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) + CLASSIC | FLAG_EXPECTED_FAIL, +#else CLASSIC, +#endif { }, { { 1, 0xbee } }, .fill_helper = bpf_fill_ld_abs_get_processor_id, + .expected_errcode = -ENOTSUPP, }, /* * LD_IND / LD_ABS on fragmented SKBs diff --git a/lib/test_printf.c b/lib/test_printf.c index b2aa8f514844..cea592f402ed 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -260,13 +260,6 @@ plain(void) { int err; - /* - * Make sure crng is ready. Otherwise we get "(ptrval)" instead - * of a hashed address when printing '%p' in plain_hash() and - * plain_format(). - */ - wait_for_random_bytes(); - err = plain_hash(); if (err) { pr_warn("plain 'p' does not appear to be hashed\n"); diff --git a/mm/debug.c b/mm/debug.c index 56e2d9125ea5..38c926520c97 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -43,12 +43,25 @@ const struct trace_print_flags vmaflag_names[] = { void __dump_page(struct page *page, const char *reason) { + bool page_poisoned = PagePoisoned(page); + int mapcount; + + /* + * If struct page is poisoned don't access Page*() functions as that + * leads to recursive loop. Page*() check for poisoned pages, and calls + * dump_page() when detected. + */ + if (page_poisoned) { + pr_emerg("page:%px is uninitialized and poisoned", page); + goto hex_only; + } + /* * Avoid VM_BUG_ON() in page_mapcount(). * page->_mapcount space in struct page is used by sl[aou]b pages to * encode own info. */ - int mapcount = PageSlab(page) ? 0 : page_mapcount(page); + mapcount = PageSlab(page) ? 
0 : page_mapcount(page); pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx", page, page_ref_count(page), mapcount, @@ -60,6 +73,7 @@ void __dump_page(struct page *page, const char *reason) pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags); +hex_only: print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32, sizeof(unsigned long), page, sizeof(struct page), false); @@ -68,7 +82,7 @@ void __dump_page(struct page *page, const char *reason) pr_alert("page dumped because: %s\n", reason); #ifdef CONFIG_MEMCG - if (page->mem_cgroup) + if (!page_poisoned && page->mem_cgroup) pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup); #endif } @@ -1238,8 +1238,6 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) int locked = 0; long ret = 0; - VM_BUG_ON(start & ~PAGE_MASK); - VM_BUG_ON(len != PAGE_ALIGN(len)); end = start + len; for (nstart = start; nstart < end; nstart = nend) { diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 1cd7c1a57a14..25346bd99364 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2084,6 +2084,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, if (vma_is_dax(vma)) return; page = pmd_page(_pmd); + if (!PageDirty(page) && pmd_dirty(_pmd)) + set_page_dirty(page); if (!PageReferenced(page) && pmd_young(_pmd)) SetPageReferenced(page); page_remove_rmap(page, true); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 3612fbb32e9d..039ddbc574e9 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2163,6 +2163,7 @@ static void __init gather_bootmem_prealloc(void) */ if (hstate_is_gigantic(h)) adjust_managed_page_count(page, 1 << h->order); + cond_resched(); } } diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index f185455b3406..c3bd5209da38 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -619,12 +619,13 @@ void kasan_kfree_large(void *ptr, unsigned long ip) int kasan_module_alloc(void *addr, size_t size) { void *ret; + size_t scaled_size; size_t shadow_size; unsigned long shadow_start; shadow_start = (unsigned long)kasan_mem_to_shadow(addr); - shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT, - PAGE_SIZE); + scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT; + shadow_size = round_up(scaled_size, PAGE_SIZE); if (WARN_ON(!PAGE_ALIGNED(shadow_start))) return -EINVAL; diff --git a/mm/memblock.c b/mm/memblock.c index 03d48d8835ba..4b5d245fafc1 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -20,6 +20,7 @@ #include <linux/kmemleak.h> #include <linux/seq_file.h> #include <linux/memblock.h> +#include <linux/bootmem.h> #include <asm/sections.h> #include <linux/io.h> @@ -227,7 +228,8 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, * so we use WARN_ONCE() here to see the stack trace if * fail happens. 
*/ - WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n"); + WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE), + "memblock: bottom-up allocation failed, memory hotremove may be affected\n"); } return __memblock_find_range_top_down(start, end, size, align, nid, @@ -1224,6 +1226,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); } +#if defined(CONFIG_NO_BOOTMEM) /** * memblock_virt_alloc_internal - allocate boot memory block * @size: size of memory block to be allocated in bytes @@ -1431,6 +1434,7 @@ void * __init memblock_virt_alloc_try_nid( (u64)max_addr); return NULL; } +#endif /** * __memblock_free_early - free boot memory block diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e6f0d5ef320a..8c0280b3143e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -850,7 +850,7 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) int nid; int i; - while ((memcg = parent_mem_cgroup(memcg))) { + for (; memcg; memcg = parent_mem_cgroup(memcg)) { for_each_node(nid) { mz = mem_cgroup_nodeinfo(memcg, nid); for (i = 0; i <= DEF_PRIORITY; i++) { diff --git a/mm/mmap.c b/mm/mmap.c index d1eb87ef4b1a..ff1944d8d458 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -182,12 +182,12 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) if (vma->vm_file) fput(vma->vm_file); mpol_put(vma_policy(vma)); - kmem_cache_free(vm_area_cachep, vma); + vm_area_free(vma); return next; } -static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf); - +static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, + struct list_head *uf); SYSCALL_DEFINE1(brk, unsigned long, brk) { unsigned long retval; @@ -245,7 +245,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) goto out; /* Ok, looks good - let it rip. */ - if (do_brk(oldbrk, newbrk-oldbrk, &uf) < 0) + if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0) goto out; set_brk: @@ -911,7 +911,7 @@ again: anon_vma_merge(vma, next); mm->map_count--; mpol_put(vma_policy(next)); - kmem_cache_free(vm_area_cachep, next); + vm_area_free(next); /* * In mprotect's case 6 (see comments on vma_merge), * we must remove another next too. It would clutter @@ -1729,19 +1729,17 @@ unsigned long mmap_region(struct file *file, unsigned long addr, * specific mapper. the address has already been validated, but * not unmapped, but the maps are removed from the list. 
*/ - vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + vma = vm_area_alloc(mm); if (!vma) { error = -ENOMEM; goto unacct_error; } - vma->vm_mm = mm; vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_flags = vm_flags; vma->vm_page_prot = vm_get_page_prot(vm_flags); vma->vm_pgoff = pgoff; - INIT_LIST_HEAD(&vma->anon_vma_chain); if (file) { if (vm_flags & VM_DENYWRITE) { @@ -1832,7 +1830,7 @@ allow_write_and_free_vma: if (vm_flags & VM_DENYWRITE) allow_write_access(file); free_vma: - kmem_cache_free(vm_area_cachep, vma); + vm_area_free(vma); unacct_error: if (charged) vm_unacct_memory(charged); @@ -2620,15 +2618,10 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, return err; } - new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + new = vm_area_dup(vma); if (!new) return -ENOMEM; - /* most fields are the same, copy all, and then fixup */ - *new = *vma; - - INIT_LIST_HEAD(&new->anon_vma_chain); - if (new_below) new->vm_end = addr; else { @@ -2669,7 +2662,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, out_free_mpol: mpol_put(vma_policy(new)); out_free_vma: - kmem_cache_free(vm_area_cachep, new); + vm_area_free(new); return err; } @@ -2929,21 +2922,14 @@ static inline void verify_mm_writelocked(struct mm_struct *mm) * anonymous maps. eventually we may be able to do some * brk-specific accounting here. */ -static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, struct list_head *uf) +static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; - unsigned long len; struct rb_node **rb_link, *rb_parent; pgoff_t pgoff = addr >> PAGE_SHIFT; int error; - len = PAGE_ALIGN(request); - if (len < request) - return -ENOMEM; - if (!len) - return 0; - /* Until we need other flags, refuse anything except VM_EXEC. 
*/ if ((flags & (~VM_EXEC)) != 0) return -EINVAL; @@ -2991,14 +2977,12 @@ static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long /* * create a vma struct for an anonymous mapping */ - vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + vma = vm_area_alloc(mm); if (!vma) { vm_unacct_memory(len >> PAGE_SHIFT); return -ENOMEM; } - INIT_LIST_HEAD(&vma->anon_vma_chain); - vma->vm_mm = mm; vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_pgoff = pgoff; @@ -3015,18 +2999,20 @@ out: return 0; } -static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf) -{ - return do_brk_flags(addr, len, 0, uf); -} - -int vm_brk_flags(unsigned long addr, unsigned long len, unsigned long flags) +int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) { struct mm_struct *mm = current->mm; + unsigned long len; int ret; bool populate; LIST_HEAD(uf); + len = PAGE_ALIGN(request); + if (len < request) + return -ENOMEM; + if (!len) + return 0; + if (down_write_killable(&mm->mmap_sem)) return -EINTR; @@ -3207,16 +3193,14 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, } *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); } else { - new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + new_vma = vm_area_dup(vma); if (!new_vma) goto out; - *new_vma = *vma; new_vma->vm_start = addr; new_vma->vm_end = addr + len; new_vma->vm_pgoff = pgoff; if (vma_dup_policy(vma, new_vma)) goto out_free_vma; - INIT_LIST_HEAD(&new_vma->anon_vma_chain); if (anon_vma_clone(new_vma, vma)) goto out_free_mempol; if (new_vma->vm_file) @@ -3231,7 +3215,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, out_free_mempol: mpol_put(vma_policy(new_vma)); out_free_vma: - kmem_cache_free(vm_area_cachep, new_vma); + vm_area_free(new_vma); out: return NULL; } @@ -3355,12 +3339,10 @@ static struct vm_area_struct *__install_special_mapping( int ret; struct vm_area_struct *vma; - vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + vma = vm_area_alloc(mm); if (unlikely(vma == NULL)) return ERR_PTR(-ENOMEM); - INIT_LIST_HEAD(&vma->anon_vma_chain); - vma->vm_mm = mm; vma->vm_start = addr; vma->vm_end = addr + len; @@ -3381,7 +3363,7 @@ static struct vm_area_struct *__install_special_mapping( return vma; out: - kmem_cache_free(vm_area_cachep, vma); + vm_area_free(vma); return ERR_PTR(ret); } diff --git a/mm/nommu.c b/mm/nommu.c index 4452d8bd9ae4..1d22fdbf7d7c 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -769,7 +769,7 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) if (vma->vm_file) fput(vma->vm_file); put_nommu_region(vma->vm_region); - kmem_cache_free(vm_area_cachep, vma); + vm_area_free(vma); } /* @@ -1204,7 +1204,7 @@ unsigned long do_mmap(struct file *file, if (!region) goto error_getting_region; - vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + vma = vm_area_alloc(current->mm); if (!vma) goto error_getting_vma; @@ -1212,7 +1212,6 @@ unsigned long do_mmap(struct file *file, region->vm_flags = vm_flags; region->vm_pgoff = pgoff; - INIT_LIST_HEAD(&vma->anon_vma_chain); vma->vm_flags = vm_flags; vma->vm_pgoff = pgoff; @@ -1368,7 +1367,7 @@ error: kmem_cache_free(vm_region_jar, region); if (vma->vm_file) fput(vma->vm_file); - kmem_cache_free(vm_area_cachep, vma); + vm_area_free(vma); return ret; sharing_violation: @@ -1469,14 +1468,13 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, if (!region) return -ENOMEM; - new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + new = vm_area_dup(vma); 
if (!new) { kmem_cache_free(vm_region_jar, region); return -ENOMEM; } /* most fields are the same, copy all, and then fixup */ - *new = *vma; *region = *vma->vm_region; new->vm_region = region; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1521100f1e63..a790ef4be74e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6383,7 +6383,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, free_area_init_core(pgdat); } -#ifdef CONFIG_HAVE_MEMBLOCK +#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP) /* * Only struct pages that are backed by physical memory are zeroed and * initialized by going through __init_single_page(). But, there are some @@ -6421,7 +6421,7 @@ void __paginginit zero_resv_unavail(void) if (pgcnt) pr_info("Reserved but unavailable: %lld pages", pgcnt); } -#endif /* CONFIG_HAVE_MEMBLOCK */ +#endif /* CONFIG_HAVE_MEMBLOCK && !CONFIG_FLAT_NODE_MEM_MAP */ #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP @@ -6847,6 +6847,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn) /* Initialise every node */ mminit_verify_pageflags_layout(); setup_nr_node_ids(); + zero_resv_unavail(); for_each_online_node(nid) { pg_data_t *pgdat = NODE_DATA(nid); free_area_init_node(nid, NULL, @@ -6857,7 +6858,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn) node_set_state(nid, N_MEMORY); check_for_memory(pgdat, nid); } - zero_resv_unavail(); } static int __init cmdline_parse_core(char *p, unsigned long *core, @@ -7033,9 +7033,9 @@ void __init set_dma_reserve(unsigned long new_dma_reserve) void __init free_area_init(unsigned long *zones_size) { + zero_resv_unavail(); free_area_init_node(0, zones_size, __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); - zero_resv_unavail(); } static int page_alloc_cpu_dead(unsigned int cpu) diff --git a/mm/rmap.c b/mm/rmap.c index 6db729dc4c50..eb477809a5c0 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -64,6 +64,7 @@ #include <linux/backing-dev.h> #include <linux/page_idle.h> #include <linux/memremap.h> +#include <linux/userfaultfd_k.h> #include <asm/tlbflush.h> @@ -1481,11 +1482,16 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, set_pte_at(mm, address, pvmw.pte, pteval); } - } else if (pte_unused(pteval)) { + } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { /* * The guest indicated that the page content is of no * interest anymore. Simply discard the pte, vmscan * will take care of the rest. + * A future reference will then fault in a new zero + * page. When userfaultfd is active, we must not drop + * this page though, as its main user (postcopy + * migration) will not expect userfaults on already + * copied pages. 
*/ dec_mm_counter(mm, mm_counter(page)); /* We have to invalidate as we cleared the pte */ diff --git a/mm/slab_common.c b/mm/slab_common.c index 890b1f04a03a..2296caf87bfb 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -567,10 +567,14 @@ static int shutdown_cache(struct kmem_cache *s) list_del(&s->list); if (s->flags & SLAB_TYPESAFE_BY_RCU) { +#ifdef SLAB_SUPPORTS_SYSFS + sysfs_slab_unlink(s); +#endif list_add_tail(&s->list, &slab_caches_to_rcu_destroy); schedule_work(&slab_caches_to_rcu_destroy_work); } else { #ifdef SLAB_SUPPORTS_SYSFS + sysfs_slab_unlink(s); sysfs_slab_release(s); #else slab_kmem_cache_release(s); diff --git a/mm/slub.c b/mm/slub.c index a3b8467c14af..51258eff4178 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5667,7 +5667,6 @@ static void sysfs_slab_remove_workfn(struct work_struct *work) kset_unregister(s->memcg_kset); #endif kobject_uevent(&s->kobj, KOBJ_REMOVE); - kobject_del(&s->kobj); out: kobject_put(&s->kobj); } @@ -5752,6 +5751,12 @@ static void sysfs_slab_remove(struct kmem_cache *s) schedule_work(&s->kobj_remove_work); } +void sysfs_slab_unlink(struct kmem_cache *s) +{ + if (slab_state >= FULL) + kobject_del(&s->kobj); +} + void sysfs_slab_release(struct kmem_cache *s) { if (slab_state >= FULL) diff --git a/mm/vmstat.c b/mm/vmstat.c index 75eda9c2b260..8ba0870ecddd 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1796,11 +1796,9 @@ static void vmstat_update(struct work_struct *w) * to occur in the future. Keep on running the * update worker thread. */ - preempt_disable(); queue_delayed_work_on(smp_processor_id(), mm_percpu_wq, this_cpu_ptr(&vmstat_work), round_jiffies_relative(sysctl_stat_interval)); - preempt_enable(); } } diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 73a65789271b..8ccee3d01822 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -693,7 +693,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head, out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } diff --git a/net/9p/client.c b/net/9p/client.c index 18c5271910dc..5c1343195292 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -225,7 +225,8 @@ static int parse_opts(char *opts, struct p9_client *clnt) } free_and_return: - v9fs_put_trans(clnt->trans_mod); + if (ret) + v9fs_put_trans(clnt->trans_mod); kfree(tmp_options); return ret; } diff --git a/net/Makefile b/net/Makefile index 13ec0d5415c7..bdaf53925acd 100644 --- a/net/Makefile +++ b/net/Makefile @@ -20,11 +20,7 @@ obj-$(CONFIG_TLS) += tls/ obj-$(CONFIG_XFRM) += xfrm/ obj-$(CONFIG_UNIX) += unix/ obj-$(CONFIG_NET) += ipv6/ -ifneq ($(CC_CAN_LINK),y) -$(warning CC cannot link executables. Skipping bpfilter.) 
-else obj-$(CONFIG_BPFILTER) += bpfilter/ -endif obj-$(CONFIG_PACKET) += packet/ obj-$(CONFIG_NET_KEY) += key/ obj-$(CONFIG_BRIDGE) += bridge/ diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 55fdba05d7d9..9b6bc5abe946 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1869,7 +1869,7 @@ static const struct proto_ops atalk_dgram_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = atalk_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = atalk_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = atalk_compat_ioctl, diff --git a/net/atm/common.c b/net/atm/common.c index ff5748b2190f..a7a68e509628 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -647,11 +647,16 @@ out: return error; } -__poll_t vcc_poll_mask(struct socket *sock, __poll_t events) +__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; - struct atm_vcc *vcc = ATM_SD(sock); - __poll_t mask = 0; + struct atm_vcc *vcc; + __poll_t mask; + + sock_poll_wait(file, sk_sleep(sk), wait); + mask = 0; + + vcc = ATM_SD(sock); /* exceptional events */ if (sk->sk_err) diff --git a/net/atm/common.h b/net/atm/common.h index 526796ad230f..5850649068bb 100644 --- a/net/atm/common.h +++ b/net/atm/common.h @@ -17,7 +17,7 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci); int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags); int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len); -__poll_t vcc_poll_mask(struct socket *sock, __poll_t events); +__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait); int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int vcc_setsockopt(struct socket *sock, int level, int optname, diff --git a/net/atm/pvc.c b/net/atm/pvc.c index 9f75092fe778..2cb10af16afc 100644 --- a/net/atm/pvc.c +++ b/net/atm/pvc.c @@ -113,7 +113,7 @@ static const struct proto_ops pvc_proto_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = pvc_getname, - .poll_mask = vcc_poll_mask, + .poll = vcc_poll, .ioctl = vcc_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = vcc_compat_ioctl, diff --git a/net/atm/svc.c b/net/atm/svc.c index 53f4ad7087b1..2f91b766ac42 100644 --- a/net/atm/svc.c +++ b/net/atm/svc.c @@ -636,7 +636,7 @@ static const struct proto_ops svc_proto_ops = { .socketpair = sock_no_socketpair, .accept = svc_accept, .getname = svc_getname, - .poll_mask = vcc_poll_mask, + .poll = vcc_poll, .ioctl = svc_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = svc_compat_ioctl, diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index d1d2442ce573..c603d33d5410 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1941,7 +1941,7 @@ static const struct proto_ops ax25_proto_ops = { .socketpair = sock_no_socketpair, .accept = ax25_accept, .getname = ax25_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = ax25_ioctl, .listen = ax25_listen, .shutdown = ax25_shutdown, diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index be09a9883825..73bf6a93a3cf 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -2732,7 +2732,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, { struct batadv_neigh_ifinfo *router_ifinfo = NULL; struct batadv_neigh_node *router; - struct batadv_gw_node *curr_gw; + struct batadv_gw_node *curr_gw = NULL; 
int ret = 0; void *hdr; @@ -2780,6 +2780,8 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, ret = 0; out: + if (curr_gw) + batadv_gw_node_put(curr_gw); if (router_ifinfo) batadv_neigh_ifinfo_put(router_ifinfo); if (router) diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c index ec93337ee259..6baec4e68898 100644 --- a/net/batman-adv/bat_v.c +++ b/net/batman-adv/bat_v.c @@ -927,7 +927,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, { struct batadv_neigh_ifinfo *router_ifinfo = NULL; struct batadv_neigh_node *router; - struct batadv_gw_node *curr_gw; + struct batadv_gw_node *curr_gw = NULL; int ret = 0; void *hdr; @@ -995,6 +995,8 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, ret = 0; out: + if (curr_gw) + batadv_gw_node_put(curr_gw); if (router_ifinfo) batadv_neigh_ifinfo_put(router_ifinfo); if (router) diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c index 4229b01ac7b5..87479c60670e 100644 --- a/net/batman-adv/debugfs.c +++ b/net/batman-adv/debugfs.c @@ -19,6 +19,7 @@ #include "debugfs.h" #include "main.h" +#include <linux/dcache.h> #include <linux/debugfs.h> #include <linux/err.h> #include <linux/errno.h> @@ -344,6 +345,25 @@ out: } /** + * batadv_debugfs_rename_hardif() - Fix debugfs path for renamed hardif + * @hard_iface: hard interface which was renamed + */ +void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface) +{ + const char *name = hard_iface->net_dev->name; + struct dentry *dir; + struct dentry *d; + + dir = hard_iface->debug_dir; + if (!dir) + return; + + d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name); + if (!d) + pr_err("Can't rename debugfs dir to %s\n", name); +} + +/** * batadv_debugfs_del_hardif() - delete the base directory for a hard interface * in debugfs. * @hard_iface: hard interface which is deleted. 
@@ -414,6 +434,26 @@ out: } /** + * batadv_debugfs_rename_meshif() - Fix debugfs path for renamed softif + * @dev: net_device which was renamed + */ +void batadv_debugfs_rename_meshif(struct net_device *dev) +{ + struct batadv_priv *bat_priv = netdev_priv(dev); + const char *name = dev->name; + struct dentry *dir; + struct dentry *d; + + dir = bat_priv->debug_dir; + if (!dir) + return; + + d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name); + if (!d) + pr_err("Can't rename debugfs dir to %s\n", name); +} + +/** * batadv_debugfs_del_meshif() - Remove interface dependent debugfs entries * @dev: netdev struct of the soft interface */ diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h index 37b069698b04..08a592ffbee5 100644 --- a/net/batman-adv/debugfs.h +++ b/net/batman-adv/debugfs.h @@ -30,8 +30,10 @@ struct net_device; void batadv_debugfs_init(void); void batadv_debugfs_destroy(void); int batadv_debugfs_add_meshif(struct net_device *dev); +void batadv_debugfs_rename_meshif(struct net_device *dev); void batadv_debugfs_del_meshif(struct net_device *dev); int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface); +void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface); void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface); #else @@ -49,6 +51,10 @@ static inline int batadv_debugfs_add_meshif(struct net_device *dev) return 0; } +static inline void batadv_debugfs_rename_meshif(struct net_device *dev) +{ +} + static inline void batadv_debugfs_del_meshif(struct net_device *dev) { } @@ -60,6 +66,11 @@ int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface) } static inline +void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface) +{ +} + +static inline void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface) { } diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index c405d15befd6..2f0d42f2f913 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -989,6 +989,32 @@ void batadv_hardif_remove_interfaces(void) rtnl_unlock(); } +/** + * batadv_hard_if_event_softif() - Handle events for soft interfaces + * @event: NETDEV_* event to handle + * @net_dev: net_device which generated an event + * + * Return: NOTIFY_* result + */ +static int batadv_hard_if_event_softif(unsigned long event, + struct net_device *net_dev) +{ + struct batadv_priv *bat_priv; + + switch (event) { + case NETDEV_REGISTER: + batadv_sysfs_add_meshif(net_dev); + bat_priv = netdev_priv(net_dev); + batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS); + break; + case NETDEV_CHANGENAME: + batadv_debugfs_rename_meshif(net_dev); + break; + } + + return NOTIFY_DONE; +} + static int batadv_hard_if_event(struct notifier_block *this, unsigned long event, void *ptr) { @@ -997,12 +1023,8 @@ static int batadv_hard_if_event(struct notifier_block *this, struct batadv_hard_iface *primary_if = NULL; struct batadv_priv *bat_priv; - if (batadv_softif_is_valid(net_dev) && event == NETDEV_REGISTER) { - batadv_sysfs_add_meshif(net_dev); - bat_priv = netdev_priv(net_dev); - batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS); - return NOTIFY_DONE; - } + if (batadv_softif_is_valid(net_dev)) + return batadv_hard_if_event_softif(event, net_dev); hard_iface = batadv_hardif_get_by_netdev(net_dev); if (!hard_iface && (event == NETDEV_REGISTER || @@ -1051,6 +1073,9 @@ static int batadv_hard_if_event(struct notifier_block *this, if (batadv_is_wifi_hardif(hard_iface)) hard_iface->num_bcasts = 
BATADV_NUM_BCASTS_WIRELESS; break; + case NETDEV_CHANGENAME: + batadv_debugfs_rename_hardif(hard_iface); + break; default: break; } diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 3986551397ca..12a2b7d21376 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -1705,7 +1705,9 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, ether_addr_copy(common->addr, tt_addr); common->vid = vid; - common->flags = flags; + if (!is_multicast_ether_addr(common->addr)) + common->flags = flags & (~BATADV_TT_SYNC_MASK); + tt_global_entry->roam_at = 0; /* node must store current time in case of roaming. This is * needed to purge this entry out on timeout (if nobody claims @@ -1768,7 +1770,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, * TT_CLIENT_TEMP, therefore they have to be copied in the * client entry */ - common->flags |= flags & (~BATADV_TT_SYNC_MASK); + if (!is_multicast_ether_addr(common->addr)) + common->flags |= flags & (~BATADV_TT_SYNC_MASK); /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only * one originator left in the list and we previously received a diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 510ab4f55df5..3264e1873219 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -437,13 +437,16 @@ static inline __poll_t bt_accept_poll(struct sock *parent) return 0; } -__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events) +__poll_t bt_sock_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; __poll_t mask = 0; BT_DBG("sock %p, sk %p", sock, sk); + poll_wait(file, sk_sleep(sk), wait); + if (sk->sk_state == BT_LISTEN) return bt_accept_poll(sk); @@ -475,7 +478,7 @@ __poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events) return mask; } -EXPORT_SYMBOL(bt_sock_poll_mask); +EXPORT_SYMBOL(bt_sock_poll); int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index d6c099861538..1506e1632394 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -1975,7 +1975,7 @@ static const struct proto_ops hci_sock_ops = { .sendmsg = hci_sock_sendmsg, .recvmsg = hci_sock_recvmsg, .ioctl = hci_sock_ioctl, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = hci_sock_setsockopt, diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 742a190034e6..686bdc6b35b0 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -1653,7 +1653,7 @@ static const struct proto_ops l2cap_sock_ops = { .getname = l2cap_sock_getname, .sendmsg = l2cap_sock_sendmsg, .recvmsg = l2cap_sock_recvmsg, - .poll_mask = bt_sock_poll_mask, + .poll = bt_sock_poll, .ioctl = bt_sock_ioctl, .mmap = sock_no_mmap, .socketpair = sock_no_socketpair, diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 1cf57622473a..d606e9212291 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -1049,7 +1049,7 @@ static const struct proto_ops rfcomm_sock_ops = { .setsockopt = rfcomm_sock_setsockopt, .getsockopt = rfcomm_sock_getsockopt, .ioctl = rfcomm_sock_ioctl, - .poll_mask = bt_sock_poll_mask, + .poll = bt_sock_poll, .socketpair = sock_no_socketpair, .mmap = sock_no_mmap }; diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index d60dbc61d170..413b8ee49fec 
100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -1197,7 +1197,7 @@ static const struct proto_ops sco_sock_ops = { .getname = sco_sock_getname, .sendmsg = sco_sock_sendmsg, .recvmsg = sco_sock_recvmsg, - .poll_mask = bt_sock_poll_mask, + .poll = bt_sock_poll, .ioctl = bt_sock_ioctl, .mmap = sock_no_mmap, .socketpair = sock_no_socketpair, diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 68c3578343b4..22a78eedf4b1 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -96,6 +96,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, u32 size = kattr->test.data_size_in; u32 repeat = kattr->test.repeat; u32 retval, duration; + int hh_len = ETH_HLEN; struct sk_buff *skb; void *data; int ret; @@ -131,12 +132,22 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, skb_reset_network_header(skb); if (is_l2) - __skb_push(skb, ETH_HLEN); + __skb_push(skb, hh_len); if (is_direct_pkt_access) bpf_compute_data_pointers(skb); retval = bpf_test_run(prog, skb, repeat, &duration); - if (!is_l2) - __skb_push(skb, ETH_HLEN); + if (!is_l2) { + if (skb_headroom(skb) < hh_len) { + int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb)); + + if (pskb_expand_head(skb, nhead, 0, GFP_USER)) { + kfree_skb(skb); + return -ENOMEM; + } + } + memset(__skb_push(skb, hh_len), 0, hh_len); + } + size = skb->len; /* bpf program can never convert linear skb to non-linear */ if (WARN_ON_ONCE(skb_is_nonlinear(skb))) diff --git a/net/bpfilter/Kconfig b/net/bpfilter/Kconfig index a948b072c28f..76deb6615883 100644 --- a/net/bpfilter/Kconfig +++ b/net/bpfilter/Kconfig @@ -1,6 +1,5 @@ menuconfig BPFILTER bool "BPF based packet filtering framework (BPFILTER)" - default n depends on NET && BPF && INET help This builds experimental bpfilter framework that is aiming to @@ -9,6 +8,7 @@ menuconfig BPFILTER if BPFILTER config BPFILTER_UMH tristate "bpfilter kernel module with user mode helper" + depends on $(success,$(srctree)/scripts/cc-can-link.sh $(CC)) default m help This builds bpfilter kernel module with embedded user mode helper diff --git a/net/bpfilter/Makefile b/net/bpfilter/Makefile index 051dc18b8ccb..39c6980b5d99 100644 --- a/net/bpfilter/Makefile +++ b/net/bpfilter/Makefile @@ -15,20 +15,7 @@ ifeq ($(CONFIG_BPFILTER_UMH), y) HOSTLDFLAGS += -static endif -# a bit of elf magic to convert bpfilter_umh binary into a binary blob -# inside bpfilter_umh.o elf file referenced by -# _binary_net_bpfilter_bpfilter_umh_start symbol -# which bpfilter_kern.c passes further into umh blob loader at run-time -quiet_cmd_copy_umh = GEN $@ - cmd_copy_umh = echo ':' > $(obj)/.bpfilter_umh.o.cmd; \ - $(OBJCOPY) -I binary \ - `LC_ALL=C $(OBJDUMP) -f net/bpfilter/bpfilter_umh \ - |awk -F' |,' '/file format/{print "-O",$$NF} \ - /^architecture:/{print "-B",$$2}'` \ - --rename-section .data=.init.rodata $< $@ - -$(obj)/bpfilter_umh.o: $(obj)/bpfilter_umh - $(call cmd,copy_umh) +$(obj)/bpfilter_umh_blob.o: $(obj)/bpfilter_umh obj-$(CONFIG_BPFILTER_UMH) += bpfilter.o -bpfilter-objs += bpfilter_kern.o bpfilter_umh.o +bpfilter-objs += bpfilter_kern.o bpfilter_umh_blob.o diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 09522573f611..f0fc182d3db7 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c @@ -10,11 +10,8 @@ #include <linux/file.h> #include "msgfmt.h" -#define UMH_start _binary_net_bpfilter_bpfilter_umh_start -#define UMH_end _binary_net_bpfilter_bpfilter_umh_end - -extern char UMH_start; -extern char UMH_end; +extern 
char bpfilter_umh_start; +extern char bpfilter_umh_end; static struct umh_info info; /* since ip_getsockopt() can run in parallel, serialize access to umh */ @@ -93,7 +90,9 @@ static int __init load_umh(void) int err; /* fork usermode process */ - err = fork_usermode_blob(&UMH_start, &UMH_end - &UMH_start, &info); + err = fork_usermode_blob(&bpfilter_umh_start, + &bpfilter_umh_end - &bpfilter_umh_start, + &info); if (err) return err; pr_info("Loaded bpfilter_umh pid %d\n", info.pid); diff --git a/net/bpfilter/bpfilter_umh_blob.S b/net/bpfilter/bpfilter_umh_blob.S new file mode 100644 index 000000000000..40311d10d2f2 --- /dev/null +++ b/net/bpfilter/bpfilter_umh_blob.S @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + .section .init.rodata, "a" + .global bpfilter_umh_start +bpfilter_umh_start: + .incbin "net/bpfilter/bpfilter_umh" + .global bpfilter_umh_end +bpfilter_umh_end: diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index c7991867d622..a6fb1b3bcad9 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -934,11 +934,15 @@ static int caif_release(struct socket *sock) } /* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */ -static __poll_t caif_poll_mask(struct socket *sock, __poll_t events) +static __poll_t caif_poll(struct file *file, + struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; + __poll_t mask; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); - __poll_t mask = 0; + + sock_poll_wait(file, sk_sleep(sk), wait); + mask = 0; /* exceptional events? */ if (sk->sk_err) @@ -972,7 +976,7 @@ static const struct proto_ops caif_seqpacket_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = caif_poll_mask, + .poll = caif_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, @@ -993,7 +997,7 @@ static const struct proto_ops caif_stream_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = caif_poll_mask, + .poll = caif_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/can/bcm.c b/net/can/bcm.c index 9393f25df08d..0af8f0db892a 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1660,7 +1660,7 @@ static const struct proto_ops bcm_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = can_ioctl, /* use can_ioctl() from af_can.c */ .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/can/raw.c b/net/can/raw.c index fd7e2f49ea6a..1051eee82581 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -843,7 +843,7 @@ static const struct proto_ops raw_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = raw_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = can_ioctl, /* use can_ioctl() from af_can.c */ .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/core/datagram.c b/net/core/datagram.c index f19bf3dc2bd6..9938952c5c78 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -819,8 +819,9 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg); /** * datagram_poll - generic datagram poll + * @file: file struct * @sock: socket - * @events to wait for + * @wait: poll table * * Datagram poll: Again totally generic. 
This also handles * sequenced packet sockets providing the socket receive queue @@ -830,10 +831,14 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg); * and you use a different write policy from sock_writeable() * then please supply your own write_space callback. */ -__poll_t datagram_poll_mask(struct socket *sock, __poll_t events) +__poll_t datagram_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; - __poll_t mask = 0; + __poll_t mask; + + sock_poll_wait(file, sk_sleep(sk), wait); + mask = 0; /* exceptional events? */ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) @@ -866,4 +871,4 @@ __poll_t datagram_poll_mask(struct socket *sock, __poll_t events) return mask; } -EXPORT_SYMBOL(datagram_poll_mask); +EXPORT_SYMBOL(datagram_poll); diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index a04e1e88bf3a..50537ff961a7 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -285,16 +285,9 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) if (ifr->ifr_qlen < 0) return -EINVAL; if (dev->tx_queue_len ^ ifr->ifr_qlen) { - unsigned int orig_len = dev->tx_queue_len; - - dev->tx_queue_len = ifr->ifr_qlen; - err = call_netdevice_notifiers( - NETDEV_CHANGE_TX_QUEUE_LEN, dev); - err = notifier_to_errno(err); - if (err) { - dev->tx_queue_len = orig_len; + err = dev_change_tx_queue_len(dev, ifr->ifr_qlen); + if (err) return err; - } } return 0; diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 126ffc5bc630..f64aa13811ea 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -416,6 +416,14 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops, if (rule->mark && r->mark != rule->mark) continue; + if (rule->suppress_ifgroup != -1 && + r->suppress_ifgroup != rule->suppress_ifgroup) + continue; + + if (rule->suppress_prefixlen != -1 && + r->suppress_prefixlen != rule->suppress_prefixlen) + continue; + if (rule->mark_mask && r->mark_mask != rule->mark_mask) continue; @@ -436,6 +444,9 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops, if (rule->ip_proto && r->ip_proto != rule->ip_proto) continue; + if (rule->proto && r->proto != rule->proto) + continue; + if (fib_rule_port_range_set(&rule->sport_range) && !fib_rule_port_range_compare(&r->sport_range, &rule->sport_range)) @@ -645,6 +656,73 @@ errout: return err; } +static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh, + struct nlattr **tb, struct fib_rule *rule) +{ + struct fib_rule *r; + + list_for_each_entry(r, &ops->rules_list, list) { + if (r->action != rule->action) + continue; + + if (r->table != rule->table) + continue; + + if (r->pref != rule->pref) + continue; + + if (memcmp(r->iifname, rule->iifname, IFNAMSIZ)) + continue; + + if (memcmp(r->oifname, rule->oifname, IFNAMSIZ)) + continue; + + if (r->mark != rule->mark) + continue; + + if (r->suppress_ifgroup != rule->suppress_ifgroup) + continue; + + if (r->suppress_prefixlen != rule->suppress_prefixlen) + continue; + + if (r->mark_mask != rule->mark_mask) + continue; + + if (r->tun_id != rule->tun_id) + continue; + + if (r->fr_net != rule->fr_net) + continue; + + if (r->l3mdev != rule->l3mdev) + continue; + + if (!uid_eq(r->uid_range.start, rule->uid_range.start) || + !uid_eq(r->uid_range.end, rule->uid_range.end)) + continue; + + if (r->ip_proto != rule->ip_proto) + continue; + + if (r->proto != rule->proto) + continue; + + if (!fib_rule_port_range_compare(&r->sport_range, + &rule->sport_range)) + continue; + + if 
(!fib_rule_port_range_compare(&r->dport_range, + &rule->dport_range)) + continue; + + if (!ops->compare(r, frh, tb)) + continue; + return 1; + } + return 0; +} + int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { @@ -679,7 +757,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, goto errout; if ((nlh->nlmsg_flags & NLM_F_EXCL) && - rule_find(ops, frh, tb, rule, user_priority)) { + rule_exists(ops, frh, tb, rule)) { err = -EEXIST; goto errout_free; } diff --git a/net/core/filter.c b/net/core/filter.c index e7f12e9f598c..06da770f543f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -459,11 +459,21 @@ static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp) (!unaligned_ok && offset >= 0 && offset + ip_align >= 0 && offset + ip_align % size == 0))) { + bool ldx_off_ok = offset <= S16_MAX; + *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H); *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset); - *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, size, 2 + endian); - *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_D, - offset); + *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, + size, 2 + endian + (!ldx_off_ok * 2)); + if (ldx_off_ok) { + *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, + BPF_REG_D, offset); + } else { + *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D); + *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset); + *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, + BPF_REG_TMP, 0); + } if (endian) *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8); *insn++ = BPF_JMP_A(8); @@ -1762,6 +1772,37 @@ static const struct bpf_func_proto bpf_skb_pull_data_proto = { .arg2_type = ARG_ANYTHING, }; +static inline int sk_skb_try_make_writable(struct sk_buff *skb, + unsigned int write_len) +{ + int err = __bpf_try_make_writable(skb, write_len); + + bpf_compute_data_end_sk_skb(skb); + return err; +} + +BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len) +{ + /* Idea is the following: should the needed direct read/write + * test fail during runtime, we can pull in more data and redo + * again, since implicitly, we invalidate previous checks here. + * + * Or, since we know how much we need to make read/writeable, + * this can be done once at the program beginning for direct + * access case. By this we overcome limitations of only current + * headroom being accessible. + */ + return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb)); +} + +static const struct bpf_func_proto sk_skb_pull_data_proto = { + .func = sk_skb_pull_data, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, +}; + BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset, u64, from, u64, to, u64, flags) { @@ -2779,7 +2820,8 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff) static u32 __bpf_skb_max_len(const struct sk_buff *skb) { - return skb->dev->mtu + skb->dev->hard_header_len; + return skb->dev ? 
skb->dev->mtu + skb->dev->hard_header_len : + SKB_MAX_ALLOC; } static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff) @@ -2863,8 +2905,8 @@ static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len) return __skb_trim_rcsum(skb, new_len); } -BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, - u64, flags) +static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len, + u64 flags) { u32 max_len = __bpf_skb_max_len(skb); u32 min_len = __bpf_skb_min_len(skb); @@ -2900,6 +2942,13 @@ BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, if (!ret && skb_is_gso(skb)) skb_gso_reset(skb); } + return ret; +} + +BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, + u64, flags) +{ + int ret = __bpf_skb_change_tail(skb, new_len, flags); bpf_compute_data_pointers(skb); return ret; @@ -2914,9 +2963,27 @@ static const struct bpf_func_proto bpf_skb_change_tail_proto = { .arg3_type = ARG_ANYTHING, }; -BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room, +BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len, u64, flags) { + int ret = __bpf_skb_change_tail(skb, new_len, flags); + + bpf_compute_data_end_sk_skb(skb); + return ret; +} + +static const struct bpf_func_proto sk_skb_change_tail_proto = { + .func = sk_skb_change_tail, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, +}; + +static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room, + u64 flags) +{ u32 max_len = __bpf_skb_max_len(skb); u32 new_len = skb->len + head_room; int ret; @@ -2941,8 +3008,16 @@ BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room, skb_reset_mac_header(skb); } + return ret; +} + +BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room, + u64, flags) +{ + int ret = __bpf_skb_change_head(skb, head_room, flags); + bpf_compute_data_pointers(skb); - return 0; + return ret; } static const struct bpf_func_proto bpf_skb_change_head_proto = { @@ -2954,6 +3029,23 @@ static const struct bpf_func_proto bpf_skb_change_head_proto = { .arg3_type = ARG_ANYTHING, }; +BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room, + u64, flags) +{ + int ret = __bpf_skb_change_head(skb, head_room, flags); + + bpf_compute_data_end_sk_skb(skb); + return ret; +} + +static const struct bpf_func_proto sk_skb_change_head_proto = { + .func = sk_skb_change_head, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, +}; static unsigned long xdp_get_metalen(const struct xdp_buff *xdp) { return xdp_data_meta_unsupported(xdp) ? 
0 : @@ -3046,12 +3138,16 @@ static int __bpf_tx_xdp(struct net_device *dev, u32 index) { struct xdp_frame *xdpf; - int sent; + int err, sent; if (!dev->netdev_ops->ndo_xdp_xmit) { return -EOPNOTSUPP; } + err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data); + if (unlikely(err)) + return err; + xdpf = convert_to_xdp_frame(xdp); if (unlikely(!xdpf)) return -EOVERFLOW; @@ -3285,7 +3381,8 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, goto err; } - if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd)))) + err = xdp_ok_fwd_dev(fwd, skb->len); + if (unlikely(err)) goto err; skb->dev = fwd; @@ -4073,8 +4170,9 @@ static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, memcpy(params->smac, dev->dev_addr, ETH_ALEN); params->h_vlan_TCI = 0; params->h_vlan_proto = 0; + params->ifindex = dev->ifindex; - return dev->ifindex; + return 0; } #endif @@ -4098,7 +4196,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, /* verify forwarding is enabled on this interface */ in_dev = __in_dev_get_rcu(dev); if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev))) - return 0; + return BPF_FIB_LKUP_RET_FWD_DISABLED; if (flags & BPF_FIB_LOOKUP_OUTPUT) { fl4.flowi4_iif = 1; @@ -4123,7 +4221,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, tb = fib_get_table(net, tbid); if (unlikely(!tb)) - return 0; + return BPF_FIB_LKUP_RET_NOT_FWDED; err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF); } else { @@ -4135,8 +4233,20 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF); } - if (err || res.type != RTN_UNICAST) - return 0; + if (err) { + /* map fib lookup errors to RTN_ type */ + if (err == -EINVAL) + return BPF_FIB_LKUP_RET_BLACKHOLE; + if (err == -EHOSTUNREACH) + return BPF_FIB_LKUP_RET_UNREACHABLE; + if (err == -EACCES) + return BPF_FIB_LKUP_RET_PROHIBIT; + + return BPF_FIB_LKUP_RET_NOT_FWDED; + } + + if (res.type != RTN_UNICAST) + return BPF_FIB_LKUP_RET_NOT_FWDED; if (res.fi->fib_nhs > 1) fib_select_path(net, &res, &fl4, NULL); @@ -4144,19 +4254,16 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, if (check_mtu) { mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst); if (params->tot_len > mtu) - return 0; + return BPF_FIB_LKUP_RET_FRAG_NEEDED; } nh = &res.fi->fib_nh[res.nh_sel]; /* do not handle lwt encaps right now */ if (nh->nh_lwtstate) - return 0; + return BPF_FIB_LKUP_RET_UNSUPP_LWT; dev = nh->nh_dev; - if (unlikely(!dev)) - return 0; - if (nh->nh_gw) params->ipv4_dst = nh->nh_gw; @@ -4166,10 +4273,10 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, * rcu_read_lock_bh is not needed here */ neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst); - if (neigh) - return bpf_fib_set_fwd_params(params, neigh, dev); + if (!neigh) + return BPF_FIB_LKUP_RET_NO_NEIGH; - return 0; + return bpf_fib_set_fwd_params(params, neigh, dev); } #endif @@ -4190,7 +4297,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, /* link local addresses are never forwarded */ if (rt6_need_strict(dst) || rt6_need_strict(src)) - return 0; + return BPF_FIB_LKUP_RET_NOT_FWDED; dev = dev_get_by_index_rcu(net, params->ifindex); if (unlikely(!dev)) @@ -4198,7 +4305,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, idev = __in6_dev_get_safely(dev); if (unlikely(!idev || !net->ipv6.devconf_all->forwarding)) - return 0; + 
return BPF_FIB_LKUP_RET_FWD_DISABLED; if (flags & BPF_FIB_LOOKUP_OUTPUT) { fl6.flowi6_iif = 1; @@ -4225,7 +4332,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, tb = ipv6_stub->fib6_get_table(net, tbid); if (unlikely(!tb)) - return 0; + return BPF_FIB_LKUP_RET_NOT_FWDED; f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict); } else { @@ -4238,11 +4345,23 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, } if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry)) - return 0; + return BPF_FIB_LKUP_RET_NOT_FWDED; + + if (unlikely(f6i->fib6_flags & RTF_REJECT)) { + switch (f6i->fib6_type) { + case RTN_BLACKHOLE: + return BPF_FIB_LKUP_RET_BLACKHOLE; + case RTN_UNREACHABLE: + return BPF_FIB_LKUP_RET_UNREACHABLE; + case RTN_PROHIBIT: + return BPF_FIB_LKUP_RET_PROHIBIT; + default: + return BPF_FIB_LKUP_RET_NOT_FWDED; + } + } - if (unlikely(f6i->fib6_flags & RTF_REJECT || - f6i->fib6_type != RTN_UNICAST)) - return 0; + if (f6i->fib6_type != RTN_UNICAST) + return BPF_FIB_LKUP_RET_NOT_FWDED; if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0) f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6, @@ -4252,11 +4371,11 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, if (check_mtu) { mtu = ipv6_stub->ip6_mtu_from_fib6(f6i, dst, src); if (params->tot_len > mtu) - return 0; + return BPF_FIB_LKUP_RET_FRAG_NEEDED; } if (f6i->fib6_nh.nh_lwtstate) - return 0; + return BPF_FIB_LKUP_RET_UNSUPP_LWT; if (f6i->fib6_flags & RTF_GATEWAY) *dst = f6i->fib6_nh.nh_gw; @@ -4270,10 +4389,10 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, */ neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128, ndisc_hashfn, dst, dev); - if (neigh) - return bpf_fib_set_fwd_params(params, neigh, dev); + if (!neigh) + return BPF_FIB_LKUP_RET_NO_NEIGH; - return 0; + return bpf_fib_set_fwd_params(params, neigh, dev); } #endif @@ -4315,7 +4434,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, struct bpf_fib_lookup *, params, int, plen, u32, flags) { struct net *net = dev_net(skb->dev); - int index = -EAFNOSUPPORT; + int rc = -EAFNOSUPPORT; if (plen < sizeof(*params)) return -EINVAL; @@ -4326,25 +4445,25 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, switch (params->family) { #if IS_ENABLED(CONFIG_INET) case AF_INET: - index = bpf_ipv4_fib_lookup(net, params, flags, false); + rc = bpf_ipv4_fib_lookup(net, params, flags, false); break; #endif #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: - index = bpf_ipv6_fib_lookup(net, params, flags, false); + rc = bpf_ipv6_fib_lookup(net, params, flags, false); break; #endif } - if (index > 0) { + if (!rc) { struct net_device *dev; - dev = dev_get_by_index_rcu(net, index); + dev = dev_get_by_index_rcu(net, params->ifindex); if (!is_skb_forwardable(dev, skb)) - index = 0; + rc = BPF_FIB_LKUP_RET_FRAG_NEEDED; } - return index; + return rc; } static const struct bpf_func_proto bpf_skb_fib_lookup_proto = { @@ -4417,10 +4536,10 @@ static const struct bpf_func_proto bpf_lwt_push_encap_proto = { .arg4_type = ARG_CONST_SIZE }; +#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset, const void *, from, u32, len) { -#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) struct seg6_bpf_srh_state *srh_state = this_cpu_ptr(&seg6_bpf_srh_states); void *srh_tlvs, *srh_end, *ptr; @@ -4446,9 +4565,6 @@ BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset, memcpy(skb->data + offset, from, 
len); return 0; -#else /* CONFIG_IPV6_SEG6_BPF */ - return -EOPNOTSUPP; -#endif } static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = { @@ -4464,7 +4580,6 @@ static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = { BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb, u32, action, void *, param, u32, param_len) { -#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) struct seg6_bpf_srh_state *srh_state = this_cpu_ptr(&seg6_bpf_srh_states); struct ipv6_sr_hdr *srh; @@ -4512,9 +4627,6 @@ BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb, default: return -EINVAL; } -#else /* CONFIG_IPV6_SEG6_BPF */ - return -EOPNOTSUPP; -#endif } static const struct bpf_func_proto bpf_lwt_seg6_action_proto = { @@ -4530,7 +4642,6 @@ static const struct bpf_func_proto bpf_lwt_seg6_action_proto = { BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset, s32, len) { -#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) struct seg6_bpf_srh_state *srh_state = this_cpu_ptr(&seg6_bpf_srh_states); void *srh_end, *srh_tlvs, *ptr; @@ -4574,9 +4685,6 @@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset, srh_state->hdrlen += len; srh_state->valid = 0; return 0; -#else /* CONFIG_IPV6_SEG6_BPF */ - return -EOPNOTSUPP; -#endif } static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = { @@ -4587,6 +4695,7 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = { .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; +#endif /* CONFIG_IPV6_SEG6_BPF */ bool bpf_helper_changes_pkt_data(void *func) { @@ -4595,9 +4704,12 @@ bool bpf_helper_changes_pkt_data(void *func) func == bpf_skb_store_bytes || func == bpf_skb_change_proto || func == bpf_skb_change_head || + func == sk_skb_change_head || func == bpf_skb_change_tail || + func == sk_skb_change_tail || func == bpf_skb_adjust_room || func == bpf_skb_pull_data || + func == sk_skb_pull_data || func == bpf_clone_redirect || func == bpf_l3_csum_replace || func == bpf_l4_csum_replace || @@ -4605,11 +4717,12 @@ bool bpf_helper_changes_pkt_data(void *func) func == bpf_xdp_adjust_meta || func == bpf_msg_pull_data || func == bpf_xdp_adjust_tail || - func == bpf_lwt_push_encap || +#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) func == bpf_lwt_seg6_store_bytes || func == bpf_lwt_seg6_adjust_srh || - func == bpf_lwt_seg6_action - ) + func == bpf_lwt_seg6_action || +#endif + func == bpf_lwt_push_encap) return true; return false; @@ -4849,11 +4962,11 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_skb_load_bytes: return &bpf_skb_load_bytes_proto; case BPF_FUNC_skb_pull_data: - return &bpf_skb_pull_data_proto; + return &sk_skb_pull_data_proto; case BPF_FUNC_skb_change_tail: - return &bpf_skb_change_tail_proto; + return &sk_skb_change_tail_proto; case BPF_FUNC_skb_change_head: - return &bpf_skb_change_head_proto; + return &sk_skb_change_head_proto; case BPF_FUNC_get_socket_cookie: return &bpf_get_socket_cookie_proto; case BPF_FUNC_get_socket_uid: @@ -4944,12 +5057,14 @@ static const struct bpf_func_proto * lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { +#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) case BPF_FUNC_lwt_seg6_store_bytes: return &bpf_lwt_seg6_store_bytes_proto; case BPF_FUNC_lwt_seg6_action: return &bpf_lwt_seg6_action_proto; case BPF_FUNC_lwt_seg6_adjust_srh: return &bpf_lwt_seg6_adjust_srh_proto; +#endif default: return lwt_out_func_proto(func_id, prog); } diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index 
b2b2323bdc84..188d693cb251 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -77,8 +77,20 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type, d->lock = lock; spin_lock_bh(lock); } - if (d->tail) - return gnet_stats_copy(d, type, NULL, 0, padattr); + if (d->tail) { + int ret = gnet_stats_copy(d, type, NULL, 0, padattr); + + /* The initial attribute added in gnet_stats_copy() may be + * preceded by a padding attribute, in which case d->tail will + * end up pointing at the padding instead of the real attribute. + * Fix this so gnet_stats_finish_copy() adjusts the length of + * the right attribute. + */ + if (ret == 0 && d->tail->nla_type == padattr) + d->tail = (struct nlattr *)((char *)d->tail + + NLA_ALIGN(d->tail->nla_len)); + return ret; + } return 0; } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index c642304f178c..8e51f8555e11 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -858,6 +858,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) n->cloned = 1; n->nohdr = 0; n->peeked = 0; + C(pfmemalloc); n->destructor = NULL; C(tail); C(end); @@ -5276,8 +5277,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, if (npages >= 1 << order) { page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP | - __GFP_NOWARN | - __GFP_NORETRY, + __GFP_NOWARN, order); if (page) goto fill_page; diff --git a/net/core/sock.c b/net/core/sock.c index bcc41829a16d..9e8f65585b81 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3243,7 +3243,8 @@ static int req_prot_init(const struct proto *prot) rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name, rsk_prot->obj_size, 0, - prot->slab_flags, NULL); + SLAB_ACCOUNT | prot->slab_flags, + NULL); if (!rsk_prot->slab) { pr_crit("%s: Can't create request sock SLAB cache!\n", @@ -3258,7 +3259,8 @@ int proto_register(struct proto *prot, int alloc_slab) if (alloc_slab) { prot->slab = kmem_cache_create_usercopy(prot->name, prot->obj_size, 0, - SLAB_HWCACHE_ALIGN | prot->slab_flags, + SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | + prot->slab_flags, prot->useroffset, prot->usersize, NULL); @@ -3281,6 +3283,7 @@ int proto_register(struct proto *prot, int alloc_slab) kmem_cache_create(prot->twsk_prot->twsk_slab_name, prot->twsk_prot->twsk_obj_size, 0, + SLAB_ACCOUNT | prot->slab_flags, NULL); if (prot->twsk_prot->twsk_slab == NULL) diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 0ea2ee56ac1b..f91e3816806b 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -316,7 +316,8 @@ int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len); void dccp_shutdown(struct sock *sk, int how); int inet_dccp_listen(struct socket *sock, int backlog); -__poll_t dccp_poll_mask(struct socket *sock, __poll_t events); +__poll_t dccp_poll(struct file *file, struct socket *sock, + poll_table *wait); int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); void dccp_req_err(struct sock *sk, u64 seq); diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index a9e478cd3787..b08feb219b44 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -984,7 +984,7 @@ static const struct proto_ops inet_dccp_ops = { .accept = inet_accept, .getname = inet_getname, /* FIXME: work on tcp_poll to rename it to inet_csk_poll */ - .poll_mask = dccp_poll_mask, + .poll = dccp_poll, .ioctl = inet_ioctl, /* FIXME: work on inet_listen to rename it to sock_common_listen */ .listen = inet_dccp_listen, diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 
17fc4e0166ba..6344f1b18a6a 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -1070,7 +1070,7 @@ static const struct proto_ops inet6_dccp_ops = { .socketpair = sock_no_socketpair, .accept = inet_accept, .getname = inet6_getname, - .poll_mask = dccp_poll_mask, + .poll = dccp_poll, .ioctl = inet6_ioctl, .listen = inet_dccp_listen, .shutdown = inet_shutdown, diff --git a/net/dccp/proto.c b/net/dccp/proto.c index ca21c1c76da0..0d56e36a6db7 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -312,11 +312,20 @@ int dccp_disconnect(struct sock *sk, int flags) EXPORT_SYMBOL_GPL(dccp_disconnect); -__poll_t dccp_poll_mask(struct socket *sock, __poll_t events) +/* + * Wait for a DCCP event. + * + * Note that we don't need to lock the socket, as the upper poll layers + * take care of normal races (between the test and the event) and we don't + * go look at any of the socket buffers directly. + */ +__poll_t dccp_poll(struct file *file, struct socket *sock, + poll_table *wait) { __poll_t mask; struct sock *sk = sock->sk; + sock_poll_wait(file, sk_sleep(sk), wait); if (sk->sk_state == DCCP_LISTEN) return inet_csk_listen_poll(sk); @@ -358,7 +367,7 @@ __poll_t dccp_poll_mask(struct socket *sock, __poll_t events) return mask; } -EXPORT_SYMBOL_GPL(dccp_poll_mask); +EXPORT_SYMBOL_GPL(dccp_poll); int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg) { diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 9a686d890bfa..7d6ff983ba2c 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -1207,11 +1207,11 @@ static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int peer) } -static __poll_t dn_poll_mask(struct socket *sock, __poll_t events) +static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct dn_scp *scp = DN_SK(sk); - __poll_t mask = datagram_poll_mask(sock, events); + __poll_t mask = datagram_poll(file, sock, wait); if (!skb_queue_empty(&scp->other_receive_queue)) mask |= EPOLLRDBAND; @@ -2331,7 +2331,7 @@ static const struct proto_ops dn_proto_ops = { .socketpair = sock_no_socketpair, .accept = dn_accept, .getname = dn_getname, - .poll_mask = dn_poll_mask, + .poll = dn_poll, .ioctl = dn_ioctl, .listen = dn_listen, .shutdown = dn_shutdown, diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index 40c851693f77..0c9478b91fa5 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c @@ -86,35 +86,39 @@ dns_resolver_preparse(struct key_preparsed_payload *prep) opt++; kdebug("options: '%s'", opt); do { + int opt_len, opt_nlen; const char *eq; - int opt_len, opt_nlen, opt_vlen, tmp; + char optval[128]; next_opt = memchr(opt, '#', end - opt) ?: end; opt_len = next_opt - opt; - if (opt_len <= 0 || opt_len > 128) { + if (opt_len <= 0 || opt_len > sizeof(optval)) { pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n", opt_len); return -EINVAL; } - eq = memchr(opt, '=', opt_len) ?: end; - opt_nlen = eq - opt; - eq++; - opt_vlen = next_opt - eq; /* will be -1 if no value */ + eq = memchr(opt, '=', opt_len); + if (eq) { + opt_nlen = eq - opt; + eq++; + memcpy(optval, eq, next_opt - eq); + optval[next_opt - eq] = '\0'; + } else { + opt_nlen = opt_len; + optval[0] = '\0'; + } - tmp = opt_vlen >= 0 ? 
opt_vlen : 0; - kdebug("option '%*.*s' val '%*.*s'", - opt_nlen, opt_nlen, opt, tmp, tmp, eq); + kdebug("option '%*.*s' val '%s'", + opt_nlen, opt_nlen, opt, optval); /* see if it's an error number representing a DNS error * that's to be recorded as the result in this key */ if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 && memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) { kdebug("dns error number option"); - if (opt_vlen <= 0) - goto bad_option_value; - ret = kstrtoul(eq, 10, &derrno); + ret = kstrtoul(optval, 10, &derrno); if (ret < 0) goto bad_option_value; diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c index 275449b0d633..3297e7fa9945 100644 --- a/net/ieee802154/6lowpan/core.c +++ b/net/ieee802154/6lowpan/core.c @@ -90,12 +90,18 @@ static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n) return 0; } +static int lowpan_get_iflink(const struct net_device *dev) +{ + return lowpan_802154_dev(dev)->wdev->ifindex; +} + static const struct net_device_ops lowpan_netdev_ops = { .ndo_init = lowpan_dev_init, .ndo_start_xmit = lowpan_xmit, .ndo_open = lowpan_open, .ndo_stop = lowpan_stop, .ndo_neigh_construct = lowpan_neigh_construct, + .ndo_get_iflink = lowpan_get_iflink, }; static void lowpan_setup(struct net_device *ldev) diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index a0768d2759b8..a60658c85a9a 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -423,7 +423,7 @@ static const struct proto_ops ieee802154_raw_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = ieee802154_sock_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, @@ -969,7 +969,7 @@ static const struct proto_ops ieee802154_dgram_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = ieee802154_sock_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 15e125558c76..b403499fdabe 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -986,7 +986,7 @@ const struct proto_ops inet_stream_ops = { .socketpair = sock_no_socketpair, .accept = inet_accept, .getname = inet_getname, - .poll_mask = tcp_poll_mask, + .poll = tcp_poll, .ioctl = inet_ioctl, .listen = inet_listen, .shutdown = inet_shutdown, @@ -1021,7 +1021,7 @@ const struct proto_ops inet_dgram_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = inet_getname, - .poll_mask = udp_poll_mask, + .poll = udp_poll, .ioctl = inet_ioctl, .listen = sock_no_listen, .shutdown = inet_shutdown, @@ -1042,7 +1042,7 @@ EXPORT_SYMBOL(inet_dgram_ops); /* * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without - * udp_poll_mask + * udp_poll */ static const struct proto_ops inet_sockraw_ops = { .family = PF_INET, @@ -1053,7 +1053,7 @@ static const struct proto_ops inet_sockraw_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = inet_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = inet_ioctl, .listen = sock_no_listen, .shutdown = inet_shutdown, diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index b21833651394..e46cdd310e5f 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -300,6 +300,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb) if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) { 
struct flowi4 fl4 = { .flowi4_iif = LOOPBACK_IFINDEX, + .flowi4_oif = l3mdev_master_ifindex_rcu(dev), .daddr = ip_hdr(skb)->saddr, .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), .flowi4_scope = scope, diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 1540db65241a..c9ec1603666b 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -448,9 +448,7 @@ next_proto: out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; - skb_gro_remcsum_cleanup(skb, &grc); - skb->remcsum_offload = 0; + skb_gro_flush_final_remcsum(skb, pp, flush, &grc); return pp; } diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 1859c473b21a..6a7d980105f6 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -223,7 +223,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 85b617b655bc..b3c899a630a0 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1200,13 +1200,14 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im) spin_lock_bh(&im->lock); if (pmc) { im->interface = pmc->interface; - im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; im->sfmode = pmc->sfmode; if (pmc->sfmode == MCAST_INCLUDE) { im->tomb = pmc->tomb; im->sources = pmc->sources; for (psf = im->sources; psf; psf = psf->sf_next) - psf->sf_crcount = im->crcount; + psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; + } else { + im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; } in_dev_put(pmc->interface); kfree(pmc); @@ -1288,7 +1289,7 @@ static void igmp_group_dropped(struct ip_mc_list *im) #endif } -static void igmp_group_added(struct ip_mc_list *im) +static void igmp_group_added(struct ip_mc_list *im, unsigned int mode) { struct in_device *in_dev = im->interface; #ifdef CONFIG_IP_MULTICAST @@ -1316,7 +1317,13 @@ static void igmp_group_added(struct ip_mc_list *im) } /* else, v3 */ - im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; + /* Based on RFC3376 5.1, for newly added INCLUDE SSM, we should + * not send filter-mode change record as the mode should be from + * IN() to IN(A). + */ + if (mode == MCAST_EXCLUDE) + im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; + igmp_ifc_event(in_dev); #endif } @@ -1381,8 +1388,7 @@ static void ip_mc_hash_remove(struct in_device *in_dev, /* * A socket has joined a multicast group on device dev. 
*/ - -void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) +void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, unsigned int mode) { struct ip_mc_list *im; #ifdef CONFIG_IP_MULTICAST @@ -1394,7 +1400,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) for_each_pmc_rtnl(in_dev, im) { if (im->multiaddr == addr) { im->users++; - ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0); + ip_mc_add_src(in_dev, &addr, mode, 0, NULL, 0); goto out; } } @@ -1408,8 +1414,8 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) in_dev_hold(in_dev); im->multiaddr = addr; /* initial mode is (EX, empty) */ - im->sfmode = MCAST_EXCLUDE; - im->sfcount[MCAST_EXCLUDE] = 1; + im->sfmode = mode; + im->sfcount[mode] = 1; refcount_set(&im->refcnt, 1); spin_lock_init(&im->lock); #ifdef CONFIG_IP_MULTICAST @@ -1426,12 +1432,17 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) #ifdef CONFIG_IP_MULTICAST igmpv3_del_delrec(in_dev, im); #endif - igmp_group_added(im); + igmp_group_added(im, mode); if (!in_dev->dead) ip_rt_multicast_event(in_dev); out: return; } + +void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) +{ + __ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE); +} EXPORT_SYMBOL(ip_mc_inc_group); static int ip_mc_check_iphdr(struct sk_buff *skb) @@ -1688,7 +1699,7 @@ void ip_mc_remap(struct in_device *in_dev) #ifdef CONFIG_IP_MULTICAST igmpv3_del_delrec(in_dev, pmc); #endif - igmp_group_added(pmc); + igmp_group_added(pmc, pmc->sfmode); } } @@ -1751,7 +1762,7 @@ void ip_mc_up(struct in_device *in_dev) #ifdef CONFIG_IP_MULTICAST igmpv3_del_delrec(in_dev, pmc); #endif - igmp_group_added(pmc); + igmp_group_added(pmc, pmc->sfmode); } } @@ -2130,8 +2141,8 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc) /* Join a multicast group */ - -int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr) +static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr, + unsigned int mode) { __be32 addr = imr->imr_multiaddr.s_addr; struct ip_mc_socklist *iml, *i; @@ -2172,15 +2183,30 @@ int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr) memcpy(&iml->multi, imr, sizeof(*imr)); iml->next_rcu = inet->mc_list; iml->sflist = NULL; - iml->sfmode = MCAST_EXCLUDE; + iml->sfmode = mode; rcu_assign_pointer(inet->mc_list, iml); - ip_mc_inc_group(in_dev, addr); + __ip_mc_inc_group(in_dev, addr, mode); err = 0; done: return err; } + +/* Join ASM (Any-Source Multicast) group + */ +int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr) +{ + return __ip_mc_join_group(sk, imr, MCAST_EXCLUDE); +} EXPORT_SYMBOL(ip_mc_join_group); +/* Join SSM (Source-Specific Multicast) group + */ +int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr, + unsigned int mode) +{ + return __ip_mc_join_group(sk, imr, mode); +} + static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, struct in_device *in_dev) { diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index c9e35b81d093..1e4cf3ab560f 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -90,7 +90,7 @@ static void inet_frags_free_cb(void *ptr, void *arg) void inet_frags_exit_net(struct netns_frags *nf) { - nf->low_thresh = 0; /* prevent creation of new frags */ + nf->high_thresh = 0; /* prevent creation of new frags */ rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL); } diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index fc32fdbeefa6..64c76dcf7386 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -984,7 +984,7 @@ static 
int do_ip_setsockopt(struct sock *sk, int level, mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr; mreq.imr_address.s_addr = mreqs.imr_interface; mreq.imr_ifindex = 0; - err = ip_mc_join_group(sk, &mreq); + err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE); if (err && err != -EADDRINUSE) break; omode = MCAST_INCLUDE; @@ -1061,7 +1061,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, mreq.imr_multiaddr = psin->sin_addr; mreq.imr_address.s_addr = 0; mreq.imr_ifindex = greqs.gsr_interface; - err = ip_mc_join_group(sk, &mreq); + err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE); if (err && err != -EADDRINUSE) break; greqs.gsr_interface = mreq.imr_ifindex; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index ca0dad90803a..e77872c93c20 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1898,6 +1898,7 @@ static struct xt_match ipt_builtin_mt[] __read_mostly = { .checkentry = icmp_checkentry, .proto = IPPROTO_ICMP, .family = NFPROTO_IPV4, + .me = THIS_MODULE, }, }; diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c index 805e83ec3ad9..164714104965 100644 --- a/net/ipv4/netfilter/nf_tproxy_ipv4.c +++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c @@ -37,7 +37,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb, * to a listener socket if there's one */ struct sock *sk2; - sk2 = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol, + sk2 = nf_tproxy_get_sock_v4(net, skb, iph->protocol, iph->saddr, laddr ? laddr : iph->daddr, hp->source, lport ? lport : hp->dest, skb->dev, NF_TPROXY_LOOKUP_LISTENER); @@ -71,7 +71,7 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr) EXPORT_SYMBOL_GPL(nf_tproxy_laddr4); struct sock * -nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp, +nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, const u8 protocol, const __be32 saddr, const __be32 daddr, const __be16 sport, const __be16 dport, @@ -79,16 +79,21 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp, const enum nf_tproxy_lookup_t lookup_type) { struct sock *sk; - struct tcphdr *tcph; switch (protocol) { - case IPPROTO_TCP: + case IPPROTO_TCP: { + struct tcphdr _hdr, *hp; + + hp = skb_header_pointer(skb, ip_hdrlen(skb), + sizeof(struct tcphdr), &_hdr); + if (hp == NULL) + return NULL; + switch (lookup_type) { case NF_TPROXY_LOOKUP_LISTENER: - tcph = hp; sk = inet_lookup_listener(net, &tcp_hashinfo, skb, ip_hdrlen(skb) + - __tcp_hdrlen(tcph), + __tcp_hdrlen(hp), saddr, sport, daddr, dport, in->ifindex, 0); @@ -110,6 +115,7 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp, BUG(); } break; + } case IPPROTO_UDP: sk = udp4_lib_lookup(net, saddr, sport, daddr, dport, in->ifindex); diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index d06247ba08b2..5fa335fd3852 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -189,8 +189,9 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write, if (write && ret == 0) { low = make_kgid(user_ns, urange[0]); high = make_kgid(user_ns, urange[1]); - if (!gid_valid(low) || !gid_valid(high) || - (urange[1] < urange[0]) || gid_lt(high, low)) { + if (!gid_valid(low) || !gid_valid(high)) + return -EINVAL; + if (urange[1] < urange[0] || gid_lt(high, low)) { low = make_kgid(&init_user_ns, 1); high = make_kgid(&init_user_ns, 0); } @@ -265,8 +266,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write, 
ipv4.sysctl_tcp_fastopen); struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) }; struct tcp_fastopen_context *ctxt; - int ret; u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */ + __le32 key[4]; + int ret, i; tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL); if (!tbl.data) @@ -275,11 +277,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write, rcu_read_lock(); ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx); if (ctxt) - memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); + memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); else - memset(user_key, 0, sizeof(user_key)); + memset(key, 0, sizeof(key)); rcu_read_unlock(); + for (i = 0; i < ARRAY_SIZE(key); i++) + user_key[i] = le32_to_cpu(key[i]); + snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x", user_key[0], user_key[1], user_key[2], user_key[3]); ret = proc_dostring(&tbl, write, buffer, lenp, ppos); @@ -290,13 +295,17 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write, ret = -EINVAL; goto bad_key; } - tcp_fastopen_reset_cipher(net, NULL, user_key, + + for (i = 0; i < ARRAY_SIZE(user_key); i++) + key[i] = cpu_to_le32(user_key[i]); + + tcp_fastopen_reset_cipher(net, NULL, key, TCP_FASTOPEN_KEY_LENGTH); } bad_key: pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n", - user_key[0], user_key[1], user_key[2], user_key[3], + user_key[0], user_key[1], user_key[2], user_key[3], (char *)tbl.data, ret); kfree(tbl.data); return ret; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 141acd92e58a..4491faf83f4f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -494,21 +494,32 @@ static inline bool tcp_stream_is_readable(const struct tcp_sock *tp, } /* - * Socket is not locked. We are protected from async events by poll logic and - * correct handling of state changes made by other threads is impossible in - * any case. + * Wait for a TCP event. + * + * Note that we don't need to lock the socket, as the upper poll layers + * take care of normal races (between the test and the event) and we don't + * go look at any of the socket buffers directly. */ -__poll_t tcp_poll_mask(struct socket *sock, __poll_t events) +__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) { + __poll_t mask; struct sock *sk = sock->sk; const struct tcp_sock *tp = tcp_sk(sk); - __poll_t mask = 0; int state; + sock_poll_wait(file, sk_sleep(sk), wait); + state = inet_sk_state_load(sk); if (state == TCP_LISTEN) return inet_csk_listen_poll(sk); + /* Socket is not locked. We are protected from async events + * by poll logic and correct handling of state changes + * made by other threads is impossible in any case. + */ + + mask = 0; + /* * EPOLLHUP is certainly not done right. But poll() doesn't * have a notion of HUP in just one direction, and for a @@ -589,7 +600,7 @@ __poll_t tcp_poll_mask(struct socket *sock, __poll_t events) return mask; } -EXPORT_SYMBOL(tcp_poll_mask); +EXPORT_SYMBOL(tcp_poll); int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) { @@ -1987,7 +1998,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, * shouldn't happen. 
*/ if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), - "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n", + "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n", *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags)) break; @@ -2002,7 +2013,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) goto found_fin_ok; WARN(!(flags & MSG_PEEK), - "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n", + "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n", *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); } @@ -2551,6 +2562,8 @@ int tcp_disconnect(struct sock *sk, int flags) tcp_clear_xmit_timers(sk); __skb_queue_purge(&sk->sk_receive_queue); + tp->copied_seq = tp->rcv_nxt; + tp->urg_data = 0; tcp_write_queue_purge(sk); tcp_fastopen_active_disable_ofo_check(sk); skb_rbtree_purge(&tp->out_of_order_queue); @@ -2810,14 +2823,17 @@ static int do_tcp_setsockopt(struct sock *sk, int level, case TCP_REPAIR: if (!tcp_can_repair_sock(sk)) err = -EPERM; - else if (val == 1) { + else if (val == TCP_REPAIR_ON) { tp->repair = 1; sk->sk_reuse = SK_FORCE_REUSE; tp->repair_queue = TCP_NO_QUEUE; - } else if (val == 0) { + } else if (val == TCP_REPAIR_OFF) { tp->repair = 0; sk->sk_reuse = SK_NO_REUSE; tcp_send_window_probe(sk); + } else if (val == TCP_REPAIR_OFF_NO_WP) { + tp->repair = 0; + sk->sk_reuse = SK_NO_REUSE; } else err = -EINVAL; @@ -3709,8 +3725,7 @@ int tcp_abort(struct sock *sk, int err) struct request_sock *req = inet_reqsk(sk); local_bh_disable(); - inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, - req); + inet_csk_reqsk_queue_drop(req->rsk_listener, req); local_bh_enable(); return 0; } diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 5f5e5936760e..5869f89ca656 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c @@ -55,7 +55,6 @@ struct dctcp { u32 dctcp_alpha; u32 next_seq; u32 ce_state; - u32 delayed_ack_reserved; u32 loss_cwnd; }; @@ -96,7 +95,6 @@ static void dctcp_init(struct sock *sk) ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); - ca->delayed_ack_reserved = 0; ca->loss_cwnd = 0; ca->ce_state = 0; @@ -134,7 +132,8 @@ static void dctcp_ce_state_0_to_1(struct sock *sk) /* State has changed from CE=0 to CE=1 and delayed * ACK has not sent yet. */ - if (!ca->ce_state && ca->delayed_ack_reserved) { + if (!ca->ce_state && + inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { u32 tmp_rcv_nxt; /* Save current rcv_nxt. */ @@ -164,7 +163,8 @@ static void dctcp_ce_state_1_to_0(struct sock *sk) /* State has changed from CE=1 to CE=0 and delayed * ACK has not sent yet. */ - if (ca->ce_state && ca->delayed_ack_reserved) { + if (ca->ce_state && + inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { u32 tmp_rcv_nxt; /* Save current rcv_nxt. */ @@ -248,25 +248,6 @@ static void dctcp_state(struct sock *sk, u8 new_state) } } -static void dctcp_update_ack_reserved(struct sock *sk, enum tcp_ca_event ev) -{ - struct dctcp *ca = inet_csk_ca(sk); - - switch (ev) { - case CA_EVENT_DELAYED_ACK: - if (!ca->delayed_ack_reserved) - ca->delayed_ack_reserved = 1; - break; - case CA_EVENT_NON_DELAYED_ACK: - if (ca->delayed_ack_reserved) - ca->delayed_ack_reserved = 0; - break; - default: - /* Don't care for the rest. 
*/ - break; - } -} - static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) { switch (ev) { @@ -276,10 +257,6 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) case CA_EVENT_ECN_NO_CE: dctcp_ce_state_1_to_0(sk); break; - case CA_EVENT_DELAYED_ACK: - case CA_EVENT_NON_DELAYED_ACK: - dctcp_update_ack_reserved(sk, ev); - break; default: /* Don't care for the rest. */ break; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 355d3dffd021..8e5522c6833a 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -265,7 +265,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) * it is probably a retransmit. */ if (tp->ecn_flags & TCP_ECN_SEEN) - tcp_enter_quickack_mode(sk, 1); + tcp_enter_quickack_mode(sk, 2); break; case INET_ECN_CE: if (tcp_ca_needs_ecn(sk)) @@ -273,7 +273,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { /* Better not delay acks, sender can have a very low cwnd */ - tcp_enter_quickack_mode(sk, 1); + tcp_enter_quickack_mode(sk, 2); tp->ecn_flags |= TCP_ECN_DEMAND_CWR; } tp->ecn_flags |= TCP_ECN_SEEN; @@ -3181,6 +3181,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, if (tcp_is_reno(tp)) { tcp_remove_reno_sacks(sk, pkts_acked); + + /* If any of the cumulatively ACKed segments was + * retransmitted, non-SACK case cannot confirm that + * progress was due to original transmission due to + * lack of TCPCB_SACKED_ACKED bits even if some of + * the packets may have been never retransmitted. + */ + if (flag & FLAG_RETRANS_DATA_ACKED) + flag &= ~FLAG_ORIG_SACK_ACKED; } else { int delta; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index bea17f1e8302..3b2711e33e4c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -156,11 +156,24 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) */ if (tcptw->tw_ts_recent_stamp && (!twp || (reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) { - tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; - if (tp->write_seq == 0) - tp->write_seq = 1; - tp->rx_opt.ts_recent = tcptw->tw_ts_recent; - tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp; + /* In case of repair and re-using TIME-WAIT sockets we still + * want to be sure that it is safe as above but honor the + * sequence numbers and time stamps set as part of the repair + * process. + * + * Without this check re-using a TIME-WAIT socket with TCP + * repair would accumulate a -1 on the repair assigned + * sequence number. The first time it is reused the sequence + * is -1, the second time -2, etc. This fixes that issue + * without appearing to create any others. 
+ */ + if (likely(!tp->repair)) { + tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; + if (tp->write_seq == 0) + tp->write_seq = 1; + tp->rx_opt.ts_recent = tcptw->tw_ts_recent; + tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp; + } sock_hold(sktw); return 1; } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 8e08b409c71e..00e5a300ddb9 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3523,8 +3523,6 @@ void tcp_send_delayed_ack(struct sock *sk) int ato = icsk->icsk_ack.ato; unsigned long timeout; - tcp_ca_event(sk, CA_EVENT_DELAYED_ACK); - if (ato > TCP_DELACK_MIN) { const struct tcp_sock *tp = tcp_sk(sk); int max_ato = HZ / 2; @@ -3581,8 +3579,6 @@ void tcp_send_ack(struct sock *sk) if (sk->sk_state == TCP_CLOSE) return; - tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK); - /* We are not putting this on the write queue, so * tcp_transmit_skb() will set the ownership to this * sock. diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 9bb27df4dac5..24e116ddae79 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2591,7 +2591,7 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname, * udp_poll - wait for a UDP event. * @file - file struct * @sock - socket - * @events - events to wait for + * @wait - poll table * * This is same as datagram poll, except for the special case of * blocking sockets. If application is using a blocking fd @@ -2600,23 +2600,23 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname, * but then block when reading it. Add special case code * to work around these arguably broken applications. */ -__poll_t udp_poll_mask(struct socket *sock, __poll_t events) +__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait) { - __poll_t mask = datagram_poll_mask(sock, events); + __poll_t mask = datagram_poll(file, sock, wait); struct sock *sk = sock->sk; if (!skb_queue_empty(&udp_sk(sk)->reader_queue)) mask |= EPOLLIN | EPOLLRDNORM; /* Check for false positives due to checksum errors */ - if ((mask & EPOLLRDNORM) && !(sock->file->f_flags & O_NONBLOCK) && + if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) && !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1) mask &= ~(EPOLLIN | EPOLLRDNORM); return mask; } -EXPORT_SYMBOL(udp_poll_mask); +EXPORT_SYMBOL(udp_poll); int udp_abort(struct sock *sk, int err) { diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 92dc9e5a7ff3..69c54540d5b4 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -394,7 +394,7 @@ unflush: out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } EXPORT_SYMBOL(udp_gro_receive); diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 0eff75525da1..b3885ca22d6f 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -108,6 +108,7 @@ config IPV6_MIP6 config IPV6_ILA tristate "IPv6: Identifier Locator Addressing (ILA)" depends on NETFILTER + select DST_CACHE select LWTUNNEL ---help--- Support for IPv6 Identifier Locator Addressing (ILA). 
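The proc_tcp_fastopen_key hunk in net/ipv4/sysctl_net_ipv4.c above stops interpreting the key bytes as host-order words: the key is kept as __le32 words and converted with le32_to_cpu()/cpu_to_le32() only when the hex string is formatted or parsed, so the same key reads back identically regardless of host byte order. Below is a minimal userspace sketch of that normalization, not kernel code; get_le32() and put_le32() are local helpers standing in for le32_to_cpu() and cpu_to_le32().

/* Sketch: format a 16-byte key stored as little-endian 32-bit words in a
 * host-order-independent way, mirroring the proc_tcp_fastopen_key change.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const uint8_t *p)
{
    return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
           (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static void put_le32(uint8_t *p, uint32_t v)
{
    p[0] = v & 0xff;
    p[1] = (v >> 8) & 0xff;
    p[2] = (v >> 16) & 0xff;
    p[3] = (v >> 24) & 0xff;
}

int main(void)
{
    uint8_t key[16];            /* raw key bytes, as stored */
    uint32_t user_key[4];       /* host-order words for printing */
    unsigned int in[4];
    char buf[64];
    int i;

    for (i = 0; i < 16; i++)    /* example key: 00 01 02 ... 0f */
        key[i] = (uint8_t)i;

    for (i = 0; i < 4; i++)     /* little-endian words -> host order */
        user_key[i] = get_le32(key + 4 * i);

    snprintf(buf, sizeof(buf), "%08x-%08x-%08x-%08x",
             (unsigned int)user_key[0], (unsigned int)user_key[1],
             (unsigned int)user_key[2], (unsigned int)user_key[3]);
    printf("%s\n", buf);        /* "03020100-07060504-..." on any host */

    /* writes go the other way: parse host-order words, store them LE */
    if (sscanf(buf, "%x-%x-%x-%x", &in[0], &in[1], &in[2], &in[3]) == 4)
        for (i = 0; i < 4; i++)
            put_le32(key + 4 * i, in[i]);
    return 0;
}

Before the change, the key bytes were copied straight into host-order words and printed with "%08x", so the displayed string could differ between endiannesses for the same underlying key; pinning the stored words to little-endian removes that ambiguity.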
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index c134286d6a41..91580c62bb86 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -4528,6 +4528,7 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, u32 flags) { struct fib6_info *f6i; + u32 prio; f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len, @@ -4536,13 +4537,15 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp, if (!f6i) return -ENOENT; - if (f6i->fib6_metric != ifp->rt_priority) { + prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF; + if (f6i->fib6_metric != prio) { + /* delete old one */ + ip6_del_rt(dev_net(ifp->idev->dev), f6i); + /* add new one */ addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->rt_priority, ifp->idev->dev, expires, flags, GFP_KERNEL); - /* delete old one */ - ip6_del_rt(dev_net(ifp->idev->dev), f6i); } else { if (!expires) fib6_clean_expires(f6i); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 74f2a261e8df..9ed0eae91758 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -570,7 +570,7 @@ const struct proto_ops inet6_stream_ops = { .socketpair = sock_no_socketpair, /* a do nothing */ .accept = inet_accept, /* ok */ .getname = inet6_getname, - .poll_mask = tcp_poll_mask, /* ok */ + .poll = tcp_poll, /* ok */ .ioctl = inet6_ioctl, /* must change */ .listen = inet_listen, /* ok */ .shutdown = inet_shutdown, /* ok */ @@ -603,7 +603,7 @@ const struct proto_ops inet6_dgram_ops = { .socketpair = sock_no_socketpair, /* a do nothing */ .accept = sock_no_accept, /* a do nothing */ .getname = inet6_getname, - .poll_mask = udp_poll_mask, /* ok */ + .poll = udp_poll, /* ok */ .ioctl = inet6_ioctl, /* must change */ .listen = sock_no_listen, /* ok */ .shutdown = inet_shutdown, /* ok */ diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c index 1323b9679cf7..1c0bb9fb76e6 100644 --- a/net/ipv6/calipso.c +++ b/net/ipv6/calipso.c @@ -799,8 +799,7 @@ static int calipso_opt_update(struct sock *sk, struct ipv6_opt_hdr *hop) { struct ipv6_txoptions *old = txopt_get(inet6_sk(sk)), *txopts; - txopts = ipv6_renew_options_kern(sk, old, IPV6_HOPOPTS, - hop, hop ? ipv6_optlen(hop) : 0); + txopts = ipv6_renew_options(sk, old, IPV6_HOPOPTS, hop); txopt_put(old); if (IS_ERR(txopts)) return PTR_ERR(txopts); @@ -1222,8 +1221,7 @@ static int calipso_req_setattr(struct request_sock *req, if (IS_ERR(new)) return PTR_ERR(new); - txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, - new, new ? ipv6_optlen(new) : 0); + txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new); kfree(new); @@ -1260,8 +1258,7 @@ static void calipso_req_delattr(struct request_sock *req) if (calipso_opt_del(req_inet->ipv6_opt->hopopt, &new)) return; /* Nothing to do */ - txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, - new, new ? 
ipv6_optlen(new) : 0); + txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new); if (!IS_ERR(txopts)) { txopts = xchg(&req_inet->ipv6_opt, txopts); diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 5bc2bf3733ab..20291c2036fc 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -1015,29 +1015,21 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt) } EXPORT_SYMBOL_GPL(ipv6_dup_options); -static int ipv6_renew_option(void *ohdr, - struct ipv6_opt_hdr __user *newopt, int newoptlen, - int inherit, - struct ipv6_opt_hdr **hdr, - char **p) +static void ipv6_renew_option(int renewtype, + struct ipv6_opt_hdr **dest, + struct ipv6_opt_hdr *old, + struct ipv6_opt_hdr *new, + int newtype, char **p) { - if (inherit) { - if (ohdr) { - memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr)); - *hdr = (struct ipv6_opt_hdr *)*p; - *p += CMSG_ALIGN(ipv6_optlen(*hdr)); - } - } else { - if (newopt) { - if (copy_from_user(*p, newopt, newoptlen)) - return -EFAULT; - *hdr = (struct ipv6_opt_hdr *)*p; - if (ipv6_optlen(*hdr) > newoptlen) - return -EINVAL; - *p += CMSG_ALIGN(newoptlen); - } - } - return 0; + struct ipv6_opt_hdr *src; + + src = (renewtype == newtype ? new : old); + if (!src) + return; + + memcpy(*p, src, ipv6_optlen(src)); + *dest = (struct ipv6_opt_hdr *)*p; + *p += CMSG_ALIGN(ipv6_optlen(*dest)); } /** @@ -1063,13 +1055,11 @@ static int ipv6_renew_option(void *ohdr, */ struct ipv6_txoptions * ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, - int newtype, - struct ipv6_opt_hdr __user *newopt, int newoptlen) + int newtype, struct ipv6_opt_hdr *newopt) { int tot_len = 0; char *p; struct ipv6_txoptions *opt2; - int err; if (opt) { if (newtype != IPV6_HOPOPTS && opt->hopopt) @@ -1082,8 +1072,8 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt)); } - if (newopt && newoptlen) - tot_len += CMSG_ALIGN(newoptlen); + if (newopt) + tot_len += CMSG_ALIGN(ipv6_optlen(newopt)); if (!tot_len) return NULL; @@ -1098,29 +1088,19 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, opt2->tot_len = tot_len; p = (char *)(opt2 + 1); - err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen, - newtype != IPV6_HOPOPTS, - &opt2->hopopt, &p); - if (err) - goto out; - - err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen, - newtype != IPV6_RTHDRDSTOPTS, - &opt2->dst0opt, &p); - if (err) - goto out; - - err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen, - newtype != IPV6_RTHDR, - (struct ipv6_opt_hdr **)&opt2->srcrt, &p); - if (err) - goto out; - - err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen, - newtype != IPV6_DSTOPTS, - &opt2->dst1opt, &p); - if (err) - goto out; + ipv6_renew_option(IPV6_HOPOPTS, &opt2->hopopt, + (opt ? opt->hopopt : NULL), + newopt, newtype, &p); + ipv6_renew_option(IPV6_RTHDRDSTOPTS, &opt2->dst0opt, + (opt ? opt->dst0opt : NULL), + newopt, newtype, &p); + ipv6_renew_option(IPV6_RTHDR, + (struct ipv6_opt_hdr **)&opt2->srcrt, + (opt ? (struct ipv6_opt_hdr *)opt->srcrt : NULL), + newopt, newtype, &p); + ipv6_renew_option(IPV6_DSTOPTS, &opt2->dst1opt, + (opt ? opt->dst1opt : NULL), + newopt, newtype, &p); opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) + (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) + @@ -1128,37 +1108,6 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, opt2->opt_flen = (opt2->dst1opt ? 
ipv6_optlen(opt2->dst1opt) : 0); return opt2; -out: - sock_kfree_s(sk, opt2, opt2->tot_len); - return ERR_PTR(err); -} - -/** - * ipv6_renew_options_kern - replace a specific ext hdr with a new one. - * - * @sk: sock from which to allocate memory - * @opt: original options - * @newtype: option type to replace in @opt - * @newopt: new option of type @newtype to replace (kernel-mem) - * @newoptlen: length of @newopt - * - * See ipv6_renew_options(). The difference is that @newopt is - * kernel memory, rather than user memory. - */ -struct ipv6_txoptions * -ipv6_renew_options_kern(struct sock *sk, struct ipv6_txoptions *opt, - int newtype, struct ipv6_opt_hdr *newopt, - int newoptlen) -{ - struct ipv6_txoptions *ret_val; - const mm_segment_t old_fs = get_fs(); - - set_fs(KERNEL_DS); - ret_val = ipv6_renew_options(sk, opt, newtype, - (struct ipv6_opt_hdr __user *)newopt, - newoptlen); - set_fs(old_fs); - return ret_val; } struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 1fb2f3118d60..d212738e9d10 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -935,20 +935,19 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, { struct fib6_info *leaf = rcu_dereference_protected(fn->leaf, lockdep_is_held(&rt->fib6_table->tb6_lock)); - enum fib_event_type event = FIB_EVENT_ENTRY_ADD; - struct fib6_info *iter = NULL, *match = NULL; + struct fib6_info *iter = NULL; struct fib6_info __rcu **ins; + struct fib6_info __rcu **fallback_ins = NULL; int replace = (info->nlh && (info->nlh->nlmsg_flags & NLM_F_REPLACE)); - int append = (info->nlh && - (info->nlh->nlmsg_flags & NLM_F_APPEND)); int add = (!info->nlh || (info->nlh->nlmsg_flags & NLM_F_CREATE)); int found = 0; + bool rt_can_ecmp = rt6_qualify_for_ecmp(rt); u16 nlflags = NLM_F_EXCL; int err; - if (append) + if (info->nlh && (info->nlh->nlmsg_flags & NLM_F_APPEND)) nlflags |= NLM_F_APPEND; ins = &fn->leaf; @@ -970,8 +969,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, nlflags &= ~NLM_F_EXCL; if (replace) { - found++; - break; + if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) { + found++; + break; + } + if (rt_can_ecmp) + fallback_ins = fallback_ins ?: ins; + goto next_iter; } if (rt6_duplicate_nexthop(iter, rt)) { @@ -986,51 +990,71 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu); return -EEXIST; } - - /* first route that matches */ - if (!match) - match = iter; + /* If we have the same destination and the same metric, + * but not the same gateway, then the route we try to + * add is sibling to this route, increment our counter + * of siblings, and later we will add our route to the + * list. + * Only static routes (which don't have flag + * RTF_EXPIRES) are used for ECMPv6. + * + * To avoid long list, we only had siblings if the + * route have a gateway. + */ + if (rt_can_ecmp && + rt6_qualify_for_ecmp(iter)) + rt->fib6_nsiblings++; } if (iter->fib6_metric > rt->fib6_metric) break; +next_iter: ins = &iter->fib6_next; } + if (fallback_ins && !found) { + /* No ECMP-able route found, replace first non-ECMP one */ + ins = fallback_ins; + iter = rcu_dereference_protected(*ins, + lockdep_is_held(&rt->fib6_table->tb6_lock)); + found++; + } + /* Reset round-robin state, if necessary */ if (ins == &fn->leaf) fn->rr_ptr = NULL; /* Link this route to others same route. 
*/ - if (append && match) { + if (rt->fib6_nsiblings) { + unsigned int fib6_nsiblings; struct fib6_info *sibling, *temp_sibling; - if (rt->fib6_flags & RTF_REJECT) { - NL_SET_ERR_MSG(extack, - "Can not append a REJECT route"); - return -EINVAL; - } else if (match->fib6_flags & RTF_REJECT) { - NL_SET_ERR_MSG(extack, - "Can not append to a REJECT route"); - return -EINVAL; + /* Find the first route that have the same metric */ + sibling = leaf; + while (sibling) { + if (sibling->fib6_metric == rt->fib6_metric && + rt6_qualify_for_ecmp(sibling)) { + list_add_tail(&rt->fib6_siblings, + &sibling->fib6_siblings); + break; + } + sibling = rcu_dereference_protected(sibling->fib6_next, + lockdep_is_held(&rt->fib6_table->tb6_lock)); } - event = FIB_EVENT_ENTRY_APPEND; - rt->fib6_nsiblings = match->fib6_nsiblings; - list_add_tail(&rt->fib6_siblings, &match->fib6_siblings); - match->fib6_nsiblings++; - /* For each sibling in the list, increment the counter of * siblings. BUG() if counters does not match, list of siblings * is broken! */ + fib6_nsiblings = 0; list_for_each_entry_safe(sibling, temp_sibling, - &match->fib6_siblings, fib6_siblings) { + &rt->fib6_siblings, fib6_siblings) { sibling->fib6_nsiblings++; - BUG_ON(sibling->fib6_nsiblings != match->fib6_nsiblings); + BUG_ON(sibling->fib6_nsiblings != rt->fib6_nsiblings); + fib6_nsiblings++; } - - rt6_multipath_rebalance(match); + BUG_ON(fib6_nsiblings != rt->fib6_nsiblings); + rt6_multipath_rebalance(temp_sibling); } /* @@ -1043,8 +1067,9 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, add: nlflags |= NLM_F_CREATE; - err = call_fib6_entry_notifiers(info->nl_net, event, rt, - extack); + err = call_fib6_entry_notifiers(info->nl_net, + FIB_EVENT_ENTRY_ADD, + rt, extack); if (err) return err; @@ -1062,7 +1087,7 @@ add: } } else { - struct fib6_info *tmp; + int nsiblings; if (!found) { if (add) @@ -1077,57 +1102,48 @@ add: if (err) return err; - /* if route being replaced has siblings, set tmp to - * last one, otherwise tmp is current route. 
this is - * used to set fib6_next for new route - */ - if (iter->fib6_nsiblings) - tmp = list_last_entry(&iter->fib6_siblings, - struct fib6_info, - fib6_siblings); - else - tmp = iter; - - /* insert new route */ atomic_inc(&rt->fib6_ref); rcu_assign_pointer(rt->fib6_node, fn); - rt->fib6_next = tmp->fib6_next; + rt->fib6_next = iter->fib6_next; rcu_assign_pointer(*ins, rt); - if (!info->skip_notify) inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE); if (!(fn->fn_flags & RTN_RTINFO)) { info->nl_net->ipv6.rt6_stats->fib_route_nodes++; fn->fn_flags |= RTN_RTINFO; } + nsiblings = iter->fib6_nsiblings; + iter->fib6_node = NULL; + fib6_purge_rt(iter, fn, info->nl_net); + if (rcu_access_pointer(fn->rr_ptr) == iter) + fn->rr_ptr = NULL; + fib6_info_release(iter); - /* delete old route */ - rt = iter; - - if (rt->fib6_nsiblings) { - struct fib6_info *tmp; - + if (nsiblings) { /* Replacing an ECMP route, remove all siblings */ - list_for_each_entry_safe(iter, tmp, &rt->fib6_siblings, - fib6_siblings) { - iter->fib6_node = NULL; - fib6_purge_rt(iter, fn, info->nl_net); - if (rcu_access_pointer(fn->rr_ptr) == iter) - fn->rr_ptr = NULL; - fib6_info_release(iter); - - rt->fib6_nsiblings--; - info->nl_net->ipv6.rt6_stats->fib_rt_entries--; + ins = &rt->fib6_next; + iter = rcu_dereference_protected(*ins, + lockdep_is_held(&rt->fib6_table->tb6_lock)); + while (iter) { + if (iter->fib6_metric > rt->fib6_metric) + break; + if (rt6_qualify_for_ecmp(iter)) { + *ins = iter->fib6_next; + iter->fib6_node = NULL; + fib6_purge_rt(iter, fn, info->nl_net); + if (rcu_access_pointer(fn->rr_ptr) == iter) + fn->rr_ptr = NULL; + fib6_info_release(iter); + nsiblings--; + info->nl_net->ipv6.rt6_stats->fib_rt_entries--; + } else { + ins = &iter->fib6_next; + } + iter = rcu_dereference_protected(*ins, + lockdep_is_held(&rt->fib6_table->tb6_lock)); } + WARN_ON(nsiblings != 0); } - - WARN_ON(rt->fib6_nsiblings != 0); - - rt->fib6_node = NULL; - fib6_purge_rt(rt, fn, info->nl_net); - if (rcu_access_pointer(fn->rr_ptr) == rt) - fn->rr_ptr = NULL; - fib6_info_release(rt); } return 0; diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index c8cf2fdbb13b..cd2cfb04e5d8 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -927,7 +927,6 @@ tx_err: static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) { - struct ipv6hdr *ipv6h = ipv6_hdr(skb); struct ip6_tnl *t = netdev_priv(dev); struct dst_entry *dst = skb_dst(skb); struct net_device_stats *stats; @@ -1010,6 +1009,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, goto tx_err; } } else { + struct ipv6hdr *ipv6h = ipv6_hdr(skb); + switch (skb->protocol) { case htons(ETH_P_IP): memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 4d780c7f0130..568ca4187cd1 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -398,6 +398,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, case IPV6_DSTOPTS: { struct ipv6_txoptions *opt; + struct ipv6_opt_hdr *new = NULL; + + /* hop-by-hop / destination options are privileged option */ + retv = -EPERM; + if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) + break; /* remove any sticky options header with a zero option * length, per RFC3542. 
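The do_ipv6_setsockopt hunks for IPV6_HOPOPTS/IPV6_RTHDRDSTOPTS/IPV6_RTHDR/IPV6_DSTOPTS (continued just below) copy the user-supplied extension header once with memdup_user() and then reject it when the length encoded inside the header is larger than what was actually copied, the ipv6_optlen(new) > optlen test. A hypothetical userspace sketch of that sanity check follows; struct opt_hdr, ext_hdr_len() and opt_hdr_ok() are illustrative names, and ext_hdr_len() uses the same ((hdrlen + 1) << 3) formula that ipv6_optlen() expands to.

/* Sketch: validate an IPv6 extension header copied from userspace.
 * The length claimed inside the header must fit inside the bytes that
 * were actually copied, otherwise later parsing would read past the end.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct opt_hdr {                /* mirrors struct ipv6_opt_hdr */
    uint8_t nexthdr;
    uint8_t hdrlen;             /* length in 8-octet units, minus 1 */
    /* option TLVs follow */
};

static size_t ext_hdr_len(const struct opt_hdr *h)
{
    return ((size_t)h->hdrlen + 1) << 3;    /* ipv6_optlen() equivalent */
}

static bool opt_hdr_ok(const void *buf, size_t copied)
{
    const struct opt_hdr *h = buf;

    /* same bounds the setsockopt path applies to optlen */
    if (copied < sizeof(*h) || (copied & 0x7) || copied > 8 * 255)
        return false;
    return ext_hdr_len(h) <= copied;        /* claimed size fits the copy */
}

int main(void)
{
    uint8_t buf[16] = { 0, 1 }; /* hdrlen = 1 -> claims (1 + 1) * 8 = 16 bytes */

    return opt_hdr_ok(buf, sizeof(buf)) ? 0 : 1;    /* exactly fits: ok */
}

This pairs with the removal of the set_fs(KERNEL_DS)-based ipv6_renew_options_kern() wrapper earlier in this diff: ipv6_renew_options() now always receives a kernel buffer whose size has already been checked by its caller.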
@@ -409,17 +415,22 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, else if (optlen < sizeof(struct ipv6_opt_hdr) || optlen & 0x7 || optlen > 8 * 255) goto e_inval; - - /* hop-by-hop / destination options are privileged option */ - retv = -EPERM; - if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) - break; + else { + new = memdup_user(optval, optlen); + if (IS_ERR(new)) { + retv = PTR_ERR(new); + break; + } + if (unlikely(ipv6_optlen(new) > optlen)) { + kfree(new); + goto e_inval; + } + } opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); - opt = ipv6_renew_options(sk, opt, optname, - (struct ipv6_opt_hdr __user *)optval, - optlen); + opt = ipv6_renew_options(sk, opt, optname, new); + kfree(new); if (IS_ERR(opt)) { retv = PTR_ERR(opt); break; @@ -718,8 +729,9 @@ done: struct sockaddr_in6 *psin6; psin6 = (struct sockaddr_in6 *)&greqs.gsr_group; - retv = ipv6_sock_mc_join(sk, greqs.gsr_interface, - &psin6->sin6_addr); + retv = ipv6_sock_mc_join_ssm(sk, greqs.gsr_interface, + &psin6->sin6_addr, + MCAST_INCLUDE); /* prior join w/ different source is ok */ if (retv && retv != -EADDRINUSE) break; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index c0c74088f2af..2699be7202be 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -95,6 +95,8 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca, int delta); static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, struct inet6_dev *idev); +static int __ipv6_dev_mc_inc(struct net_device *dev, + const struct in6_addr *addr, unsigned int mode); #define MLD_QRV_DEFAULT 2 /* RFC3810, 9.2. Query Interval */ @@ -132,7 +134,8 @@ static int unsolicited_report_interval(struct inet6_dev *idev) return iv > 0 ? iv : 1; } -int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) +static int __ipv6_sock_mc_join(struct sock *sk, int ifindex, + const struct in6_addr *addr, unsigned int mode) { struct net_device *dev = NULL; struct ipv6_mc_socklist *mc_lst; @@ -179,7 +182,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) } mc_lst->ifindex = dev->ifindex; - mc_lst->sfmode = MCAST_EXCLUDE; + mc_lst->sfmode = mode; rwlock_init(&mc_lst->sflock); mc_lst->sflist = NULL; @@ -187,7 +190,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) * now add/increase the group membership on the device */ - err = ipv6_dev_mc_inc(dev, addr); + err = __ipv6_dev_mc_inc(dev, addr, mode); if (err) { sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); @@ -199,8 +202,19 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) return 0; } + +int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) +{ + return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE); +} EXPORT_SYMBOL(ipv6_sock_mc_join); +int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex, + const struct in6_addr *addr, unsigned int mode) +{ + return __ipv6_sock_mc_join(sk, ifindex, addr, mode); +} + /* * socket leave on multicast group */ @@ -646,7 +660,7 @@ bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, return rv; } -static void igmp6_group_added(struct ifmcaddr6 *mc) +static void igmp6_group_added(struct ifmcaddr6 *mc, unsigned int mode) { struct net_device *dev = mc->idev->dev; char buf[MAX_ADDR_LEN]; @@ -672,7 +686,13 @@ static void igmp6_group_added(struct ifmcaddr6 *mc) } /* else v2 */ - mc->mca_crcount = mc->idev->mc_qrv; + /* Based on RFC3810 6.1, for 
newly added INCLUDE SSM, we + * should not send filter-mode change record as the mode + * should be from IN() to IN(A). + */ + if (mode == MCAST_EXCLUDE) + mc->mca_crcount = mc->idev->mc_qrv; + mld_ifc_event(mc->idev); } @@ -770,13 +790,14 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) spin_lock_bh(&im->mca_lock); if (pmc) { im->idev = pmc->idev; - im->mca_crcount = idev->mc_qrv; im->mca_sfmode = pmc->mca_sfmode; if (pmc->mca_sfmode == MCAST_INCLUDE) { im->mca_tomb = pmc->mca_tomb; im->mca_sources = pmc->mca_sources; for (psf = im->mca_sources; psf; psf = psf->sf_next) - psf->sf_crcount = im->mca_crcount; + psf->sf_crcount = idev->mc_qrv; + } else { + im->mca_crcount = idev->mc_qrv; } in6_dev_put(pmc->idev); kfree(pmc); @@ -831,7 +852,8 @@ static void ma_put(struct ifmcaddr6 *mc) } static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev, - const struct in6_addr *addr) + const struct in6_addr *addr, + unsigned int mode) { struct ifmcaddr6 *mc; @@ -849,9 +871,8 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev, refcount_set(&mc->mca_refcnt, 1); spin_lock_init(&mc->mca_lock); - /* initial mode is (EX, empty) */ - mc->mca_sfmode = MCAST_EXCLUDE; - mc->mca_sfcount[MCAST_EXCLUDE] = 1; + mc->mca_sfmode = mode; + mc->mca_sfcount[mode] = 1; if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) || IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL) @@ -863,7 +884,8 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev, /* * device multicast group inc (add if not found) */ -int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) +static int __ipv6_dev_mc_inc(struct net_device *dev, + const struct in6_addr *addr, unsigned int mode) { struct ifmcaddr6 *mc; struct inet6_dev *idev; @@ -887,14 +909,13 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) if (ipv6_addr_equal(&mc->mca_addr, addr)) { mc->mca_users++; write_unlock_bh(&idev->lock); - ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0, - NULL, 0); + ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0); in6_dev_put(idev); return 0; } } - mc = mca_alloc(idev, addr); + mc = mca_alloc(idev, addr, mode); if (!mc) { write_unlock_bh(&idev->lock); in6_dev_put(idev); @@ -911,11 +932,16 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) write_unlock_bh(&idev->lock); mld_del_delrec(idev, mc); - igmp6_group_added(mc); + igmp6_group_added(mc, mode); ma_put(mc); return 0; } +int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) +{ + return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE); +} + /* * device multicast group del */ @@ -1751,7 +1777,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, psf_next = psf->sf_next; - if (!is_in(pmc, psf, type, gdeleted, sdeleted)) { + if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) { psf_prev = psf; continue; } @@ -2066,7 +2092,7 @@ static void mld_send_initial_cr(struct inet6_dev *idev) if (pmc->mca_sfcount[MCAST_EXCLUDE]) type = MLD2_CHANGE_TO_EXCLUDE; else - type = MLD2_CHANGE_TO_INCLUDE; + type = MLD2_ALLOW_NEW_SOURCES; skb = add_grec(skb, pmc, type, 0, 0, 1); spin_unlock_bh(&pmc->mca_lock); } @@ -2546,7 +2572,7 @@ void ipv6_mc_up(struct inet6_dev *idev) ipv6_mc_reset(idev); for (i = idev->mc_list; i; i = i->next) { mld_del_delrec(idev, i); - igmp6_group_added(i); + igmp6_group_added(i, i->mca_sfmode); } read_unlock_bh(&idev->lock); } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index e640d2f3c55c..0ec273997d1d 100644 --- a/net/ipv6/ndisc.c 
+++ b/net/ipv6/ndisc.c @@ -811,7 +811,7 @@ static void ndisc_recv_ns(struct sk_buff *skb) return; } } - if (ndopts.nd_opts_nonce) + if (ndopts.nd_opts_nonce && ndopts.nd_opts_nonce->nd_opt_len == 1) memcpy(&nonce, (u8 *)(ndopts.nd_opts_nonce + 1), 6); inc = ipv6_addr_is_multicast(daddr); diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 7eab959734bc..daf2e9e9193d 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1909,6 +1909,7 @@ static struct xt_match ip6t_builtin_mt[] __read_mostly = { .checkentry = icmp6_checkentry, .proto = IPPROTO_ICMPV6, .family = NFPROTO_IPV6, + .me = THIS_MODULE, }, }; diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 5e0332014c17..e4d9e6976d3c 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -107,7 +107,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net) if (hdr == NULL) goto err_reg; - net->nf_frag.sysctl.frags_hdr = hdr; + net->nf_frag_frags_hdr = hdr; return 0; err_reg: @@ -121,8 +121,8 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net) { struct ctl_table *table; - table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg; - unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr); + table = net->nf_frag_frags_hdr->ctl_table_arg; + unregister_net_sysctl_table(net->nf_frag_frags_hdr); if (!net_eq(net, &init_net)) kfree(table); } @@ -585,6 +585,8 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) fq->q.meat == fq->q.len && nf_ct_frag6_reasm(fq, skb, dev)) ret = 0; + else + skb_dst_drop(skb); out_unlock: spin_unlock_bh(&fq->q.lock); diff --git a/net/ipv6/netfilter/nf_tproxy_ipv6.c b/net/ipv6/netfilter/nf_tproxy_ipv6.c index bf1d6c421e3b..5dfd33af6451 100644 --- a/net/ipv6/netfilter/nf_tproxy_ipv6.c +++ b/net/ipv6/netfilter/nf_tproxy_ipv6.c @@ -55,7 +55,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff, * to a listener socket if there's one */ struct sock *sk2; - sk2 = nf_tproxy_get_sock_v6(net, skb, thoff, hp, tproto, + sk2 = nf_tproxy_get_sock_v6(net, skb, thoff, tproto, &iph->saddr, nf_tproxy_laddr6(skb, laddr, &iph->daddr), hp->source, @@ -72,7 +72,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff, EXPORT_SYMBOL_GPL(nf_tproxy_handle_time_wait6); struct sock * -nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp, +nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, const u8 protocol, const struct in6_addr *saddr, const struct in6_addr *daddr, const __be16 sport, const __be16 dport, @@ -80,15 +80,20 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp, const enum nf_tproxy_lookup_t lookup_type) { struct sock *sk; - struct tcphdr *tcph; switch (protocol) { - case IPPROTO_TCP: + case IPPROTO_TCP: { + struct tcphdr _hdr, *hp; + + hp = skb_header_pointer(skb, thoff, + sizeof(struct tcphdr), &_hdr); + if (hp == NULL) + return NULL; + switch (lookup_type) { case NF_TPROXY_LOOKUP_LISTENER: - tcph = hp; sk = inet6_lookup_listener(net, &tcp_hashinfo, skb, - thoff + __tcp_hdrlen(tcph), + thoff + __tcp_hdrlen(hp), saddr, sport, daddr, ntohs(dport), in->ifindex, 0); @@ -110,6 +115,7 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp, BUG(); } break; + } case IPPROTO_UDP: sk = udp6_lib_lookup(net, saddr, sport, daddr, dport, in->ifindex); diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 
ce6f0d15b5dd..afc307c89d1a 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1334,7 +1334,7 @@ void raw6_proc_exit(void) } #endif /* CONFIG_PROC_FS */ -/* Same as inet6_dgram_ops, sans udp_poll_mask. */ +/* Same as inet6_dgram_ops, sans udp_poll. */ const struct proto_ops inet6_sockraw_ops = { .family = PF_INET6, .owner = THIS_MODULE, @@ -1344,7 +1344,7 @@ const struct proto_ops inet6_sockraw_ops = { .socketpair = sock_no_socketpair, /* a do nothing */ .accept = sock_no_accept, /* a do nothing */ .getname = inet6_getname, - .poll_mask = datagram_poll_mask, /* ok */ + .poll = datagram_poll, /* ok */ .ioctl = inet6_ioctl, /* must change */ .listen = sock_no_listen, /* ok */ .shutdown = inet_shutdown, /* ok */ diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 86a0e4333d42..2ce0bd17de4f 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3842,7 +3842,7 @@ static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt) lockdep_is_held(&rt->fib6_table->tb6_lock)); while (iter) { if (iter->fib6_metric == rt->fib6_metric && - iter->fib6_nsiblings) + rt6_qualify_for_ecmp(iter)) return iter; iter = rcu_dereference_protected(iter->fib6_next, lockdep_is_held(&rt->fib6_table->tb6_lock)); @@ -4388,6 +4388,13 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, rt = NULL; goto cleanup; } + if (!rt6_qualify_for_ecmp(rt)) { + err = -EINVAL; + NL_SET_ERR_MSG(extack, + "Device only routes can not be added for IPv6 using the multipath API."); + fib6_info_release(rt); + goto cleanup; + } rt->fib6_nh.nh_weight = rtnh->rtnh_hops + 1; @@ -4439,7 +4446,6 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, */ cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | NLM_F_REPLACE); - cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_APPEND; nhn++; } diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c index 33fb35cbfac1..558fe8cc6d43 100644 --- a/net/ipv6/seg6_hmac.c +++ b/net/ipv6/seg6_hmac.c @@ -373,7 +373,7 @@ static int seg6_hmac_init_algo(void) return -ENOMEM; for_each_possible_cpu(cpu) { - tfm = crypto_alloc_shash(algo->name, 0, GFP_KERNEL); + tfm = crypto_alloc_shash(algo->name, 0, 0); if (IS_ERR(tfm)) return PTR_ERR(tfm); p_tfm = per_cpu_ptr(algo->tfms, cpu); diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 19ccf0dc996c..a8854dd3e9c5 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -101,7 +101,7 @@ static __be32 seg6_make_flowlabel(struct net *net, struct sk_buff *skb, if (do_flowlabel > 0) { hash = skb_get_hash(skb); - rol32(hash, 16); + hash = rol32(hash, 16); flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; } else if (!do_flowlabel && skb->protocol == htons(ETH_P_IPV6)) { flowlabel = ip6_flowlabel(inner_hdr); diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 68e86257a549..893a022f9620 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1488,11 +1488,14 @@ static inline __poll_t iucv_accept_poll(struct sock *parent) return 0; } -static __poll_t iucv_sock_poll_mask(struct socket *sock, __poll_t events) +__poll_t iucv_sock_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; __poll_t mask = 0; + sock_poll_wait(file, sk_sleep(sk), wait); + if (sk->sk_state == IUCV_LISTEN) return iucv_accept_poll(sk); @@ -2385,7 +2388,7 @@ static const struct proto_ops iucv_sock_ops = { .getname = iucv_sock_getname, .sendmsg = iucv_sock_sendmsg, .recvmsg = iucv_sock_recvmsg, - .poll_mask = iucv_sock_poll_mask, + .poll = iucv_sock_poll, .ioctl = sock_no_ioctl, .mmap = 
sock_no_mmap, .socketpair = sock_no_socketpair, diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 84b7d5c6fec8..d3601d421571 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -1336,9 +1336,9 @@ static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux) struct list_head *head; int index = 0; - /* For SOCK_SEQPACKET sock type, datagram_poll_mask checks the sk_state, - * so we set sk_state, otherwise epoll_wait always returns right away - * with EPOLLHUP + /* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so + * we set sk_state, otherwise epoll_wait always returns right away with + * EPOLLHUP */ kcm->sk.sk_state = TCP_ESTABLISHED; @@ -1903,7 +1903,7 @@ static const struct proto_ops kcm_dgram_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = kcm_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, @@ -1924,7 +1924,7 @@ static const struct proto_ops kcm_seqpacket_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = kcm_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/key/af_key.c b/net/key/af_key.c index 8bdc1cbe490a..5e1d2946ffbf 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3751,7 +3751,7 @@ static const struct proto_ops pfkey_ops = { /* Now the operations that really occur. */ .release = pfkey_release, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .sendmsg = pfkey_sendmsg, .recvmsg = pfkey_recvmsg, }; diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 181073bf6925..a9c05b2bc1b0 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -613,7 +613,7 @@ static const struct proto_ops l2tp_ip_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = l2tp_ip_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = inet_ioctl, .listen = sock_no_listen, .shutdown = inet_shutdown, diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 336e4c00abbc..957369192ca1 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -754,7 +754,7 @@ static const struct proto_ops l2tp_ip6_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = l2tp_ip6_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = inet6_ioctl, .listen = sock_no_listen, .shutdown = inet_shutdown, diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 55188382845c..e398797878a9 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1818,7 +1818,7 @@ static const struct proto_ops pppol2tp_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = pppol2tp_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = pppol2tp_setsockopt, diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 804de8490186..1beeea9549fa 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -1192,7 +1192,7 @@ static const struct proto_ops llc_ui_ops = { .socketpair = sock_no_socketpair, .accept = llc_ui_accept, .getname = llc_ui_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = llc_ui_ioctl, .listen = llc_ui_listen, .shutdown = llc_ui_shutdown, diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 44b5dfe8727d..fa1f1e63a264 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ 
-4845,7 +4845,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, skb_reset_network_header(skb); skb_reset_mac_header(skb); + local_bh_disable(); __ieee80211_subif_start_xmit(skb, skb->dev, flags); + local_bh_enable(); return 0; } diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index dbd7d1fad277..f0a1c536ef15 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -460,6 +460,13 @@ config NF_TABLES if NF_TABLES +config NF_TABLES_SET + tristate "Netfilter nf_tables set infrastructure" + help + This option enables the nf_tables set infrastructure that allows to + look up for elements in a set and to build one-way mappings between + matchings and actions. + config NF_TABLES_INET depends on IPV6 select NF_TABLES_IPV4 @@ -493,24 +500,6 @@ config NFT_FLOW_OFFLOAD This option adds the "flow_offload" expression that you can use to choose what flows are placed into the hardware. -config NFT_SET_RBTREE - tristate "Netfilter nf_tables rbtree set module" - help - This option adds the "rbtree" set type (Red Black tree) that is used - to build interval-based sets. - -config NFT_SET_HASH - tristate "Netfilter nf_tables hash set module" - help - This option adds the "hash" set type that is used to build one-way - mappings between matchings and actions. - -config NFT_SET_BITMAP - tristate "Netfilter nf_tables bitmap set module" - help - This option adds the "bitmap" set type that is used to build sets - whose keys are smaller or equal to 16 bits. - config NFT_COUNTER tristate "Netfilter nf_tables counter module" help diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 44449389e527..8a76dced974d 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -78,7 +78,11 @@ nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \ nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \ nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o +nf_tables_set-objs := nf_tables_set_core.o \ + nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o + obj-$(CONFIG_NF_TABLES) += nf_tables.o +obj-$(CONFIG_NF_TABLES_SET) += nf_tables_set.o obj-$(CONFIG_NFT_COMPAT) += nft_compat.o obj-$(CONFIG_NFT_CONNLIMIT) += nft_connlimit.o obj-$(CONFIG_NFT_NUMGEN) += nft_numgen.o @@ -91,9 +95,6 @@ obj-$(CONFIG_NFT_QUEUE) += nft_queue.o obj-$(CONFIG_NFT_QUOTA) += nft_quota.o obj-$(CONFIG_NFT_REJECT) += nft_reject.o obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o -obj-$(CONFIG_NFT_SET_RBTREE) += nft_set_rbtree.o -obj-$(CONFIG_NFT_SET_HASH) += nft_set_hash.o -obj-$(CONFIG_NFT_SET_BITMAP) += nft_set_bitmap.o obj-$(CONFIG_NFT_COUNTER) += nft_counter.o obj-$(CONFIG_NFT_LOG) += nft_log.o obj-$(CONFIG_NFT_MASQ) += nft_masq.o diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c index d8383609fe28..510039862aa9 100644 --- a/net/netfilter/nf_conncount.c +++ b/net/netfilter/nf_conncount.c @@ -47,6 +47,8 @@ struct nf_conncount_tuple { struct hlist_node node; struct nf_conntrack_tuple tuple; struct nf_conntrack_zone zone; + int cpu; + u32 jiffies32; }; struct nf_conncount_rb { @@ -91,11 +93,42 @@ bool nf_conncount_add(struct hlist_head *head, return false; conn->tuple = *tuple; conn->zone = *zone; + conn->cpu = raw_smp_processor_id(); + conn->jiffies32 = (u32)jiffies; hlist_add_head(&conn->node, head); return true; } EXPORT_SYMBOL_GPL(nf_conncount_add); +static const struct nf_conntrack_tuple_hash * +find_or_evict(struct net *net, struct nf_conncount_tuple *conn) +{ + const struct nf_conntrack_tuple_hash *found; + unsigned long a, b; + int cpu = 
raw_smp_processor_id(); + __s32 age; + + found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple); + if (found) + return found; + b = conn->jiffies32; + a = (u32)jiffies; + + /* conn might have been added just before by another cpu and + * might still be unconfirmed. In this case, nf_conntrack_find() + * returns no result. Thus only evict if this cpu added the + * stale entry or if the entry is older than two jiffies. + */ + age = a - b; + if (conn->cpu == cpu || age >= 2) { + hlist_del(&conn->node); + kmem_cache_free(conncount_conn_cachep, conn); + return ERR_PTR(-ENOENT); + } + + return ERR_PTR(-EAGAIN); +} + unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone, @@ -103,18 +136,27 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, { const struct nf_conntrack_tuple_hash *found; struct nf_conncount_tuple *conn; - struct hlist_node *n; struct nf_conn *found_ct; + struct hlist_node *n; unsigned int length = 0; *addit = tuple ? true : false; /* check the saved connections */ hlist_for_each_entry_safe(conn, n, head, node) { - found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple); - if (found == NULL) { - hlist_del(&conn->node); - kmem_cache_free(conncount_conn_cachep, conn); + found = find_or_evict(net, conn); + if (IS_ERR(found)) { + /* Not found, but might be about to be confirmed */ + if (PTR_ERR(found) == -EAGAIN) { + length++; + if (!tuple) + continue; + + if (nf_ct_tuple_equal(&conn->tuple, tuple) && + nf_ct_zone_id(&conn->zone, conn->zone.dir) == + nf_ct_zone_id(zone, zone->dir)) + *addit = false; + } continue; } diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 3465da2a98bd..3d5280425027 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -2043,7 +2043,7 @@ int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp) return -EOPNOTSUPP; /* On boot, we can set this without any fancy locking. */ - if (!nf_conntrack_htable_size) + if (!nf_conntrack_hash) return param_set_uint(val, kp); rc = kstrtouint(val, 0, &hashsize); diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 551a1eddf0fa..a75b11c39312 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -465,6 +465,11 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me) nf_ct_expect_iterate_destroy(expect_iter_me, NULL); nf_ct_iterate_destroy(unhelp, me); + + /* Maybe someone has gotten the helper already when unhelp above. + * So need to wait it. + */ + synchronize_rcu(); } EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 426457047578..a61d6df6e5f6 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -424,6 +424,10 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write, if (write) { struct ctl_table tmp = *table; + /* proc_dostring() can append to existing strings, so we need to + * initialize it as an empty string. 
+ */ + buf[0] = '\0'; tmp.data = buf; r = proc_dostring(&tmp, write, buffer, lenp, ppos); if (r) @@ -442,14 +446,17 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write, rcu_assign_pointer(net->nf.nf_loggers[tindex], logger); mutex_unlock(&nf_log_mutex); } else { + struct ctl_table tmp = *table; + + tmp.data = buf; mutex_lock(&nf_log_mutex); logger = nft_log_dereference(net->nf.nf_loggers[tindex]); if (!logger) - table->data = "NONE"; + strlcpy(buf, "NONE", sizeof(buf)); else - table->data = logger->name; - r = proc_dostring(table, write, buffer, lenp, ppos); + strlcpy(buf, logger->name, sizeof(buf)); mutex_unlock(&nf_log_mutex); + r = proc_dostring(&tmp, write, buffer, lenp, ppos); } return r; diff --git a/net/netfilter/nf_tables_set_core.c b/net/netfilter/nf_tables_set_core.c new file mode 100644 index 000000000000..814789644bd3 --- /dev/null +++ b/net/netfilter/nf_tables_set_core.c @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <net/netfilter/nf_tables_core.h> + +static int __init nf_tables_set_module_init(void) +{ + nft_register_set(&nft_set_hash_fast_type); + nft_register_set(&nft_set_hash_type); + nft_register_set(&nft_set_rhash_type); + nft_register_set(&nft_set_bitmap_type); + nft_register_set(&nft_set_rbtree_type); + + return 0; +} + +static void __exit nf_tables_set_module_exit(void) +{ + nft_unregister_set(&nft_set_rbtree_type); + nft_unregister_set(&nft_set_bitmap_type); + nft_unregister_set(&nft_set_rhash_type); + nft_unregister_set(&nft_set_hash_type); + nft_unregister_set(&nft_set_hash_fast_type); +} + +module_init(nf_tables_set_module_init); +module_exit(nf_tables_set_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS_NFT_SET(); diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 4ccd2988f9db..ea4ba551abb2 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -1243,6 +1243,9 @@ static int nfqnl_recv_unsupp(struct net *net, struct sock *ctnl, static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) }, [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) }, + [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 }, + [NFQA_CFG_MASK] = { .type = NLA_U32 }, + [NFQA_CFG_FLAGS] = { .type = NLA_U32 }, }; static const struct nf_queue_handler nfqh = { diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 8d1ff654e5af..32535eea51b2 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -832,10 +832,18 @@ nft_target_select_ops(const struct nft_ctx *ctx, rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV])); family = ctx->family; + if (strcmp(tg_name, XT_ERROR_TARGET) == 0 || + strcmp(tg_name, XT_STANDARD_TARGET) == 0 || + strcmp(tg_name, "standard") == 0) + return ERR_PTR(-EINVAL); + /* Re-use the existing target if it's already loaded. 
*/ list_for_each_entry(nft_target, &nft_target_list, head) { struct xt_target *target = nft_target->ops.data; + if (!target->target) + continue; + if (nft_target_cmp(target, tg_name, rev, family)) return &nft_target->ops; } @@ -844,6 +852,11 @@ nft_target_select_ops(const struct nft_ctx *ctx, if (IS_ERR(target)) return ERR_PTR(-ENOENT); + if (!target->target) { + err = -EINVAL; + goto err; + } + if (target->targetsize > nla_len(tb[NFTA_TARGET_INFO])) { err = -EINVAL; goto err; diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c index d6626e01c7ee..128bc16f52dd 100644 --- a/net/netfilter/nft_set_bitmap.c +++ b/net/netfilter/nft_set_bitmap.c @@ -296,7 +296,7 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features, return true; } -static struct nft_set_type nft_bitmap_type __read_mostly = { +struct nft_set_type nft_set_bitmap_type __read_mostly = { .owner = THIS_MODULE, .ops = { .privsize = nft_bitmap_privsize, @@ -314,20 +314,3 @@ static struct nft_set_type nft_bitmap_type __read_mostly = { .get = nft_bitmap_get, }, }; - -static int __init nft_bitmap_module_init(void) -{ - return nft_register_set(&nft_bitmap_type); -} - -static void __exit nft_bitmap_module_exit(void) -{ - nft_unregister_set(&nft_bitmap_type); -} - -module_init(nft_bitmap_module_init); -module_exit(nft_bitmap_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); -MODULE_ALIAS_NFT_SET(); diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 6f9a1365a09f..72ef35b51cac 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -654,7 +654,7 @@ static bool nft_hash_fast_estimate(const struct nft_set_desc *desc, u32 features return true; } -static struct nft_set_type nft_rhash_type __read_mostly = { +struct nft_set_type nft_set_rhash_type __read_mostly = { .owner = THIS_MODULE, .features = NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT | NFT_SET_EVAL, @@ -677,7 +677,7 @@ static struct nft_set_type nft_rhash_type __read_mostly = { }, }; -static struct nft_set_type nft_hash_type __read_mostly = { +struct nft_set_type nft_set_hash_type __read_mostly = { .owner = THIS_MODULE, .features = NFT_SET_MAP | NFT_SET_OBJECT, .ops = { @@ -697,7 +697,7 @@ static struct nft_set_type nft_hash_type __read_mostly = { }, }; -static struct nft_set_type nft_hash_fast_type __read_mostly = { +struct nft_set_type nft_set_hash_fast_type __read_mostly = { .owner = THIS_MODULE, .features = NFT_SET_MAP | NFT_SET_OBJECT, .ops = { @@ -716,26 +716,3 @@ static struct nft_set_type nft_hash_fast_type __read_mostly = { .get = nft_hash_get, }, }; - -static int __init nft_hash_module_init(void) -{ - if (nft_register_set(&nft_hash_fast_type) || - nft_register_set(&nft_hash_type) || - nft_register_set(&nft_rhash_type)) - return 1; - return 0; -} - -static void __exit nft_hash_module_exit(void) -{ - nft_unregister_set(&nft_rhash_type); - nft_unregister_set(&nft_hash_type); - nft_unregister_set(&nft_hash_fast_type); -} - -module_init(nft_hash_module_init); -module_exit(nft_hash_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); -MODULE_ALIAS_NFT_SET(); diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 7f3a9a211034..1f8f257cb518 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -462,7 +462,7 @@ static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features, return true; } -static struct nft_set_type 
nft_rbtree_type __read_mostly = { +struct nft_set_type nft_set_rbtree_type __read_mostly = { .owner = THIS_MODULE, .features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT, .ops = { @@ -481,20 +481,3 @@ static struct nft_set_type nft_rbtree_type __read_mostly = { .get = nft_rbtree_get, }, }; - -static int __init nft_rbtree_module_init(void) -{ - return nft_register_set(&nft_rbtree_type); -} - -static void __exit nft_rbtree_module_exit(void) -{ - nft_unregister_set(&nft_rbtree_type); -} - -module_init(nft_rbtree_module_init); -module_exit(nft_rbtree_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); -MODULE_ALIAS_NFT_SET(); diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 58fce4e749a9..d76550a8b642 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c @@ -61,7 +61,7 @@ tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport, * addresses, this happens if the redirect already happened * and the current packet belongs to an already established * connection */ - sk = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol, + sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol, iph->saddr, iph->daddr, hp->source, hp->dest, skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED); @@ -77,7 +77,7 @@ tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport, else if (!sk) /* no, there's no established connection, check if * there's a listener on the redirected addr/port */ - sk = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol, + sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol, iph->saddr, laddr, hp->source, lport, skb->dev, NF_TPROXY_LOOKUP_LISTENER); @@ -150,7 +150,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) * addresses, this happens if the redirect already happened * and the current packet belongs to an already established * connection */ - sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, hp, tproto, + sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, tproto, &iph->saddr, &iph->daddr, hp->source, hp->dest, xt_in(par), NF_TPROXY_LOOKUP_ESTABLISHED); @@ -171,7 +171,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) else if (!sk) /* no there's no established connection, check if * there's a listener on the redirected addr/port */ - sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, hp, + sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, tproto, &iph->saddr, laddr, hp->source, lport, xt_in(par), NF_TPROXY_LOOKUP_LISTENER); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 1189b84413d5..393573a99a5a 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2658,7 +2658,7 @@ static const struct proto_ops netlink_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = netlink_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = netlink_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 93fbcafbf388..03f37c4e64fe 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -1355,7 +1355,7 @@ static const struct proto_ops nr_proto_ops = { .socketpair = sock_no_socketpair, .accept = nr_accept, .getname = nr_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = nr_ioctl, .listen = nr_listen, .shutdown = sock_no_shutdown, diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c index 2ceefa183cee..6a196e438b6c 100644 --- 
a/net/nfc/llcp_commands.c +++ b/net/nfc/llcp_commands.c @@ -752,11 +752,14 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap, pr_debug("Fragment %zd bytes remaining %zd", frag_len, remaining_len); - pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT, + pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0, frag_len + LLCP_HEADER_SIZE, &err); if (pdu == NULL) { - pr_err("Could not allocate PDU\n"); - continue; + pr_err("Could not allocate PDU (error=%d)\n", err); + len -= remaining_len; + if (len == 0) + len = err; + break; } pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI); diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index ab5bb14b49af..ea0c0c6f1874 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -548,13 +548,16 @@ static inline __poll_t llcp_accept_poll(struct sock *parent) return 0; } -static __poll_t llcp_sock_poll_mask(struct socket *sock, __poll_t events) +static __poll_t llcp_sock_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; __poll_t mask = 0; pr_debug("%p\n", sk); + sock_poll_wait(file, sk_sleep(sk), wait); + if (sk->sk_state == LLCP_LISTEN) return llcp_accept_poll(sk); @@ -896,7 +899,7 @@ static const struct proto_ops llcp_sock_ops = { .socketpair = sock_no_socketpair, .accept = llcp_sock_accept, .getname = llcp_sock_getname, - .poll_mask = llcp_sock_poll_mask, + .poll = llcp_sock_poll, .ioctl = sock_no_ioctl, .listen = llcp_sock_listen, .shutdown = sock_no_shutdown, @@ -916,7 +919,7 @@ static const struct proto_ops llcp_rawsock_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = llcp_sock_getname, - .poll_mask = llcp_sock_poll_mask, + .poll = llcp_sock_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index 60c322531c49..e2188deb08dc 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c @@ -284,7 +284,7 @@ static const struct proto_ops rawsock_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, @@ -304,7 +304,7 @@ static const struct proto_ops rawsock_raw_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c index 9696ef96b719..1a30e165eeb4 100644 --- a/net/nsh/nsh.c +++ b/net/nsh/nsh.c @@ -104,7 +104,7 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb, __skb_pull(skb, nsh_len); skb_reset_mac_header(skb); - skb_reset_mac_len(skb); + skb->mac_len = proto == htons(ETH_P_TEB) ? 
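
/*
 * Illustrative sketch (hypothetical, not part of the patch above): the
 * nfc_llcp_send_ui_frame() fix above stops silently continuing on allocation
 * failure and instead reports how much was already sent, falling back to the
 * error code only when nothing went out.  The same convention in a generic,
 * runnable user-space send loop (names invented for the example):
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Write len bytes in chunks; return bytes written, or -errno if none were. */
static ssize_t send_all(int fd, const char *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t n = write(fd, buf + done, len - done);

		if (n < 0) {
			if (errno == EINTR)
				continue;
			/* partial progress wins over the error code */
			return done ? (ssize_t)done : -errno;
		}
		done += (size_t)n;
	}
	return (ssize_t)done;
}

int main(void)
{
	const char msg[] = "hello\n";
	ssize_t rc = send_all(STDOUT_FILENO, msg, sizeof(msg) - 1);

	if (rc < 0)
		fprintf(stderr, "send_all failed: %zd\n", rc);
	return 0;
}
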
ETH_HLEN : 0; skb->protocol = proto; features &= NETIF_F_SG; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ff8e7e245c37..9b27d0cd766d 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2878,6 +2878,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) goto out_free; } else if (reserve) { skb_reserve(skb, -reserve); + if (len < reserve) + skb_reset_network_header(skb); } /* Returns -EFAULT on error */ @@ -4076,11 +4078,12 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd, return 0; } -static __poll_t packet_poll_mask(struct socket *sock, __poll_t events) +static __poll_t packet_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); - __poll_t mask = datagram_poll_mask(sock, events); + __poll_t mask = datagram_poll(file, sock, wait); spin_lock_bh(&sk->sk_receive_queue.lock); if (po->rx_ring.pg_vec) { @@ -4422,7 +4425,7 @@ static const struct proto_ops packet_ops_spkt = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = packet_getname_spkt, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = packet_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, @@ -4443,7 +4446,7 @@ static const struct proto_ops packet_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = packet_getname, - .poll_mask = packet_poll_mask, + .poll = packet_poll, .ioctl = packet_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/phonet/socket.c b/net/phonet/socket.c index c295c4e20f01..30187990257f 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -340,12 +340,15 @@ static int pn_socket_getname(struct socket *sock, struct sockaddr *addr, return sizeof(struct sockaddr_pn); } -static __poll_t pn_socket_poll_mask(struct socket *sock, __poll_t events) +static __poll_t pn_socket_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; struct pep_sock *pn = pep_sk(sk); __poll_t mask = 0; + poll_wait(file, sk_sleep(sk), wait); + if (sk->sk_state == TCP_CLOSE) return EPOLLERR; if (!skb_queue_empty(&sk->sk_receive_queue)) @@ -445,7 +448,7 @@ const struct proto_ops phonet_dgram_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = pn_socket_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = pn_socket_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, @@ -470,7 +473,7 @@ const struct proto_ops phonet_stream_ops = { .socketpair = sock_no_socketpair, .accept = pn_socket_accept, .getname = pn_socket_getname, - .poll_mask = pn_socket_poll_mask, + .poll = pn_socket_poll, .ioctl = pn_socket_ioctl, .listen = pn_socket_listen, .shutdown = sock_no_shutdown, diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 1b5025ea5b04..86e1e37eb4e8 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -191,8 +191,13 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, hdr->type = cpu_to_le32(type); hdr->src_node_id = cpu_to_le32(from->sq_node); hdr->src_port_id = cpu_to_le32(from->sq_port); - hdr->dst_node_id = cpu_to_le32(to->sq_node); - hdr->dst_port_id = cpu_to_le32(to->sq_port); + if (to->sq_port == QRTR_PORT_CTRL) { + hdr->dst_node_id = cpu_to_le32(node->nid); + hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST); + } else { + hdr->dst_node_id = cpu_to_le32(to->sq_node); + hdr->dst_port_id = cpu_to_le32(to->sq_port); + } hdr->size = cpu_to_le32(len); 
hdr->confirm_rx = 0; @@ -764,6 +769,10 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) node = NULL; if (addr->sq_node == QRTR_NODE_BCAST) { enqueue_fn = qrtr_bcast_enqueue; + if (addr->sq_port != QRTR_PORT_CTRL) { + release_sock(sk); + return -ENOTCONN; + } } else if (addr->sq_node == ipc->us.sq_node) { enqueue_fn = qrtr_local_enqueue; } else { @@ -1023,7 +1032,7 @@ static const struct proto_ops qrtr_proto_ops = { .recvmsg = qrtr_recvmsg, .getname = qrtr_getname, .ioctl = qrtr_ioctl, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, diff --git a/net/rds/connection.c b/net/rds/connection.c index abef75da89a7..cfb05953b0e5 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -659,11 +659,19 @@ static void rds_conn_info(struct socket *sock, unsigned int len, int rds_conn_init(void) { + int ret; + + ret = rds_loop_net_init(); /* register pernet callback */ + if (ret) + return ret; + rds_conn_slab = kmem_cache_create("rds_connection", sizeof(struct rds_connection), 0, 0, NULL); - if (!rds_conn_slab) + if (!rds_conn_slab) { + rds_loop_net_exit(); return -ENOMEM; + } rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info); rds_info_register_func(RDS_INFO_SEND_MESSAGES, @@ -676,6 +684,7 @@ int rds_conn_init(void) void rds_conn_exit(void) { + rds_loop_net_exit(); /* unregister pernet callback */ rds_loop_exit(); WARN_ON(!hlist_empty(rds_conn_hash)); diff --git a/net/rds/loop.c b/net/rds/loop.c index dac6218a460e..feea1f96ee2a 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c @@ -33,6 +33,8 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/in.h> +#include <net/net_namespace.h> +#include <net/netns/generic.h> #include "rds_single_path.h" #include "rds.h" @@ -40,6 +42,17 @@ static DEFINE_SPINLOCK(loop_conns_lock); static LIST_HEAD(loop_conns); +static atomic_t rds_loop_unloading = ATOMIC_INIT(0); + +static void rds_loop_set_unloading(void) +{ + atomic_set(&rds_loop_unloading, 1); +} + +static bool rds_loop_is_unloading(struct rds_connection *conn) +{ + return atomic_read(&rds_loop_unloading) != 0; +} /* * This 'loopback' transport is a special case for flows that originate @@ -165,6 +178,8 @@ void rds_loop_exit(void) struct rds_loop_connection *lc, *_lc; LIST_HEAD(tmp_list); + rds_loop_set_unloading(); + synchronize_rcu(); /* avoid calling conn_destroy with irqs off */ spin_lock_irq(&loop_conns_lock); list_splice(&loop_conns, &tmp_list); @@ -177,6 +192,46 @@ void rds_loop_exit(void) } } +static void rds_loop_kill_conns(struct net *net) +{ + struct rds_loop_connection *lc, *_lc; + LIST_HEAD(tmp_list); + + spin_lock_irq(&loop_conns_lock); + list_for_each_entry_safe(lc, _lc, &loop_conns, loop_node) { + struct net *c_net = read_pnet(&lc->conn->c_net); + + if (net != c_net) + continue; + list_move_tail(&lc->loop_node, &tmp_list); + } + spin_unlock_irq(&loop_conns_lock); + + list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) { + WARN_ON(lc->conn->c_passive); + rds_conn_destroy(lc->conn); + } +} + +static void __net_exit rds_loop_exit_net(struct net *net) +{ + rds_loop_kill_conns(net); +} + +static struct pernet_operations rds_loop_net_ops = { + .exit = rds_loop_exit_net, +}; + +int rds_loop_net_init(void) +{ + return register_pernet_device(&rds_loop_net_ops); +} + +void rds_loop_net_exit(void) +{ + unregister_pernet_device(&rds_loop_net_ops); +} + /* * This is missing .xmit_* because loop doesn't go through generic * rds_send_xmit() 
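
/*
 * Illustrative sketch (hypothetical, not part of the patch above):
 * rds_conn_init() above now registers the loopback pernet callback first and
 * unregisters it again if the later slab allocation fails, so no early exit
 * leaves half-registered state behind.  A generic, runnable model of that
 * "register first, undo on later failure" shape (all names invented):
 */
#include <stdio.h>
#include <stdlib.h>

static int registered;

static int register_callback(void)    { registered = 1; return 0; }
static void unregister_callback(void) { registered = 0; }

static int demo_init(int simulate_alloc_failure)
{
	void *cache;
	int ret;

	ret = register_callback();		/* step 1 */
	if (ret)
		return ret;

	cache = simulate_alloc_failure ? NULL : malloc(64);	/* step 2 */
	if (!cache) {
		unregister_callback();		/* undo step 1 before failing */
		return -1;
	}

	free(cache);
	return 0;
}

int main(void)
{
	printf("ok path:   ret=%d registered=%d\n", demo_init(0), registered);
	printf("fail path: ret=%d registered=%d\n", demo_init(1), registered);
	return 0;
}
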
and doesn't call rds_recv_incoming(). .listen_stop and @@ -194,4 +249,5 @@ struct rds_transport rds_loop_transport = { .inc_free = rds_loop_inc_free, .t_name = "loopback", .t_type = RDS_TRANS_LOOP, + .t_unloading = rds_loop_is_unloading, }; diff --git a/net/rds/loop.h b/net/rds/loop.h index 469fa4b2da4f..bbc8cdd030df 100644 --- a/net/rds/loop.h +++ b/net/rds/loop.h @@ -5,6 +5,8 @@ /* loop.c */ extern struct rds_transport rds_loop_transport; +int rds_loop_net_init(void); +void rds_loop_net_exit(void); void rds_loop_exit(void); #endif diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index ebe42e7eb456..d00a0ef39a56 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1470,7 +1470,7 @@ static const struct proto_ops rose_proto_ops = { .socketpair = sock_no_socketpair, .accept = rose_accept, .getname = rose_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = rose_ioctl, .listen = rose_listen, .shutdown = sock_no_shutdown, diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 3b1ac93efee2..2b463047dd7b 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -734,11 +734,15 @@ static int rxrpc_getsockopt(struct socket *sock, int level, int optname, /* * permit an RxRPC socket to be polled */ -static __poll_t rxrpc_poll_mask(struct socket *sock, __poll_t events) +static __poll_t rxrpc_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; struct rxrpc_sock *rx = rxrpc_sk(sk); - __poll_t mask = 0; + __poll_t mask; + + sock_poll_wait(file, sk_sleep(sk), wait); + mask = 0; /* the socket is readable if there are any messages waiting on the Rx * queue */ @@ -945,7 +949,7 @@ static const struct proto_ops rxrpc_rpc_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = rxrpc_poll_mask, + .poll = rxrpc_poll, .ioctl = sock_no_ioctl, .listen = rxrpc_listen, .shutdown = rxrpc_shutdown, diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 526a8e491626..6e7124e57918 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -91,7 +91,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla, } params_old = rtnl_dereference(p->params); - params_new->action = parm->action; + p->tcf_action = parm->action; params_new->update_flags = parm->update_flags; rcu_assign_pointer(p->params, params_new); if (params_old) @@ -561,7 +561,7 @@ static int tcf_csum(struct sk_buff *skb, const struct tc_action *a, tcf_lastuse_update(&p->tcf_tm); bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb); - action = params->action; + action = READ_ONCE(p->tcf_action); if (unlikely(action == TC_ACT_SHOT)) goto drop_stats; @@ -599,11 +599,11 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind, .index = p->tcf_index, .refcnt = p->tcf_refcnt - ref, .bindcnt = p->tcf_bindcnt - bind, + .action = p->tcf_action, }; struct tcf_t t; params = rtnl_dereference(p->params); - opt.action = params->action; opt.update_flags = params->update_flags; if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt)) diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 626dac81a48a..9bc6c2ae98a5 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -36,7 +36,7 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a, tcf_lastuse_update(&t->tcf_tm); bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb); - action = params->action; + action = READ_ONCE(t->tcf_action); switch (params->tcft_action) { case 
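
/*
 * Illustrative sketch (hypothetical, not part of the patch above): the
 * act_csum and act_tunnel_key hunks here move the verdict into tcf_action and
 * read it in the packet fast path with READ_ONCE(), since readers run without
 * the RTNL lock that configuration writers hold.  User space has no
 * READ_ONCE(), but C11 relaxed atomics express the same "one tear-free load
 * of a concurrently updated word" idea (variable names invented):
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int demo_action = 1;	/* e.g. 1 = pass, 2 = drop */

/* writer side: configuration update */
static void demo_set_action(int val)
{
	atomic_store_explicit(&demo_action, val, memory_order_relaxed);
}

/* reader side: per-packet fast path, no lock taken */
static int demo_fast_path(void)
{
	int action = atomic_load_explicit(&demo_action, memory_order_relaxed);

	return action;	/* use the value read once; do not re-read it later */
}

int main(void)
{
	printf("action before update: %d\n", demo_fast_path());
	demo_set_action(2);
	printf("action after update:  %d\n", demo_fast_path());
	return 0;
}
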
TCA_TUNNEL_KEY_ACT_RELEASE: @@ -182,7 +182,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, params_old = rtnl_dereference(t->params); - params_new->action = parm->action; + t->tcf_action = parm->action; params_new->tcft_action = parm->t_action; params_new->tcft_enc_metadata = metadata; @@ -254,13 +254,13 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a, .index = t->tcf_index, .refcnt = t->tcf_refcnt - ref, .bindcnt = t->tcf_bindcnt - bind, + .action = t->tcf_action, }; struct tcf_t tm; params = rtnl_dereference(t->params); opt.t_action = params->tcft_action; - opt.action = params->action; if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt)) goto nla_put_failure; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index cdc3c87c53e6..f74513a7c7a8 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1053,7 +1053,7 @@ static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb, for (tp = rtnl_dereference(chain->filter_chain); tp; tp = rtnl_dereference(tp->next)) tfilter_notify(net, oskb, n, tp, block, - q, parent, 0, event, false); + q, parent, NULL, event, false); } static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, @@ -1444,7 +1444,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent, memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(cb->args[0])); if (cb->args[1] == 0) { - if (tcf_fill_node(net, skb, tp, block, q, parent, 0, + if (tcf_fill_node(net, skb, tp, block, q, parent, NULL, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER) <= 0) diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index cd2e0e342fb6..6c0a9d5dbf94 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -479,24 +479,28 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt, q->cparams.mtu = psched_mtu(qdisc_dev(sch)); if (opt) { - int err = fq_codel_change(sch, opt, extack); + err = fq_codel_change(sch, opt, extack); if (err) - return err; + goto init_failure; } err = tcf_block_get(&q->block, &q->filter_list, sch, extack); if (err) - return err; + goto init_failure; if (!q->flows) { q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_codel_flow), GFP_KERNEL); - if (!q->flows) - return -ENOMEM; + if (!q->flows) { + err = -ENOMEM; + goto init_failure; + } q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL); - if (!q->backlogs) - return -ENOMEM; + if (!q->backlogs) { + err = -ENOMEM; + goto alloc_failure; + } for (i = 0; i < q->flows_cnt; i++) { struct fq_codel_flow *flow = q->flows + i; @@ -509,6 +513,13 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt, else sch->flags &= ~TCQ_F_CAN_BYPASS; return 0; + +alloc_failure: + kvfree(q->flows); + q->flows = NULL; +init_failure: + q->flows_cnt = 0; + return err; } static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb) diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 7339918a805d..0cd2e764f47f 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -1010,7 +1010,7 @@ static const struct proto_ops inet6_seqpacket_ops = { .socketpair = sock_no_socketpair, .accept = inet_accept, .getname = sctp_getname, - .poll_mask = sctp_poll_mask, + .poll = sctp_poll, .ioctl = inet6_ioctl, .listen = sctp_inet_listen, .shutdown = inet_shutdown, diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 5dffbc493008..67f73d3a1356 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1016,7 +1016,7 @@ static const struct proto_ops inet_seqpacket_ops = { .socketpair = 
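
/*
 * Illustrative sketch (hypothetical, not part of the patch above): the
 * fq_codel_init() fix above funnels every failure through goto labels so the
 * flow table is freed and flows_cnt reset no matter which allocation failed.
 * The same staged-unwind pattern in plain, runnable C (names invented):
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_sched {
	unsigned int flows_cnt;
	int *flows;
	unsigned int *backlogs;
};

static int demo_init(struct demo_sched *q, int fail_backlogs)
{
	int err;

	q->flows_cnt = 1024;

	q->flows = calloc(q->flows_cnt, sizeof(*q->flows));
	if (!q->flows) {
		err = -12;			/* -ENOMEM */
		goto init_failure;
	}

	q->backlogs = fail_backlogs ? NULL
				    : calloc(q->flows_cnt, sizeof(*q->backlogs));
	if (!q->backlogs) {
		err = -12;
		goto alloc_failure;
	}
	return 0;

alloc_failure:
	free(q->flows);
	q->flows = NULL;
init_failure:
	q->flows_cnt = 0;
	return err;
}

int main(void)
{
	struct demo_sched q = { 0 };

	printf("init: %d (flows_cnt=%u)\n", demo_init(&q, 1), q.flows_cnt);
	return 0;
}
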
sock_no_socketpair, .accept = inet_accept, .getname = inet_getname, /* Semantics are different. */ - .poll_mask = sctp_poll_mask, + .poll = sctp_poll, .ioctl = inet_ioctl, .listen = sctp_inet_listen, .shutdown = inet_shutdown, /* Looks harmless. */ diff --git a/net/sctp/socket.c b/net/sctp/socket.c index d20f7addee19..ce620e878538 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -7717,12 +7717,14 @@ out: * here, again, by modeling the current TCP/UDP code. We don't have * a good way to test with it yet. */ -__poll_t sctp_poll_mask(struct socket *sock, __poll_t events) +__poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct sctp_sock *sp = sctp_sk(sk); __poll_t mask; + poll_wait(file, sk_sleep(sk), wait); + sock_rps_record_flow(sk); /* A TCP-style listening socket becomes readable when the accept queue diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 445b7ef61677..12cac85da994 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -282,7 +282,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) if (dst) { /* Re-fetch, as under layers may have a higher minimum size */ - pmtu = SCTP_TRUNC4(dst_mtu(dst)); + pmtu = sctp_dst_mtu(dst); change = t->pathmtu != pmtu; } t->pathmtu = pmtu; diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index da7f02edcd37..05e4ffe5aabd 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -45,6 +45,7 @@ static DEFINE_MUTEX(smc_create_lgr_pending); /* serialize link group */ static void smc_tcp_listen_work(struct work_struct *); +static void smc_connect_work(struct work_struct *); static void smc_set_keepalive(struct sock *sk, int val) { @@ -122,6 +123,12 @@ static int smc_release(struct socket *sock) goto out; smc = smc_sk(sk); + + /* cleanup for a dangling non-blocking connect */ + flush_work(&smc->connect_work); + kfree(smc->connect_info); + smc->connect_info = NULL; + if (sk->sk_state == SMC_LISTEN) /* smc_close_non_accepted() is called and acquires * sock lock for child sockets again @@ -140,7 +147,8 @@ static int smc_release(struct socket *sock) smc->clcsock = NULL; } if (smc->use_fallback) { - sock_put(sk); /* passive closing */ + if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT) + sock_put(sk); /* passive closing */ sk->sk_state = SMC_CLOSED; sk->sk_state_change(sk); } @@ -186,6 +194,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock, sk->sk_protocol = protocol; smc = smc_sk(sk); INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work); + INIT_WORK(&smc->connect_work, smc_connect_work); INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work); INIT_LIST_HEAD(&smc->accept_q); spin_lock_init(&smc->accept_q_lock); @@ -409,12 +418,18 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code) { int rc; - if (reason_code < 0) /* error, fallback is not possible */ + if (reason_code < 0) { /* error, fallback is not possible */ + if (smc->sk.sk_state == SMC_INIT) + sock_put(&smc->sk); /* passive closing */ return reason_code; + } if (reason_code != SMC_CLC_DECL_REPLY) { rc = smc_clc_send_decline(smc, reason_code); - if (rc < 0) + if (rc < 0) { + if (smc->sk.sk_state == SMC_INIT) + sock_put(&smc->sk); /* passive closing */ return rc; + } } return smc_connect_fallback(smc); } @@ -427,8 +442,6 @@ static int smc_connect_abort(struct smc_sock *smc, int reason_code, smc_lgr_forget(smc->conn.lgr); mutex_unlock(&smc_create_lgr_pending); smc_conn_free(&smc->conn); - if (reason_code < 0 && smc->sk.sk_state == SMC_INIT) 
- sock_put(&smc->sk); /* passive closing */ return reason_code; } @@ -576,6 +589,35 @@ static int __smc_connect(struct smc_sock *smc) return 0; } +static void smc_connect_work(struct work_struct *work) +{ + struct smc_sock *smc = container_of(work, struct smc_sock, + connect_work); + int rc; + + lock_sock(&smc->sk); + rc = kernel_connect(smc->clcsock, &smc->connect_info->addr, + smc->connect_info->alen, smc->connect_info->flags); + if (smc->clcsock->sk->sk_err) { + smc->sk.sk_err = smc->clcsock->sk->sk_err; + goto out; + } + if (rc < 0) { + smc->sk.sk_err = -rc; + goto out; + } + + rc = __smc_connect(smc); + if (rc < 0) + smc->sk.sk_err = -rc; + +out: + smc->sk.sk_state_change(&smc->sk); + kfree(smc->connect_info); + smc->connect_info = NULL; + release_sock(&smc->sk); +} + static int smc_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { @@ -605,15 +647,32 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr, smc_copy_sock_settings_to_clc(smc); tcp_sk(smc->clcsock->sk)->syn_smc = 1; - rc = kernel_connect(smc->clcsock, addr, alen, flags); - if (rc) - goto out; + if (flags & O_NONBLOCK) { + if (smc->connect_info) { + rc = -EALREADY; + goto out; + } + smc->connect_info = kzalloc(alen + 2 * sizeof(int), GFP_KERNEL); + if (!smc->connect_info) { + rc = -ENOMEM; + goto out; + } + smc->connect_info->alen = alen; + smc->connect_info->flags = flags ^ O_NONBLOCK; + memcpy(&smc->connect_info->addr, addr, alen); + schedule_work(&smc->connect_work); + rc = -EINPROGRESS; + } else { + rc = kernel_connect(smc->clcsock, addr, alen, flags); + if (rc) + goto out; - rc = __smc_connect(smc); - if (rc < 0) - goto out; - else - rc = 0; /* success cases including fallback */ + rc = __smc_connect(smc); + if (rc < 0) + goto out; + else + rc = 0; /* success cases including fallback */ + } out: release_sock(sk); @@ -1273,40 +1332,26 @@ static __poll_t smc_accept_poll(struct sock *parent) return mask; } -static __poll_t smc_poll_mask(struct socket *sock, __poll_t events) +static __poll_t smc_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; __poll_t mask = 0; struct smc_sock *smc; - int rc; if (!sk) return EPOLLNVAL; smc = smc_sk(sock->sk); - sock_hold(sk); - lock_sock(sk); if ((sk->sk_state == SMC_INIT) || smc->use_fallback) { /* delegate to CLC child sock */ - release_sock(sk); - mask = smc->clcsock->ops->poll_mask(smc->clcsock, events); - lock_sock(sk); + mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); sk->sk_err = smc->clcsock->sk->sk_err; - if (sk->sk_err) { + if (sk->sk_err) mask |= EPOLLERR; - } else { - /* if non-blocking connect finished ... 
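
/*
 * Illustrative sketch (hypothetical, not part of the patch above): the af_smc
 * hunks here add real O_NONBLOCK connect support by returning -EINPROGRESS
 * and finishing the handshake from a work item.  From user space the usual
 * way to drive such a connect is: connect(), expect EINPROGRESS, poll for
 * POLLOUT, then read SO_ERROR.  A runnable demonstration against a local
 * listener created by the program itself:
 */
#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in addr;
	socklen_t alen = sizeof(addr);
	int lfd, cfd, err = 0;
	socklen_t elen = sizeof(err);
	struct pollfd pfd;

	/* local listener on an ephemeral port so the example is self-contained */
	lfd = socket(AF_INET, SOCK_STREAM, 0);
	if (lfd < 0)
		return 1;
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
	listen(lfd, 1);
	getsockname(lfd, (struct sockaddr *)&addr, &alen);

	cfd = socket(AF_INET, SOCK_STREAM, 0);
	fcntl(cfd, F_SETFL, O_NONBLOCK);

	if (connect(cfd, (struct sockaddr *)&addr, sizeof(addr)) < 0 &&
	    errno != EINPROGRESS) {
		perror("connect");
		return 1;
	}

	/* wait until the socket reports writability, then check SO_ERROR */
	pfd.fd = cfd;
	pfd.events = POLLOUT;
	poll(&pfd, 1, 1000);
	getsockopt(cfd, SOL_SOCKET, SO_ERROR, &err, &elen);
	printf("non-blocking connect finished, SO_ERROR=%d\n", err);

	close(cfd);
	close(lfd);
	return 0;
}
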
*/ - if (sk->sk_state == SMC_INIT && - mask & EPOLLOUT && - smc->clcsock->sk->sk_state != TCP_CLOSE) { - rc = __smc_connect(smc); - if (rc < 0) - mask |= EPOLLERR; - /* success cases including fallback */ - mask |= EPOLLOUT | EPOLLWRNORM; - } - } } else { + if (sk->sk_state != SMC_CLOSED) + sock_poll_wait(file, sk_sleep(sk), wait); if (sk->sk_err) mask |= EPOLLERR; if ((sk->sk_shutdown == SHUTDOWN_MASK) || @@ -1332,10 +1377,7 @@ static __poll_t smc_poll_mask(struct socket *sock, __poll_t events) } if (smc->conn.urg_state == SMC_URG_VALID) mask |= EPOLLPRI; - } - release_sock(sk); - sock_put(sk); return mask; } @@ -1415,7 +1457,8 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(int)) return -EINVAL; - get_user(val, (int __user *)optval); + if (get_user(val, (int __user *)optval)) + return -EFAULT; lock_sock(sk); switch (optname) { @@ -1483,10 +1526,13 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd, return -EBADF; return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg); } + lock_sock(&smc->sk); switch (cmd) { case SIOCINQ: /* same as FIONREAD */ - if (smc->sk.sk_state == SMC_LISTEN) + if (smc->sk.sk_state == SMC_LISTEN) { + release_sock(&smc->sk); return -EINVAL; + } if (smc->sk.sk_state == SMC_INIT || smc->sk.sk_state == SMC_CLOSED) answ = 0; @@ -1495,8 +1541,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd, break; case SIOCOUTQ: /* output queue size (not send + not acked) */ - if (smc->sk.sk_state == SMC_LISTEN) + if (smc->sk.sk_state == SMC_LISTEN) { + release_sock(&smc->sk); return -EINVAL; + } if (smc->sk.sk_state == SMC_INIT || smc->sk.sk_state == SMC_CLOSED) answ = 0; @@ -1506,8 +1554,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd, break; case SIOCOUTQNSD: /* output queue size (not send only) */ - if (smc->sk.sk_state == SMC_LISTEN) + if (smc->sk.sk_state == SMC_LISTEN) { + release_sock(&smc->sk); return -EINVAL; + } if (smc->sk.sk_state == SMC_INIT || smc->sk.sk_state == SMC_CLOSED) answ = 0; @@ -1515,8 +1565,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd, answ = smc_tx_prepared_sends(&smc->conn); break; case SIOCATMARK: - if (smc->sk.sk_state == SMC_LISTEN) + if (smc->sk.sk_state == SMC_LISTEN) { + release_sock(&smc->sk); return -EINVAL; + } if (smc->sk.sk_state == SMC_INIT || smc->sk.sk_state == SMC_CLOSED) { answ = 0; @@ -1532,8 +1584,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd, } break; default: + release_sock(&smc->sk); return -ENOIOCTLCMD; } + release_sock(&smc->sk); return put_user(answ, (int __user *)arg); } @@ -1619,7 +1673,7 @@ static const struct proto_ops smc_sock_ops = { .socketpair = sock_no_socketpair, .accept = smc_accept, .getname = smc_getname, - .poll_mask = smc_poll_mask, + .poll = smc_poll, .ioctl = smc_ioctl, .listen = smc_listen, .shutdown = smc_shutdown, diff --git a/net/smc/smc.h b/net/smc/smc.h index 51ae1f10d81a..d7ca26570482 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -187,11 +187,19 @@ struct smc_connection { struct work_struct close_work; /* peer sent some closing */ }; +struct smc_connect_info { + int flags; + int alen; + struct sockaddr addr; +}; + struct smc_sock { /* smc sock container */ struct sock sk; struct socket *clcsock; /* internal tcp socket */ struct smc_connection conn; /* smc connection */ struct smc_sock *listen_smc; /* listen parent */ + struct smc_connect_info *connect_info; /* connect address & flags */ + struct work_struct connect_work; /* handle non-blocking connect*/ struct work_struct 
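
/*
 * Illustrative sketch (hypothetical, not part of the patch above): the
 * smc_ioctl() hunks take the socket lock around SIOCINQ/SIOCOUTQ/SIOCATMARK
 * handling, but the user-visible contract of SIOCINQ (same as FIONREAD, as
 * the code comments note) is unchanged: it reports the bytes queued for
 * reading.  A runnable query from user space:
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int sv[2], pending = 0;

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	send(sv[1], "12345", 5, 0);

	if (ioctl(sv[0], FIONREAD, &pending) < 0) {
		perror("ioctl(FIONREAD)");
		return 1;
	}
	printf("bytes pending on sv[0]: %d\n", pending);	/* expect 5 */

	close(sv[0]);
	close(sv[1]);
	return 0;
}
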
tcp_listen_work;/* handle tcp socket accepts */ struct work_struct smc_listen_work;/* prepare new accept socket */ struct list_head accept_q; /* sockets to be accepted */ diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 717449b1da0b..ae5d168653ce 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -250,6 +250,7 @@ out: int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, u8 expected_type) { + long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo; struct sock *clc_sk = smc->clcsock->sk; struct smc_clc_msg_hdr *clcm = buf; struct msghdr msg = {NULL, 0}; @@ -306,7 +307,6 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, memset(&msg, 0, sizeof(struct msghdr)); iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen); krflags = MSG_WAITALL; - smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME; len = sock_recvmsg(smc->clcsock, &msg, krflags); if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) { smc->sk.sk_err = EPROTO; @@ -322,6 +322,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, } out: + smc->clcsock->sk->sk_rcvtimeo = rcvtimeo; return reason_code; } diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index fa41d9881741..ac961dfb1ea1 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c @@ -107,6 +107,8 @@ static void smc_close_active_abort(struct smc_sock *smc) } switch (sk->sk_state) { case SMC_INIT: + sk->sk_state = SMC_PEERABORTWAIT; + break; case SMC_ACTIVE: sk->sk_state = SMC_PEERABORTWAIT; release_sock(sk); diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index cee666400752..f82886b7d1d8 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -495,7 +495,8 @@ out: void smc_tx_consumer_update(struct smc_connection *conn, bool force) { - union smc_host_cursor cfed, cons; + union smc_host_cursor cfed, cons, prod; + int sender_free = conn->rmb_desc->len; int to_confirm; smc_curs_write(&cons, @@ -505,11 +506,18 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force) smc_curs_read(&conn->rx_curs_confirmed, conn), conn); to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons); + if (to_confirm > conn->rmbe_update_limit) { + smc_curs_write(&prod, + smc_curs_read(&conn->local_rx_ctrl.prod, conn), + conn); + sender_free = conn->rmb_desc->len - + smc_curs_diff(conn->rmb_desc->len, &prod, &cfed); + } if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || force || ((to_confirm > conn->rmbe_update_limit) && - ((to_confirm > (conn->rmb_desc->len / 2)) || + ((sender_free <= (conn->rmb_desc->len / 2)) || conn->local_rx_ctrl.prod_flags.write_blocked))) { if ((smc_cdc_get_slot_and_msg_send(conn) < 0) && conn->alert_token_local) { /* connection healthy */ diff --git a/net/socket.c b/net/socket.c index 8a109012608a..85633622c94d 100644 --- a/net/socket.c +++ b/net/socket.c @@ -117,10 +117,8 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from); static int sock_mmap(struct file *file, struct vm_area_struct *vma); static int sock_close(struct inode *inode, struct file *file); -static struct wait_queue_head *sock_get_poll_head(struct file *file, - __poll_t events); -static __poll_t sock_poll_mask(struct file *file, __poll_t); -static __poll_t sock_poll(struct file *file, struct poll_table_struct *wait); +static __poll_t sock_poll(struct file *file, + struct poll_table_struct *wait); static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #ifdef CONFIG_COMPAT static long compat_sock_ioctl(struct file *file, @@ -143,8 +141,6 @@ static const struct file_operations 
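
/*
 * Illustrative sketch (hypothetical, not part of the patch above): the
 * smc_clc_wait_msg() change above saves the socket's receive timeout on entry
 * and restores it before returning, instead of leaving a temporary handshake
 * timeout behind on the TCP socket.  The equivalent user-space discipline
 * with SO_RCVTIMEO (helper name invented):
 */
#include <stdio.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>

/* Receive with a temporary timeout, then put the old timeout back. */
static ssize_t recv_with_timeout(int fd, void *buf, size_t len, int secs)
{
	struct timeval saved, tmp = { .tv_sec = secs };
	socklen_t olen = sizeof(saved);
	ssize_t n;

	getsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &saved, &olen);
	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tmp, sizeof(tmp));

	n = recv(fd, buf, len, 0);

	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &saved, sizeof(saved));
	return n;
}

int main(void)
{
	int sv[2];
	char buf[8];

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	send(sv[1], "ok", 2, 0);
	printf("received %zd bytes\n",
	       recv_with_timeout(sv[0], buf, sizeof(buf), 1));
	close(sv[0]);
	close(sv[1]);
	return 0;
}
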
socket_file_ops = { .llseek = no_llseek, .read_iter = sock_read_iter, .write_iter = sock_write_iter, - .get_poll_head = sock_get_poll_head, - .poll_mask = sock_poll_mask, .poll = sock_poll, .unlocked_ioctl = sock_ioctl, #ifdef CONFIG_COMPAT @@ -1130,48 +1126,16 @@ out_release: } EXPORT_SYMBOL(sock_create_lite); -static struct wait_queue_head *sock_get_poll_head(struct file *file, - __poll_t events) -{ - struct socket *sock = file->private_data; - - if (!sock->ops->poll_mask) - return NULL; - sock_poll_busy_loop(sock, events); - return sk_sleep(sock->sk); -} - -static __poll_t sock_poll_mask(struct file *file, __poll_t events) -{ - struct socket *sock = file->private_data; - - /* - * We need to be sure we are in sync with the socket flags modification. - * - * This memory barrier is paired in the wq_has_sleeper. - */ - smp_mb(); - - /* this socket can poll_ll so tell the system call */ - return sock->ops->poll_mask(sock, events) | - (sk_can_busy_loop(sock->sk) ? POLL_BUSY_LOOP : 0); -} - /* No kernel lock held - perfect */ static __poll_t sock_poll(struct file *file, poll_table *wait) { struct socket *sock = file->private_data; - __poll_t events = poll_requested_events(wait), mask = 0; - - if (sock->ops->poll) { - sock_poll_busy_loop(sock, events); - mask = sock->ops->poll(file, sock, wait); - } else if (sock->ops->poll_mask) { - sock_poll_wait(file, sock_get_poll_head(file, events), wait); - mask = sock->ops->poll_mask(sock, events); - } + __poll_t events = poll_requested_events(wait); - return mask | sock_poll_busy_flag(sock); + sock_poll_busy_loop(sock, events); + if (!sock->ops->poll) + return 0; + return sock->ops->poll(file, sock, wait) | sock_poll_busy_flag(sock); } static int sock_mmap(struct file *file, struct vm_area_struct *vma) diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index 373836615c57..625acb27efcc 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c @@ -35,7 +35,6 @@ struct _strp_msg { */ struct strp_msg strp; int accum_len; - int early_eaten; }; static inline struct _strp_msg *_strp_msg(struct sk_buff *skb) @@ -115,20 +114,6 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, head = strp->skb_head; if (head) { /* Message already in progress */ - - stm = _strp_msg(head); - if (unlikely(stm->early_eaten)) { - /* Already some number of bytes on the receive sock - * data saved in skb_head, just indicate they - * are consumed. - */ - eaten = orig_len <= stm->early_eaten ? - orig_len : stm->early_eaten; - stm->early_eaten -= eaten; - - return eaten; - } - if (unlikely(orig_offset)) { /* Getting data with a non-zero offset when a message is * in progress is not expected. If it does happen, we @@ -297,9 +282,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, } stm->accum_len += cand_len; + eaten += cand_len; strp->need_bytes = stm->strp.full_len - stm->accum_len; - stm->early_eaten = cand_len; STRP_STATS_ADD(strp->stats.bytes, cand_len); desc->count = 0; /* Stop reading socket */ break; diff --git a/net/tipc/discover.c b/net/tipc/discover.c index 9f666e0650e2..2830709957bd 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -133,6 +133,8 @@ static void disc_dupl_alert(struct tipc_bearer *b, u32 node_addr, } /* tipc_disc_addr_trial(): - handle an address uniqueness trial from peer + * Returns true if message should be dropped by caller, i.e., if it is a + * trial message or we are inside trial period. Otherwise false. 
*/ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d, struct tipc_media_addr *maddr, @@ -168,8 +170,9 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d, msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); } + /* Accept regular link requests/responses only after trial period */ if (mtyp != DSC_TRIAL_MSG) - return false; + return trial; sugg_addr = tipc_node_try_addr(net, peer_id, src); if (sugg_addr) @@ -284,7 +287,6 @@ static void tipc_disc_timeout(struct timer_list *t) { struct tipc_discoverer *d = from_timer(d, t, timer); struct tipc_net *tn = tipc_net(d->net); - u32 self = tipc_own_addr(d->net); struct tipc_media_addr maddr; struct sk_buff *skb = NULL; struct net *net = d->net; @@ -298,12 +300,14 @@ static void tipc_disc_timeout(struct timer_list *t) goto exit; } - /* Did we just leave the address trial period ? */ - if (!self && !time_before(jiffies, tn->addr_trial_end)) { - self = tn->trial_addr; - tipc_net_finalize(net, self); - msg_set_prevnode(buf_msg(d->skb), self); + /* Trial period over ? */ + if (!time_before(jiffies, tn->addr_trial_end)) { + /* Did we just leave it ? */ + if (!tipc_own_addr(net)) + tipc_net_finalize(net, tn->trial_addr); + msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); + msg_set_prevnode(buf_msg(d->skb), tipc_own_addr(net)); } /* Adjust timeout interval according to discovery phase */ diff --git a/net/tipc/net.c b/net/tipc/net.c index 4fbaa0464405..a7f6964c3a4b 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -121,12 +121,17 @@ int tipc_net_init(struct net *net, u8 *node_id, u32 addr) void tipc_net_finalize(struct net *net, u32 addr) { - tipc_set_node_addr(net, addr); - smp_mb(); - tipc_named_reinit(net); - tipc_sk_reinit(net); - tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr, - TIPC_CLUSTER_SCOPE, 0, addr); + struct tipc_net *tn = tipc_net(net); + + spin_lock_bh(&tn->node_list_lock); + if (!tipc_own_addr(net)) { + tipc_set_node_addr(net, addr); + tipc_named_reinit(net); + tipc_sk_reinit(net); + tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr, + TIPC_CLUSTER_SCOPE, 0, addr); + } + spin_unlock_bh(&tn->node_list_lock); } void tipc_net_stop(struct net *net) diff --git a/net/tipc/node.c b/net/tipc/node.c index 6a44eb812baf..0453bd451ce8 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -797,6 +797,7 @@ static u32 tipc_node_suggest_addr(struct net *net, u32 addr) } /* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not + * Returns suggested address if any, otherwise 0 */ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr) { @@ -819,12 +820,14 @@ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr) if (n) { addr = n->addr; tipc_node_put(n); + return addr; } - /* Even this node may be in trial phase */ + + /* Even this node may be in conflict */ if (tn->trial_addr == addr) return tipc_node_suggest_addr(net, addr); - return addr; + return 0; } void tipc_node_check_dest(struct net *net, u32 addr, diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 14a5d055717d..930852c54d7a 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -692,9 +692,10 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr, } /** - * tipc_poll - read pollmask + * tipc_poll - read and possibly block on pollmask * @file: file structure associated with the socket * @sock: socket for which to calculate the poll bits + * @wait: ??? 
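
/*
 * Illustrative sketch (hypothetical, not part of the patch above): the TIPC
 * discovery hunks keep comparing jiffies against addr_trial_end with
 * time_before(), which stays correct across counter wraparound because it
 * tests the signed difference.  A runnable user-space model of that
 * comparison (names invented):
 */
#include <stdint.h>
#include <stdio.h>

/* true if a is before b, tolerating wraparound of the 32-bit counter */
static int demo_time_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	uint32_t now = 0xfffffff0u;		/* just before the counter wraps */
	uint32_t trial_end = now + 0x20;	/* deadline lands past the wrap */

	printf("still in trial period: %d\n", demo_time_before(now, trial_end));
	printf("after the deadline:    %d\n",
	       demo_time_before(trial_end + 1, trial_end));
	return 0;
}
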
* * Returns pollmask value * @@ -708,12 +709,15 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr, * imply that the operation will succeed, merely that it should be performed * and will not block. */ -static __poll_t tipc_poll_mask(struct socket *sock, __poll_t events) +static __poll_t tipc_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); __poll_t revents = 0; + sock_poll_wait(file, sk_sleep(sk), wait); + if (sk->sk_shutdown & RCV_SHUTDOWN) revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) @@ -3033,7 +3037,7 @@ static const struct proto_ops msg_ops = { .socketpair = tipc_socketpair, .accept = sock_no_accept, .getname = tipc_getname, - .poll_mask = tipc_poll_mask, + .poll = tipc_poll, .ioctl = tipc_ioctl, .listen = sock_no_listen, .shutdown = tipc_shutdown, @@ -3054,7 +3058,7 @@ static const struct proto_ops packet_ops = { .socketpair = tipc_socketpair, .accept = tipc_accept, .getname = tipc_getname, - .poll_mask = tipc_poll_mask, + .poll = tipc_poll, .ioctl = tipc_ioctl, .listen = tipc_listen, .shutdown = tipc_shutdown, @@ -3075,7 +3079,7 @@ static const struct proto_ops stream_ops = { .socketpair = tipc_socketpair, .accept = tipc_accept, .getname = tipc_getname, - .poll_mask = tipc_poll_mask, + .poll = tipc_poll, .ioctl = tipc_ioctl, .listen = tipc_listen, .shutdown = tipc_shutdown, diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index a127d61e8af9..301f22430469 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -712,7 +712,7 @@ static int __init tls_register(void) build_protos(tls_prots[TLSV4], &tcp_prot); tls_sw_proto_ops = inet_stream_ops; - tls_sw_proto_ops.poll_mask = tls_sw_poll_mask; + tls_sw_proto_ops.poll = tls_sw_poll; tls_sw_proto_ops.splice_read = tls_sw_splice_read; #ifdef CONFIG_TLS_DEVICE diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index f127fac88acf..4618f1c31137 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -440,7 +440,7 @@ alloc_encrypted: ret = tls_push_record(sk, msg->msg_flags, record_type); if (!ret) continue; - if (ret == -EAGAIN) + if (ret < 0) goto send_end; copied -= try_to_copy; @@ -701,6 +701,10 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb, nsg = skb_to_sgvec(skb, &sgin[1], rxm->offset + tls_ctx->rx.prepend_size, rxm->full_len - tls_ctx->rx.prepend_size); + if (nsg < 0) { + ret = nsg; + goto out; + } tls_make_aad(ctx->rx_aad_ciphertext, rxm->full_len - tls_ctx->rx.overhead_size, @@ -712,6 +716,7 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb, rxm->full_len - tls_ctx->rx.overhead_size, skb, sk->sk_allocation); +out: if (sgin != &sgin_arr[0]) kfree(sgin); @@ -919,22 +924,23 @@ splice_read_end: return copied ? 
: err; } -__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events) +unsigned int tls_sw_poll(struct file *file, struct socket *sock, + struct poll_table_struct *wait) { + unsigned int ret; struct sock *sk = sock->sk; struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); - __poll_t mask; - /* Grab EPOLLOUT and EPOLLHUP from the underlying socket */ - mask = ctx->sk_poll_mask(sock, events); + /* Grab POLLOUT and POLLHUP from the underlying socket */ + ret = ctx->sk_poll(file, sock, wait); - /* Clear EPOLLIN bits, and set based on recv_pkt */ - mask &= ~(EPOLLIN | EPOLLRDNORM); + /* Clear POLLIN bits, and set based on recv_pkt */ + ret &= ~(POLLIN | POLLRDNORM); if (ctx->recv_pkt) - mask |= EPOLLIN | EPOLLRDNORM; + ret |= POLLIN | POLLRDNORM; - return mask; + return ret; } static int tls_read_size(struct strparser *strp, struct sk_buff *skb) @@ -1191,7 +1197,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) sk->sk_data_ready = tls_data_ready; write_unlock_bh(&sk->sk_callback_lock); - sw_ctx_rx->sk_poll_mask = sk->sk_socket->ops->poll_mask; + sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll; strp_check_rcv(&sw_ctx_rx->strp); } diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 95b02a71fd47..e5473c03d667 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -638,8 +638,9 @@ static int unix_stream_connect(struct socket *, struct sockaddr *, static int unix_socketpair(struct socket *, struct socket *); static int unix_accept(struct socket *, struct socket *, int, bool); static int unix_getname(struct socket *, struct sockaddr *, int); -static __poll_t unix_poll_mask(struct socket *, __poll_t); -static __poll_t unix_dgram_poll_mask(struct socket *, __poll_t); +static __poll_t unix_poll(struct file *, struct socket *, poll_table *); +static __poll_t unix_dgram_poll(struct file *, struct socket *, + poll_table *); static int unix_ioctl(struct socket *, unsigned int, unsigned long); static int unix_shutdown(struct socket *, int); static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t); @@ -680,7 +681,7 @@ static const struct proto_ops unix_stream_ops = { .socketpair = unix_socketpair, .accept = unix_accept, .getname = unix_getname, - .poll_mask = unix_poll_mask, + .poll = unix_poll, .ioctl = unix_ioctl, .listen = unix_listen, .shutdown = unix_shutdown, @@ -703,7 +704,7 @@ static const struct proto_ops unix_dgram_ops = { .socketpair = unix_socketpair, .accept = sock_no_accept, .getname = unix_getname, - .poll_mask = unix_dgram_poll_mask, + .poll = unix_dgram_poll, .ioctl = unix_ioctl, .listen = sock_no_listen, .shutdown = unix_shutdown, @@ -725,7 +726,7 @@ static const struct proto_ops unix_seqpacket_ops = { .socketpair = unix_socketpair, .accept = unix_accept, .getname = unix_getname, - .poll_mask = unix_dgram_poll_mask, + .poll = unix_dgram_poll, .ioctl = unix_ioctl, .listen = unix_listen, .shutdown = unix_shutdown, @@ -2629,10 +2630,13 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) return err; } -static __poll_t unix_poll_mask(struct socket *sock, __poll_t events) +static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; - __poll_t mask = 0; + __poll_t mask; + + sock_poll_wait(file, sk_sleep(sk), wait); + mask = 0; /* exceptional events? 
*/ if (sk->sk_err) @@ -2661,11 +2665,15 @@ static __poll_t unix_poll_mask(struct socket *sock, __poll_t events) return mask; } -static __poll_t unix_dgram_poll_mask(struct socket *sock, __poll_t events) +static __poll_t unix_dgram_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk, *other; - int writable; - __poll_t mask = 0; + unsigned int writable; + __poll_t mask; + + sock_poll_wait(file, sk_sleep(sk), wait); + mask = 0; /* exceptional events? */ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) @@ -2691,7 +2699,7 @@ static __poll_t unix_dgram_poll_mask(struct socket *sock, __poll_t events) } /* No write status requested, avoid expensive OUT tests. */ - if (!(events & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT))) + if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT))) return mask; writable = unix_writable(sk); diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index bb5d5fa68c35..c1076c19b858 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -850,11 +850,18 @@ static int vsock_shutdown(struct socket *sock, int mode) return err; } -static __poll_t vsock_poll_mask(struct socket *sock, __poll_t events) +static __poll_t vsock_poll(struct file *file, struct socket *sock, + poll_table *wait) { - struct sock *sk = sock->sk; - struct vsock_sock *vsk = vsock_sk(sk); - __poll_t mask = 0; + struct sock *sk; + __poll_t mask; + struct vsock_sock *vsk; + + sk = sock->sk; + vsk = vsock_sk(sk); + + poll_wait(file, sk_sleep(sk), wait); + mask = 0; if (sk->sk_err) /* Signify that there has been an error on this socket. */ @@ -1084,7 +1091,7 @@ static const struct proto_ops vsock_dgram_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = vsock_getname, - .poll_mask = vsock_poll_mask, + .poll = vsock_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = vsock_shutdown, @@ -1842,7 +1849,7 @@ static const struct proto_ops vsock_stream_ops = { .socketpair = sock_no_socketpair, .accept = vsock_accept, .getname = vsock_getname, - .poll_mask = vsock_poll_mask, + .poll = vsock_poll, .ioctl = sock_no_ioctl, .listen = vsock_listen, .shutdown = vsock_shutdown, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c7bbe5f0aae8..4eece06be1e7 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6231,7 +6231,7 @@ do { \ nl80211_check_s32); /* * Check HT operation mode based on - * IEEE 802.11 2012 8.4.2.59 HT Operation element. + * IEEE 802.11-2016 9.4.2.57 HT Operation element. 
*/ if (tb[NL80211_MESHCONF_HT_OPMODE]) { ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]); @@ -6241,22 +6241,9 @@ do { \ IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) return -EINVAL; - if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) && - (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) - return -EINVAL; + /* NON_HT_STA bit is reserved, but some programs set it */ + ht_opmode &= ~IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT; - switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) { - case IEEE80211_HT_OP_MODE_PROTECTION_NONE: - case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: - if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT) - return -EINVAL; - break; - case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: - case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: - if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) - return -EINVAL; - break; - } cfg->ht_opmode = ht_opmode; mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1)); } @@ -10962,9 +10949,12 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) rem) { u8 *mask_pat; - nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, - nl80211_packet_pattern_policy, - info->extack); + err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, + nl80211_packet_pattern_policy, + info->extack); + if (err) + goto error; + err = -EINVAL; if (!pat_tb[NL80211_PKTPAT_MASK] || !pat_tb[NL80211_PKTPAT_PATTERN]) @@ -11213,8 +11203,11 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev, rem) { u8 *mask_pat; - nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, - nl80211_packet_pattern_policy, NULL); + err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, + nl80211_packet_pattern_policy, NULL); + if (err) + return err; + if (!pat_tb[NL80211_PKTPAT_MASK] || !pat_tb[NL80211_PKTPAT_PATTERN]) return -EINVAL; diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index f93365ae0fdd..d49aa79b7997 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -1750,7 +1750,7 @@ static const struct proto_ops x25_proto_ops = { .socketpair = sock_no_socketpair, .accept = x25_accept, .getname = x25_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = x25_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_x25_ioctl, diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 3b3410ada097..72335c2e8108 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -199,8 +199,11 @@ static void xsk_destruct_skb(struct sk_buff *skb) { u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg; struct xdp_sock *xs = xdp_sk(skb->sk); + unsigned long flags; + spin_lock_irqsave(&xs->tx_completion_lock, flags); WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr)); + spin_unlock_irqrestore(&xs->tx_completion_lock, flags); sock_wfree(skb); } @@ -215,9 +218,6 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m, struct sk_buff *skb; int err = 0; - if (unlikely(!xs->tx)) - return -ENOBUFS; - mutex_lock(&xs->mutex); while (xskq_peek_desc(xs->tx, &desc)) { @@ -230,22 +230,13 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m, goto out; } - if (xskq_reserve_addr(xs->umem->cq)) { - err = -EAGAIN; - goto out; - } - - len = desc.len; - if (unlikely(len > xs->dev->mtu)) { - err = -EMSGSIZE; + if (xskq_reserve_addr(xs->umem->cq)) goto out; - } - if (xs->queue_id >= xs->dev->real_num_tx_queues) { - err = -ENXIO; + if (xs->queue_id >= xs->dev->real_num_tx_queues) goto out; - } + len = desc.len; skb = sock_alloc_send_skb(sk, len, 1, &err); if (unlikely(!skb)) { err = -EAGAIN; @@ -268,15 +259,15 @@ static int xsk_generic_xmit(struct sock *sk, 
struct msghdr *m, skb->destructor = xsk_destruct_skb; err = dev_direct_xmit(skb, xs->queue_id); + xskq_discard_desc(xs->tx); /* Ignore NET_XMIT_CN as packet might have been sent */ if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) { - err = -EAGAIN; - /* SKB consumed by dev_direct_xmit() */ + /* SKB completed but not sent */ + err = -EBUSY; goto out; } sent_frame = true; - xskq_discard_desc(xs->tx); } out: @@ -297,15 +288,18 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) return -ENXIO; if (unlikely(!(xs->dev->flags & IFF_UP))) return -ENETDOWN; + if (unlikely(!xs->tx)) + return -ENOBUFS; if (need_wait) return -EOPNOTSUPP; return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len); } -static __poll_t xsk_poll_mask(struct socket *sock, __poll_t events) +static unsigned int xsk_poll(struct file *file, struct socket *sock, + struct poll_table_struct *wait) { - __poll_t mask = datagram_poll_mask(sock, events); + unsigned int mask = datagram_poll(file, sock, wait); struct sock *sk = sock->sk; struct xdp_sock *xs = xdp_sk(sk); @@ -696,7 +690,7 @@ static const struct proto_ops xsk_proto_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = xsk_poll_mask, + .poll = xsk_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, @@ -754,6 +748,7 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol, xs = xdp_sk(sk); mutex_init(&xs->mutex); + spin_lock_init(&xs->tx_completion_lock); local_bh_disable(); sock_prot_inuse_add(net, &xsk_proto, 1); diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h index ef6a6f0ec949..52ecaf770642 100644 --- a/net/xdp/xsk_queue.h +++ b/net/xdp/xsk_queue.h @@ -62,14 +62,9 @@ static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt) return (entries > dcnt) ? 
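
/*
 * Illustrative sketch (hypothetical, not part of the patch above): the AF_XDP
 * queue code in the xsk_queue.h hunk just below drops the lazy-free helper
 * and computes free ring slots directly as nentries - (producer - cons_tail),
 * relying on unsigned wraparound of the free-running 32-bit counters.  A
 * runnable user-space model of that arithmetic (names invented):
 */
#include <stdint.h>
#include <stdio.h>

struct demo_ring {
	uint32_t nentries;	/* ring size, a power of two */
	uint32_t prod;		/* free-running producer counter */
	uint32_t cons;		/* free-running consumer counter */
};

static uint32_t demo_nb_free(const struct demo_ring *q)
{
	/* correct even after prod/cons wrap past UINT32_MAX */
	return q->nentries - (q->prod - q->cons);
}

int main(void)
{
	struct demo_ring q = {
		.nentries = 8,
		.prod = 0xfffffffeu,
		.cons = 0xfffffffau,
	};

	printf("in flight: %u, free: %u\n", q.prod - q.cons, demo_nb_free(&q));
	return 0;
}
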
dcnt : entries; } -static inline u32 xskq_nb_free_lazy(struct xsk_queue *q, u32 producer) -{ - return q->nentries - (producer - q->cons_tail); -} - static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt) { - u32 free_entries = xskq_nb_free_lazy(q, producer); + u32 free_entries = q->nentries - (producer - q->cons_tail); if (free_entries >= dcnt) return free_entries; @@ -129,7 +124,7 @@ static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr) { struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; - if (xskq_nb_free(q, q->prod_tail, LAZY_UPDATE_THRESHOLD) == 0) + if (xskq_nb_free(q, q->prod_tail, 1) == 0) return -ENOSPC; ring->desc[q->prod_tail++ & q->ring_mask] = addr; diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore new file mode 100644 index 000000000000..8ae4940025f8 --- /dev/null +++ b/samples/bpf/.gitignore @@ -0,0 +1,49 @@ +cpustat +fds_example +lathist +load_sock_ops +lwt_len_hist +map_perf_test +offwaketime +per_socket_stats_example +sampleip +sock_example +sockex1 +sockex2 +sockex3 +spintest +syscall_nrs.h +syscall_tp +task_fd_query +tc_l2_redirect +test_cgrp2_array_pin +test_cgrp2_attach +test_cgrp2_attach2 +test_cgrp2_sock +test_cgrp2_sock2 +test_current_task_under_cgroup +test_lru_dist +test_map_in_map +test_overhead +test_probe_write_user +trace_event +trace_output +tracex1 +tracex2 +tracex3 +tracex4 +tracex5 +tracex6 +tracex7 +xdp1 +xdp2 +xdp_adjust_tail +xdp_fwd +xdp_monitor +xdp_redirect +xdp_redirect_cpu +xdp_redirect_map +xdp_router_ipv4 +xdp_rxq_info +xdp_tx_iptunnel +xdpsock diff --git a/samples/bpf/parse_varlen.c b/samples/bpf/parse_varlen.c index 95c16324760c..0b6f22feb2c9 100644 --- a/samples/bpf/parse_varlen.c +++ b/samples/bpf/parse_varlen.c @@ -6,6 +6,7 @@ */ #define KBUILD_MODNAME "foo" #include <linux/if_ether.h> +#include <linux/if_vlan.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/in.h> @@ -108,11 +109,6 @@ static int parse_ipv6(void *data, uint64_t nh_off, void *data_end) return 0; } -struct vlan_hdr { - uint16_t h_vlan_TCI; - uint16_t h_vlan_encapsulated_proto; -}; - SEC("varlen") int handle_ingress(struct __sk_buff *skb) { diff --git a/samples/bpf/test_overhead_user.c b/samples/bpf/test_overhead_user.c index 6caf47afa635..9d6dcaa9db92 100644 --- a/samples/bpf/test_overhead_user.c +++ b/samples/bpf/test_overhead_user.c @@ -6,6 +6,7 @@ */ #define _GNU_SOURCE #include <sched.h> +#include <errno.h> #include <stdio.h> #include <sys/types.h> #include <asm/unistd.h> @@ -44,8 +45,13 @@ static void test_task_rename(int cpu) exit(1); } start_time = time_get_ns(); - for (i = 0; i < MAX_CNT; i++) - write(fd, buf, sizeof(buf)); + for (i = 0; i < MAX_CNT; i++) { + if (write(fd, buf, sizeof(buf)) < 0) { + printf("task rename failed: %s\n", strerror(errno)); + close(fd); + return; + } + } printf("task_rename:%d: %lld events per sec\n", cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); close(fd); @@ -63,8 +69,13 @@ static void test_urandom_read(int cpu) exit(1); } start_time = time_get_ns(); - for (i = 0; i < MAX_CNT; i++) - read(fd, buf, sizeof(buf)); + for (i = 0; i < MAX_CNT; i++) { + if (read(fd, buf, sizeof(buf)) < 0) { + printf("failed to read from /dev/urandom: %s\n", strerror(errno)); + close(fd); + return; + } + } printf("urandom_read:%d: %lld events per sec\n", cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); close(fd); diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c index 1fa1becfa641..d08046ab81f0 100644 --- a/samples/bpf/trace_event_user.c +++ 
b/samples/bpf/trace_event_user.c @@ -122,6 +122,16 @@ static void print_stacks(void) } } +static inline int generate_load(void) +{ + if (system("dd if=/dev/zero of=/dev/null count=5000k status=none") < 0) { + printf("failed to generate some load with dd: %s\n", strerror(errno)); + return -1; + } + + return 0; +} + static void test_perf_event_all_cpu(struct perf_event_attr *attr) { int nr_cpus = sysconf(_SC_NPROCESSORS_CONF); @@ -142,7 +152,11 @@ static void test_perf_event_all_cpu(struct perf_event_attr *attr) assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0); assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE) == 0); } - system("dd if=/dev/zero of=/dev/null count=5000k status=none"); + + if (generate_load() < 0) { + error = 1; + goto all_cpu_err; + } print_stacks(); all_cpu_err: for (i--; i >= 0; i--) { @@ -156,7 +170,7 @@ all_cpu_err: static void test_perf_event_task(struct perf_event_attr *attr) { - int pmu_fd; + int pmu_fd, error = 0; /* per task perf event, enable inherit so the "dd ..." command can be traced properly. * Enabling inherit will cause bpf_perf_prog_read_time helper failure. @@ -171,10 +185,17 @@ static void test_perf_event_task(struct perf_event_attr *attr) } assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0); assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE) == 0); - system("dd if=/dev/zero of=/dev/null count=5000k status=none"); + + if (generate_load() < 0) { + error = 1; + goto err; + } print_stacks(); +err: ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); close(pmu_fd); + if (error) + int_exit(0); } static void test_bpf_perf_event(void) diff --git a/samples/bpf/xdp2skb_meta.sh b/samples/bpf/xdp2skb_meta.sh index b9c9549c4c27..4bde9d066c46 100755 --- a/samples/bpf/xdp2skb_meta.sh +++ b/samples/bpf/xdp2skb_meta.sh @@ -16,8 +16,8 @@ BPF_FILE=xdp2skb_meta_kern.o DIR=$(dirname $0) -export TC=/usr/sbin/tc -export IP=/usr/sbin/ip +[ -z "$TC" ] && TC=tc +[ -z "$IP" ] && IP=ip function usage() { echo "" @@ -53,7 +53,7 @@ function _call_cmd() { local allow_fail="$2" shift 2 if [[ -n "$VERBOSE" ]]; then - echo "$(basename $cmd) $@" + echo "$cmd $@" fi if [[ -n "$DRYRUN" ]]; then return diff --git a/samples/bpf/xdp_fwd_kern.c b/samples/bpf/xdp_fwd_kern.c index 6673cdb9f55c..a7e94e7ff87d 100644 --- a/samples/bpf/xdp_fwd_kern.c +++ b/samples/bpf/xdp_fwd_kern.c @@ -48,9 +48,9 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags) struct ethhdr *eth = data; struct ipv6hdr *ip6h; struct iphdr *iph; - int out_index; u16 h_proto; u64 nh_off; + int rc; nh_off = sizeof(*eth); if (data + nh_off > data_end) @@ -101,7 +101,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags) fib_params.ifindex = ctx->ingress_ifindex; - out_index = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags); + rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags); /* verify egress index has xdp support * TO-DO bpf_map_lookup_elem(&tx_port, &key) fails with @@ -109,7 +109,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags) * NOTE: without verification that egress index supports XDP * forwarding packets are dropped. 
*/ - if (out_index > 0) { + if (rc == 0) { if (h_proto == htons(ETH_P_IP)) ip_decrease_ttl(iph); else if (h_proto == htons(ETH_P_IPV6)) @@ -117,7 +117,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags) memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN); memcpy(eth->h_source, fib_params.smac, ETH_ALEN); - return bpf_redirect_map(&tx_port, out_index, 0); + return bpf_redirect_map(&tx_port, fib_params.ifindex, 0); } return XDP_PASS; diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c index d69c8d78d3fd..5904b1543831 100644 --- a/samples/bpf/xdpsock_user.c +++ b/samples/bpf/xdpsock_user.c @@ -729,7 +729,7 @@ static void kick_tx(int fd) int ret; ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0); - if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN) + if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY) return; lassert(0); } diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c index 2960e26c6ea4..2535c3677c7b 100644 --- a/samples/vfio-mdev/mbochs.c +++ b/samples/vfio-mdev/mbochs.c @@ -178,6 +178,8 @@ static const char *vbe_name(u32 index) return "(invalid)"; } +static struct page *__mbochs_get_page(struct mdev_state *mdev_state, + pgoff_t pgoff); static struct page *mbochs_get_page(struct mdev_state *mdev_state, pgoff_t pgoff); @@ -394,7 +396,7 @@ static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count, MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) { pos -= MBOCHS_MMIO_BAR_OFFSET; poff = pos & ~PAGE_MASK; - pg = mbochs_get_page(mdev_state, pos >> PAGE_SHIFT); + pg = __mbochs_get_page(mdev_state, pos >> PAGE_SHIFT); map = kmap(pg); if (is_write) memcpy(map + poff, buf, count); @@ -657,7 +659,7 @@ static void mbochs_put_pages(struct mdev_state *mdev_state) dev_dbg(dev, "%s: %d pages released\n", __func__, count); } -static int mbochs_region_vm_fault(struct vm_fault *vmf) +static vm_fault_t mbochs_region_vm_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct mdev_state *mdev_state = vma->vm_private_data; @@ -695,7 +697,7 @@ static int mbochs_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) return 0; } -static int mbochs_dmabuf_vm_fault(struct vm_fault *vmf) +static vm_fault_t mbochs_dmabuf_vm_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct mbochs_dmabuf *dmabuf = vma->vm_private_data; @@ -803,29 +805,26 @@ static void mbochs_release_dmabuf(struct dma_buf *buf) mutex_unlock(&mdev_state->ops_lock); } -static void *mbochs_kmap_atomic_dmabuf(struct dma_buf *buf, - unsigned long page_num) +static void *mbochs_kmap_dmabuf(struct dma_buf *buf, unsigned long page_num) { struct mbochs_dmabuf *dmabuf = buf->priv; struct page *page = dmabuf->pages[page_num]; - return kmap_atomic(page); + return kmap(page); } -static void *mbochs_kmap_dmabuf(struct dma_buf *buf, unsigned long page_num) +static void mbochs_kunmap_dmabuf(struct dma_buf *buf, unsigned long page_num, + void *vaddr) { - struct mbochs_dmabuf *dmabuf = buf->priv; - struct page *page = dmabuf->pages[page_num]; - - return kmap(page); + kunmap(vaddr); } static struct dma_buf_ops mbochs_dmabuf_ops = { .map_dma_buf = mbochs_map_dmabuf, .unmap_dma_buf = mbochs_unmap_dmabuf, .release = mbochs_release_dmabuf, - .map_atomic = mbochs_kmap_atomic_dmabuf, .map = mbochs_kmap_dmabuf, + .unmap = mbochs_kunmap_dmabuf, .mmap = mbochs_mmap_dmabuf, }; diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index c8156d61678c..86321f06461e 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ 
-214,7 +214,7 @@ hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj # Prefix -I with $(srctree) if it is not an absolute path. # skip if -I has no parameter addtree = $(if $(patsubst -I%,%,$(1)), \ -$(if $(filter-out -I/% -I./% -I../%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1)),$(1))) +$(if $(filter-out -I/% -I./% -I../%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1)),$(1)),$(1)) # Find all -I options and call addtree flags = $(foreach o,$($(1)),$(if $(filter -I%,$(o)),$(call addtree,$(o)),$(o))) diff --git a/scripts/Makefile.build b/scripts/Makefile.build index e7889f486ca1..514ed63ff571 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -590,7 +590,4 @@ endif # We never want them to be removed automatically. .SECONDARY: $(targets) -# Declare the contents of the .PHONY variable as phony. We keep that -# information in a variable se we can use it in if_changed and friends. - .PHONY: $(PHONY) diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean index 808d09f27ad4..17ef94c635cd 100644 --- a/scripts/Makefile.clean +++ b/scripts/Makefile.clean @@ -88,7 +88,4 @@ PHONY += $(subdir-ymn) $(subdir-ymn): $(Q)$(MAKE) $(clean)=$@ -# Declare the contents of the .PHONY variable as phony. We keep that -# information in a variable se we can use it in if_changed and friends. - .PHONY: $(PHONY) diff --git a/scripts/Makefile.modbuiltin b/scripts/Makefile.modbuiltin index a763b4775d06..40867a41615b 100644 --- a/scripts/Makefile.modbuiltin +++ b/scripts/Makefile.modbuiltin @@ -54,8 +54,4 @@ PHONY += $(subdir-ym) $(subdir-ym): $(Q)$(MAKE) $(modbuiltin)=$@ - -# Declare the contents of the .PHONY variable as phony. We keep that -# information in a variable se we can use it in if_changed and friends. - .PHONY: $(PHONY) diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst index 51ca0244fc8a..ff5ca9817a85 100644 --- a/scripts/Makefile.modinst +++ b/scripts/Makefile.modinst @@ -35,8 +35,4 @@ modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D)) $(modules): $(call cmd,modules_install,$(MODLIB)/$(modinst_dir)) - -# Declare the contents of the .PHONY variable as phony. We keep that -# information in a variable so we can use it in if_changed and friends. - .PHONY: $(PHONY) diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index df4174405feb..dd92dbbbaa68 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost @@ -149,8 +149,4 @@ ifneq ($(cmd_files),) include $(cmd_files) endif - -# Declare the contents of the .PHONY variable as phony. We keep that -# information in a variable se we can use it in if_changed and friends. - .PHONY: $(PHONY) diff --git a/scripts/Makefile.modsign b/scripts/Makefile.modsign index 171483bc0538..da56aa78d245 100644 --- a/scripts/Makefile.modsign +++ b/scripts/Makefile.modsign @@ -27,7 +27,4 @@ modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D)) $(modules): $(call cmd,sign_ko,$(MODLIB)/$(modinst_dir)) -# Declare the contents of the .PHONY variable as phony. We keep that -# information in a variable se we can use it in if_changed and friends. 
- .PHONY: $(PHONY) diff --git a/scripts/cc-can-link.sh b/scripts/cc-can-link.sh index 208eb2825dab..6efcead31989 100755 --- a/scripts/cc-can-link.sh +++ b/scripts/cc-can-link.sh @@ -1,7 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 -cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1 && echo "y" +cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1 #include <stdio.h> int main(void) { diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index e3b7362b0ee4..447857ffaf6b 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -2606,12 +2606,6 @@ sub process { "A patch subject line should describe the change not the tool that found it\n" . $herecurr); } -# Check for old stable address - if ($line =~ /^\s*cc:\s*.*<?\bstable\@kernel\.org\b>?.*$/i) { - ERROR("STABLE_ADDRESS", - "The 'stable' address should be 'stable\@vger.kernel.org'\n" . $herecurr); - } - # Check for unwanted Gerrit info if ($in_commit_log && $line =~ /^\s*change-id:/i) { ERROR("GERRIT_CHANGE_ID", @@ -5819,14 +5813,14 @@ sub process { defined $stat && $stat =~ /^\+(?![^\{]*\{\s*).*\b(\w+)\s*\(.*$String\s*,/s && $1 !~ /^_*volatile_*$/) { - my $specifier; - my $extension; - my $bad_specifier = ""; my $stat_real; my $lc = $stat =~ tr@\n@@; $lc = $lc + $linenr; for (my $count = $linenr; $count <= $lc; $count++) { + my $specifier; + my $extension; + my $bad_specifier = ""; my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0)); $fmt =~ s/%%//g; diff --git a/scripts/extract-vmlinux b/scripts/extract-vmlinux index 5061abcc2540..e6239f39abad 100755 --- a/scripts/extract-vmlinux +++ b/scripts/extract-vmlinux @@ -57,6 +57,8 @@ try_decompress '\3757zXZ\000' abcde unxz try_decompress 'BZh' xy bunzip2 try_decompress '\135\0\0\0' xxx unlzma try_decompress '\211\114\132' xy 'lzop -d' +try_decompress '\002!L\030' xxx 'lz4 -d' +try_decompress '(\265/\375' xxx unzstd # Bail out: echo "$me: Cannot find vmlinux." >&2 diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh index 3755af0cd9f7..75e4e22b986a 100755 --- a/scripts/gcc-x86_64-has-stack-protector.sh +++ b/scripts/gcc-x86_64-has-stack-protector.sh @@ -1,4 +1,4 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 -echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs" +echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m64 -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs" diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h index 94a383b21df6..f63b41b0dd49 100644 --- a/scripts/kconfig/expr.h +++ b/scripts/kconfig/expr.h @@ -171,6 +171,9 @@ struct symbol { * config BAZ * int "BAZ Value" * range 1..255 + * + * Please, also check zconf.y:print_symbol() when modifying the + * list of property types! 
*/ enum prop_type { P_UNKNOWN, diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c index 65da87fce907..5ca2df790d3c 100644 --- a/scripts/kconfig/preprocess.c +++ b/scripts/kconfig/preprocess.c @@ -156,7 +156,7 @@ static char *do_shell(int argc, char *argv[]) nread--; /* remove trailing new lines */ - while (buf[nread - 1] == '\n') + while (nread > 0 && buf[nread - 1] == '\n') nread--; buf[nread] = 0; diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y index 6f9b0aa32a82..4b68272ebdb9 100644 --- a/scripts/kconfig/zconf.y +++ b/scripts/kconfig/zconf.y @@ -31,7 +31,7 @@ struct symbol *symbol_hash[SYMBOL_HASHSIZE]; static struct menu *current_menu, *current_entry; %} -%expect 32 +%expect 31 %union { @@ -337,7 +337,7 @@ choice_block: /* if entry */ -if_entry: T_IF expr nl +if_entry: T_IF expr T_EOL { printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno()); menu_add_entry(NULL); @@ -717,6 +717,10 @@ static void print_symbol(FILE *out, struct menu *menu) print_quoted_string(out, prop->text); fputc('\n', out); break; + case P_SYMBOL: + fputs( " symbol ", out); + fprintf(out, "%s\n", prop->sym->name); + break; default: fprintf(out, " unknown prop %d!\n", prop->type); break; diff --git a/scripts/tags.sh b/scripts/tags.sh index 66f08bb1cce9..26de7d5aa5c8 100755 --- a/scripts/tags.sh +++ b/scripts/tags.sh @@ -152,6 +152,7 @@ regex_asm=( ) regex_c=( '/^SYSCALL_DEFINE[0-9](\([[:alnum:]_]*\).*/sys_\1/' + '/^BPF_CALL_[0-9](\([[:alnum:]_]*\).*/\1/' '/^COMPAT_SYSCALL_DEFINE[0-9](\([[:alnum:]_]*\).*/compat_sys_\1/' '/^TRACE_EVENT(\([[:alnum:]_]*\).*/trace_\1/' '/^TRACE_EVENT(\([[:alnum:]_]*\).*/trace_\1_rcuidle/' @@ -245,7 +246,7 @@ exuberant() { setup_regex exuberant asm c all_target_sources | xargs $1 -a \ - -I __initdata,__exitdata,__initconst, \ + -I __initdata,__exitdata,__initconst,__ro_after_init \ -I __initdata_memblock \ -I __refdata,__attribute,__maybe_unused,__always_unused \ -I __acquires,__releases,__deprecated \ diff --git a/security/keys/dh.c b/security/keys/dh.c index f7403821db7f..b203f7758f97 100644 --- a/security/keys/dh.c +++ b/security/keys/dh.c @@ -142,6 +142,8 @@ static void kdf_dealloc(struct kdf_sdesc *sdesc) * The src pointer is defined as Z || other info where Z is the shared secret * from DH and other info is an arbitrary string (see SP800-56A section * 5.8.1.2). + * + * 'dlen' must be a multiple of the digest size. 
*/ static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen, unsigned int zlen) @@ -205,8 +207,8 @@ static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc, { uint8_t *outbuf = NULL; int ret; - size_t outbuf_len = round_up(buflen, - crypto_shash_digestsize(sdesc->shash.tfm)); + size_t outbuf_len = roundup(buflen, + crypto_shash_digestsize(sdesc->shash.tfm)); outbuf = kmalloc(outbuf_len, GFP_KERNEL); if (!outbuf) { diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index f3d374d2ca04..79d3709b0671 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -441,22 +441,16 @@ static int sel_release_policy(struct inode *inode, struct file *filp) static ssize_t sel_read_policy(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { - struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info; struct policy_load_memory *plm = filp->private_data; int ret; - mutex_lock(&fsi->mutex); - ret = avc_has_perm(&selinux_state, current_sid(), SECINITSID_SECURITY, SECCLASS_SECURITY, SECURITY__READ_POLICY, NULL); if (ret) - goto out; + return ret; - ret = simple_read_from_buffer(buf, count, ppos, plm->data, plm->len); -out: - mutex_unlock(&fsi->mutex); - return ret; + return simple_read_from_buffer(buf, count, ppos, plm->data, plm->len); } static vm_fault_t sel_mmap_policy_fault(struct vm_fault *vmf) @@ -1188,25 +1182,29 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf, ret = -EINVAL; if (index >= fsi->bool_num || strcmp(name, fsi->bool_pending_names[index])) - goto out; + goto out_unlock; ret = -ENOMEM; page = (char *)get_zeroed_page(GFP_KERNEL); if (!page) - goto out; + goto out_unlock; cur_enforcing = security_get_bool_value(fsi->state, index); if (cur_enforcing < 0) { ret = cur_enforcing; - goto out; + goto out_unlock; } length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing, fsi->bool_pending_values[index]); - ret = simple_read_from_buffer(buf, count, ppos, page, length); -out: mutex_unlock(&fsi->mutex); + ret = simple_read_from_buffer(buf, count, ppos, page, length); +out_free: free_page((unsigned long)page); return ret; + +out_unlock: + mutex_unlock(&fsi->mutex); + goto out_free; } static ssize_t sel_write_bool(struct file *filep, const char __user *buf, @@ -1219,6 +1217,17 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf, unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK; const char *name = filep->f_path.dentry->d_name.name; + if (count >= PAGE_SIZE) + return -ENOMEM; + + /* No partial writes. */ + if (*ppos != 0) + return -EINVAL; + + page = memdup_user_nul(buf, count); + if (IS_ERR(page)) + return PTR_ERR(page); + mutex_lock(&fsi->mutex); length = avc_has_perm(&selinux_state, @@ -1233,22 +1242,6 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf, fsi->bool_pending_names[index])) goto out; - length = -ENOMEM; - if (count >= PAGE_SIZE) - goto out; - - /* No partial writes. */ - length = -EINVAL; - if (*ppos != 0) - goto out; - - page = memdup_user_nul(buf, count); - if (IS_ERR(page)) { - length = PTR_ERR(page); - page = NULL; - goto out; - } - length = -EINVAL; if (sscanf(page, "%d", &new_value) != 1) goto out; @@ -1280,6 +1273,17 @@ static ssize_t sel_commit_bools_write(struct file *filep, ssize_t length; int new_value; + if (count >= PAGE_SIZE) + return -ENOMEM; + + /* No partial writes. 
*/ + if (*ppos != 0) + return -EINVAL; + + page = memdup_user_nul(buf, count); + if (IS_ERR(page)) + return PTR_ERR(page); + mutex_lock(&fsi->mutex); length = avc_has_perm(&selinux_state, @@ -1289,22 +1293,6 @@ static ssize_t sel_commit_bools_write(struct file *filep, if (length) goto out; - length = -ENOMEM; - if (count >= PAGE_SIZE) - goto out; - - /* No partial writes. */ - length = -EINVAL; - if (*ppos != 0) - goto out; - - page = memdup_user_nul(buf, count); - if (IS_ERR(page)) { - length = PTR_ERR(page); - page = NULL; - goto out; - } - length = -EINVAL; if (sscanf(page, "%d", &new_value) != 1) goto out; diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 7ad226018f51..19de675d4504 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -2296,6 +2296,7 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode) struct smack_known *skp = smk_of_task_struct(p); isp->smk_inode = skp; + isp->smk_flags |= SMK_INODE_INSTANT; } /* diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index 69616d00481c..b53026a72e73 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -635,7 +635,7 @@ static int snd_rawmidi_info_select_user(struct snd_card *card, int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream, struct snd_rawmidi_params * params) { - char *newbuf; + char *newbuf, *oldbuf; struct snd_rawmidi_runtime *runtime = substream->runtime; if (substream->append && substream->use_count > 1) @@ -648,13 +648,17 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream, return -EINVAL; } if (params->buffer_size != runtime->buffer_size) { - newbuf = krealloc(runtime->buffer, params->buffer_size, - GFP_KERNEL); + newbuf = kmalloc(params->buffer_size, GFP_KERNEL); if (!newbuf) return -ENOMEM; + spin_lock_irq(&runtime->lock); + oldbuf = runtime->buffer; runtime->buffer = newbuf; runtime->buffer_size = params->buffer_size; runtime->avail = runtime->buffer_size; + runtime->appl_ptr = runtime->hw_ptr = 0; + spin_unlock_irq(&runtime->lock); + kfree(oldbuf); } runtime->avail_min = params->avail_min; substream->active_sensing = !params->no_active_sensing; @@ -665,7 +669,7 @@ EXPORT_SYMBOL(snd_rawmidi_output_params); int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream, struct snd_rawmidi_params * params) { - char *newbuf; + char *newbuf, *oldbuf; struct snd_rawmidi_runtime *runtime = substream->runtime; snd_rawmidi_drain_input(substream); @@ -676,12 +680,16 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream, return -EINVAL; } if (params->buffer_size != runtime->buffer_size) { - newbuf = krealloc(runtime->buffer, params->buffer_size, - GFP_KERNEL); + newbuf = kmalloc(params->buffer_size, GFP_KERNEL); if (!newbuf) return -ENOMEM; + spin_lock_irq(&runtime->lock); + oldbuf = runtime->buffer; runtime->buffer = newbuf; runtime->buffer_size = params->buffer_size; + runtime->appl_ptr = runtime->hw_ptr = 0; + spin_unlock_irq(&runtime->lock); + kfree(oldbuf); } runtime->avail_min = params->avail_min; return 0; diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index 61a07fe34cd2..56ca78423040 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -2004,7 +2004,8 @@ static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client, struct snd_seq_client *cptr = NULL; /* search for next client */ - info->client++; + if (info->client < INT_MAX) + info->client++; if (info->client < 0) info->client = 0; for (; 
info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) { diff --git a/sound/core/timer.c b/sound/core/timer.c index 665089c45560..b6f076bbc72d 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -1520,7 +1520,7 @@ static int snd_timer_user_next_device(struct snd_timer_id __user *_tid) } else { if (id.subdevice < 0) id.subdevice = 0; - else + else if (id.subdevice < INT_MAX) id.subdevice++; } } diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index d91c87e41756..20a171ac4bb2 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -2899,8 +2899,9 @@ static int hda_codec_runtime_suspend(struct device *dev) list_for_each_entry(pcm, &codec->pcm_list_head, list) snd_pcm_suspend_all(pcm->pcm); state = hda_call_codec_suspend(codec); - if (codec_has_clkstop(codec) && codec_has_epss(codec) && - (state & AC_PWRST_CLK_STOP_OK)) + if (codec->link_down_at_suspend || + (codec_has_clkstop(codec) && codec_has_epss(codec) && + (state & AC_PWRST_CLK_STOP_OK))) snd_hdac_codec_link_down(&codec->core); snd_hdac_link_power(&codec->core, false); return 0; diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h index 681c360f29f9..a8b1b31f161c 100644 --- a/sound/pci/hda/hda_codec.h +++ b/sound/pci/hda/hda_codec.h @@ -258,6 +258,7 @@ struct hda_codec { unsigned int power_save_node:1; /* advanced PM for each widget */ unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */ unsigned int force_pin_prefix:1; /* Add location prefix */ + unsigned int link_down_at_suspend:1; /* link down at runtime suspend */ #ifdef CONFIG_PM unsigned long power_on_acct; unsigned long power_off_acct; diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index 04e949aa01ad..321e95c409c1 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -991,6 +991,7 @@ struct ca0132_spec { enum { QUIRK_NONE, QUIRK_ALIENWARE, + QUIRK_ALIENWARE_M17XR4, QUIRK_SBZ, QUIRK_R3DI, }; @@ -1040,13 +1041,15 @@ static const struct hda_pintbl r3di_pincfgs[] = { }; static const struct snd_pci_quirk ca0132_quirks[] = { + SND_PCI_QUIRK(0x1028, 0x057b, "Alienware M17x R4", QUIRK_ALIENWARE_M17XR4), SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE), SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE), SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE), SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ), SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ), SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI), - SND_PCI_QUIRK(0x1458, 0xA036, "Recon3Di", QUIRK_R3DI), + SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI), + SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI), {} }; @@ -5663,7 +5666,7 @@ static const char * const ca0132_alt_slave_pfxs[] = { * I think this has to do with the pin for rear surround being 0x11, * and the center/lfe being 0x10. Usually the pin order is the opposite. */ -const struct snd_pcm_chmap_elem ca0132_alt_chmaps[] = { +static const struct snd_pcm_chmap_elem ca0132_alt_chmaps[] = { { .channels = 2, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } }, { .channels = 4, @@ -5966,7 +5969,7 @@ static int ca0132_build_pcms(struct hda_codec *codec) info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[0]; /* With the DSP enabled, desktops don't use this ADC. 
*/ - if (spec->use_alt_functions) { + if (!spec->use_alt_functions) { info = snd_hda_codec_pcm_new(codec, "CA0132 Analog Mic-In2"); if (!info) return -ENOMEM; @@ -6130,7 +6133,10 @@ static void ca0132_init_dmic(struct hda_codec *codec) * Bit 6: set to select Data2, clear for Data1 * Bit 7: set to enable DMic, clear for AMic */ - val = 0x23; + if (spec->quirk == QUIRK_ALIENWARE_M17XR4) + val = 0x33; + else + val = 0x23; /* keep a copy of dmic ctl val for enable/disable dmic purpuse */ spec->dmic_ctl = val; snd_hda_codec_write(codec, spec->input_pins[0], 0, @@ -7223,7 +7229,7 @@ static int ca0132_init(struct hda_codec *codec) snd_hda_sequence_write(codec, spec->base_init_verbs); - if (spec->quirk != QUIRK_NONE) + if (spec->use_alt_functions) ca0132_alt_init(codec); ca0132_download_dsp(codec); @@ -7237,8 +7243,9 @@ static int ca0132_init(struct hda_codec *codec) case QUIRK_R3DI: r3di_setup_defaults(codec); break; - case QUIRK_NONE: - case QUIRK_ALIENWARE: + case QUIRK_SBZ: + break; + default: ca0132_setup_defaults(codec); ca0132_init_analog_mic2(codec); ca0132_init_dmic(codec); @@ -7343,7 +7350,6 @@ static const struct hda_codec_ops ca0132_patch_ops = { static void ca0132_config(struct hda_codec *codec) { struct ca0132_spec *spec = codec->spec; - struct auto_pin_cfg *cfg = &spec->autocfg; spec->dacs[0] = 0x2; spec->dacs[1] = 0x3; @@ -7405,12 +7411,7 @@ static void ca0132_config(struct hda_codec *codec) /* SPDIF I/O */ spec->dig_out = 0x05; spec->multiout.dig_out_nid = spec->dig_out; - cfg->dig_out_pins[0] = 0x0c; - cfg->dig_outs = 1; - cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF; spec->dig_in = 0x09; - cfg->dig_in_pin = 0x0e; - cfg->dig_in_type = HDA_PCM_TYPE_SPDIF; break; case QUIRK_R3DI: codec_dbg(codec, "%s: QUIRK_R3DI applied.\n", __func__); @@ -7438,9 +7439,6 @@ static void ca0132_config(struct hda_codec *codec) /* SPDIF I/O */ spec->dig_out = 0x05; spec->multiout.dig_out_nid = spec->dig_out; - cfg->dig_out_pins[0] = 0x0c; - cfg->dig_outs = 1; - cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF; break; default: spec->num_outputs = 2; @@ -7463,12 +7461,7 @@ static void ca0132_config(struct hda_codec *codec) /* SPDIF I/O */ spec->dig_out = 0x05; spec->multiout.dig_out_nid = spec->dig_out; - cfg->dig_out_pins[0] = 0x0c; - cfg->dig_outs = 1; - cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF; spec->dig_in = 0x09; - cfg->dig_in_pin = 0x0e; - cfg->dig_in_type = HDA_PCM_TYPE_SPDIF; break; } } @@ -7476,7 +7469,7 @@ static void ca0132_config(struct hda_codec *codec) static int ca0132_prepare_verbs(struct hda_codec *codec) { /* Verbs + terminator (an empty element) */ -#define NUM_SPEC_VERBS 4 +#define NUM_SPEC_VERBS 2 struct ca0132_spec *spec = codec->spec; spec->chip_init_verbs = ca0132_init_verbs0; @@ -7488,34 +7481,24 @@ static int ca0132_prepare_verbs(struct hda_codec *codec) if (!spec->spec_init_verbs) return -ENOMEM; - /* HP jack autodetection */ - spec->spec_init_verbs[0].nid = spec->unsol_tag_hp; - spec->spec_init_verbs[0].param = AC_VERB_SET_UNSOLICITED_ENABLE; - spec->spec_init_verbs[0].verb = AC_USRSP_EN | spec->unsol_tag_hp; - - /* MIC1 jack autodetection */ - spec->spec_init_verbs[1].nid = spec->unsol_tag_amic1; - spec->spec_init_verbs[1].param = AC_VERB_SET_UNSOLICITED_ENABLE; - spec->spec_init_verbs[1].verb = AC_USRSP_EN | spec->unsol_tag_amic1; - /* config EAPD */ - spec->spec_init_verbs[2].nid = 0x0b; - spec->spec_init_verbs[2].param = 0x78D; - spec->spec_init_verbs[2].verb = 0x00; + spec->spec_init_verbs[0].nid = 0x0b; + spec->spec_init_verbs[0].param = 0x78D; + spec->spec_init_verbs[0].verb = 
0x00; /* Previously commented configuration */ /* - spec->spec_init_verbs[3].nid = 0x0b; - spec->spec_init_verbs[3].param = AC_VERB_SET_EAPD_BTLENABLE; + spec->spec_init_verbs[2].nid = 0x0b; + spec->spec_init_verbs[2].param = AC_VERB_SET_EAPD_BTLENABLE; + spec->spec_init_verbs[2].verb = 0x02; + + spec->spec_init_verbs[3].nid = 0x10; + spec->spec_init_verbs[3].param = 0x78D; spec->spec_init_verbs[3].verb = 0x02; spec->spec_init_verbs[4].nid = 0x10; - spec->spec_init_verbs[4].param = 0x78D; + spec->spec_init_verbs[4].param = AC_VERB_SET_EAPD_BTLENABLE; spec->spec_init_verbs[4].verb = 0x02; - - spec->spec_init_verbs[5].nid = 0x10; - spec->spec_init_verbs[5].param = AC_VERB_SET_EAPD_BTLENABLE; - spec->spec_init_verbs[5].verb = 0x02; */ /* Terminator: spec->spec_init_verbs[NUM_SPEC_VERBS-1] */ diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index e7fcfc3b8885..f641c20095f7 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -964,6 +964,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), + SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE), diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 8840daf9c6a3..8a49415aebac 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -33,6 +33,7 @@ #include <linux/delay.h> #include <linux/slab.h> #include <linux/module.h> +#include <linux/pm_runtime.h> #include <sound/core.h> #include <sound/jack.h> #include <sound/asoundef.h> @@ -764,8 +765,10 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid, if (pin_idx < 0) return; + mutex_lock(&spec->pcm_lock); if (hdmi_present_sense(get_pin(spec, pin_idx), 1)) snd_hda_jack_report_sync(codec); + mutex_unlock(&spec->pcm_lock); } static void jack_callback(struct hda_codec *codec, @@ -1628,21 +1631,23 @@ static void sync_eld_via_acomp(struct hda_codec *codec, static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll) { struct hda_codec *codec = per_pin->codec; - struct hdmi_spec *spec = codec->spec; int ret; /* no temporary power up/down needed for component notifier */ - if (!codec_has_acomp(codec)) - snd_hda_power_up_pm(codec); + if (!codec_has_acomp(codec)) { + ret = snd_hda_power_up_pm(codec); + if (ret < 0 && pm_runtime_suspended(hda_codec_dev(codec))) { + snd_hda_power_down_pm(codec); + return false; + } + } - mutex_lock(&spec->pcm_lock); if (codec_has_acomp(codec)) { sync_eld_via_acomp(codec, per_pin); ret = false; /* don't call snd_hda_jack_report_sync() */ } else { ret = hdmi_present_sense_via_verbs(per_pin, repoll); } - mutex_unlock(&spec->pcm_lock); if (!codec_has_acomp(codec)) snd_hda_power_down_pm(codec); @@ -1654,12 +1659,16 @@ static void hdmi_repoll_eld(struct work_struct *work) { struct hdmi_spec_per_pin *per_pin = container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work); + struct hda_codec *codec = per_pin->codec; + struct hdmi_spec *spec = codec->spec; if (per_pin->repoll_count++ > 6) per_pin->repoll_count = 0; + mutex_lock(&spec->pcm_lock); if 
(hdmi_present_sense(per_pin, per_pin->repoll_count)) snd_hda_jack_report_sync(per_pin->codec); + mutex_unlock(&spec->pcm_lock); } static void intel_haswell_fixup_connect_list(struct hda_codec *codec, @@ -3741,6 +3750,11 @@ static int patch_atihdmi(struct hda_codec *codec) spec->chmap.channels_max = max(spec->chmap.channels_max, 8u); + /* AMD GPUs have neither EPSS nor CLKSTOP bits, hence preventing + * the link-down as is. Tell the core to allow it. + */ + codec->link_down_at_suspend = 1; + return 0; } diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index e9bd33ea538f..f6af3e1c2b93 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2366,6 +2366,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), @@ -2545,6 +2546,7 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = { SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu Lifebook S7110", ALC262_FIXUP_FSC_S7110), SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ), SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN), + SND_PCI_QUIRK(0x1734, 0x1141, "FSC ESPRIMO U9210", ALC262_FIXUP_FSC_H270), SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270), SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000), SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ), @@ -4995,7 +4997,6 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec, struct alc_spec *spec = codec->spec; if (action == HDA_FIXUP_ACT_PRE_PROBE) { - spec->shutup = alc_no_shutup; /* reduce click noise */ spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; codec->power_save_node = 0; /* avoid click noises */ @@ -5394,6 +5395,13 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec, /* for hda_fixup_thinkpad_acpi() */ #include "thinkpad_helper.c" +static void alc_fixup_thinkpad_acpi(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + alc_fixup_no_shutup(codec, fix, action); /* reduce click noise */ + hda_fixup_thinkpad_acpi(codec, fix, action); +} + /* for dell wmi mic mute led */ #include "dell_wmi_helper.c" @@ -5946,7 +5954,7 @@ static const struct hda_fixup alc269_fixups[] = { }, [ALC269_FIXUP_THINKPAD_ACPI] = { .type = HDA_FIXUP_FUNC, - .v.func = hda_fixup_thinkpad_acpi, + .v.func = alc_fixup_thinkpad_acpi, .chained = true, .chain_id = ALC269_FIXUP_SKU_IGNORE, }, @@ -6562,6 +6570,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC), SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), + SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE), SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8), SND_PCI_QUIRK(0x1458, 0xfa53, 
"Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), @@ -6603,8 +6612,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), + SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), - SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), @@ -6782,6 +6791,17 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x14, 0x90170110}, {0x19, 0x02a11030}, {0x21, 0x02211020}), + SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION, + {0x14, 0x90170110}, + {0x19, 0x02a11030}, + {0x1a, 0x02a11040}, + {0x1b, 0x01014020}, + {0x21, 0x0221101f}), + SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION, + {0x14, 0x90170110}, + {0x19, 0x02a11020}, + {0x1a, 0x02a11030}, + {0x21, 0x0221101f}), SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60140}, {0x14, 0x90170110}, diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c index 6c85f13ab23f..54f6252faca6 100644 --- a/sound/pci/lx6464es/lx6464es.c +++ b/sound/pci/lx6464es/lx6464es.c @@ -1018,6 +1018,7 @@ static int snd_lx6464es_create(struct snd_card *card, chip->port_dsp_bar = pci_ioremap_bar(pci, 2); if (!chip->port_dsp_bar) { dev_err(card->dev, "cannot remap PCI memory region\n"); + err = -ENOMEM; goto remap_pci_failed; } diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h index caae4843cb70..16e006f708ca 100644 --- a/tools/arch/arm/include/uapi/asm/kvm.h +++ b/tools/arch/arm/include/uapi/asm/kvm.h @@ -91,6 +91,7 @@ struct kvm_regs { #define KVM_VGIC_V3_ADDR_TYPE_DIST 2 #define KVM_VGIC_V3_ADDR_TYPE_REDIST 3 #define KVM_VGIC_ITS_ADDR_TYPE 4 +#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5 #define KVM_VGIC_V3_DIST_SIZE SZ_64K #define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K) diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index 04b3256f8e6d..4e76630dd655 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h @@ -91,6 +91,7 @@ struct kvm_regs { #define KVM_VGIC_V3_ADDR_TYPE_DIST 2 #define KVM_VGIC_V3_ADDR_TYPE_REDIST 3 #define KVM_VGIC_ITS_ADDR_TYPE 4 +#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5 #define KVM_VGIC_V3_DIST_SIZE SZ_64K #define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K) diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h index 833ed9a16adf..1b32b56a03d3 100644 --- a/tools/arch/powerpc/include/uapi/asm/kvm.h +++ b/tools/arch/powerpc/include/uapi/asm/kvm.h @@ -633,6 +633,7 @@ struct kvm_ppc_cpu_char { #define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd) #define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe) +#define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf) /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs diff 
--git a/tools/arch/powerpc/include/uapi/asm/unistd.h b/tools/arch/powerpc/include/uapi/asm/unistd.h index 389c36fd8299..ac5ba55066dd 100644 --- a/tools/arch/powerpc/include/uapi/asm/unistd.h +++ b/tools/arch/powerpc/include/uapi/asm/unistd.h @@ -398,5 +398,6 @@ #define __NR_pkey_alloc 384 #define __NR_pkey_free 385 #define __NR_pkey_mprotect 386 +#define __NR_rseq 387 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index fb00a2fca990..5701f5cecd31 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -282,7 +282,9 @@ #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ #define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ +#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 05f42a46d6ed..959aa53ab678 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -694,15 +694,19 @@ static int do_load(int argc, char **argv) return -1; } - if (do_pin_fd(prog_fd, argv[1])) { - p_err("failed to pin program"); - return -1; - } + if (do_pin_fd(prog_fd, argv[1])) + goto err_close_obj; if (json_output) jsonw_null(json_wtr); + bpf_object__close(obj); + return 0; + +err_close_obj: + bpf_object__close(obj); + return -1; } static int do_help(int argc, char **argv) diff --git a/tools/build/Build.include b/tools/build/Build.include index a4bbb984941d..950c1504ca37 100644 --- a/tools/build/Build.include +++ b/tools/build/Build.include @@ -63,8 +63,8 @@ dep-cmd = $(if $(wildcard $(fixdep)), $(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp; \ rm -f $(depfile); \ mv -f $(dot-target).tmp $(dot-target).cmd, \ - printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \ - printf '\# using basic dep data\n\n' >> $(dot-target).cmd; \ + printf '$(pound) cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \ + printf '$(pound) using basic dep data\n\n' >> $(dot-target).cmd; \ cat $(depfile) >> $(dot-target).cmd; \ printf '\n%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd) @@ -98,4 +98,4 @@ cxx_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXX ### ## HOSTCC C flags -host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CHOSTFLAGS) -D"BUILD_STR(s)=\#s" $(CHOSTFLAGS_$(basetarget).o) $(CHOSTFLAGS_$(obj)) +host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj)) diff --git a/tools/build/Makefile b/tools/build/Makefile index 5eb4b5ad79cb..5edf65e684ab 100644 --- a/tools/build/Makefile +++ b/tools/build/Makefile @@ -43,7 +43,7 @@ $(OUTPUT)fixdep-in.o: FORCE $(Q)$(MAKE) $(build)=fixdep $(OUTPUT)fixdep: $(OUTPUT)fixdep-in.o - $(QUIET_LINK)$(HOSTCC) $(LDFLAGS) -o $@ $< + $(QUIET_LINK)$(HOSTCC) $(HOSTLDFLAGS) -o $@ $< FORCE: diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h index 6fdff5945c8a..9c660e1688ab 100644 --- a/tools/include/uapi/drm/drm.h +++ 
b/tools/include/uapi/drm/drm.h @@ -680,6 +680,13 @@ struct drm_get_cap { */ #define DRM_CLIENT_CAP_ATOMIC 3 +/** + * DRM_CLIENT_CAP_ASPECT_RATIO + * + * If set to 1, the DRM core will provide aspect ratio information in modes. + */ +#define DRM_CLIENT_CAP_ASPECT_RATIO 4 + /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ struct drm_set_client_cap { __u64 capability; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index e0b06784f227..59b19b6a40d7 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -2630,7 +2630,7 @@ struct bpf_fib_lookup { union { /* inputs to lookup */ __u8 tos; /* AF_INET */ - __be32 flowlabel; /* AF_INET6 */ + __be32 flowinfo; /* AF_INET6, flow_label + priority */ /* output: metric of fib result (IPv4/IPv6 only) */ __u32 rt_metric; diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index 68699f654118..cf01b6824244 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -333,6 +333,7 @@ enum { IFLA_BRPORT_BCAST_FLOOD, IFLA_BRPORT_GROUP_FWD_MASK, IFLA_BRPORT_NEIGH_SUPPRESS, + IFLA_BRPORT_ISOLATED, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -516,6 +517,7 @@ enum { IFLA_VXLAN_COLLECT_METADATA, IFLA_VXLAN_LABEL, IFLA_VXLAN_GPE, + IFLA_VXLAN_TTL_INHERIT, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 39e364c70caf..b6270a3b38e9 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -948,6 +948,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_S390_BPB 152 #define KVM_CAP_GET_MSR_FEATURES 153 #define KVM_CAP_HYPERV_EVENTFD 154 +#define KVM_CAP_HYPERV_TLBFLUSH 155 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 4e60e105583e..7ec85d567598 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -302,19 +302,34 @@ static int read_symbols(struct elf *elf) continue; sym->pfunc = sym->cfunc = sym; coldstr = strstr(sym->name, ".cold."); - if (coldstr) { - coldstr[0] = '\0'; - pfunc = find_symbol_by_name(elf, sym->name); - coldstr[0] = '.'; - - if (!pfunc) { - WARN("%s(): can't find parent function", - sym->name); - goto err; - } - - sym->pfunc = pfunc; - pfunc->cfunc = sym; + if (!coldstr) + continue; + + coldstr[0] = '\0'; + pfunc = find_symbol_by_name(elf, sym->name); + coldstr[0] = '.'; + + if (!pfunc) { + WARN("%s(): can't find parent function", + sym->name); + goto err; + } + + sym->pfunc = pfunc; + pfunc->cfunc = sym; + + /* + * Unfortunately, -fnoreorder-functions puts the child + * inside the parent. Remove the overlap so we can + * have sane assumptions. + * + * Note that pfunc->len now no longer matches + * pfunc->sym.st_size. 
+ */ + if (sym->sec == pfunc->sec && + sym->offset >= pfunc->offset && + sym->offset + sym->len == pfunc->offset + pfunc->len) { + pfunc->len -= sym->len; } } } @@ -504,10 +519,12 @@ struct section *elf_create_section(struct elf *elf, const char *name, sec->sh.sh_flags = SHF_ALLOC; - /* Add section name to .shstrtab */ + /* Add section name to .shstrtab (or .strtab for Clang) */ shstrtab = find_section_by_name(elf, ".shstrtab"); + if (!shstrtab) + shstrtab = find_section_by_name(elf, ".strtab"); if (!shstrtab) { - WARN("can't find .shstrtab section"); + WARN("can't find .shstrtab or .strtab section"); return NULL; } diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index b5ac356ba323..f5a3b402589e 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -207,8 +207,7 @@ ifdef PYTHON_CONFIG PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null) PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil - PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) - PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS)) + PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null) FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) endif diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c index 3598b8b75d27..ef5d59a5742e 100644 --- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c +++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c @@ -243,7 +243,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain) u64 ip; u64 skip_slot = -1; - if (chain->nr < 3) + if (!chain || chain->nr < 3) return skip_slot; ip = chain->ips[2]; diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl index 4dfe42666d0c..f0b1709a5ffb 100644 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl @@ -341,6 +341,8 @@ 330 common pkey_alloc __x64_sys_pkey_alloc 331 common pkey_free __x64_sys_pkey_free 332 common statx __x64_sys_statx +333 common io_pgetevents __x64_sys_io_pgetevents +334 common rseq __x64_sys_rseq # # x32-specific system call numbers start at 512 to avoid cache impact diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c index 4b2caf6d48e7..fead6b3b4206 100644 --- a/tools/perf/arch/x86/util/perf_regs.c +++ b/tools/perf/arch/x86/util/perf_regs.c @@ -226,7 +226,7 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op) else if (rm[2].rm_so != rm[2].rm_eo) prefix[0] = '+'; else - strncpy(prefix, "+0", 2); + scnprintf(prefix, sizeof(prefix), "+0"); } /* Rename register */ diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c index 63eb49082774..44195514b19e 100644 --- a/tools/perf/bench/numa.c +++ b/tools/perf/bench/numa.c @@ -1098,7 +1098,7 @@ static void *worker_thread(void *__tdata) u8 *global_data; u8 *process_data; u8 *thread_data; - u64 bytes_done; + u64 bytes_done, secs; long work_done; u32 l; struct rusage rusage; @@ -1254,7 +1254,8 @@ static void *worker_thread(void *__tdata) timersub(&stop, &start0, &diff); td->runtime_ns = diff.tv_sec * NSEC_PER_SEC; td->runtime_ns += diff.tv_usec * NSEC_PER_USEC; - td->speed_gbs = bytes_done / (td->runtime_ns / NSEC_PER_SEC) / 1e9; + secs = td->runtime_ns / NSEC_PER_SEC; + td->speed_gbs = secs ? 
bytes_done / secs / 1e9 : 0; getrusage(RUSAGE_THREAD, &rusage); td->system_time_ns = rusage.ru_stime.tv_sec * NSEC_PER_SEC; diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 5eb22cc56363..8180319285af 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -283,6 +283,15 @@ out_put: return ret; } +static int process_feature_event(struct perf_tool *tool, + union perf_event *event, + struct perf_session *session) +{ + if (event->feat.feat_id < HEADER_LAST_FEATURE) + return perf_event__process_feature(tool, event, session); + return 0; +} + static int hist_entry__tty_annotate(struct hist_entry *he, struct perf_evsel *evsel, struct perf_annotate *ann) @@ -471,7 +480,7 @@ int cmd_annotate(int argc, const char **argv) .attr = perf_event__process_attr, .build_id = perf_event__process_build_id, .tracing_data = perf_event__process_tracing_data, - .feature = perf_event__process_feature, + .feature = process_feature_event, .ordered_events = true, .ordering_requires_timestamps = true, }, diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index cdb5b6949832..c04dc7b53797 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -217,7 +217,8 @@ static int process_feature_event(struct perf_tool *tool, } /* - * All features are received, we can force the + * (feat_id = HEADER_LAST_FEATURE) is the end marker which + * means all features are received, now we can force the * group if needed. */ setup_forced_leader(rep, session->evlist); diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index a31d7082188e..568ddfac3213 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -1834,6 +1834,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event, struct perf_evlist *evlist; struct perf_evsel *evsel, *pos; int err; + static struct perf_evsel_script *es; err = perf_event__process_attr(tool, event, pevlist); if (err) @@ -1842,6 +1843,19 @@ static int process_attr(struct perf_tool *tool, union perf_event *event, evlist = *pevlist; evsel = perf_evlist__last(*pevlist); + if (!evsel->priv) { + if (scr->per_event_dump) { + evsel->priv = perf_evsel_script__new(evsel, + scr->session->data); + } else { + es = zalloc(sizeof(*es)); + if (!es) + return -ENOMEM; + es->fp = stdout; + evsel->priv = es; + } + } + if (evsel->attr.type >= PERF_TYPE_MAX && evsel->attr.type != PERF_TYPE_SYNTH) return 0; @@ -3030,6 +3044,15 @@ int process_cpu_map_event(struct perf_tool *tool __maybe_unused, return set_maps(script); } +static int process_feature_event(struct perf_tool *tool, + union perf_event *event, + struct perf_session *session) +{ + if (event->feat.feat_id < HEADER_LAST_FEATURE) + return perf_event__process_feature(tool, event, session); + return 0; +} + #ifdef HAVE_AUXTRACE_SUPPORT static int perf_script__process_auxtrace_info(struct perf_tool *tool, union perf_event *event, @@ -3074,7 +3097,7 @@ int cmd_script(int argc, const char **argv) .attr = process_attr, .event_update = perf_event__process_event_update, .tracing_data = perf_event__process_tracing_data, - .feature = perf_event__process_feature, + .feature = process_feature_event, .build_id = perf_event__process_build_id, .id_index = perf_event__process_id_index, .auxtrace_info = perf_script__process_auxtrace_info, @@ -3125,8 +3148,9 @@ int cmd_script(int argc, const char **argv) "+field to add and -field to remove." "Valid types: hw,sw,trace,raw,synth. 
" "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso," - "addr,symoff,period,iregs,uregs,brstack,brstacksym,flags," - "bpf-output,callindent,insn,insnlen,brstackinsn,synth,phys_addr", + "addr,symoff,srcline,period,iregs,uregs,brstack," + "brstacksym,flags,bpf-output,brstackinsn,brstackoff," + "callindent,insn,insnlen,synth,phys_addr,metric,misc", parse_output_fields), OPT_BOOLEAN('a', "all-cpus", &system_wide, "system-wide collection from all CPUs"), diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 22547a490e1f..05be023c3f0e 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -1742,7 +1742,7 @@ static void print_interval(char *prefix, struct timespec *ts) } } - if ((num_print_interval == 0 && metric_only) || interval_clear) + if ((num_print_interval == 0 || interval_clear) && metric_only) print_metric_headers(" ", true); if (++num_print_interval == 25) num_print_interval = 0; diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c index 0c6d1002b524..ac1bcdc17dae 100644 --- a/tools/perf/jvmti/jvmti_agent.c +++ b/tools/perf/jvmti/jvmti_agent.c @@ -35,6 +35,7 @@ #include <sys/mman.h> #include <syscall.h> /* for gettid() */ #include <err.h> +#include <linux/kernel.h> #include "jvmti_agent.h" #include "../util/jitdump.h" @@ -249,7 +250,7 @@ void *jvmti_open(void) /* * jitdump file name */ - snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid()); + scnprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid()); fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666); if (fd == -1) diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build index 17783913d330..215ba30b8534 100644 --- a/tools/perf/pmu-events/Build +++ b/tools/perf/pmu-events/Build @@ -1,7 +1,7 @@ hostprogs := jevents jevents-y += json.o jsmn.o jevents.o -CHOSTFLAGS_jevents.o = -I$(srctree)/tools/include +HOSTCFLAGS_jevents.o = -I$(srctree)/tools/include pmu-events-y += pmu-events.o JDIR = pmu-events/arch/$(SRCARCH) JSON = $(shell [ -d $(JDIR) ] && \ diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py index 38dfb720fb6f..54ace2f6bc36 100644 --- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py +++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py @@ -31,10 +31,8 @@ def flag_str(event_name, field_name, value): string = "" if flag_fields[event_name][field_name]: - print_delim = 0 - keys = flag_fields[event_name][field_name]['values'].keys() - keys.sort() - for idx in keys: + print_delim = 0 + for idx in sorted(flag_fields[event_name][field_name]['values']): if not value and not idx: string += flag_fields[event_name][field_name]['values'][idx] break @@ -51,14 +49,12 @@ def symbol_str(event_name, field_name, value): string = "" if symbolic_fields[event_name][field_name]: - keys = symbolic_fields[event_name][field_name]['values'].keys() - keys.sort() - for idx in keys: + for idx in sorted(symbolic_fields[event_name][field_name]['values']): if not value and not idx: - string = symbolic_fields[event_name][field_name]['values'][idx] + string = symbolic_fields[event_name][field_name]['values'][idx] break - if (value == idx): - string = symbolic_fields[event_name][field_name]['values'][idx] + if (value == idx): + string = symbolic_fields[event_name][field_name]['values'][idx] break return string @@ -74,19 +70,17 @@ def trace_flag_str(value): string = "" print_delim = 0 - keys = trace_flags.keys() - - for idx in keys: - if 
not value and not idx: - string += "NONE" - break - - if idx and (value & idx) == idx: - if print_delim: - string += " | "; - string += trace_flags[idx] - print_delim = 1 - value &= ~idx + for idx in trace_flags: + if not value and not idx: + string += "NONE" + break + + if idx and (value & idx) == idx: + if print_delim: + string += " | "; + string += trace_flags[idx] + print_delim = 1 + value &= ~idx return string diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py index 81a56cd2b3c1..21a7a1298094 100755 --- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py +++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py @@ -8,6 +8,7 @@ # PerfEvent is the base class for all perf event sample, PebsEvent # is a HW base Intel x86 PEBS event, and user could add more SW/HW # event classes based on requirements. +from __future__ import print_function import struct @@ -44,7 +45,8 @@ class PerfEvent(object): PerfEvent.event_num += 1 def show(self): - print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso) + print("PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % + (self.name, self.symbol, self.comm, self.dso)) # # Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py index fdd92f699055..cac7b2542ee8 100644 --- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py +++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py @@ -11,7 +11,7 @@ try: import wx except ImportError: - raise ImportError, "You need to install the wxpython lib for this script" + raise ImportError("You need to install the wxpython lib for this script") class RootFrame(wx.Frame): diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py index f6c84966e4f8..7384dcb628c4 100644 --- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py +++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py @@ -5,6 +5,7 @@ # This software may be distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. 
+from __future__ import print_function import errno, os @@ -33,7 +34,7 @@ def nsecs_str(nsecs): return str def add_stats(dict, key, value): - if not dict.has_key(key): + if key not in dict: dict[key] = (value, value, value, 1) else: min, max, avg, count = dict[key] @@ -72,10 +73,10 @@ try: except: if not audit_package_warned: audit_package_warned = True - print "Install the audit-libs-python package to get syscall names.\n" \ - "For example:\n # apt-get install python-audit (Ubuntu)" \ - "\n # yum install audit-libs-python (Fedora)" \ - "\n etc.\n" + print("Install the audit-libs-python package to get syscall names.\n" + "For example:\n # apt-get install python-audit (Ubuntu)" + "\n # yum install audit-libs-python (Fedora)" + "\n etc.\n") def syscall_name(id): try: diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py index de66cb3b72c9..3473e7f66081 100644 --- a/tools/perf/scripts/python/sched-migration.py +++ b/tools/perf/scripts/python/sched-migration.py @@ -9,13 +9,17 @@ # This software is distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. - +from __future__ import print_function import os import sys from collections import defaultdict -from UserList import UserList +try: + from UserList import UserList +except ImportError: + # Python 3: UserList moved to the collections package + from collections import UserList sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') @@ -300,7 +304,7 @@ class TimeSliceList(UserList): if i == -1: return - for i in xrange(i, len(self.data)): + for i in range(i, len(self.data)): timeslice = self.data[i] if timeslice.start > end: return @@ -336,8 +340,8 @@ class SchedEventProxy: on_cpu_task = self.current_tsk[headers.cpu] if on_cpu_task != -1 and on_cpu_task != prev_pid: - print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \ - (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid) + print("Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \ + headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid) threads[prev_pid] = prev_comm threads[next_pid] = next_comm diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 2bde505e2e7e..dd850a26d579 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -422,7 +422,7 @@ static const char *shell_test__description(char *description, size_t size, #define for_each_shell_test(dir, base, ent) \ while ((ent = readdir(dir)) != NULL) \ - if (!is_directory(base, ent)) + if (!is_directory(base, ent) && ent->d_name[0] != '.') static const char *shell_tests__dir(char *path, size_t size) { diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index 7d4077068454..61211918bfba 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -1309,6 +1309,11 @@ static int test__checkevent_config_cache(struct perf_evlist *evlist) return 0; } +static bool test__intel_pt_valid(void) +{ + return !!perf_pmu__find("intel_pt"); +} + static int test__intel_pt(struct perf_evlist *evlist) { struct perf_evsel *evsel = perf_evlist__first(evlist); @@ -1375,6 +1380,7 @@ struct evlist_test { const char *name; __u32 type; const int id; + bool (*valid)(void); int (*check)(struct perf_evlist *evlist); }; @@ -1648,6 +1654,7 @@ static struct evlist_test test__events[] = { }, { .name = 
"intel_pt//u", + .valid = test__intel_pt_valid, .check = test__intel_pt, .id = 52, }, @@ -1686,17 +1693,24 @@ static struct terms_test test__terms[] = { static int test_event(struct evlist_test *e) { + struct parse_events_error err = { .idx = 0, }; struct perf_evlist *evlist; int ret; + if (e->valid && !e->valid()) { + pr_debug("... SKIP"); + return 0; + } + evlist = perf_evlist__new(); if (evlist == NULL) return -ENOMEM; - ret = parse_events(evlist, e->name, NULL); + ret = parse_events(evlist, e->name, &err); if (ret) { - pr_debug("failed to parse event '%s', err %d\n", - e->name, ret); + pr_debug("failed to parse event '%s', err %d, str '%s'\n", + e->name, ret, err.str); + parse_events_print_error(&err, e->name); } else { ret = e->check(evlist); } @@ -1714,10 +1728,11 @@ static int test_events(struct evlist_test *events, unsigned cnt) for (i = 0; i < cnt; i++) { struct evlist_test *e = &events[i]; - pr_debug("running test %d '%s'\n", e->id, e->name); + pr_debug("running test %d '%s'", e->id, e->name); ret1 = test_event(e); if (ret1) ret2 = ret1; + pr_debug("\n"); } return ret2; @@ -1799,7 +1814,7 @@ static int test_pmu_events(void) } while (!ret && (ent = readdir(dir))) { - struct evlist_test e; + struct evlist_test e = { .id = 0, }; char name[2 * NAME_MAX + 1 + 12 + 3]; /* Names containing . are special and cannot be used directly */ diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh index 263057039693..94e513e62b34 100755 --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh @@ -14,35 +14,40 @@ libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1 nm -Dg $libc 2>/dev/null | fgrep -q inet_pton || exit 254 trace_libc_inet_pton_backtrace() { - idx=0 - expected[0]="ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)" - expected[1]=".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" + + expected=`mktemp -u /tmp/expected.XXX` + + echo "ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)" > $expected + echo ".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected case "$(uname -m)" in s390x) eventattr='call-graph=dwarf,max-stack=4' - expected[2]="gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" - expected[3]="(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" - expected[4]="main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" + echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected + echo "(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected + echo "main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected ;; *) eventattr='max-stack=3' - expected[2]="getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" - expected[3]=".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" + echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected + echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected ;; esac - file=`mktemp -u /tmp/perf.data.XXX` + perf_data=`mktemp -u /tmp/perf.data.XXX` + perf_script=`mktemp -u /tmp/perf.script.XXX` + perf record -e probe_libc:inet_pton/$eventattr/ -o $perf_data ping -6 -c 1 ::1 > /dev/null 2>&1 + perf script -i $perf_data > $perf_script - perf record -e probe_libc:inet_pton/$eventattr/ -o $file ping -6 -c 1 ::1 > /dev/null 2>&1 - perf script -i $file | while read line ; do + exec 3<$perf_script + exec 4<$expected + while read line <&3 && read -r pattern <&4; do + [ -z "$pattern" 
] && break echo $line - echo "$line" | egrep -q "${expected[$idx]}" + echo "$line" | egrep -q "$pattern" if [ $? -ne 0 ] ; then - printf "FAIL: expected backtrace entry %d \"%s\" got \"%s\"\n" $idx "${expected[$idx]}" "$line" + printf "FAIL: expected backtrace entry \"%s\" got \"%s\"\n" "$pattern" "$line" exit 1 fi - let idx+=1 - [ -z "${expected[$idx]}" ] && break done # If any statements are executed from this point onwards, @@ -58,6 +63,6 @@ skip_if_no_perf_probe && \ perf probe -q $libc inet_pton && \ trace_libc_inet_pton_backtrace err=$? -rm -f ${file} +rm -f ${perf_data} ${perf_script} ${expected} perf probe -q -d probe_libc:inet_pton exit $err diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh index 55ad9793d544..4ce276efe6b4 100755 --- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh +++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh @@ -17,7 +17,7 @@ skip_if_no_perf_probe || exit 2 file=$(mktemp /tmp/temporary_file.XXXXX) trace_open_vfs_getname() { - evts=$(echo $(perf list syscalls:sys_enter_open* |& egrep 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/') + evts=$(echo $(perf list syscalls:sys_enter_open* 2>&1 | egrep 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/') perf trace -e $evts touch $file 2>&1 | \ egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$" } diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c index 40e30a26b23c..9497d02f69e6 100644 --- a/tools/perf/tests/topology.c +++ b/tools/perf/tests/topology.c @@ -45,6 +45,7 @@ static int session_write_header(char *path) perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY); perf_header__set_feat(&session->header, HEADER_NRCPUS); + perf_header__set_feat(&session->header, HEADER_ARCH); session->header.data_size += DATA_SIZE; diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp index bf31ceab33bd..89512504551b 100644 --- a/tools/perf/util/c++/clang.cpp +++ b/tools/perf/util/c++/clang.cpp @@ -146,8 +146,15 @@ getBPFObjectFromModule(llvm::Module *Module) raw_svector_ostream ostream(*Buffer); legacy::PassManager PM; - if (TargetMachine->addPassesToEmitFile(PM, ostream, - TargetMachine::CGFT_ObjectFile)) { + bool NotAdded; +#if CLANG_VERSION_MAJOR < 7 + NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream, + TargetMachine::CGFT_ObjectFile); +#else + NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream, nullptr, + TargetMachine::CGFT_ObjectFile); +#endif + if (NotAdded) { llvm::errs() << "TargetMachine can't emit a file of this type\n"; return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);; } diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 540cd2dcd3e7..653ff65aa2c3 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -2129,6 +2129,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused) int cpu_nr = ff->ph->env.nr_cpus_avail; u64 size = 0; struct perf_header *ph = ff->ph; + bool do_core_id_test = true; ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu)); if (!ph->env.cpu) @@ -2183,6 +2184,13 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused) return 0; } + /* On s390 the socket_id number is not related to the numbers of cpus. + * The socket_id number might be higher than the numbers of cpus. 
+ * This depends on the configuration. + */ + if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4)) + do_core_id_test = false; + for (i = 0; i < (u32)cpu_nr; i++) { if (do_read_u32(ff, &nr)) goto free_cpu; @@ -2192,7 +2200,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused) if (do_read_u32(ff, &nr)) goto free_cpu; - if (nr != (u32)-1 && nr > (u32)cpu_nr) { + if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) { pr_debug("socket_id number is too big." "You may need to upgrade the perf tool.\n"); goto free_cpu; @@ -3456,7 +3464,7 @@ int perf_event__process_feature(struct perf_tool *tool, pr_warning("invalid record type %d in pipe-mode\n", type); return 0; } - if (feat == HEADER_RESERVED || feat > HEADER_LAST_FEATURE) { + if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) { pr_warning("invalid record type %d in pipe-mode\n", type); return -1; } diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c index ba4c9dd18643..d426761a549d 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c @@ -366,7 +366,7 @@ static int intel_pt_get_cyc(unsigned int byte, const unsigned char *buf, if (len < offs) return INTEL_PT_NEED_MORE_BYTES; byte = buf[offs++]; - payload |= (byte >> 1) << shift; + payload |= ((uint64_t)byte >> 1) << shift; } packet->type = INTEL_PT_CYC; diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c index 976e658e38dc..5e94857dfca2 100644 --- a/tools/perf/util/llvm-utils.c +++ b/tools/perf/util/llvm-utils.c @@ -266,16 +266,16 @@ static const char *kinc_fetch_script = "#!/usr/bin/env sh\n" "if ! test -d \"$KBUILD_DIR\"\n" "then\n" -" exit -1\n" +" exit 1\n" "fi\n" "if ! test -f \"$KBUILD_DIR/include/generated/autoconf.h\"\n" "then\n" -" exit -1\n" +" exit 1\n" "fi\n" "TMPDIR=`mktemp -d`\n" "if test -z \"$TMPDIR\"\n" "then\n" -" exit -1\n" +" exit 1\n" "fi\n" "cat << EOF > $TMPDIR/Makefile\n" "obj-y := dummy.o\n" diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index d2fb597c9a8c..3ba6a1742f91 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -234,6 +234,74 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias, return 0; } +static void perf_pmu_assign_str(char *name, const char *field, char **old_str, + char **new_str) +{ + if (!*old_str) + goto set_new; + + if (*new_str) { /* Have new string, check with old */ + if (strcasecmp(*old_str, *new_str)) + pr_debug("alias %s differs in field '%s'\n", + name, field); + zfree(old_str); + } else /* Nothing new --> keep old string */ + return; +set_new: + *old_str = *new_str; + *new_str = NULL; +} + +static void perf_pmu_update_alias(struct perf_pmu_alias *old, + struct perf_pmu_alias *newalias) +{ + perf_pmu_assign_str(old->name, "desc", &old->desc, &newalias->desc); + perf_pmu_assign_str(old->name, "long_desc", &old->long_desc, + &newalias->long_desc); + perf_pmu_assign_str(old->name, "topic", &old->topic, &newalias->topic); + perf_pmu_assign_str(old->name, "metric_expr", &old->metric_expr, + &newalias->metric_expr); + perf_pmu_assign_str(old->name, "metric_name", &old->metric_name, + &newalias->metric_name); + perf_pmu_assign_str(old->name, "value", &old->str, &newalias->str); + old->scale = newalias->scale; + old->per_pkg = newalias->per_pkg; + old->snapshot = newalias->snapshot; + memcpy(old->unit, newalias->unit, sizeof(old->unit)); +} + +/* Delete an alias entry. 
*/ +static void perf_pmu_free_alias(struct perf_pmu_alias *newalias) +{ + zfree(&newalias->name); + zfree(&newalias->desc); + zfree(&newalias->long_desc); + zfree(&newalias->topic); + zfree(&newalias->str); + zfree(&newalias->metric_expr); + zfree(&newalias->metric_name); + parse_events_terms__purge(&newalias->terms); + free(newalias); +} + +/* Merge an alias, search in alias list. If this name is already + * present merge both of them to combine all information. + */ +static bool perf_pmu_merge_alias(struct perf_pmu_alias *newalias, + struct list_head *alist) +{ + struct perf_pmu_alias *a; + + list_for_each_entry(a, alist, list) { + if (!strcasecmp(newalias->name, a->name)) { + perf_pmu_update_alias(a, newalias); + perf_pmu_free_alias(newalias); + return true; + } + } + return false; +} + static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name, char *desc, char *val, char *long_desc, char *topic, @@ -241,9 +309,11 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name, char *metric_expr, char *metric_name) { + struct parse_events_term *term; struct perf_pmu_alias *alias; int ret; int num; + char newval[256]; alias = malloc(sizeof(*alias)); if (!alias) @@ -262,6 +332,27 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name, return ret; } + /* Scan event and remove leading zeroes, spaces, newlines, some + * platforms have terms specified as + * event=0x0091 (read from files ../<PMU>/events/<FILE> + * and terms specified as event=0x91 (read from JSON files). + * + * Rebuild string to make alias->str member comparable. + */ + memset(newval, 0, sizeof(newval)); + ret = 0; + list_for_each_entry(term, &alias->terms, list) { + if (ret) + ret += scnprintf(newval + ret, sizeof(newval) - ret, + ","); + if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) + ret += scnprintf(newval + ret, sizeof(newval) - ret, + "%s=%#x", term->config, term->val.num); + else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) + ret += scnprintf(newval + ret, sizeof(newval) - ret, + "%s=%s", term->config, term->val.str); + } + alias->name = strdup(name); if (dir) { /* @@ -285,9 +376,10 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name, snprintf(alias->unit, sizeof(alias->unit), "%s", unit); } alias->per_pkg = perpkg && sscanf(perpkg, "%d", &num) == 1 && num == 1; - alias->str = strdup(val); + alias->str = strdup(newval); - list_add_tail(&alias->list, list); + if (!perf_pmu_merge_alias(alias, list)) + list_add_tail(&alias->list, list); return 0; } @@ -303,6 +395,9 @@ static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FI buf[ret] = 0; + /* Remove trailing newline from sysfs file */ + rtrim(buf); + return __perf_pmu__new_alias(list, dir, name, NULL, buf, NULL, NULL, NULL, NULL, NULL, NULL); } diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 46e9e19ab1ac..bc32e57d17be 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -908,14 +908,11 @@ static void python_process_tracepoint(struct perf_sample *sample, if (_PyTuple_Resize(&t, n) == -1) Py_FatalError("error resizing Python tuple"); - if (!dict) { + if (!dict) call_object(handler, t, handler_name); - } else { + else call_object(handler, t, default_handler_name); - Py_DECREF(dict); - } - Py_XDECREF(all_entries_dict); Py_DECREF(t); } @@ -1235,7 +1232,6 @@ static void 
python_process_general_event(struct perf_sample *sample, call_object(handler, t, handler_name); - Py_DECREF(dict); Py_DECREF(t); } @@ -1627,6 +1623,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile) fprintf(ofp, "# See the perf-script-python Documentation for the list " "of available functions.\n\n"); + fprintf(ofp, "from __future__ import print_function\n\n"); fprintf(ofp, "import os\n"); fprintf(ofp, "import sys\n\n"); @@ -1636,10 +1633,10 @@ static int python_generate_script(struct pevent *pevent, const char *outfile) fprintf(ofp, "from Core import *\n\n\n"); fprintf(ofp, "def trace_begin():\n"); - fprintf(ofp, "\tprint \"in trace_begin\"\n\n"); + fprintf(ofp, "\tprint(\"in trace_begin\")\n\n"); fprintf(ofp, "def trace_end():\n"); - fprintf(ofp, "\tprint \"in trace_end\"\n\n"); + fprintf(ofp, "\tprint(\"in trace_end\")\n\n"); while ((event = trace_find_next_event(pevent, event))) { fprintf(ofp, "def %s__%s(", event->system, event->name); @@ -1675,7 +1672,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile) "common_secs, common_nsecs,\n\t\t\t" "common_pid, common_comm)\n\n"); - fprintf(ofp, "\t\tprint \""); + fprintf(ofp, "\t\tprint(\""); not_first = 0; count = 0; @@ -1736,31 +1733,31 @@ static int python_generate_script(struct pevent *pevent, const char *outfile) fprintf(ofp, "%s", f->name); } - fprintf(ofp, ")\n\n"); + fprintf(ofp, "))\n\n"); - fprintf(ofp, "\t\tprint 'Sample: {'+" - "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}'\n\n"); + fprintf(ofp, "\t\tprint('Sample: {'+" + "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}')\n\n"); fprintf(ofp, "\t\tfor node in common_callchain:"); fprintf(ofp, "\n\t\t\tif 'sym' in node:"); - fprintf(ofp, "\n\t\t\t\tprint \"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name'])"); + fprintf(ofp, "\n\t\t\t\tprint(\"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name']))"); fprintf(ofp, "\n\t\t\telse:"); - fprintf(ofp, "\n\t\t\t\tprint \"\t[%%x]\" %% (node['ip'])\n\n"); - fprintf(ofp, "\t\tprint \"\\n\"\n\n"); + fprintf(ofp, "\n\t\t\t\tprint(\"\t[%%x]\" %% (node['ip']))\n\n"); + fprintf(ofp, "\t\tprint()\n\n"); } fprintf(ofp, "def trace_unhandled(event_name, context, " "event_fields_dict, perf_sample_dict):\n"); - fprintf(ofp, "\t\tprint get_dict_as_string(event_fields_dict)\n"); - fprintf(ofp, "\t\tprint 'Sample: {'+" - "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}'\n\n"); + fprintf(ofp, "\t\tprint(get_dict_as_string(event_fields_dict))\n"); + fprintf(ofp, "\t\tprint('Sample: {'+" + "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}')\n\n"); fprintf(ofp, "def print_header(" "event_name, cpu, secs, nsecs, pid, comm):\n" - "\tprint \"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t" - "(event_name, cpu, secs, nsecs, pid, comm),\n\n"); + "\tprint(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t" + "(event_name, cpu, secs, nsecs, pid, comm), end=\"\")\n\n"); fprintf(ofp, "def get_dict_as_string(a_dict, delimiter=' '):\n" "\treturn delimiter.join" diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index a8fb63edcf89..e2926f72a821 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -1991,8 +1991,7 @@ static void nfit_test0_setup(struct nfit_test *t) pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES; pcap->header.length = sizeof(*pcap); pcap->highest_capability = 1; - pcap->capabilities = ACPI_NFIT_CAPABILITY_CACHE_FLUSH | - ACPI_NFIT_CAPABILITY_MEM_FLUSH; + pcap->capabilities = 
ACPI_NFIT_CAPABILITY_MEM_FLUSH; offset += pcap->header.length; if (t->setup_hotplug) { diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index 7eb613ffef55..b4994a94968b 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config @@ -6,6 +6,7 @@ CONFIG_TEST_BPF=m CONFIG_CGROUP_BPF=y CONFIG_NETDEVSIM=m CONFIG_NET_CLS_ACT=y +CONFIG_NET_SCHED=y CONFIG_NET_SCH_INGRESS=y CONFIG_NET_IPIP=y CONFIG_IPV6=y diff --git a/tools/testing/selftests/bpf/test_kmod.sh b/tools/testing/selftests/bpf/test_kmod.sh index 35669ccd4d23..9df0d2ac45f8 100755 --- a/tools/testing/selftests/bpf/test_kmod.sh +++ b/tools/testing/selftests/bpf/test_kmod.sh @@ -1,6 +1,15 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +msg="skip all tests:" +if [ "$(id -u)" != "0" ]; then + echo $msg please run this as root >&2 + exit $ksft_skip +fi + SRC_TREE=../../../../ test_run() diff --git a/tools/testing/selftests/bpf/test_lirc_mode2.sh b/tools/testing/selftests/bpf/test_lirc_mode2.sh index ce2e15e4f976..677686198df3 100755 --- a/tools/testing/selftests/bpf/test_lirc_mode2.sh +++ b/tools/testing/selftests/bpf/test_lirc_mode2.sh @@ -1,6 +1,15 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +msg="skip all tests:" +if [ $UID != 0 ]; then + echo $msg please run this as root >&2 + exit $ksft_skip +fi + GREEN='\033[0;92m' RED='\033[0;31m' NC='\033[0m' # No Color diff --git a/tools/testing/selftests/bpf/test_lwt_seg6local.sh b/tools/testing/selftests/bpf/test_lwt_seg6local.sh index 1c77994b5e71..270fa8f49573 100755 --- a/tools/testing/selftests/bpf/test_lwt_seg6local.sh +++ b/tools/testing/selftests/bpf/test_lwt_seg6local.sh @@ -21,6 +21,15 @@ # An UDP datagram is sent from fb00::1 to fb00::6. The test succeeds if this # datagram can be read on NS6 when binding to fb00::6. +# Kselftest framework requirement - SKIP code is 4. 
+ksft_skip=4 + +msg="skip all tests:" +if [ $UID != 0 ]; then + echo $msg please run this as root >&2 + exit $ksft_skip +fi + TMP_FILE="/tmp/selftest_lwt_seg6local.txt" cleanup() diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index 05c8cb71724a..9e78df207919 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c @@ -1413,18 +1413,12 @@ out: int main(int argc, char **argv) { - struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY}; int iov_count = 1, length = 1024, rate = 1; struct sockmap_options options = {0}; int opt, longindex, err, cg_fd = 0; char *bpf_file = BPF_SOCKMAP_FILENAME; int test = PING_PONG; - if (setrlimit(RLIMIT_MEMLOCK, &r)) { - perror("setrlimit(RLIMIT_MEMLOCK)"); - return 1; - } - if (argc < 2) return test_suite(); diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 2ecd27b670d7..f5f7bcc96046 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -4975,6 +4975,24 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_LWT_XMIT, }, { + "make headroom for LWT_XMIT", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_2, 34), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_skb_change_head), + /* split for s390 to succeed */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_2, 42), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_skb_change_head), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_LWT_XMIT, + }, + { "invalid access of tc_classid for LWT_IN", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, @@ -12554,8 +12572,11 @@ static void do_test_single(struct bpf_test *test, bool unpriv, } if (fd_prog >= 0) { + __u8 tmp[TEST_DATA_LEN << 2]; + __u32 size_tmp = sizeof(tmp); + err = bpf_prog_test_run(fd_prog, 1, test->data, - sizeof(test->data), NULL, NULL, + sizeof(test->data), tmp, &size_tmp, &retval, NULL); if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) { printf("Unexpected bpf_prog_test_run error\n"); diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 78245d60d8bc..0f45633bd634 100644..100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh @@ -740,13 +740,6 @@ ipv6_rt_add() run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64" log_test $? 2 "Attempt to add duplicate route - reject route" - # iproute2 prepend only sets NLM_F_CREATE - # - adds a new route; does NOT convert existing route to ECMP - add_route6 "2001:db8:104::/64" "via 2001:db8:101::2" - run_cmd "$IP -6 ro prepend 2001:db8:104::/64 via 2001:db8:103::2" - check_route6 "2001:db8:104::/64 via 2001:db8:101::2 dev veth1 metric 1024 2001:db8:104::/64 via 2001:db8:103::2 dev veth3 metric 1024" - log_test $? 0 "Add new route for existing prefix (w/o NLM_F_EXCL)" - # route append with same prefix adds a new route # - iproute2 sets NLM_F_CREATE | NLM_F_APPEND add_route6 "2001:db8:104::/64" "via 2001:db8:101::2" @@ -754,27 +747,6 @@ ipv6_rt_add() check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1" log_test $? 
0 "Append nexthop to existing route - gw" - add_route6 "2001:db8:104::/64" "via 2001:db8:101::2" - run_cmd "$IP -6 ro append 2001:db8:104::/64 dev veth3" - check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop dev veth3 weight 1" - log_test $? 0 "Append nexthop to existing route - dev only" - - # multipath route can not have a nexthop that is a reject route - add_route6 "2001:db8:104::/64" "via 2001:db8:101::2" - run_cmd "$IP -6 ro append unreachable 2001:db8:104::/64" - log_test $? 2 "Append nexthop to existing route - reject route" - - # reject route can not be converted to multipath route - run_cmd "$IP -6 ro flush 2001:db8:104::/64" - run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64" - run_cmd "$IP -6 ro append 2001:db8:104::/64 via 2001:db8:103::2" - log_test $? 2 "Append nexthop to existing reject route - gw" - - run_cmd "$IP -6 ro flush 2001:db8:104::/64" - run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64" - run_cmd "$IP -6 ro append 2001:db8:104::/64 dev veth3" - log_test $? 2 "Append nexthop to existing reject route - dev only" - # insert mpath directly add_route6 "2001:db8:104::/64" "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2" check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1" @@ -819,13 +791,6 @@ ipv6_rt_replace_single() check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::3 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1" log_test $? 0 "Single path with multipath" - # single path with reject - # - add_initial_route6 "nexthop via 2001:db8:101::2" - run_cmd "$IP -6 ro replace unreachable 2001:db8:104::/64" - check_route6 "unreachable 2001:db8:104::/64 dev lo metric 1024" - log_test $? 0 "Single path with reject route" - # single path with single path using MULTIPATH attribute # add_initial_route6 "via 2001:db8:101::2" @@ -873,12 +838,6 @@ ipv6_rt_replace_mpath() check_route6 "2001:db8:104::/64 via 2001:db8:101::3 dev veth1 metric 1024" log_test $? 0 "Multipath with single path via multipath attribute" - # multipath with reject - add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2" - run_cmd "$IP -6 ro replace unreachable 2001:db8:104::/64" - check_route6 "unreachable 2001:db8:104::/64 dev lo metric 1024" - log_test $? 
0 "Multipath with reject route" - # route replace fails - invalid nexthop 1 add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2" run_cmd "$IP -6 ro replace 2001:db8:104::/64 nexthop via 2001:db8:111::3 nexthop via 2001:db8:103::3" diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh index 792fa4d0285e..850767befa47 100755 --- a/tools/testing/selftests/net/udpgso_bench.sh +++ b/tools/testing/selftests/net/udpgso_bench.sh @@ -35,9 +35,6 @@ run_udp() { echo "udp gso" run_in_netns ${args} -S - - echo "udp gso zerocopy" - run_in_netns ${args} -S -z } run_tcp() { diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h index a4684112676c..86ce22417e0d 100644 --- a/tools/testing/selftests/rseq/rseq.h +++ b/tools/testing/selftests/rseq/rseq.h @@ -133,17 +133,27 @@ static inline uint32_t rseq_current_cpu(void) return cpu; } +static inline void rseq_clear_rseq_cs(void) +{ +#ifdef __LP64__ + __rseq_abi.rseq_cs.ptr = 0; +#else + __rseq_abi.rseq_cs.ptr.ptr32 = 0; +#endif +} + /* - * rseq_prepare_unload() should be invoked by each thread using rseq_finish*() - * at least once between their last rseq_finish*() and library unload of the - * library defining the rseq critical section (struct rseq_cs). This also - * applies to use of rseq in code generated by JIT: rseq_prepare_unload() - * should be invoked at least once by each thread using rseq_finish*() before - * reclaim of the memory holding the struct rseq_cs. + * rseq_prepare_unload() should be invoked by each thread executing a rseq + * critical section at least once between their last critical section and + * library unload of the library defining the rseq critical section + * (struct rseq_cs). This also applies to use of rseq in code generated by + * JIT: rseq_prepare_unload() should be invoked at least once by each + * thread executing a rseq critical section before reclaim of the memory + * holding the struct rseq_cs. */ static inline void rseq_prepare_unload(void) { - __rseq_abi.rseq_cs = 0; + rseq_clear_rseq_cs(); } #endif /* RSEQ_H_ */ diff --git a/tools/testing/selftests/x86/sigreturn.c b/tools/testing/selftests/x86/sigreturn.c index 246145b84a12..4d9dc3f2fd70 100644 --- a/tools/testing/selftests/x86/sigreturn.c +++ b/tools/testing/selftests/x86/sigreturn.c @@ -610,21 +610,41 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss) */ for (int i = 0; i < NGREG; i++) { greg_t req = requested_regs[i], res = resulting_regs[i]; + if (i == REG_TRAPNO || i == REG_IP) continue; /* don't care */ - if (i == REG_SP) { - printf("\tSP: %llx -> %llx\n", (unsigned long long)req, - (unsigned long long)res); + if (i == REG_SP) { /* - * In many circumstances, the high 32 bits of rsp - * are zeroed. For example, we could be a real - * 32-bit program, or we could hit any of a number - * of poorly-documented IRET or segmented ESP - * oddities. If this happens, it's okay. + * If we were using a 16-bit stack segment, then + * the kernel is a bit stuck: IRET only restores + * the low 16 bits of ESP/RSP if SS is 16-bit. + * The kernel uses a hack to restore bits 31:16, + * but that hack doesn't help with bits 63:32. + * On Intel CPUs, bits 63:32 end up zeroed, and, on + * AMD CPUs, they leak the high bits of the kernel + * espfix64 stack pointer. There's very little that + * the kernel can do about it. + * + * Similarly, if we are returning to a 32-bit context, + * the CPU will often lose the high 32 bits of RSP. 
*/ - if (res == (req & 0xFFFFFFFF)) - continue; /* OK; not expected to work */ + + if (res == req) + continue; + + if (cs_bits != 64 && ((res ^ req) & 0xFFFFFFFF) == 0) { + printf("[NOTE]\tSP: %llx -> %llx\n", + (unsigned long long)req, + (unsigned long long)res); + continue; + } + + printf("[FAIL]\tSP mismatch: requested 0x%llx; got 0x%llx\n", + (unsigned long long)requested_regs[i], + (unsigned long long)resulting_regs[i]); + nerrs++; + continue; } bool ignore_reg = false; @@ -654,25 +674,18 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss) #endif /* Sanity check on the kernel */ - if (i == REG_CX && requested_regs[i] != resulting_regs[i]) { + if (i == REG_CX && req != res) { printf("[FAIL]\tCX (saved SP) mismatch: requested 0x%llx; got 0x%llx\n", - (unsigned long long)requested_regs[i], - (unsigned long long)resulting_regs[i]); + (unsigned long long)req, + (unsigned long long)res); nerrs++; continue; } - if (requested_regs[i] != resulting_regs[i] && !ignore_reg) { - /* - * SP is particularly interesting here. The - * usual cause of failures is that we hit the - * nasty IRET case of returning to a 16-bit SS, - * in which case bits 16:31 of the *kernel* - * stack pointer persist in ESP. - */ + if (req != res && !ignore_reg) { printf("[FAIL]\tReg %d mismatch: requested 0x%llx; got 0x%llx\n", - i, (unsigned long long)requested_regs[i], - (unsigned long long)resulting_regs[i]); + i, (unsigned long long)req, + (unsigned long long)res); nerrs++; } } diff --git a/tools/virtio/linux/scatterlist.h b/tools/virtio/linux/scatterlist.h index 9a45f90e2d08..369ee308b668 100644 --- a/tools/virtio/linux/scatterlist.h +++ b/tools/virtio/linux/scatterlist.h @@ -36,7 +36,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page) */ BUG_ON((unsigned long) page & 0x03); #ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); BUG_ON(sg_is_chain(sg)); #endif sg->page_link = page_link | (unsigned long) page; @@ -67,7 +66,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page, static inline struct page *sg_page(struct scatterlist *sg) { #ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); BUG_ON(sg_is_chain(sg)); #endif return (struct page *)((sg)->page_link & ~0x3); @@ -116,9 +114,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, **/ static inline void sg_mark_end(struct scatterlist *sg) { -#ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); -#endif /* * Set termination bit, clear potential chain bit */ @@ -136,17 +131,11 @@ static inline void sg_mark_end(struct scatterlist *sg) **/ static inline void sg_unmark_end(struct scatterlist *sg) { -#ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); -#endif sg->page_link &= ~0x02; } static inline struct scatterlist *sg_next(struct scatterlist *sg) { -#ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); -#endif if (sg_is_last(sg)) return NULL; @@ -160,13 +149,6 @@ static inline struct scatterlist *sg_next(struct scatterlist *sg) static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents) { memset(sgl, 0, sizeof(*sgl) * nents); -#ifdef CONFIG_DEBUG_SG - { - unsigned int i; - for (i = 0; i < nents; i++) - sgl[i].sg_magic = SG_MAGIC; - } -#endif sg_mark_end(&sgl[nents - 1]); } diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 90d30fbe95ae..b20b751286fc 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -119,8 +119,12 @@ irqfd_shutdown(struct work_struct *work) { struct kvm_kernel_irqfd *irqfd = 
container_of(work, struct kvm_kernel_irqfd, shutdown); + struct kvm *kvm = irqfd->kvm; u64 cnt; + /* Make sure irqfd has been initialized in assign path. */ + synchronize_srcu(&kvm->irq_srcu); + /* * Synchronize with the wait-queue and unhook ourselves to prevent * further events. @@ -387,7 +391,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) idx = srcu_read_lock(&kvm->irq_srcu); irqfd_update(kvm, irqfd); - srcu_read_unlock(&kvm->irq_srcu, idx); list_add_tail(&irqfd->list, &kvm->irqfds.items); @@ -402,11 +405,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) if (events & EPOLLIN) schedule_work(&irqfd->inject); - /* * do not drop the file until the irqfd is fully initialized, otherwise * we might race against the EPOLLHUP */ - fdput(f); #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS if (kvm_arch_has_irq_bypass()) { irqfd->consumer.token = (void *)irqfd->eventfd; @@ -421,6 +419,13 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) } #endif + srcu_read_unlock(&kvm->irq_srcu, idx); + + /* + * do not drop the file until the irqfd is fully initialized, otherwise + * we might race against the EPOLLHUP + */ + fdput(f); return 0; fail:
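Most of the tools/perf/scripts/python hunks in this merge are mechanical Python 2/3 compatibility conversions: print becomes a function via "from __future__ import print_function", dict.has_key() becomes the "in" operator, xrange() becomes range(), and UserList is imported with a fallback. The short standalone sketch below is illustrative only and is not taken from the perf sources; every module, function, and variable name in it is hypothetical. It shows the same idioms in a form that runs unchanged under both Python 2 and Python 3.

# Illustrative sketch of the Python 2/3 idioms used in the perf script
# conversions above; function and variable names are hypothetical.
from __future__ import print_function   # print() is a function on Python 2 as well

try:
    from UserList import UserList        # Python 2 location
except ImportError:
    from collections import UserList     # Python 3: UserList moved to collections

def add_stats(stats, key, value):
    # "key in stats" replaces the Python-2-only stats.has_key(key)
    if key not in stats:
        stats[key] = (value, value, value, 1)
    else:
        lo, hi, total, count = stats[key]
        stats[key] = (min(lo, value), max(hi, value), total + value, count + 1)

stats = {}
for v in range(5):                       # range() replaces xrange()
    add_stats(stats, "latency", v)

timeline = UserList(sorted(stats))       # UserList behaves the same on 2 and 3
print("keys:", list(timeline))
print("latency stats: ", end="")         # end="" stands in for the old trailing comma
print(stats["latency"])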