Shuah Khan <shuah@kernel.org> <shuah.khan@hp.com>
Shuah Khan <shuah@kernel.org> <shuahkh@osg.samsung.com>
Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
+Simon Arlott <simon@octiron.net> <simon@fire.lp0.eu>
Simon Kelley <simon@thekelleys.org.uk>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <shemminger@osdl.org>
+----------------+-----------------+-----------------+-----------------------------+
| Cavium | ThunderX2 SMMUv3| #126 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
+| Cavium | ThunderX2 Core | #219 | CAVIUM_TX2_ERRATUM_219 |
++----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| Freescale/NXP | LS2080A/LS1043A | A-008585 | FSL_ERRATUM_A008585 |
+----------------+-----------------+-----------------+-----------------------------+
Both KASAN modes work with both SLUB and SLAB memory allocators.
For better bug detection and nicer reporting, enable CONFIG_STACKTRACE.
+To augment reports with the last allocation and freeing stack of the physical page,
+it is also recommended to enable CONFIG_PAGE_OWNER and boot with page_owner=on.
+
To disable instrumentation for specific files or directories, add a line
similar to the following to the respective kernel Makefile:
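- For a single file (e.g. main.o)::

    KASAN_SANITIZE_main.o := n

- For all files in one directory::

    KASAN_SANITIZE := n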
- description: Theobroma Systems RK3368-uQ7 with Haikou baseboard
items:
- - const: tsd,rk3368-uq7-haikou
+ - const: tsd,rk3368-lion-haikou
- const: rockchip,rk3368
- description: Theobroma Systems RK3399-Q7 with Haikou baseboard
items:
- - const: tsd,rk3399-q7-haikou
+ - const: tsd,rk3399-puma-haikou
- const: rockchip,rk3399
- description: Tronsmart Orion R68 Meta
* Advanced Interrupt Controller (AIC)
Required properties:
-- compatible: Should be "atmel,<chip>-aic"
- <chip> can be "at91rm9200", "sama5d2", "sama5d3" or "sama5d4"
+- compatible: Should be:
+ - "atmel,<chip>-aic" where <chip> can be "at91rm9200", "sama5d2",
+ "sama5d3" or "sama5d4"
+ - "microchip,<chip>-aic" where <chip> can be "sam9x60"
+
- interrupt-controller: Identifies the node as an interrupt controller.
- #interrupt-cells: The number of cells to define the interrupts. It should be 3.
The first cell is the IRQ number (aka "Peripheral IDentifier" on datasheet).
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/allwinner,sun4i-a10-csi.yaml#
+$id: http://devicetree.org/schemas/media/allwinner,sun4i-a10-csi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Allwinner A10 CMOS Sensor Interface (CSI) Device Tree Bindings
clocks:
items:
- description: The CSI interface clock
- - description: The CSI module clock
- description: The CSI ISP clock
- description: The CSI DRAM clock
clock-names:
items:
- const: bus
- - const: mod
- const: isp
- const: ram
compatible = "allwinner,sun7i-a20-csi0";
reg = <0x01c09000 0x1000>;
interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI0>,
- <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
- clock-names = "bus", "mod", "isp", "ram";
+ clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
+ clock-names = "bus", "isp", "ram";
resets = <&ccu RST_CSI0>;
port {
allOf:
- $ref: "/schemas/types.yaml#/definitions/string"
- enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15,
- ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, ESPI,
- ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP, GPIT0, GPIT1,
- GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1, GPIU2,
- GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, I2C1, I2C10, I2C11, I2C12,
- I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, I2C6, I2C7,
- I2C8, I2C9, I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ, LPC,
- LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2,
+ ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMC,
+ ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP, GPIT0,
+ GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
+ GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, I2C1, I2C10, I2C11,
+ I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, I2C6,
+ I2C7, I2C8, I2C9, I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ,
+ LPC, LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2,
MACLINK3, MACLINK4, MDIO1, MDIO2, MDIO3, MDIO4, NCTS1, NCTS2,
NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4, NDSR1, NDSR2, NDSR3,
NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4, NRTS1,
PWM8, PWM9, RGMII1, RGMII2, RGMII3, RGMII4, RMII1, RMII2, RMII3,
RMII4, RXD1, RXD2, RXD3, RXD4, SALT1, SALT10, SALT11, SALT12,
SALT13, SALT14, SALT15, SALT16, SALT2, SALT3, SALT4, SALT5,
- SALT6, SALT7, SALT8, SALT9, SD1, SD2, SD3, SD3DAT4, SD3DAT5,
- SD3DAT6, SD3DAT7, SGPM1, SGPS1, SIOONCTRL, SIOPBI, SIOPBO,
- SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1ABR, SPI1CS1,
- SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1, TACH10, TACH11,
- TACH12, TACH13, TACH14, TACH15, TACH2, TACH3, TACH4, TACH5,
- TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2, THRU3, TXD1,
- TXD2, TXD3, TXD4, UART10, UART11, UART12, UART13, UART6, UART7,
- UART8, UART9, VB, VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3,
- WDTRST4, ]
+ SALT6, SALT7, SALT8, SALT9, SD1, SD2, SGPM1, SGPS1, SIOONCTRL,
+ SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1,
+ SPI1ABR, SPI1CS1, SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1,
+ TACH10, TACH11, TACH12, TACH13, TACH14, TACH15, TACH2, TACH3,
+ TACH4, TACH5, TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2,
+ THRU3, TXD1, TXD2, TXD3, TXD4, UART10, UART11, UART12, UART13,
+ UART6, UART7, UART8, UART9, VB, VGAHS, VGAVS, WDTRST1, WDTRST2,
+ WDTRST3, WDTRST4, ]
groups:
allOf:
- $ref: "/schemas/types.yaml#/definitions/string"
- enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15,
- ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, ESPI,
- ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWQSPID, FWSPIWP, GPIT0,
- GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
- GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, HVI3C3, HVI3C4, I2C1,
- I2C10, I2C11, I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3,
- I2C4, I2C5, I2C6, I2C7, I2C8, I2C9, I3C3, I3C4, I3C5, I3C6,
- JTAGM, LHPD, LHSIRQ, LPC, LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ,
- MACLINK1, MACLINK2, MACLINK3, MACLINK4, MDIO1, MDIO2, MDIO3,
- MDIO4, NCTS1, NCTS2, NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4,
- NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NRI1,
- NRI2, NRI3, NRI4, NRTS1, NRTS2, NRTS3, NRTS4, OSCCLK, PEWAKE,
- PWM0, PWM1, PWM10G0, PWM10G1, PWM11G0, PWM11G1, PWM12G0, PWM12G1,
- PWM13G0, PWM13G1, PWM14G0, PWM14G1, PWM15G0, PWM15G1, PWM2, PWM3,
- PWM4, PWM5, PWM6, PWM7, PWM8G0, PWM8G1, PWM9G0, PWM9G1, QSPI1,
- QSPI2, RGMII1, RGMII2, RGMII3, RGMII4, RMII1, RMII2, RMII3,
- RMII4, RXD1, RXD2, RXD3, RXD4, SALT1, SALT10G0, SALT10G1,
- SALT11G0, SALT11G1, SALT12G0, SALT12G1, SALT13G0, SALT13G1,
- SALT14G0, SALT14G1, SALT15G0, SALT15G1, SALT16G0, SALT16G1,
- SALT2, SALT3, SALT4, SALT5, SALT6, SALT7, SALT8, SALT9G0,
- SALT9G1, SD1, SD2, SD3, SD3DAT4, SD3DAT5, SD3DAT6, SD3DAT7,
- SGPM1, SGPS1, SIOONCTRL, SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD,
- SIOS3, SIOS5, SIOSCI, SPI1, SPI1ABR, SPI1CS1, SPI1WP, SPI2,
- SPI2CS1, SPI2CS2, TACH0, TACH1, TACH10, TACH11, TACH12, TACH13,
- TACH14, TACH15, TACH2, TACH3, TACH4, TACH5, TACH6, TACH7, TACH8,
- TACH9, THRU0, THRU1, THRU2, THRU3, TXD1, TXD2, TXD3, TXD4,
- UART10, UART11, UART12G0, UART12G1, UART13G0, UART13G1, UART6,
- UART7, UART8, UART9, VB, VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3,
- WDTRST4, ]
+ ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMCG1,
+ EMMCG4, EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID,
+ FWQSPID, FWSPIWP, GPIT0, GPIT1, GPIT2, GPIT3, GPIT4, GPIT5,
+ GPIT6, GPIT7, GPIU0, GPIU1, GPIU2, GPIU3, GPIU4, GPIU5, GPIU6,
+ GPIU7, HVI3C3, HVI3C4, I2C1, I2C10, I2C11, I2C12, I2C13, I2C14,
+ I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, I2C6, I2C7, I2C8, I2C9,
+ I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ, LPC, LPCHC, LPCPD,
+ LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2, MACLINK3, MACLINK4,
+ MDIO1, MDIO2, MDIO3, MDIO4, NCTS1, NCTS2, NCTS3, NCTS4, NDCD1,
+ NDCD2, NDCD3, NDCD4, NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2,
+ NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4, NRTS1, NRTS2, NRTS3, NRTS4,
+ OSCCLK, PEWAKE, PWM0, PWM1, PWM10G0, PWM10G1, PWM11G0, PWM11G1,
+ PWM12G0, PWM12G1, PWM13G0, PWM13G1, PWM14G0, PWM14G1, PWM15G0,
+ PWM15G1, PWM2, PWM3, PWM4, PWM5, PWM6, PWM7, PWM8G0, PWM8G1,
+ PWM9G0, PWM9G1, QSPI1, QSPI2, RGMII1, RGMII2, RGMII3, RGMII4,
+ RMII1, RMII2, RMII3, RMII4, RXD1, RXD2, RXD3, RXD4, SALT1,
+ SALT10G0, SALT10G1, SALT11G0, SALT11G1, SALT12G0, SALT12G1,
+ SALT13G0, SALT13G1, SALT14G0, SALT14G1, SALT15G0, SALT15G1,
+ SALT16G0, SALT16G1, SALT2, SALT3, SALT4, SALT5, SALT6, SALT7,
+ SALT8, SALT9G0, SALT9G1, SD1, SD2, SD3, SGPM1, SGPS1, SIOONCTRL,
+ SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1,
+ SPI1ABR, SPI1CS1, SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1,
+ TACH10, TACH11, TACH12, TACH13, TACH14, TACH15, TACH2, TACH3,
+ TACH4, TACH5, TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2,
+ THRU3, TXD1, TXD2, TXD3, TXD4, UART10, UART11, UART12G0,
+ UART12G1, UART13G0, UART13G1, UART6, UART7, UART8, UART9, VB,
+ VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3, WDTRST4, ]
required:
- compatible
properties:
compatible:
enum:
- - const: regulator-fixed
- - const: regulator-fixed-clock
+ - regulator-fixed
+ - regulator-fixed-clock
regulator-name: true
properties:
compatible:
- items:
- - enum:
- - sifive,rocket0
- - sifive,e5
- - sifive,e51
- - sifive,u54-mc
- - sifive,u54
- - sifive,u5
- - const: riscv
+ oneOf:
+ - items:
+ - enum:
+ - sifive,rocket0
+ - sifive,e5
+ - sifive,e51
+ - sifive,u54-mc
+ - sifive,u54
+ - sifive,u5
+ - const: riscv
+ - const: riscv # Simulator only
description:
Identifies that the hart uses the RISC-V instruction set
and identifies the type of the hart.
insensitive, letters in the riscv,isa string must be all
lowercase to simplify parsing.
- timebase-frequency:
- type: integer
- minimum: 1
- description:
- Specifies the clock frequency of the system timer in Hz.
- This value is common to all harts on a single system image.
+ # RISC-V requires 'timebase-frequency' in /cpus, so disallow it here
+ timebase-frequency: false
interrupt-controller:
type: object
required:
- riscv,isa
- - timebase-frequency
- interrupt-controller
examples:
=======
For general Linux networking support, please use the netdev mailing
list, which is monitored by Pensando personnel::
+
netdev@vger.kernel.org
For more specific support needs, please use the Pensando driver support
email::
- drivers@pensando.io
+
+ drivers@pensando.io
Part III: Registering a Network Device to DIM
==============================================
-Net DIM API exposes the main function net_dim(struct net_dim *dim,
-struct net_dim_sample end_sample). This function is the entry point to the Net
+The Net DIM API exposes the main function net_dim(struct dim *dim,
+struct dim_sample end_sample). This function is the entry point to the Net
DIM algorithm and has to be called every time the driver would like to check if
it should change interrupt moderation parameters. The driver should provide two
-data structures: struct net_dim and struct net_dim_sample. Struct net_dim
+data structures: struct dim and struct dim_sample. Struct dim
describes the state of DIM for a specific object (RX queue, TX queue,
other queues, etc.). This includes the current selected profile, previous data
samples, the callback function provided by the driver and more.
-Struct net_dim_sample describes a data sample, which will be compared to the
-data sample stored in struct net_dim in order to decide on the algorithm's next
+Struct dim_sample describes a data sample, which will be compared to the
+data sample stored in struct dim in order to decide on the algorithm's next
step. The sample should include bytes, packets and interrupts, measured by
the driver.
interrupt. Since Net DIM has a built-in moderation and it might decide to skip
iterations under certain conditions, there is no need to moderate the net_dim()
calls as well. As mentioned above, the driver needs to provide an object of type
-struct net_dim to the net_dim() function call. It is advised for each entity
-using Net DIM to hold a struct net_dim as part of its data structure and use it
-as the main Net DIM API object. The struct net_dim_sample should hold the latest
+struct dim to the net_dim() function call. It is advised for each entity
+using Net DIM to hold a struct dim as part of its data structure and use it
+as the main Net DIM API object. The struct dim_sample should hold the latest
bytes, packets and interrupts count. No need to perform any calculations, just
include the raw data.
my_driver.c:
-#include <linux/net_dim.h>
+#include <linux/dim.h>
/* Callback for net DIM to schedule on a decision to change moderation */
void my_driver_do_dim_work(struct work_struct *work)
{
- /* Get struct net_dim from struct work_struct */
- struct net_dim *dim = container_of(work, struct net_dim,
- work);
+ /* Get struct dim from struct work_struct */
+ struct dim *dim = container_of(work, struct dim,
+ work);
/* Do interrupt moderation related stuff */
...
/* Signal net DIM work is done and it should move to next iteration */
- dim->state = NET_DIM_START_MEASURE;
+ dim->state = DIM_START_MEASURE;
}
/* My driver's interrupt handler */
{
...
/* A struct to hold current measured data */
- struct net_dim_sample dim_sample;
+ struct dim_sample dim_sample;
...
/* Initiate data sample struct with current data */
- net_dim_sample(my_entity->events,
- my_entity->packets,
- my_entity->bytes,
- &dim_sample);
+ dim_update_sample(my_entity->events,
+ my_entity->packets,
+ my_entity->bytes,
+ &dim_sample);
/* Call net DIM */
net_dim(&my_entity->dim, dim_sample);
...
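One thing the example above does not show is how dim.work gets wired to the
callback. A minimal sketch, following the example's naming (the init function
name here is hypothetical, everything else matches the fragments above):

/* My entity's initialization function (my_entity was already allocated) */
int my_driver_init_my_entity(struct my_entity *my_entity)
{
	...
	/* Bind the Net DIM work item to the driver's moderation callback */
	INIT_WORK(&my_entity->dim.work, my_driver_do_dim_work);
	...
}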
ARM/SPREADTRUM SoC SUPPORT
M: Orson Zhai <orsonzhai@gmail.com>
-M: Baolin Wang <baolin.wang@linaro.org>
+M: Baolin Wang <baolin.wang7@gmail.com>
M: Chunyan Zhang <zhang.lyra@gmail.com>
S: Maintained
F: arch/arm64/boot/dts/sprd
N: sprd
+N: sc27xx
+N: sc2731
ARM/STI ARCHITECTURE
M: Patrice Chotard <patrice.chotard@st.com>
N: kona
F: arch/arm/mach-bcm/
-BROADCOM BCM2835 ARM ARCHITECTURE
+BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE
M: Eric Anholt <eric@anholt.net>
M: Stefan Wahren <wahrenst@gmx.net>
L: bcm-kernel-feedback-list@broadcom.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://github.com/anholt/linux
S: Maintained
+N: bcm2711
N: bcm2835
F: drivers/staging/vc04_services
F: drivers/usb/gadget/udc/bcm63xx_udc.*
BROADCOM BCM7XXX ARM ARCHITECTURE
-M: Brian Norris <computersforpeace@gmail.com>
-M: Gregory Fong <gregory.0xf0@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
M: bcm-kernel-feedback-list@broadcom.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
F: include/linux/ks0108.h
L3MDEV
-M: David Ahern <dsa@cumulusnetworks.com>
+M: David Ahern <dsahern@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
F: net/l3mdev
M: Felix Fietkau <nbd@openwrt.org>
M: John Crispin <john@phrozen.org>
M: Sean Wang <sean.wang@mediatek.com>
-M: Nelson Chang <nelson.chang@mediatek.com>
+M: Mark Lee <Mark-MC.Lee@mediatek.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/mediatek/
F: Documentation/parisc/
F: drivers/parisc/
F: drivers/char/agp/parisc-agp.c
+F: drivers/input/misc/hp_sdc_rtc.c
F: drivers/input/serio/gscps2.c
+F: drivers/input/serio/hp_sdc*
F: drivers/parport/parport_gsc.*
F: drivers/tty/serial/8250/8250_gsc.c
F: drivers/video/fbdev/sti*
F: drivers/video/console/sti*
F: drivers/video/logo/logo_parisc*
+F: include/linux/hp_sdc.h
PARMAN
M: Jiri Pirko <jiri@mellanox.com>
F: drivers/scsi/qla1280.[ch]
QLOGIC QLA2XXX FC-SCSI DRIVER
-M: qla2xxx-upstream@qlogic.com
+M: hmadhani@marvell.com
L: linux-scsi@vger.kernel.org
S: Supported
F: Documentation/scsi/LICENSE.qla2xxx
K: regulator_get_optional
VRF
-M: David Ahern <dsa@cumulusnetworks.com>
+M: David Ahern <dsahern@kernel.org>
M: Shrijeet Mukherjee <shrijeet@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
NAME = Nesting Opossum
# *DOCUMENTATION*
export KBUILD_VMLINUX_LIBS := $(libs-y1)
export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds
export LDFLAGS_vmlinux
-# used by scripts/package/Makefile
+# used by scripts/Makefile.package
export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) LICENSES arch include scripts tools)
vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS)
reg = <0x70>;
#address-cells = <1>;
#size-cells = <0>;
+ i2c-mux-idle-disconnect;
i2c@0 {
/* FMC A */
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
- i2c-mux-idle-disconnect;
};
i2c@1 {
#address-cells = <1>;
#size-cells = <0>;
reg = <1>;
- i2c-mux-idle-disconnect;
};
i2c@2 {
#address-cells = <1>;
#size-cells = <0>;
reg = <2>;
- i2c-mux-idle-disconnect;
};
i2c@3 {
#address-cells = <1>;
#size-cells = <0>;
reg = <3>;
- i2c-mux-idle-disconnect;
};
i2c@4 {
#address-cells = <1>;
#size-cells = <0>;
reg = <4>;
- i2c-mux-idle-disconnect;
};
i2c@5 {
#address-cells = <1>;
#size-cells = <0>;
reg = <5>;
- i2c-mux-idle-disconnect;
ina230@40 { compatible = "ti,ina230"; reg = <0x40>; shunt-resistor = <5000>; };
ina230@41 { compatible = "ti,ina230"; reg = <0x41>; shunt-resistor = <5000>; };
#address-cells = <1>;
#size-cells = <0>;
reg = <6>;
- i2c-mux-idle-disconnect;
};
i2c@7 {
#address-cells = <1>;
#size-cells = <0>;
reg = <7>;
- i2c-mux-idle-disconnect;
u41: pca9575@20 {
compatible = "nxp,pca9575";
#address-cells = <1>;
#size-cells = <0>;
pinctrl-0 = <&emmc_gpio34 &gpclk2_gpio43>;
+ bus-width = <4>;
mmc-pwrseq = <&wifi_pwrseq>;
non-removable;
status = "okay";
reg = <0 0x40000000>;
};
+ leds {
+ /*
+ * Since there is no upstream GPIO driver yet,
+ * remove the incomplete node.
+ */
+ /delete-node/ act;
+ };
+
reg_3v3: fixed-regulator {
compatible = "regulator-fixed";
regulator-name = "3V3";
vin-supply = <&sw1c_reg>;
};
+&snvs_poweroff {
+ status = "okay";
+};
+
&iomuxc {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hog>;
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
reg = <0x302d0000 0x10000>;
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
+ clocks = <&clks IMX7D_GPT1_ROOT_CLK>,
<&clks IMX7D_GPT1_ROOT_CLK>;
clock-names = "ipg", "per";
};
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
reg = <0x302e0000 0x10000>;
interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
+ clocks = <&clks IMX7D_GPT2_ROOT_CLK>,
<&clks IMX7D_GPT2_ROOT_CLK>;
clock-names = "ipg", "per";
status = "disabled";
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
reg = <0x302f0000 0x10000>;
interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
+ clocks = <&clks IMX7D_GPT3_ROOT_CLK>,
<&clks IMX7D_GPT3_ROOT_CLK>;
clock-names = "ipg", "per";
status = "disabled";
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
reg = <0x30300000 0x10000>;
interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
+ clocks = <&clks IMX7D_GPT4_ROOT_CLK>,
<&clks IMX7D_GPT4_ROOT_CLK>;
clock-names = "ipg", "per";
status = "disabled";
&twl_gpio {
ti,use-leds;
};
+
+&twl_keypad {
+ status = "disabled";
+};
pinctrl-1 = <&ephy_leds_pins>;
status = "okay";
+ gmac0: mac@0 {
+ compatible = "mediatek,eth-mac";
+ reg = <0>;
+ phy-mode = "2500base-x";
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+
gmac1: mac@1 {
compatible = "mediatek,eth-mac";
reg = <1>;
+ phy-mode = "gmii";
phy-handle = <&phy0>;
};
phy0: ethernet-phy@0 {
reg = <0>;
- phy-mode = "gmii";
};
};
};
compatible = "mediatek,mt7629-sgmiisys", "syscon";
reg = <0x1b128000 0x3000>;
#clock-cells = <1>;
- mediatek,physpeed = "2500";
};
sgmiisys1: syscon@1b130000 {
compatible = "mediatek,mt7629-sgmiisys", "syscon";
reg = <0x1b130000 0x3000>;
#clock-cells = <1>;
- mediatek,physpeed = "2500";
};
};
};
compatible = "ti,wl1285", "ti,wl1283";
reg = <2>;
/* gpio_100 with gpmc_wait2 pad as wakeirq */
- interrupts-extended = <&gpio4 4 IRQ_TYPE_EDGE_RISING>,
+ interrupts-extended = <&gpio4 4 IRQ_TYPE_LEVEL_HIGH>,
<&omap4_pmx_core 0x4e>;
interrupt-names = "irq", "wakeup";
ref-clock-frequency = <26000000>;
compatible = "ti,wl1271";
reg = <2>;
/* gpio_53 with gpmc_ncs3 pad as wakeup */
- interrupts-extended = <&gpio2 21 IRQ_TYPE_EDGE_RISING>,
+ interrupts-extended = <&gpio2 21 IRQ_TYPE_LEVEL_HIGH>,
<&omap4_pmx_core 0x3a>;
interrupt-names = "irq", "wakeup";
ref-clock-frequency = <38400000>;
compatible = "ti,wl1281";
reg = <2>;
interrupt-parent = <&gpio1>;
- interrupts = <21 IRQ_TYPE_EDGE_RISING>; /* gpio 53 */
+ interrupts = <21 IRQ_TYPE_LEVEL_HIGH>; /* gpio 53 */
ref-clock-frequency = <26000000>;
tcxo-clock-frequency = <26000000>;
};
compatible = "ti,wl1271";
reg = <2>;
interrupt-parent = <&gpio2>;
- interrupts = <9 IRQ_TYPE_EDGE_RISING>; /* gpio 41 */
+ interrupts = <9 IRQ_TYPE_LEVEL_HIGH>; /* gpio 41 */
ref-clock-frequency = <38400000>;
};
};
pinctrl-names = "default";
pinctrl-0 = <&wlcore_irq_pin>;
interrupt-parent = <&gpio1>;
- interrupts = <14 IRQ_TYPE_EDGE_RISING>; /* gpio 14 */
+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>; /* gpio 14 */
ref-clock-frequency = <26000000>;
};
};
};
};
- gpu_cm: clock-controller@1500 {
+ gpu_cm: gpu_cm@1500 {
compatible = "ti,omap4-cm";
reg = <0x1500 0x100>;
#address-cells = <1>;
<STM32_PINMUX('F', 6, AF9)>; /* QSPI_BK1_IO3 */
bias-disable;
drive-push-pull;
- slew-rate = <3>;
+ slew-rate = <1>;
};
pins2 {
pinmux = <STM32_PINMUX('B', 6, AF10)>; /* QSPI_BK1_NCS */
bias-pull-up;
drive-push-pull;
- slew-rate = <3>;
+ slew-rate = <1>;
};
};
<STM32_PINMUX('G', 7, AF11)>; /* QSPI_BK2_IO3 */
bias-disable;
drive-push-pull;
- slew-rate = <3>;
+ slew-rate = <1>;
};
pins2 {
pinmux = <STM32_PINMUX('C', 0, AF10)>; /* QSPI_BK2_NCS */
bias-pull-up;
drive-push-pull;
- slew-rate = <3>;
+ slew-rate = <1>;
};
};
compatible = "allwinner,sun7i-a20-csi0";
reg = <0x01c09000 0x1000>;
interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI0>,
- <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
- clock-names = "bus", "mod", "isp", "ram";
+ clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
+ clock-names = "bus", "isp", "ram";
resets = <&ccu RST_CSI0>;
status = "disabled";
};
#address-cells = <1>;
#size-cells = <0>;
reg = <0x70>;
+ i2c-mux-idle-disconnect;
sff0_i2c: i2c@1 {
#address-cells = <1>;
reg = <0x71>;
#address-cells = <1>;
#size-cells = <0>;
+ i2c-mux-idle-disconnect;
sff5_i2c: i2c@1 {
#address-cells = <1>;
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_DA8XX=y
CONFIG_BACKLIGHT_PWM=m
+CONFIG_BACKLIGHT_GPIO=m
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_SOUND=m
CONFIG_VIDEO_OV5645=m
CONFIG_IMX_IPUV3_CORE=y
CONFIG_DRM=y
+CONFIG_DRM_MSM=y
CONFIG_DRM_PANEL_LVDS=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV=m
CONFIG_DRM_OMAP_PANEL_DPI=m
CONFIG_DRM_OMAP_PANEL_DSI_CM=m
-CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM=m
-CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02=m
-CONFIG_DRM_OMAP_PANEL_SHARP_LS037V7DW01=m
-CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1=m
-CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1=m
-CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11=m
CONFIG_DRM_TILCDC=m
CONFIG_DRM_PANEL_SIMPLE=m
CONFIG_DRM_TI_TFP410=m
+CONFIG_DRM_PANEL_LG_LB035Q02=m
+CONFIG_DRM_PANEL_NEC_NL8048HL11=m
+CONFIG_DRM_PANEL_SHARP_LS037V7DW01=m
+CONFIG_DRM_PANEL_SONY_ACX565AKM=m
+CONFIG_DRM_PANEL_TPO_TD028TTEC1=m
+CONFIG_DRM_PANEL_TPO_TD043MTEA1=m
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_MODE_HELPERS=y
#ifndef __ASSEMBLY__
#ifdef CONFIG_CPU_CP15_MMU
-static inline unsigned int get_domain(void)
+static __always_inline unsigned int get_domain(void)
{
unsigned int domain;
return domain;
}
-static inline void set_domain(unsigned val)
+static __always_inline void set_domain(unsigned int val)
{
asm volatile(
"mcr p15, 0, %0, c3, c0 @ set domain"
isb();
}
#else
-static inline unsigned int get_domain(void)
+static __always_inline unsigned int get_domain(void)
{
return 0;
}
-static inline void set_domain(unsigned val)
+static __always_inline void set_domain(unsigned int val)
{
}
#endif
* perform such accesses (eg, via list poison values) which could then
 * be exploited for privilege escalation.
*/
-static inline unsigned int uaccess_save_and_enable(void)
+static __always_inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
unsigned int old_domain = get_domain();
#endif
}
-static inline void uaccess_restore(unsigned int flags)
+static __always_inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/* Restore the user access mask */
* The following fragment of code is executed with the MMU on in MMU mode,
* and uses absolute addresses; this is not position independent.
*
- * r0 = cp#15 control register
+ * r0 = cp#15 control register (exc_ret for M-class)
* r1 = machine ID
* r2 = atags/dtb pointer
* r9 = processor ID
#ifdef CONFIG_CPU_CP15
.long cr_alignment @ r3
#else
- .long 0 @ r3
+M_CLASS(.long exc_ret) @ r3
+AR_CLASS(.long 0) @ r3
#endif
.size __mmap_switched_data, . - __mmap_switched_data
bic r0, r0, #V7M_SCB_CCR_IC
#endif
str r0, [r12, V7M_SCB_CCR]
+ /* Pass exc_ret to __mmap_switched */
+ mov r0, r10
#endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
ret lr
ENDPROC(__after_proc_init)
};
static const struct dma_slave_map dm365_edma_map[] = {
- { "davinci-mcbsp.0", "tx", EDMA_FILTER_PARAM(0, 2) },
- { "davinci-mcbsp.0", "rx", EDMA_FILTER_PARAM(0, 3) },
+ { "davinci-mcbsp", "tx", EDMA_FILTER_PARAM(0, 2) },
+ { "davinci-mcbsp", "rx", EDMA_FILTER_PARAM(0, 3) },
{ "davinci_voicecodec", "tx", EDMA_FILTER_PARAM(0, 2) },
{ "davinci_voicecodec", "rx", EDMA_FILTER_PARAM(0, 3) },
{ "spi_davinci.2", "tx", EDMA_FILTER_PARAM(0, 10) },
.reset_name = "mmu",
.assert_reset = omap_device_assert_hardreset,
.deassert_reset = omap_device_deassert_hardreset,
+ .device_enable = omap_device_enable,
+ .device_idle = omap_device_idle,
+};
+
+static struct iommu_platform_data omap3_iommu_isp_pdata = {
+ .device_enable = omap_device_enable,
+ .device_idle = omap_device_idle,
};
static int omap3_sbc_t3730_twl_callback(struct device *dev,
.reset_name = "mmu_cache",
.assert_reset = omap_device_assert_hardreset,
.deassert_reset = omap_device_deassert_hardreset,
+ .device_enable = omap_device_enable,
+ .device_idle = omap_device_idle,
};
#endif
#ifdef CONFIG_ARCH_OMAP3
OF_DEV_AUXDATA("ti,omap2-iommu", 0x5d000000, "5d000000.mmu",
&omap3_iommu_pdata),
+ OF_DEV_AUXDATA("ti,omap2-iommu", 0x480bd400, "480bd400.mmu",
+ &omap3_iommu_isp_pdata),
OF_DEV_AUXDATA("ti,omap3-smartreflex-core", 0x480cb000,
"480cb000.smartreflex", &omap_sr_pdata[OMAP_SR_CORE]),
OF_DEV_AUXDATA("ti,omap3-smartreflex-mpu-iva", 0x480c9000,
__put32_unaligned_check("strbt", val, addr)
static void
-do_alignment_finish_ldst(unsigned long addr, unsigned long instr, struct pt_regs *regs, union offset_union offset)
+do_alignment_finish_ldst(unsigned long addr, u32 instr, struct pt_regs *regs, union offset_union offset)
{
if (!LDST_U_BIT(instr))
offset.un = -offset.un;
}
static int
-do_alignment_ldrhstrh(unsigned long addr, unsigned long instr, struct pt_regs *regs)
+do_alignment_ldrhstrh(unsigned long addr, u32 instr, struct pt_regs *regs)
{
unsigned int rd = RD_BITS(instr);
}
static int
-do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
- struct pt_regs *regs)
+do_alignment_ldrdstrd(unsigned long addr, u32 instr, struct pt_regs *regs)
{
unsigned int rd = RD_BITS(instr);
unsigned int rd2;
}
static int
-do_alignment_ldrstr(unsigned long addr, unsigned long instr, struct pt_regs *regs)
+do_alignment_ldrstr(unsigned long addr, u32 instr, struct pt_regs *regs)
{
unsigned int rd = RD_BITS(instr);
* PU = 10 A B
*/
static int
-do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *regs)
+do_alignment_ldmstm(unsigned long addr, u32 instr, struct pt_regs *regs)
{
unsigned int rd, rn, correction, nr_regs, regbits;
unsigned long eaddr, newaddr;
* processor for us.
*/
if (addr != eaddr) {
- pr_err("LDMSTM: PC = %08lx, instr = %08lx, "
+ pr_err("LDMSTM: PC = %08lx, instr = %08x, "
"addr = %08lx, eaddr = %08lx\n",
instruction_pointer(regs), instr, addr, eaddr);
show_regs(regs);
* 2. Register name Rt from ARMv7 is same as Rd from ARMv6 (Rd is Rt)
*/
static void *
-do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
+do_alignment_t32_to_handler(u32 *pinstr, struct pt_regs *regs,
union offset_union *poffset)
{
- unsigned long instr = *pinstr;
+ u32 instr = *pinstr;
u16 tinst1 = (instr >> 16) & 0xffff;
u16 tinst2 = instr & 0xffff;
return NULL;
}
+static int alignment_get_arm(struct pt_regs *regs, u32 *ip, u32 *inst)
+{
+ u32 instr = 0;
+ int fault;
+
+ if (user_mode(regs))
+ fault = get_user(instr, ip);
+ else
+ fault = probe_kernel_address(ip, instr);
+
+ *inst = __mem_to_opcode_arm(instr);
+
+ return fault;
+}
+
+static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst)
+{
+ u16 instr = 0;
+ int fault;
+
+ if (user_mode(regs))
+ fault = get_user(instr, ip);
+ else
+ fault = probe_kernel_address(ip, instr);
+
+ *inst = __mem_to_opcode_thumb16(instr);
+
+ return fault;
+}
+
static int
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
union offset_union uninitialized_var(offset);
- unsigned long instr = 0, instrptr;
- int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
+ unsigned long instrptr;
+ int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
unsigned int type;
- unsigned int fault;
+ u32 instr = 0;
u16 tinstr = 0;
int isize = 4;
int thumb2_32b = 0;
+ int fault;
if (interrupts_enabled(regs))
local_irq_enable();
if (thumb_mode(regs)) {
u16 *ptr = (u16 *)(instrptr & ~1);
- fault = probe_kernel_address(ptr, tinstr);
- tinstr = __mem_to_opcode_thumb16(tinstr);
+
+ fault = alignment_get_thumb(regs, ptr, &tinstr);
if (!fault) {
if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
IS_T32(tinstr)) {
/* Thumb-2 32-bit */
- u16 tinst2 = 0;
- fault = probe_kernel_address(ptr + 1, tinst2);
- tinst2 = __mem_to_opcode_thumb16(tinst2);
+ u16 tinst2;
+ fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
instr = __opcode_thumb32_compose(tinstr, tinst2);
thumb2_32b = 1;
} else {
}
}
} else {
- fault = probe_kernel_address((void *)instrptr, instr);
- instr = __mem_to_opcode_arm(instr);
+ fault = alignment_get_arm(regs, (void *)instrptr, &instr);
}
if (fault) {
* Oops, we didn't handle the instruction.
*/
pr_err("Alignment trap: not handling instruction "
- "%0*lx at [<%08lx>]\n",
+ "%0*x at [<%08lx>]\n",
isize << 1,
isize == 2 ? tinstr : instr, instrptr);
ai_skipped += 1;
ai_user += 1;
if (ai_usermode & UM_WARN)
- printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
+ printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*x "
"Address=0x%08lx FSR 0x%03x\n", current->comm,
task_pid_nr(current), instrptr,
isize << 1,
dsb
mov r6, lr @ save LR
ldr sp, =init_thread_union + THREAD_START_SP
- stmia sp, {r0-r3, r12}
cpsie i
svc #0
1: cpsid i
- ldr r0, =exc_ret
- orr lr, lr, #EXC_RET_THREADMODE_PROCESSSTACK
- str lr, [r0]
+ /* Calculate exc_ret */
+ orr r10, lr, #EXC_RET_THREADMODE_PROCESSSTACK
ldmia sp, {r0-r3, r12}
str r5, [r12, #11 * 4] @ restore the original SVC vector entry
mov lr, r6 @ restore LR
If unsure, say Y.
+config CAVIUM_TX2_ERRATUM_219
+ bool "Cavium ThunderX2 erratum 219: PRFM between TTBR change and ISB fails"
+ default y
+ help
+ On Cavium ThunderX2, a load, store or prefetch instruction between a
+ TTBR update and the corresponding context synchronizing operation can
+ cause a spurious Data Abort to be delivered to any hardware thread in
+ the CPU core.
+
+ Work around the issue by avoiding the problematic code sequence and
+ trapping KVM guest TTBRx_EL1 writes to EL2 when SMT is enabled. The
+ trap handler performs the corresponding register access, skips the
+ instruction and ensures context synchronization by virtue of the
+ exception return.
+
+ If unsure, say Y.
+
config QCOM_FALKOR_ERRATUM_1003
bool "Falkor E1003: Incorrect translation due to ASID change"
default y
reg = <1>;
};
};
+
+&reg_dc1sw {
+ /*
+ * Ethernet PHY needs 30ms to properly power up and some more
+ * to initialize. 100ms should be plenty of time to finish
+ * whole process.
+ */
+ regulator-enable-ramp-delay = <100000>;
+};
};
&reg_dc1sw {
+ /*
+ * Ethernet PHY needs 30ms to properly power up and some more
+ * to initialize. 100ms should be plenty of time to finish
+ * whole process.
+ */
+ regulator-enable-ramp-delay = <100000>;
regulator-name = "vcc-phy";
};
clock-output-names = "ext-osc32k";
};
- pmu {
- compatible = "arm,cortex-a53-pmu";
- interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
- };
-
psci {
compatible = "arm,psci-0.2";
method = "smc";
pinmux: pinmux@14029c {
compatible = "pinctrl-single";
- reg = <0x0014029c 0x250>;
+ reg = <0x0014029c 0x26c>;
#address-cells = <1>;
#size-cells = <1>;
pinctrl-single,register-width = <32>;
pinctrl-single,function-mask = <0xf>;
pinctrl-single,gpio-range = <
- &range 0 154 MODE_GPIO
+ &range 0 91 MODE_GPIO
+ &range 95 60 MODE_GPIO
>;
range: gpio-range {
#pinctrl-single,gpio-range-cells = <3>;
<&pinmux 108 16 27>,
<&pinmux 135 77 6>,
<&pinmux 141 67 4>,
- <&pinmux 145 149 6>,
- <&pinmux 151 91 4>;
+ <&pinmux 145 149 6>;
};
i2c1: i2c@e0000 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster0_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@1 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster0_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@100 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster1_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@101 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster1_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@200 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster2_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@201 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster2_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@300 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster3_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@301 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster3_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@400 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster4_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@401 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster4_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@500 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster5_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@501 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster5_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@600 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster6_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@601 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster6_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@700 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster7_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cpu@701 {
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster7_l2>;
- cpu-idle-states = <&cpu_pw20>;
+ cpu-idle-states = <&cpu_pw15>;
};
cluster0_l2: l2-cache0 {
cache-level = <2>;
};
- cpu_pw20: cpu-pw20 {
+ cpu_pw15: cpu-pw15 {
compatible = "arm,idle-state";
- idle-state-name = "PW20";
+ idle-state-name = "PW15";
arm,psci-suspend-param = <0x0>;
entry-latency-us = <2000>;
exit-latency-us = <2000>;
compatible = "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc";
reg = <0x30b40000 0x10000>;
interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_DUMMY>,
+ clocks = <&clk IMX8MM_CLK_IPG_ROOT>,
<&clk IMX8MM_CLK_NAND_USDHC_BUS>,
<&clk IMX8MM_CLK_USDHC1_ROOT>;
clock-names = "ipg", "ahb", "per";
compatible = "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc";
reg = <0x30b50000 0x10000>;
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_DUMMY>,
+ clocks = <&clk IMX8MM_CLK_IPG_ROOT>,
<&clk IMX8MM_CLK_NAND_USDHC_BUS>,
<&clk IMX8MM_CLK_USDHC2_ROOT>;
clock-names = "ipg", "ahb", "per";
compatible = "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc";
reg = <0x30b60000 0x10000>;
interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_DUMMY>,
+ clocks = <&clk IMX8MM_CLK_IPG_ROOT>,
<&clk IMX8MM_CLK_NAND_USDHC_BUS>,
<&clk IMX8MM_CLK_USDHC3_ROOT>;
clock-names = "ipg", "ahb", "per";
compatible = "fsl,imx8mn-usdhc", "fsl,imx7d-usdhc";
reg = <0x30b40000 0x10000>;
interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MN_CLK_DUMMY>,
+ clocks = <&clk IMX8MN_CLK_IPG_ROOT>,
<&clk IMX8MN_CLK_NAND_USDHC_BUS>,
<&clk IMX8MN_CLK_USDHC1_ROOT>;
clock-names = "ipg", "ahb", "per";
compatible = "fsl,imx8mn-usdhc", "fsl,imx7d-usdhc";
reg = <0x30b50000 0x10000>;
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MN_CLK_DUMMY>,
+ clocks = <&clk IMX8MN_CLK_IPG_ROOT>,
<&clk IMX8MN_CLK_NAND_USDHC_BUS>,
<&clk IMX8MN_CLK_USDHC2_ROOT>;
clock-names = "ipg", "ahb", "per";
compatible = "fsl,imx8mn-usdhc", "fsl,imx7d-usdhc";
reg = <0x30b60000 0x10000>;
interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MN_CLK_DUMMY>,
+ clocks = <&clk IMX8MN_CLK_IPG_ROOT>,
<&clk IMX8MN_CLK_NAND_USDHC_BUS>,
<&clk IMX8MN_CLK_USDHC3_ROOT>;
clock-names = "ipg", "ahb", "per";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1000000>;
gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
- states = <1000000 0x0
- 900000 0x1>;
+ states = <1000000 0x1
+ 900000 0x0>;
regulator-always-on;
};
};
"fsl,imx7d-usdhc";
reg = <0x30b40000 0x10000>;
interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MQ_CLK_DUMMY>,
+ clocks = <&clk IMX8MQ_CLK_IPG_ROOT>,
<&clk IMX8MQ_CLK_NAND_USDHC_BUS>,
<&clk IMX8MQ_CLK_USDHC1_ROOT>;
clock-names = "ipg", "ahb", "per";
"fsl,imx7d-usdhc";
reg = <0x30b50000 0x10000>;
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MQ_CLK_DUMMY>,
+ clocks = <&clk IMX8MQ_CLK_IPG_ROOT>,
<&clk IMX8MQ_CLK_NAND_USDHC_BUS>,
<&clk IMX8MQ_CLK_USDHC2_ROOT>;
clock-names = "ipg", "ahb", "per";
gpio = <&gpiosb 0 GPIO_ACTIVE_HIGH>;
};
- usb3_phy: usb3-phy {
- compatible = "usb-nop-xceiv";
- vcc-supply = <&exp_usb3_vbus>;
- };
-
vsdc_reg: vsdc-reg {
compatible = "regulator-gpio";
regulator-name = "vsdc";
status = "okay";
};
+&comphy2 {
+ connector {
+ compatible = "usb-a-connector";
+ phy-supply = <&exp_usb3_vbus>;
+ };
+};
+
&usb3 {
status = "okay";
phys = <&comphy2 0>;
- usb-phy = <&usb3_phy>;
};
&mdio {
power-supply = <&pp3300_disp>;
panel-timing {
- clock-frequency = <266604720>;
+ clock-frequency = <266666667>;
hactive = <2400>;
hfront-porch = <48>;
hback-porch = <84>;
status = "okay";
u2phy0_host: host-port {
- phy-supply = <&vcc5v0_host>;
+ phy-supply = <&vcc5v0_typec>;
status = "okay";
};
&usbdrd_dwc3_0 {
status = "okay";
- dr_mode = "otg";
+ dr_mode = "host";
};
&usbdrd3_1 {
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <1400000>;
+ regulator-max-microvolt = <1700000>;
vin-supply = <&vcc5v0_sys>;
};
};
rk808: pmic@1b {
compatible = "rockchip,rk808";
reg = <0x1b>;
- interrupt-parent = <&gpio1>;
- interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
#clock-cells = <1>;
clock-output-names = "xin32k", "rk808-clkout2";
pinctrl-names = "default";
pmic {
pmic_int_l: pmic-int-l {
- rockchip,pins = <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
};
vsel1_gpio: vsel1-gpio {
&sdmmc {
bus-width = <4>;
- cap-mmc-highspeed;
cap-sd-highspeed;
cd-gpios = <&gpio0 7 GPIO_ACTIVE_LOW>;
disable-wp;
&sdhci {
bus-width = <8>;
- mmc-hs400-1_8v;
- mmc-hs400-enhanced-strobe;
+ mmc-hs200-1_8v;
non-removable;
status = "okay";
};
/*
* Remove the address tag from a virtual address, if present.
*/
- .macro clear_address_tag, dst, addr
- tst \addr, #(1 << 55)
- bic \dst, \addr, #(0xff << 56)
- csel \dst, \dst, \addr, eq
+ .macro untagged_addr, dst, addr
+ sbfx \dst, \addr, #0, #56
+ and \dst, \dst, \addr
.endm
#endif
#define ARM64_HAS_IRQ_PRIO_MASKING 42
#define ARM64_HAS_DCPODP 43
#define ARM64_WORKAROUND_1463225 44
+#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45
+#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
-#define ARM64_NCAPS 45
+#define ARM64_NCAPS 47
#endif /* __ASM_CPUCAPS_H */
* up with a tagged userland pointer. Clear the tag to get a sane pointer to
* pass on to access_ok(), for instance.
*/
-#define untagged_addr(addr) \
+#define __untagged_addr(addr) \
((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
+#define untagged_addr(addr) ({ \
+ u64 __addr = (__force u64)addr; \
+ __addr &= __untagged_addr(__addr); \
+ (__force __typeof__(addr))__addr; \
+})
+
#ifdef CONFIG_KASAN_SW_TAGS
#define __tag_shifted(tag) ((u64)(tag) << 56)
-#define __tag_reset(addr) untagged_addr(addr)
+#define __tag_reset(addr) __untagged_addr(addr)
#define __tag_get(addr) (__u8)((u64)(addr) >> 56)
#else
#define __tag_shifted(tag) 0UL
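As a quick sanity check of the masking trick above, here is a stand-alone
userspace model (plain C, not kernel code; the function name and test values
are made up, and the shift pair mirrors the kernel's sign_extend64() idiom):

#include <stdint.h>
#include <stdio.h>

/* Model of untagged_addr(): sign-extend the address from bit 55, then AND
 * with the original. A user pointer (bit 55 clear) loses its top-byte tag;
 * a kernel pointer (bit 55 set) yields an all-ones top byte in the mask
 * and passes through unchanged. */
static uint64_t untag_model(uint64_t addr)
{
	uint64_t mask = (uint64_t)(((int64_t)(addr << 8)) >> 8);
	return addr & mask;
}

int main(void)
{
	/* tagged user pointer: tag byte 0x5a is stripped */
	printf("%016llx\n", (unsigned long long)untag_model(0x5a00ffff12345678ULL));
	/* kernel pointer: returned intact */
	printf("%016llx\n", (unsigned long long)untag_model(0xffff000012345678ULL));
	return 0;
}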
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
-#define kc_vaddr_to_offset(v) ((v) & ~PAGE_END)
-#define kc_offset_to_vaddr(o) ((o) | PAGE_END)
-
#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
-#define SYS_PAR_EL1_F BIT(1)
+#define SYS_PAR_EL1_F BIT(0)
#define SYS_PAR_EL1_FST GENMASK(6, 1)
/*** Statistical Profiling Extension ***/
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
+#include <asm/smp_plat.h>
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
return (need_wa > 0);
}
+static const __maybe_unused struct midr_range tx2_family_cpus[] = {
+ MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+ MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+ {},
+};
+
+static bool __maybe_unused
+needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ int i;
+
+ if (!is_affected_midr_range_list(entry, scope) ||
+ !is_hyp_mode_available())
+ return false;
+
+ for_each_possible_cpu(i) {
+ if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
+ return true;
+ }
+
+ return false;
+}
+
#ifdef CONFIG_HARDEN_EL2_VECTORS
static const struct midr_range arm64_harden_el2_vectors[] = {
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = has_cortex_a76_erratum_1463225,
},
+#endif
+#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
+ {
+ .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
+ .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
+ ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+ .matches = needs_tx2_tvm_workaround,
+ },
+ {
+ .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
+ .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
+ ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+ },
#endif
{
}
};
static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
ARM64_FTR_END,
};
*/
mrs x3, far_el1
inherit_daif pstate=x23, tmp=x2
- clear_address_tag x0, x3
+ untagged_addr x0, x3
mov x2, sp // struct pt_regs
bl do_mem_abort
orr x24, x24, x0
alternative_else_nop_endif
cbnz x24, 1f // preempt count != 0 || NMI return path
- bl preempt_schedule_irq // irq en/disable is done inside
+ bl arm64_preempt_schedule_irq // irq en/disable is done inside
1:
#endif
mrs x26, far_el1
ct_user_exit_irqoff
enable_daif
- clear_address_tag x0, x26
+ untagged_addr x0, x26
mov x1, x25
mov x2, sp
bl do_mem_abort
#else
ldr x30, =vectors
#endif
+alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
prfm plil1strm, [x30, #(1b - tramp_vectors)]
+alternative_else_nop_endif
msr vbar_el1, x30
add x30, x30, #(1b - tramp_vectors)
isb
gfp_t mask)
{
int rc = 0;
+ pgd_t *trans_pgd;
pgd_t *pgdp;
pud_t *pudp;
pmd_t *pmdp;
memcpy((void *)dst, src_start, length);
__flush_icache_range(dst, dst + length);
- pgdp = pgd_offset_raw(allocator(mask), dst_addr);
+ trans_pgd = allocator(mask);
+ if (!trans_pgd) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ pgdp = pgd_offset_raw(trans_pgd, dst_addr);
if (pgd_none(READ_ONCE(*pgdp))) {
pudp = allocator(mask);
if (!pudp) {
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
+#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <asm/alternative.h>
#include <asm/arch_gicv3.h>
#include <asm/compat.h>
+#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
core_initcall(tagged_addr_init);
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
+
+asmlinkage void __sched arm64_preempt_schedule_irq(void)
+{
+ lockdep_assert_irqs_disabled();
+
+ /*
+ * Preempting a task from an IRQ means we leave copies of PSTATE
+ * on the stack. cpufeature's enable calls may modify PSTATE, but
+ * resuming one of these preempted tasks would undo those changes.
+ *
+ * Only allow a task to be preempted once cpufeatures have been
+ * enabled.
+ */
+ if (static_branch_likely(&arm64_const_caps_ready))
+ preempt_schedule_irq();
+}
{
u64 hcr = vcpu->arch.hcr_el2;
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
+ hcr |= HCR_TVM;
+
write_sysreg(hcr, hcr_el2);
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
* the crucial bit is "On taking a vSError interrupt,
* HCR_EL2.VSE is cleared to 0."
*/
- if (vcpu->arch.hcr_el2 & HCR_VSE)
- vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);
+ if (vcpu->arch.hcr_el2 & HCR_VSE) {
+ vcpu->arch.hcr_el2 &= ~HCR_VSE;
+ vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
+ }
if (has_vhe())
deactivate_traps_vhe();
return true;
}
+static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
+{
+ u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
+ int rt = kvm_vcpu_sys_get_rt(vcpu);
+ u64 val = vcpu_get_reg(vcpu, rt);
+
+ /*
+ * The normal sysreg handling code expects to see the traps,
+ * let's not do anything here.
+ */
+ if (vcpu->arch.hcr_el2 & HCR_TVM)
+ return false;
+
+ switch (sysreg) {
+ case SYS_SCTLR_EL1:
+ write_sysreg_el1(val, SYS_SCTLR);
+ break;
+ case SYS_TTBR0_EL1:
+ write_sysreg_el1(val, SYS_TTBR0);
+ break;
+ case SYS_TTBR1_EL1:
+ write_sysreg_el1(val, SYS_TTBR1);
+ break;
+ case SYS_TCR_EL1:
+ write_sysreg_el1(val, SYS_TCR);
+ break;
+ case SYS_ESR_EL1:
+ write_sysreg_el1(val, SYS_ESR);
+ break;
+ case SYS_FAR_EL1:
+ write_sysreg_el1(val, SYS_FAR);
+ break;
+ case SYS_AFSR0_EL1:
+ write_sysreg_el1(val, SYS_AFSR0);
+ break;
+ case SYS_AFSR1_EL1:
+ write_sysreg_el1(val, SYS_AFSR1);
+ break;
+ case SYS_MAIR_EL1:
+ write_sysreg_el1(val, SYS_MAIR);
+ break;
+ case SYS_AMAIR_EL1:
+ write_sysreg_el1(val, SYS_AMAIR);
+ break;
+ case SYS_CONTEXTIDR_EL1:
+ write_sysreg_el1(val, SYS_CONTEXTIDR);
+ break;
+ default:
+ return false;
+ }
+
+ __kvm_skip_instr(vcpu);
+ return true;
+}
+
/*
* Return true when we were able to fixup the guest exit and should return to
* the guest, false when we should restore the host state and return to the
if (*exit_code != ARM_EXCEPTION_TRAP)
goto exit;
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
+ kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
+ handle_tx2_tvm(vcpu))
+ return true;
+
/*
* We trap the first access to the FP/SIMD to save the host context
* and restore the guest context lazily.
*/
val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
| (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
+ if (!system_supports_32bit_el0())
+ val |= ARMV8_PMU_PMCR_LC;
__vcpu_sys_reg(vcpu, r->reg) = val;
}
val = __vcpu_sys_reg(vcpu, PMCR_EL0);
val &= ~ARMV8_PMU_PMCR_MASK;
val |= p->regval & ARMV8_PMU_PMCR_MASK;
+ if (!system_supports_32bit_el0())
+ val |= ARMV8_PMU_PMCR_LC;
__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
kvm_pmu_handle_pmcr(vcpu, val);
kvm_vcpu_pmu_restore_guest(vcpu);
par = read_sysreg(par_el1);
local_irq_restore(flags);
+ /*
+ * If we now have a valid translation, treat the translation fault as
+ * spurious.
+ */
if (!(par & SYS_PAR_EL1_F))
- return false;
+ return true;
/*
* If we got a different type of fault from the AT instruction,
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
-#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_mostly __section(.data..read_mostly)
void parisc_cache_init(void); /* initializes cache-flushing */
void disable_sr_hashing_asm(int); /* low level support for above */
})
#ifdef CONFIG_SMP
-# define __lock_aligned __attribute__((__section__(".data..lock_aligned")))
+# define __lock_aligned __section(.data..lock_aligned)
#endif
#endif /* __PARISC_LDCW_H */
* arch/parisc/mm/ioremap.c
*
* (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de>
+ * (C) Copyright 2001-2019 Helge Deller <deller@gmx.de>
* (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
*/
addr = (void __iomem *) area->addr;
if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
phys_addr, pgprot)) {
- vfree(addr);
+ vunmap(addr);
return NULL;
}
}
EXPORT_SYMBOL(__ioremap);
-void iounmap(const volatile void __iomem *addr)
+void iounmap(const volatile void __iomem *io_addr)
{
- if (addr > high_memory)
- return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
+ unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
+
+ if (is_vmalloc_addr((void *)addr))
+ vunmap((void *)addr);
}
EXPORT_SYMBOL(iounmap);
struct kvmppc_xive *xive = dev->private;
struct kvmppc_xive_vcpu *xc;
int i, r = -EBUSY;
+ u32 vp_id;
pr_devel("connect_vcpu(cpu=%d)\n", cpu);
return -EPERM;
if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
return -EBUSY;
- if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
- pr_devel("Duplicate !\n");
- return -EEXIST;
- }
if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
pr_devel("Out of bounds !\n");
return -EINVAL;
}
- xc = kzalloc(sizeof(*xc), GFP_KERNEL);
- if (!xc)
- return -ENOMEM;
/* We need to synchronize with queue provisioning */
mutex_lock(&xive->lock);
+
+ vp_id = kvmppc_xive_vp(xive, cpu);
+ if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
+ pr_devel("Duplicate !\n");
+ r = -EEXIST;
+ goto bail;
+ }
+
+ xc = kzalloc(sizeof(*xc), GFP_KERNEL);
+ if (!xc) {
+ r = -ENOMEM;
+ goto bail;
+ }
+
vcpu->arch.xive_vcpu = xc;
xc->xive = xive;
xc->vcpu = vcpu;
xc->server_num = cpu;
- xc->vp_id = kvmppc_xive_vp(xive, cpu);
+ xc->vp_id = vp_id;
xc->mfrr = 0xff;
xc->valid = true;
return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
}
+static inline bool kvmppc_xive_vp_in_use(struct kvm *kvm, u32 vp_id)
+{
+ struct kvm_vcpu *vcpu = NULL;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id)
+ return true;
+ }
+ return false;
+}
+
/*
* Mapping between guest priorities and host priorities
* is as follow.
struct kvmppc_xive *xive = dev->private;
struct kvmppc_xive_vcpu *xc = NULL;
int rc;
+ u32 vp_id;
pr_devel("native_connect_vcpu(server=%d)\n", server_num);
mutex_lock(&xive->lock);
- if (kvmppc_xive_find_server(vcpu->kvm, server_num)) {
+ vp_id = kvmppc_xive_vp(xive, server_num);
+ if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
pr_devel("Duplicate !\n");
rc = -EEXIST;
goto bail;
xc->vcpu = vcpu;
xc->server_num = server_num;
- xc->vp_id = kvmppc_xive_vp(xive, server_num);
+ xc->vp_id = vp_id;
xc->valid = true;
vcpu->arch.irq_type = KVMPPC_IRQ_XIVE;
compatible = "sifive,hifive-unleashed-a00", "sifive,fu540-c000";
chosen {
+ stdout-path = "serial0";
};
cpus {
#define VMALLOC_END (PAGE_OFFSET - 1)
#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
-#define FIXADDR_TOP VMALLOC_START
-#ifdef CONFIG_64BIT
-#define FIXADDR_SIZE PMD_SIZE
-#else
-#define FIXADDR_SIZE PGDIR_SIZE
-#endif
-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
-
/*
* Roughly size the vmemmap space to be large enough to fit enough
* struct pages to map half the virtual address space. Then
#define vmemmap ((struct page *)VMEMMAP_START)
+#define FIXADDR_TOP (VMEMMAP_START)
+#ifdef CONFIG_64BIT
+#define FIXADDR_SIZE PMD_SIZE
+#else
+#define FIXADDR_SIZE PGDIR_SIZE
+#endif
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
/*
* ZERO_PAGE is a global shared page that is always zero,
* used for zero-mapped memory areas, etc.
#include <linux/mm_types.h>
#include <asm/smp.h>
-/*
- * Flush entire local TLB. 'sfence.vma' implicitly fences with the instruction
- * cache as well, so a 'fence.i' is not necessary.
- */
static inline void local_flush_tlb_all(void)
{
__asm__ __volatile__ ("sfence.vma" : : : "memory");
asmlinkage void do_trap_break(struct pt_regs *regs)
{
- if (!user_mode(regs)) {
+ if (user_mode(regs)) {
+ force_sig_fault(SIGTRAP, TRAP_BRKPT,
+ (void __user *)(regs->sepc));
+ return;
+ }
+#ifdef CONFIG_GENERIC_BUG
+ {
enum bug_trap_type type;
type = report_bug(regs->sepc, regs);
- switch (type) {
-#ifdef CONFIG_GENERIC_BUG
- case BUG_TRAP_TYPE_WARN:
+ if (type == BUG_TRAP_TYPE_WARN) {
regs->sepc += get_break_insn_length(regs->sepc);
return;
- case BUG_TRAP_TYPE_BUG:
-#endif /* CONFIG_GENERIC_BUG */
- default:
- die(regs, "Kernel BUG");
}
- } else {
- force_sig_fault(SIGTRAP, TRAP_BRKPT,
- (void __user *)(regs->sepc));
}
+#endif /* CONFIG_GENERIC_BUG */
+
+ die(regs, "Kernel BUG");
}
#ifdef CONFIG_GENERIC_BUG
dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
for (rela = rela_start; rela < rela_end; rela++) {
loc = rela->r_offset + offset;
- val = rela->r_addend + offset;
+ val = rela->r_addend;
r_sym = ELF64_R_SYM(rela->r_info);
- if (r_sym)
- val += dynsym[r_sym].st_value;
+ if (r_sym) {
+ if (dynsym[r_sym].st_shndx != SHN_UNDEF)
+ val += dynsym[r_sym].st_value + offset;
+ } else {
+ /*
+ * 0 == undefined symbol table index (STN_UNDEF),
+ * used for R_390_RELATIVE, only add KASLR offset
+ */
+ val += offset;
+ }
r_type = ELF64_R_TYPE(rela->r_info);
rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
if (rc)
*(u32 *)loc = val;
break;
case R_390_64: /* Direct 64 bit. */
+ case R_390_GLOB_DAT:
*(u64 *)loc = val;
break;
case R_390_PC16: /* PC relative 16 bit. */
select RTC_DRV_M48T59
select RTC_SYSTOHC
select HAVE_ARCH_JUMP_LABEL if SPARC64
- select HAVE_FAST_GUP if SPARC64
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_PCI_IOMAP
*/
struct mem_vector immovable_mem[MAX_NUMNODES*2];
-/*
- * Max length of 64-bit hex address string is 19, prefix "0x" + 16 hex
- * digits, and '\0' for termination.
- */
-#define MAX_ADDR_LEN 19
-
-static acpi_physical_address get_cmdline_acpi_rsdp(void)
-{
- acpi_physical_address addr = 0;
-
-#ifdef CONFIG_KEXEC
- char val[MAX_ADDR_LEN] = { };
- int ret;
-
- ret = cmdline_find_option("acpi_rsdp", val, MAX_ADDR_LEN);
- if (ret < 0)
- return 0;
-
- if (kstrtoull(val, 16, &addr))
- return 0;
-#endif
- return addr;
-}
-
/*
* Search EFI system tables for RSDP. If both ACPI_20_TABLE_GUID and
* ACPI_TABLE_GUID are found, take the former, which has more features.
}
#if defined(CONFIG_RANDOMIZE_BASE) && defined(CONFIG_MEMORY_HOTREMOVE)
+/*
+ * Max length of 64-bit hex address string is 19, prefix "0x" + 16 hex
+ * digits, and '\0' for termination.
+ */
+#define MAX_ADDR_LEN 19
+
+static acpi_physical_address get_cmdline_acpi_rsdp(void)
+{
+ acpi_physical_address addr = 0;
+
+#ifdef CONFIG_KEXEC
+ char val[MAX_ADDR_LEN] = { };
+ int ret;
+
+ ret = cmdline_find_option("acpi_rsdp", val, MAX_ADDR_LEN);
+ if (ret < 0)
+ return 0;
+
+ if (kstrtoull(val, 16, &addr))
+ return 0;
+#endif
+ return addr;
+}
+
/* Compute SRAT address from RSDP. */
static unsigned long get_acpi_srat_table(void)
{
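get_cmdline_acpi_rsdp() is moved under the CONFIG_RANDOMIZE_BASE/CONFIG_MEMORY_HOTREMOVE guard where its only caller lives. As a rough userspace analogue of what it does, assuming strstr()/strtoull() in place of the kernel's cmdline_find_option()/kstrtoull() (the kernel helper tokenizes options properly rather than substring-matching, so this is a simplification):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long long parse_acpi_rsdp(const char *cmdline)
{
        const char *opt = strstr(cmdline, "acpi_rsdp=");

        if (!opt)
                return 0;
        /* Like kstrtoull(val, 16, ...): accepts an optional 0x prefix. */
        return strtoull(opt + strlen("acpi_rsdp="), NULL, 16);
}

int main(void)
{
        printf("%#llx\n", parse_acpi_rsdp("quiet acpi_rsdp=0x7fe386a0 ro"));
        return 0;
}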
{
const unsigned long kernel_total_size = VO__end - VO__text;
unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
+ unsigned long needed_size;
/* Retain x86 boot parameters pointer passed from startup_32/64. */
boot_params = rmode;
free_mem_ptr = heap; /* Heap */
free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
+ /*
+ * The memory hole needed for the kernel is the larger of either
+ * the entire decompressed kernel plus relocation table, or the
+ * entire decompressed kernel plus .bss and .brk sections.
+ *
+ * On X86_64, the memory is mapped with PMD pages. Round the
+ * size up so that the full extent of PMD pages mapped is
+ * included in the check against the valid memory table
+ * entries. This ensures the full mapped area is usable RAM
+ * and doesn't include any reserved areas.
+ */
+ needed_size = max(output_len, kernel_total_size);
+#ifdef CONFIG_X86_64
+ needed_size = ALIGN(needed_size, MIN_KERNEL_ALIGN);
+#endif
+
/* Report initial kernel position details. */
debug_putaddr(input_data);
debug_putaddr(input_len);
debug_putaddr(output);
debug_putaddr(output_len);
debug_putaddr(kernel_total_size);
+ debug_putaddr(needed_size);
#ifdef CONFIG_X86_64
/* Report address of 32-bit trampoline */
debug_putaddr(trampoline_32bit);
#endif
- /*
- * The memory hole needed for the kernel is the larger of either
- * the entire decompressed kernel plus relocation table, or the
- * entire decompressed kernel plus .bss and .brk sections.
- */
choose_random_location((unsigned long)input_data, input_len,
(unsigned long *)&output,
- max(output_len, kernel_total_size),
+ needed_size,
&virt_addr);
/* Validate memory location choices. */
}
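A toy computation of needed_size as done above, assuming MIN_KERNEL_ALIGN equals the 2 MiB PMD size on x86-64 and inventing the input sizes:

#include <stdio.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))  /* a: power of two */
#define PMD_SIZE        (2UL << 20)

int main(void)
{
        unsigned long output_len = 9 << 20;             /* compressed + relocs */
        unsigned long kernel_total_size = 31 << 20;     /* VO__end - VO__text */
        unsigned long needed_size = kernel_total_size > output_len ?
                                    kernel_total_size : output_len;

        /* Round up so whole PMD mappings are checked against the table. */
        needed_size = ALIGN(needed_size, PMD_SIZE);
        printf("%lu MiB\n", needed_size >> 20);         /* prints: 32 MiB */
        return 0;
}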
if (ms_hyperv.hints & HV_X64_APIC_ACCESS_RECOMMENDED) {
- pr_info("Hyper-V: Using MSR based APIC access\n");
+ pr_info("Hyper-V: Using enlightened APIC (%s mode)",
+ x2apic_enabled() ? "x2apic" : "xapic");
+ /*
+ * With x2apic, architectural x2apic MSRs are equivalent to the
+ * respective synthetic MSRs, so there's no need to override
+ * the apic accessors. The only exception is
+ * hv_apic_eoi_write, because it benefits from lazy EOI when
+ * available, but it works for both xapic and x2apic modes.
+ */
apic_set_eoi_write(hv_apic_eoi_write);
- apic->read = hv_apic_read;
- apic->write = hv_apic_write;
- apic->icr_write = hv_apic_icr_write;
- apic->icr_read = hv_apic_icr_read;
+ if (!x2apic_enabled()) {
+ apic->read = hv_apic_read;
+ apic->write = hv_apic_write;
+ apic->icr_write = hv_apic_icr_write;
+ apic->icr_read = hv_apic_icr_read;
+ }
}
}
int (*set_nested_state)(struct kvm_vcpu *vcpu,
struct kvm_nested_state __user *user_kvm_nested_state,
struct kvm_nested_state *kvm_state);
- void (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
+ bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
int (*smi_allowed)(struct kvm_vcpu *vcpu);
int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
{
struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
- cpumask_clear_cpu(dead_cpu, &cmsk->mask);
+ if (cmsk)
+ cpumask_clear_cpu(dead_cpu, &cmsk->mask);
free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
return 0;
}
int hv_host_info_ecx;
int hv_host_info_edx;
+#ifdef CONFIG_PARAVIRT
+ pv_info.name = "Hyper-V";
+#endif
+
/*
* Extract the features and hints
*/
* we might write invalid pmds, when the kernel is relocated
* cleanup_highmap() fixes this up along with the mappings
* beyond _end.
+ *
+ * Only the region occupied by the kernel image has so far
+ * been checked against the table of usable memory regions
+ * provided by the firmware, so invalidate pages outside that
+ * region. A page table entry that maps to a reserved area of
+ * memory would allow processor speculation into that area,
+ * and on some hardware (particularly the UV platform) even
+ * speculative access to some reserved areas is caught as an
+ * error, causing the BIOS to halt the system.
*/
pmd = fixup_pointer(level2_kernel_pgt, physaddr);
- for (i = 0; i < PTRS_PER_PMD; i++) {
+
+ /* invalidate pages before the kernel image */
+ for (i = 0; i < pmd_index((unsigned long)_text); i++)
+ pmd[i] &= ~_PAGE_PRESENT;
+
+ /* fixup pages that are part of the kernel image */
+ for (; i <= pmd_index((unsigned long)_end); i++)
if (pmd[i] & _PAGE_PRESENT)
pmd[i] += load_delta;
- }
+
+ /* invalidate pages after the kernel image */
+ for (; i < PTRS_PER_PMD; i++)
+ pmd[i] &= ~_PAGE_PRESENT;
/*
* Fixup phys_base - remove the memory encryption mask to obtain
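The three loops above leave _PAGE_PRESENT only on the PMD entries that actually cover the kernel image. A standalone illustration, assuming the x86-64 pmd_index() definition ((addr >> 21) & 511); the image bounds stand in for _text/_end and are made up:

#include <stdio.h>

#define PMD_SHIFT       21
#define PTRS_PER_PMD    512
#define PAGE_PRESENT    0x1UL

static unsigned long pmd_index(unsigned long addr)
{
        return (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

int main(void)
{
        unsigned long pmd[PTRS_PER_PMD];
        unsigned long kernel_text = 0x1000000, kernel_end = 0x2123456;
        unsigned long i;

        for (i = 0; i < PTRS_PER_PMD; i++)
                pmd[i] = PAGE_PRESENT;  /* pretend everything was mapped */

        for (i = 0; i < pmd_index(kernel_text); i++)    /* before the image */
                pmd[i] &= ~PAGE_PRESENT;
        for (; i <= pmd_index(kernel_end); i++)         /* the image itself */
                ;                       /* left present (and relocated) */
        for (; i < PTRS_PER_PMD; i++)                   /* after the image */
                pmd[i] &= ~PAGE_PRESENT;

        printf("present: %lu..%lu\n",
               pmd_index(kernel_text), pmd_index(kernel_end)); /* 8..16 */
        return 0;
}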
/* cpuid 7.0.ecx*/
const u32 kvm_cpuid_7_0_ecx_x86_features =
- F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ |
+ F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/;
(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
-static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
-{
- return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
-}
-
static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{
return apic->vcpu->vcpu_id;
return apic_base & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
}
+static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
+{
+ return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
+}
+
#endif
int ret = 0;
struct vcpu_svm *svm = to_svm(vcpu);
u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
+ u32 id = kvm_xapic_id(vcpu->arch.apic);
if (ldr == svm->ldr_reg)
return 0;
avic_invalidate_logical_id_entry(vcpu);
if (ldr)
- ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr);
+ ret = avic_ldr_write(vcpu, id, ldr);
if (!ret)
svm->ldr_reg = ldr;
{
u64 *old, *new;
struct vcpu_svm *svm = to_svm(vcpu);
- u32 apic_id_reg = kvm_lapic_get_reg(vcpu->arch.apic, APIC_ID);
- u32 id = (apic_id_reg >> 24) & 0xff;
+ u32 id = kvm_xapic_id(vcpu->arch.apic);
if (vcpu->vcpu_id == id)
return 0;
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12);
-static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx->nested.apic_access_page = NULL;
}
page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
- /*
- * If translation failed, no matter: This feature asks
- * to exit when accessing the given address, and if it
- * can never be accessed, this feature won't do
- * anything anyway.
- */
if (!is_error_page(page)) {
vmx->nested.apic_access_page = page;
hpa = page_to_phys(vmx->nested.apic_access_page);
vmcs_write64(APIC_ACCESS_ADDR, hpa);
} else {
- secondary_exec_controls_clearbit(vmx,
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+ pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n",
+ __func__);
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror =
+ KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+ return false;
}
}
exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
else
exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
+ return true;
}
/*
/*
* If from_vmentry is false, this is being called from state restore (either RSM
* or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
- *
- * Returns:
- *  0 - success, i.e. proceed with actual VMEnter
- *  1 - consistency check VMExit
- * -1 - consistency check VMFail
+ *
+ * Returns:
+ * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode
+ * NVMX_VMENTRY_VMFAIL: Consistency check VMFail
+ * NVMX_VMENTRY_VMEXIT: Consistency check VMExit
+ * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error
*/
-int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
+enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ bool from_vmentry)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
prepare_vmcs02_early(vmx, vmcs12);
if (from_vmentry) {
- nested_get_vmcs12_pages(vcpu);
+ if (unlikely(!nested_get_vmcs12_pages(vcpu)))
+ return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
if (nested_vmx_check_vmentry_hw(vcpu)) {
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
- return -1;
+ return NVMX_VMENTRY_VMFAIL;
}
if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
* returned as far as L1 is concerned. It will only return (and set
* the success flag) when L2 exits (see nested_vmx_vmexit()).
*/
- return 0;
+ return NVMX_VMENTRY_SUCCESS;
/*
* A failed consistency check that leads to a VMExit during L1's
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
if (!from_vmentry)
- return 1;
+ return NVMX_VMENTRY_VMEXIT;
load_vmcs12_host_state(vcpu, vmcs12);
vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
vmcs12->exit_qualification = exit_qual;
if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
vmx->nested.need_vmcs12_to_shadow_sync = true;
- return 1;
+ return NVMX_VMENTRY_VMEXIT;
}
/*
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
{
struct vmcs12 *vmcs12;
+ enum nvmx_vmentry_status status;
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
- int ret;
if (!nested_vmx_check_permission(vcpu))
return 1;
* the nested entry.
*/
vmx->nested.nested_run_pending = 1;
- ret = nested_vmx_enter_non_root_mode(vcpu, true);
- vmx->nested.nested_run_pending = !ret;
- if (ret > 0)
- return 1;
- else if (ret)
- return nested_vmx_failValid(vcpu,
- VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ status = nested_vmx_enter_non_root_mode(vcpu, true);
+ if (unlikely(status != NVMX_VMENTRY_SUCCESS))
+ goto vmentry_failed;
/* Hide L1D cache contents from the nested guest. */
vmx->vcpu.arch.l1tf_flush_l1d = true;
return kvm_vcpu_halt(vcpu);
}
return 1;
+
+vmentry_failed:
+ vmx->nested.nested_run_pending = 0;
+ if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
+ return 0;
+ if (status == NVMX_VMENTRY_VMEXIT)
+ return 1;
+ WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
+ return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
}
/*
#include "vmcs12.h"
#include "vmx.h"
+/*
+ * Status returned by nested_vmx_enter_non_root_mode():
+ */
+enum nvmx_vmentry_status {
+ NVMX_VMENTRY_SUCCESS, /* Entered VMX non-root mode */
+ NVMX_VMENTRY_VMFAIL, /* Consistency check VMFail */
+ NVMX_VMENTRY_VMEXIT, /* Consistency check VMExit */
+ NVMX_VMENTRY_KVM_INTERNAL_ERROR,/* KVM internal error */
+};
+
void vmx_leave_nested(struct kvm_vcpu *vcpu);
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
bool apicv);
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
void nested_vmx_vcpu_setup(void);
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
-int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry);
+enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ bool from_vmentry);
bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason);
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
u32 exit_intr_info, unsigned long exit_qualification);
return 1;
}
-static int handle_unexpected_vmexit(struct kvm_vcpu *vcpu)
-{
- kvm_skip_emulated_instruction(vcpu);
- WARN_ONCE(1, "Unexpected VM-Exit Reason = 0x%x",
- vmcs_read32(VM_EXIT_REASON));
- return 1;
-}
-
/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
[EXIT_REASON_INVVPID] = handle_vmx_instruction,
[EXIT_REASON_RDRAND] = handle_invalid_op,
[EXIT_REASON_RDSEED] = handle_invalid_op,
- [EXIT_REASON_XSAVES] = handle_unexpected_vmexit,
- [EXIT_REASON_XRSTORS] = handle_unexpected_vmexit,
[EXIT_REASON_PML_FULL] = handle_pml_full,
[EXIT_REASON_INVPCID] = handle_invpcid,
[EXIT_REASON_VMFUNC] = handle_vmx_instruction,
[EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer,
[EXIT_REASON_ENCLS] = handle_encls,
- [EXIT_REASON_UMWAIT] = handle_unexpected_vmexit,
- [EXIT_REASON_TPAUSE] = handle_unexpected_vmexit,
};
static const int kvm_vmx_max_exit_handlers =
asmlinkage __visible void kvm_spurious_fault(void)
{
/* Fault while not rebooting. We want the trace. */
- if (!kvm_rebooting)
- BUG();
+ BUG_ON(!kvm_rebooting);
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
vcpu->arch.pv_time_enabled = false;
+ vcpu->arch.time = 0;
}
static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
case MSR_KVM_SYSTEM_TIME: {
struct kvm_arch *ka = &vcpu->kvm->arch;
- kvmclock_reset(vcpu);
-
if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
/* we verify if the enable bit is set... */
+ vcpu->arch.pv_time_enabled = false;
if (!(data & 1))
break;
- if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ if (!kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.pv_time, data & ~1ULL,
sizeof(struct pvclock_vcpu_time_info)))
- vcpu->arch.pv_time_enabled = false;
- else
vcpu->arch.pv_time_enabled = true;
break;
bool req_immediate_exit = false;
if (kvm_request_pending(vcpu)) {
- if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu))
- kvm_x86_ops->get_vmcs12_pages(vcpu);
+ if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu)) {
+ if (unlikely(!kvm_x86_ops->get_vmcs12_pages(vcpu))) {
+ r = 0;
+ goto out;
+ }
+ }
if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
kvm_mmu_unload(vcpu);
if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
printk(KERN_INFO "Xen version: %d.%d%s%s\n",
version >> 16, version & 0xffff, extra.extraversion,
xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
+
+#ifdef CONFIG_X86_32
+ pr_warn("WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!\n"
+ "Support for running as 32-bit PV-guest under Xen will soon be removed\n"
+ "from the Linux kernel!\n"
+ "Please use either a 64-bit kernel or switch to HVM or PVH mode!\n"
+ "WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!\n");
+#endif
}
static void __init xen_pv_init_platform(void)
reg = <0xf0100000 0x03f00000>;
// BUS_ADDRESS(3) CPU_PHYSICAL(1) SIZE(2)
- ranges = <0x01000000 0x0 0xf0000000 0xf0000000 0x0 0x00010000>,
+ ranges = <0x01000000 0x0 0x00000000 0xf0000000 0x0 0x00010000>,
<0x02000000 0x0 0xf4000000 0xf4000000 0x0 0x08000000>;
// PCI_DEVICE(3) INT#(1) CONTROLLER(PHANDLE) CONTROLLER_DATA(2)
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp)
- : "a" (~mask), "a" (p)
+ : "a" (mask), "a" (p)
: "memory");
}
case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
case 8: { \
__typeof__(*ptr) __v64 = x; \
- retval = __copy_to_user(ptr, &__v64, 8); \
+ retval = __copy_to_user(ptr, &__v64, 8) ? -EFAULT : 0; \
break; \
} \
default: __put_user_bad(); \
#define __check_align_1 ""
#define __check_align_2 \
- " _bbci.l %3, 0, 1f \n" \
- " movi %0, %4 \n" \
+ " _bbci.l %[addr], 0, 1f \n" \
+ " movi %[err], %[efault] \n" \
" _j 2f \n"
#define __check_align_4 \
- " _bbsi.l %3, 0, 0f \n" \
- " _bbci.l %3, 1, 1f \n" \
- "0: movi %0, %4 \n" \
+ " _bbsi.l %[addr], 0, 0f \n" \
+ " _bbci.l %[addr], 1, 1f \n" \
+ "0: movi %[err], %[efault] \n" \
" _j 2f \n"
* WARNING: If you modify this macro at all, verify that the
* __check_align_* macros still work.
*/
-#define __put_user_asm(x, addr, err, align, insn, cb) \
+#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
__asm__ __volatile__( \
__check_align_##align \
- "1: "insn" %2, %3, 0 \n" \
+ "1: "insn" %[x], %[addr], 0 \n" \
"2: \n" \
" .section .fixup,\"ax\" \n" \
" .align 4 \n" \
" .literal_position \n" \
"5: \n" \
- " movi %1, 2b \n" \
- " movi %0, %4 \n" \
- " jx %1 \n" \
+ " movi %[tmp], 2b \n" \
+ " movi %[err], %[efault] \n" \
+ " jx %[tmp] \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \
" .previous" \
- :"=r" (err), "=r" (cb) \
- :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
+ :[err] "+r"(err_), [tmp] "=r"(cb) \
+ :[x] "r"(x_), [addr] "r"(addr_), [efault] "i"(-EFAULT))
#define __get_user_nocheck(x, ptr, size) \
({ \
- long __gu_err, __gu_val; \
- __get_user_size(__gu_val, (ptr), (size), __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ long __gu_err; \
+ __get_user_size((x), (ptr), (size), __gu_err); \
__gu_err; \
})
#define __get_user_check(x, ptr, size) \
({ \
- long __gu_err = -EFAULT, __gu_val = 0; \
+ long __gu_err = -EFAULT; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
- if (access_ok(__gu_addr, size)) \
- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ if (access_ok(__gu_addr, size)) \
+ __get_user_size((x), __gu_addr, (size), __gu_err); \
+ else \
+ (x) = 0; \
__gu_err; \
})
case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
- case 8: retval = __copy_from_user(&x, ptr, 8); break; \
- default: (x) = __get_user_bad(); \
+ case 8: { \
+ u64 __x; \
+ if (unlikely(__copy_from_user(&__x, ptr, 8))) { \
+ retval = -EFAULT; \
+ (x) = 0; \
+ } else { \
+ (x) = *(__force __typeof__((ptr)))&__x; \
+ } \
+ break; \
+ } \
+ default: (x) = 0; __get_user_bad(); \
} \
} while (0)
* WARNING: If you modify this macro at all, verify that the
* __check_align_* macros still work.
*/
-#define __get_user_asm(x, addr, err, align, insn, cb) \
-__asm__ __volatile__( \
- __check_align_##align \
- "1: "insn" %2, %3, 0 \n" \
- "2: \n" \
- " .section .fixup,\"ax\" \n" \
- " .align 4 \n" \
- " .literal_position \n" \
- "5: \n" \
- " movi %1, 2b \n" \
- " movi %2, 0 \n" \
- " movi %0, %4 \n" \
- " jx %1 \n" \
- " .previous \n" \
- " .section __ex_table,\"a\" \n" \
- " .long 1b, 5b \n" \
- " .previous" \
- :"=r" (err), "=r" (cb), "=r" (x) \
- :"r" (addr), "i" (-EFAULT), "0" (err))
+#define __get_user_asm(x_, addr_, err_, align, insn, cb) \
+do { \
+ u32 __x = 0; \
+ __asm__ __volatile__( \
+ __check_align_##align \
+ "1: "insn" %[x], %[addr], 0 \n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ " .literal_position \n" \
+ "5: \n" \
+ " movi %[tmp], 2b \n" \
+ " movi %[err], %[efault] \n" \
+ " jx %[tmp] \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .long 1b, 5b \n" \
+ " .previous" \
+ :[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \
+ :[addr] "r"(addr_), [efault] "i"(-EFAULT)); \
+ (x_) = (__force __typeof__(*(addr_)))__x; \
+} while (0)
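Beyond the bug fixes, the uaccess macros switch from positional (%0..%4) to named asm operands, which keep the templates readable and survive operand reordering. The same mechanism in a tiny standalone toy, here using x86-64 AT&T syntax rather than xtensa:

#include <stdio.h>

int main(void)
{
        long src = 41, dst;

        /* [name] "constraint"(expr) binds an operand; %[name] uses it. */
        asm("lea 1(%[in]), %[out]"      /* dst = src + 1 */
            : [out] "=r" (dst)
            : [in] "r" (src));

        printf("%ld\n", dst);           /* prints: 42 */
        return 0;
}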
/*
// FIXME EXPORT_SYMBOL(screen_info);
#endif
-EXPORT_SYMBOL(outsb);
-EXPORT_SYMBOL(outsw);
-EXPORT_SYMBOL(outsl);
-EXPORT_SYMBOL(insb);
-EXPORT_SYMBOL(insw);
-EXPORT_SYMBOL(insl);
-
extern long common_exception_return;
EXPORT_SYMBOL(common_exception_return);
const struct blkcg_policy *pol)
{
struct blkg_policy_data *pd_prealloc = NULL;
- struct blkcg_gq *blkg;
+ struct blkcg_gq *blkg, *pinned_blkg = NULL;
int ret;
if (blkcg_policy_enabled(q, pol))
if (queue_is_mq(q))
blk_mq_freeze_queue(q);
-pd_prealloc:
- if (!pd_prealloc) {
- pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q, &blkcg_root);
- if (!pd_prealloc) {
- ret = -ENOMEM;
- goto out_bypass_end;
- }
- }
-
+retry:
spin_lock_irq(&q->queue_lock);
- /* blkg_list is pushed at the head, reverse walk to init parents first */
+ /* blkg_list is pushed at the head, reverse walk to allocate parents first */
list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
struct blkg_policy_data *pd;
if (blkg->pd[pol->plid])
continue;
- pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q, &blkcg_root);
- if (!pd)
- swap(pd, pd_prealloc);
+ /* If prealloc matches, use it; otherwise try GFP_NOWAIT */
+ if (blkg == pinned_blkg) {
+ pd = pd_prealloc;
+ pd_prealloc = NULL;
+ } else {
+ pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q,
+ blkg->blkcg);
+ }
+
if (!pd) {
+ /*
+ * GFP_NOWAIT failed. Free the existing one and
+ * prealloc for @blkg w/ GFP_KERNEL.
+ */
+ if (pinned_blkg)
+ blkg_put(pinned_blkg);
+ blkg_get(blkg);
+ pinned_blkg = blkg;
+
spin_unlock_irq(&q->queue_lock);
- goto pd_prealloc;
+
+ if (pd_prealloc)
+ pol->pd_free_fn(pd_prealloc);
+ pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q,
+ blkg->blkcg);
+ if (pd_prealloc)
+ goto retry;
+ else
+ goto enomem;
}
blkg->pd[pol->plid] = pd;
pd->blkg = blkg;
pd->plid = pol->plid;
- if (pol->pd_init_fn)
- pol->pd_init_fn(pd);
}
+ /* all allocated, init in the same order */
+ if (pol->pd_init_fn)
+ list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
+ pol->pd_init_fn(blkg->pd[pol->plid]);
+
__set_bit(pol->plid, q->blkcg_pols);
ret = 0;
spin_unlock_irq(&q->queue_lock);
-out_bypass_end:
+out:
if (queue_is_mq(q))
blk_mq_unfreeze_queue(q);
+ if (pinned_blkg)
+ blkg_put(pinned_blkg);
if (pd_prealloc)
pol->pd_free_fn(pd_prealloc);
return ret;
+
+enomem:
+ /* alloc failed, nothing's initialized yet, free everything */
+ spin_lock_irq(&q->queue_lock);
+ list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ if (blkg->pd[pol->plid]) {
+ pol->pd_free_fn(blkg->pd[pol->plid]);
+ blkg->pd[pol->plid] = NULL;
+ }
+ }
+ spin_unlock_irq(&q->queue_lock);
+ ret = -ENOMEM;
+ goto out;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
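The rework above allocates everything before initializing anything: GFP_NOWAIT attempts happen under the queue lock, and a failure pins the offending blkg, drops the lock, preallocates with GFP_KERNEL and restarts the walk. A single-threaded toy with invented names that mimics that retry shape (one simulated NOWAIT failure):

#include <stdio.h>
#include <stdlib.h>

#define NNODES 4

/* The "atomic" attempt for node 2 fails exactly once. */
static int atomic_fail[NNODES] = { 0, 0, 1, 0 };

static int *alloc_atomic(int node)
{
        if (atomic_fail[node]) {
                atomic_fail[node] = 0;
                return NULL;
        }
        return malloc(sizeof(int));
}

int main(void)
{
        int *pd[NNODES] = { NULL };
        int *prealloc = NULL;
        int pinned = -1, node;

retry:
        /* lock(); -- elided, the toy is single-threaded */
        for (node = 0; node < NNODES; node++) {
                int *p;

                if (pd[node])
                        continue;       /* kept across retries, like blkg->pd */
                if (node == pinned) {
                        p = prealloc;   /* use the GFP_KERNEL prealloc */
                        prealloc = NULL;
                } else {
                        p = alloc_atomic(node); /* GFP_NOWAIT analogue */
                }
                if (!p) {
                        pinned = node;  /* blkg_get() analogue */
                        /* unlock(); allocate sleeping, then retry */
                        prealloc = malloc(sizeof(int));
                        printf("NOWAIT failed, retrying for node %d\n", node);
                        goto retry;
                }
                pd[node] = p;
        }
        /* all allocated: the init pass runs here, then unlock() */

        for (node = 0; node < NNODES; node++)
                free(pd[node]);
        free(prealloc);
        return 0;
}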
static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
- struct rq_qos *cur, *prev = NULL;
- for (cur = q->rq_qos; cur; cur = cur->next) {
- if (cur == rqos) {
- if (prev)
- prev->next = rqos->next;
- else
- q->rq_qos = cur;
+ struct rq_qos **cur;
+
+ for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
+ if (*cur == rqos) {
+ *cur = rqos->next;
break;
}
- prev = cur;
}
blk_mq_debugfs_unregister_rqos(rqos);
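The old !prev branch reassigned the list head to the node being deleted (a no-op), so removing the first rq_qos entry silently failed; walking with an indirect struct rq_qos ** handles head and interior nodes with one assignment. The same idiom on a toy list:

#include <stdio.h>
#include <stddef.h>

struct node { int v; struct node *next; };

static void list_del(struct node **head, struct node *victim)
{
        struct node **cur;

        for (cur = head; *cur; cur = &(*cur)->next) {
                if (*cur == victim) {
                        *cur = victim->next;    /* works for the head, too */
                        break;
                }
        }
}

int main(void)
{
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct node *head = &a, *n;

        list_del(&head, &a);            /* remove the head node */
        for (n = head; n; n = n->next)
                printf("%d ", n->v);    /* prints: 2 3 */
        printf("\n");
        return 0;
}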
static inline bool elv_support_iosched(struct request_queue *q)
{
- if (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
+ if (!q->mq_ops ||
+ (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
return false;
return true;
}
pcc_data[pcc_ss_id]->refcount--;
if (!pcc_data[pcc_ss_id]->refcount) {
pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
- pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
kfree(pcc_data[pcc_ss_id]);
+ pcc_data[pcc_ss_id] = NULL;
}
}
}
pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n",
p->flags, p->processor_PD, p->memory_PD);
- if (p->flags & ACPI_HMAT_MEMORY_PD_VALID) {
+ if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) {
target = find_mem_target(p->memory_PD);
if (!target) {
pr_debug("HMAT: Memory Domain missing from SRAT\n");
nfit_device_lock(dev);
nd_desc = dev_get_drvdata(dev);
if (!nd_desc) {
- device_unlock(dev);
+ nfit_device_unlock(dev);
return rc;
}
acpi_desc = to_acpi_desc(nd_desc);
unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
- int cpu = policy->cpu;
if (event == CPUFREQ_CREATE_POLICY) {
- acpi_thermal_cpufreq_init(cpu);
- acpi_processor_ppc_init(cpu);
+ acpi_thermal_cpufreq_init(policy);
+ acpi_processor_ppc_init(policy);
} else if (event == CPUFREQ_REMOVE_POLICY) {
- acpi_processor_ppc_exit(cpu);
- acpi_thermal_cpufreq_exit(cpu);
+ acpi_processor_ppc_exit(policy);
+ acpi_thermal_cpufreq_exit(policy);
}
return 0;
pr->performance_platform_limit = (int)ppc;
if (ppc >= pr->performance->state_count ||
- unlikely(!dev_pm_qos_request_active(&pr->perflib_req)))
+ unlikely(!freq_qos_request_active(&pr->perflib_req)))
return 0;
- ret = dev_pm_qos_update_request(&pr->perflib_req,
+ ret = freq_qos_update_request(&pr->perflib_req,
pr->performance->states[ppc].core_frequency * 1000);
if (ret < 0) {
pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
ignore_ppc = 0;
}
-void acpi_processor_ppc_init(int cpu)
+void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
+ int cpu = policy->cpu;
struct acpi_processor *pr = per_cpu(processors, cpu);
int ret;
- ret = dev_pm_qos_add_request(get_cpu_device(cpu),
- &pr->perflib_req, DEV_PM_QOS_MAX_FREQUENCY,
- INT_MAX);
- if (ret < 0) {
+ if (!pr)
+ return;
+
+ ret = freq_qos_add_request(&policy->constraints, &pr->perflib_req,
+ FREQ_QOS_MAX, INT_MAX);
+ if (ret < 0)
pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
ret);
- return;
- }
}
-void acpi_processor_ppc_exit(int cpu)
+void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
{
- struct acpi_processor *pr = per_cpu(processors, cpu);
+ struct acpi_processor *pr = per_cpu(processors, policy->cpu);
- dev_pm_qos_remove_request(&pr->perflib_req);
+ if (pr)
+ freq_qos_remove_request(&pr->perflib_req);
}
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
pr = per_cpu(processors, i);
- if (unlikely(!dev_pm_qos_request_active(&pr->thermal_req)))
+ if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
continue;
policy = cpufreq_cpu_get(i);
cpufreq_cpu_put(policy);
- ret = dev_pm_qos_update_request(&pr->thermal_req, max_freq);
+ ret = freq_qos_update_request(&pr->thermal_req, max_freq);
if (ret < 0) {
pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
pr->id, ret);
return 0;
}
-void acpi_thermal_cpufreq_init(int cpu)
+void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
{
+ int cpu = policy->cpu;
struct acpi_processor *pr = per_cpu(processors, cpu);
int ret;
- ret = dev_pm_qos_add_request(get_cpu_device(cpu),
- &pr->thermal_req, DEV_PM_QOS_MAX_FREQUENCY,
- INT_MAX);
- if (ret < 0) {
+ if (!pr)
+ return;
+
+ ret = freq_qos_add_request(&policy->constraints, &pr->thermal_req,
+ FREQ_QOS_MAX, INT_MAX);
+ if (ret < 0)
pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
ret);
- return;
- }
}
-void acpi_thermal_cpufreq_exit(int cpu)
+void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
{
- struct acpi_processor *pr = per_cpu(processors, cpu);
+ struct acpi_processor *pr = per_cpu(processors, policy->cpu);
- dev_pm_qos_remove_request(&pr->thermal_req);
+ if (pr)
+ freq_qos_remove_request(&pr->thermal_req);
}
#else /* ! CONFIG_CPU_FREQ */
static int cpufreq_get_max_state(unsigned int cpu)
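All of these conversions follow the same lifecycle against the new per-policy freq_qos API: add a request against policy->constraints once, update it when the platform limit changes, remove it on teardown. A minimal in-kernel sketch of that shape (not a complete driver; the my_* names are invented, the freq_qos_* calls are the ones used in the hunks above):

#include <linux/cpufreq.h>
#include <linux/kernel.h>
#include <linux/pm_qos.h>

static struct freq_qos_request my_req;

static int my_policy_init(struct cpufreq_policy *policy)
{
        /* Start unconstrained; FREQ_QOS_MIN works the same way. */
        return freq_qos_add_request(&policy->constraints, &my_req,
                                    FREQ_QOS_MAX, INT_MAX);
}

static void my_limit_changed(s32 new_max_khz)
{
        if (freq_qos_request_active(&my_req))
                freq_qos_update_request(&my_req, new_max_khz);
}

static void my_policy_exit(void)
{
        freq_qos_remove_request(&my_req);
}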
DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
},
},
- /*
- * https://bugzilla.kernel.org/show_bug.cgi?id=196907
- * Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power
- * S0 Idle firmware interface.
- */
- {
- .callback = init_default_s3,
- .ident = "Dell XPS13 9360",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
- },
- },
/*
* ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
* the Low Power S0 Idle firmware interface (see
*/
rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node);
if (IS_ERR(rstc)) {
- if (PTR_ERR(rstc) != -EPROBE_DEFER)
- dev_err(&dev->dev, "Can't get amba reset!\n");
- return PTR_ERR(rstc);
+ ret = PTR_ERR(rstc);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&dev->dev, "can't get reset: %d\n",
+ ret);
+ goto err_reset;
}
reset_control_deassert(rstc);
reset_control_put(rstc);
release_resource(&dev->res);
err_out:
return ret;
+
+ err_reset:
+ amba_put_disable_pclk(dev);
+ iounmap(tmp);
+ dev_pm_domain_detach(&dev->dev, true);
+ goto err_release;
}
/*
*/
if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
return;
- if (((enum board_ids) id->driver_data) < board_ahci_pcs7)
+
+ /* Skip applying the quirk on Denverton and beyond */
+ if (((enum board_ids) id->driver_data) >= board_ahci_pcs7)
return;
/*
{
int rc, i;
- if (hpriv->ahci_regulator) {
- rc = regulator_enable(hpriv->ahci_regulator);
- if (rc)
- return rc;
- }
+ rc = regulator_enable(hpriv->ahci_regulator);
+ if (rc)
+ return rc;
- if (hpriv->phy_regulator) {
- rc = regulator_enable(hpriv->phy_regulator);
- if (rc)
- goto disable_ahci_pwrs;
- }
+ rc = regulator_enable(hpriv->phy_regulator);
+ if (rc)
+ goto disable_ahci_pwrs;
for (i = 0; i < hpriv->nports; i++) {
if (!hpriv->target_pwrs[i])
if (hpriv->target_pwrs[i])
regulator_disable(hpriv->target_pwrs[i]);
- if (hpriv->phy_regulator)
- regulator_disable(hpriv->phy_regulator);
+ regulator_disable(hpriv->phy_regulator);
disable_ahci_pwrs:
- if (hpriv->ahci_regulator)
- regulator_disable(hpriv->ahci_regulator);
+ regulator_disable(hpriv->ahci_regulator);
return rc;
}
EXPORT_SYMBOL_GPL(ahci_platform_enable_regulators);
regulator_disable(hpriv->target_pwrs[i]);
}
- if (hpriv->ahci_regulator)
- regulator_disable(hpriv->ahci_regulator);
- if (hpriv->phy_regulator)
- regulator_disable(hpriv->phy_regulator);
+ regulator_disable(hpriv->ahci_regulator);
+ regulator_disable(hpriv->phy_regulator);
}
EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators);
/**
struct regulator *target_pwr;
int rc = 0;
- target_pwr = regulator_get_optional(dev, "target");
+ target_pwr = regulator_get(dev, "target");
if (!IS_ERR(target_pwr))
hpriv->target_pwrs[port] = target_pwr;
hpriv->clks[i] = clk;
}
- hpriv->ahci_regulator = devm_regulator_get_optional(dev, "ahci");
+ hpriv->ahci_regulator = devm_regulator_get(dev, "ahci");
if (IS_ERR(hpriv->ahci_regulator)) {
rc = PTR_ERR(hpriv->ahci_regulator);
- if (rc == -EPROBE_DEFER)
+ if (rc != 0)
goto err_out;
- rc = 0;
- hpriv->ahci_regulator = NULL;
}
- hpriv->phy_regulator = devm_regulator_get_optional(dev, "phy");
+ hpriv->phy_regulator = devm_regulator_get(dev, "phy");
if (IS_ERR(hpriv->phy_regulator)) {
rc = PTR_ERR(hpriv->phy_regulator);
if (rc == -EPROBE_DEFER)
*/
#include <linux/acpi.h>
+#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fwnode.h>
wait_for_device_probe();
device_block_probing();
+ cpufreq_suspend();
+
spin_lock(&devices_kset->list_lock);
/*
* Walk the devices list backward, shutting down each in turn.
pfn >>= PAGE_SHIFT;
if (!pfn_valid(pfn))
return -ENXIO;
+ /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
+ if (!pfn_to_online_page(pfn))
+ return -EIO;
ret = soft_offline_page(pfn_to_page(pfn), 0);
return ret == 0 ? count : ret;
}
spin_lock_irqsave(&dev->power.lock, flags);
- switch (type) {
- case DEV_PM_QOS_RESUME_LATENCY:
+ if (type == DEV_PM_QOS_RESUME_LATENCY) {
ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
: pm_qos_read_value(&qos->resume_latency);
- break;
- case DEV_PM_QOS_MIN_FREQUENCY:
- ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
- : pm_qos_read_value(&qos->min_frequency);
- break;
- case DEV_PM_QOS_MAX_FREQUENCY:
- ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
- : pm_qos_read_value(&qos->max_frequency);
- break;
- default:
+ } else {
WARN_ON(1);
ret = 0;
}
req->dev->power.set_latency_tolerance(req->dev, value);
}
break;
- case DEV_PM_QOS_MIN_FREQUENCY:
- ret = pm_qos_update_target(&qos->min_frequency,
- &req->data.pnode, action, value);
- break;
- case DEV_PM_QOS_MAX_FREQUENCY:
- ret = pm_qos_update_target(&qos->max_frequency,
- &req->data.pnode, action, value);
- break;
case DEV_PM_QOS_FLAGS:
ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
action, value);
c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
c->type = PM_QOS_MIN;
- c = &qos->min_frequency;
- plist_head_init(&c->list);
- c->target_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
- c->default_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
- c->no_constraint_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
- c->type = PM_QOS_MAX;
- c->notifiers = ++n;
- BLOCKING_INIT_NOTIFIER_HEAD(n);
-
- c = &qos->max_frequency;
- plist_head_init(&c->list);
- c->target_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
- c->default_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
- c->no_constraint_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
- c->type = PM_QOS_MIN;
- c->notifiers = ++n;
- BLOCKING_INIT_NOTIFIER_HEAD(n);
-
INIT_LIST_HEAD(&qos->flags.list);
spin_lock_irq(&dev->power.lock);
memset(req, 0, sizeof(*req));
}
- c = &qos->min_frequency;
- plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
- apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
- memset(req, 0, sizeof(*req));
- }
-
- c = &qos->max_frequency;
- plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
- apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
- memset(req, 0, sizeof(*req));
- }
-
f = &qos->flags;
list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
switch(req->type) {
case DEV_PM_QOS_RESUME_LATENCY:
case DEV_PM_QOS_LATENCY_TOLERANCE:
- case DEV_PM_QOS_MIN_FREQUENCY:
- case DEV_PM_QOS_MAX_FREQUENCY:
curr_value = req->data.pnode.prio;
break;
case DEV_PM_QOS_FLAGS:
ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
notifier);
break;
- case DEV_PM_QOS_MIN_FREQUENCY:
- ret = blocking_notifier_chain_register(dev->power.qos->min_frequency.notifiers,
- notifier);
- break;
- case DEV_PM_QOS_MAX_FREQUENCY:
- ret = blocking_notifier_chain_register(dev->power.qos->max_frequency.notifiers,
- notifier);
- break;
default:
WARN_ON(1);
ret = -EINVAL;
ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
notifier);
break;
- case DEV_PM_QOS_MIN_FREQUENCY:
- ret = blocking_notifier_chain_unregister(dev->power.qos->min_frequency.notifiers,
- notifier);
- break;
- case DEV_PM_QOS_MAX_FREQUENCY:
- ret = blocking_notifier_chain_unregister(dev->power.qos->max_frequency.notifiers,
- notifier);
- break;
default:
WARN_ON(1);
ret = -EINVAL;
struct nbd_device *nbd = cmd->nbd;
struct nbd_config *config;
+ if (!mutex_trylock(&cmd->lock))
+ return BLK_EH_RESET_TIMER;
+
if (!refcount_inc_not_zero(&nbd->config_refs)) {
cmd->status = BLK_STS_TIMEOUT;
+ mutex_unlock(&cmd->lock);
goto done;
}
config = nbd->config;
- if (!mutex_trylock(&cmd->lock)) {
- nbd_config_put(nbd);
- return BLK_EH_RESET_TIMER;
- }
-
if (config->num_connections > 1) {
dev_err_ratelimited(nbd_to_dev(nbd),
"Connection timed out, retrying (%d/%d alive)\n",
ret = -ENOENT;
goto out;
}
+ if (cmd->status != BLK_STS_OK) {
+ dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
+ req);
+ ret = -ENOENT;
+ goto out;
+ }
if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
req);
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
+ mutex_lock(&cmd->lock);
cmd->status = BLK_STS_IOERR;
+ mutex_unlock(&cmd->lock);
+
blk_mq_complete_request(req);
return true;
}
return ret;
}
+static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
+ int *err)
+{
+ struct socket *sock;
+
+ *err = 0;
+ sock = sockfd_lookup(fd, err);
+ if (!sock)
+ return NULL;
+
+ if (sock->ops->shutdown == sock_no_shutdown) {
+ dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
+ *err = -EINVAL;
+ return NULL;
+ }
+
+ return sock;
+}
+
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
bool netlink)
{
struct nbd_sock *nsock;
int err;
- sock = sockfd_lookup(arg, &err);
+ sock = nbd_get_socket(nbd, arg, &err);
if (!sock)
return err;
int i;
int err;
- sock = sockfd_lookup(arg, &err);
+ sock = nbd_get_socket(nbd, arg, &err);
if (!sock)
return err;
queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
- if (ret > 0)
+ if (ret > 0) {
ret = rbd_dev->acquire_err;
- else if (!ret)
- ret = -ETIMEDOUT;
+ } else {
+ cancel_delayed_work_sync(&rbd_dev->lock_dwork);
+ if (!ret)
+ ret = -ETIMEDOUT;
+ }
if (ret) {
rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
static ssize_t backing_dev_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct file *file;
struct zram *zram = dev_to_zram(dev);
- struct file *file = zram->backing_dev;
char *p;
ssize_t ret;
down_read(&zram->init_lock);
- if (!zram->backing_dev) {
+ file = zram->backing_dev;
+ if (!file) {
memcpy(buf, "none\n", 5);
up_read(&zram->init_lock);
return 5;
* @clk_disable_quirk: module specific clock disable quirk
* @reset_done_quirk: module specific reset done quirk
* @module_enable_quirk: module specific enable quirk
+ * @module_disable_quirk: module specific disable quirk
*/
struct sysc {
struct device *dev;
void (*clk_disable_quirk)(struct sysc *sysc);
void (*reset_done_quirk)(struct sysc *sysc);
void (*module_enable_quirk)(struct sysc *sysc);
+ void (*module_disable_quirk)(struct sysc *sysc);
};
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
return 0;
+ if (ddata->module_disable_quirk)
+ ddata->module_disable_quirk(ddata);
+
regbits = ddata->cap->regbits;
reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
SYSC_MODULE_QUIRK_SGX),
SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
SYSC_MODULE_QUIRK_WDT),
+ /* Watchdog on am3 and am4 */
+ SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
+ SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),
#ifdef DEBUG
SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
!(val & 0x10), 100,
MAX_MODULE_SOFTRESET_WAIT);
if (error)
- dev_warn(ddata->dev, "wdt disable spr failed\n");
+ dev_warn(ddata->dev, "wdt disable step1 failed\n");
- sysc_write(ddata, wps, 0x5555);
+ sysc_write(ddata, spr, 0x5555);
error = readl_poll_timeout(ddata->module_va + wps, val,
!(val & 0x10), 100,
MAX_MODULE_SOFTRESET_WAIT);
if (error)
- dev_warn(ddata->dev, "wdt disable wps failed\n");
+ dev_warn(ddata->dev, "wdt disable step2 failed\n");
}
static void sysc_init_module_quirks(struct sysc *ddata)
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
- if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT)
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT) {
ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
+ ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
+ }
}
static int sysc_clockdomain_init(struct sysc *ddata)
if (ret != 1) \
return -EINVAL; \
\
- ret = dev_pm_qos_update_request(policy->object##_freq_req, val);\
+ ret = freq_qos_update_request(policy->object##_freq_req, val);\
return ret >= 0 ? count : ret; \
}
goto err_free_real_cpus;
}
+ freq_constraints_init(&policy->constraints);
+
policy->nb_min.notifier_call = cpufreq_notifier_min;
policy->nb_max.notifier_call = cpufreq_notifier_max;
- ret = dev_pm_qos_add_notifier(dev, &policy->nb_min,
- DEV_PM_QOS_MIN_FREQUENCY);
+ ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
+ &policy->nb_min);
if (ret) {
dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
ret, cpumask_pr_args(policy->cpus));
goto err_kobj_remove;
}
- ret = dev_pm_qos_add_notifier(dev, &policy->nb_max,
- DEV_PM_QOS_MAX_FREQUENCY);
+ ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
+ &policy->nb_max);
if (ret) {
dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
ret, cpumask_pr_args(policy->cpus));
return policy;
err_min_qos_notifier:
- dev_pm_qos_remove_notifier(dev, &policy->nb_min,
- DEV_PM_QOS_MIN_FREQUENCY);
+ freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
+ &policy->nb_min);
err_kobj_remove:
cpufreq_policy_put_kobj(policy);
err_free_real_cpus:
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
- struct device *dev = get_cpu_device(policy->cpu);
unsigned long flags;
int cpu;
per_cpu(cpufreq_cpu_data, cpu) = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- dev_pm_qos_remove_notifier(dev, &policy->nb_max,
- DEV_PM_QOS_MAX_FREQUENCY);
- dev_pm_qos_remove_notifier(dev, &policy->nb_min,
- DEV_PM_QOS_MIN_FREQUENCY);
+ freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
+ &policy->nb_max);
+ freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
+ &policy->nb_min);
+
+ /* Cancel any pending policy->update work before freeing the policy. */
+ cancel_work_sync(&policy->update);
if (policy->max_freq_req) {
/*
*/
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_REMOVE_POLICY, policy);
- dev_pm_qos_remove_request(policy->max_freq_req);
+ freq_qos_remove_request(policy->max_freq_req);
}
- dev_pm_qos_remove_request(policy->min_freq_req);
+ freq_qos_remove_request(policy->min_freq_req);
kfree(policy->min_freq_req);
cpufreq_policy_put_kobj(policy);
cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
if (new_policy) {
- struct device *dev = get_cpu_device(cpu);
-
for_each_cpu(j, policy->related_cpus) {
per_cpu(cpufreq_cpu_data, j) = policy;
add_cpu_dev_symlink(policy, j);
if (!policy->min_freq_req)
goto out_destroy_policy;
- ret = dev_pm_qos_add_request(dev, policy->min_freq_req,
- DEV_PM_QOS_MIN_FREQUENCY,
- policy->min);
+ ret = freq_qos_add_request(&policy->constraints,
+ policy->min_freq_req, FREQ_QOS_MIN,
+ policy->min);
if (ret < 0) {
/*
- * So we don't call dev_pm_qos_remove_request() for an
+ * So we don't call freq_qos_remove_request() for an
* uninitialized request.
*/
kfree(policy->min_freq_req);
policy->min_freq_req = NULL;
-
- dev_err(dev, "Failed to add min-freq constraint (%d)\n",
- ret);
goto out_destroy_policy;
}
/*
* This must be initialized right here to avoid calling
- * dev_pm_qos_remove_request() on uninitialized request in case
+ * freq_qos_remove_request() on uninitialized request in case
* of errors.
*/
policy->max_freq_req = policy->min_freq_req + 1;
- ret = dev_pm_qos_add_request(dev, policy->max_freq_req,
- DEV_PM_QOS_MAX_FREQUENCY,
- policy->max);
+ ret = freq_qos_add_request(&policy->constraints,
+ policy->max_freq_req, FREQ_QOS_MAX,
+ policy->max);
if (ret < 0) {
policy->max_freq_req = NULL;
- dev_err(dev, "Failed to add max-freq constraint (%d)\n",
- ret);
goto out_destroy_policy;
}
struct cpufreq_policy *new_policy)
{
struct cpufreq_governor *old_gov;
- struct device *cpu_dev = get_cpu_device(policy->cpu);
int ret;
pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
* PM QoS framework collects all the requests from users and provide us
* the final aggregated value here.
*/
- new_policy->min = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MIN_FREQUENCY);
- new_policy->max = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MAX_FREQUENCY);
+ new_policy->min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
+ new_policy->max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
/* verify the cpu speed can be set within this limit */
ret = cpufreq_driver->verify(new_policy);
break;
}
- ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
+ ret = freq_qos_update_request(policy->max_freq_req, policy->max);
if (ret < 0)
break;
}
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
-/*
- * Stop cpufreq at shutdown to make sure it isn't holding any locks
- * or mutexes when secondary CPUs are halted.
- */
-static struct syscore_ops cpufreq_syscore_ops = {
- .shutdown = cpufreq_suspend,
-};
-
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);
cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
BUG_ON(!cpufreq_global_kobject);
- register_syscore_ops(&cpufreq_syscore_ops);
-
return 0;
}
module_param(off, int, 0444);
static struct cpufreq_driver intel_pstate;
-static void update_qos_request(enum dev_pm_qos_req_type type)
+static void update_qos_request(enum freq_qos_req_type type)
{
int max_state, turbo_max, freq, i, perf_pct;
- struct dev_pm_qos_request *req;
+ struct freq_qos_request *req;
struct cpufreq_policy *policy;
for_each_possible_cpu(i) {
else
turbo_max = cpu->pstate.turbo_pstate;
- if (type == DEV_PM_QOS_MIN_FREQUENCY) {
+ if (type == FREQ_QOS_MIN) {
perf_pct = global.min_perf_pct;
} else {
req++;
freq = DIV_ROUND_UP(turbo_max * perf_pct, 100);
freq *= cpu->pstate.scaling;
- if (dev_pm_qos_update_request(req, freq) < 0)
+ if (freq_qos_update_request(req, freq) < 0)
pr_warn("Failed to update freq constraint: CPU%d\n", i);
}
}
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(DEV_PM_QOS_MAX_FREQUENCY);
+ update_qos_request(FREQ_QOS_MAX);
mutex_unlock(&intel_pstate_driver_lock);
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(DEV_PM_QOS_MIN_FREQUENCY);
+ update_qos_request(FREQ_QOS_MIN);
mutex_unlock(&intel_pstate_driver_lock);
static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
int max_state, turbo_max, min_freq, max_freq, ret;
- struct dev_pm_qos_request *req;
+ struct freq_qos_request *req;
struct cpudata *cpu;
struct device *dev;
max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
max_freq *= cpu->pstate.scaling;
- ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_MIN_FREQUENCY,
- min_freq);
+ ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
+ min_freq);
if (ret < 0) {
dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
goto free_req;
}
- ret = dev_pm_qos_add_request(dev, req + 1, DEV_PM_QOS_MAX_FREQUENCY,
- max_freq);
+ ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
+ max_freq);
if (ret < 0) {
dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
goto remove_min_req;
return 0;
remove_min_req:
- dev_pm_qos_remove_request(req);
+ freq_qos_remove_request(req);
free_req:
kfree(req);
pstate_exit:
static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
- struct dev_pm_qos_request *req;
+ struct freq_qos_request *req;
req = policy->driver_data;
- dev_pm_qos_remove_request(req + 1);
- dev_pm_qos_remove_request(req);
+ freq_qos_remove_request(req + 1);
+ freq_qos_remove_request(req);
kfree(req);
return intel_pstate_cpu_exit(policy);
static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
{
struct cpufreq_policy *policy;
- struct dev_pm_qos_request *req;
+ struct freq_qos_request *req;
u8 node, slow_mode;
int cpu, ret;
req = policy->driver_data;
- ret = dev_pm_qos_update_request(req,
+ ret = freq_qos_update_request(req,
policy->freq_table[slow_mode].frequency);
if (ret < 0)
pr_warn("Failed to update freq constraint: %d\n", ret);
void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
{
- struct dev_pm_qos_request *req;
+ struct freq_qos_request *req;
int ret;
if (!cbe_cpufreq_has_pmi)
if (!req)
return;
- ret = dev_pm_qos_add_request(get_cpu_device(policy->cpu), req,
- DEV_PM_QOS_MAX_FREQUENCY,
- policy->freq_table[0].frequency);
+ ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
+ policy->freq_table[0].frequency);
if (ret < 0) {
pr_err("Failed to add freq constraint (%d)\n", ret);
kfree(req);
void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy)
{
- struct dev_pm_qos_request *req = policy->driver_data;
+ struct freq_qos_request *req = policy->driver_data;
if (cbe_cpufreq_has_pmi) {
- dev_pm_qos_remove_request(req);
+ freq_qos_remove_request(req);
kfree(req);
}
}
int ret;
struct cpuidle_driver *drv = &haltpoll_driver;
+ /* Do not load haltpoll if idle= is passed */
+ if (boot_option_idle_override != IDLE_NO_OVERRIDE)
+ return -ENODEV;
+
cpuidle_poll_state_init(drv);
if (!kvm_para_available() ||
if (pfence_excl)
*pfence_excl = fence_excl;
else if (fence_excl)
- shared[++shared_count] = fence_excl;
+ shared[shared_count++] = fence_excl;
if (!shared_count) {
kfree(shared);
if (!ghes_pvt)
return;
+ if (atomic_dec_return(&ghes_init))
+ return;
+
mci = ghes_pvt->mci;
+ ghes_pvt = NULL;
edac_mc_del_mc(mci->pdev);
edac_mc_free(mci);
}
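ghes_edac_unregister() now tears down the MC only when the last GHES instance goes away, gated on an atomic counter, and clears ghes_pvt so no stale pointer survives. The last-one-out shape in a standalone C11 toy; note that atomic_fetch_sub() returns the old value where the kernel's atomic_dec_return() returns the new one:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int users;
static int resource_alive;

static void ghes_like_register(void)
{
        if (atomic_fetch_add(&users, 1) == 0)
                resource_alive = 1;     /* first user: set up */
}

static void ghes_like_unregister(void)
{
        if (atomic_fetch_sub(&users, 1) == 1)
                resource_alive = 0;     /* last user: tear down */
}

int main(void)
{
        ghes_like_register();
        ghes_like_register();
        ghes_like_unregister();
        printf("alive=%d\n", resource_alive);   /* still 1 */
        ghes_like_unregister();
        printf("alive=%d\n", resource_alive);   /* now 0 */
        return 0;
}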
bytes = ~0ull;
else if (size & 0x8000)
bytes = (u64)(size & 0x7fff) << 10;
- else if (size != 0x7fff)
+ else if (size != 0x7fff || dm->length < 0x20)
bytes = (u64)size << 20;
else
bytes = (u64)get_unaligned((u32 *)&d[0x1C]) << 20;
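The guard added above matters because the 32-bit extended-size field at offset 0x1C only exists when the SMBIOS Type 17 structure is at least 0x20 bytes long; shorter (older) records must fall back to treating 0x7fff as a literal MiB count. The decode rules as a standalone sketch (sample values invented; the size==0 case is omitted):

#include <stdio.h>
#include <stdint.h>

static uint64_t dmi_mem_bytes(uint16_t size, uint32_t ext_size,
                              uint8_t dm_length)
{
        if (size == 0xffff)
                return ~0ull;                           /* unknown */
        if (size & 0x8000)
                return (uint64_t)(size & 0x7fff) << 10; /* KiB units */
        if (size != 0x7fff || dm_length < 0x20)
                return (uint64_t)size << 20;            /* MiB units */
        return (uint64_t)ext_size << 20;                /* extended, MiB */
}

int main(void)
{
        printf("%llu MiB\n", (unsigned long long)
               (dmi_mem_bytes(0x7fff, 49152, 0x28) >> 20));     /* 49152 */
        printf("%llu KiB\n", (unsigned long long)
               (dmi_mem_bytes(0x8200, 0, 0x28) >> 10));         /* 512 */
        return 0;
}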
chip->irq_eoi(data);
}
-static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv)
+static int intel_mid_irq_init_hw(struct gpio_chip *chip)
{
+ struct intel_mid_gpio *priv = gpiochip_get_data(chip);
void __iomem *reg;
unsigned base;
reg = gpio_reg(&priv->chip, base, GEDR);
writel(~0, reg);
}
+
+ return 0;
}
static int __maybe_unused intel_gpio_runtime_idle(struct device *dev)
girq = &priv->chip.irq;
girq->chip = &intel_mid_irqchip;
+ girq->init_hw = intel_mid_irq_init_hw;
girq->parent_handler = intel_mid_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_simple_irq;
- intel_mid_irq_init_hw(priv);
-
pci_set_drvdata(pdev, priv);
+
retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
if (retval) {
dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
.flags = IRQCHIP_SKIP_SET_WAKE,
};
-static void lp_gpio_irq_init_hw(struct lp_gpio *lg)
+static int lp_gpio_irq_init_hw(struct gpio_chip *chip)
{
+ struct lp_gpio *lg = gpiochip_get_data(chip);
unsigned long reg;
unsigned base;
reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
outl(0xffffffff, reg);
}
+
+ return 0;
}
static int lp_gpio_probe(struct platform_device *pdev)
girq = &gc->irq;
girq->chip = &lp_irqchip;
+ girq->init_hw = lp_gpio_irq_init_hw;
girq->parent_handler = lp_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
return -ENOMEM;
girq->parents[0] = (unsigned)irq_rc->start;
girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_simple_irq;
-
- lp_gpio_irq_init_hw(lg);
+ girq->handler = handle_bad_irq;
}
ret = devm_gpiochip_add_data(dev, gc, lg);
chained_irq_exit(irqchip, desc);
}
-static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
+static int mrfld_irq_init_hw(struct gpio_chip *chip)
{
+ struct mrfld_gpio *priv = gpiochip_get_data(chip);
void __iomem *reg;
unsigned int base;
reg = gpio_reg(&priv->chip, base, GFER);
writel(0, reg);
}
+
+ return 0;
}
static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv)
girq = &priv->chip.irq;
girq->chip = &mrfld_irqchip;
+ girq->init_hw = mrfld_irq_init_hw;
girq->parent_handler = mrfld_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
if (!girq->parents)
return -ENOMEM;
girq->parents[0] = pdev->irq;
+ girq->first = irq_base;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
- mrfld_irq_init_hw(priv);
-
pci_set_drvdata(pdev, priv);
retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
if (retval) {
struct lock_class_key *lock_key,
struct lock_class_key *request_key);
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
+static int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip);
static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
machine_gpiochip_add(chip);
+ ret = gpiochip_irqchip_init_hw(chip);
+ if (ret)
+ goto err_remove_acpi_chip;
+
ret = gpiochip_irqchip_init_valid_mask(chip);
if (ret)
goto err_remove_acpi_chip;
* The following is irqchip helper code for gpiochips.
*/
+static int gpiochip_irqchip_init_hw(struct gpio_chip *gc)
+{
+ struct gpio_irq_chip *girq = &gc->irq;
+
+ if (!girq->init_hw)
+ return 0;
+
+ return girq->init_hw(gc);
+}
+
static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc)
{
struct gpio_irq_chip *girq = &gc->irq;
{
return 0;
}
-
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {}
+
+static inline int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip)
+{
+ return 0;
+}
+
static inline int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip)
{
return 0;
return 0;
error_free:
- while (i--) {
+ for (i = 0; i < last_entry; ++i) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
+
+ amdgpu_bo_unref(&bo);
+ }
+ for (i = first_userptr; i < num_entries; ++i) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
amdgpu_bo_unref(&bo);
list_for_each_entry(lobj, validated, tv.head) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
- bool binding_userptr = false;
struct mm_struct *usermm;
usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
lobj->user_pages);
- binding_userptr = true;
}
if (p->evictable == lobj)
if (r)
return r;
- if (binding_userptr) {
- kvfree(lobj->user_pages);
- lobj->user_pages = NULL;
- }
+ kvfree(lobj->user_pages);
+ lobj->user_pages = NULL;
}
return 0;
}
return -ENODEV;
}
+#ifdef CONFIG_DRM_AMDGPU_SI
+ if (!amdgpu_si_support) {
+ switch (flags & AMD_ASIC_MASK) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ dev_info(&pdev->dev,
+ "SI support provided by radeon.\n");
+ dev_info(&pdev->dev,
+ "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
+ );
+ return -ENODEV;
+ }
+ }
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ if (!amdgpu_cik_support) {
+ switch (flags & AMD_ASIC_MASK) {
+ case CHIP_KAVERI:
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ dev_info(&pdev->dev,
+ "CIK support provided by radeon.\n");
+ dev_info(&pdev->dev,
+ "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
+ );
+ return -ENODEV;
+ }
+ }
+#endif
+
/* Get rid of things like offb */
ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "amdgpudrmfb");
if (ret)
struct amdgpu_device *adev;
int r, acpi_status;
-#ifdef CONFIG_DRM_AMDGPU_SI
- if (!amdgpu_si_support) {
- switch (flags & AMD_ASIC_MASK) {
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
- dev_info(dev->dev,
- "SI support provided by radeon.\n");
- dev_info(dev->dev,
- "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
- );
- return -ENODEV;
- }
- }
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
- if (!amdgpu_cik_support) {
- switch (flags & AMD_ASIC_MASK) {
- case CHIP_KAVERI:
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- case CHIP_KABINI:
- case CHIP_MULLINS:
- dev_info(dev->dev,
- "CIK support provided by radeon.\n");
- dev_info(dev->dev,
- "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
- );
- return -ENODEV;
- }
- }
-#endif
-
adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
if (adev == NULL) {
return -ENOMEM;
.interruptible = (bp->type != ttm_bo_type_kernel),
.no_wait_gpu = false,
.resv = bp->resv,
- .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
+ .flags = bp->type != ttm_bo_type_kernel ?
+ TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
};
struct amdgpu_bo *bo;
unsigned long page_align, size = bp->size;
* Open up a stream for HW test
*/
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
/* stitch together an VCE create msg */
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000014; /* len */
ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000001;
for (i = ib->length_dw; i < ib_size_dw; ++i)
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
/* skip vce ring1/2 ib test for now, since it's not reliable */
if (ring != &ring->adev->vce.ring[0])
return 0;
- r = amdgpu_vce_get_create_msg(ring, 1, NULL);
+ r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence);
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence);
}
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x0000000b;
ib->ptr[ib->length_dw++] = 0x00000014;
}
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001;
ib->ptr[ib->length_dw++] = handle;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x0000000b;
ib->ptr[ib->length_dw++] = 0x00000014;
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
- r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
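+	/* one 128K scratch BO backs both the create and destroy messages */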
+ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
- r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
+ r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
if (r)
goto error;
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
- amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
* Open up a stream for HW test
*/
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00010000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
*/
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00010000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
- r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
+ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
- r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
+ r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
if (r)
goto error;
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
* Open up a stream for HW test
*/
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00000000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
* Close up a stream for HW test or if userspace failed to do so
*/
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001;
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00000000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002;
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
- r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
+ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
- r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
+ r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
if (r)
goto error;
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
count = SMU_MAX_SMIO_LEVELS;
for (level = 0; level < count; level++) {
table->SmioTable2.Pattern[level].Voltage =
- PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
table->SmioTable2.Pattern[level].Smio =
(uint8_t) level;
count = SMU_MAX_SMIO_LEVELS;
for (level = 0; level < count; level++) {
table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
- data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
table->SmioTable2.Pattern[level].Smio =
(uint8_t) level;
drm_atomic_helper_commit_modeset_disables(dev, old_state);
- drm_atomic_helper_commit_planes(dev, old_state, 0);
+ drm_atomic_helper_commit_planes(dev, old_state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_modeset_enables(dev, old_state);
}
if (!in_range(&splitter->vsize, dflow->in_h)) {
- DRM_DEBUG_ATOMIC("split in_in: %d exceed the acceptable range.\n",
- dflow->in_w);
+ DRM_DEBUG_ATOMIC("split in_h: %d exceeds the acceptable range.\n",
+ dflow->in_h);
return -EINVAL;
}
/* Medion MD 30217 PG */
{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+ /* Lenovo G50 */
+ { "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
+
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
"disabling port %c DVI/HDMI support\n",
port_name(port), info->alternate_ddc_pin,
- port_name(p), port_name(port));
+ port_name(p), port_name(p));
/*
* If we have multiple ports supposedly sharing the
 * port. Otherwise they share the same ddc pin and
* system couldn't communicate with them separately.
*
- * Give child device order the priority, first come first
- * served.
+ * Give inverse child device order the priority,
+ * last one wins. Yes, there are real machines
+ * (eg. Asrock B250M-HDV) where VBT has both
+ * port A and port E with the same AUX ch and
+ * we must pick port E :(
*/
+ info = &dev_priv->vbt.ddi_port_info[p];
+
info->supports_dvi = false;
info->supports_hdmi = false;
info->alternate_ddc_pin = 0;
DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
"disabling port %c DP support\n",
port_name(port), info->alternate_aux_channel,
- port_name(p), port_name(port));
+ port_name(p), port_name(p));
/*
 * If we have multiple ports supposedly sharing the
* port. Otherwise they share the same aux channel
* and system couldn't communicate with them separately.
*
- * Give child device order the priority, first come first
- * served.
+ * Give inverse child device order the priority,
+ * last one wins. Yes, there are real machines
+ * (eg. Asrock B250M-HDV) where VBT has both
+ * port A and port E with the same AUX ch and
+ * we must pick port E :(
*/
+ info = &dev_priv->vbt.ddi_port_info[p];
+
info->supports_dp = false;
info->alternate_aux_channel = 0;
}
return VM_FAULT_OOM;
case -ENOSPC:
case -EFAULT:
+ case -ENODEV: /* bad object, how did you get here! */
return VM_FAULT_SIGBUS;
default:
WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
if (!obj)
return -ENOENT;
+ if (i915_gem_object_never_bind_ggtt(obj)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
ret = create_mmap_offset(obj);
if (ret == 0)
*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
+out:
i915_gem_object_put(obj);
return ret;
}
return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
}
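+/*
+ * Objects flagged I915_GEM_OBJECT_NO_GGTT (e.g. userptr) must never be
+ * bound into the global GTT.
+ */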
+static inline bool
+i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_NO_GGTT;
+}
+
static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
#define I915_GEM_OBJECT_IS_PROXY BIT(2)
-#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
+#define I915_GEM_OBJECT_NO_GGTT BIT(3)
+#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(4)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE |
+ I915_GEM_OBJECT_NO_GGTT |
I915_GEM_OBJECT_ASYNC_CANCEL,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
struct intel_engine_cs *engine,
struct intel_ring *ring);
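+/* cancel a stale request: set -EIO unless already signaled, then complete it */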
+static void mark_eio(struct i915_request *rq)
+{
+ if (!i915_request_signaled(rq))
+ dma_fence_set_error(&rq->fence, -EIO);
+ i915_request_mark_complete(rq);
+}
+
static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
{
return (i915_ggtt_offset(engine->status_page.vma) +
submit = true;
last = rq;
}
+ i915_request_put(rq);
/*
* Hmm, we have a bunch of virtual engine requests,
__execlists_reset(engine, true);
/* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(rq))
- dma_fence_set_error(&rq->fence, -EIO);
-
- i915_request_mark_complete(rq);
- }
+ list_for_each_entry(rq, &engine->active.requests, sched.link)
+ mark_eio(rq);
/* Flush the queued requests to the timeline list (for retiring). */
while ((rb = rb_first_cached(&execlists->queue))) {
int i;
priolist_for_each_request_consume(rq, rn, p, i) {
+ mark_eio(rq);
__i915_request_submit(rq);
- dma_fence_set_error(&rq->fence, -EIO);
- i915_request_mark_complete(rq);
}
rb_erase_cached(&p->node, &execlists->queue);
RB_CLEAR_NODE(rb);
spin_lock(&ve->base.active.lock);
- if (ve->request) {
- ve->request->engine = engine;
- __i915_request_submit(ve->request);
- dma_fence_set_error(&ve->request->fence, -EIO);
- i915_request_mark_complete(ve->request);
+ rq = fetch_and_zero(&ve->request);
+ if (rq) {
+ mark_eio(rq);
+
+ rq->engine = engine;
+ __i915_request_submit(rq);
+ i915_request_put(rq);
+
ve->base.execlists.queue_priority_hint = INT_MIN;
- ve->request = NULL;
}
spin_unlock(&ve->base.active.lock);
}
static void virtual_submit_request(struct i915_request *rq)
{
struct virtual_engine *ve = to_virtual_engine(rq->engine);
+ struct i915_request *old;
+ unsigned long flags;
GEM_TRACE("%s: rq=%llx:%lld\n",
ve->base.name,
GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
- GEM_BUG_ON(ve->request);
- GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+ spin_lock_irqsave(&ve->base.active.lock, flags);
+
+ old = ve->request;
+ if (old) { /* background completion event from preempt-to-busy */
+ GEM_BUG_ON(!i915_request_completed(old));
+ __i915_request_submit(old);
+ i915_request_put(old);
+ }
- ve->base.execlists.queue_priority_hint = rq_prio(rq);
- WRITE_ONCE(ve->request, rq);
+ if (i915_request_completed(rq)) {
+ __i915_request_submit(rq);
- list_move_tail(&rq->sched.link, virtual_queue(ve));
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ ve->request = NULL;
+ } else {
+ ve->base.execlists.queue_priority_hint = rq_prio(rq);
+ ve->request = i915_request_get(rq);
+
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+ list_move_tail(&rq->sched.link, virtual_queue(ve));
+
+ tasklet_schedule(&ve->base.execlists.tasklet);
+ }
- tasklet_schedule(&ve->base.execlists.tasklet);
+ spin_unlock_irqrestore(&ve->base.active.lock, flags);
}
static struct ve_bond *
lockdep_assert_held(&obj->base.dev->struct_mutex);
+ if (i915_gem_object_never_bind_ggtt(obj))
+ return ERR_PTR(-ENODEV);
+
if (flags & PIN_MAPPABLE &&
(!view || view->type == I915_GGTT_VIEW_NORMAL)) {
/* If the required space is larger than the available
#include "dsi_cfg.h"
#include "msm_kms.h"
+#define DSI_RESET_TOGGLE_DELAY_MS 20
+
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
u32 ver;
wmb(); /* clocks need to be enabled before reset */
dsi_write(msm_host, REG_DSI_RESET, 1);
- wmb(); /* make sure reset happen */
+ msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happens */
dsi_write(msm_host, REG_DSI_RESET, 0);
}
/* dsi controller can only be reset while clocks are running */
dsi_write(msm_host, REG_DSI_RESET, 1);
- wmb(); /* make sure reset happen */
+ msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happens */
dsi_write(msm_host, REG_DSI_RESET, 0);
wmb(); /* controller out of reset */
dsi_write(msm_host, REG_DSI_CTRL, data0);
pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES);
pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES);
pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES);
+ pfdev->features.max_threads = gpu_read(pfdev, GPU_THREAD_MAX_THREADS);
+ pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
+ pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
for (i = 0; i < 4; i++)
pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));
job_read(pfdev, JS_TAIL_LO(js)),
sched_job);
- mutex_lock(&pfdev->reset_lock);
+ if (!mutex_trylock(&pfdev->reset_lock))
+ return;
- for (i = 0; i < NUM_JOB_SLOTS; i++)
- drm_sched_stop(&pfdev->js->queue[i].sched, sched_job);
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
+
+ drm_sched_stop(sched, sched_job);
+ if (js != i)
+ /* Ensure any timeouts on other slots have finished */
+ cancel_delayed_work_sync(&sched->work_tdr);
+ }
- if (sched_job)
- drm_sched_increase_karma(sched_job);
+ drm_sched_increase_karma(sched_job);
spin_lock_irqsave(&pfdev->js->job_lock, flags);
for (i = 0; i < NUM_JOB_SLOTS; i++) {
static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
- struct drm_device *ddev = pci_get_drvdata(pdev);
-
/* if we are running in a VM, make sure the device
* torn down properly on reboot/shutdown
*/
if (radeon_device_is_virtual())
radeon_pci_remove(pdev);
-
- /* Some adapters need to be suspended before a
- * shutdown occurs in order to prevent an error
- * during kexec.
- */
- radeon_suspend_kms(ddev, true, true, false);
}
static int radeon_pmops_suspend(struct device *dev)
depends on DRM && SPI
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
- depends on THERMAL || !THERMAL
help
DRM driver for the following Pervasive Displays panels:
1.44" TFT EPD Panel (E1144CS021)
list_add_tail(&bo->lru, &man->lru[bo->priority]);
kref_get(&bo->list_kref);
- if (bo->ttm && !(bo->ttm->page_flags &
- (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
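+ /* fixed memory is not backed by system pages and can never be swapped out */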
+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
+ !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
+ TTM_PAGE_FLAG_SWAPPED))) {
list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
kref_get(&bo->list_kref);
}
if (!bo) {
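+ /* busy_bo may already be being destroyed; take the list ref, not the base ref */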
if (busy_bo)
- ttm_bo_get(busy_bo);
+ kref_get(&busy_bo->list_kref);
spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo)
- ttm_bo_put(busy_bo);
+ kref_put(&busy_bo->list_kref, ttm_bo_release_list);
return ret;
}
else
ret = vmf_insert_pfn(&cvma, address, pfn);
- /*
- * Somebody beat us to this PTE or prefaulting to
- * an already populated PTE, or prefaulting error.
- */
-
- if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
- break;
- else if (unlikely(ret & VM_FAULT_ERROR))
- goto out_io_unlock;
+ /* Never error on prefaulted PTEs */
+ if (unlikely((ret & VM_FAULT_ERROR))) {
+ if (i == 0)
+ goto out_io_unlock;
+ else
+ break;
+ }
address += PAGE_SIZE;
if (unlikely(++page_offset >= page_last))
#define ASPEED_I2CD_S_TX_CMD BIT(2)
#define ASPEED_I2CD_M_TX_CMD BIT(1)
#define ASPEED_I2CD_M_START_CMD BIT(0)
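+/*
+ * All command bits a queued master xfer may have set; used to cancel
+ * the queued command when a slave session starts.
+ */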
+#define ASPEED_I2CD_MASTER_CMDS_MASK \
+ (ASPEED_I2CD_M_STOP_CMD | \
+ ASPEED_I2CD_M_S_RX_CMD_LAST | \
+ ASPEED_I2CD_M_RX_CMD | \
+ ASPEED_I2CD_M_TX_CMD | \
+ ASPEED_I2CD_M_START_CMD)
/* 0x18 : I2CD Slave Device Address Register */
#define ASPEED_I2CD_DEV_ADDR_MASK GENMASK(6, 0)
struct i2c_msg *msg = &bus->msgs[bus->msgs_index];
u8 slave_addr = i2c_8bit_addr_from_msg(msg);
- bus->master_state = ASPEED_I2C_MASTER_START;
-
#if IS_ENABLED(CONFIG_I2C_SLAVE)
/*
* If it's requested in the middle of a slave session, set the master
 * state to 'pending'; H/W will then continue handling this master
* command when the bus comes back to the idle state.
*/
- if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE)
+ if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) {
bus->master_state = ASPEED_I2C_MASTER_PENDING;
+ return;
+ }
#endif /* CONFIG_I2C_SLAVE */
+ bus->master_state = ASPEED_I2C_MASTER_START;
bus->buf_index = 0;
if (msg->flags & I2C_M_RD) {
}
}
-#if IS_ENABLED(CONFIG_I2C_SLAVE)
- /*
- * A pending master command will be started by H/W when the bus comes
- * back to idle state after completing a slave operation so change the
- * master state from 'pending' to 'start' at here if slave is inactive.
- */
- if (bus->master_state == ASPEED_I2C_MASTER_PENDING) {
- if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE)
- goto out_no_complete;
-
- bus->master_state = ASPEED_I2C_MASTER_START;
- }
-#endif /* CONFIG_I2C_SLAVE */
-
/* Master is not currently active, irq was for someone else. */
if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE ||
bus->master_state == ASPEED_I2C_MASTER_PENDING)
#if IS_ENABLED(CONFIG_I2C_SLAVE)
/*
* If a peer master starts a xfer immediately after it queues a
- * master command, change its state to 'pending' then H/W will
- * continue the queued master xfer just after completing the
- * slave mode session.
+ * master command, clear the queued master command and change
+ * its state to 'pending'. To simplify handling of the pending
+ * cases, a S/W solution is used instead of the H/W command
+ * queue handling.
*/
if (unlikely(irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH)) {
+ writel(readl(bus->base + ASPEED_I2C_CMD_REG) &
+ ~ASPEED_I2CD_MASTER_CMDS_MASK,
+ bus->base + ASPEED_I2C_CMD_REG);
bus->master_state = ASPEED_I2C_MASTER_PENDING;
dev_dbg(bus->dev,
"master goes pending due to a slave start\n");
irq_handled |= aspeed_i2c_master_irq(bus,
irq_remaining);
}
+
+ /*
+ * Start a pending master command here once the slave operation
+ * has completed.
+ */
+ if (bus->master_state == ASPEED_I2C_MASTER_PENDING &&
+ bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
+ aspeed_i2c_do_start(bus);
#else
irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
#endif /* CONFIG_I2C_SLAVE */
ASPEED_I2CD_BUS_BUSY_STS))
aspeed_i2c_recover_bus(bus);
+ /*
+ * If timed out and the state is still pending, drop the pending
+ * master command.
+ */
+ spin_lock_irqsave(&bus->lock, flags);
+ if (bus->master_state == ASPEED_I2C_MASTER_PENDING)
+ bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
+ spin_unlock_irqrestore(&bus->lock, flags);
+
return -ETIMEDOUT;
}
static u32 mtk_i2c_functionality(struct i2c_adapter *adap)
{
- if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN)
+ if (i2c_check_quirks(adap, I2C_AQ_NO_ZERO_LEN))
return I2C_FUNC_I2C |
(I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
else
struct regmap *regmap;
};
-/**
+/*
 * All these values come from the I2C Specification, Version 6.0, 4th of
* April 2014.
*
STM32F7_I2C_CR1_TXIE;
stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, mask);
+ /* Write 1st data byte */
+ writel_relaxed(value, base + STM32F7_I2C_TXDR);
} else {
/* Notify i2c slave that new write transfer is starting */
i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
void __iomem *base = i2c_dev->base;
struct device *dev = i2c_dev->dev;
struct stm32_i2c_dma *dma = i2c_dev->dma;
- u32 mask, status;
+ u32 status;
status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
f7_msg->result = -EINVAL;
}
- /* Disable interrupts */
- if (stm32f7_i2c_is_slave_registered(i2c_dev))
- mask = STM32F7_I2C_XFER_IRQ_MASK;
- else
- mask = STM32F7_I2C_ALL_IRQ_MASK;
- stm32f7_i2c_disable_irq(i2c_dev, mask);
+ if (!i2c_dev->slave_running) {
+ u32 mask;
+ /* Disable interrupts */
+ if (stm32f7_i2c_is_slave_registered(i2c_dev))
+ mask = STM32F7_I2C_XFER_IRQ_MASK;
+ else
+ mask = STM32F7_I2C_ALL_IRQ_MASK;
+ stm32f7_i2c_disable_irq(i2c_dev, mask);
+ }
/* Disable dma */
if (i2c_dev->use_dma) {
onkey->input->phys = onkey->phys;
onkey->input->dev.parent = &pdev->dev;
- if (onkey->key_power)
- input_set_capability(onkey->input, EV_KEY, KEY_POWER);
-
- input_set_capability(onkey->input, EV_KEY, KEY_SLEEP);
+ input_set_capability(onkey->input, EV_KEY, KEY_POWER);
INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);
continue;
gpio = soc_button_lookup_gpio(&pdev->dev, info->acpi_index);
- if (gpio < 0 && gpio != -ENOENT) {
- error = gpio;
- goto err_free_mem;
- } else if (!gpio_is_valid(gpio)) {
- /* Skip GPIO if not present */
+ if (!gpio_is_valid(gpio)) {
+ /*
+ * Skip GPIO if not present. Note we deliberately
+ * ignore -EPROBE_DEFER errors here. On some devices
+ * Intel uses so-called virtual GPIOs, which are not
+ * GPIOs at all but a way for AML code to check some
+ * random status bits without needing a custom opregion.
+ * In some cases the resource table we parse points to
+ * such a virtual GPIO; since these are not real GPIOs
+ * we have no driver for them, so they will never show
+ * up and we therefore ignore -EPROBE_DEFER.
+ */
continue;
}
leave_breadcrumbs);
}
-static bool elantech_use_host_notify(struct psmouse *psmouse,
- struct elantech_device_info *info)
-{
- if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
- return true;
-
- switch (info->bus) {
- case ETP_BUS_PS2_ONLY:
- /* expected case */
- break;
- case ETP_BUS_SMB_HST_NTFY_ONLY:
- case ETP_BUS_PS2_SMB_HST_NTFY:
- /* SMbus implementation is stable since 2018 */
- if (dmi_get_bios_year() >= 2018)
- return true;
- /* fall through */
- default:
- psmouse_dbg(psmouse,
- "Ignoring SMBus bus provider %d\n", info->bus);
- break;
- }
-
- return false;
-}
-
/**
* elantech_setup_smbus - called once the PS/2 devices are enumerated
* and decides to instantiate a SMBus InterTouch device.
* i2c_blacklist_pnp_ids.
* Old ICs are up to the user to decide.
*/
- if (!elantech_use_host_notify(psmouse, info) ||
+ if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
return -ENXIO;
}
return 0;
}
+static bool elantech_use_host_notify(struct psmouse *psmouse,
+ struct elantech_device_info *info)
+{
+ if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+ return true;
+
+ switch (info->bus) {
+ case ETP_BUS_PS2_ONLY:
+ /* expected case */
+ break;
+ case ETP_BUS_SMB_ALERT_ONLY:
+ /* fall-through */
+ case ETP_BUS_PS2_SMB_ALERT:
+ psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
+ break;
+ case ETP_BUS_SMB_HST_NTFY_ONLY:
+ /* fall-through */
+ case ETP_BUS_PS2_SMB_HST_NTFY:
+ return true;
+ default:
+ psmouse_dbg(psmouse,
+ "Ignoring SMBus bus provider %d.\n",
+ info->bus);
+ }
+
+ return false;
+}
+
int elantech_init_smbus(struct psmouse *psmouse)
{
struct elantech_device_info info;
}
mutex_lock(&data->irq_mutex);
- bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
+ bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
data->irq_count);
/*
* At this point, irq_status has all bits that are set in the
bitmap_copy(data->current_irq_mask, data->new_irq_mask,
data->num_of_irq_regs);
+ bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);
+
error_unlock:
mutex_unlock(&data->irq_mutex);
return error;
struct device *dev = &rmi_dev->dev;
mutex_lock(&data->irq_mutex);
+ bitmap_andnot(data->fn_irq_bits,
+ data->fn_irq_bits, mask, data->irq_count);
bitmap_andnot(data->new_irq_mask,
data->current_irq_mask, mask, data->irq_count);
const char *cfg_name;
struct completion firmware_loading_complete;
unsigned long irq_flags;
+ unsigned int contact_size;
};
#define GOODIX_GPIO_INT_NAME "irq"
#define GOODIX_MAX_WIDTH 4096
#define GOODIX_INT_TRIGGER 1
#define GOODIX_CONTACT_SIZE 8
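+/* per-touch record size: 9 bytes on quirked devices, 8 otherwise */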
+#define GOODIX_MAX_CONTACT_SIZE 9
#define GOODIX_MAX_CONTACTS 10
#define GOODIX_CONFIG_MAX_LENGTH 240
{}
};
+static const struct dmi_system_id nine_bytes_report[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ {
+ .ident = "Lenovo YogaBook",
+ /* YB1-X91L/F and YB1-X90L/F */
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9")
+ }
+ },
+#endif
+ {}
+};
+
/**
* goodix_i2c_read - read data from a register of the i2c slave device.
*
max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT);
do {
error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR,
- data, GOODIX_CONTACT_SIZE + 1);
+ data, ts->contact_size + 1);
if (error) {
dev_err(&ts->client->dev, "I2C transfer error: %d\n",
error);
return -EPROTO;
if (touch_num > 1) {
- data += 1 + GOODIX_CONTACT_SIZE;
+ data += 1 + ts->contact_size;
error = goodix_i2c_read(ts->client,
GOODIX_READ_COOR_ADDR +
- 1 + GOODIX_CONTACT_SIZE,
+ 1 + ts->contact_size,
data,
- GOODIX_CONTACT_SIZE *
+ ts->contact_size *
(touch_num - 1));
if (error)
return error;
return 0;
}
-static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
+static void goodix_ts_report_touch_8b(struct goodix_ts_data *ts, u8 *coor_data)
{
int id = coor_data[0] & 0x0F;
int input_x = get_unaligned_le16(&coor_data[1]);
input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w);
}
+static void goodix_ts_report_touch_9b(struct goodix_ts_data *ts, u8 *coor_data)
+{
+ int id = coor_data[1] & 0x0F;
+ int input_x = get_unaligned_le16(&coor_data[3]);
+ int input_y = get_unaligned_le16(&coor_data[5]);
+ int input_w = get_unaligned_le16(&coor_data[7]);
+
+ input_mt_slot(ts->input_dev, id);
+ input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
+ touchscreen_report_pos(ts->input_dev, &ts->prop,
+ input_x, input_y, true);
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, input_w);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w);
+}
+
/**
* goodix_process_events - Process incoming events
*
*/
static void goodix_process_events(struct goodix_ts_data *ts)
{
- u8 point_data[1 + GOODIX_CONTACT_SIZE * GOODIX_MAX_CONTACTS];
+ u8 point_data[1 + GOODIX_MAX_CONTACT_SIZE * GOODIX_MAX_CONTACTS];
int touch_num;
int i;
input_report_key(ts->input_dev, KEY_LEFTMETA, point_data[0] & BIT(4));
for (i = 0; i < touch_num; i++)
- goodix_ts_report_touch(ts,
- &point_data[1 + GOODIX_CONTACT_SIZE * i]);
+ if (ts->contact_size == 9)
+ goodix_ts_report_touch_9b(ts,
+ &point_data[1 + ts->contact_size * i]);
+ else
+ goodix_ts_report_touch_8b(ts,
+ &point_data[1 + ts->contact_size * i]);
input_mt_sync_frame(ts->input_dev);
input_sync(ts->input_dev);
"Applying '180 degrees rotated screen' quirk\n");
}
+ if (dmi_check_system(nine_bytes_report)) {
+ ts->contact_size = 9;
+
+ dev_dbg(&ts->client->dev,
+ "Non-standard 9-bytes report format quirk\n");
+ }
+
error = input_mt_init_slots(ts->input_dev, ts->max_touch_num,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
if (error) {
ts->client = client;
i2c_set_clientdata(client, ts);
init_completion(&ts->firmware_loading_complete);
+ ts->contact_size = GOODIX_CONTACT_SIZE;
error = goodix_get_gpio_config(ts);
if (error)
for (i = 0, y = 0; i < ts->chip_info->max_fingers; i++, y += 3) {
finger[i].is_valid = buf[i + y] >> 7;
if (finger[i].is_valid) {
- finger[i].x = ((buf[i + y] & 0x0070) << 4) | buf[i + 1];
- finger[i].y = ((buf[i + y] & 0x0007) << 8) | buf[i + 2];
+ finger[i].x = ((buf[i + y] & 0x0070) << 4) |
+ buf[i + y + 1];
+ finger[i].y = ((buf[i + y] & 0x0007) << 8) |
+ buf[i + y + 2];
/* st1232 includes a z-axis / touch strength */
if (ts->chip_info->have_z)
retry:
type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
- pasid = PPR_PASID(*(u64 *)&event[0]);
+ pasid = (event[0] & EVENT_DOMID_MASK_HI) |
+ (event[1] & EVENT_DOMID_MASK_LO);
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
address = (u64)(((u64)event[3]) << 32) | event[2];
address, flags);
break;
case EVENT_TYPE_PAGE_TAB_ERR:
- dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
+ dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
break;
* to 64 bits.
*/
static bool increase_address_space(struct protection_domain *domain,
+ unsigned long address,
gfp_t gfp)
{
unsigned long flags;
spin_lock_irqsave(&domain->lock, flags);
- if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
- /* address space already 64 bit large */
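+ /* re-check under the lock: another CPU may have grown the address space already */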
+ if (address <= PM_LEVEL_SIZE(domain->mode) ||
+ WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
goto out;
pte = (void *)get_zeroed_page(gfp);
BUG_ON(!is_power_of_2(page_size));
while (address > PM_LEVEL_SIZE(domain->mode))
- *updated = increase_address_space(domain, gfp) || *updated;
+ *updated = increase_address_space(domain, address, gfp) || *updated;
level = domain->mode - 1;
pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
#define EVENT_TYPE_INV_PPR_REQ 0x9
#define EVENT_DEVID_MASK 0xffff
#define EVENT_DEVID_SHIFT 0
-#define EVENT_DOMID_MASK 0xffff
-#define EVENT_DOMID_SHIFT 0
+#define EVENT_DOMID_MASK_LO 0xffff
+#define EVENT_DOMID_MASK_HI 0xf0000
#define EVENT_FLAGS_MASK 0xfff
#define EVENT_FLAGS_SHIFT 0x10
return 0;
out_clear_smmu:
+ __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
smmu_domain->smmu = NULL;
out_unlock:
mutex_unlock(&smmu_domain->init_mutex);
if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
dma_mask = dev->coherent_dma_mask;
- if (dma_mask >= dma_get_required_mask(dev))
+ if (dma_mask >= dma_direct_get_required_mask(dev))
return false;
/*
return nelems;
}
+static u64 intel_get_required_mask(struct device *dev)
+{
+ if (!iommu_need_mapping(dev))
+ return dma_direct_get_required_mask(dev);
+ return DMA_BIT_MASK(32);
+}
+
static const struct dma_map_ops intel_dma_ops = {
.alloc = intel_alloc_coherent,
.free = intel_free_coherent,
.dma_supported = dma_direct_supported,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
+ .get_required_mask = intel_get_required_mask,
};
static void
#define ARM_MALI_LPAE_TTBR_READ_INNER BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER BIT(4)
+#define ARM_MALI_LPAE_MEMATTR_IMP_DEF 0x88ULL
+#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
+
/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
- struct io_pgtable *iop;
+ struct arm_lpae_io_pgtable *data;
- if (cfg->ias != 48 || cfg->oas > 40)
+ /* No quirks for Mali (hopefully) */
+ if (cfg->quirks)
+ return NULL;
+
+ if (cfg->ias > 48 || cfg->oas > 40)
return NULL;
cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
- iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
- if (iop) {
- u64 mair, ttbr;
- /* Copy values as union fields overlap */
- mair = cfg->arm_lpae_s1_cfg.mair[0];
- ttbr = cfg->arm_lpae_s1_cfg.ttbr[0];
+ data = arm_lpae_alloc_pgtable(cfg);
+ if (!data)
+ return NULL;
- cfg->arm_mali_lpae_cfg.memattr = mair;
- cfg->arm_mali_lpae_cfg.transtab = ttbr |
- ARM_MALI_LPAE_TTBR_READ_INNER |
- ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
+ /* Mali seems to need a full 4-level table regardless of IAS */
+ if (data->levels < ARM_LPAE_MAX_LEVELS) {
+ data->levels = ARM_LPAE_MAX_LEVELS;
+ data->pgd_size = sizeof(arm_lpae_iopte);
}
+ /*
+ * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
+ * best we can do is mimic the out-of-tree driver and hope that the
+ * "implementation-defined caching policy" is good enough. Similarly,
+ * we'll use it for the sake of a valid attribute for our 'device'
+ * index, although callers should never request that in practice.
+ */
+ cfg->arm_mali_lpae_cfg.memattr =
+ (ARM_MALI_LPAE_MEMATTR_IMP_DEF
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
+ (ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
+ (ARM_MALI_LPAE_MEMATTR_IMP_DEF
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
- return iop;
+ data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+ if (!data->pgd)
+ goto out_free_data;
+
+ /* Ensure the empty pgd is visible before TRANSTAB can be written */
+ wmb();
+
+ cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
+ ARM_MALI_LPAE_TTBR_READ_INNER |
+ ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
+ return &data->iop;
+
+out_free_data:
+ kfree(data);
+ return NULL;
}
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);
- irq = platform_get_irq(pdev, 0);
-
/*
* Determine if this IPMMU instance is a root device by checking for
* the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
/* Root devices have mandatory IRQs */
if (ipmmu_is_root(mmu)) {
+ irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no IRQ found\n");
return irq;
struct device *dev;
void __iomem **bases;
int num_mmu;
+ int num_irq;
struct clk_bulk_data *clocks;
int num_clocks;
bool reset_disabled;
struct rk_iommu *iommu;
struct resource *res;
int num_res = pdev->num_resources;
- int err, i, irq;
+ int err, i;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
if (iommu->num_mmu == 0)
return PTR_ERR(iommu->bases[0]);
+ iommu->num_irq = platform_irq_count(pdev);
+ if (iommu->num_irq < 0)
+ return iommu->num_irq;
+
iommu->reset_disabled = device_property_read_bool(dev,
"rockchip,disable-mmu-reset");
pm_runtime_enable(dev);
- i = 0;
- while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
+ for (i = 0; i < iommu->num_irq; i++) {
+ int irq = platform_get_irq(pdev, i);
+
if (irq < 0)
return irq;
static void rk_iommu_shutdown(struct platform_device *pdev)
{
struct rk_iommu *iommu = platform_get_drvdata(pdev);
- int i = 0, irq;
+ int i;
+
+ for (i = 0; i < iommu->num_irq; i++) {
+ int irq = platform_get_irq(pdev, i);
- while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
devm_free_irq(iommu->dev, irq, iommu);
+ }
pm_runtime_force_suspend(&pdev->dev);
}
/* FIC Registers */
#define AL_FIC_CAUSE 0x00
+#define AL_FIC_SET_CAUSE 0x08
#define AL_FIC_MASK 0x10
#define AL_FIC_CONTROL 0x28
chained_irq_exit(irqchip, desc);
}
+static int al_fic_irq_retrigger(struct irq_data *data)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct al_fic *fic = gc->private;
+
+ writel_relaxed(BIT(data->hwirq), fic->base + AL_FIC_SET_CAUSE);
+
+ return 1;
+}
+
static int al_fic_register(struct device_node *node,
struct al_fic *fic)
{
gc->chip_types->chip.irq_unmask = irq_gc_mask_clr_bit;
gc->chip_types->chip.irq_ack = irq_gc_ack_clr_bit;
gc->chip_types->chip.irq_set_type = al_fic_irq_set_type;
+ gc->chip_types->chip.irq_retrigger = al_fic_irq_retrigger;
gc->chip_types->chip.flags = IRQCHIP_SKIP_SET_WAKE;
gc->private = fic;
static const struct of_device_id aic5_irq_fixups[] __initconst = {
{ .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
{ .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
+ { .compatible = "microchip,sam9x60", .data = sama5d3_aic_irq_fixup },
{ /* sentinel */ },
};
return aic5_of_init(node, parent, NR_SAMA5D4_IRQS);
}
IRQCHIP_DECLARE(sama5d4_aic5, "atmel,sama5d4-aic", sama5d4_aic5_of_init);
+
+#define NR_SAM9X60_IRQS 50
+
+static int __init sam9x60_aic5_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return aic5_of_init(node, parent, NR_SAM9X60_IRQS);
+}
+IRQCHIP_DECLARE(sam9x60_aic5, "microchip,sam9x60-aic", sam9x60_aic5_of_init);
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
-#define GIC_LINE_NR max(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
+#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
/*
}
}
-static void plic_irq_enable(struct irq_data *d)
+static void plic_irq_unmask(struct irq_data *d)
{
unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
cpu_online_mask);
plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
}
-static void plic_irq_disable(struct irq_data *d)
+static void plic_irq_mask(struct irq_data *d)
{
plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
}
if (cpu >= nr_cpu_ids)
return -EINVAL;
- if (!irqd_irq_disabled(d)) {
- plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
- plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
- }
+ plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+ plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
}
#endif
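+/*
+ * Complete the interrupt by writing the hwirq back to the per-hart
+ * claim/complete register.
+ */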
+static void plic_irq_eoi(struct irq_data *d)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+}
+
static struct irq_chip plic_chip = {
.name = "SiFive PLIC",
- /*
- * There is no need to mask/unmask PLIC interrupts. They are "masked"
- * by reading claim and "unmasked" when writing it back.
- */
- .irq_enable = plic_irq_enable,
- .irq_disable = plic_irq_disable,
+ .irq_mask = plic_irq_mask,
+ .irq_unmask = plic_irq_unmask,
+ .irq_eoi = plic_irq_eoi,
#ifdef CONFIG_SMP
.irq_set_affinity = plic_set_affinity,
#endif
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
- irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq);
+ irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq);
irq_set_chip_data(irq, NULL);
irq_set_noprobe(irq);
return 0;
hwirq);
else
generic_handle_irq(irq);
- writel(hwirq, claim);
}
csr_set(sie, SIE_SEIE);
}
static int clamped;
static struct wf_control *clamp_control;
-static struct dev_pm_qos_request qos_req;
+static struct freq_qos_request qos_req;
static unsigned int min_freq, max_freq;
static int clamp_set(struct wf_control *ct, s32 value)
}
clamped = value;
- return dev_pm_qos_update_request(&qos_req, freq);
+ return freq_qos_update_request(&qos_req, freq);
}
static int clamp_get(struct wf_control *ct, s32 *value)
min_freq = policy->cpuinfo.min_freq;
max_freq = policy->cpuinfo.max_freq;
+
+ ret = freq_qos_add_request(&policy->constraints, &qos_req, FREQ_QOS_MAX,
+ max_freq);
+
cpufreq_cpu_put(policy);
+ if (ret < 0) {
+ pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
+ ret);
+ return ret;
+ }
+
dev = get_cpu_device(0);
if (unlikely(!dev)) {
pr_warn("%s: No cpu device for cpu0\n", __func__);
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail;
}
clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL);
- if (clamp == NULL)
- return -ENOMEM;
-
- ret = dev_pm_qos_add_request(dev, &qos_req, DEV_PM_QOS_MAX_FREQUENCY,
- max_freq);
- if (ret < 0) {
- pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
- ret);
- goto free;
+ if (clamp == NULL) {
+ ret = -ENOMEM;
+ goto fail;
}
clamp->ops = &clamp_ops;
clamp->name = "cpufreq-clamp";
ret = wf_register_control(clamp);
if (ret)
- goto fail;
+ goto free;
+
clamp_control = clamp;
return 0;
- fail:
- dev_pm_qos_remove_request(&qos_req);
free:
kfree(clamp);
+ fail:
+ freq_qos_remove_request(&qos_req);
return ret;
}
{
if (clamp_control) {
wf_unregister_control(clamp_control);
- dev_pm_qos_remove_request(&qos_req);
+ freq_qos_remove_request(&qos_req);
}
}
static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
{
- return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOWAIT);
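+ /* GFP_NOIO may block but does not fail, so callers can drop their NULL checks */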
+ return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
}
static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
{
struct dm_cache_migration *mg;
- mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
- if (!mg)
- return NULL;
+ mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
memset(mg, 0, sizeof(*mg));
struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
- if (!cell_prealloc) {
- defer_bio(cache, bio);
- return false;
- }
build_key(oblock, end, &key);
r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
struct dm_bio_prison_cell_v2 *prealloc;
prealloc = alloc_prison_cell(cache);
- if (!prealloc) {
- DMERR_LIMIT("%s: alloc_prison_cell failed", cache_device_name(cache));
- mg_complete(mg, false);
- return -ENOMEM;
- }
/*
* Prevent writes to the block, but allow reads to continue.
}
mg = alloc_migration(cache);
- if (!mg) {
- policy_complete_background_work(cache->policy, op, false);
- background_work_end(cache);
- return -ENOMEM;
- }
mg->op = op;
mg->overwrite_bio = bio;
struct dm_bio_prison_cell_v2 *prealloc;
prealloc = alloc_prison_cell(cache);
- if (!prealloc) {
- invalidate_complete(mg, false);
- return -ENOMEM;
- }
build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
r = dm_cell_lock_v2(cache->prison, &key,
return -EPERM;
mg = alloc_migration(cache);
- if (!mg) {
- background_work_end(cache);
- return -ENOMEM;
- }
mg->overwrite_bio = bio;
mg->invalidate_cblock = cblock;
*
* NOTE: Must be called with the bucket lock held
*/
-struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
- unsigned long region_nr)
+static struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
+ unsigned long region_nr)
{
struct dm_clone_region_hydration *hd;
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
-#include <linux/semaphore.h>
#include "dm.h"
/* The on disk metadata handler */
struct dm_exception_store *store;
- /* Maximum number of in-flight COW jobs. */
- struct semaphore cow_count;
+ unsigned in_progress;
+ struct wait_queue_head in_progress_wait;
struct dm_kcopyd_client *kcopyd_client;
*/
#define DEFAULT_COW_THRESHOLD 2048
-static int cow_threshold = DEFAULT_COW_THRESHOLD;
-module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
+static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
+module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
goto bad_hash_tables;
}
- sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
+ init_waitqueue_head(&s->in_progress_wait);
s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(s->kcopyd_client)) {
dm_put_device(ti, s->origin);
+ WARN_ON(s->in_progress);
+
kfree(s);
}
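+/*
+ * in_progress is protected by in_progress_wait.lock, which is why
+ * wake_up_locked() can be used below.
+ */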
+static void account_start_copy(struct dm_snapshot *s)
+{
+ spin_lock(&s->in_progress_wait.lock);
+ s->in_progress++;
+ spin_unlock(&s->in_progress_wait.lock);
+}
+
+static void account_end_copy(struct dm_snapshot *s)
+{
+ spin_lock(&s->in_progress_wait.lock);
+ BUG_ON(!s->in_progress);
+ s->in_progress--;
+ if (likely(s->in_progress <= cow_threshold) &&
+ unlikely(waitqueue_active(&s->in_progress_wait)))
+ wake_up_locked(&s->in_progress_wait);
+ spin_unlock(&s->in_progress_wait.lock);
+}
+
+static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
+{
+ if (unlikely(s->in_progress > cow_threshold)) {
+ spin_lock(&s->in_progress_wait.lock);
+ if (likely(s->in_progress > cow_threshold)) {
+ /*
+ * NOTE: this throttle doesn't account for whether
+ * the caller is servicing an IO that will trigger a COW
+ * so excess throttling may result for chunks not required
+ * to be COW'd. But if cow_threshold was reached, extra
+ * throttling is unlikely to negatively impact performance.
+ */
+ DECLARE_WAITQUEUE(wait, current);
+ __add_wait_queue(&s->in_progress_wait, &wait);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_unlock(&s->in_progress_wait.lock);
+ if (unlock_origins)
+ up_read(&_origins_lock);
+ io_schedule();
+ remove_wait_queue(&s->in_progress_wait, &wait);
+ return false;
+ }
+ spin_unlock(&s->in_progress_wait.lock);
+ }
+ return true;
+}
+
/*
* Flush a list of buffers.
*/
}
}
-static int do_origin(struct dm_dev *origin, struct bio *bio);
+static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
/*
* Flush a list of buffers.
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
- r = do_origin(s->origin, bio);
+ r = do_origin(s->origin, bio, false);
if (r == DM_MAPIO_REMAPPED)
generic_make_request(bio);
bio = n;
rb_link_node(&pe->out_of_order_node, parent, p);
rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
}
- up(&s->cow_count);
+ account_end_copy(s);
}
/*
dest.count = src.count;
/* Hand over to kcopyd */
- down(&s->cow_count);
+ account_start_copy(s);
dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
pe->full_bio = bio;
pe->full_bio_end_io = bio->bi_end_io;
- down(&s->cow_count);
+ account_start_copy(s);
callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
copy_callback, pe);
struct bio *bio = context;
struct dm_snapshot *s = bio->bi_private;
- up(&s->cow_count);
+ account_end_copy(s);
bio->bi_status = write_err ? BLK_STS_IOERR : 0;
bio_endio(bio);
}
dest.sector = bio->bi_iter.bi_sector;
dest.count = s->store->chunk_size;
- down(&s->cow_count);
+ account_start_copy(s);
WARN_ON_ONCE(bio->bi_private);
bio->bi_private = s;
dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
if (!s->valid)
return DM_MAPIO_KILL;
+ if (bio_data_dir(bio) == WRITE) {
+ while (unlikely(!wait_for_in_progress(s, false)))
+ ; /* wait_for_in_progress() has slept */
+ }
+
down_read(&s->lock);
dm_exception_table_lock(&lock);
if (bio_data_dir(bio) == WRITE) {
up_write(&s->lock);
- return do_origin(s->origin, bio);
+ return do_origin(s->origin, bio, false);
}
out_unlock:
/*
* Called on a write from the origin driver.
*/
-static int do_origin(struct dm_dev *origin, struct bio *bio)
+static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
{
struct origin *o;
int r = DM_MAPIO_REMAPPED;
+again:
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
- if (o)
+ if (o) {
+ if (limit) {
+ struct dm_snapshot *s;
+ list_for_each_entry(s, &o->snapshots, list)
+ if (unlikely(!wait_for_in_progress(s, true)))
+ goto again;
+ }
+
r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
+ }
up_read(&_origins_lock);
return r;
dm_accept_partial_bio(bio, available_sectors);
/* Only tell snapshots if this is a write */
- return do_origin(o->dev, bio);
+ return do_origin(o->dev, bio, true);
}
/*
} else {
pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
mdname(mddev));
- pr_err("md/raid0: please set raid.default_layout to 1 or 2\n");
+ pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
err = -ENOTSUPP;
goto abort;
}
if (!cnt) {
rc = -ENODEV;
pci_dev_busy = 1;
- goto err_out;
+ goto err_out_int;
}
jm = kzalloc(sizeof(struct jmb38x_ms)
static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_irq_suspend,
mt6397_irq_resume);
+struct chip_data {
+ u32 cid_addr;
+ u32 cid_shift;
+};
+
+static const struct chip_data mt6323_core = {
+ .cid_addr = MT6323_CID,
+ .cid_shift = 0,
+};
+
+static const struct chip_data mt6397_core = {
+ .cid_addr = MT6397_CID,
+ .cid_shift = 0,
+};
+
static int mt6397_probe(struct platform_device *pdev)
{
int ret;
unsigned int id;
struct mt6397_chip *pmic;
+ const struct chip_data *pmic_core;
pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
if (!pmic)
if (!pmic->regmap)
return -ENODEV;
- platform_set_drvdata(pdev, pmic);
+ pmic_core = of_device_get_match_data(&pdev->dev);
+ if (!pmic_core)
+ return -ENODEV;
- ret = regmap_read(pmic->regmap, MT6397_CID, &id);
+ ret = regmap_read(pmic->regmap, pmic_core->cid_addr, &id);
if (ret) {
- dev_err(pmic->dev, "Failed to read chip id: %d\n", ret);
+ dev_err(&pdev->dev, "Failed to read chip id: %d\n", ret);
return ret;
}
+ pmic->chip_id = (id >> pmic_core->cid_shift) & 0xff;
+
+ platform_set_drvdata(pdev, pmic);
+
pmic->irq = platform_get_irq(pdev, 0);
if (pmic->irq <= 0)
return pmic->irq;
- switch (id & 0xff) {
- case MT6323_CHIP_ID:
- pmic->int_con[0] = MT6323_INT_CON0;
- pmic->int_con[1] = MT6323_INT_CON1;
- pmic->int_status[0] = MT6323_INT_STATUS0;
- pmic->int_status[1] = MT6323_INT_STATUS1;
- ret = mt6397_irq_init(pmic);
- if (ret)
- return ret;
+ ret = mt6397_irq_init(pmic);
+ if (ret)
+ return ret;
+ switch (pmic->chip_id) {
+ case MT6323_CHIP_ID:
ret = devm_mfd_add_devices(&pdev->dev, -1, mt6323_devs,
ARRAY_SIZE(mt6323_devs), NULL,
0, pmic->irq_domain);
case MT6391_CHIP_ID:
case MT6397_CHIP_ID:
- pmic->int_con[0] = MT6397_INT_CON0;
- pmic->int_con[1] = MT6397_INT_CON1;
- pmic->int_status[0] = MT6397_INT_STATUS0;
- pmic->int_status[1] = MT6397_INT_STATUS1;
- ret = mt6397_irq_init(pmic);
- if (ret)
- return ret;
-
ret = devm_mfd_add_devices(&pdev->dev, -1, mt6397_devs,
ARRAY_SIZE(mt6397_devs), NULL,
0, pmic->irq_domain);
break;
default:
- dev_err(&pdev->dev, "unsupported chip: %d\n", id);
+ dev_err(&pdev->dev, "unsupported chip: %d\n", pmic->chip_id);
return -ENODEV;
}
}
static const struct of_device_id mt6397_of_match[] = {
- { .compatible = "mediatek,mt6397" },
- { .compatible = "mediatek,mt6323" },
- { }
+ {
+ .compatible = "mediatek,mt6323",
+ .data = &mt6323_core,
+ }, {
+ .compatible = "mediatek,mt6397",
+ .data = &mt6397_core,
+ }, {
+ /* sentinel */
+ }
};
MODULE_DEVICE_TABLE(of, mt6397_of_match);
cq_host->slot[tag].flags = 0;
cq_host->qcnt += 1;
-
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
pr_debug("%s: cqhci: doorbell not set for tag %d\n",
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
+#include <linux/dma/mxs-dma.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
ssp->ssp_pio_words[2] = cmd1;
ssp->dma_dir = DMA_NONE;
ssp->slave_dirn = DMA_TRANS_NONE;
- desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
+ desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
if (!desc)
goto out;
ssp->ssp_pio_words[2] = cmd1;
ssp->dma_dir = DMA_NONE;
ssp->slave_dirn = DMA_TRANS_NONE;
- desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
+ desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
if (!desc)
goto out;
host->data = data;
ssp->dma_dir = dma_data_dir;
ssp->slave_dirn = slave_dirn;
- desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
if (!desc)
goto out;
struct tmio_mmc_dma *dma_priv;
struct tmio_mmc_host *host;
struct renesas_sdhi *priv;
+ int num_irqs, irq, ret, i;
struct resource *res;
- int irq, ret, i;
u16 ver;
of_data = of_device_get_match_data(&pdev->dev);
host->hs400_complete = renesas_sdhi_hs400_complete;
}
- i = 0;
- while (1) {
+ num_irqs = platform_irq_count(pdev);
+ if (num_irqs < 0) {
+ ret = num_irqs;
+ goto eirq;
+ }
+
+ /* There must be at least one IRQ source */
+ if (!num_irqs) {
+ ret = -ENXIO;
+ goto eirq;
+ }
+
+ for (i = 0; i < num_irqs; i++) {
irq = platform_get_irq(pdev, i);
- if (irq < 0)
- break;
- i++;
+ if (irq < 0) {
+ ret = irq;
+ goto eirq;
+ }
+
ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
goto eirq;
}
- /* There must be at least one IRQ source */
- if (!i) {
- ret = irq;
- goto eirq;
- }
-
dev_info(&pdev->dev, "%s base at 0x%08lx max clock rate %u MHz\n",
mmc_hostname(host->mmc), (unsigned long)
(platform_get_resource(pdev, IORESOURCE_MEM, 0)->start),
};
static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = {
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.ops = &sdhci_iproc_32only_ops,
};
* on temperature
*/
if (temperature < -20000)
- phase_delay = min(max_window + 4 * max_len - 24,
+ phase_delay = min(max_window + 4 * (max_len - 1) - 24,
max_window +
DIV_ROUND_UP(13 * max_len, 16) * 4);
else if (temperature < 20000)
const char *name;
irq[0] = platform_get_irq(pdev, 0);
- irq[1] = platform_get_irq(pdev, 1);
- if (irq[0] < 0) {
- dev_err(dev, "Get irq error\n");
+ irq[1] = platform_get_irq_optional(pdev, 1);
+ if (irq[0] < 0)
return -ENXIO;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(dev, res);
* this to-be-skipped slave to send a packet out.
*/
old_arr = rtnl_dereference(bond->slave_arr);
- for (idx = 0; idx < old_arr->count; idx++) {
+ for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) {
if (skipslave == old_arr->arr[idx]) {
old_arr->arr[idx] =
old_arr->arr[old_arr->count-1];
loc = B53_EG_MIR_CTL;
b53_read16(dev, B53_MGMT_PAGE, loc, ®);
- reg &= ~MIRROR_MASK;
reg |= BIT(port);
b53_write16(dev, B53_MGMT_PAGE, loc, reg);
{
int i;
- mutex_init(&dev->stats_mutex);
- mutex_init(&dev->alu_mutex);
- mutex_init(&dev->vlan_mutex);
-
dev->ds->ops = &ksz8795_switch_ops;
for (i = 0; i < ARRAY_SIZE(ksz8795_switch_chips); i++) {
static int ksz8795_spi_probe(struct spi_device *spi)
{
+ struct regmap_config rc;
struct ksz_device *dev;
int i, ret;
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
- dev->regmap[i] = devm_regmap_init_spi(spi,
- &ksz8795_regmap_config
- [i]);
+ rc = ksz8795_regmap_config[i];
+ rc.lock_arg = &dev->regmap_mutex;
+ dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
if (IS_ERR(dev->regmap[i])) {
ret = PTR_ERR(dev->regmap[i]);
dev_err(&spi->dev,
static int ksz9477_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *i2c_id)
{
+ struct regmap_config rc;
struct ksz_device *dev;
int i, ret;
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
- dev->regmap[i] = devm_regmap_init_i2c(i2c,
- &ksz9477_regmap_config[i]);
+ rc = ksz9477_regmap_config[i];
+ rc.lock_arg = &dev->regmap_mutex;
+ dev->regmap[i] = devm_regmap_init_i2c(i2c, &rc);
if (IS_ERR(dev->regmap[i])) {
ret = PTR_ERR(dev->regmap[i]);
dev_err(&i2c->dev,
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Microchip KSZ9477 register definitions
*
* Copyright (C) 2017-2018 Microchip Technology Inc.
static int ksz9477_spi_probe(struct spi_device *spi)
{
+ struct regmap_config rc;
struct ksz_device *dev;
int i, ret;
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
- dev->regmap[i] = devm_regmap_init_spi(spi,
- &ksz9477_regmap_config[i]);
+ rc = ksz9477_regmap_config[i];
+ rc.lock_arg = &dev->regmap_mutex;
+ dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
if (IS_ERR(dev->regmap[i])) {
ret = PTR_ERR(dev->regmap[i]);
dev_err(&spi->dev,
}
mutex_init(&dev->dev_mutex);
- mutex_init(&dev->stats_mutex);
+ mutex_init(&dev->regmap_mutex);
mutex_init(&dev->alu_mutex);
mutex_init(&dev->vlan_mutex);
-/* SPDX-License-Identifier: GPL-2.0
- * Microchip switch driver common header
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip switch driver common header
*
* Copyright (C) 2017-2019 Microchip Technology Inc.
*/
const char *name;
struct mutex dev_mutex; /* device access */
- struct mutex stats_mutex; /* status access */
+ struct mutex regmap_mutex; /* regmap access */
struct mutex alu_mutex; /* ALU access */
struct mutex vlan_mutex; /* vlan access */
const struct ksz_dev_ops *dev_ops;
ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
+static inline void ksz_regmap_lock(void *__mtx)
+{
+ struct mutex *mtx = __mtx;
+ mutex_lock(mtx);
+}
+
+static inline void ksz_regmap_unlock(void *__mtx)
+{
+ struct mutex *mtx = __mtx;
+ mutex_unlock(mtx);
+}
+
/* Regmap tables generation */
#define KSZ_SPI_OP_RD 3
#define KSZ_SPI_OP_WR 2
.write_flag_mask = \
KSZ_SPI_OP_FLAG_MASK(KSZ_SPI_OP_WR, swp, \
regbits, regpad), \
+ .lock = ksz_regmap_lock, \
+ .unlock = ksz_regmap_unlock, \
.reg_format_endian = REGMAP_ENDIAN_BIG, \
.val_format_endian = REGMAP_ENDIAN_BIG \
}
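
For context on the .lock/.unlock/.lock_arg wiring above: when a regmap_config supplies custom lock callbacks, the regmap core invokes them around every register access and hands them .lock_arg, so several regmap instances (here, one per register window) can serialize on a single shared mutex. A minimal sketch with hypothetical ex_* names:

#include <linux/mutex.h>
#include <linux/regmap.h>

static DEFINE_MUTEX(ex_mutex);	/* shared by every regmap built below */

static void ex_regmap_lock(void *lock_arg)
{
	mutex_lock(lock_arg);
}

static void ex_regmap_unlock(void *lock_arg)
{
	mutex_unlock(lock_arg);
}

static const struct regmap_config ex_cfg = {
	.reg_bits = 8,
	.val_bits = 8,
	.lock     = ex_regmap_lock,
	.unlock   = ex_regmap_unlock,
	.lock_arg = &ex_mutex,
};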
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _SJA1105_H
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _SJA1105_DYNAMIC_CONFIG_H
#define _SJA1105_DYNAMIC_CONFIG_H
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _SJA1105_PTP_H
#define _SJA1105_PTP_H
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2016-2018, NXP Semiconductors
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2016-2018, NXP Semiconductors
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _SJA1105_STATIC_CONFIG_H
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _SJA1105_TAS_H
#define _SJA1105_TAS_H
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- aq_nic_set_packet_filter(aq_nic, ndev->flags);
-
- aq_nic_set_multicast_list(aq_nic, ndev);
+ (void)aq_nic_set_multicast_list(aq_nic, ndev);
}
static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
- unsigned int packet_filter = self->packet_filter;
+ const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ unsigned int packet_filter = ndev->flags;
struct netdev_hw_addr *ha = NULL;
unsigned int i = 0U;
+ int err = 0;
self->mc_list.count = 0;
if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
} else {
netdev_for_each_uc_addr(ha, ndev) {
ether_addr_copy(self->mc_list.ar[i++], ha->addr);
-
- if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
- break;
}
}
- if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
- packet_filter |= IFF_ALLMULTI;
- } else {
- netdev_for_each_mc_addr(ha, ndev) {
- ether_addr_copy(self->mc_list.ar[i++], ha->addr);
-
- if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
- break;
+ cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
+ if (cfg->is_mc_list_enabled) {
+ if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+ packet_filter |= IFF_ALLMULTI;
+ } else {
+ netdev_for_each_mc_addr(ha, ndev) {
+ ether_addr_copy(self->mc_list.ar[i++],
+ ha->addr);
+ }
}
}
if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
- packet_filter |= IFF_MULTICAST;
self->mc_list.count = i;
- self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
- self->mc_list.ar,
- self->mc_list.count);
+ err = hw_ops->hw_multicast_list_set(self->aq_hw,
+ self->mc_list.ar,
+ self->mc_list.count);
+ if (err < 0)
+ return err;
}
return aq_nic_set_packet_filter(self, packet_filter);
}
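
The rework above follows the usual ndo_set_rx_mode shape: copy perfect-filter entries while they fit, and fall back to IFF_ALLMULTI once the hardware table would overflow. Reduced to its essentials (EX_MAX_ADDRS and ex_build_mc_filter are hypothetical):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

#define EX_MAX_ADDRS 32	/* hypothetical hardware filter depth */

static void ex_build_mc_filter(struct net_device *ndev, u8 (*tbl)[ETH_ALEN],
			       unsigned int *flags)
{
	struct netdev_hw_addr *ha;
	unsigned int i = 0;

	if (netdev_mc_count(ndev) > EX_MAX_ADDRS) {
		/* Too many entries for the perfect filter: accept all. */
		*flags |= IFF_ALLMULTI;
		return;
	}

	netdev_for_each_mc_addr(ha, ndev)
		ether_addr_copy(tbl[i++], ha->addr);
}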
break;
buff->is_error |= buff_->is_error;
+ buff->is_cso_err |= buff_->is_cso_err;
} while (!buff_->is_eop);
err = 0;
goto err_exit;
}
- if (buff->is_error) {
+ if (buff->is_error || buff->is_cso_err) {
buff_ = buff;
do {
next_ = buff_->next,
cfg->is_vlan_force_promisc);
hw_atl_rpfl2multicast_flr_en_set(self,
- IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
+ IS_FILTER_ENABLED(IFF_ALLMULTI) &&
+ IS_FILTER_ENABLED(IFF_MULTICAST), 0);
hw_atl_rpfl2_accept_all_mc_packets_set(self,
- IS_FILTER_ENABLED(IFF_ALLMULTI));
+ IS_FILTER_ENABLED(IFF_ALLMULTI) &&
+ IS_FILTER_ENABLED(IFF_MULTICAST));
hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
- cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
hw_atl_rpfl2_uc_flr_en_set(self,
static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
+ int err;
+ u32 val;
+
hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
/* Invalidate Descriptor Cache to prevent writing to the cached
* descriptors and to the data pointer of those descriptors
*/
- hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
+ hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);
- return aq_hw_err_from_flags(self);
+ err = aq_hw_err_from_flags(self);
+
+ if (err)
+ goto err_exit;
+
+ readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
+ self, val, val == 1, 1000U, 10000U);
+
+err_exit:
+ return err;
}
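
readx_poll_timeout_atomic() above comes from linux/iopoll.h: it repeatedly calls the given accessor until the condition holds or the timeout expires, delaying with udelay() and never sleeping, which is why it is safe in this stop path. The same family has a readl-based variant; a self-contained sketch against a hypothetical status register:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

/* Wait for bit 0 of a (hypothetical) status register: poll every
 * 1 ms, give up after 10 ms, -ETIMEDOUT on failure.
 */
static int ex_wait_init_done(void __iomem *status_reg)
{
	u32 val;

	return readl_poll_timeout_atomic(status_reg, val, val & BIT(0),
					 1000U, 10000U);
}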
static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
}
-void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
+void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw)
{
+ u32 val;
+
+ val = aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
+ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
+ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT);
+
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
- init);
+ val ^ 1);
+}
+
+u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR,
+ RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK,
+ RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT);
}
void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_pkt_buff_size_per_tc,
u32 buffer);
-/* set rdm rx dma descriptor cache init */
-void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
+/* toggle rdm rx dma descriptor cache init */
+void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw);
+
+/* get rdm rx dma descriptor cache init done */
+u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw);
/* set rx xoff enable (per tc) */
void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
/* default value of bitfield rdm_desc_init_i */
#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
+/* rdm_desc_init_done_i bitfield definitions
+ * preprocessor definitions for the bitfield rdm_desc_init_done_i.
+ * port="pif_rdm_desc_init_done_i"
+ */
+
+/* register address for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR 0x00005a10
+/* bitmask for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK 0x00000001U
+/* inverted bitmask for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSKN 0xfffffffe
+/* lower bit position of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT 0U
+/* width of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_WIDTH 1
+/* default value of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_DEFAULT 0x0
+
+
/* rx int_desc_wrb_en bitfield definitions
* preprocessor definitions for the bitfield "int_desc_wrb_en".
* port="pif_rdm_int_desc_wrb_en_i"
/* Convert PHY temperature from 1/256 degree Celsius
* to 1/1000 degree Celsius.
*/
- *temp = temp_res * 1000 / 256;
+ *temp = (temp_res & 0xFFFF) * 1000 / 256;
return 0;
}
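
The added 0xFFFF mask keeps only the 16 register bits that actually carry the temperature before scaling. A standalone worked example of the 1/256 °C to millidegree conversion (the raw value is made up):

#include <stdio.h>

int main(void)
{
	unsigned int raw = 0xABCD1900u;	/* upper half: stale bus bits (hypothetical) */
	/* 0x1900 = 6400 in 1/256 degC units; 6400 * 1000 / 256 = 25000 mdegC. */
	int temp = (raw & 0xFFFF) * 1000 / 256;

	printf("%d.%03d degC\n", temp / 1000, temp % 1000);	/* 25.000 degC */
	return 0;
}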
default y
depends on (SSB_POSSIBLE && HAS_DMA) || PCI || BCM63XX || \
SIBYTE_SB1xxx_SOC
- select DIMLIB
---help---
If you have a network (Ethernet) chipset belonging to this class,
say Y.
select FIXED_PHY
select BCM7XXX_PHY
select MDIO_BCM_UNIMAC
+ select DIMLIB
help
This driver supports the built-in Ethernet MACs found in the
Broadcom BCM7xxx Set Top Box family chipset.
select MII
select PHYLIB
select FIXED_PHY
+ select DIMLIB
help
This driver supports the built-in Ethernet MACs found in the
Broadcom BCM7xxx Set Top Box family chipset using an internal
select LIBCRC32C
select NET_DEVLINK
select PAGE_POOL
+ select DIMLIB
---help---
This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
Ethernet cards. To compile this driver as a module, choose M here:
*/
if (priv->internal_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
+ int0_enable |= UMAC_IRQ_PHY_DET_R;
} else if (priv->ext_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
priv->irq0_stat = 0;
spin_unlock_irq(&priv->lock);
+ if (status & UMAC_IRQ_PHY_DET_R &&
+ priv->dev->phydev->autoneg != AUTONEG_ENABLE)
+ phy_init_hw(priv->dev->phydev);
+
/* Link UP/DOWN event */
- if (status & UMAC_IRQ_LINK_EVENT) {
- priv->dev->phydev->link = !!(status & UMAC_IRQ_LINK_UP);
+ if (status & UMAC_IRQ_LINK_EVENT)
phy_mac_interrupt(priv->dev->phydev);
- }
+
}
/* bcmgenet_isr1: handle Rx and Tx priority queues */
}
/* all other interested interrupts handled in bottom half */
- status &= UMAC_IRQ_LINK_EVENT;
+ status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
if (status) {
/* Save irq status for bottom-half processing. */
spin_lock_irqsave(&priv->lock, flags);
if (priv->internal_phy)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+ ret = bcmgenet_mii_connect(dev);
+ if (ret) {
+ netdev_err(dev, "failed to connect to PHY\n");
+ goto err_clk_disable;
+ }
+
/* take MAC out of reset */
bcmgenet_umac_reset(priv);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
+ ret = bcmgenet_mii_config(dev, true);
+ if (ret) {
+ netdev_err(dev, "unsupported PHY\n");
+ goto err_disconnect_phy;
+ }
+
bcmgenet_set_hw_addr(priv, dev->dev_addr);
if (priv->internal_phy) {
ret = bcmgenet_init_dma(priv);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
- goto err_clk_disable;
+ goto err_disconnect_phy;
}
/* Always enable ring 16 - descriptor ring */
goto err_irq0;
}
- ret = bcmgenet_mii_probe(dev);
- if (ret) {
- netdev_err(dev, "failed to connect to PHY\n");
- goto err_irq1;
- }
-
bcmgenet_netif_start(dev);
netif_tx_start_all_queues(dev);
return 0;
-err_irq1:
- free_irq(priv->irq1, priv);
err_irq0:
free_irq(priv->irq0, priv);
err_fini_dma:
bcmgenet_dma_teardown(priv);
bcmgenet_fini_dma(priv);
+err_disconnect_phy:
+ phy_disconnect(dev->phydev);
err_clk_disable:
if (priv->internal_phy)
bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
if (priv->internal_phy)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+ phy_init_hw(dev->phydev);
+
bcmgenet_umac_reset(priv);
init_umac(priv);
if (priv->wolopts)
clk_disable_unprepare(priv->clk_wol);
- phy_init_hw(dev->phydev);
-
/* Speed settings must be restored */
bcmgenet_mii_config(priv->dev, false);
#define EXT_PWR_DOWN_PHY_EN (1 << 20)
#define EXT_RGMII_OOB_CTRL 0x0C
+#define RGMII_MODE_EN_V123 (1 << 0)
#define RGMII_LINK (1 << 4)
#define OOB_DISABLE (1 << 5)
#define RGMII_MODE_EN (1 << 6)
/* MDIO routines */
int bcmgenet_mii_init(struct net_device *dev);
+int bcmgenet_mii_connect(struct net_device *dev);
int bcmgenet_mii_config(struct net_device *dev, bool init);
-int bcmgenet_mii_probe(struct net_device *dev);
void bcmgenet_mii_exit(struct net_device *dev);
void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
void bcmgenet_mii_setup(struct net_device *dev);
bcmgenet_fixed_phy_link_update);
}
+int bcmgenet_mii_connect(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device_node *dn = priv->pdev->dev.of_node;
+ struct phy_device *phydev;
+ u32 phy_flags = 0;
+ int ret;
+
+ /* Communicate the integrated PHY revision */
+ if (priv->internal_phy)
+ phy_flags = priv->gphy_rev;
+
+ /* Initialize link state variables that bcmgenet_mii_setup() uses */
+ priv->old_link = -1;
+ priv->old_speed = -1;
+ priv->old_duplex = -1;
+ priv->old_pause = -1;
+
+ if (dn) {
+ phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
+ phy_flags, priv->phy_interface);
+ if (!phydev) {
+ pr_err("could not attach to PHY\n");
+ return -ENODEV;
+ }
+ } else {
+ phydev = dev->phydev;
+ phydev->dev_flags = phy_flags;
+
+ ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
+ priv->phy_interface);
+ if (ret) {
+ pr_err("could not attach to PHY\n");
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
int bcmgenet_mii_config(struct net_device *dev, bool init)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
*/
if (priv->ext_phy) {
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
- reg |= RGMII_MODE_EN | id_mode_dis;
+ reg |= id_mode_dis;
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
+ reg |= RGMII_MODE_EN_V123;
+ else
+ reg |= RGMII_MODE_EN;
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
}
- if (init)
- dev_info(kdev, "configuring instance for %s\n", phy_name);
-
- return 0;
-}
-
-int bcmgenet_mii_probe(struct net_device *dev)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
- struct device_node *dn = priv->pdev->dev.of_node;
- struct phy_device *phydev;
- u32 phy_flags;
- int ret;
-
- /* Communicate the integrated PHY revision */
- phy_flags = priv->gphy_rev;
-
- /* Initialize link state variables that bcmgenet_mii_setup() uses */
- priv->old_link = -1;
- priv->old_speed = -1;
- priv->old_duplex = -1;
- priv->old_pause = -1;
-
- if (dn) {
- phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
- phy_flags, priv->phy_interface);
- if (!phydev) {
- pr_err("could not attach to PHY\n");
- return -ENODEV;
- }
- } else {
- phydev = dev->phydev;
- phydev->dev_flags = phy_flags;
+ if (init) {
+ linkmode_copy(phydev->advertising, phydev->supported);
- ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
- priv->phy_interface);
- if (ret) {
- pr_err("could not attach to PHY\n");
- return -ENODEV;
- }
- }
+ /* The internal PHY has its link interrupts routed to the
+ * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
+ * that prevents the signaling of link UP interrupts when
+	 * the link operates at 10Mbps, so fall back to polling for
+ * those versions of GENET.
+ */
+ if (priv->internal_phy && !GENET_IS_V5(priv))
+ phydev->irq = PHY_IGNORE_INTERRUPT;
- /* Configure port multiplexer based on what the probed PHY device since
- * reading the 'max-speed' property determines the maximum supported
- * PHY speed which is needed for bcmgenet_mii_config() to configure
- * things appropriately.
- */
- ret = bcmgenet_mii_config(dev, true);
- if (ret) {
- phy_disconnect(dev->phydev);
- return ret;
+ dev_info(kdev, "configuring instance for %s\n", phy_name);
}
- linkmode_copy(phydev->advertising, phydev->supported);
-
- /* The internal PHY has its link interrupts routed to the
- * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
- * that prevents the signaling of link UP interrupts when
- * the link operates at 10Mbps, so fallback to polling for
- * those versions of GENET.
- */
- if (priv->internal_phy && !GENET_IS_V5(priv))
- dev->phydev->irq = PHY_IGNORE_INTERRUPT;
-
return 0;
}
return err;
}
- *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+ *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
if (IS_ERR(*tx_clk))
- *tx_clk = NULL;
+ return PTR_ERR(*tx_clk);
- *rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
+ *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
if (IS_ERR(*rx_clk))
- *rx_clk = NULL;
+ return PTR_ERR(*rx_clk);
- *tsu_clk = devm_clk_get(&pdev->dev, "tsu_clk");
+ *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
if (IS_ERR(*tsu_clk))
- *tsu_clk = NULL;
+ return PTR_ERR(*tsu_clk);
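
devm_clk_get_optional() changes the failure semantics rather than just the spelling: it returns NULL when the clock is simply absent from the device tree, and an ERR_PTR only for real failures such as -EPROBE_DEFER, which the old code silently swallowed. Since a NULL clk is a no-op for the whole clk API, the pattern reduces to this sketch (ex_get_clocks is hypothetical):

#include <linux/clk.h>
#include <linux/platform_device.h>

static int ex_get_clocks(struct platform_device *pdev, struct clk **tx_clk)
{
	/* NULL if "tx_clk" is not wired up in DT; ERR_PTR on real errors. */
	*tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
	if (IS_ERR(*tx_clk))
		return PTR_ERR(*tx_clk);

	/* clk_prepare_enable(NULL) is a no-op and returns 0. */
	return clk_prepare_enable(*tx_clk);
}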
err = clk_prepare_enable(*pclk);
if (err) {
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/* cavium_ptp.h - PTP 1588 clock on Cavium hardware
* Copyright (c) 2003-2015, 2017 Cavium, Inc.
*/
priv->rx_td_enabled = enable;
}
+static void update_tx_fqids(struct dpaa2_eth_priv *priv);
+
static int link_state_update(struct dpaa2_eth_priv *priv)
{
struct dpni_link_state state = {0};
goto out;
if (state.up) {
+ update_tx_fqids(priv);
netif_carrier_on(priv->net_dev);
netif_tx_start_all_queues(priv->net_dev);
} else {
return 0;
}
+static void update_tx_fqids(struct dpaa2_eth_priv *priv)
+{
+ struct dpni_queue_id qid = {0};
+ struct dpaa2_eth_fq *fq;
+ struct dpni_queue queue;
+ int i, j, err;
+
+	/* We only use Tx FQIDs for FQID-based enqueue, so check
+	 * whether the DPNI version supports it before updating the FQIDs
+ */
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
+ DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
+ return;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ if (fq->type != DPAA2_TX_CONF_FQ)
+ continue;
+ for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX, j, fq->flowid,
+ &queue, &qid);
+ if (err)
+ goto out_err;
+
+ fq->tx_fqid[j] = qid.fqid;
+ if (fq->tx_fqid[j] == 0)
+ goto out_err;
+ }
+ }
+
+ priv->enqueue = dpaa2_eth_enqueue_fq;
+
+ return;
+
+out_err:
+ netdev_info(priv->net_dev,
+ "Error reading Tx FQID, fallback to QDID-based enqueue\n");
+ priv->enqueue = dpaa2_eth_enqueue_qd;
+}
+
/* Configure the DPNI object this interface is associated with */
static int setup_dpni(struct fsl_mc_device *ls_dev)
{
if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
link_state_update(netdev_priv(net_dev));
+ if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED)
+ set_mac_addr(netdev_priv(net_dev));
+
return IRQ_HANDLED;
}
}
err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
- DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
+ DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
+ DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
if (err < 0) {
dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
goto free_irq;
*/
#define DPNI_IRQ_INDEX 0
/**
- * IRQ event - indicates a change in link state
+ * IRQ events:
+ * indicates a change in link state
+ * indicates a change in endpoint
*/
#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
+#define DPNI_IRQ_EVENT_ENDPOINT_CHANGED 0x00000002
int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
u32 cmd_flags,
#define HNAE3_MOD_VERSION "1.0"
+#define HNAE3_MIN_VECTOR_NUM 2 /* first one for misc, another for IO */
+
/* Device IDs */
#define HNAE3_DEV_ID_GE 0xA220
#define HNAE3_DEV_ID_25GE 0xA221
hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+	/* The NIC's MSI-X vector count always equals the RoCE's. */
+ hdev->num_nic_msi = hdev->num_roce_msi;
+
/* PF should have NIC vectors and Roce vectors,
* NIC vectors are queued before Roce vectors.
*/
hdev->num_msi =
hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+
+ hdev->num_nic_msi = hdev->num_msi;
+ }
+
+ if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
+ dev_err(&hdev->pdev->dev,
+ "Just %u msi resources, not enough for pf(min:2).\n",
+ hdev->num_nic_msi);
+ return -EINVAL;
}
return 0;
kinfo->rss_size = min_t(u16, hdev->rss_size_max,
vport->alloc_tqps / hdev->tm_info.num_tc);
+	/* ensure a one-to-one mapping between IRQs and queues by default */
+ kinfo->rss_size = min_t(u16, kinfo->rss_size,
+ (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
+
return 0;
}
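
To make the clamp concrete with made-up numbers: if num_nic_msi = 9 and num_tc = 2, one vector is reserved for misc events, so the largest rss_size that still gives each queue its own interrupt is (9 - 1) / 2 = 4. As a sketch:

#include <linux/kernel.h>

/* Hypothetical helper: one vector is for misc events, the rest are
 * shared evenly by the traffic classes.
 */
static u16 ex_clamp_rss_size(u16 rss_size, u16 num_nic_msi, u16 num_tc)
{
	return min_t(u16, rss_size, (num_nic_msi - 1) / num_tc);
}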
int vectors;
int i;
- vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+ vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
+ hdev->num_msi,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (vectors < 0) {
dev_err(&pdev->dev,
hdev->num_msi = vectors;
hdev->num_msi_left = vectors;
+
hdev->base_msi_vector = pdev->irq;
hdev->roce_base_vector = hdev->base_msi_vector +
hdev->roce_base_msix_offset;
int alloc = 0;
int i, j;
+ vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
vector_num = min(hdev->num_msi_left, vector_num);
for (j = 0; j < vector_num; j++) {
u32 base_msi_vector;
u16 *vector_status;
int *vector_irq;
+ u16 num_nic_msi; /* Num of nic vectors for this PF */
u16 num_roce_msi; /* Num of roce vectors for this PF */
int roce_base_vector;
kinfo->rss_size = kinfo->req_rss_size;
} else if (kinfo->rss_size > max_rss_size ||
(!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
+		/* If the user has not set rss_size, clamp it against the
+		 * number of valid MSI vectors so that TQPs and IRQs map
+		 * one-to-one by default.
+		 */
+ if (!kinfo->req_rss_size)
+ max_rss_size = min_t(u16, max_rss_size,
+ (hdev->num_nic_msi - 1) /
+ kinfo->num_tc);
+
/* Set to the maximum specification value (max_rss_size). */
- dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
- kinfo->rss_size, max_rss_size);
kinfo->rss_size = max_rss_size;
}
kinfo->tqp[i] = &hdev->htqp[i].q;
}
+	/* After initializing the maximum rss_size and TQP count, adjust the
+	 * default TQP number and rss_size to match the actual vector count.
+	 */
+ kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
+ kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc,
+ kinfo->rss_size);
+
return 0;
}
int alloc = 0;
int i, j;
+ vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
vector_num = min(hdev->num_msi_left, vector_num);
for (j = 0; j < vector_num; j++) {
int vectors;
int i;
- if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
+ if (hnae3_dev_roce_supported(hdev))
vectors = pci_alloc_irq_vectors(pdev,
hdev->roce_base_msix_offset + 1,
hdev->num_msi,
PCI_IRQ_MSIX);
else
- vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+ vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
+ hdev->num_msi,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (vectors < 0) {
hdev->num_msi = vectors;
hdev->num_msi_left = vectors;
+
hdev->base_msi_vector = pdev->irq;
hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
req = (struct hclgevf_query_res_cmd *)desc.data;
- if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
+ if (hnae3_dev_roce_supported(hdev)) {
hdev->roce_base_msix_offset =
hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
HCLGEVF_MSIX_OFT_ROCEE_M,
hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+	/* The NIC's MSI-X vector count always equals the RoCE's. */
+ hdev->num_nic_msix = hdev->num_roce_msix;
+
/* VF should have NIC vectors and Roce vectors, NIC vectors
* are queued before Roce vectors. The offset is fixed to 64.
*/
hdev->num_msi =
hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+
+ hdev->num_nic_msix = hdev->num_msi;
+ }
+
+ if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
+ dev_err(&hdev->pdev->dev,
+ "Just %u msi resources, not enough for vf(min:2).\n",
+ hdev->num_nic_msix);
+ return -EINVAL;
}
return 0;
u16 num_msi;
u16 num_msi_left;
u16 num_msi_used;
+ u16 num_nic_msix; /* Num of nic vectors for this VF */
u16 num_roce_msix; /* Num of roce vectors for this VF */
u16 roce_base_msix_offset;
int roce_base_vector;
#define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
+#define LIB82596_DMA_ATTR DMA_ATTR_NON_CONSISTENT
+
#define DMA_WBACK(ndev, addr, len) \
do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
unregister_netdev (dev);
dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma,
- lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+ lp->dma_addr, LIB82596_DMA_ATTR);
free_netdev (dev);
return 0;
}
dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma),
&lp->dma_addr, GFP_KERNEL,
- DMA_ATTR_NON_CONSISTENT);
+ LIB82596_DMA_ATTR);
if (!dma) {
printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
return -ENOMEM;
i = register_netdev(dev);
if (i) {
dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma),
- dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+ dma, lp->dma_addr, LIB82596_DMA_ATTR);
return i;
}
static const char sni_82596_string[] = "snirm_82596";
+#define LIB82596_DMA_ATTR 0
+
#define DMA_WBACK(priv, addr, len) do { } while (0)
#define DMA_INV(priv, addr, len) do { } while (0)
#define DMA_WBACK_INV(priv, addr, len) do { } while (0)
unregister_netdev(dev);
dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma,
- lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+ lp->dma_addr, LIB82596_DMA_ATTR);
iounmap(lp->ca);
iounmap(lp->mpu_port);
free_netdev (dev);
if (test_bit(0, &adapter->resetting) &&
adapter->reset_reason == VNIC_RESET_MOBILITY) {
- u64 val = (0xff000000) | scrq->hw_irq;
+ struct irq_desc *desc = irq_to_desc(scrq->irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
- rc = plpar_hcall_norets(H_EOI, val);
- if (rc)
- dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
- val, rc);
+ chip->irq_eoi(&desc->irq_data);
}
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
ge_mode = 0;
switch (state->interface) {
case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
ge_mode = 1;
break;
case PHY_INTERFACE_MODE_REVMII:
* it means that all the previous stes are the same,
* if so, this rule is duplicated.
*/
- if (mlx5dr_ste_is_last_in_rule(nic_matcher,
- matched_ste->ste_chain_location)) {
- mlx5dr_info(dmn, "Duplicate rule inserted, aborting!!\n");
- return NULL;
- }
- return matched_ste;
+ if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
+ return matched_ste;
+
+ mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
}
if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
devlink = priv_to_devlink(mlxsw_sp->core);
in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
local_port);
+ skb_push(skb, ETH_HLEN);
devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
consume_skb(skb);
}
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
- /* Reset the ethernet controller */
- __lpc_eth_reset(pldat);
-
/* then shut everything down to save power */
__lpc_eth_shutdown(pldat);
#define lif_to_txqcq(lif, i) ((lif)->txqcqs[i].qcq)
#define lif_to_rxqcq(lif, i) ((lif)->rxqcqs[i].qcq)
+#define lif_to_txstats(lif, i) ((lif)->txqcqs[i].stats->tx)
+#define lif_to_rxstats(lif, i) ((lif)->rxqcqs[i].stats->rx)
#define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q)
#define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q)
/* rx stats */
total += MAX_Q(lif) * IONIC_NUM_RX_STATS;
- if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_UP, lif->state) &&
+ test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
/* tx debug stats */
total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS +
IONIC_NUM_TX_Q_STATS +
*buf += ETH_GSTRING_LEN;
}
- if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_UP, lif->state) &&
+ test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
snprintf(*buf, ETH_GSTRING_LEN,
"txq_%d_%s",
*buf += ETH_GSTRING_LEN;
}
- if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_UP, lif->state) &&
+ test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
snprintf(*buf, ETH_GSTRING_LEN,
"rxq_%d_cq_%s",
{
struct ionic_lif_sw_stats lif_stats;
struct ionic_qcq *txqcq, *rxqcq;
+ struct ionic_tx_stats *txstats;
+ struct ionic_rx_stats *rxstats;
int i, q_num;
ionic_get_lif_stats(lif, &lif_stats);
}
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- txqcq = lif_to_txqcq(lif, q_num);
+ txstats = &lif_to_txstats(lif, q_num);
for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->stats->tx,
+ **buf = IONIC_READ_STAT64(txstats,
&ionic_tx_stats_desc[i]);
(*buf)++;
}
- if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_UP, lif->state) &&
+ test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ txqcq = lif_to_txqcq(lif, q_num);
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
**buf = IONIC_READ_STAT64(&txqcq->q,
&ionic_txq_stats_desc[i]);
(*buf)++;
}
for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
- **buf = txqcq->stats->tx.sg_cntr[i];
+ **buf = txstats->sg_cntr[i];
(*buf)++;
}
}
}
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- rxqcq = lif_to_rxqcq(lif, q_num);
+ rxstats = &lif_to_rxstats(lif, q_num);
for (i = 0; i < IONIC_NUM_RX_STATS; i++) {
- **buf = IONIC_READ_STAT64(&rxqcq->stats->rx,
+ **buf = IONIC_READ_STAT64(rxstats,
&ionic_rx_stats_desc[i]);
(*buf)++;
}
- if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_UP, lif->state) &&
+ test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ rxqcq = lif_to_rxqcq(lif, q_num);
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
**buf = IONIC_READ_STAT64(&rxqcq->cq,
&ionic_dbg_cq_stats_desc[i]);
rtl_lock_config_regs(tp);
}
+static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu)
+{
+ if (mtu > ETH_DATA_LEN)
+ rtl_hw_jumbo_enable(tp);
+ else
+ rtl_hw_jumbo_disable(tp);
+}
+
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
return RTL_R8(tp, ChipCmd) & CmdReset;
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
-
- if (tp->dev->mtu <= ETH_DATA_LEN) {
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B |
- PCI_EXP_DEVCTL_NOSNOOP_EN);
- }
}
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_disable_clock_request(tp);
}
rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
-
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
}
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
/* Magic. */
RTL_W8(tp, DBG_REG, 0x20);
-
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
}
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168e_1);
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_disable_clock_request(tp);
/* Reset tx FIFO pointer */
rtl_ephy_init(tp, e_info_8168e_2);
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
rtl_set_rx_tx_desc_registers(tp);
rtl_lock_config_regs(tp);
+ rtl_jumbo_config(tp, tp->dev->mtu);
+
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
RTL_R16(tp, CPlusCmd);
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
{
struct rtl8169_private *tp = netdev_priv(dev);
- if (new_mtu > ETH_DATA_LEN)
- rtl_hw_jumbo_enable(tp);
- else
- rtl_hw_jumbo_disable(tp);
+ rtl_jumbo_config(tp, new_mtu);
dev->mtu = new_mtu;
netdev_update_features(dev);
}
}
} else {
- netdev_info(dev, "Too many address, switching to promiscuous\n");
+ if (!(readl(ioaddr + EMAC_RX_FRM_FLT) & EMAC_FRM_FLT_RXALL))
+ netdev_info(dev, "Too many address, switching to promiscuous\n");
v = EMAC_FRM_FLT_RXALL;
}
value |= GMAC_PACKET_FILTER_HPF;
/* Handle multiple unicast addresses */
- if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
+ if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
/* Switch to promiscuous mode if more than 128 addrs
* are required
*/
if (!enable) {
val |= PPSCMDx(index, 0x5);
+ val |= PPSEN0;
writel(val, ioaddr + MAC_PPS_CONTROL);
return 0;
}
}
if (priv->hw->pcs)
- stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
/* set TX and RX rings length */
stmmac_set_rings_length(priv);
stmmac_mac_set(priv, priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
- clk_disable(priv->plat->pclk);
- clk_disable(priv->plat->stmmac_clk);
+ if (priv->plat->clk_ptp_ref)
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
+ clk_disable_unprepare(priv->plat->pclk);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
}
mutex_unlock(&priv->lock);
} else {
pinctrl_pm_select_default_state(priv->device);
/* enable the clk previously disabled */
- clk_enable(priv->plat->stmmac_clk);
- clk_enable(priv->plat->pclk);
+ clk_prepare_enable(priv->plat->stmmac_clk);
+ clk_prepare_enable(priv->plat->pclk);
+ if (priv->plat->clk_ptp_ref)
+ clk_prepare_enable(priv->plat->clk_ptp_ref);
/* reset the phy so that it's ready */
if (priv->mii)
stmmac_mdio_reset(priv->mii);
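
The clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare() switch matters because prepare/unprepare may sleep while enable/disable must not; suspend/resume runs in sleepable context, and each prepare_enable must be balanced by a disable_unprepare. Schematically (ex_* names are hypothetical):

#include <linux/clk.h>

/* Sleepable context only: clk_prepare() may block. */
static int ex_clock_resume(struct clk *pclk)
{
	return clk_prepare_enable(pclk);	/* prepare + enable */
}

static void ex_clock_suspend(struct clk *pclk)
{
	clk_disable_unprepare(pclk);		/* undo both enable and prepare */
}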
/* structure describing a PTP hardware clock */
static struct ptp_clock_info stmmac_ptp_clock_ops = {
.owner = THIS_MODULE,
- .name = "stmmac_ptp_clock",
+ .name = "stmmac ptp",
.max_adj = 62500000,
.n_alarm = 0,
.n_ext_ts = 0,
static int stmmac_test_hfilt(struct stmmac_priv *priv)
{
- unsigned char gd_addr[ETH_ALEN] = {0x01, 0x00, 0xcc, 0xcc, 0xdd, 0xdd};
- unsigned char bd_addr[ETH_ALEN] = {0x09, 0x00, 0xaa, 0xaa, 0xbb, 0xbb};
+ unsigned char gd_addr[ETH_ALEN] = {0x01, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
+ unsigned char bd_addr[ETH_ALEN] = {0x01, 0x01, 0x02, 0x03, 0x04, 0x05};
struct stmmac_packet_attrs attr = { };
int ret;
if (ret)
return ret;
+ if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
+ return -EOPNOTSUPP;
+
ret = dev_mc_add(priv->dev, gd_addr);
if (ret)
return ret;
if (stmmac_filter_check(priv))
return -EOPNOTSUPP;
+ if (!priv->hw->multicast_filter_bins)
+ return -EOPNOTSUPP;
/* Remove all MC addresses */
__dev_mc_unsync(priv->dev, NULL);
if (stmmac_filter_check(priv))
return -EOPNOTSUPP;
+ if (!priv->hw->multicast_filter_bins)
+ return -EOPNOTSUPP;
/* Remove all UC addresses */
__dev_uc_unsync(priv->dev, NULL);
return NULL;
}
-struct {
+static struct {
int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
struct stmmac_flow_entry *entry);
} tc_flow_parsers[] = {
* cpdma_chan_split_pool - Splits ctrl pool between all channels.
* Has to be called under ctlr lock
*/
-int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
+static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
int free_rx_num = 0, free_tx_num = 0;
void nsim_fib_exit(void)
{
- unregister_pernet_subsys(&nsim_fib_net_ops);
unregister_fib_notifier(&nsim_fib_nb);
+ unregister_pernet_subsys(&nsim_fib_net_ops);
}
int nsim_fib_init(void)
err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
if (err < 0) {
pr_err("Failed to register fib notifier\n");
+ unregister_pernet_subsys(&nsim_fib_net_ops);
goto err_out;
}
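
Both netdevsim hunks enforce the same discipline: teardown runs in exactly the reverse order of setup, and a failing setup step unwinds everything registered before it. In outline (the ex_* helpers are assumed):

int ex_register_a(void);
void ex_unregister_a(void);
int ex_register_b(void);
void ex_unregister_b(void);

/* Setup: A then B; a failure in B undoes A. */
static int ex_init(void)
{
	int err;

	err = ex_register_a();
	if (err)
		return err;

	err = ex_register_b();
	if (err)
		ex_unregister_a();	/* unwind step A on failure */
	return err;
}

/* Teardown: strictly B then A. */
static void ex_exit(void)
{
	ex_unregister_b();
	ex_unregister_a();
}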
.name = _name, \
/* PHY_BASIC_FEATURES */ \
.flags = PHY_IS_INTERNAL, \
+ .soft_reset = genphy_soft_reset, \
.config_init = bcm7xxx_config_init, \
.suspend = bcm7xxx_suspend, \
.resume = bcm7xxx_config_init, \
return genphy_config_aneg(phydev);
}
+static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
+ const u32 ksz_phy_id)
+{
+ int ret;
+
+ if ((phydev->phy_id & MICREL_PHY_ID_MASK) != ksz_phy_id)
+ return 0;
+
+ ret = phy_read(phydev, MII_BMSR);
+ if (ret < 0)
+ return ret;
+
+	/* The KSZ8051 PHY and the KSZ8794/KSZ8795/KSZ8765 switch share the
+	 * exact same PHY ID. However, they can be told apart by the presence
+	 * of the extended capability registers: the KSZ8051 PHY has them
+	 * while the switch does not.
+	 */
+ ret &= BMSR_ERCAP;
+ if (ksz_phy_id == PHY_ID_KSZ8051)
+ return ret;
+ else
+ return !ret;
+}
+
+static int ksz8051_match_phy_device(struct phy_device *phydev)
+{
+ return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ8051);
+}
+
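
Dropping .phy_id/.phy_id_mask from the driver entries works because, when .match_phy_device() is present, the PHY core consults it instead of the ID/mask comparison. A hypothetical entry:

#include <linux/phy.h>

static int ex_match_phy_device(struct phy_device *phydev)
{
	/* Return nonzero to claim the device; any register probe works. */
	return (phydev->phy_id & 0xfffffff0) == 0x00221550;	/* made-up ID */
}

static struct phy_driver ex_driver = {
	.name			= "Example PHY",
	.match_phy_device	= ex_match_phy_device,
	/* no .phy_id / .phy_id_mask needed when matching by callback */
};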
static int ksz8081_config_init(struct phy_device *phydev)
{
/* KSZPHY_OMSO_FACTORY_TEST is set at de-assertion of the reset line
return kszphy_config_init(phydev);
}
+static int ksz8795_match_phy_device(struct phy_device *phydev)
+{
+ return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ87XX);
+}
+
static int ksz9021_load_values_from_of(struct phy_device *phydev,
const struct device_node *of_node,
u16 reg,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = PHY_ID_KSZ8051,
- .phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8051",
/* PHY_BASIC_FEATURES */
.driver_data = &ksz8051_type,
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
+ .match_phy_device = ksz8051_match_phy_device,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = PHY_ID_KSZ8795,
- .phy_id_mask = MICREL_PHY_ID_MASK,
- .name = "Micrel KSZ8795",
+ .name = "Micrel KSZ87XX Switch",
/* PHY_BASIC_FEATURES */
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
+ .match_phy_device = ksz8795_match_phy_device,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
{
int val;
+ linkmode_zero(phydev->lp_advertising);
+
val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
if (val < 0)
return val;
if (AUTONEG_DISABLE == phydev->autoneg)
phy_sanitize_settings(phydev);
- /* Invalidate LP advertising flags */
- linkmode_zero(phydev->lp_advertising);
-
err = phy_config_aneg(phydev);
if (err < 0)
goto out_unlock;
{
int lpa, lpagb;
- if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ if (!phydev->autoneg_complete) {
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising,
+ 0);
+ mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
+ return 0;
+ }
+
if (phydev->is_gigabit_capable) {
lpagb = phy_read(phydev, MII_STAT1000);
if (lpagb < 0)
return lpa;
mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
+ } else {
+ linkmode_zero(phydev->lp_advertising);
}
return 0;
/**
* phylink_create() - create a phylink instance
- * @ndev: a pointer to the &struct net_device
+ * @config: a pointer to the target &struct phylink_config
* @fwnode: a pointer to a &struct fwnode_handle describing the network
* interface
* @iface: the desired link mode defined by &typedef phy_interface_t
e = tun_flow_find(head, rxhash);
if (likely(e)) {
/* TODO: keep queueing to old queue until it's empty? */
- if (e->queue_index != queue_index)
- e->queue_index = queue_index;
+ if (READ_ONCE(e->queue_index) != queue_index)
+ WRITE_ONCE(e->queue_index, queue_index);
if (e->updated != jiffies)
e->updated = jiffies;
sock_rps_record_flow_hash(e->rps_rxhash);
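
READ_ONCE()/WRITE_ONCE() annotate a field accessed locklessly from several CPUs: they force single, untorn loads and stores and keep the compiler from caching or refetching the value. The pattern in isolation (ex_flow is hypothetical):

#include <linux/compiler.h>
#include <linux/types.h>

struct ex_flow {
	u16 queue_index;	/* updated locklessly from the fast path */
};

static void ex_update_queue(struct ex_flow *e, u16 queue_index)
{
	/* Skip the store (and cacheline dirtying) when nothing changed. */
	if (READ_ONCE(e->queue_index) != queue_index)
		WRITE_ONCE(e->queue_index, queue_index);
}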
int intr_completed;
struct usb_endpoint_descriptor *endp;
struct urb *urb;
- struct hso_serial_state_notification serial_state_notification;
+ struct hso_serial_state_notification *serial_state_notification;
u16 prev_UART_state_bitmap;
struct uart_icount icount;
};
usb_rcvintpipe(usb,
tiocmget->endp->
bEndpointAddress & 0x7F),
- &tiocmget->serial_state_notification,
+ tiocmget->serial_state_notification,
sizeof(struct hso_serial_state_notification),
tiocmget_intr_callback, serial,
tiocmget->endp->bInterval);
/* wIndex should be the USB interface number of the port to which the
* notification applies, which should always be the Modem port.
*/
- serial_state_notification = &tiocmget->serial_state_notification;
+ serial_state_notification = tiocmget->serial_state_notification;
if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE ||
serial_state_notification->bNotification != B_NOTIFICATION ||
le16_to_cpu(serial_state_notification->wValue) != W_VALUE ||
usb_free_urb(tiocmget->urb);
tiocmget->urb = NULL;
serial->tiocmget = NULL;
+ kfree(tiocmget->serial_state_notification);
+ tiocmget->serial_state_notification = NULL;
kfree(tiocmget);
}
}
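
Giving serial_state_notification its own kzalloc() follows the USB rule that transfer buffers are DMA-mapped by the core and therefore must be standalone heap allocations, never stack memory or a sub-field of a larger structure. A condensed sketch (EX_NOTIF_LEN and ex_alloc_urb_buf are made up):

#include <linux/slab.h>
#include <linux/usb.h>

#define EX_NOTIF_LEN 16	/* hypothetical interrupt-IN payload size */

static void *ex_alloc_urb_buf(struct urb *urb, struct usb_device *udev,
			      unsigned int pipe, usb_complete_t complete,
			      void *ctx, int interval)
{
	void *buf = kzalloc(EX_NOTIF_LEN, GFP_KERNEL);	/* DMA-able */

	if (!buf)
		return NULL;

	usb_fill_int_urb(urb, udev, pipe, buf, EX_NOTIF_LEN,
			 complete, ctx, interval);
	return buf;	/* kfree() it after usb_kill_urb() on teardown */
}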
num_urbs = 2;
serial->tiocmget = kzalloc(sizeof(struct hso_tiocmget),
GFP_KERNEL);
+ serial->tiocmget->serial_state_notification
+ = kzalloc(sizeof(struct hso_serial_state_notification),
+ GFP_KERNEL);
/* it isn't going to break our heart if serial->tiocmget
* allocation fails don't bother checking this.
*/
- if (serial->tiocmget) {
+ if (serial->tiocmget && serial->tiocmget->serial_state_notification) {
tiocmget = serial->tiocmget;
tiocmget->endp = hso_get_ep(interface,
USB_ENDPOINT_XFER_INT,
/* driver requires remote-wakeup capability during autosuspend. */
intf->needs_remote_wakeup = 1;
+ ret = lan78xx_phy_init(dev);
+ if (ret < 0)
+ goto out4;
+
ret = register_netdev(netdev);
if (ret != 0) {
netif_err(dev, probe, netdev, "couldn't register the device\n");
- goto out4;
+ goto out5;
}
usb_set_intfdata(intf, dev);
pm_runtime_set_autosuspend_delay(&udev->dev,
DEFAULT_AUTOSUSPEND_DELAY);
- ret = lan78xx_phy_init(dev);
- if (ret < 0)
- goto out5;
-
return 0;
out5:
- unregister_netdev(netdev);
+ phy_disconnect(netdev->phydev);
out4:
usb_free_urb(dev->urb_intr);
out3:
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
{
struct usbnet *dev = netdev_priv(net);
- __le16 res;
+ __le16 res = 0;
mutex_lock(&dev->phy_mutex);
sr_set_sw_mii(dev);
return ret;
}
- if (!uart_print && ar->hw_params.uart_pin_workaround) {
- ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin,
- ar->hw_params.uart_pin);
- if (ret) {
- ath10k_warn(ar, "failed to set UART TX pin: %d", ret);
- return ret;
+ if (!uart_print) {
+ if (ar->hw_params.uart_pin_workaround) {
+ ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin,
+ ar->hw_params.uart_pin);
+ if (ret) {
+ ath10k_warn(ar, "failed to set UART TX pin: %d",
+ ret);
+ return ret;
+ }
}
return 0;
wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE,
&tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
- if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE,
&tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
- if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
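
The recurring reshuffle in these ACPI hunks fixes an error-mapping bug: PTR_ERR() is only meaningful when IS_ERR() is true, so folding an unrelated validity check (tbl_rev != 0) into the same branch could return a bogus "error" derived from a valid pointer. The corrected shape (ex_check_pkg is a hypothetical stand-in):

#include <linux/err.h>

static int ex_check_pkg(void *pkg, int tbl_rev)
{
	if (IS_ERR(pkg))	/* only here is PTR_ERR() valid */
		return PTR_ERR(pkg);

	if (tbl_rev != 0)	/* separate, explicit errno */
		return -EINVAL;

	return 0;
}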
if (new_page)
__free_page(new_page);
}
+ kfree(table);
return NULL;
}
alloc_size = min_t(int, size, PAGE_SIZE);
*/
static inline u32 iwl_umac_prph(struct iwl_trans *trans, u32 ofs)
{
- return ofs + trans->cfg->trans.umac_prph_offset;
+ return ofs + trans->trans_cfg->umac_prph_offset;
}
static inline u32 iwl_read_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs)
{
return iwl_read_prph_no_grab(trans, ofs +
- trans->cfg->trans.umac_prph_offset);
+ trans->trans_cfg->umac_prph_offset);
}
static inline u32 iwl_read_umac_prph(struct iwl_trans *trans, u32 ofs)
{
- return iwl_read_prph(trans, ofs + trans->cfg->trans.umac_prph_offset);
+ return iwl_read_prph(trans, ofs + trans->trans_cfg->umac_prph_offset);
}
static inline void iwl_write_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs,
u32 val)
{
- iwl_write_prph_no_grab(trans, ofs + trans->cfg->trans.umac_prph_offset,
+ iwl_write_prph_no_grab(trans, ofs + trans->trans_cfg->umac_prph_offset,
val);
}
static inline void iwl_write_umac_prph(struct iwl_trans *trans, u32 ofs,
u32 val)
{
- iwl_write_prph(trans, ofs + trans->cfg->trans.umac_prph_offset, val);
+ iwl_write_prph(trans, ofs + trans->trans_cfg->umac_prph_offset, val);
}
static inline int iwl_poll_umac_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
{
return iwl_poll_prph_bit(trans, addr +
- trans->cfg->trans.umac_prph_offset,
+ trans->trans_cfg->umac_prph_offset,
bits, mask, timeout);
}
};
int ret;
+ if (mvm->trans->cfg->tx_with_siso_diversity)
+ init_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY));
+
lockdep_assert_held(&mvm->mutex);
mvm->rfkill_safe_init_done = false;
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
- if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
- (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
+ (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) ||
+ tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev > 1) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
+ if (tbl_rev != 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
mvm->geo_rev = tbl_rev;
for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
* firmware versions. Unfortunately, we don't have a TLV API
* flag to rely on, so rely on the major version which is in
* the first byte of ucode_ver. This was implemented
- * initially on version 38 and then backported to29 and 17.
- * The intention was to have it in 36 as well, but not all
- * 8000 family got this feature enabled. The 8000 family is
- * the only one using version 36, so skip this version
- * entirely.
+ * initially on version 38 and then backported to 17. It was
+ * also backported to 29, but only for 7265D devices. The
+ * intention was to have it in 36 as well, but not all 8000
+ * family got this feature enabled. The 8000 family is the
+ * only one using version 36, so skip this version entirely.
*/
return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
- IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
- IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
+ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17 ||
+ (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 &&
+ ((mvm->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
}
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
+ if (tbl_rev != 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
enabled = &wifi_pkg->package.elements[1];
if (enabled->type != ACPI_TYPE_INTEGER ||
(enabled->integer.value != 0 && enabled->integer.value != 1)) {
if (!iwl_mvm_has_new_rx_api(mvm))
return;
- notif->cookie = mvm->queue_sync_cookie;
-
- if (notif->sync)
+ if (notif->sync) {
+ notif->cookie = mvm->queue_sync_cookie;
atomic_set(&mvm->queue_sync_counter,
mvm->trans->num_rx_queues);
+ }
ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif,
size, !notif->sync);
out:
atomic_set(&mvm->queue_sync_counter, 0);
- mvm->queue_sync_cookie++;
+ if (notif->sync)
+ mvm->queue_sync_cookie++;
}
static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
- if (ret) {
- dma_free_coherent(trans->dev,
- sizeof(*prph_scratch),
- prph_scratch,
- trans_pcie->prph_scratch_dma_addr);
- return ret;
- }
+ if (ret)
+ goto err_free_prph_scratch;
+
/* Allocate prph information
* currently we don't assign to the prph info anything, but it would get
prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
&trans_pcie->prph_info_dma_addr,
GFP_KERNEL);
- if (!prph_info)
- return -ENOMEM;
+ if (!prph_info) {
+ ret = -ENOMEM;
+ goto err_free_prph_scratch;
+ }
/* Allocate context info */
ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
sizeof(*ctxt_info_gen3),
&trans_pcie->ctxt_info_dma_addr,
GFP_KERNEL);
- if (!ctxt_info_gen3)
- return -ENOMEM;
+ if (!ctxt_info_gen3) {
+ ret = -ENOMEM;
+ goto err_free_prph_info;
+ }
ctxt_info_gen3->prph_info_base_addr =
cpu_to_le64(trans_pcie->prph_info_dma_addr);
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
return 0;
+
+err_free_prph_info:
+ dma_free_coherent(trans->dev,
+ sizeof(*prph_info),
+ prph_info,
+ trans_pcie->prph_info_dma_addr);
+
+err_free_prph_scratch:
+ dma_free_coherent(trans->dev,
+ sizeof(*prph_scratch),
+ prph_scratch,
+ trans_pcie->prph_scratch_dma_addr);
+ return ret;
+
}
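
The rewrite above converts the inline cleanup into the kernel's standard goto ladder: labels are ordered so a jump frees exactly what was allocated before the failing step, and nothing after it. Stripped to its shape (the ex_* names are hypothetical):

void *ex_alloc_a(void);
void *ex_alloc_b(void);
int ex_configure(void *a, void *b);
void ex_free_a(void *a);
void ex_free_b(void *b);

static int ex_setup(void)
{
	void *a, *b;
	int ret;

	a = ex_alloc_a();
	if (!a)
		return -ENOMEM;

	b = ex_alloc_b();
	if (!b) {
		ret = -ENOMEM;
		goto err_free_a;
	}

	ret = ex_configure(a, b);
	if (ret)
		goto err_free_b;

	return 0;

err_free_b:
	ex_free_b(b);
err_free_a:
	ex_free_a(a);
	return ret;
}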
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
/* 9000 Series */
- {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
{IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
+
+ {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
+ {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+
+ {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
{IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_160_cfg_soc)},
ptr = cmdq->write_ptr;
for (i = 0; i < cmdq->n_window; i++) {
u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
+ u8 tfdidx;
u32 caplen, cmdlen;
+ if (trans->trans_cfg->use_tfh)
+ tfdidx = idx;
+ else
+ tfdidx = ptr;
+
cmdlen = iwl_trans_pcie_get_cmdlen(trans,
- cmdq->tfds +
- tfd_size * ptr);
+ (u8 *)cmdq->tfds +
+ tfd_size * tfdidx);
caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
if (cmdlen) {
spin_lock_init(&trans_pcie->reg_lock);
mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
+
+ trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
+ WQ_HIGHPRI | WQ_UNBOUND, 1);
+ if (!trans_pcie->rba.alloc_wq) {
+ ret = -ENOMEM;
+ goto out_free_trans;
+ }
+ INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
+
trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
if (!trans_pcie->tso_hdr_page) {
ret = -ENOMEM;
trans_pcie->inta_mask = CSR_INI_SET_MASK;
}
- trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
- WQ_HIGHPRI | WQ_UNBOUND, 1);
- INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
mutex_init(&trans_pcie->fw_mon_data.mutex);
iwl_pcie_free_ict(trans);
out_no_pci:
free_percpu(trans_pcie->tso_hdr_page);
+ destroy_workqueue(trans_pcie->rba.alloc_wq);
+out_free_trans:
iwl_trans_free(trans);
return ERR_PTR(ret);
}
err = dev_alloc_name(hwsim_mon, hwsim_mon->name);
if (err < 0) {
rtnl_unlock();
- goto out_free_radios;
+ goto out_free_mon;
}
err = register_netdevice(hwsim_mon);
#include <linux/leds.h>
#include <linux/mutex.h>
#include <linux/etherdevice.h>
-#include <linux/input-polldev.h>
#include <linux/kfifo.h>
#include <linux/hrtimer.h>
#include <linux/average.h>
{
struct rt2x00debug_intf *intf = file->private_data;
struct rt2x00_dev *rt2x00dev = intf->rt2x00dev;
- static unsigned long last_reset;
+ static unsigned long last_reset = INITIAL_JIFFIES;
if (!rt2x00_has_cap_restart_hw(rt2x00dev))
return -EOPNOTSUPP;
xenvif_unmap_frontend_data_rings(queue);
netif_napi_del(&queue->napi);
err:
- module_put(THIS_MODULE);
return err;
}
rc = pn533_finalize_setup(priv);
if (rc)
- goto error;
+ goto err_deregister;
usb_set_intfdata(interface, phy);
return 0;
+err_deregister:
+ pn533_unregister_device(phy->priv);
error:
+ usb_kill_urb(phy->in_urb);
+ usb_kill_urb(phy->out_urb);
+ usb_kill_urb(phy->ack_urb);
+
usb_free_urb(phy->in_urb);
usb_free_urb(phy->out_urb);
usb_free_urb(phy->ack_urb);
usb_put_dev(phy->udev);
kfree(in_buf);
+ kfree(phy->ack_buffer);
return rc;
}
/*
* Only new queue scan work when admin and IO queues are both alive
*/
- if (ctrl->state == NVME_CTRL_LIVE)
+ if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
queue_work(nvme_wq, &ctrl->scan_work);
}
+/*
+ * Use this function to proceed with scheduling reset_work for a controller
+ * that had previously been set to the resetting state. This is intended for
+ * code paths that can't be interrupted by other reset attempts. A hot removal
+ * may prevent this from succeeding.
+ */
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->state != NVME_CTRL_RESETTING)
+ return -EBUSY;
+ if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
+ return -EBUSY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
+
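A minimal sketch (editor's illustration, not part of the patch) of the call pattern the new helper enables; it mirrors the nvme_reset_done() hunk further down and assumes ctrl has already been moved to NVME_CTRL_RESETTING, e.g. via nvme_wait_reset():

	/* Sketch only: schedule the pending reset; 0 means reset_work was
	 * queued, so the caller can wait for it. -EBUSY means the controller
	 * was not in the resetting state (or the work was already queued).
	 */
	if (!nvme_try_sched_reset(ctrl))
		flush_work(&ctrl->reset_work);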
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
ret = nvme_reset_ctrl(ctrl);
if (!ret) {
flush_work(&ctrl->reset_work);
- if (ctrl->state != NVME_CTRL_LIVE &&
- ctrl->state != NVME_CTRL_ADMIN_ONLY)
+ if (ctrl->state != NVME_CTRL_LIVE)
ret = -ENETRESET;
}
old_state = ctrl->state;
switch (new_state) {
- case NVME_CTRL_ADMIN_ONLY:
- switch (old_state) {
- case NVME_CTRL_CONNECTING:
- changed = true;
- /* FALLTHRU */
- default:
- break;
- }
- break;
case NVME_CTRL_LIVE:
switch (old_state) {
case NVME_CTRL_NEW:
switch (old_state) {
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
- case NVME_CTRL_ADMIN_ONLY:
changed = true;
/* FALLTHRU */
default:
case NVME_CTRL_DELETING:
switch (old_state) {
case NVME_CTRL_LIVE:
- case NVME_CTRL_ADMIN_ONLY:
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
changed = true;
break;
}
- if (changed)
+ if (changed) {
ctrl->state = new_state;
+ wake_up_all(&ctrl->state_wq);
+ }
spin_unlock_irqrestore(&ctrl->lock, flags);
if (changed && ctrl->state == NVME_CTRL_LIVE)
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
+/*
+ * Returns true for sink states that can't ever transition back to live.
+ */
+static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
+{
+ switch (ctrl->state) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_CONNECTING:
+ return false;
+ case NVME_CTRL_DELETING:
+ case NVME_CTRL_DEAD:
+ return true;
+ default:
+ WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
+ return true;
+ }
+}
+
+/*
+ * Waits for the controller state to be resetting, or returns false if it is
+ * not possible to ever transition to that state.
+ */
+bool nvme_wait_reset(struct nvme_ctrl *ctrl)
+{
+ wait_event(ctrl->state_wq,
+ nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
+ nvme_state_terminal(ctrl));
+ return ctrl->state == NVME_CTRL_RESETTING;
+}
+EXPORT_SYMBOL_GPL(nvme_wait_reset);
+
static void nvme_free_ns_head(struct kref *ref)
{
struct nvme_ns_head *head =
if (ns->disk && nvme_revalidate_disk(ns->disk))
nvme_set_queue_dying(ns);
up_read(&ctrl->namespaces_rwsem);
-
- nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
}
static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
nvme_unfreeze(ctrl);
nvme_mpath_unfreeze(ctrl->subsys);
mutex_unlock(&ctrl->subsys->lock);
+ nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
mutex_unlock(&ctrl->scan_lock);
}
if (effects & NVME_CMD_EFFECTS_CCC)
switch (ctrl->state) {
case NVME_CTRL_LIVE:
- case NVME_CTRL_ADMIN_ONLY:
break;
default:
return -EWOULDBLOCK;
static const char *const state_name[] = {
[NVME_CTRL_NEW] = "new",
[NVME_CTRL_LIVE] = "live",
- [NVME_CTRL_ADMIN_ONLY] = "only-admin",
[NVME_CTRL_RESETTING] = "resetting",
[NVME_CTRL_CONNECTING] = "connecting",
[NVME_CTRL_DELETING] = "deleting",
struct nvme_id_ctrl *id;
unsigned nn;
- if (ctrl->state != NVME_CTRL_LIVE)
+	/* No tagset on a live ctrl means IO queues could not be created */
+ if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
return;
- WARN_ON_ONCE(!ctrl->tagset);
-
if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
dev_info(ctrl->device, "rescanning namespaces.\n");
nvme_clear_changed_ns_log(ctrl);
if (time_after(jiffies, fw_act_timeout)) {
dev_warn(ctrl->device,
"Fw activation timeout, reset controller\n");
- nvme_reset_ctrl(ctrl);
- break;
+ nvme_try_sched_reset(ctrl);
+ return;
}
msleep(100);
}
- if (ctrl->state != NVME_CTRL_LIVE)
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
return;
nvme_start_queues(ctrl);
nvme_queue_scan(ctrl);
break;
case NVME_AER_NOTICE_FW_ACT_STARTING:
- queue_work(nvme_wq, &ctrl->fw_act_work);
+ /*
+ * We are (ab)using the RESETTING state to prevent subsequent
+ * recovery actions from interfering with the controller's
+ * firmware activation.
+ */
+ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+ queue_work(nvme_wq, &ctrl->fw_act_work);
break;
#ifdef CONFIG_NVME_MULTIPATH
case NVME_AER_NOTICE_ANA:
INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
+ init_waitqueue_head(&ctrl->state_wq);
INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live)
{
- if (likely(ctrl->state == NVME_CTRL_LIVE ||
- ctrl->state == NVME_CTRL_ADMIN_ONLY))
+ if (likely(ctrl->state == NVME_CTRL_LIVE))
return true;
return __nvmf_check_ready(ctrl, rq, queue_live);
}
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
+#include <linux/wait.h>
#include <trace/events/block.h>
enum nvme_ctrl_state {
NVME_CTRL_NEW,
NVME_CTRL_LIVE,
- NVME_CTRL_ADMIN_ONLY, /* Only admin queue live */
NVME_CTRL_RESETTING,
NVME_CTRL_CONNECTING,
NVME_CTRL_DELETING,
struct cdev cdev;
struct work_struct reset_work;
struct work_struct delete_work;
+ wait_queue_head_t state_wq;
struct nvme_subsystem *subsys;
struct list_head subsys_entry;
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
+bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
struct bio_vec *bv)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- unsigned int first_prp_len = dev->ctrl.page_size - bv->bv_offset;
+ unsigned int offset = bv->bv_offset & (dev->ctrl.page_size - 1);
+ unsigned int first_prp_len = dev->ctrl.page_size - offset;
iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
if (dma_mapping_error(dev->dev, iod->first_dma))
return true;
}
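Worked numbers for the offset fix above (editor's illustration), assuming a 4096-byte controller page and a bvec that starts 5120 bytes into a merged multi-page segment:

	/* Old code underflowed the unsigned subtraction:
	 *   first_prp_len = 4096 - 5120             -> wraps to ~4 GiB
	 * The masked form keeps the offset within one page:
	 *   offset        = 5120 & 4095 = 1024
	 *   first_prp_len = 4096 - 1024 = 3072
	 */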
-/*
- * return error value only when tagset allocation failed
- */
-static int nvme_dev_add(struct nvme_dev *dev)
+static void nvme_dev_add(struct nvme_dev *dev)
{
int ret;
if (ret) {
dev_warn(dev->ctrl.device,
"IO queues tagset allocation failed %d\n", ret);
- return ret;
+ return;
}
dev->ctrl.tagset = &dev->tagset;
} else {
}
nvme_dbbuf_set(dev);
- return 0;
}
static int nvme_pci_enable(struct nvme_dev *dev)
mutex_unlock(&dev->shutdown_lock);
}
+static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
+{
+ if (!nvme_wait_reset(&dev->ctrl))
+ return -EBUSY;
+ nvme_dev_disable(dev, shutdown);
+ return 0;
+}
+
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
dma_pool_destroy(dev->prp_small_pool);
}
+static void nvme_free_tagset(struct nvme_dev *dev)
+{
+ if (dev->tagset.tags)
+ blk_mq_free_tag_set(&dev->tagset);
+ dev->ctrl.tagset = NULL;
+}
+
static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
{
struct nvme_dev *dev = to_nvme_dev(ctrl);
nvme_dbbuf_dma_free(dev);
put_device(dev->dev);
- if (dev->tagset.tags)
- blk_mq_free_tag_set(&dev->tagset);
+ nvme_free_tagset(dev);
if (dev->ctrl.admin_q)
blk_put_queue(dev->ctrl.admin_q);
kfree(dev->queues);
static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
{
+ /*
+ * Set state to deleting now to avoid blocking nvme_wait_reset(), which
+ * may be holding this pci_dev's device lock.
+ */
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
nvme_get_ctrl(&dev->ctrl);
nvme_dev_disable(dev, false);
nvme_kill_queues(&dev->ctrl);
container_of(work, struct nvme_dev, ctrl.reset_work);
bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
int result;
- enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
result = -ENODEV;
dev_warn(dev->ctrl.device, "IO queues not created\n");
nvme_kill_queues(&dev->ctrl);
nvme_remove_namespaces(&dev->ctrl);
- new_state = NVME_CTRL_ADMIN_ONLY;
+ nvme_free_tagset(dev);
} else {
nvme_start_queues(&dev->ctrl);
nvme_wait_freeze(&dev->ctrl);
- /* hit this only when allocate tagset fails */
- if (nvme_dev_add(dev))
- new_state = NVME_CTRL_ADMIN_ONLY;
+ nvme_dev_add(dev);
nvme_unfreeze(&dev->ctrl);
}
	 * If only the admin queue is live, keep it for further investigation or
	 * recovery.
*/
- if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
dev_warn(dev->ctrl.device,
- "failed to mark controller state %d\n", new_state);
+ "failed to mark controller live state\n");
result = -ENODEV;
goto out;
}
static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
- *val = readq(to_nvme_dev(ctrl)->bar + off);
+ *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);
return 0;
}
static void nvme_reset_prepare(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_dev_disable(dev, false);
+
+ /*
+ * We don't need to check the return value from waiting for the reset
+ * state as pci_dev device lock is held, making it impossible to race
+ * with ->remove().
+ */
+ nvme_disable_prepare_reset(dev, false);
+ nvme_sync_queues(&dev->ctrl);
}
static void nvme_reset_done(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_reset_ctrl_sync(&dev->ctrl);
+
+ if (!nvme_try_sched_reset(&dev->ctrl))
+ flush_work(&dev->ctrl.reset_work);
}
static void nvme_shutdown(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_dev_disable(dev, true);
+ nvme_disable_prepare_reset(dev, true);
}
/*
if (ndev->last_ps == U32_MAX ||
nvme_set_power_state(ctrl, ndev->last_ps) != 0)
- nvme_reset_ctrl(ctrl);
+ return nvme_try_sched_reset(&ndev->ctrl);
return 0;
}
*/
if (pm_suspend_via_firmware() || !ctrl->npss ||
!pcie_aspm_enabled(pdev) ||
- (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) {
- nvme_dev_disable(ndev, true);
- return 0;
- }
+ (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
+ return nvme_disable_prepare_reset(ndev, true);
nvme_start_freeze(ctrl);
nvme_wait_freeze(ctrl);
nvme_sync_queues(ctrl);
- if (ctrl->state != NVME_CTRL_LIVE &&
- ctrl->state != NVME_CTRL_ADMIN_ONLY)
+ if (ctrl->state != NVME_CTRL_LIVE)
goto unfreeze;
ret = nvme_get_power_state(ctrl, &ndev->last_ps);
* Clearing npss forces a controller reset on resume. The
	 * correct value will be rediscovered then.
*/
- nvme_dev_disable(ndev, true);
+ ret = nvme_disable_prepare_reset(ndev, true);
ctrl->npss = 0;
- ret = 0;
}
unfreeze:
nvme_unfreeze(ctrl);
static int nvme_simple_suspend(struct device *dev)
{
struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
-
- nvme_dev_disable(ndev, true);
- return 0;
+ return nvme_disable_prepare_reset(ndev, true);
}
static int nvme_simple_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct nvme_dev *ndev = pci_get_drvdata(pdev);
- nvme_reset_ctrl(&ndev->ctrl);
- return 0;
+ return nvme_try_sched_reset(&ndev->ctrl);
}
static const struct dev_pm_ops nvme_dev_pm_ops = {
dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
rq->tag, nvme_rdma_queue_idx(queue));
+ /*
+	 * Restart the timer if a controller reset is already scheduled. Any
+	 * timed-out commands will be handled before entering the connecting
+	 * state.
+ */
+ if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
+ return BLK_EH_RESET_TIMER;
+
if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
/*
* Teardown immediately if controller times out while starting
queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
queue->sock->sk->sk_state_change = nvme_tcp_state_change;
queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+#ifdef CONFIG_NET_RX_BUSY_POLL
queue->sock->sk->sk_ll_usec = 1;
+#endif
write_unlock_bh(&queue->sock->sk->sk_callback_lock);
return 0;
struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+ /*
+	 * Restart the timer if a controller reset is already scheduled. Any
+	 * timed-out commands will be handled before entering the connecting
+	 * state.
+ */
+ if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
+ return BLK_EH_RESET_TIMER;
+
dev_warn(ctrl->ctrl.device,
"queue %d: timeout request %#x type %d\n",
nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
ret = nvme_tcp_map_data(queue, rq);
if (unlikely(ret)) {
+ nvme_cleanup_cmd(rq);
dev_err(queue->ctrl->ctrl.device,
"Failed to map data (%d)\n", ret);
return ret;
iod->sg_table.sgl = iod->first_sgl;
if (sg_alloc_table_chained(&iod->sg_table,
blk_rq_nr_phys_segments(req),
- iod->sg_table.sgl, SG_CHUNK_SIZE))
+ iod->sg_table.sgl, SG_CHUNK_SIZE)) {
+ nvme_cleanup_cmd(req);
return BLK_STS_RESOURCE;
+ }
iod->req.sg = iod->sg_table.sgl;
iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
if (!target)
return -ENODEV;
- if (!of_device_is_available(target))
+ if (!of_device_is_available(target)) {
+ of_node_put(target);
return 0;
+ }
rmem = __find_rmem(target);
of_node_put(target);
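The hunk above balances the reference taken when the target node was resolved; a sketch of the full pattern (editor's illustration, the phandle lookup shown is hypothetical):

	/* A node returned by of_parse_phandle() holds a reference, so every
	 * exit path after a successful lookup must drop it with of_node_put().
	 */
	target = of_parse_phandle(np, "memory-region", 0);	/* hypothetical */
	if (!target)
		return -ENODEV;
	if (!of_device_is_available(target)) {
		of_node_put(target);	/* the put this fix adds */
		return 0;
	}
	rmem = __find_rmem(target);
	of_node_put(target);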
of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node);
if (!unittest_data_node) {
pr_warn("%s: No tree to attach; not running tests\n", __func__);
+ kfree(unittest_data);
return -ENODATA;
}
goto free_regulators;
}
- ret = regulator_enable(reg);
- if (ret < 0) {
- regulator_put(reg);
- goto free_regulators;
- }
-
opp_table->regulators[i] = reg;
}
return opp_table;
free_regulators:
- while (i--) {
- regulator_disable(opp_table->regulators[i]);
- regulator_put(opp_table->regulators[i]);
- }
+ while (i != 0)
+ regulator_put(opp_table->regulators[--i]);
kfree(opp_table->regulators);
opp_table->regulators = NULL;
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
- for (i = opp_table->regulator_count - 1; i >= 0; i--) {
- regulator_disable(opp_table->regulators[i]);
+ for (i = opp_table->regulator_count - 1; i >= 0; i--)
regulator_put(opp_table->regulators[i]);
- }
_free_set_opp_data(opp_table);
{
struct dev_pm_opp *opp;
- lockdep_assert_held(&opp_table_lock);
-
mutex_lock(&opp_table->lock);
list_for_each_entry(opp, &opp_table->opp_list, node) {
return 0;
}
+ /*
+ * Re-initialize list_kref every time we add static OPPs to the OPP
+	 * table as the reference count may be 0 after the last time static OPPs
+ * were removed.
+ */
+ kref_init(&opp_table->list_kref);
+
/* We have opp-table node now, iterate over it and add OPPs */
for_each_available_child_of_node(opp_table->np, np) {
opp = _opp_add_static_v2(opp_table, dev, np);
return(0);
}
- /* Documentation/DMA-API-HOWTO.txt tells drivers to try 64-bit
- * first, then fall back to 32-bit if that fails.
- * We are just "encouraging" 32-bit DMA masks here since we can
- * never allow IOMMU bypass unless we add special support for ZX1.
- */
- if (mask > ~0U)
- return 0;
-
ioc = GET_IOC(dev);
if (!ioc)
return 0;
pci_update_current_state(dev, dev->current_state);
}
-/**
- * pci_power_up - Put the given device into D0 forcibly
- * @dev: PCI device to power up
- */
-void pci_power_up(struct pci_dev *dev)
-{
- if (platform_pci_power_manageable(dev))
- platform_pci_set_power_state(dev, PCI_D0);
-
- pci_raw_set_power_state(dev, PCI_D0);
- pci_update_current_state(dev, PCI_D0);
-}
-
/**
* pci_platform_power_transition - Use platform to change device power state
* @dev: PCI device to handle.
}
EXPORT_SYMBOL(pci_set_power_state);
+/**
+ * pci_power_up - Put the given device into D0 forcibly
+ * @dev: PCI device to power up
+ */
+void pci_power_up(struct pci_dev *dev)
+{
+ __pci_start_power_transition(dev, PCI_D0);
+ pci_raw_set_power_state(dev, PCI_D0);
+ pci_update_current_state(dev, PCI_D0);
+}
+
/**
* pci_choose_state - Choose the power state of a PCI device
* @dev: PCI device to be suspended
#define K25 7
SIG_EXPR_LIST_DECL_SESG(K25, MACLINK4, MACLINK4, SIG_DESC_SET(SCU410, 7));
-SIG_EXPR_LIST_DECL_SESG(K25, SDA14, SDA14, SIG_DESC_SET(SCU4B0, 7));
+SIG_EXPR_LIST_DECL_SESG(K25, SDA14, I2C14, SIG_DESC_SET(SCU4B0, 7));
PIN_DECL_2(K25, GPIOA7, MACLINK4, SDA14);
FUNC_GROUP_DECL(MACLINK4, K25);
#define AD11 206
SIG_EXPR_LIST_DECL_SEMG(AD11, SPI1DQ2, QSPI1, SPI1, SIG_DESC_SET(SCU438, 14));
SIG_EXPR_LIST_DECL_SEMG(AD11, TXD13, UART13G1, UART13,
- SIG_DESC_SET(SCU438, 14));
+ SIG_DESC_CLEAR(SCU4B8, 2), SIG_DESC_SET(SCU4D8, 14));
PIN_DECL_2(AD11, GPIOZ6, SPI1DQ2, TXD13);
#define AF10 207
SIG_EXPR_LIST_DECL_SEMG(AF10, SPI1DQ3, QSPI1, SPI1, SIG_DESC_SET(SCU438, 15));
SIG_EXPR_LIST_DECL_SEMG(AF10, RXD13, UART13G1, UART13,
- SIG_DESC_SET(SCU438, 15));
+ SIG_DESC_CLEAR(SCU4B8, 3), SIG_DESC_SET(SCU4D8, 15));
PIN_DECL_2(AF10, GPIOZ7, SPI1DQ3, RXD13);
GROUP_DECL(QSPI1, AB11, AC11, AA11, AD11, AF10);
FUNC_GROUP_DECL(RMII2, D4, C2, C1, D3, D2, D1, F4, E2, E1);
#define AB4 232
-SIG_EXPR_LIST_DECL_SESG(AB4, SD3CLK, SD3, SIG_DESC_SET(SCU400, 24));
-PIN_DECL_1(AB4, GPIO18D0, SD3CLK);
+SIG_EXPR_LIST_DECL_SEMG(AB4, EMMCCLK, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 24));
+PIN_DECL_1(AB4, GPIO18D0, EMMCCLK);
#define AA4 233
-SIG_EXPR_LIST_DECL_SESG(AA4, SD3CMD, SD3, SIG_DESC_SET(SCU400, 25));
-PIN_DECL_1(AA4, GPIO18D1, SD3CMD);
+SIG_EXPR_LIST_DECL_SEMG(AA4, EMMCCMD, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 25));
+PIN_DECL_1(AA4, GPIO18D1, EMMCCMD);
#define AC4 234
-SIG_EXPR_LIST_DECL_SESG(AC4, SD3DAT0, SD3, SIG_DESC_SET(SCU400, 26));
-PIN_DECL_1(AC4, GPIO18D2, SD3DAT0);
+SIG_EXPR_LIST_DECL_SEMG(AC4, EMMCDAT0, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 26));
+PIN_DECL_1(AC4, GPIO18D2, EMMCDAT0);
#define AA5 235
-SIG_EXPR_LIST_DECL_SESG(AA5, SD3DAT1, SD3, SIG_DESC_SET(SCU400, 27));
-PIN_DECL_1(AA5, GPIO18D3, SD3DAT1);
+SIG_EXPR_LIST_DECL_SEMG(AA5, EMMCDAT1, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 27));
+PIN_DECL_1(AA5, GPIO18D3, EMMCDAT1);
#define Y5 236
-SIG_EXPR_LIST_DECL_SESG(Y5, SD3DAT2, SD3, SIG_DESC_SET(SCU400, 28));
-PIN_DECL_1(Y5, GPIO18D4, SD3DAT2);
+SIG_EXPR_LIST_DECL_SEMG(Y5, EMMCDAT2, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 28));
+PIN_DECL_1(Y5, GPIO18D4, EMMCDAT2);
#define AB5 237
-SIG_EXPR_LIST_DECL_SESG(AB5, SD3DAT3, SD3, SIG_DESC_SET(SCU400, 29));
-PIN_DECL_1(AB5, GPIO18D5, SD3DAT3);
+SIG_EXPR_LIST_DECL_SEMG(AB5, EMMCDAT3, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 29));
+PIN_DECL_1(AB5, GPIO18D5, EMMCDAT3);
#define AB6 238
-SIG_EXPR_LIST_DECL_SESG(AB6, SD3CD, SD3, SIG_DESC_SET(SCU400, 30));
-PIN_DECL_1(AB6, GPIO18D6, SD3CD);
+SIG_EXPR_LIST_DECL_SEMG(AB6, EMMCCD, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 30));
+PIN_DECL_1(AB6, GPIO18D6, EMMCCD);
#define AC5 239
-SIG_EXPR_LIST_DECL_SESG(AC5, SD3WP, SD3, SIG_DESC_SET(SCU400, 31));
-PIN_DECL_1(AC5, GPIO18D7, SD3WP);
+SIG_EXPR_LIST_DECL_SEMG(AC5, EMMCWP, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 31));
+PIN_DECL_1(AC5, GPIO18D7, EMMCWP);
-FUNC_GROUP_DECL(SD3, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5);
+GROUP_DECL(EMMCG1, AB4, AA4, AC4, AB6, AC5);
+GROUP_DECL(EMMCG4, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5);
#define Y1 240
SIG_EXPR_LIST_DECL_SEMG(Y1, FWSPIDCS, FWSPID, FWSPID, SIG_DESC_SET(SCU500, 3));
SIG_EXPR_LIST_DECL_SESG(Y1, VBCS, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y1, SD3DAT4, SD3DAT4, SIG_DESC_SET(SCU404, 0));
-PIN_DECL_3(Y1, GPIO18E0, FWSPIDCS, VBCS, SD3DAT4);
-FUNC_GROUP_DECL(SD3DAT4, Y1);
+SIG_EXPR_LIST_DECL_SEMG(Y1, EMMCDAT4, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 0));
+PIN_DECL_3(Y1, GPIO18E0, FWSPIDCS, VBCS, EMMCDAT4);
#define Y2 241
SIG_EXPR_LIST_DECL_SEMG(Y2, FWSPIDCK, FWSPID, FWSPID, SIG_DESC_SET(SCU500, 3));
SIG_EXPR_LIST_DECL_SESG(Y2, VBCK, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y2, SD3DAT5, SD3DAT5, SIG_DESC_SET(SCU404, 1));
-PIN_DECL_3(Y2, GPIO18E1, FWSPIDCK, VBCK, SD3DAT5);
-FUNC_GROUP_DECL(SD3DAT5, Y2);
+SIG_EXPR_LIST_DECL_SEMG(Y2, EMMCDAT5, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 1));
+PIN_DECL_3(Y2, GPIO18E1, FWSPIDCK, VBCK, EMMCDAT5);
#define Y3 242
SIG_EXPR_LIST_DECL_SEMG(Y3, FWSPIDMOSI, FWSPID, FWSPID,
SIG_DESC_SET(SCU500, 3));
SIG_EXPR_LIST_DECL_SESG(Y3, VBMOSI, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y3, SD3DAT6, SD3DAT6, SIG_DESC_SET(SCU404, 2));
-PIN_DECL_3(Y3, GPIO18E2, FWSPIDMOSI, VBMOSI, SD3DAT6);
-FUNC_GROUP_DECL(SD3DAT6, Y3);
+SIG_EXPR_LIST_DECL_SEMG(Y3, EMMCDAT6, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 2));
+PIN_DECL_3(Y3, GPIO18E2, FWSPIDMOSI, VBMOSI, EMMCDAT6);
#define Y4 243
SIG_EXPR_LIST_DECL_SEMG(Y4, FWSPIDMISO, FWSPID, FWSPID,
SIG_DESC_SET(SCU500, 3));
SIG_EXPR_LIST_DECL_SESG(Y4, VBMISO, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y4, SD3DAT7, SD3DAT7, SIG_DESC_SET(SCU404, 3));
-PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, SD3DAT7);
-FUNC_GROUP_DECL(SD3DAT7, Y4);
+SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3));
+PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7);
GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4);
GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12);
+GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4);
FUNC_DECL_2(FWSPID, FWSPID, FWQSPID);
FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4);
-
+FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
/*
* FIXME: Confirm bits and priorities are the right way around for the
* following 4 pins
*/
#define AF25 244
-SIG_EXPR_LIST_DECL_SEMG(AF25, I3C3SCL, I3C3, I3C3, SIG_DESC_SET(SCU438, 20),
- SIG_DESC_SET(SCU4D8, 20));
-SIG_EXPR_LIST_DECL_SESG(AF25, FSI1CLK, FSI1, SIG_DESC_CLEAR(SCU438, 20),
- SIG_DESC_SET(SCU4D8, 20));
+SIG_EXPR_LIST_DECL_SEMG(AF25, I3C3SCL, I3C3, I3C3, SIG_DESC_SET(SCU438, 20));
+SIG_EXPR_LIST_DECL_SESG(AF25, FSI1CLK, FSI1, SIG_DESC_SET(SCU4D8, 20));
PIN_DECL_(AF25, SIG_EXPR_LIST_PTR(AF25, I3C3SCL),
SIG_EXPR_LIST_PTR(AF25, FSI1CLK));
#define AE26 245
-SIG_EXPR_LIST_DECL_SEMG(AE26, I3C3SDA, I3C3, I3C3, SIG_DESC_SET(SCU438, 21),
- SIG_DESC_SET(SCU4D8, 21));
-SIG_EXPR_LIST_DECL_SESG(AE26, FSI1DATA, FSI1, SIG_DESC_CLEAR(SCU438, 21),
- SIG_DESC_SET(SCU4D8, 21));
+SIG_EXPR_LIST_DECL_SEMG(AE26, I3C3SDA, I3C3, I3C3, SIG_DESC_SET(SCU438, 21));
+SIG_EXPR_LIST_DECL_SESG(AE26, FSI1DATA, FSI1, SIG_DESC_SET(SCU4D8, 21));
PIN_DECL_(AE26, SIG_EXPR_LIST_PTR(AE26, I3C3SDA),
SIG_EXPR_LIST_PTR(AE26, FSI1DATA));
FUNC_GROUP_DECL(FSI1, AF25, AE26);
#define AE25 246
-SIG_EXPR_LIST_DECL_SEMG(AE25, I3C4SCL, I3C4, I3C4, SIG_DESC_SET(SCU438, 22),
- SIG_DESC_SET(SCU4D8, 22));
-SIG_EXPR_LIST_DECL_SESG(AE25, FSI2CLK, FSI2, SIG_DESC_CLEAR(SCU438, 22),
- SIG_DESC_SET(SCU4D8, 22));
+SIG_EXPR_LIST_DECL_SEMG(AE25, I3C4SCL, I3C4, I3C4, SIG_DESC_SET(SCU438, 22));
+SIG_EXPR_LIST_DECL_SESG(AE25, FSI2CLK, FSI2, SIG_DESC_SET(SCU4D8, 22));
PIN_DECL_(AE25, SIG_EXPR_LIST_PTR(AE25, I3C4SCL),
SIG_EXPR_LIST_PTR(AE25, FSI2CLK));
#define AF24 247
-SIG_EXPR_LIST_DECL_SEMG(AF24, I3C4SDA, I3C4, I3C4, SIG_DESC_SET(SCU438, 23),
- SIG_DESC_SET(SCU4D8, 23));
-SIG_EXPR_LIST_DECL_SESG(AF24, FSI2DATA, FSI2, SIG_DESC_CLEAR(SCU438, 23),
- SIG_DESC_SET(SCU4D8, 23));
+SIG_EXPR_LIST_DECL_SEMG(AF24, I3C4SDA, I3C4, I3C4, SIG_DESC_SET(SCU438, 23));
+SIG_EXPR_LIST_DECL_SESG(AF24, FSI2DATA, FSI2, SIG_DESC_SET(SCU4D8, 23));
PIN_DECL_(AF24, SIG_EXPR_LIST_PTR(AF24, I3C4SDA),
SIG_EXPR_LIST_PTR(AF24, FSI2DATA));
ASPEED_PINCTRL_PIN(A3),
ASPEED_PINCTRL_PIN(AA11),
ASPEED_PINCTRL_PIN(AA12),
+ ASPEED_PINCTRL_PIN(AA16),
+ ASPEED_PINCTRL_PIN(AA17),
ASPEED_PINCTRL_PIN(AA23),
ASPEED_PINCTRL_PIN(AA24),
ASPEED_PINCTRL_PIN(AA25),
ASPEED_PINCTRL_PIN(AB11),
ASPEED_PINCTRL_PIN(AB12),
ASPEED_PINCTRL_PIN(AB15),
+ ASPEED_PINCTRL_PIN(AB16),
+ ASPEED_PINCTRL_PIN(AB17),
ASPEED_PINCTRL_PIN(AB18),
ASPEED_PINCTRL_PIN(AB19),
ASPEED_PINCTRL_PIN(AB22),
ASPEED_PINCTRL_PIN(AC11),
ASPEED_PINCTRL_PIN(AC12),
ASPEED_PINCTRL_PIN(AC15),
+ ASPEED_PINCTRL_PIN(AC16),
ASPEED_PINCTRL_PIN(AC17),
ASPEED_PINCTRL_PIN(AC18),
ASPEED_PINCTRL_PIN(AC19),
ASPEED_PINCTRL_PIN(AD12),
ASPEED_PINCTRL_PIN(AD14),
ASPEED_PINCTRL_PIN(AD15),
+ ASPEED_PINCTRL_PIN(AD16),
ASPEED_PINCTRL_PIN(AD19),
ASPEED_PINCTRL_PIN(AD20),
ASPEED_PINCTRL_PIN(AD22),
ASPEED_PINCTRL_PIN(AE12),
ASPEED_PINCTRL_PIN(AE14),
ASPEED_PINCTRL_PIN(AE15),
+ ASPEED_PINCTRL_PIN(AE16),
ASPEED_PINCTRL_PIN(AE18),
ASPEED_PINCTRL_PIN(AE19),
+ ASPEED_PINCTRL_PIN(AE25),
+ ASPEED_PINCTRL_PIN(AE26),
ASPEED_PINCTRL_PIN(AE7),
ASPEED_PINCTRL_PIN(AE8),
ASPEED_PINCTRL_PIN(AF10),
ASPEED_PINCTRL_PIN(AF12),
ASPEED_PINCTRL_PIN(AF14),
ASPEED_PINCTRL_PIN(AF15),
+ ASPEED_PINCTRL_PIN(AF24),
+ ASPEED_PINCTRL_PIN(AF25),
ASPEED_PINCTRL_PIN(AF7),
ASPEED_PINCTRL_PIN(AF8),
ASPEED_PINCTRL_PIN(AF9),
ASPEED_PINCTRL_PIN(Y3),
ASPEED_PINCTRL_PIN(Y4),
ASPEED_PINCTRL_PIN(Y5),
- ASPEED_PINCTRL_PIN(AB16),
- ASPEED_PINCTRL_PIN(AA17),
- ASPEED_PINCTRL_PIN(AB17),
- ASPEED_PINCTRL_PIN(AE16),
- ASPEED_PINCTRL_PIN(AC16),
- ASPEED_PINCTRL_PIN(AA16),
- ASPEED_PINCTRL_PIN(AD16),
- ASPEED_PINCTRL_PIN(AF25),
- ASPEED_PINCTRL_PIN(AE26),
- ASPEED_PINCTRL_PIN(AE25),
- ASPEED_PINCTRL_PIN(AF24),
};
static const struct aspeed_pin_group aspeed_g6_groups[] = {
ASPEED_PINCTRL_GROUP(SALT9G1),
ASPEED_PINCTRL_GROUP(SD1),
ASPEED_PINCTRL_GROUP(SD2),
- ASPEED_PINCTRL_GROUP(SD3),
- ASPEED_PINCTRL_GROUP(SD3DAT4),
- ASPEED_PINCTRL_GROUP(SD3DAT5),
- ASPEED_PINCTRL_GROUP(SD3DAT6),
- ASPEED_PINCTRL_GROUP(SD3DAT7),
+ ASPEED_PINCTRL_GROUP(EMMCG1),
+ ASPEED_PINCTRL_GROUP(EMMCG4),
+ ASPEED_PINCTRL_GROUP(EMMCG8),
ASPEED_PINCTRL_GROUP(SGPM1),
ASPEED_PINCTRL_GROUP(SGPS1),
ASPEED_PINCTRL_GROUP(SIOONCTRL),
ASPEED_PINCTRL_FUNC(ADC8),
ASPEED_PINCTRL_FUNC(ADC9),
ASPEED_PINCTRL_FUNC(BMCINT),
+ ASPEED_PINCTRL_FUNC(EMMC),
ASPEED_PINCTRL_FUNC(ESPI),
ASPEED_PINCTRL_FUNC(ESPIALT),
ASPEED_PINCTRL_FUNC(FSI1),
ASPEED_PINCTRL_FUNC(SALT9),
ASPEED_PINCTRL_FUNC(SD1),
ASPEED_PINCTRL_FUNC(SD2),
- ASPEED_PINCTRL_FUNC(SD3),
- ASPEED_PINCTRL_FUNC(SD3DAT4),
- ASPEED_PINCTRL_FUNC(SD3DAT5),
- ASPEED_PINCTRL_FUNC(SD3DAT6),
- ASPEED_PINCTRL_FUNC(SD3DAT7),
ASPEED_PINCTRL_FUNC(SGPM1),
ASPEED_PINCTRL_FUNC(SGPS1),
ASPEED_PINCTRL_FUNC(SIOONCTRL),
* @idx: The bit index in the register
*/
#define SIG_DESC_SET(reg, idx) SIG_DESC_IP_BIT(ASPEED_IP_SCU, reg, idx, 1)
-#define SIG_DESC_CLEAR(reg, idx) SIG_DESC_IP_BIT(ASPEED_IP_SCU, reg, idx, 0)
+#define SIG_DESC_CLEAR(reg, idx) { ASPEED_IP_SCU, reg, BIT_MASK(idx), 0, 0 }
#define SIG_DESC_LIST_SYM(sig, group) sig_descs_ ## sig ## _ ## group
#define SIG_DESC_LIST_DECL(sig, group, ...) \
static const char *FUNC_SYM(func)[] = { __VA_ARGS__ }
#define FUNC_DECL_2(func, one, two) FUNC_DECL_(func, #one, #two)
+#define FUNC_DECL_3(func, one, two, three) FUNC_DECL_(func, #one, #two, #three)
#define FUNC_GROUP_DECL(func, ...) \
GROUP_DECL(func, __VA_ARGS__); \
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014-2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
/* optional GPIO interrupt support */
irq = platform_get_irq(pdev, 0);
- if (irq) {
+ if (irq > 0) {
struct irq_chip *irqc;
struct gpio_irq_chip *girq;
const struct ns2_pin_function *func;
const struct ns2_pin_group *grp;
- if (grp_select > pinctrl->num_groups ||
- func_select > pinctrl->num_functions)
+ if (grp_select >= pinctrl->num_groups ||
+ func_select >= pinctrl->num_functions)
return -EINVAL;
func = &pinctrl->functions[func_select];
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO5 */
BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO3 */
BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM5 */
- BERLIN_PINCTRL_FUNCTION(0x3, "spififib"), /* SPDIFIB */
+ BERLIN_PINCTRL_FUNCTION(0x3, "spdifib"), /* SPDIFIB */
BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG5 */
BERLIN_PINCTRL_GROUP("I2S1_MCLK", 0x0, 0x3, 0x12,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{}
* @pctldesc: Pin controller description
* @pctldev: Pointer to the pin controller device
* @chip: GPIO chip in this pin controller
+ * @irqchip: IRQ chip in this pin controller
* @soc: SoC/PCH specific pin configuration data
* @communities: All communities in this pin controller
* @ncommunities: Number of communities in this pin controller
struct pinctrl_desc pctldesc;
struct pinctrl_dev *pctldev;
struct gpio_chip chip;
+ struct irq_chip irqchip;
const struct intel_pinctrl_soc_data *soc;
struct intel_community *communities;
size_t ncommunities;
return ret;
}
-static struct irq_chip intel_gpio_irqchip = {
- .name = "intel-gpio",
- .irq_ack = intel_gpio_irq_ack,
- .irq_mask = intel_gpio_irq_mask,
- .irq_unmask = intel_gpio_irq_unmask,
- .irq_set_type = intel_gpio_irq_type,
- .irq_set_wake = intel_gpio_irq_wake,
- .flags = IRQCHIP_MASK_ON_SUSPEND,
-};
-
static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
const struct intel_community *community)
{
pctrl->chip = intel_gpio_chip;
+ /* Setup GPIO chip */
pctrl->chip.ngpio = intel_gpio_ngpio(pctrl);
pctrl->chip.label = dev_name(pctrl->dev);
pctrl->chip.parent = pctrl->dev;
pctrl->chip.base = -1;
pctrl->irq = irq;
+ /* Setup IRQ chip */
+ pctrl->irqchip.name = dev_name(pctrl->dev);
+ pctrl->irqchip.irq_ack = intel_gpio_irq_ack;
+ pctrl->irqchip.irq_mask = intel_gpio_irq_mask;
+ pctrl->irqchip.irq_unmask = intel_gpio_irq_unmask;
+ pctrl->irqchip.irq_set_type = intel_gpio_irq_type;
+ pctrl->irqchip.irq_set_wake = intel_gpio_irq_wake;
+ pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
if (ret) {
dev_err(pctrl->dev, "failed to register gpiochip\n");
return ret;
}
- ret = gpiochip_irqchip_add(&pctrl->chip, &intel_gpio_irqchip, 0,
+ ret = gpiochip_irqchip_add(&pctrl->chip, &pctrl->irqchip, 0,
handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "failed to add irqchip\n");
return ret;
}
- gpiochip_set_chained_irqchip(&pctrl->chip, &intel_gpio_irqchip, irq,
- NULL);
+ gpiochip_set_chained_irqchip(&pctrl->chip, &pctrl->irqchip, irq, NULL);
return 0;
}
PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
18, 2, "gpio", "uart"),
- PIN_GRP_GPIO("led0_od", 11, 1, BIT(20), "led"),
- PIN_GRP_GPIO("led1_od", 12, 1, BIT(21), "led"),
- PIN_GRP_GPIO("led2_od", 13, 1, BIT(22), "led"),
- PIN_GRP_GPIO("led3_od", 14, 1, BIT(23), "led"),
+ PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
+ PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
+ PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
+ PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
};
};
static inline void armada_37xx_update_reg(unsigned int *reg,
- unsigned int offset)
+ unsigned int *offset)
{
/* We never have more than 2 registers */
- if (offset >= GPIO_PER_REG) {
- offset -= GPIO_PER_REG;
+ if (*offset >= GPIO_PER_REG) {
+ *offset -= GPIO_PER_REG;
*reg += sizeof(u32);
}
}
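Why armada_37xx_update_reg() now takes the offset by reference (editor's note): the old by-value signature advanced the register, but callers kept computing BIT() from the unadjusted offset, so pins beyond the first register toggled the wrong bit. A sketch, assuming GPIO_PER_REG is 32 as in this driver:

	unsigned int reg = OUTPUT_EN;
	int offset = 37;	/* a pin served by the second register */

	armada_37xx_update_reg(&reg, &offset);	/* reg += sizeof(u32), offset == 5 */
	mask = BIT(offset);			/* bit 5, not a stale bit 37 */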
{
int offset = irqd_to_hwirq(d);
- armada_37xx_update_reg(reg, offset);
+ armada_37xx_update_reg(reg, &offset);
}
static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
unsigned int reg = OUTPUT_EN;
unsigned int mask;
-	armada_37xx_update_reg(&reg, offset);
+	armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
return regmap_update_bits(info->regmap, reg, mask, 0);
unsigned int reg = OUTPUT_EN;
unsigned int val, mask;
-	armada_37xx_update_reg(&reg, offset);
+	armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
regmap_read(info->regmap, reg, &val);
unsigned int reg = OUTPUT_EN;
unsigned int mask, val, ret;
-	armada_37xx_update_reg(&reg, offset);
+	armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
ret = regmap_update_bits(info->regmap, reg, mask, mask);
unsigned int reg = INPUT_VAL;
unsigned int val, mask;
-	armada_37xx_update_reg(&reg, offset);
+	armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
regmap_read(info->regmap, reg, &val);
unsigned int reg = OUTPUT_VAL;
unsigned int mask, val;
-	armada_37xx_update_reg(&reg, offset);
+	armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
val = value ? mask : 0;
static int stmfx_pinctrl_remove(struct platform_device *pdev)
{
- struct stmfx *stmfx = dev_get_platdata(&pdev->dev);
+ struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent);
return stmfx_function_disable(stmfx,
STMFX_FUNC_GPIO |
static int cmpc_accel_remove_v4(struct acpi_device *acpi)
{
- struct input_dev *inputdev;
- struct cmpc_accel *accel;
-
- inputdev = dev_get_drvdata(&acpi->dev);
- accel = dev_get_drvdata(&inputdev->dev);
-
device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr_v4);
device_remove_file(&acpi->dev, &cmpc_accel_g_select_attr_v4);
return cmpc_remove_acpi_notify_device(acpi);
static int cmpc_accel_remove(struct acpi_device *acpi)
{
- struct input_dev *inputdev;
- struct cmpc_accel *accel;
-
- inputdev = dev_get_drvdata(&acpi->dev);
- accel = dev_get_drvdata(&inputdev->dev);
-
device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr);
return cmpc_remove_acpi_notify_device(acpi);
}
if (ret < 0) {
dev_dbg(dev, "Error requesting irq at index %d: %d\n",
inst_data[i].irq_idx, ret);
+ goto error;
}
board_info.irq = ret;
break;
platform_set_drvdata(pdev, punit_ipcdev);
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq < 0) {
- punit_ipcdev->irq = 0;
dev_warn(&pdev->dev, "Invalid IRQ, using polling mode\n");
} else {
ret = devm_request_irq(&pdev->dev, irq, intel_punit_ioc,
help
This driver adds support for using the PCH EG20T as a PTP
clock. The hardware supports time stamping of PTP packets
- when using the end-to-end delay (E2E) mechansim. The peer
- delay mechansim (P2P) is not supported.
+ when using the end-to-end delay (E2E) mechanism. The peer
+ delay mechanism (P2P) is not supported.
This clock is only useful if your PTP programs are getting
hardware time stamps on the PTP Ethernet packets using the
init_data = regulator_of_get_init_data(dev, regulator_desc, config,
&rdev->dev.of_node);
+
+ /*
+	 * Sometimes not all resources are probed already, so we need to take
+	 * that into account. This happens most of the time if the ena_gpiod
+	 * comes from a gpio extender or something else.
+ */
+ if (PTR_ERR(init_data) == -EPROBE_DEFER) {
+ kfree(config);
+ kfree(rdev);
+ ret = -EPROBE_DEFER;
+ goto rinse;
+ }
+
/*
* We need to keep track of any GPIO descriptor coming from the
* device tree until we have handled it over to the core. If the
static unsigned da9062_buck_get_mode(struct regulator_dev *rdev)
{
struct da9062_regulator *regl = rdev_get_drvdata(rdev);
- struct regmap_field *field;
unsigned int val, mode = 0;
int ret;
return REGULATOR_MODE_NORMAL;
}
- /* Detect current regulator state */
- ret = regmap_field_read(regl->suspend, &val);
- if (ret < 0)
- return 0;
-
- /* Read regulator mode from proper register, depending on state */
- if (val)
- field = regl->suspend_sleep;
- else
- field = regl->sleep;
-
- ret = regmap_field_read(field, &val);
+ ret = regmap_field_read(regl->sleep, &val);
if (ret < 0)
return 0;
static unsigned da9062_ldo_get_mode(struct regulator_dev *rdev)
{
struct da9062_regulator *regl = rdev_get_drvdata(rdev);
- struct regmap_field *field;
int ret, val;
- /* Detect current regulator state */
- ret = regmap_field_read(regl->suspend, &val);
- if (ret < 0)
- return 0;
-
- /* Read regulator mode from proper register, depending on state */
- if (val)
- field = regl->suspend_sleep;
- else
- field = regl->sleep;
-
- ret = regmap_field_read(field, &val);
+ ret = regmap_field_read(regl->sleep, &val);
if (ret < 0)
return 0;
__builtin_ffs((int)DA9062AA_BUCK1_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK1_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK1_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK1_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK1_CONF_MASK) - 1),
},
{
.desc.id = DA9061_ID_BUCK2,
__builtin_ffs((int)DA9062AA_BUCK3_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK3_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK3_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK3_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK3_CONF_MASK) - 1),
},
{
.desc.id = DA9061_ID_BUCK3,
__builtin_ffs((int)DA9062AA_BUCK4_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK4_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK4_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK4_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK4_CONF_MASK) - 1),
},
{
.desc.id = DA9061_ID_LDO1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO1_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO1_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO1_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO1_CONT,
+ __builtin_ffs((int)DA9062AA_LDO1_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO1_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO1_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO1_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO2_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO2_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO2_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO2_CONT,
+ __builtin_ffs((int)DA9062AA_LDO2_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO2_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO2_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO2_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO3_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO3_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO3_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO3_CONT,
+ __builtin_ffs((int)DA9062AA_LDO3_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO3_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO3_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO3_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO4_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO4_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO4_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO4_CONT,
+ __builtin_ffs((int)DA9062AA_LDO4_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO4_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO4_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO4_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_ffs((int)DA9062AA_BUCK1_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK1_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK1_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK1_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK1_CONF_MASK) - 1),
},
{
.desc.id = DA9062_ID_BUCK2,
__builtin_ffs((int)DA9062AA_BUCK2_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK2_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK2_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK2_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK2_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK2_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK2_CONF_MASK) - 1),
},
{
.desc.id = DA9062_ID_BUCK3,
__builtin_ffs((int)DA9062AA_BUCK3_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK3_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK3_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK3_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK3_CONF_MASK) - 1),
},
{
.desc.id = DA9062_ID_BUCK4,
__builtin_ffs((int)DA9062AA_BUCK4_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK4_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK4_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK4_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK4_CONF_MASK) - 1),
},
{
.desc.id = DA9062_ID_LDO1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO1_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO1_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO1_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO1_CONT,
+ __builtin_ffs((int)DA9062AA_LDO1_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO1_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO1_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO1_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO2_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO2_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO2_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO2_CONT,
+ __builtin_ffs((int)DA9062AA_LDO2_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO2_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO2_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO2_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO3_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO3_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO3_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO3_CONT,
+ __builtin_ffs((int)DA9062AA_LDO3_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO3_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO3_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO3_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO4_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO4_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO4_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO4_CONT,
+ __builtin_ffs((int)DA9062AA_LDO4_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO4_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO4_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO4_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
struct device *dev = &pdev->dev;
struct fixed_voltage_config *config;
struct fixed_voltage_data *drvdata;
- const struct fixed_dev_type *drvtype =
- of_match_device(dev->driver->of_match_table, dev)->data;
+ const struct fixed_dev_type *drvtype = of_device_get_match_data(dev);
struct regulator_config cfg = { };
enum gpiod_flags gflags;
int ret;
drvdata->desc.type = REGULATOR_VOLTAGE;
drvdata->desc.owner = THIS_MODULE;
- if (drvtype->has_enable_clock) {
+ if (drvtype && drvtype->has_enable_clock) {
drvdata->desc.ops = &fixed_voltage_clkenabled_ops;
drvdata->enable_clock = devm_clk_get(dev, NULL);
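Editor's note: the switch to of_device_get_match_data() also hardens the probe path. of_match_device() can return NULL when nothing matched and the old code dereferenced its result unconditionally, whereas of_device_get_match_data() simply yields NULL, which the drvtype check below tolerates. A minimal sketch of the idiom; the foo_* names are hypothetical, not from this patch:

    #include <linux/of_device.h>

    struct foo_dev_type { bool has_enable_clock; };  /* hypothetical */

    static int foo_probe(struct platform_device *pdev)
    {
            /* NULL when there is no OF match data; never oopses */
            const struct foo_dev_type *drvtype =
                    of_device_get_match_data(&pdev->dev);

            if (drvtype && drvtype->has_enable_clock)
                    /* take the clock-gated variant */;
            return 0;
    }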
.enable_time = 3000,
.ramp_delay = 1000,
+ .off_on_delay = 15000,
.owner = THIS_MODULE,
},
"regulator-off-in-suspend"))
suspend_state->enabled = DISABLE_IN_SUSPEND;
- if (!of_property_read_u32(np, "regulator-suspend-min-microvolt",
- &pval))
+ if (!of_property_read_u32(suspend_np,
+ "regulator-suspend-min-microvolt", &pval))
suspend_state->min_uV = pval;
- if (!of_property_read_u32(np, "regulator-suspend-max-microvolt",
- &pval))
+ if (!of_property_read_u32(suspend_np,
+ "regulator-suspend-max-microvolt", &pval))
suspend_state->max_uV = pval;
if (!of_property_read_u32(suspend_np,
goto error;
}
- if (desc->of_parse_cb && desc->of_parse_cb(child, desc, config)) {
- dev_err(dev,
- "driver callback failed to parse DT for regulator %pOFn\n",
- child);
- goto error;
+ if (desc->of_parse_cb) {
+ int ret;
+
+ ret = desc->of_parse_cb(child, desc, config);
+ if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ of_node_put(child);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+ dev_err(dev,
+ "driver callback failed to parse DT for regulator %pOFn\n",
+ child);
+ goto error;
+ }
}
*node = child;
/* SW2~SW4 high bit check and modify the voltage value table */
if (i >= sw_check_start && i <= sw_check_end) {
- regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
+ ret = regmap_read(pfuze_chip->regmap,
+ desc->vsel_reg, &val);
+ if (ret) {
+ dev_err(&client->dev, "Fails to read from the register.\n");
+ return ret;
+ }
+
if (val & sw_hi) {
if (pfuze_chip->chip_id == PFUZE3000 ||
pfuze_chip->chip_id == PFUZE3001) {
static const struct rpmh_vreg_hw_data pmic5_bob = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_bypass_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 135, 32000),
- .n_voltages = 136,
+ .voltage_range = REGULATOR_LINEAR_RANGE(3000000, 0, 31, 32000),
+ .n_voltages = 32,
.pmic_mode_map = pmic_mode_map_pmic5_bob,
.of_map_mode = rpmh_regulator_pmic4_bob_of_map_mode,
};
while (timeout++ <= abb->settling_time) {
status = ti_abb_check_txdone(abb);
if (status)
- break;
+ return 0;
udelay(1);
}
- if (timeout > abb->settling_time) {
- dev_warn_ratelimited(dev,
- "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
- __func__, timeout, readl(abb->int_base));
- return -ETIMEDOUT;
- }
-
- return 0;
+ dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
+ __func__, timeout, readl(abb->int_base));
+ return -ETIMEDOUT;
}
/**
status = ti_abb_check_txdone(abb);
if (!status)
- break;
+ return 0;
udelay(1);
}
- if (timeout > abb->settling_time) {
- dev_warn_ratelimited(dev,
- "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
- __func__, timeout, readl(abb->int_base));
- return -ETIMEDOUT;
- }
-
- return 0;
+ dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
+ __func__, timeout, readl(abb->int_base));
+ return -ETIMEDOUT;
}
/**
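Editor's note: both ti_abb hunks above apply the same restructuring, returning success from inside the poll loop so that falling out of the loop can only mean timeout, which removes the extra counter re-check. The general shape of the idiom, as an illustrative sketch; poll_done() and limit_us are placeholders:

    static bool poll_done(void);        /* placeholder hardware check */

    static int poll_for_done(struct device *dev, u32 limit_us)
    {
            u32 t;

            for (t = 0; t <= limit_us; t++) {
                    if (poll_done())
                            return 0;       /* success exits here */
                    udelay(1);
            }
            /* reaching this point always means we timed out */
            dev_warn_ratelimited(dev, "timeout after %uuS\n", t);
            return -ETIMEDOUT;
    }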
if (filp->f_inode->i_cdev == &zcrypt_cdev) {
struct zcdn_device *zcdndev;
- if (mutex_lock_interruptible(&ap_perms_mutex))
- return -ERESTARTSYS;
+ mutex_lock(&ap_perms_mutex);
zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
mutex_unlock(&ap_perms_mutex);
if (zcdndev) {
static void qeth_l2_vnicc_init(struct qeth_card *card)
{
u32 *timeout = &card->options.vnicc.learning_timeout;
+ bool enable, error = false;
unsigned int chars_len, i;
unsigned long chars_tmp;
u32 sup_cmds, vnicc;
- bool enable, error;
QETH_CARD_TEXT(card, 2, "vniccini");
/* reset rx_bcast */
chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE;
for_each_set_bit(i, &chars_tmp, chars_len) {
vnicc = BIT(i);
- qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds);
- if (!(sup_cmds & IPA_VNICC_SET_TIMEOUT) ||
- !(sup_cmds & IPA_VNICC_GET_TIMEOUT))
+ if (qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds)) {
+ sup_cmds = 0;
+ error = true;
+ }
+ if ((sup_cmds & IPA_VNICC_SET_TIMEOUT) &&
+ (sup_cmds & IPA_VNICC_GET_TIMEOUT))
+ card->options.vnicc.getset_timeout_sup |= vnicc;
+ else
card->options.vnicc.getset_timeout_sup &= ~vnicc;
- if (!(sup_cmds & IPA_VNICC_ENABLE) ||
- !(sup_cmds & IPA_VNICC_DISABLE))
+ if ((sup_cmds & IPA_VNICC_ENABLE) &&
+ (sup_cmds & IPA_VNICC_DISABLE))
+ card->options.vnicc.set_char_sup |= vnicc;
+ else
card->options.vnicc.set_char_sup &= ~vnicc;
}
/* enforce assumed default values and recover settings, if changed */
- error = qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
- timeout);
+ error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
+ timeout);
chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT;
chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE;
chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
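Editor's note: the rewritten init path walks the supported-characteristics mask with for_each_set_bit() and folds failures into one flag with |=, so a successful later step can no longer hide an earlier query error. A hedged sketch of the accumulation pattern; query_feature() and recover_settings() are invented stand-ins:

    unsigned long mask = sup_chars_mask;    /* placeholder feature mask */
    unsigned int bit;
    bool error = false;

    for_each_set_bit(bit, &mask, BITS_PER_LONG) {
            if (query_feature(BIT(bit)))    /* hypothetical helper */
                    error = true;
    }
    error |= recover_settings();            /* |= keeps old failures */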
struct kmem_cache *zfcp_fsf_qtcb_cache;
+static bool ber_stop = true;
+module_param(ber_stop, bool, 0600);
+MODULE_PARM_DESC(ber_stop,
+ "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)");
+
static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
{
struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
case FSF_STATUS_READ_SENSE_DATA_AVAIL:
break;
case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
- dev_warn(&adapter->ccw_device->dev,
- "The error threshold for checksum statistics "
- "has been exceeded\n");
zfcp_dbf_hba_bit_err("fssrh_3", req);
+ if (ber_stop) {
+ dev_warn(&adapter->ccw_device->dev,
+ "All paths over this FCP device are disused because of excessive bit errors\n");
+ zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
+ } else {
+ dev_warn(&adapter->ccw_device->dev,
+ "The error threshold for checksum statistics has been exceeded\n");
+ }
break;
case FSF_STATUS_READ_LINK_DOWN:
zfcp_fsf_status_read_link_down(req);
config 53C700_LE_ON_BE
bool
- depends on SCSI_LASI700
+ depends on SCSI_LASI700 || SCSI_SNI_53C710
default y
config SCSI_STEX
scsi_changer *ch = file->private_data;
scsi_device_put(ch->device);
- ch->device = NULL;
file->private_data = NULL;
kref_put(&ch->ref, ch_destroy);
return 0;
unsigned int tpg_desc_tbl_off;
unsigned char orig_transition_tmo;
unsigned long flags;
+ bool transitioning_sense = false;
if (!pg->expiry) {
unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;
goto retry;
}
/*
- * Retry on ALUA state transition or if any
- * UNIT ATTENTION occurred.
+ * If the array returns with 'ALUA state transition'
+ * sense code here it cannot return RTPG data during
+ * transition. So set the state to 'transitioning' directly.
*/
if (sense_hdr.sense_key == NOT_READY &&
- sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
- err = SCSI_DH_RETRY;
- else if (sense_hdr.sense_key == UNIT_ATTENTION)
+ sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) {
+ transitioning_sense = true;
+ goto skip_rtpg;
+ }
+ /*
+ * Retry if any other UNIT ATTENTION occurred.
+ */
+ if (sense_hdr.sense_key == UNIT_ATTENTION)
err = SCSI_DH_RETRY;
if (err == SCSI_DH_RETRY &&
pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
off = 8 + (desc[7] * 4);
}
+ skip_rtpg:
spin_lock_irqsave(&pg->lock, flags);
+ if (transitioning_sense)
+ pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
+
sdev_printk(KERN_INFO, sdev,
"%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
return SCSI_MLQUEUE_HOST_BUSY;
}
+ c->device = dev;
+
enqueue_cmd_and_start_io(h, c);
/* the cmd'll come back via intr handler in complete_scsi_command() */
return 0;
hpsa_cmd_init(h, c->cmdindex, c);
c->cmd_type = CMD_SCSI;
c->scsi_cmd = cmd;
+ c->device = dev;
rc = hpsa_scsi_ioaccel_raid_map(h, c);
if (rc < 0) /* scsi_dma_map failed. */
rc = SCSI_MLQUEUE_HOST_BUSY;
hpsa_cmd_init(h, c->cmdindex, c);
c->cmd_type = CMD_SCSI;
c->scsi_cmd = cmd;
+ c->device = dev;
rc = hpsa_scsi_ioaccel_direct_map(h, c);
if (rc < 0) /* scsi_dma_map failed. */
rc = SCSI_MLQUEUE_HOST_BUSY;
}
}
-#if defined(BUILD_NVME)
/* Clear NVME stats */
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
}
}
-#endif
/* Clear SCSI stats */
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
list_del_init(&psb->list);
psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
-#ifdef BUILD_NVME
if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
qp->abts_nvme_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
return;
}
-#endif
qp->abts_scsi_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
if (sense_len == 0) {
rsp->status_srb = NULL;
sp->done(sp, cp->result);
- } else {
- WARN_ON_ONCE(true);
}
}
req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+ if (unlikely(!ha->wq)) {
+ ret = -ENOMEM;
+ goto probe_failed;
+ }
if (ha->isp_ops->initialize_adapter(base_vha)) {
ql_log(ql_log_fatal, base_vha, 0x00d6,
ses->data_direction = scmd->sc_data_direction;
ses->sdb = scmd->sdb;
ses->result = scmd->result;
+ ses->resid_len = scmd->req.resid_len;
ses->underflow = scmd->underflow;
ses->prot_op = scmd->prot_op;
ses->eh_eflags = scmd->eh_eflags;
memset(scmd->cmnd, 0, BLK_MAX_CDB);
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
scmd->result = 0;
+ scmd->req.resid_len = 0;
if (sense_bytes) {
scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
scmd->sc_data_direction = ses->data_direction;
scmd->sdb = ses->sdb;
scmd->result = ses->result;
+ scmd->req.resid_len = ses->resid_len;
scmd->underflow = ses->underflow;
scmd->prot_op = ses->prot_op;
scmd->eh_eflags = ses->eh_eflags;
.init_request = scsi_mq_init_request,
.exit_request = scsi_mq_exit_request,
.initialize_rq_fn = scsi_initialize_rq,
+ .cleanup_rq = scsi_cleanup_rq,
.busy = scsi_mq_lld_busy,
.map_queues = scsi_map_queues,
};
{
struct scsi_device *sdev = NULL;
- if (q->mq_ops == &scsi_mq_ops)
+ if (q->mq_ops == &scsi_mq_ops_no_commit ||
+ q->mq_ops == &scsi_mq_ops)
sdev = q->queuedata;
if (!sdev || !get_device(&sdev->sdev_gendev))
sdev = NULL;
const char *buf, size_t count)
{
struct kernfs_node *kn;
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ /*
+ * Take a reference on the device so that its module cannot be
+ * removed while the delete below is in progress.
+ */
+ if (scsi_device_get(sdev))
+ return -ENODEV;
kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
WARN_ON_ONCE(!kn);
* state into SDEV_DEL.
*/
device_remove_file(dev, attr);
- scsi_remove_device(to_scsi_device(dev));
+ scsi_remove_device(sdev);
if (kn)
sysfs_unbreak_active_protection(kn);
+ scsi_device_put(sdev);
return count;
};
static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
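Editor's note: the store callback deletes the very SCSI device that owns the attribute, so it must pin the device (and thereby the LLDD module) first, or the module could be unloaded mid-delete. The shape of the pattern, as a sketch that omits the sysfs active-protection dance shown above:

    static ssize_t store_delete(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
    {
            struct scsi_device *sdev = to_scsi_device(dev);

            if (scsi_device_get(sdev))      /* also takes a module ref */
                    return -ENODEV;
            scsi_remove_device(sdev);       /* safe: module is pinned */
            scsi_device_put(sdev);
            return count;
    }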
/* we need to evaluate the error return */
if (scsi_sense_valid(sshdr) &&
(sshdr->asc == 0x3a || /* medium not present */
- sshdr->asc == 0x20)) /* invalid command */
+ sshdr->asc == 0x20 || /* invalid command */
+ (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* drive is password locked */
/* this is no error here */
return 0;
base = res->start;
hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
- if (!hostdata) {
- dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
+ if (!hostdata)
return -ENOMEM;
- }
hostdata->dev = &dev->dev;
dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
hdr->func = IMX_SC_MISC_FUNC_UNIQUE_ID;
hdr->size = 1;
- ret = imx_scu_call_rpc(soc_ipc_handle, &msg, false);
+ ret = imx_scu_call_rpc(soc_ipc_handle, &msg, true);
if (ret) {
pr_err("%s: get soc uid failed, ret %d\n", __func__, ret);
return ret;
struct se_device *dev = cmd->se_dev;
unsigned int size;
- /*
- * Clear a lun set in the cdb if the initiator talking to use spoke
- * and old standards version, as we can't assume the underlying device
- * won't choke up on it.
- */
- switch (cdb[0]) {
- case READ_10: /* SBC - RDProtect */
- case READ_12: /* SBC - RDProtect */
- case READ_16: /* SBC - RDProtect */
- case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
- case VERIFY: /* SBC - VRProtect */
- case VERIFY_16: /* SBC - VRProtect */
- case WRITE_VERIFY: /* SBC - VRProtect */
- case WRITE_VERIFY_12: /* SBC - VRProtect */
- case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
- break;
- default:
- cdb[1] &= 0x1f; /* clear logical unit number */
- break;
- }
-
/*
* For REPORT LUNS we always need to emulate the response, for everything
* else, pass it up.
struct cpufreq_policy *policy;
struct list_head node;
struct time_in_idle *idle_time;
- struct dev_pm_qos_request qos_req;
+ struct freq_qos_request qos_req;
};
static DEFINE_IDA(cpufreq_ida);
cpufreq_cdev->cpufreq_state = state;
- return dev_pm_qos_update_request(&cpufreq_cdev->qos_req,
+ return freq_qos_update_request(&cpufreq_cdev->qos_req,
cpufreq_cdev->freq_table[state].frequency);
}
cooling_ops = &cpufreq_cooling_ops;
}
- ret = dev_pm_qos_add_request(dev, &cpufreq_cdev->qos_req,
- DEV_PM_QOS_MAX_FREQUENCY,
- cpufreq_cdev->freq_table[0].frequency);
+ ret = freq_qos_add_request(&policy->constraints,
+ &cpufreq_cdev->qos_req, FREQ_QOS_MAX,
+ cpufreq_cdev->freq_table[0].frequency);
if (ret < 0) {
pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
ret);
return cdev;
remove_qos_req:
- dev_pm_qos_remove_request(&cpufreq_cdev->qos_req);
+ freq_qos_remove_request(&cpufreq_cdev->qos_req);
remove_ida:
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
free_table:
mutex_unlock(&cooling_list_lock);
thermal_cooling_device_unregister(cdev);
- dev_pm_qos_remove_request(&cpufreq_cdev->qos_req);
+ freq_qos_remove_request(&cpufreq_cdev->qos_req);
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
kfree(cpufreq_cdev->idle_time);
kfree(cpufreq_cdev->freq_table);
struct bus_type *bus = NULL;
int ret;
bool resv_msi, msi_remap;
- phys_addr_t resv_msi_base;
+ phys_addr_t resv_msi_base = 0;
struct iommu_domain_geometry geo;
LIST_HEAD(iova_copy);
LIST_HEAD(group_resv_regions);
vhost_test_stop(n, &private);
vhost_test_flush(n);
+ vhost_dev_stop(&n->dev);
vhost_dev_cleanup(&n->dev);
/* We do an extra flush before freeing memory,
* since jobs can re-queue themselves. */
}
vhost_test_stop(n, &priv);
vhost_test_flush(n);
+ vhost_dev_stop(&n->dev);
vhost_dev_reset_owner(&n->dev, umem);
done:
mutex_unlock(&n->dev.mutex);
mappass->reqcopy = *req;
icsk = inet_csk(mappass->sock->sk);
queue = &icsk->icsk_accept_queue;
- data = queue->rskq_accept_head != NULL;
+ data = READ_ONCE(queue->rskq_accept_head) != NULL;
if (data) {
mappass->reqcopy.cmd = 0;
ret = 0;
btrfs_err(info,
"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
cache->key.objectid);
+ btrfs_put_block_group(cache);
ret = -EINVAL;
goto error;
}
struct btrfs_workqueue *fixup_workers;
struct btrfs_workqueue *delayed_workers;
- /* the extent workers do delayed refs on the extent allocation tree */
- struct btrfs_workqueue *extent_workers;
struct task_struct *transaction_kthread;
struct task_struct *cleaner_kthread;
u32 thread_pool_size;
int nitems, bool use_global_rsv);
void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
-void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
- bool qgroup_free);
+void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes);
int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
out_qgroup:
btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
out_fail:
- btrfs_inode_rsv_release(inode, true);
if (delalloc_lock)
mutex_unlock(&inode->delalloc_mutex);
return ret;
* btrfs_delalloc_release_extents - release our outstanding_extents
* @inode: the inode to balance the reservation for.
* @num_bytes: the number of bytes we originally reserved with
- * @qgroup_free: do we need to free qgroup meta reservation or convert them.
*
* When we reserve space we increase outstanding_extents for the extents we may
* add. Once we've set the range as delalloc or created our ordered extents we
* temporarily tracked outstanding_extents. This _must_ be used in conjunction
* with btrfs_delalloc_reserve_metadata.
*/
-void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
- bool qgroup_free)
+void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
unsigned num_extents;
if (btrfs_is_testing(fs_info))
return;
- btrfs_inode_rsv_release(inode, qgroup_free);
+ btrfs_inode_rsv_release(inode, true);
}
/**
btrfs_destroy_workqueue(fs_info->readahead_workers);
btrfs_destroy_workqueue(fs_info->flush_workers);
btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
- btrfs_destroy_workqueue(fs_info->extent_workers);
/*
* Now that all other work queues are destroyed, we can safely destroy
* the queues used for metadata I/O, since tasks from those other work
max_active, 2);
fs_info->qgroup_rescan_workers =
btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
- fs_info->extent_workers =
- btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
- min_t(u64, fs_devices->num_devices,
- max_active), 8);
if (!(fs_info->workers && fs_info->delalloc_workers &&
fs_info->submit_workers && fs_info->flush_workers &&
fs_info->endio_freespace_worker && fs_info->rmw_workers &&
fs_info->caching_workers && fs_info->readahead_workers &&
fs_info->fixup_workers && fs_info->delayed_workers &&
- fs_info->extent_workers &&
fs_info->qgroup_rescan_workers)) {
return -ENOMEM;
}
force_page_uptodate);
if (ret) {
btrfs_delalloc_release_extents(BTRFS_I(inode),
- reserve_bytes, true);
+ reserve_bytes);
break;
}
if (extents_locked == -EAGAIN)
goto again;
btrfs_delalloc_release_extents(BTRFS_I(inode),
- reserve_bytes, true);
+ reserve_bytes);
ret = extents_locked;
break;
}
else
free_extent_state(cached_state);
- btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes,
- true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
if (ret) {
btrfs_drop_pages(pages, num_pages);
break;
struct btrfs_trans_handle *trans;
struct btrfs_log_ctx ctx;
int ret = 0, err;
- u64 len;
- /*
- * If the inode needs a full sync, make sure we use a full range to
- * avoid log tree corruption, due to hole detection racing with ordered
- * extent completion for adjacent ranges, and assertion failures during
- * hole detection.
- */
- if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags)) {
- start = 0;
- end = LLONG_MAX;
- }
-
- /*
- * The range length can be represented by u64, we have to do the typecasts
- * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
- */
- len = (u64)end - (u64)start + 1;
trace_btrfs_sync_file(file, datasync);
btrfs_init_log_ctx(&ctx, inode);
atomic_inc(&root->log_batch);
+ /*
+ * If the inode needs a full sync, make sure we use a full range to
+ * avoid log tree corruption, due to hole detection racing with ordered
+ * extent completion for adjacent ranges, and assertion failures during
+ * hole detection. Do this while holding the inode lock, to avoid races
+ * with other tasks.
+ */
+ if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags)) {
+ start = 0;
+ end = LLONG_MAX;
+ }
+
/*
* Before we acquired the inode's lock, someone may have dirtied more
* pages in the target range. We need to make sure that writeback for
/*
* We have to do this here to avoid the priority inversion of waiting on
* IO of a lower priority task while holding a transaction open.
+ *
+ * Also, the range length can only be represented by a u64, so we have
+ * to do the typecasts to avoid signed overflow if it's [0, LLONG_MAX].
*/
- ret = btrfs_wait_ordered_range(inode, start, len);
+ ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
if (ret) {
up_write(&BTRFS_I(inode)->dio_sem);
inode_unlock(inode);
ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
prealloc, prealloc, &alloc_hint);
if (ret) {
- btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc, true);
goto out_put;
}
ret = btrfs_write_out_ino_cache(root, trans, path, inode);
- btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, false);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
out_put:
iput(inode);
out_release:
ClearPageChecked(page);
set_page_dirty(page);
- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, false);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
&cached_state);
if (!page) {
btrfs_delalloc_release_space(inode, data_reserved,
block_start, blocksize, true);
- btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
ret = -ENOMEM;
goto out;
}
if (ret)
btrfs_delalloc_release_space(inode, data_reserved, block_start,
blocksize, true);
- btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, (ret != 0));
+ btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
unlock_page(page);
put_page(page);
out:
} else if (ret >= 0 && (size_t)ret < count)
btrfs_delalloc_release_space(inode, data_reserved,
offset, count - (size_t)ret, true);
- btrfs_delalloc_release_extents(BTRFS_I(inode), count, false);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), count);
}
out:
if (wakeup)
unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
if (!ret2) {
- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
sb_end_pagefault(inode->i_sb);
extent_changeset_free(data_reserved);
return VM_FAULT_LOCKED;
out_unlock:
unlock_page(page);
out:
- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, (ret != 0));
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
btrfs_delalloc_release_space(inode, data_reserved, page_start,
reserved_space, (ret != 0));
out_noreserve:
unlock_page(pages[i]);
put_page(pages[i]);
}
- btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT,
- false);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
extent_changeset_free(data_reserved);
return i_done;
out:
btrfs_delalloc_release_space(inode, data_reserved,
start_index << PAGE_SHIFT,
page_cnt << PAGE_SHIFT, true);
- btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT,
- true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
extent_changeset_free(data_reserved);
return ret;
return 0;
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
- trace_qgroup_meta_reserve(root, type, (s64)num_bytes);
+ trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
ret = qgroup_reserve(root, num_bytes, enforce, type);
if (ret < 0)
return ret;
*/
num_bytes = sub_root_meta_rsv(root, num_bytes, type);
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
- trace_qgroup_meta_reserve(root, type, -(s64)num_bytes);
+ trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
num_bytes, type);
}
if (!page) {
btrfs_delalloc_release_metadata(BTRFS_I(inode),
PAGE_SIZE, true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode),
+ PAGE_SIZE);
ret = -ENOMEM;
goto out;
}
btrfs_delalloc_release_metadata(BTRFS_I(inode),
PAGE_SIZE, true);
btrfs_delalloc_release_extents(BTRFS_I(inode),
- PAGE_SIZE, true);
+ PAGE_SIZE);
ret = -EIO;
goto out;
}
btrfs_delalloc_release_metadata(BTRFS_I(inode),
PAGE_SIZE, true);
btrfs_delalloc_release_extents(BTRFS_I(inode),
- PAGE_SIZE, true);
+ PAGE_SIZE);
clear_extent_bits(&BTRFS_I(inode)->io_tree,
page_start, page_end,
put_page(page);
index++;
- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE,
- false);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
balance_dirty_pages_ratelimited(inode->i_mapping);
btrfs_throttle(fs_info);
}
}
done:
- if (*p != end)
- goto bad;
+ /* Skip over any unrecognized fields */
+ *p = end;
return 0;
bad:
goto bad;
info->filelock_reply = *p;
- *p += sizeof(*info->filelock_reply);
- if (unlikely(*p != end))
- goto bad;
+ /* Skip over any unrecognized fields */
+ *p = end;
return 0;
-
bad:
return -EIO;
}
{
if (features == (u64)-1 ||
(features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
+ /* Malformed reply? */
if (*p == end) {
info->has_create_ino = false;
} else {
info->has_create_ino = true;
- info->ino = ceph_decode_64(p);
+ ceph_decode_64_safe(p, end, info->ino, bad);
}
+ } else {
+ if (*p != end)
+ goto bad;
}
- if (unlikely(*p != end))
- goto bad;
+ /* Skip over any unrecognized fields */
+ *p = end;
return 0;
-
bad:
return -EIO;
}
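Editor's note: the _safe decode macros bounds-check against 'end' and branch to the supplied label on truncation, which is what lets a short (malformed) reply fail cleanly instead of reading past the buffer; assigning *p = end afterwards deliberately skips fields this kernel does not understand. A hedged sketch of the pattern; the u64 field is invented:

    static int decode_reply(void *buf, size_t len, u64 *out)
    {
            void *p = buf, *end = buf + len;

            ceph_decode_64_safe(&p, end, *out, bad); /* goto bad if short */
            p = end;                /* tolerate unknown trailing fields */
            return 0;
    bad:
            return -EIO;
    }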
for (;;) {
entry = xas_find_conflict(xas);
+ if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
+ return entry;
if (dax_entry_order(entry) < order)
return XA_RETRY_ENTRY;
- if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
- !dax_is_locked(entry))
+ if (!dax_is_locked(entry))
return entry;
wq = dax_entry_waitqueue(xas, entry, &ewait.key);
}
}
-/**
+/*
* dio_complete() - called when all DIO BIO I/O has been completed
- * @offset: the byte offset in the file of the completed operation
*
* This drops i_dio_count, lets interested parties know that a DIO operation
* has completed, and calculates the resulting return code for the operation.
* cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs
* @bdi_id: target bdi id
* @memcg_id: target memcg css id
- * @nr_pages: number of pages to write, 0 for best-effort dirty flushing
+ * @nr: number of pages to write, 0 for best-effort dirty flushing
* @reason: reason why some writeback work initiated
* @done: target wb_completion
*
}
static const struct fs_context_operations gfs2_meta_context_ops = {
+ .free = gfs2_fc_free,
.get_tree = gfs2_meta_get_tree,
};
unsigned sq_entries;
unsigned sq_mask;
unsigned sq_thread_idle;
+ unsigned cached_sq_dropped;
struct io_uring_sqe *sq_sqes;
struct list_head defer_list;
struct {
unsigned cached_cq_tail;
+ atomic_t cached_cq_overflow;
unsigned cq_entries;
unsigned cq_mask;
struct wait_queue_head cq_wait;
#define REQ_F_FAIL_LINK 256 /* fail rest of links */
#define REQ_F_SHADOW_DRAIN 512 /* link-drain shadow req */
#define REQ_F_TIMEOUT 1024 /* timeout request */
+#define REQ_F_ISREG 2048 /* regular file */
+#define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */
u64 user_data;
u32 result;
u32 sequence;
static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
struct io_kiocb *req)
{
- return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped;
+ return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
+ + atomic_read(&ctx->cached_cq_overflow);
}
static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
WRITE_ONCE(cqe->res, res);
WRITE_ONCE(cqe->flags, 0);
} else {
- unsigned overflow = READ_ONCE(ctx->rings->cq_overflow);
-
- WRITE_ONCE(ctx->rings->cq_overflow, overflow + 1);
+ WRITE_ONCE(ctx->rings->cq_overflow,
+ atomic_inc_return(&ctx->cached_cq_overflow));
}
}
return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
}
+static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
+{
+ struct io_rings *rings = ctx->rings;
+
+ /* make sure SQ entry isn't read before tail */
+ return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
+}
+
/*
* Find and free completed poll iocbs
*/
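Editor's note: the acquire load in io_sqring_entries() is what makes this safe. Userspace fills in SQE slots and then publishes them by bumping sq.tail, so the kernel must read the tail with acquire semantics before touching the entries. A stripped-down consumer-side sketch, with an invented ring type:

    struct ring { u32 tail; u32 cached_head; /* entries follow */ };

    static u32 entries_ready(struct ring *r)
    {
            /* pairs with the producer's release store to tail */
            u32 tail = smp_load_acquire(&r->tail);

            return tail - r->cached_head;   /* u32 wraparound-safe */
    }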
mutex_unlock(&ctx->uring_lock);
}
-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
- long min)
+static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+ long min)
{
- int iters, ret = 0;
+ int iters = 0, ret = 0;
- /*
- * We disallow the app entering submit/complete with polling, but we
- * still need to lock the ring to prevent racing with polled issue
- * that got punted to a workqueue.
- */
- mutex_lock(&ctx->uring_lock);
-
- iters = 0;
do {
int tmin = 0;
ret = 0;
} while (min && !*nr_events && !need_resched());
+ return ret;
+}
+
+static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+ long min)
+{
+ int ret;
+
+ /*
+ * We disallow the app entering submit/complete with polling, but we
+ * still need to lock the ring to prevent racing with polled issue
+ * that got punted to a workqueue.
+ */
+ mutex_lock(&ctx->uring_lock);
+ ret = __io_iopoll_check(ctx, nr_events, min);
mutex_unlock(&ctx->uring_lock);
return ret;
}
-static void kiocb_end_write(struct kiocb *kiocb)
+static void kiocb_end_write(struct io_kiocb *req)
{
- if (kiocb->ki_flags & IOCB_WRITE) {
- struct inode *inode = file_inode(kiocb->ki_filp);
+ /*
+ * Tell lockdep we inherited freeze protection from submission
+ * thread.
+ */
+ if (req->flags & REQ_F_ISREG) {
+ struct inode *inode = file_inode(req->file);
- /*
- * Tell lockdep we inherited freeze protection from submission
- * thread.
- */
- if (S_ISREG(inode->i_mode))
- __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
- file_end_write(kiocb->ki_filp);
+ __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
}
+ file_end_write(req->file);
}
static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
- kiocb_end_write(kiocb);
+ if (kiocb->ki_flags & IOCB_WRITE)
+ kiocb_end_write(req);
if ((req->flags & REQ_F_LINK) && res != req->result)
req->flags |= REQ_F_FAIL_LINK;
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
- kiocb_end_write(kiocb);
+ if (kiocb->ki_flags & IOCB_WRITE)
+ kiocb_end_write(req);
if ((req->flags & REQ_F_LINK) && res != req->result)
req->flags |= REQ_F_FAIL_LINK;
if (!req->file)
return -EBADF;
- if (force_nonblock && !io_file_supports_async(req->file))
- force_nonblock = false;
+ if (S_ISREG(file_inode(req->file)->i_mode))
+ req->flags |= REQ_F_ISREG;
+
+ /*
+ * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
+ * we know to async punt it even if it was opened O_NONBLOCK
+ */
+ if (force_nonblock && !io_file_supports_async(req->file)) {
+ req->flags |= REQ_F_MUST_PUNT;
+ return -EAGAIN;
+ }
kiocb->ki_pos = READ_ONCE(sqe->off);
kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
return ret;
/* don't allow async punt if RWF_NOWAIT was requested */
- if (kiocb->ki_flags & IOCB_NOWAIT)
+ if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+ (req->file->f_flags & O_NONBLOCK))
req->flags |= REQ_F_NOWAIT;
if (force_nonblock)
* need async punt anyway, so it's more efficient to do it
* here.
*/
- if (force_nonblock && ret2 > 0 && ret2 < read_size)
+ if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
+ (req->flags & REQ_F_ISREG) &&
+ ret2 > 0 && ret2 < read_size)
ret2 = -EAGAIN;
/* Catch -EAGAIN return for forced non-blocking submission */
if (!force_nonblock || ret2 != -EAGAIN) {
* released so that it doesn't complain about the held lock when
* we return to userspace.
*/
- if (S_ISREG(file_inode(file)->i_mode)) {
+ if (req->flags & REQ_F_ISREG) {
__sb_start_write(file_inode(file)->i_sb,
SB_FREEZE_WRITE, true);
__sb_writers_release(file_inode(file)->i_sb,
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
struct io_ring_ctx *ctx;
- struct io_kiocb *req;
+ struct io_kiocb *req, *prev;
unsigned long flags;
req = container_of(timer, struct io_kiocb, timeout.timer);
atomic_inc(&ctx->cq_timeouts);
spin_lock_irqsave(&ctx->completion_lock, flags);
+ /*
+ * Adjust the reqs sequence before the current one because it
+ * will consume a slot in the cq_ring and the cq_tail pointer
+ * will be increased; otherwise other timeout reqs may return in
+ * advance without waiting for enough wait_nr.
+ */
+ prev = req;
+ list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
+ prev->sequence++;
list_del(&req->list);
io_cqring_fill_event(ctx, req->user_data, -ETIME);
static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- unsigned count, req_dist, tail_index;
+ unsigned count;
struct io_ring_ctx *ctx = req->ctx;
struct list_head *entry;
struct timespec64 ts;
+ unsigned span = 0;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
count = 1;
req->sequence = ctx->cached_sq_head + count - 1;
+ /* reuse it to store the count */
+ req->submit.sequence = count;
req->flags |= REQ_F_TIMEOUT;
/*
* Insertion sort, ensuring the first entry in the list is always
* the one we need first.
*/
- tail_index = ctx->cached_cq_tail - ctx->rings->sq_dropped;
- req_dist = req->sequence - tail_index;
spin_lock_irq(&ctx->completion_lock);
list_for_each_prev(entry, &ctx->timeout_list) {
struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
- unsigned dist;
+ unsigned nxt_sq_head;
+ long long tmp, tmp_nxt;
+
+ /*
+ * Since cached_sq_head + count - 1 can overflow, use type long
+ * long to store it.
+ */
+ tmp = (long long)ctx->cached_sq_head + count - 1;
+ nxt_sq_head = nxt->sequence - nxt->submit.sequence + 1;
+ tmp_nxt = (long long)nxt_sq_head + nxt->submit.sequence - 1;
- dist = nxt->sequence - tail_index;
- if (req_dist >= dist)
+ /*
+ * cached_sq_head may overflow, but it can never overflow twice
+ * while some timeout req is still valid.
+ */
+ if (ctx->cached_sq_head < nxt_sq_head)
+ tmp += UINT_MAX;
+
+ if (tmp > tmp_nxt)
break;
+
+ /*
+ * The sequence of each req after the inserted one, and of the
+ * inserted req itself, must be adjusted because each timeout
+ * req consumes a slot.
+ */
+ span++;
+ nxt->sequence++;
}
+ req->sequence -= span;
list_add(&req->list, entry);
spin_unlock_irq(&ctx->completion_lock);
}
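Editor's note: a concrete wraparound case shows why the long long promotion and the UINT_MAX bump are needed. The sketch below linearizes a 32-bit sequence key the same way the insertion-sort comparison above does (simplified, not the exact kernel expression):

    /* If cached_sq_head has wrapped to a small value while an older
     * timeout's head is still huge, bump the new key by UINT_MAX so
     * both keys live in the same epoch before comparing.
     */
    static long long timeout_key(u32 head, u32 count, u32 other_head)
    {
            long long key = (long long)head + count - 1;

            if (head < other_head)  /* we wrapped, the other did not */
                    key += UINT_MAX;
            return key;             /* now totally ordered vs. other */
    }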
static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct sqe_submit *s, bool force_nonblock)
+ struct sqe_submit *s)
{
int ret;
- ret = __io_submit_sqe(ctx, req, s, force_nonblock);
- if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
+ ret = __io_submit_sqe(ctx, req, s, true);
+
+ /*
+ * We async punt it if the file wasn't marked NOWAIT, or if the file
+ * doesn't support non-blocking read/write attempts
+ */
+ if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
+ (req->flags & REQ_F_MUST_PUNT))) {
struct io_uring_sqe *sqe_copy;
sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
}
static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct sqe_submit *s, bool force_nonblock)
+ struct sqe_submit *s)
{
int ret;
return 0;
}
- return __io_queue_sqe(ctx, req, s, force_nonblock);
+ return __io_queue_sqe(ctx, req, s);
}
static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct sqe_submit *s, struct io_kiocb *shadow,
- bool force_nonblock)
+ struct sqe_submit *s, struct io_kiocb *shadow)
{
int ret;
int need_submit = false;
if (!shadow)
- return io_queue_sqe(ctx, req, s, force_nonblock);
+ return io_queue_sqe(ctx, req, s);
/*
* Mark the first IO in link list as DRAIN, let all the following
spin_unlock_irq(&ctx->completion_lock);
if (need_submit)
- return __io_queue_sqe(ctx, req, s, force_nonblock);
+ return __io_queue_sqe(ctx, req, s);
return 0;
}
#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
- struct io_submit_state *state, struct io_kiocb **link,
- bool force_nonblock)
+ struct io_submit_state *state, struct io_kiocb **link)
{
struct io_uring_sqe *sqe_copy;
struct io_kiocb *req;
return;
}
+ req->user_data = s->sqe->user_data;
+
/*
* If we already have a head request, queue this one for async
* submittal once the head completes. If we don't have a head but
INIT_LIST_HEAD(&req->link_list);
*link = req;
} else {
- io_queue_sqe(ctx, req, s, force_nonblock);
+ io_queue_sqe(ctx, req, s);
}
}
/* drop invalid entries */
ctx->cached_sq_head++;
- rings->sq_dropped++;
+ ctx->cached_sq_dropped++;
+ WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped);
return false;
}
-static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
- unsigned int nr, bool has_user, bool mm_fault)
+static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
+ bool has_user, bool mm_fault)
{
struct io_submit_state state, *statep = NULL;
struct io_kiocb *link = NULL;
}
for (i = 0; i < nr; i++) {
+ struct sqe_submit s;
+
+ if (!io_get_sqring(ctx, &s))
+ break;
+
/*
* If previous wasn't linked and we have a linked command,
* that's the end of the chain. Submit the previous link.
*/
if (!prev_was_link && link) {
- io_queue_link_head(ctx, link, &link->submit, shadow_req,
- true);
+ io_queue_link_head(ctx, link, &link->submit, shadow_req);
link = NULL;
shadow_req = NULL;
}
- prev_was_link = (sqes[i].sqe->flags & IOSQE_IO_LINK) != 0;
+ prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
- if (link && (sqes[i].sqe->flags & IOSQE_IO_DRAIN)) {
+ if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
if (!shadow_req) {
shadow_req = io_get_req(ctx, NULL);
if (unlikely(!shadow_req))
shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
refcount_dec(&shadow_req->refs);
}
- shadow_req->sequence = sqes[i].sequence;
+ shadow_req->sequence = s.sequence;
}
out:
if (unlikely(mm_fault)) {
- io_cqring_add_event(ctx, sqes[i].sqe->user_data,
+ io_cqring_add_event(ctx, s.sqe->user_data,
-EFAULT);
} else {
- sqes[i].has_user = has_user;
- sqes[i].needs_lock = true;
- sqes[i].needs_fixed_file = true;
- io_submit_sqe(ctx, &sqes[i], statep, &link, true);
+ s.has_user = has_user;
+ s.needs_lock = true;
+ s.needs_fixed_file = true;
+ io_submit_sqe(ctx, &s, statep, &link);
submitted++;
}
}
if (link)
- io_queue_link_head(ctx, link, &link->submit, shadow_req, true);
+ io_queue_link_head(ctx, link, &link->submit, shadow_req);
if (statep)
io_submit_state_end(&state);
static int io_sq_thread(void *data)
{
- struct sqe_submit sqes[IO_IOPOLL_BATCH];
struct io_ring_ctx *ctx = data;
struct mm_struct *cur_mm = NULL;
mm_segment_t old_fs;
timeout = inflight = 0;
while (!kthread_should_park()) {
- bool all_fixed, mm_fault = false;
- int i;
+ bool mm_fault = false;
+ unsigned int to_submit;
if (inflight) {
unsigned nr_events = 0;
if (ctx->flags & IORING_SETUP_IOPOLL) {
- io_iopoll_check(ctx, &nr_events, 0);
+ /*
+ * inflight is the count of the maximum possible
+ * entries we submitted, but it can be smaller
+ * if we dropped some of them. If we don't have
+ * poll entries available, then we know that we
+ * have nothing left to poll for. Reset the
+ * inflight count to zero in that case.
+ */
+ mutex_lock(&ctx->uring_lock);
+ if (!list_empty(&ctx->poll_list))
+ __io_iopoll_check(ctx, &nr_events, 0);
+ else
+ inflight = 0;
+ mutex_unlock(&ctx->uring_lock);
} else {
/*
* Normal IO, just pretend everything completed.
timeout = jiffies + ctx->sq_thread_idle;
}
- if (!io_get_sqring(ctx, &sqes[0])) {
+ to_submit = io_sqring_entries(ctx);
+ if (!to_submit) {
/*
* We're polling. If we're within the defined idle
* period, then let us spin without work before going
/* make sure to read SQ tail after writing flags */
smp_mb();
- if (!io_get_sqring(ctx, &sqes[0])) {
+ to_submit = io_sqring_entries(ctx);
+ if (!to_submit) {
if (kthread_should_park()) {
finish_wait(&ctx->sqo_wait, &wait);
break;
ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
}
- i = 0;
- all_fixed = true;
- do {
- if (all_fixed && io_sqe_needs_user(sqes[i].sqe))
- all_fixed = false;
-
- i++;
- if (i == ARRAY_SIZE(sqes))
- break;
- } while (io_get_sqring(ctx, &sqes[i]));
-
/* Unless all new commands are FIXED regions, grab mm */
- if (!all_fixed && !cur_mm) {
+ if (!cur_mm) {
mm_fault = !mmget_not_zero(ctx->sqo_mm);
if (!mm_fault) {
use_mm(ctx->sqo_mm);
}
}
- inflight += io_submit_sqes(ctx, sqes, i, cur_mm != NULL,
- mm_fault);
+ to_submit = min(to_submit, ctx->sq_entries);
+ inflight += io_submit_sqes(ctx, to_submit, cur_mm != NULL,
+ mm_fault);
/* Commit SQ ring head once we've consumed all SQEs */
io_commit_sqring(ctx);
return 0;
}
-static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
- bool block_for_last)
+static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
{
struct io_submit_state state, *statep = NULL;
struct io_kiocb *link = NULL;
}
for (i = 0; i < to_submit; i++) {
- bool force_nonblock = true;
struct sqe_submit s;
if (!io_get_sqring(ctx, &s))
* that's the end of the chain. Submit the previous link.
*/
if (!prev_was_link && link) {
- io_queue_link_head(ctx, link, &link->submit, shadow_req,
- force_nonblock);
+ io_queue_link_head(ctx, link, &link->submit, shadow_req);
link = NULL;
shadow_req = NULL;
}
s.needs_lock = false;
s.needs_fixed_file = false;
submit++;
-
- /*
- * The caller will block for events after submit, submit the
- * last IO non-blocking. This is either the only IO it's
- * submitting, or it already submitted the previous ones. This
- * improves performance by avoiding an async punt that we don't
- * need to do.
- */
- if (block_for_last && submit == to_submit)
- force_nonblock = false;
-
- io_submit_sqe(ctx, &s, statep, &link, force_nonblock);
+ io_submit_sqe(ctx, &s, statep, &link);
}
- io_commit_sqring(ctx);
if (link)
- io_queue_link_head(ctx, link, &link->submit, shadow_req,
- !block_for_last);
+ io_queue_link_head(ctx, link, &link->submit, shadow_req);
if (statep)
io_submit_state_end(statep);
+ io_commit_sqring(ctx);
+
return submit;
}
wake_up(&ctx->sqo_wait);
submitted = to_submit;
} else if (to_submit) {
- bool block_for_last = false;
-
to_submit = min(to_submit, ctx->sq_entries);
- /*
- * Allow last submission to block in a series, IFF the caller
- * asked to wait for events and we don't currently have
- * enough. This potentially avoids an async punt.
- */
- if (to_submit == min_complete &&
- io_cqring_events(ctx->rings) < min_complete)
- block_for_last = true;
-
mutex_lock(&ctx->uring_lock);
- submitted = io_ring_submit(ctx, to_submit, block_for_last);
+ submitted = io_ring_submit(ctx, to_submit);
mutex_unlock(&ctx->uring_lock);
}
if (flags & IORING_ENTER_GETEVENTS) {
/**
* simple_write_end - .write_end helper for non-block-device FSes
- * @available: See .write_end of address_space_operations
- * @file: "
+ * @file: See .write_end of address_space_operations
* @mapping: "
* @pos: "
* @len: "
transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
if (IS_ERR(transfer_to[USRQUOTA])) {
status = PTR_ERR(transfer_to[USRQUOTA]);
+ transfer_to[USRQUOTA] = NULL;
goto bail_unlock;
}
}
transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
if (IS_ERR(transfer_to[GRPQUOTA])) {
status = PTR_ERR(transfer_to[GRPQUOTA]);
+ transfer_to[GRPQUOTA] = NULL;
goto bail_unlock;
}
}
/* At this point, we know that no more recovery threads can be
* launched, so wait for any recovery completion work to
* complete. */
- flush_workqueue(osb->ocfs2_wq);
+ if (osb->ocfs2_wq)
+ flush_workqueue(osb->ocfs2_wq);
/*
* Now that recovery is shut down, and the osb is about to be
struct ocfs2_dinode *alloc = NULL;
cancel_delayed_work(&osb->la_enable_wq);
- flush_workqueue(osb->ocfs2_wq);
+ if (osb->ocfs2_wq)
+ flush_workqueue(osb->ocfs2_wq);
if (osb->local_alloc_state == OCFS2_LA_UNUSED)
goto out;
global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR);
show_val_kb(m, "ShmemPmdMapped: ",
global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR);
- show_val_kb(m, "FileHugePages: ",
+ show_val_kb(m, "FileHugePages: ",
global_node_page_state(NR_FILE_THPS) * HPAGE_PMD_NR);
- show_val_kb(m, "FilePmdMapped: ",
+ show_val_kb(m, "FilePmdMapped: ",
global_node_page_state(NR_FILE_PMDMAPPED) * HPAGE_PMD_NR);
#endif
return -EINVAL;
while (count > 0) {
- if (pfn_valid(pfn))
- ppage = pfn_to_page(pfn);
- else
- ppage = NULL;
+ /*
+ * TODO: ZONE_DEVICE support requires identifying
+ * memmaps that were actually initialized.
+ */
+ ppage = pfn_to_online_page(pfn);
+
if (!ppage || PageSlab(ppage) || page_has_type(ppage))
pcount = 0;
else
return -EINVAL;
while (count > 0) {
- if (pfn_valid(pfn))
- ppage = pfn_to_page(pfn);
- else
- ppage = NULL;
+ /*
+ * TODO: ZONE_DEVICE support requires identifying
+ * memmaps that were actually initialized.
+ */
+ ppage = pfn_to_online_page(pfn);
if (put_user(stable_page_flags(ppage), out)) {
ret = -EFAULT;
return -EINVAL;
while (count > 0) {
- if (pfn_valid(pfn))
- ppage = pfn_to_page(pfn);
- else
- ppage = NULL;
+ /*
+ * TODO: ZONE_DEVICE support requires identifying
+ * memmaps that were actually initialized.
+ */
+ ppage = pfn_to_online_page(pfn);
if (ppage)
ino = page_cgroup_ino(ppage);
*/
static int verify_dirent_name(const char *name, int len)
{
- if (WARN_ON_ONCE(!len))
+ if (!len)
return -EIO;
- if (WARN_ON_ONCE(memchr(name, '/', len)))
+ if (memchr(name, '/', len))
return -EIO;
return 0;
}
uint64_t bs_blocks; /* number of blocks */
uint64_t bs_xflags; /* extended flags */
- uint64_t bs_atime; /* access time, seconds */
- uint64_t bs_mtime; /* modify time, seconds */
+ int64_t bs_atime; /* access time, seconds */
+ int64_t bs_mtime; /* modify time, seconds */
- uint64_t bs_ctime; /* inode change time, seconds */
- uint64_t bs_btime; /* creation time, seconds */
+ int64_t bs_ctime; /* inode change time, seconds */
+ int64_t bs_btime; /* creation time, seconds */
uint32_t bs_gen; /* generation count */
uint32_t bs_uid; /* user id */
struct acpi_processor_limit limit;
struct thermal_cooling_device *cdev;
struct device *dev; /* Processor device. */
- struct dev_pm_qos_request perflib_req;
- struct dev_pm_qos_request thermal_req;
+ struct freq_qos_request perflib_req;
+ struct freq_qos_request thermal_req;
};
struct acpi_processor_errata {
#ifdef CONFIG_CPU_FREQ
extern bool acpi_processor_cpufreq_init;
void acpi_processor_ignore_ppc_init(void);
-void acpi_processor_ppc_init(int cpu);
-void acpi_processor_ppc_exit(int cpu);
+void acpi_processor_ppc_init(struct cpufreq_policy *policy);
+void acpi_processor_ppc_exit(struct cpufreq_policy *policy);
void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag);
extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit);
#else
{
return;
}
-static inline void acpi_processor_ppc_init(int cpu)
+static inline void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
return;
}
-static inline void acpi_processor_ppc_exit(int cpu)
+static inline void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
{
return;
}
int acpi_processor_get_limit_info(struct acpi_processor *pr);
extern const struct thermal_cooling_device_ops processor_cooling_ops;
#if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ)
-void acpi_thermal_cpufreq_init(int cpu);
-void acpi_thermal_cpufreq_exit(int cpu);
+void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy);
+void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy);
#else
-static inline void acpi_thermal_cpufreq_init(int cpu)
+static inline void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
{
return;
}
-static inline void acpi_thermal_cpufreq_exit(int cpu)
+static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
{
return;
}
}
/**
- * bitmap_or_equal - Check whether the or of two bitnaps is equal to a third
+ * bitmap_or_equal - Check whether the or of two bitmaps is equal to a third
* @src1: Pointer to bitmap 1
* @src2: Pointer to bitmap 2 will be or'ed with bitmap 1
* @src3: Pointer to bitmap 3. Compare to the result of *@src1 | *@src2
+ * @nbits: number of bits in each of these bitmaps
*
* Returns: True if (*@src1 | *@src2) == *@src3, false otherwise
*/
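Editor's note: with @nbits documented, the helper's contract is complete. A small usage sketch built only on the kernel-doc above:

    DECLARE_BITMAP(a, 64);
    DECLARE_BITMAP(b, 64);
    DECLARE_BITMAP(c, 64);

    bitmap_zero(a, 64);
    bitmap_zero(b, 64);
    __set_bit(3, a);
    __set_bit(40, b);
    bitmap_or(c, a, b, 64);                  /* c = a | b */
    WARN_ON(!bitmap_or_equal(a, b, c, 64));  /* holds by construction */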
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
+#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
- struct dev_pm_qos_request *min_freq_req;
- struct dev_pm_qos_request *max_freq_req;
+ struct freq_constraints constraints;
+ struct freq_qos_request *min_freq_req;
+ struct freq_qos_request *max_freq_req;
+
struct cpufreq_frequency_table *freq_table;
enum cpufreq_table_sorting freq_table_sorted;
#define THIS_MODULE ((struct module *)0)
#endif
-#define NS_SEPARATOR "."
-
#ifdef CONFIG_MODVERSIONS
/* Mark the CRC weak since genksyms apparently decides not to
* generate a checksums for some symbols */
* absolute relocations that require runtime processing on relocatable
* kernels.
*/
-#define __KSYMTAB_ENTRY_NS(sym, sec, ns) \
+#define __KSYMTAB_ENTRY_NS(sym, sec) \
__ADDRESSABLE(sym) \
asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \
" .balign 4 \n" \
- "__ksymtab_" #ns NS_SEPARATOR #sym ": \n" \
+ "__ksymtab_" #sym ": \n" \
" .long " #sym "- . \n" \
" .long __kstrtab_" #sym "- . \n" \
" .long __kstrtabns_" #sym "- . \n" \
int namespace_offset;
};
#else
-#define __KSYMTAB_ENTRY_NS(sym, sec, ns) \
- static const struct kernel_symbol __ksymtab_##sym##__##ns \
- asm("__ksymtab_" #ns NS_SEPARATOR #sym) \
+#define __KSYMTAB_ENTRY_NS(sym, sec) \
+ static const struct kernel_symbol __ksymtab_##sym \
__attribute__((section("___ksymtab" sec "+" #sym), used)) \
__aligned(sizeof(void *)) \
= { (unsigned long)&sym, __kstrtab_##sym, __kstrtabns_##sym }
#define __KSYMTAB_ENTRY(sym, sec) \
static const struct kernel_symbol __ksymtab_##sym \
- asm("__ksymtab_" #sym) \
__attribute__((section("___ksymtab" sec "+" #sym), used)) \
__aligned(sizeof(void *)) \
= { (unsigned long)&sym, __kstrtab_##sym, NULL }
static const char __kstrtabns_##sym[] \
__attribute__((section("__ksymtab_strings"), used, aligned(1))) \
= #ns; \
- __KSYMTAB_ENTRY_NS(sym, sec, ns)
+ __KSYMTAB_ENTRY_NS(sym, sec)
#define ___EXPORT_SYMBOL(sym, sec) \
___export_symbol_common(sym, sec); \
*/
bool threaded;
+ /**
+ * @init_hw: optional routine to initialize hardware before
+ * an IRQ chip is added. This is useful when
+ * a particular driver wants to clear IRQ related registers
+ * in order to avoid undesired events.
+ */
+ int (*init_hw)(struct gpio_chip *chip);
+
/**
* @init_valid_mask: optional routine to initialize @valid_mask, to be
* used if not all GPIO lines are valid interrupts. Sometimes some
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
-#define PHY_ID_KSZ8795 0x00221550
+#define PHY_ID_KSZ87XX 0x00221550
#define PHY_ID_KSZ9477 0x00221631
enum page_ext_flags {
PAGE_EXT_OWNER,
- PAGE_EXT_OWNER_ACTIVE,
+ PAGE_EXT_OWNER_ALLOCATED,
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
PAGE_EXT_YOUNG,
PAGE_EXT_IDLE,
unsigned long flags;
};
+extern unsigned long page_ext_size;
extern void pgdat_page_ext_init(struct pglist_data *pgdat);
#ifdef CONFIG_SPARSEMEM
struct page_ext *lookup_page_ext(const struct page *page);
+static inline struct page_ext *page_ext_next(struct page_ext *curr)
+{
+ void *next = curr;
+ next += page_ext_size;
+ return next;
+}
+
#else /* !CONFIG_PAGE_EXTENSION */
struct page_ext;
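Editor's note: the real stride of a page_ext object is only known at boot (page_ext_size), so plain pointer arithmetic on struct page_ext is wrong; page_ext_next() encapsulates the byte-stride advance. Walking a run of pages then looks like this sketch, with page and nr_pages assumed in scope:

    struct page_ext *ext = lookup_page_ext(page);
    unsigned long i;

    if (!ext)
            return;
    for (i = 0; i < nr_pages; i++) {
            /* inspect or update ext->flags here */
            ext = page_ext_next(ext);   /* advance by page_ext_size */
    }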
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
-#define PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE 0
-#define PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE (-1)
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
enum dev_pm_qos_req_type {
DEV_PM_QOS_RESUME_LATENCY = 1,
DEV_PM_QOS_LATENCY_TOLERANCE,
- DEV_PM_QOS_MIN_FREQUENCY,
- DEV_PM_QOS_MAX_FREQUENCY,
DEV_PM_QOS_FLAGS,
};
struct dev_pm_qos {
struct pm_qos_constraints resume_latency;
struct pm_qos_constraints latency_tolerance;
- struct pm_qos_constraints min_frequency;
- struct pm_qos_constraints max_frequency;
struct pm_qos_flags flags;
struct dev_pm_qos_request *resume_latency_req;
struct dev_pm_qos_request *latency_tolerance_req;
struct dev_pm_qos_request *flags_req;
- struct dev_pm_qos_request *min_frequency_req;
- struct dev_pm_qos_request *max_frequency_req;
};
/* Action requested to pm_qos_update_target */
switch (type) {
case DEV_PM_QOS_RESUME_LATENCY:
return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
- case DEV_PM_QOS_MIN_FREQUENCY:
- return PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
- case DEV_PM_QOS_MAX_FREQUENCY:
- return PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
default:
WARN_ON(1);
return 0;
}
#endif
+#define FREQ_QOS_MIN_DEFAULT_VALUE 0
+#define FREQ_QOS_MAX_DEFAULT_VALUE (-1)
+
+enum freq_qos_req_type {
+ FREQ_QOS_MIN = 1,
+ FREQ_QOS_MAX,
+};
+
+struct freq_constraints {
+ struct pm_qos_constraints min_freq;
+ struct blocking_notifier_head min_freq_notifiers;
+ struct pm_qos_constraints max_freq;
+ struct blocking_notifier_head max_freq_notifiers;
+};
+
+struct freq_qos_request {
+ enum freq_qos_req_type type;
+ struct plist_node pnode;
+ struct freq_constraints *qos;
+};
+
+static inline int freq_qos_request_active(struct freq_qos_request *req)
+{
+ return !IS_ERR_OR_NULL(req->qos);
+}
+
+void freq_constraints_init(struct freq_constraints *qos);
+
+s32 freq_qos_read_value(struct freq_constraints *qos,
+ enum freq_qos_req_type type);
+
+int freq_qos_add_request(struct freq_constraints *qos,
+ struct freq_qos_request *req,
+ enum freq_qos_req_type type, s32 value);
+int freq_qos_update_request(struct freq_qos_request *req, s32 new_value);
+int freq_qos_remove_request(struct freq_qos_request *req);
+
+int freq_qos_add_notifier(struct freq_constraints *qos,
+ enum freq_qos_req_type type,
+ struct notifier_block *notifier);
+int freq_qos_remove_notifier(struct freq_constraints *qos,
+ enum freq_qos_req_type type,
+ struct notifier_block *notifier);
+
#endif
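Editor's note: the new API is used exactly as in the cpufreq cooling hunk earlier in this series: embed a struct freq_qos_request, attach it to a policy's constraints, then update or remove it. A sketch built only from the declarations above:

    static struct freq_qos_request max_req;

    static int cap_cpu_freq(struct cpufreq_policy *policy, s32 max_khz)
    {
            /* attach a MAX constraint to this policy */
            return freq_qos_add_request(&policy->constraints, &max_req,
                                        FREQ_QOS_MAX, max_khz);
    }

    static void update_and_drop(s32 new_khz)
    {
            freq_qos_update_request(&max_req, new_khz);
            freq_qos_remove_request(&max_req);
    }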
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
+asmlinkage void preempt_schedule_irq(void);
extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
-int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto);
-int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto);
+int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
+ int mac_len);
+int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len);
int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
int skb_mpls_dec_ttl(struct sk_buff *skb);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
/* fastopen_rsk points to request_sock that resulted in this big
* socket. Used to retransmit SYNACKs etc.
*/
- struct request_sock *fastopen_rsk;
+ struct request_sock __rcu *fastopen_rsk;
u32 *saved_syn;
};
static inline bool tcp_passive_fastopen(const struct sock *sk)
{
- return (sk->sk_state == TCP_SYN_RECV &&
- tcp_sk(sk)->fastopen_rsk != NULL);
+ return sk->sk_state == TCP_SYN_RECV &&
+ rcu_access_pointer(tcp_sk(sk)->fastopen_rsk) != NULL;
}
static inline void fastopen_queue_tune(struct sock *sk, int backlog)
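Editor's note: since fastopen_rsk is now annotated __rcu, sparse enforces the access rules: rcu_access_pointer() may be used for a bare NULL test without the RCU read lock, while any dereference needs rcu_dereference() inside rcu_read_lock(). A sketch of the two cases:

    static bool fastopen_pending(const struct tcp_sock *tp)
    {
            /* pointer value only, never dereferenced: no lock needed */
            return rcu_access_pointer(tp->fastopen_rsk) != NULL;
    }

    static void use_fastopen_req(struct tcp_sock *tp)
    {
            struct request_sock *req;

            rcu_read_lock();
            req = rcu_dereference(tp->fastopen_rsk);
            if (req)
                    /* access req fields here */;
            rcu_read_unlock();
    }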
* This structure is used either directly or via the XA_LIMIT() macro
* to communicate the range of IDs that are valid for allocation.
* Two common ranges are predefined for you:
- * * xa_limit_32b - [0 - UINT_MAX]
- * * xa_limit_31b - [0 - INT_MAX]
+ * * xa_limit_32b - [0 - UINT_MAX]
+ * * xa_limit_31b - [0 - INT_MAX]
*/
struct xa_limit {
u32 max;
*/
const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
+/**
+ * regulatory_pre_cac_allowed - check if pre-CAC allowed in the current regdom
+ * @wiphy: wiphy for which pre-CAC capability is checked.
+ *
+ * Pre-CAC is allowed only in some regdomains (notably ETSI).
+ */
+bool regulatory_pre_cac_allowed(struct wiphy *wiphy);
+
/**
* DOC: Internal regulatory db functions
*
/* Access to a connection */
int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
-int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
struct net {
+ /* The first cache line is often dirtied.
+ * Do not place read-mostly fields here.
+ */
refcount_t passive; /* To decide when the network
* namespace should be freed.
*/
*/
spinlock_t rules_mod_lock;
- u32 hash_mix;
+ unsigned int dev_unreg_count;
+
+ unsigned int dev_base_seq; /* protected by rtnl_mutex */
+ int ifindex;
+
+ spinlock_t nsid_lock;
+ atomic_t fnhe_genid;
struct list_head list; /* list of network namespaces */
struct list_head exit_list; /* To linked to call pernet exit
#endif
struct user_namespace *user_ns; /* Owning user namespace */
struct ucounts *ucounts;
- spinlock_t nsid_lock;
struct idr netns_ids;
struct ns_common ns;
+ struct list_head dev_base_head;
struct proc_dir_entry *proc_net;
struct proc_dir_entry *proc_net_stat;
struct uevent_sock *uevent_sock; /* uevent socket */
- struct list_head dev_base_head;
struct hlist_head *dev_name_head;
struct hlist_head *dev_index_head;
- unsigned int dev_base_seq; /* protected by rtnl_mutex */
- int ifindex;
- unsigned int dev_unreg_count;
+	/* Note that @hash_mix can be read millions of times per second,
+	 * so it is critical that it sit on a read_mostly cache line.
+ */
+ u32 hash_mix;
+
+ struct net_device *loopback_dev; /* The loopback */
/* core fib_rules */
struct list_head rules_ops;
- struct net_device *loopback_dev; /* The loopback */
struct netns_core core;
struct netns_mib mib;
struct netns_packet packet;
struct sock *crypto_nlsk;
#endif
struct sock *diag_nlsk;
- atomic_t fnhe_genid;
} __randomize_layout;
#include <linux/seq_file_net.h>
static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
{
- return queue->rskq_accept_head == NULL;
+ return READ_ONCE(queue->rskq_accept_head) == NULL;
}
static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
req = queue->rskq_accept_head;
if (req) {
sk_acceptq_removed(parent);
- queue->rskq_accept_head = req->dl_next;
+ WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
if (queue->rskq_accept_head == NULL)
queue->rskq_accept_tail = NULL;
}
return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
}
+static inline bool sctp_newsk_ready(const struct sock *sk)
+{
+ return sock_flag(sk, SOCK_DEAD) || sk->sk_socket;
+}
+
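sctp_newsk_ready() reports that a freshly created association socket is safe to act on: it is either already attached to a struct socket or dead and about to be reaped. A hypothetical call site, as a sketch:

	if (!sctp_newsk_ready(sk)) {
		/* not hooked up to a user socket yet; defer this event */
		return;
	}
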
#endif /* __net_sctp_h__ */
*/
static inline int sk_stream_min_wspace(const struct sock *sk)
{
- return sk->sk_wmem_queued >> 1;
+ return READ_ONCE(sk->sk_wmem_queued) >> 1;
}
static inline int sk_stream_wspace(const struct sock *sk)
{
- return sk->sk_sndbuf - sk->sk_wmem_queued;
+ return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);
+}
+
+static inline void sk_wmem_queued_add(struct sock *sk, int val)
+{
+ WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
}
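
sk_wmem_queued is written under the socket lock but read locklessly (poll, diag), so writers now funnel through sk_wmem_queued_add() and lockless readers pair it with READ_ONCE(), as later hunks in this diff show. The pairing, in sketch form:

	/* Writer, socket locked: */
	sk_wmem_queued_add(sk, skb->truesize);

	/* Lockless reader, e.g. a poll or diag path: */
	int queued = READ_ONCE(sk->sk_wmem_queued);
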
void sk_stream_write_space(struct sock *sk);
static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
- if (sk->sk_wmem_queued >= sk->sk_sndbuf)
+ if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
return false;
return sk->sk_prot->stream_memory_free ?
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
- sk->sk_wmem_queued -= skb->truesize;
+ sk_wmem_queued_add(sk, -skb->truesize);
sk_mem_uncharge(sk, skb->truesize);
if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
!sk->sk_tx_skb_cache && !skb_cloned(skb)) {
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
- sk->sk_wmem_queued += copy;
+ sk_wmem_queued_add(sk, copy);
sk_mem_charge(sk, copy);
return 0;
}
static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
- if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
- sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
- sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
- }
+ u32 val;
+
+ if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
+ return;
+
+ val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+
+ WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
}
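
The rewrite is not purely cosmetic: the old code made two plain stores to sk->sk_sndbuf, so a concurrent lockless reader could observe the intermediate value from the first store, possibly below SOCK_MIN_SNDBUF. A sketch of the window the single WRITE_ONCE() removes:

	/* Old pattern: a READ_ONCE(sk->sk_sndbuf) elsewhere can land
	 * between these two stores and see an un-clamped value.
	 */
	sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
	sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
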
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
*/
static inline bool sock_writeable(const struct sock *sk)
{
- return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
+ return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
}
static inline gfp_t gfp_any(void)
static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
- return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
+ int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);
+
+ return v ?: 1;
}
/* Alas, with timeout socket operations are not restartable.
mem_cgroup_under_socket_pressure(sk->sk_memcg))
return true;
- return tcp_memory_pressure;
+ return READ_ONCE(tcp_memory_pressure);
}
/*
* The next routines deal with comparing 32 bit unsigned ints
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
- return tcp_win_from_space(sk, sk->sk_rcvbuf - sk->sk_backlog.len -
+ return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
+ READ_ONCE(sk->sk_backlog.len) -
atomic_read(&sk->sk_rmem_alloc));
}
static inline int tcp_full_space(const struct sock *sk)
{
- return tcp_win_from_space(sk, sk->sk_rcvbuf);
+ return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}
extern void tcp_openreq_init_rwin(struct request_sock *req,
static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
{
const struct tcp_sock *tp = tcp_sk(sk);
- u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
+ u32 notsent_bytes = READ_ONCE(tp->write_seq) -
+ READ_ONCE(tp->snd_nxt);
return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
}
struct scsi_eh_save {
/* saved state */
int result;
+ unsigned int resid_len;
int eh_eflags;
enum dma_data_direction data_direction;
unsigned underflow;
#define AZX_REG_ML_LOUTPAY 0x20
#define AZX_REG_ML_LINPAY 0x30
+/* bit0 is reserved, with BIT(1) mapping to stream1 */
+#define ML_LOSIDV_STREAM_MASK 0xFFFE
+
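Per the new comment, stream n maps to BIT(n) and bit 0 is reserved, which is why the mask is 0xFFFE. An illustrative use (stream numbers hypothetical):

	/* mark streams 1 and 3 active in LOSIDV */
	u16 losidv = (u16)((BIT(1) | BIT(3)) & ML_LOSIDV_STREAM_MASK);
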
#define ML_LCTL_SCF_MASK 0xF
#define AZX_MLCTL_SPA (0x1 << 16)
#define AZX_MLCTL_CPA (0x1 << 23)
struct link_info *li);
#ifdef DEBUG
-inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
- char *name,
- struct asoc_simple_dai *dai)
+static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
+ char *name,
+ struct asoc_simple_dai *dai)
{
struct device *dev = simple_priv_to_dev(priv);
dev_dbg(dev, "%s clk %luHz\n", name, clk_get_rate(dai->clk));
}
-inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
+static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
{
struct snd_soc_card *card = simple_priv_to_card(priv);
struct device *dev = simple_priv_to_dev(priv);
__entry->qgid = qgroup->qgroupid;
__entry->cur_reserved = qgroup->rsv.values[type];
__entry->diff = diff;
+ __entry->type = type;
),
TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld",
TP_fast_assign_btrfs(root->fs_info,
__entry->refroot = root->root_key.objectid;
__entry->diff = diff;
+ __entry->type = type;
),
TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld",
TP_STRUCT__entry_btrfs(
__field( u64, refroot )
__field( s64, diff )
- __field( int, type )
),
TP_fast_assign_btrfs(root->fs_info,
);
TRACE_EVENT(rxrpc_peer,
- TP_PROTO(struct rxrpc_peer *peer, enum rxrpc_peer_trace op,
+ TP_PROTO(unsigned int peer_debug_id, enum rxrpc_peer_trace op,
int usage, const void *where),
- TP_ARGS(peer, op, usage, where),
+ TP_ARGS(peer_debug_id, op, usage, where),
TP_STRUCT__entry(
__field(unsigned int, peer )
),
TP_fast_assign(
- __entry->peer = peer->debug_id;
+ __entry->peer = peer_debug_id;
__entry->op = op;
__entry->usage = usage;
__entry->where = where;
);
TRACE_EVENT(rxrpc_conn,
- TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op,
+ TP_PROTO(unsigned int conn_debug_id, enum rxrpc_conn_trace op,
int usage, const void *where),
- TP_ARGS(conn, op, usage, where),
+ TP_ARGS(conn_debug_id, op, usage, where),
TP_STRUCT__entry(
__field(unsigned int, conn )
),
TP_fast_assign(
- __entry->conn = conn->debug_id;
+ __entry->conn = conn_debug_id;
__entry->op = op;
__entry->usage = usage;
__entry->where = where;
);
TRACE_EVENT(rxrpc_call,
- TP_PROTO(struct rxrpc_call *call, enum rxrpc_call_trace op,
+ TP_PROTO(unsigned int call_debug_id, enum rxrpc_call_trace op,
int usage, const void *where, const void *aux),
- TP_ARGS(call, op, usage, where, aux),
+ TP_ARGS(call_debug_id, op, usage, where, aux),
TP_STRUCT__entry(
__field(unsigned int, call )
),
TP_fast_assign(
- __entry->call = call->debug_id;
+ __entry->call = call_debug_id;
__entry->op = op;
__entry->usage = usage;
__entry->where = where;
TP_fast_assign(
__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
__entry->truesize = skb->truesize;
- __entry->sk_rcvbuf = sk->sk_rcvbuf;
+ __entry->sk_rcvbuf = READ_ONCE(sk->sk_rcvbuf);
),
TP_printk("rmem_alloc=%d truesize=%u sk_rcvbuf=%d",
__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
__entry->sysctl_wmem = sk_get_wmem0(sk, prot);
__entry->wmem_alloc = refcount_read(&sk->sk_wmem_alloc);
- __entry->wmem_queued = sk->sk_wmem_queued;
+ __entry->wmem_queued = READ_ONCE(sk->sk_wmem_queued);
__entry->kind = kind;
),
struct vm_area_struct *vma;
int ret, is_register, ref_ctr_updated = 0;
bool orig_page_huge = false;
+ unsigned int gup_flags = FOLL_FORCE;
is_register = is_swbp_insn(&opcode);
uprobe = container_of(auprobe, struct uprobe, arch);
retry:
+ if (is_register)
+ gup_flags |= FOLL_SPLIT_PMD;
/* Read the page with vaddr into memory */
- ret = get_user_pages_remote(NULL, mm, vaddr, 1,
- FOLL_FORCE | FOLL_SPLIT_PMD, &old_page, &vma, NULL);
+ ret = get_user_pages_remote(NULL, mm, vaddr, 1, gup_flags,
+ &old_page, &vma, NULL);
if (ret <= 0)
return ret;
if (ret <= 0)
goto put_old;
+ if (WARN(!is_register && PageCompound(old_page),
+ "uprobe unregister should never work on compound page\n")) {
+ ret = -EINVAL;
+ goto put_old;
+ }
+
/* We are going to replace instruction, update ref_ctr. */
if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
find $cpio_dir -type f -print0 |
xargs -0 -P8 -n1 perl -pi -e 'BEGIN {undef $/;}; s/\/\*((?!SPDX).)*?\*\///smg;'
-# Create archive and try to normalize metadata for reproducibility
-tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
- --owner=0 --group=0 --sort=name --numeric-owner \
- -Jcf $tarfile -C $cpio_dir/ . > /dev/null
+# Create archive and try to normalize metadata for reproducibility.
+# For compatibility with older versions of tar, files are fed to tar
+# pre-sorted, as --sort=name might not be available.
+find $cpio_dir -printf "./%P\n" | LC_ALL=C sort | \
+ tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
+ --owner=0 --group=0 --numeric-owner --no-recursion \
+ -Jcf $tarfile -C $cpio_dir/ -T - > /dev/null
echo "$src_files_md5" > kernel/kheaders.md5
echo "$obj_files_md5" >> kernel/kheaders.md5
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
-void __kthread_queue_delayed_work(struct kthread_worker *worker,
- struct kthread_delayed_work *dwork,
- unsigned long delay)
+static void __kthread_queue_delayed_work(struct kthread_worker *worker,
+ struct kthread_delayed_work *dwork,
+ unsigned long delay)
{
struct timer_list *timer = &dwork->timer;
struct kthread_work *work = &dwork->work;
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/syscalls.h>
+#include <linux/pm_runtime.h>
#include "power.h"
}
late_initcall(pm_qos_power_init);
+
+/* Definitions related to the frequency QoS below. */
+
+/**
+ * freq_constraints_init - Initialize frequency QoS constraints.
+ * @qos: Frequency QoS constraints to initialize.
+ */
+void freq_constraints_init(struct freq_constraints *qos)
+{
+ struct pm_qos_constraints *c;
+
+ c = &qos->min_freq;
+ plist_head_init(&c->list);
+ c->target_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+ c->default_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+ c->no_constraint_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+ c->type = PM_QOS_MAX;
+ c->notifiers = &qos->min_freq_notifiers;
+ BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
+
+ c = &qos->max_freq;
+ plist_head_init(&c->list);
+ c->target_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+ c->default_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+ c->no_constraint_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+ c->type = PM_QOS_MIN;
+ c->notifiers = &qos->max_freq_notifiers;
+ BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
+}
+
+/**
+ * freq_qos_read_value - Get frequency QoS constraint for a given list.
+ * @qos: Constraints to evaluate.
+ * @type: QoS request type.
+ */
+s32 freq_qos_read_value(struct freq_constraints *qos,
+ enum freq_qos_req_type type)
+{
+ s32 ret;
+
+ switch (type) {
+ case FREQ_QOS_MIN:
+ ret = IS_ERR_OR_NULL(qos) ?
+ FREQ_QOS_MIN_DEFAULT_VALUE :
+ pm_qos_read_value(&qos->min_freq);
+ break;
+ case FREQ_QOS_MAX:
+ ret = IS_ERR_OR_NULL(qos) ?
+ FREQ_QOS_MAX_DEFAULT_VALUE :
+ pm_qos_read_value(&qos->max_freq);
+ break;
+ default:
+ WARN_ON(1);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * freq_qos_apply - Add/modify/remove frequency QoS request.
+ * @req: Constraint request to apply.
+ * @action: Action to perform (add/update/remove).
+ * @value: Value to assign to the QoS request.
+ */
+static int freq_qos_apply(struct freq_qos_request *req,
+ enum pm_qos_req_action action, s32 value)
+{
+ int ret;
+
+	switch (req->type) {
+ case FREQ_QOS_MIN:
+ ret = pm_qos_update_target(&req->qos->min_freq, &req->pnode,
+ action, value);
+ break;
+ case FREQ_QOS_MAX:
+ ret = pm_qos_update_target(&req->qos->max_freq, &req->pnode,
+ action, value);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * freq_qos_add_request - Insert new frequency QoS request into a given list.
+ * @qos: Constraints to update.
+ * @req: Preallocated request object.
+ * @type: Request type.
+ * @value: Request value.
+ *
+ * Insert a new entry into the @qos list of requests, recompute the effective
+ * QoS constraint value for that list and initialize the @req object. The
+ * caller needs to save that object for later use in updates and removal.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_add_request(struct freq_constraints *qos,
+ struct freq_qos_request *req,
+ enum freq_qos_req_type type, s32 value)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(qos) || !req)
+ return -EINVAL;
+
+ if (WARN(freq_qos_request_active(req),
+ "%s() called for active request\n", __func__))
+ return -EINVAL;
+
+ req->qos = qos;
+ req->type = type;
+ ret = freq_qos_apply(req, PM_QOS_ADD_REQ, value);
+ if (ret < 0) {
+ req->qos = NULL;
+ req->type = 0;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_add_request);
+
+/**
+ * freq_qos_update_request - Modify existing frequency QoS request.
+ * @req: Request to modify.
+ * @new_value: New request value.
+ *
+ * Update an existing frequency QoS request along with the effective constraint
+ * value for the list of requests it belongs to.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_update_request(struct freq_qos_request *req, s32 new_value)
+{
+ if (!req)
+ return -EINVAL;
+
+ if (WARN(!freq_qos_request_active(req),
+ "%s() called for unknown object\n", __func__))
+ return -EINVAL;
+
+ if (req->pnode.prio == new_value)
+ return 0;
+
+ return freq_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
+}
+EXPORT_SYMBOL_GPL(freq_qos_update_request);
+
+/**
+ * freq_qos_remove_request - Remove frequency QoS request from its list.
+ * @req: Request to remove.
+ *
+ * Remove the given frequency QoS request from the list of constraints it
+ * belongs to and recompute the effective constraint value for that list.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_remove_request(struct freq_qos_request *req)
+{
+ if (!req)
+ return -EINVAL;
+
+ if (WARN(!freq_qos_request_active(req),
+ "%s() called for unknown object\n", __func__))
+ return -EINVAL;
+
+ return freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+}
+EXPORT_SYMBOL_GPL(freq_qos_remove_request);
+
+/**
+ * freq_qos_add_notifier - Add frequency QoS change notifier.
+ * @qos: List of requests to add the notifier to.
+ * @type: Request type.
+ * @notifier: Notifier block to add.
+ */
+int freq_qos_add_notifier(struct freq_constraints *qos,
+ enum freq_qos_req_type type,
+ struct notifier_block *notifier)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(qos) || !notifier)
+ return -EINVAL;
+
+ switch (type) {
+ case FREQ_QOS_MIN:
+ ret = blocking_notifier_chain_register(qos->min_freq.notifiers,
+ notifier);
+ break;
+ case FREQ_QOS_MAX:
+ ret = blocking_notifier_chain_register(qos->max_freq.notifiers,
+ notifier);
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_add_notifier);
+
+/**
+ * freq_qos_remove_notifier - Remove frequency QoS change notifier.
+ * @qos: List of requests to remove the notifier from.
+ * @type: Request type.
+ * @notifier: Notifier block to remove.
+ */
+int freq_qos_remove_notifier(struct freq_constraints *qos,
+ enum freq_qos_req_type type,
+ struct notifier_block *notifier)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(qos) || !notifier)
+ return -EINVAL;
+
+ switch (type) {
+ case FREQ_QOS_MIN:
+ ret = blocking_notifier_chain_unregister(qos->min_freq.notifiers,
+ notifier);
+ break;
+ case FREQ_QOS_MAX:
+ ret = blocking_notifier_chain_unregister(qos->max_freq.notifiers,
+ notifier);
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_remove_notifier);
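
Taken together, the new interface follows the familiar PM QoS flow: initialize a constraint set once, then add, update, and remove requests against its MIN or MAX list. A usage sketch (hypothetical driver, error handling mostly elided; the frequency values are illustrative and in whatever unit the users of the constraint set agree on):

	struct freq_constraints qos;
	struct freq_qos_request req;
	s32 min_freq;
	int ret;

	freq_constraints_init(&qos);

	ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 800000);
	if (ret < 0)
		return ret;

	min_freq = freq_qos_read_value(&qos, FREQ_QOS_MIN);

	freq_qos_update_request(&req, 1000000);
	freq_qos_remove_request(&req);
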
* Copyright (C) 2010 SUSE Linux Products GmbH
* Copyright (C) 2010 Tejun Heo <tj@kernel.org>
*/
+#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
/* Reset ack counter. */
atomic_set(&msdata->thread_ack, msdata->num_threads);
smp_wmb();
- msdata->state = newstate;
+ WRITE_ONCE(msdata->state, newstate);
}
/* Last one to ack a state moves to the next state. */
static int multi_cpu_stop(void *data)
{
struct multi_stop_data *msdata = data;
- enum multi_stop_state curstate = MULTI_STOP_NONE;
+ enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
int cpu = smp_processor_id(), err = 0;
const struct cpumask *cpumask;
unsigned long flags;
do {
/* Chill out and ensure we re-read multi_stop_state. */
stop_machine_yield(cpumask);
- if (msdata->state != curstate) {
- curstate = msdata->state;
+ newstate = READ_ONCE(msdata->state);
+ if (newstate != curstate) {
+ curstate = newstate;
switch (curstate) {
case MULTI_STOP_DISABLE_IRQ:
local_irq_disable();
#ifdef CONFIG_SPARC
#endif
-#ifdef __hppa__
+#ifdef CONFIG_PARISC
extern int pwrsw_enabled;
#endif
.proc_handler = proc_dointvec,
},
#endif
-#ifdef __hppa__
+#ifdef CONFIG_PARISC
{
.procname = "soft-power",
.data = &pwrsw_enabled,
struct hrtimer_clock_base *base;
for (;;) {
- base = timer->base;
+ base = READ_ONCE(timer->base);
if (likely(base != &migration_base)) {
raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
if (likely(base == timer->base))
return base;
/* See the comment in lock_hrtimer_base() */
- timer->base = &migration_base;
+ WRITE_ONCE(timer->base, &migration_base);
raw_spin_unlock(&base->cpu_base->lock);
raw_spin_lock(&new_base->cpu_base->lock);
raw_spin_unlock(&new_base->cpu_base->lock);
raw_spin_lock(&base->cpu_base->lock);
new_cpu_base = this_cpu_base;
- timer->base = base;
+ WRITE_ONCE(timer->base, base);
goto again;
}
- timer->base = new_base;
+ WRITE_ONCE(timer->base, new_base);
} else {
if (new_cpu_base != this_cpu_base &&
hrtimer_check_target(timer, new_base)) {
goto out;
}
+ mutex_lock(&event_mutex);
ret = perf_trace_event_init(tp_event, p_event);
if (ret)
destroy_local_trace_kprobe(tp_event);
+ mutex_unlock(&event_mutex);
out:
kfree(func);
return ret;
void perf_kprobe_destroy(struct perf_event *p_event)
{
+ mutex_lock(&event_mutex);
perf_trace_event_close(p_event);
perf_trace_event_unreg(p_event);
+ mutex_unlock(&event_mutex);
destroy_local_trace_kprobe(p_event->tp_event);
}
{
if (str_has_prefix(type, "u"))
return false;
+ if (strcmp(type, "gfp_t") == 0)
+ return false;
return true;
}
#include <linux/export.h>
#include <linux/generic-radix-tree.h>
#include <linux/gfp.h>
+#include <linux/kmemleak.h>
#define GENRADIX_ARY (PAGE_SIZE / sizeof(struct genradix_node *))
#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
}
EXPORT_SYMBOL(__genradix_ptr);
+static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
+{
+ struct genradix_node *node;
+
+ node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);
+
+ /*
+ * We're using pages (not slab allocations) directly for kernel data
+ * structures, so we need to explicitly inform kmemleak of them in order
+ * to avoid false positive memory leak reports.
+ */
+ kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);
+ return node;
+}
+
+static inline void genradix_free_node(struct genradix_node *node)
+{
+ kmemleak_free(node);
+ free_page((unsigned long)node);
+}
+
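The same pairing applies to any long-lived object carved directly out of pages: kmemleak tracks slab allocations automatically, but page-backed objects are invisible to it unless registered by hand, and every kmemleak_alloc() must have a matching kmemleak_free() before the page is returned. In sketch form:

	void *obj = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	kmemleak_alloc(obj, PAGE_SIZE, 1, GFP_KERNEL);	/* min_count = 1 */
	/* ... use obj ... */
	kmemleak_free(obj);
	free_page((unsigned long)obj);
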
/*
* Returns pointer to the specified byte @offset within @radix, allocating it if
* necessary - newly allocated slots are always zeroed out:
break;
if (!new_node) {
- new_node = (void *)
- __get_free_page(gfp_mask|__GFP_ZERO);
+ new_node = genradix_alloc_node(gfp_mask);
if (!new_node)
return NULL;
}
n = READ_ONCE(*p);
if (!n) {
if (!new_node) {
- new_node = (void *)
- __get_free_page(gfp_mask|__GFP_ZERO);
+ new_node = genradix_alloc_node(gfp_mask);
if (!new_node)
return NULL;
}
}
if (new_node)
- free_page((unsigned long) new_node);
+ genradix_free_node(new_node);
return &n->data[offset];
}
genradix_free_recurse(n->children[i], level - 1);
}
- free_page((unsigned long) n);
+ genradix_free_node(n);
}
int __genradix_prealloc(struct __genradix *radix, size_t size,
return 1;
}
+static int __init do_kmem_cache_size_bulk(int size, int *total_failures)
+{
+ struct kmem_cache *c;
+ int i, iter, maxiter = 1024;
+ int num, bytes;
+ bool fail = false;
+ void *objects[10];
+
+ c = kmem_cache_create("test_cache", size, size, 0, NULL);
+ for (iter = 0; (iter < maxiter) && !fail; iter++) {
+ num = kmem_cache_alloc_bulk(c, GFP_KERNEL, ARRAY_SIZE(objects),
+ objects);
+ for (i = 0; i < num; i++) {
+ bytes = count_nonzero_bytes(objects[i], size);
+ if (bytes)
+ fail = true;
+ fill_with_garbage(objects[i], size);
+ }
+
+ if (num)
+ kmem_cache_free_bulk(c, num, objects);
+ }
+	kmem_cache_destroy(c);
+	*total_failures += fail;
+ return 1;
+}
+
/*
* Test kmem_cache allocation by creating caches of different sizes, with and
* without constructors, with and without SLAB_TYPESAFE_BY_RCU.
num_tests += do_kmem_cache_size(size, ctor, rcu, zero,
&failures);
}
+ num_tests += do_kmem_cache_size_bulk(size, &failures);
}
REPORT_FAILURES_IN_FN();
*total_failures += failures;
static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
{
int ret = 0;
- size_t start, end, i;
- size_t zero_start = size / 4;
- size_t zero_end = size - zero_start;
+ size_t start, end, i, zero_start, zero_end;
+
+ if (test(size < 2 * PAGE_SIZE, "buffer too small"))
+ return -EINVAL;
+
+ /*
+ * We want to cross a page boundary to exercise the code more
+ * effectively. We also don't want to make the size we scan too large,
+ * otherwise the test can take a long time and cause soft lockups. So
+ * scan a 1024 byte region across the page boundary.
+ */
+ size = 1024;
+ start = PAGE_SIZE - (size / 2);
+
+ kmem += start;
+ umem += start;
+
+ zero_start = size / 4;
+ zero_end = size - zero_start;
/*
- * We conduct a series of check_nonzero_user() tests on a block of memory
- * with the following byte-pattern (trying every possible [start,end]
- * pair):
+ * We conduct a series of check_nonzero_user() tests on a block of
+ * memory with the following byte-pattern (trying every possible
+ * [start,end] pair):
*
* [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
*
- * And we verify that check_nonzero_user() acts identically to memchr_inv().
+ * And we verify that check_nonzero_user() acts identically to
+ * memchr_inv().
*/
memset(kmem, 0x0, size);
size_t ksize, usize;
umem_src = kmalloc(size, GFP_KERNEL);
- if ((ret |= test(umem_src == NULL, "kmalloc failed")))
+ ret = test(umem_src == NULL, "kmalloc failed");
+ if (ret)
goto out_free;
expected = kmalloc(size, GFP_KERNEL);
- if ((ret |= test(expected == NULL, "kmalloc failed")))
+ ret = test(expected == NULL, "kmalloc failed");
+ if (ret)
goto out_free;
/* Fill umem with a fixed byte pattern. */
/* Ensure the start of the pageblock or zone is online and valid */
block_pfn = pageblock_start_pfn(pfn);
- block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn));
+ block_pfn = max(block_pfn, zone->zone_start_pfn);
+ block_page = pfn_to_online_page(block_pfn);
if (block_page) {
page = block_page;
pfn = block_pfn;
}
/* Ensure the end of the pageblock or zone is online and valid */
- block_pfn += pageblock_nr_pages;
+ block_pfn = pageblock_end_pfn(pfn) - 1;
block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
end_page = pfn_to_online_page(block_pfn);
if (!end_page)
page += (1 << PAGE_ALLOC_COSTLY_ORDER);
pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
- } while (page < end_page);
+ } while (page <= end_page);
return false;
}
#include <linux/rmap.h>
#include <linux/delayacct.h>
#include <linux/psi.h>
+#include <linux/ramfs.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
}
static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
+ unsigned long end, unsigned int flags,
+ struct page **pages, int *nr)
{
unsigned long pte_end;
struct page *head, *page;
pte = READ_ONCE(*ptep);
- if (!pte_access_permitted(pte, write))
+ if (!pte_access_permitted(pte, flags & FOLL_WRITE))
return 0;
/* hugepages are never "special" */
}
static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned int pdshift, unsigned long end, int write,
+ unsigned int pdshift, unsigned long end, unsigned int flags,
struct page **pages, int *nr)
{
pte_t *ptep;
ptep = hugepte_offset(hugepd, addr, pdshift);
do {
next = hugepte_addr_end(addr, end, sz);
- if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
+ if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
return 0;
} while (ptep++, addr = next, addr != end);
}
#else
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned pdshift, unsigned long end, int write,
+ unsigned int pdshift, unsigned long end, unsigned int flags,
struct page **pages, int *nr)
{
return 0;
#endif /* CONFIG_ARCH_HAS_HUGEPD */
static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
- unsigned long end, unsigned int flags, struct page **pages, int *nr)
+ unsigned long end, unsigned int flags,
+ struct page **pages, int *nr)
{
struct page *head, *page;
int refs;
ds_queue->split_queue_len--;
list_del(page_deferred_list(head));
}
- if (mapping)
- __dec_node_page_state(page, NR_SHMEM_THPS);
+ if (mapping) {
+ if (PageSwapBacked(page))
+ __dec_node_page_state(page, NR_SHMEM_THPS);
+ else
+ __dec_node_page_state(page, NR_FILE_THPS);
+ }
+
spin_unlock(&ds_queue->split_queue_lock);
__split_huge_page(page, list, end, flags);
if (PageSwapCache(head)) {
struct page *page;
for (i = start_pfn; i < end_pfn; i++) {
- if (!pfn_valid(i))
+ page = pfn_to_online_page(i);
+ if (!page)
return false;
- page = pfn_to_page(i);
-
if (page_zone(page) != z)
return false;
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/cpumask.h>
+#include <linux/mman.h>
#include <linux/atomic.h>
#include <linux/user_namespace.h>
return object;
}
+/*
+ * Remove an object from the object_tree_root and object_list. Must be called
+ * with the kmemleak_lock held _if_ kmemleak is still enabled.
+ */
+static void __remove_object(struct kmemleak_object *object)
+{
+ rb_erase(&object->rb_node, &object_tree_root);
+ list_del_rcu(&object->object_list);
+}
+
/*
* Look up an object in the object search tree and remove it from both
* object_tree_root and object_list. The returned object's use_count should be
write_lock_irqsave(&kmemleak_lock, flags);
object = lookup_object(ptr, alias);
- if (object) {
- rb_erase(&object->rb_node, &object_tree_root);
- list_del_rcu(&object->object_list);
- }
+ if (object)
+ __remove_object(object);
write_unlock_irqrestore(&kmemleak_lock, flags);
return object;
static void __kmemleak_do_cleanup(void)
{
- struct kmemleak_object *object;
+ struct kmemleak_object *object, *tmp;
- rcu_read_lock();
- list_for_each_entry_rcu(object, &object_list, object_list)
- delete_object_full(object->pointer);
- rcu_read_unlock();
+ /*
+	 * Kmemleak has already been disabled; there is no need for RCU list
+	 * traversal or for holding kmemleak_lock.
+ */
+ list_for_each_entry_safe(object, tmp, &object_list, object_list) {
+ __remove_object(object);
+ __delete_object(object);
+ }
}
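
Since the loop now unlinks entries while walking, it has to use the _safe iterator, which caches the next node before the current one can be freed. The general pattern, with hypothetical names:

	struct foo *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &foo_list, node) {
		list_del(&obj->node);
		kfree(obj);
	}
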
/*
align = SMP_CACHE_BYTES;
}
- if (end > memblock.current_limit)
- end = memblock.current_limit;
-
again:
found = memblock_find_in_range_node(size, align, start, end, nid,
flags);
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, nid);
+ if (max_addr > memblock.current_limit)
+ max_addr = memblock.current_limit;
+
alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid);
/* retry allocation without lower limit */
struct mem_cgroup *from,
struct mem_cgroup *to)
{
+ struct lruvec *from_vec, *to_vec;
+ struct pglist_data *pgdat;
unsigned long flags;
unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
int ret;
anon = PageAnon(page);
+ pgdat = page_pgdat(page);
+ from_vec = mem_cgroup_lruvec(pgdat, from);
+ to_vec = mem_cgroup_lruvec(pgdat, to);
+
spin_lock_irqsave(&from->move_lock, flags);
if (!anon && page_mapped(page)) {
- __mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages);
- __mod_memcg_state(to, NR_FILE_MAPPED, nr_pages);
+ __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
+ __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
}
/*
struct address_space *mapping = page_mapping(page);
if (mapping_cap_account_dirty(mapping)) {
- __mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages);
- __mod_memcg_state(to, NR_FILE_DIRTY, nr_pages);
+ __mod_lruvec_state(from_vec, NR_FILE_DIRTY, -nr_pages);
+ __mod_lruvec_state(to_vec, NR_FILE_DIRTY, nr_pages);
}
}
if (PageWriteback(page)) {
- __mod_memcg_state(from, NR_WRITEBACK, -nr_pages);
- __mod_memcg_state(to, NR_WRITEBACK, nr_pages);
+ __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
+ __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct task_struct *tsk;
unsigned long addr;
short size_shift;
- char addr_valid;
};
/*
}
}
tk->addr = page_address_in_vma(p, vma);
- tk->addr_valid = 1;
if (is_zone_device_page(p))
tk->size_shift = dev_pagemap_mapping_shift(p, vma);
else
tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;
/*
- * In theory we don't have to kill when the page was
- * munmaped. But it could be also a mremap. Since that's
- * likely very rare kill anyways just out of paranoia, but use
- * a SIGKILL because the error is not contained anymore.
+	 * Send SIGKILL if "tk->addr == -EFAULT". Also, since
+	 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
+	 * "tk->size_shift == 0" effectively checks for no mapping on
+ * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
+ * to a process' address space, it's possible not all N VMAs
+ * contain mappings for the page, but at least one VMA does.
+ * Only deliver SIGBUS with payload derived from the VMA that
+ * has a mapping for the page.
*/
- if (tk->addr == -EFAULT || tk->size_shift == 0) {
+ if (tk->addr == -EFAULT) {
pr_info("Memory failure: Unable to find user space address %lx in %s\n",
page_to_pfn(p), tsk->comm);
- tk->addr_valid = 0;
+ } else if (tk->size_shift == 0) {
+ kfree(tk);
+ return;
}
get_task_struct(tsk);
tk->tsk = tsk;
* make sure the process doesn't catch the
* signal and then access the memory. Just kill it.
*/
- if (fail || tk->addr_valid == 0) {
+ if (fail || tk->addr == -EFAULT) {
pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
pfn, tk->tsk->comm, tk->tsk->pid);
do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
if (!sysctl_memory_failure_recovery)
panic("Memory failure on page %lx", pfn);
- if (!pfn_valid(pfn)) {
+ p = pfn_to_online_page(pfn);
+ if (!p) {
+ if (pfn_valid(pfn)) {
+ pgmap = get_dev_pagemap(pfn, NULL);
+ if (pgmap)
+ return memory_failure_dev_pagemap(pfn, flags,
+ pgmap);
+ }
pr_err("Memory failure: %#lx: memory outside kernel control\n",
pfn);
return -ENXIO;
}
- pgmap = get_dev_pagemap(pfn, NULL);
- if (pgmap)
- return memory_failure_dev_pagemap(pfn, flags, pgmap);
-
- p = pfn_to_page(pfn);
if (PageHuge(p))
return memory_failure_hugetlb(pfn, flags);
if (TestSetPageHWPoison(p)) {
zone_span_writeunlock(zone);
}
-static void shrink_pgdat_span(struct pglist_data *pgdat,
- unsigned long start_pfn, unsigned long end_pfn)
+static void update_pgdat_span(struct pglist_data *pgdat)
{
- unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
- unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
- unsigned long pgdat_end_pfn = p;
- unsigned long pfn;
- int nid = pgdat->node_id;
-
- if (pgdat_start_pfn == start_pfn) {
- /*
- * If the section is smallest section in the pgdat, it need
- * shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
- * In this case, we find second smallest valid mem_section
- * for shrinking zone.
- */
- pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
- pgdat_end_pfn);
- if (pfn) {
- pgdat->node_start_pfn = pfn;
- pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
- }
- } else if (pgdat_end_pfn == end_pfn) {
- /*
- * If the section is biggest section in the pgdat, it need
- * shrink pgdat->node_spanned_pages.
- * In this case, we find second biggest valid mem_section for
- * shrinking zone.
- */
- pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
- start_pfn);
- if (pfn)
- pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
- }
-
- /*
- * If the section is not biggest or smallest mem_section in the pgdat,
- * it only creates a hole in the pgdat. So in this case, we need not
- * change the pgdat.
- * But perhaps, the pgdat has only hole data. Thus it check the pgdat
- * has only hole or not.
- */
- pfn = pgdat_start_pfn;
- for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SUBSECTION) {
- if (unlikely(!pfn_valid(pfn)))
- continue;
-
- if (pfn_to_nid(pfn) != nid)
- continue;
+ unsigned long node_start_pfn = 0, node_end_pfn = 0;
+ struct zone *zone;
- /* Skip range to be removed */
- if (pfn >= start_pfn && pfn < end_pfn)
- continue;
+ for (zone = pgdat->node_zones;
+ zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
+ unsigned long zone_end_pfn = zone->zone_start_pfn +
+ zone->spanned_pages;
- /* If we find valid section, we have nothing to do */
- return;
+		/* No need to lock the zones, they can't change. */
+		if (!zone->spanned_pages)
+			continue;
+		if (!node_end_pfn) {
+			node_start_pfn = zone->zone_start_pfn;
+			node_end_pfn = zone_end_pfn;
+			continue;
+		}
+
+		if (zone_end_pfn > node_end_pfn)
+			node_end_pfn = zone_end_pfn;
+		if (zone->zone_start_pfn < node_start_pfn)
+			node_start_pfn = zone->zone_start_pfn;
}
- /* The pgdat has no valid section */
- pgdat->node_start_pfn = 0;
- pgdat->node_spanned_pages = 0;
+ pgdat->node_start_pfn = node_start_pfn;
+ pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
}
static void __remove_zone(struct zone *zone, unsigned long start_pfn,
pgdat_resize_lock(zone->zone_pgdat, &flags);
shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
- shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
+ update_pgdat_span(pgdat);
pgdat_resize_unlock(zone->zone_pgdat, &flags);
}
void memunmap_pages(struct dev_pagemap *pgmap)
{
struct resource *res = &pgmap->res;
+ struct page *first_page;
unsigned long pfn;
int nid;
put_page(pfn_to_page(pfn));
dev_pagemap_cleanup(pgmap);
+ /* make sure to access a memmap that was actually initialized */
+ first_page = pfn_to_page(pfn_first(pgmap));
+
/* pages are dead and unused, undo the arch mapping */
- nid = page_to_nid(pfn_to_page(PHYS_PFN(res->start)));
+ nid = page_to_nid(first_page);
mem_hotplug_begin();
if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
- pfn = PHYS_PFN(res->start);
- __remove_pages(page_zone(pfn_to_page(pfn)), pfn,
- PHYS_PFN(resource_size(res)), NULL);
+ __remove_pages(page_zone(first_page), PHYS_PFN(res->start),
+ PHYS_PFN(resource_size(res)), NULL);
} else {
arch_remove_memory(nid, res->start, resource_size(res),
pgmap_altmap(pgmap));
if (page)
goto got_pg;
- if (order >= pageblock_order && (gfp_mask & __GFP_IO)) {
+ if (order >= pageblock_order && (gfp_mask & __GFP_IO) &&
+ !(gfp_mask & __GFP_RETRY_MAYFAIL)) {
/*
* If allocating entire pageblock(s) and compaction
* failed because all zones are below low watermarks
* or is prohibited because it recently failed at this
- * order, fail immediately.
+ * order, fail immediately unless the allocator has
+ * requested compaction and reclaim retry.
*
* Reclaim is
* - potentially very expensive because zones are far
#endif
};
+unsigned long page_ext_size = sizeof(struct page_ext);
+
static unsigned long total_usage;
-static unsigned long extra_mem;
static bool __init invoke_need_callbacks(void)
{
for (i = 0; i < entries; i++) {
if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
- page_ext_ops[i]->offset = sizeof(struct page_ext) +
- extra_mem;
- extra_mem += page_ext_ops[i]->size;
+ page_ext_ops[i]->offset = page_ext_size;
+ page_ext_size += page_ext_ops[i]->size;
need = true;
}
}
}
}
-static unsigned long get_entry_size(void)
-{
- return sizeof(struct page_ext) + extra_mem;
-}
-
static inline struct page_ext *get_entry(void *base, unsigned long index)
{
- return base + get_entry_size() * index;
+ return base + page_ext_size * index;
}
#if !defined(CONFIG_SPARSEMEM)
!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
nr_pages += MAX_ORDER_NR_PAGES;
- table_size = get_entry_size() * nr_pages;
+ table_size = page_ext_size * nr_pages;
base = memblock_alloc_try_nid(
table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
if (section->page_ext)
return 0;
- table_size = get_entry_size() * PAGES_PER_SECTION;
+ table_size = page_ext_size * PAGES_PER_SECTION;
base = alloc_page_ext(table_size, nid);
/*
* we need to apply a mask.
*/
pfn &= PAGE_SECTION_MASK;
- section->page_ext = (void *)base - get_entry_size() * pfn;
+ section->page_ext = (void *)base - page_ext_size * pfn;
total_usage += table_size;
return 0;
}
struct page *page = virt_to_page(addr);
size_t table_size;
- table_size = get_entry_size() * PAGES_PER_SECTION;
+ table_size = page_ext_size * PAGES_PER_SECTION;
BUG_ON(PageReserved(page));
kmemleak_free(addr);
short last_migrate_reason;
gfp_t gfp_mask;
depot_stack_handle_t handle;
-#ifdef CONFIG_DEBUG_PAGEALLOC
depot_stack_handle_t free_handle;
-#endif
};
-static bool page_owner_disabled = true;
+static bool page_owner_enabled = false;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);
static depot_stack_handle_t dummy_handle;
return -EINVAL;
if (strcmp(buf, "on") == 0)
- page_owner_disabled = false;
+ page_owner_enabled = true;
return 0;
}
static bool need_page_owner(void)
{
- if (page_owner_disabled)
- return false;
-
- return true;
+ return page_owner_enabled;
}
static __always_inline depot_stack_handle_t create_dummy_stack(void)
static void init_page_owner(void)
{
- if (page_owner_disabled)
+ if (!page_owner_enabled)
return;
register_dummy_stack();
{
int i;
struct page_ext *page_ext;
-#ifdef CONFIG_DEBUG_PAGEALLOC
depot_stack_handle_t handle = 0;
struct page_owner *page_owner;
- if (debug_pagealloc_enabled())
- handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
-#endif
+ handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
+ page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return;
for (i = 0; i < (1 << order); i++) {
- page_ext = lookup_page_ext(page + i);
- if (unlikely(!page_ext))
- continue;
- __clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
-#ifdef CONFIG_DEBUG_PAGEALLOC
- if (debug_pagealloc_enabled()) {
- page_owner = get_page_owner(page_ext);
- page_owner->free_handle = handle;
- }
-#endif
+ __clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
+ page_owner = get_page_owner(page_ext);
+ page_owner->free_handle = handle;
+ page_ext = page_ext_next(page_ext);
}
}
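
The loops above now step with page_ext_next() instead of calling lookup_page_ext() once per page. With page_ext entries laid out back to back and page_ext_size exported, the helper presumably reduces to pointer arithmetic along these lines (a sketch, not necessarily the exact definition):

	static inline struct page_ext *page_ext_next(struct page_ext *curr)
	{
		void *next = curr;

		next += page_ext_size;	/* entries are page_ext_size apart */
		return next;
	}
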
page_owner->gfp_mask = gfp_mask;
page_owner->last_migrate_reason = -1;
__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
- __set_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
+ __set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
- page_ext = lookup_page_ext(page + i);
+ page_ext = page_ext_next(page_ext);
}
}
if (unlikely(!page_ext))
return;
- page_owner = get_page_owner(page_ext);
- page_owner->order = 0;
- for (i = 1; i < (1 << order); i++) {
- page_ext = lookup_page_ext(page + i);
+ for (i = 0; i < (1 << order); i++) {
page_owner = get_page_owner(page_ext);
page_owner->order = 0;
+ page_ext = page_ext_next(page_ext);
}
}
* the new page, which will be freed.
*/
__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
- __set_bit(PAGE_EXT_OWNER_ACTIVE, &new_ext->flags);
+ __set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
}
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
* not matter as the mixed block count will still be correct
*/
for (; pfn < end_pfn; ) {
- if (!pfn_valid(pfn)) {
+ page = pfn_to_online_page(pfn);
+ if (!page) {
pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
continue;
}
block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
block_end_pfn = min(block_end_pfn, end_pfn);
- page = pfn_to_page(pfn);
pageblock_mt = get_pageblock_migratetype(page);
for (; pfn < block_end_pfn; pfn++) {
if (!pfn_valid_within(pfn))
continue;
+ /* The pageblock is online, no need to recheck. */
page = pfn_to_page(pfn);
if (page_zone(page) != zone)
if (unlikely(!page_ext))
continue;
- if (!test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+ if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
continue;
page_owner = get_page_owner(page_ext);
return;
}
- if (test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+ if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
pr_alert("page_owner tracks the page as allocated\n");
else
pr_alert("page_owner tracks the page as freed\n");
stack_trace_print(entries, nr_entries, 0);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
handle = READ_ONCE(page_owner->free_handle);
if (!handle) {
pr_alert("page_owner free stack trace missing\n");
pr_alert("page last free stack trace:\n");
stack_trace_print(entries, nr_entries, 0);
}
-#endif
if (page_owner->last_migrate_reason != -1)
pr_alert("page has been migrated, last migrate reason: %s\n",
* Although we do have the info about past allocation of free
* pages, it's not relevant for current memory usage.
*/
- if (!test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+ if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
continue;
page_owner = get_page_owner(page_ext);
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
+#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
/**
* __ksize -- Uninstrumented ksize.
+ * @objp: pointer to the object
*
* Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
* safety checks as ksize() with KASAN instrumentation enabled.
+ *
+ * Return: size of the actual memory used by @objp in bytes
*/
size_t __ksize(const void *objp)
{
static void destroy_memcg_params(struct kmem_cache *s)
{
- if (is_root_cache(s))
+ if (is_root_cache(s)) {
kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
- else
+ } else {
+ mem_cgroup_put(s->memcg_params.memcg);
+ WRITE_ONCE(s->memcg_params.memcg, NULL);
percpu_ref_exit(&s->memcg_params.refcnt);
+ }
}
static void free_memcg_params(struct rcu_head *rcu)
} else {
list_del(&s->memcg_params.children_node);
list_del(&s->memcg_params.kmem_caches_node);
- mem_cgroup_put(s->memcg_params.memcg);
- WRITE_ONCE(s->memcg_params.memcg, NULL);
}
}
#else
return p;
}
+/*
+ * If the object has been wiped upon free, make sure it's fully initialized by
+ * zeroing out the freelist pointer.
+ */
+static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
+ void *obj)
+{
+ if (unlikely(slab_want_init_on_free(s)) && obj)
+ memset((void *)((char *)obj + s->offset), 0, sizeof(void *));
+}
+
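For context on why this helper exists (a sketch of the layout, assuming the freelist pointer is kept inside the object at s->offset):

	/*
	 *   object + 0          user data, zeroed by init_on_free at free
	 *   object + s->offset  next-free pointer, written after the wipe
	 *
	 * On the next allocation, the stale pointer at s->offset must be
	 * re-zeroed so a "wiped on free" object really is all zero.
	 */
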
/*
* Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
* have the fastpath folded into their functions. So no function call
prefetch_freepointer(s, next_object);
stat(s, ALLOC_FASTPATH);
}
- /*
- * If the object has been wiped upon free, make sure it's fully
- * initialized by zeroing out freelist pointer.
- */
- if (unlikely(slab_want_init_on_free(s)) && object)
- memset(object + s->offset, 0, sizeof(void *));
+
+ maybe_wipe_obj_freeptr(s, object);
if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
memset(object, 0, s->object_size);
goto error;
c = this_cpu_ptr(s->cpu_slab);
+ maybe_wipe_obj_freeptr(s, p[i]);
+
continue; /* goto for-loop */
}
c->freelist = get_freepointer(s, object);
p[i] = object;
+ maybe_wipe_obj_freeptr(s, p[i]);
}
c->tid = next_tid(c->tid);
local_irq_enable();
}
}
- get_online_mems();
+ /*
+	 * We cannot take "mem_hotplug_lock" here with "kernfs_mutex" already
+	 * held, as that would conflict with the existing lock order:
+ *
+ * mem_hotplug_lock->slab_mutex->kernfs_mutex
+ *
+ * We don't really need mem_hotplug_lock (to hold off
+ * slab_mem_going_offline_callback) here because slab's memory hot
+ * unplug code doesn't destroy the kmem_cache->node[] data.
+ */
+
#ifdef CONFIG_SLUB_DEBUG
if (flags & SO_ALL) {
struct kmem_cache_node *n;
x += sprintf(buf + x, " N%d=%lu",
node, nodes[node]);
#endif
- put_online_mems();
kfree(nodes);
return x + sprintf(buf + x, "\n");
}
unlock_page(page);
continue;
}
+
+ /* Take a pin outside pagevec */
+ get_page(page);
+
+ /*
+ * Drop extra pins before trying to invalidate
+ * the huge page.
+ */
+ pagevec_remove_exceptionals(&pvec);
+ pagevec_release(&pvec);
}
ret = invalidate_inode_page(page);
*/
if (!ret)
deactivate_file_page(page);
+ if (PageTransHuge(page))
+ put_page(page);
count += ret;
}
pagevec_remove_exceptionals(&pvec);
*/
unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
{
- unsigned long lru_size;
+ unsigned long lru_size = 0;
int zid;
- if (!mem_cgroup_disabled())
- lru_size = lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
- else
+ if (!mem_cgroup_disabled()) {
+ for (zid = 0; zid < MAX_NR_ZONES; zid++)
+ lru_size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
+ } else
lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
* Note that if SetPageDirty is always performed via set_page_dirty,
* and thus under the i_pages lock, then this ordering is not required.
*/
- if (unlikely(PageTransHuge(page)) && PageSwapCache(page))
- refcount = 1 + HPAGE_PMD_NR;
- else
- refcount = 2;
+ refcount = 1 + compound_nr(page);
if (!page_ref_freeze(page, refcount))
goto cannot_free;
/* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
{
int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
unsigned int hlen, ll_rs, mtu;
+ ktime_t tstamp = skb->tstamp;
struct ip_frag_state state;
struct iphdr *iph;
int err;
if (iter.frag)
ip_fraglist_prepare(skb, &iter);
+ skb->tstamp = tstamp;
err = output(net, sk, data, skb);
if (err || !iter.frag)
break;
goto blackhole;
}
+ skb2->tstamp = tstamp;
err = output(net, sk, data, skb2);
if (err)
goto blackhole;
skb->len += copied;
skb->truesize += truesize;
if (sk && sk->sk_type == SOCK_STREAM) {
- sk->sk_wmem_queued += truesize;
+ sk_wmem_queued_add(sk, truesize);
sk_mem_charge(sk, truesize);
} else {
refcount_add(truesize, &skb->sk->sk_wmem_alloc);
case SO_RCVBUF:
val = min_t(u32, val, sysctl_rmem_max);
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
- sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
+ WRITE_ONCE(sk->sk_rcvbuf,
+ max_t(int, val * 2, SOCK_MIN_RCVBUF));
break;
case SO_SNDBUF:
val = min_t(u32, val, sysctl_wmem_max);
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
- sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+ WRITE_ONCE(sk->sk_sndbuf,
+ max_t(int, val * 2, SOCK_MIN_SNDBUF));
break;
case SO_MAX_PACING_RATE: /* 32bit version */
if (val != ~0U)
case SO_RCVLOWAT:
if (val < 0)
val = INT_MAX;
- sk->sk_rcvlowat = val ? : 1;
+ WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
break;
case SO_MARK:
if (sk->sk_mark != val) {
return __peernet2id_alloc(net, peer, &no);
}
-static void rtnl_net_notifyid(struct net *net, int cmd, int id);
+static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
+ struct nlmsghdr *nlh);
/* This function returns the id of a peer netns. If no id is assigned, one will
* be allocated and returned.
*/
id = __peernet2id_alloc(net, peer, &alloc);
spin_unlock_bh(&net->nsid_lock);
if (alloc && id >= 0)
- rtnl_net_notifyid(net, RTM_NEWNSID, id);
+ rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL);
if (alive)
put_net(peer);
return id;
idr_remove(&tmp->netns_ids, id);
spin_unlock_bh(&tmp->nsid_lock);
if (id >= 0)
- rtnl_net_notifyid(tmp, RTM_DELNSID, id);
+ rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL);
if (tmp == last)
break;
}
err = alloc_netid(net, peer, nsid);
spin_unlock_bh(&net->nsid_lock);
if (err >= 0) {
- rtnl_net_notifyid(net, RTM_NEWNSID, err);
+ rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
+ nlh);
err = 0;
} else if (err == -ENOSPC && nsid >= 0) {
err = -EEXIST;
return err < 0 ? err : skb->len;
}
-static void rtnl_net_notifyid(struct net *net, int cmd, int id)
+static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
+ struct nlmsghdr *nlh)
{
struct net_fill_args fillargs = {
+ .portid = portid,
+ .seq = nlh ? nlh->nlmsg_seq : 0,
.cmd = cmd,
.nsid = id,
};
if (err < 0)
goto err_out;
- rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
+ rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, 0);
return;
err_out:
fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq;
- tcp_sk(sk)->fastopen_rsk = NULL;
+ RCU_INIT_POINTER(tcp_sk(sk)->fastopen_rsk, NULL);
spin_lock_bh(&fastopenq->lock);
fastopenq->qlen--;
tcp_rsk(req)->tfo_listener = false;
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
- (unsigned int)sk->sk_rcvbuf)
+ (unsigned int)READ_ONCE(sk->sk_rcvbuf))
return -ENOMEM;
skb_orphan(skb);
* @skb: buffer
* @mpls_lse: MPLS label stack entry to push
* @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
+ * @mac_len: length of the MAC header
*
* Expects skb->data at mac header.
*
* Returns 0 on success, -errno otherwise.
*/
-int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto)
+int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
+ int mac_len)
{
struct mpls_shim_hdr *lse;
int err;
return err;
if (!skb->inner_protocol) {
- skb_set_inner_network_header(skb, skb->mac_len);
+ skb_set_inner_network_header(skb, mac_len);
skb_set_inner_protocol(skb, skb->protocol);
}
skb_push(skb, MPLS_HLEN);
memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
- skb->mac_len);
+ mac_len);
skb_reset_mac_header(skb);
- skb_set_network_header(skb, skb->mac_len);
+ skb_set_network_header(skb, mac_len);
lse = mpls_hdr(skb);
lse->label_stack_entry = mpls_lse;
*
* @skb: buffer
* @next_proto: ethertype of header after popped MPLS header
+ * @mac_len: length of the MAC header
*
* Expects skb->data at mac header.
*
* Returns 0 on success, -errno otherwise.
*/
-int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto)
+int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len)
{
int err;
if (unlikely(!eth_p_mpls(skb->protocol)))
- return -EINVAL;
+ return 0;
- err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
+ err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
if (unlikely(err))
return err;
skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
- skb->mac_len);
+ mac_len);
__skb_pull(skb, MPLS_HLEN);
skb_reset_mac_header(skb);
- skb_set_network_header(skb, skb->mac_len);
+ skb_set_network_header(skb, mac_len);
if (skb->dev && skb->dev->type == ARPHRD_ETHER) {
struct ethhdr *hdr;
rc = sk_backlog_rcv(sk, skb);
mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
- } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
+ } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
bh_unlock_sock(sk);
atomic_inc(&sk->sk_drops);
goto discard_and_relse;
*/
val = min_t(int, val, INT_MAX / 2);
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
- sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+ WRITE_ONCE(sk->sk_sndbuf,
+ max_t(int, val * 2, SOCK_MIN_SNDBUF));
/* Wake up sending tasks if we upped the value. */
sk->sk_write_space(sk);
break;
* returning the value we actually used in getsockopt
* is the most desirable behavior.
*/
- sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
+ WRITE_ONCE(sk->sk_rcvbuf,
+ max_t(int, val * 2, SOCK_MIN_RCVBUF));
break;
case SO_RCVBUFFORCE:
if (sock->ops->set_rcvlowat)
ret = sock->ops->set_rcvlowat(sk, val);
else
- sk->sk_rcvlowat = val ? : 1;
+ WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
break;
case SO_RCVTIMEO_OLD:
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
gfp_t priority)
{
- if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+ if (force ||
+ refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
struct sk_buff *skb = alloc_skb(size, priority);
+
if (skb) {
skb_set_owner_w(skb, sk);
return skb;
break;
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
- if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
+ if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
break;
if (sk->sk_shutdown & SEND_SHUTDOWN)
break;
if (sk->sk_shutdown & SEND_SHUTDOWN)
goto failure;
- if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
+ if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
break;
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
} else {
unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
- if (memory_pressure && *memory_pressure)
- *memory_pressure = 0;
+ if (memory_pressure && READ_ONCE(*memory_pressure))
+ WRITE_ONCE(*memory_pressure, 0);
}
}
/* Do not wake up a writer until he can make "significant"
* progress. --DaveM
*/
- if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
+ if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
- mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
+ mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
- mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
+ mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
- mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
+ mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
- mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
+ mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
}
dst->index = index;
INIT_LIST_HEAD(&dst->list);
- list_add_tail(&dsa_tree_list, &dst->list);
+ list_add_tail(&dst->list, &dsa_tree_list);
kref_init(&dst->refcount);
percpu_counter_inc(sk->sk_prot->orphan_count);
if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
- BUG_ON(tcp_sk(child)->fastopen_rsk != req);
+ BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
BUG_ON(sk != req->rsk_listener);
/* Paranoid, to prevent race condition if
* Also to satisfy an assertion in
* tcp_v4_destroy_sock().
*/
- tcp_sk(child)->fastopen_rsk = NULL;
+ RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
}
inet_csk_destroy_sock(child);
}
req->sk = child;
req->dl_next = NULL;
if (queue->rskq_accept_head == NULL)
- queue->rskq_accept_head = req;
+ WRITE_ONCE(queue->rskq_accept_head, req);
else
queue->rskq_accept_tail->dl_next = req;
queue->rskq_accept_tail = req;
if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
struct inet_diag_meminfo minfo = {
.idiag_rmem = sk_rmem_alloc_get(sk),
- .idiag_wmem = sk->sk_wmem_queued,
+ .idiag_wmem = READ_ONCE(sk->sk_wmem_queued),
.idiag_fmem = sk->sk_forward_alloc,
.idiag_tmem = sk_wmem_alloc_get(sk),
};
struct rtable *rt = skb_rtable(skb);
unsigned int mtu, hlen, ll_rs;
struct ip_fraglist_iter iter;
+ ktime_t tstamp = skb->tstamp;
struct ip_frag_state state;
int err = 0;
ip_fraglist_prepare(skb, &iter);
}
+ skb->tstamp = tstamp;
err = output(net, sk, skb);
if (!err)
/*
* Put this fragment into the sending queue.
*/
+ skb2->tstamp = tstamp;
err = output(net, sk, skb2);
if (err)
goto fail;
prev = cmpxchg(p, orig, rt);
if (prev == orig) {
if (orig) {
- dst_dev_put(&orig->dst);
+ rt_add_uncached_list(orig);
dst_release(&orig->dst);
}
} else {
int orig_oif = fl4->flowi4_oif;
unsigned int flags = 0;
struct rtable *rth;
- int err = -ENETUNREACH;
+ int err;
if (fl4->saddr) {
- rth = ERR_PTR(-EINVAL);
if (ipv4_is_multicast(fl4->saddr) ||
ipv4_is_lbcast(fl4->saddr) ||
- ipv4_is_zeronet(fl4->saddr))
+ ipv4_is_zeronet(fl4->saddr)) {
+ rth = ERR_PTR(-EINVAL);
goto out;
+ }
+
+ rth = ERR_PTR(-ENETUNREACH);
/* I removed check for oif == dev_out->oif here.
It was wrong for two reasons:
{
unsigned long val;
- if (tcp_memory_pressure)
+ if (READ_ONCE(tcp_memory_pressure))
return;
val = jiffies;
{
unsigned long val;
- if (!tcp_memory_pressure)
+ if (!READ_ONCE(tcp_memory_pressure))
return;
val = xchg(&tcp_memory_pressure, 0);
if (val)
icsk->icsk_sync_mss = tcp_sync_mss;
- sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
- sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
+ WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
+ WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
sk_sockets_allocated_inc(sk);
sk->sk_route_forced_caps = NETIF_F_GSO;
static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
int target, struct sock *sk)
{
- return (tp->rcv_nxt - tp->copied_seq >= target) ||
+ return (READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq) >= target) ||
(sk->sk_prot->stream_memory_read ?
sk->sk_prot->stream_memory_read(sk) : false);
}
/* Connected or passive Fast Open socket? */
if (state != TCP_SYN_SENT &&
- (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
+ (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
int target = sock_rcvlowat(sk, 0, INT_MAX);
- if (tp->urg_seq == tp->copied_seq &&
+ if (READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
!sock_flag(sk, SOCK_URGINLINE) &&
tp->urg_data)
target++;
unlock_sock_fast(sk, slow);
break;
case SIOCATMARK:
- answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
+ answ = tp->urg_data &&
+ READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
break;
case SIOCOUTQ:
if (sk->sk_state == TCP_LISTEN)
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
answ = 0;
else
- answ = tp->write_seq - tp->snd_una;
+ answ = READ_ONCE(tp->write_seq) - tp->snd_una;
break;
case SIOCOUTQNSD:
if (sk->sk_state == TCP_LISTEN)
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
answ = 0;
else
- answ = tp->write_seq - tp->snd_nxt;
+ answ = READ_ONCE(tp->write_seq) -
+ READ_ONCE(tp->snd_nxt);
break;
default:
return -ENOIOCTLCMD;
tcb->sacked = 0;
__skb_header_release(skb);
tcp_add_write_queue_tail(sk, skb);
- sk->sk_wmem_queued += skb->truesize;
+ sk_wmem_queued_add(sk, skb->truesize);
sk_mem_charge(sk, skb->truesize);
if (tp->nonagle & TCP_NAGLE_PUSH)
tp->nonagle &= ~TCP_NAGLE_PUSH;
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
- sk->sk_wmem_queued += copy;
+ sk_wmem_queued_add(sk, copy);
sk_mem_charge(sk, copy);
skb->ip_summed = CHECKSUM_PARTIAL;
- tp->write_seq += copy;
+ WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
TCP_SKB_CB(skb)->end_seq += copy;
tcp_skb_pcount_set(skb, 0);
if (!copied)
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
- tp->write_seq += copy;
+ WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
TCP_SKB_CB(skb)->end_seq += copy;
tcp_skb_pcount_set(skb, 0);
sk_eat_skb(sk, skb);
if (!desc->count)
break;
- tp->copied_seq = seq;
+ WRITE_ONCE(tp->copied_seq, seq);
}
- tp->copied_seq = seq;
+ WRITE_ONCE(tp->copied_seq, seq);
tcp_rcv_space_adjust(sk);
else
cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1;
val = min(val, cap);
- sk->sk_rcvlowat = val ? : 1;
+ WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
/* Check if we need to signal EPOLLIN right now */
tcp_data_ready(sk);
val <<= 1;
if (val > sk->sk_rcvbuf) {
- sk->sk_rcvbuf = val;
+ WRITE_ONCE(sk->sk_rcvbuf, val);
tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val);
}
return 0;
out:
up_read(&current->mm->mmap_sem);
if (length) {
- tp->copied_seq = seq;
+ WRITE_ONCE(tp->copied_seq, seq);
tcp_rcv_space_adjust(sk);
/* Clean up data we have read: This will do ACK frames. */
if (urg_offset < used) {
if (!urg_offset) {
if (!sock_flag(sk, SOCK_URGINLINE)) {
- ++*seq;
+ WRITE_ONCE(*seq, *seq + 1);
urg_hole++;
offset++;
used--;
}
}
- *seq += used;
+ WRITE_ONCE(*seq, *seq + used);
copied += used;
len -= used;
found_fin_ok:
/* Process the FIN. */
- ++*seq;
+ WRITE_ONCE(*seq, *seq + 1);
if (!(flags & MSG_PEEK))
sk_eat_skb(sk, skb);
break;
}
if (sk->sk_state == TCP_CLOSE) {
- struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
+ struct request_sock *req;
+
+ req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
+ lockdep_sock_is_held(sk));
/* We could get here with a non-NULL req if the socket is
* aborted (e.g., closed with unread data) before 3WHS
* finishes.
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int old_state = sk->sk_state;
+ u32 seq;
if (old_state != TCP_CLOSE)
tcp_set_state(sk, TCP_CLOSE);
__kfree_skb(sk->sk_rx_skb_cache);
sk->sk_rx_skb_cache = NULL;
}
- tp->copied_seq = tp->rcv_nxt;
+ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
tp->urg_data = 0;
tcp_write_queue_purge(sk);
tcp_fastopen_active_disable_ofo_check(sk);
tp->srtt_us = 0;
tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
tp->rcv_rtt_last_tsecr = 0;
- tp->write_seq += tp->max_window + 2;
- if (tp->write_seq == 0)
- tp->write_seq = 1;
+
+ seq = tp->write_seq + tp->max_window + 2;
+ if (!seq)
+ seq = 1;
+ WRITE_ONCE(tp->write_seq, seq);
+
icsk->icsk_backoff = 0;
tp->snd_cwnd = 2;
icsk->icsk_probes_out = 0;
if (sk->sk_state != TCP_CLOSE)
err = -EPERM;
else if (tp->repair_queue == TCP_SEND_QUEUE)
- tp->write_seq = val;
+ WRITE_ONCE(tp->write_seq, val);
else if (tp->repair_queue == TCP_RECV_QUEUE)
- tp->rcv_nxt = val;
+ WRITE_ONCE(tp->rcv_nxt, val);
else
err = -EINVAL;
break;
void tcp_done(struct sock *sk)
{
- struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
+ struct request_sock *req;
+
+ /* We might be called with a new socket, after
+ * inet_csk_prepare_forced_close() has been called,
+ * so we cannot use lockdep_sock_is_held(sk).
+ */
+ req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1);
if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
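
tcp_done() shows the other side of the RCU conversion: a reader that has update-side exclusion can use rcu_dereference_protected() with a condition proving it, and here the condition is a literal 1 because the socket may not yet be lockable. A toy model of that contract, with an assert() standing in for lockdep (everything below is illustrative):

#include <assert.h>
#include <stdio.h>

struct request { int id; };

static struct request *fastopen_rsk;
static int sock_locked;	/* stand-in for lockdep_sock_is_held() */

/* Sketch of rcu_dereference_protected(): no read-side barriers, but
 * the caller must prove update-side exclusion holds.
 */
static struct request *deref_protected(int held)
{
	assert(held);		/* lockdep would splat here instead */
	return fastopen_rsk;
}

int main(void)
{
	struct request req = { .id = 7 };

	fastopen_rsk = &req;
	sock_locked = 1;
	printf("req %d\n", deref_protected(sock_locked)->id);

	/* tcp_done() passes a literal 1: the socket may not be lockable
	 * yet, so the caller vouches for exclusion by construction.
	 */
	printf("req %d\n", deref_protected(1)->id);
	return 0;
}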
} else if (sk->sk_type == SOCK_STREAM) {
const struct tcp_sock *tp = tcp_sk(sk);
- r->idiag_rqueue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
- r->idiag_wqueue = tp->write_seq - tp->snd_una;
+ r->idiag_rqueue = max_t(int, READ_ONCE(tp->rcv_nxt) -
+ READ_ONCE(tp->copied_seq), 0);
+ r->idiag_wqueue = READ_ONCE(tp->write_seq) - tp->snd_una;
}
if (info)
tcp_get_info(sk, info);
*/
tp = tcp_sk(child);
- tp->fastopen_rsk = req;
+ rcu_assign_pointer(tp->fastopen_rsk, req);
tcp_rsk(req)->tfo_listener = true;
/* RFC1323: The window in SYN & SYN/ACK segments is never
sndmem *= nr_segs * per_mss;
if (sk->sk_sndbuf < sndmem)
- sk->sk_sndbuf = min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]);
+ WRITE_ONCE(sk->sk_sndbuf,
+ min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]));
}
/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
!(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
!tcp_under_memory_pressure(sk) &&
sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
- sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
- net->ipv4.sysctl_tcp_rmem[2]);
+ WRITE_ONCE(sk->sk_rcvbuf,
+ min(atomic_read(&sk->sk_rmem_alloc),
+ net->ipv4.sysctl_tcp_rmem[2]));
}
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
rcvbuf = min_t(u64, rcvwin * rcvmem,
sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
if (rcvbuf > sk->sk_rcvbuf) {
- sk->sk_rcvbuf = rcvbuf;
+ WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
/* Make the window clamp follow along. */
tp->window_clamp = tcp_win_from_space(sk, rcvbuf);
struct tcp_sock *tp = tcp_sk(sk);
bool recovered = !before(tp->snd_una, tp->high_seq);
- if ((flag & FLAG_SND_UNA_ADVANCED || tp->fastopen_rsk) &&
+ if ((flag & FLAG_SND_UNA_ADVANCED || rcu_access_pointer(tp->fastopen_rsk)) &&
tcp_try_undo_loss(sk, false))
return;
/* If the retrans timer is currently being used by Fast Open
* for SYN-ACK retrans purpose, stay put.
*/
- if (tp->fastopen_rsk)
+ if (rcu_access_pointer(tp->fastopen_rsk))
return;
if (!tp->packets_out) {
sock_owned_by_me((struct sock *)tp);
tp->bytes_received += delta;
- tp->rcv_nxt = seq;
+ WRITE_ONCE(tp->rcv_nxt, seq);
}
/* Update our send window.
}
tp->urg_data = TCP_URG_NOTYET;
- tp->urg_seq = ptr;
+ WRITE_ONCE(tp->urg_seq, ptr);
/* Disable header prediction. */
tp->pred_flags = 0;
/* Ok.. it's good. Set up sequence numbers and
* move to established.
*/
- tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+ WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
/* RFC1323: The window in SYN & SYN/ACK segments is
/* Remember, tcp_poll() does not lock socket!
* Change state from SYN-SENT only after copied_seq
* is initialized. */
- tp->copied_seq = tp->rcv_nxt;
+ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
smc_check_reset_syn(tp);
tp->tcp_header_len = sizeof(struct tcphdr);
}
- tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
- tp->copied_seq = tp->rcv_nxt;
+ WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
+ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
/* RFC1323: The window in SYN & SYN/ACK segments is
static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
{
+ struct request_sock *req;
+
tcp_try_undo_loss(sk, false);
/* Reset rtx states to prevent spurious retransmits_timed_out() */
/* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
* we no longer need req so release it.
*/
- reqsk_fastopen_remove(sk, tcp_sk(sk)->fastopen_rsk, false);
+ req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
+ lockdep_sock_is_held(sk));
+ reqsk_fastopen_remove(sk, req, false);
/* Re-arm the timer because data may have been sent out.
* This is similar to the regular data transmission case
tcp_mstamp_refresh(tp);
tp->rx_opt.saw_tstamp = 0;
- req = tp->fastopen_rsk;
+ req = rcu_dereference_protected(tp->fastopen_rsk,
+ lockdep_sock_is_held(sk));
if (req) {
bool req_stolen;
tcp_try_undo_spurious_syn(sk);
tp->retrans_stamp = 0;
tcp_init_transfer(sk, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);
- tp->copied_seq = tp->rcv_nxt;
+ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
}
smp_mb();
tcp_set_state(sk, TCP_ESTABLISHED);
* without appearing to create any others.
*/
if (likely(!tp->repair)) {
- tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
- if (tp->write_seq == 0)
- tp->write_seq = 1;
+ u32 seq = tcptw->tw_snd_nxt + 65535 + 2;
+
+ if (!seq)
+ seq = 1;
+ WRITE_ONCE(tp->write_seq, seq);
tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
}
tp->rx_opt.ts_recent = 0;
tp->rx_opt.ts_recent_stamp = 0;
if (likely(!tp->repair))
- tp->write_seq = 0;
+ WRITE_ONCE(tp->write_seq, 0);
}
inet->inet_dport = usin->sin_port;
if (likely(!tp->repair)) {
if (!tp->write_seq)
- tp->write_seq = secure_tcp_seq(inet->inet_saddr,
- inet->inet_daddr,
- inet->inet_sport,
- usin->sin_port);
+ WRITE_ONCE(tp->write_seq,
+ secure_tcp_seq(inet->inet_saddr,
+ inet->inet_daddr,
+ inet->inet_sport,
+ usin->sin_port));
tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
inet->inet_saddr,
inet->inet_daddr);
icsk = inet_csk(sk);
tp = tcp_sk(sk);
/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
- fastopen = tp->fastopen_rsk;
+ fastopen = rcu_dereference(tp->fastopen_rsk);
snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
if (sk->sk_state != TCP_LISTEN &&
!between(seq, snd_una, tp->snd_nxt)) {
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
- u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
+ u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
struct skb_shared_info *shinfo;
const struct tcphdr *th;
struct tcphdr *thtail;
if (inet_csk(sk)->icsk_bind_hash)
inet_put_port(sk);
- BUG_ON(tp->fastopen_rsk);
+ BUG_ON(rcu_access_pointer(tp->fastopen_rsk));
/* If socket is aborted during connect operation */
tcp_free_fastopen_req(tp);
/* Because we don't lock the socket,
* we might find a transient negative value.
*/
- rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
+ rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
+ READ_ONCE(tp->copied_seq), 0);
seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
i, src, srcp, dest, destp, state,
- tp->write_seq - tp->snd_una,
+ READ_ONCE(tp->write_seq) - tp->snd_una,
rx_queue,
timer_active,
jiffies_delta_to_clock_t(timer_expires - jiffies),
struct tcp_request_sock *treq = tcp_rsk(req);
struct inet_connection_sock *newicsk;
struct tcp_sock *oldtp, *newtp;
+ u32 seq;
if (!newsk)
return NULL;
/* Now setup tcp_sock */
newtp->pred_flags = 0;
- newtp->rcv_wup = newtp->copied_seq =
- newtp->rcv_nxt = treq->rcv_isn + 1;
+ seq = treq->rcv_isn + 1;
+ newtp->rcv_wup = seq;
+ WRITE_ONCE(newtp->copied_seq, seq);
+ WRITE_ONCE(newtp->rcv_nxt, seq);
newtp->segs_in = 1;
- newtp->snd_sml = newtp->snd_una =
- newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
+ seq = treq->snt_isn + 1;
+ newtp->snd_sml = newtp->snd_una = seq;
+ WRITE_ONCE(newtp->snd_nxt, seq);
+ newtp->snd_up = seq;
INIT_LIST_HEAD(&newtp->tsq_node);
INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
newtp->total_retrans = req->num_retrans;
tcp_init_xmit_timers(newsk);
- newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
+ WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);
if (sock_flag(newsk, SOCK_KEEPOPEN))
inet_csk_reset_keepalive_timer(newsk,
newtp->rx_opt.mss_clamp = req->mss;
tcp_ecn_openreq_child(newtp, req);
newtp->fastopen_req = NULL;
- newtp->fastopen_rsk = NULL;
+ RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);
__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
struct tcp_sock *tp = tcp_sk(sk);
unsigned int prior_packets = tp->packets_out;
- tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
+ WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);
__skb_unlink(skb, &sk->sk_write_queue);
tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
struct tcp_sock *tp = tcp_sk(sk);
/* Advance write_seq and place onto the write_queue. */
- tp->write_seq = TCP_SKB_CB(skb)->end_seq;
+ WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
__skb_header_release(skb);
tcp_add_write_queue_tail(sk, skb);
- sk->sk_wmem_queued += skb->truesize;
+ sk_wmem_queued_add(sk, skb->truesize);
sk_mem_charge(sk, skb->truesize);
}
return -ENOMEM; /* We'll just try again later. */
skb_copy_decrypted(buff, skb);
- sk->sk_wmem_queued += buff->truesize;
+ sk_wmem_queued_add(sk, buff->truesize);
sk_mem_charge(sk, buff->truesize);
nlen = skb->len - len - nsize;
buff->truesize += nlen;
if (delta_truesize) {
skb->truesize -= delta_truesize;
- sk->sk_wmem_queued -= delta_truesize;
+ sk_wmem_queued_add(sk, -delta_truesize);
sk_mem_uncharge(sk, delta_truesize);
sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
}
return -ENOMEM;
skb_copy_decrypted(buff, skb);
- sk->sk_wmem_queued += buff->truesize;
+ sk_wmem_queued_add(sk, buff->truesize);
sk_mem_charge(sk, buff->truesize);
buff->truesize += nlen;
skb->truesize -= nlen;
nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
if (!nskb)
return -1;
- sk->sk_wmem_queued += nskb->truesize;
+ sk_wmem_queued_add(sk, nskb->truesize);
sk_mem_charge(sk, nskb->truesize);
skb = tcp_send_head(sk);
/* Don't do any loss probe on a Fast Open connection before 3WHS
* finishes.
*/
- if (tp->fastopen_rsk)
+ if (rcu_access_pointer(tp->fastopen_rsk))
return false;
early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
* if FIN had been sent. This is because retransmit path
* does not change tp->snd_nxt.
*/
- tp->snd_nxt++;
+ WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1);
return;
}
} else {
tcp_rtx_queue_unlink_and_free(skb, sk);
__skb_header_release(nskb);
tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
- sk->sk_wmem_queued += nskb->truesize;
+ sk_wmem_queued_add(sk, nskb->truesize);
sk_mem_charge(sk, nskb->truesize);
skb = nskb;
}
tp->snd_una = tp->write_seq;
tp->snd_sml = tp->write_seq;
tp->snd_up = tp->write_seq;
- tp->snd_nxt = tp->write_seq;
+ WRITE_ONCE(tp->snd_nxt, tp->write_seq);
if (likely(!tp->repair))
tp->rcv_nxt = 0;
else
tp->rcv_tstamp = tcp_jiffies32;
tp->rcv_wup = tp->rcv_nxt;
- tp->copied_seq = tp->rcv_nxt;
+ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
inet_csk(sk)->icsk_retransmits = 0;
tcb->end_seq += skb->len;
__skb_header_release(skb);
- sk->sk_wmem_queued += skb->truesize;
+ sk_wmem_queued_add(sk, skb->truesize);
sk_mem_charge(sk, skb->truesize);
- tp->write_seq = tcb->end_seq;
+ WRITE_ONCE(tp->write_seq, tcb->end_seq);
tp->packets_out += tcp_skb_pcount(skb);
}
/* We change tp->snd_nxt after the tcp_transmit_skb() call
* in order to make this packet get counted in tcpOutSegs.
*/
- tp->snd_nxt = tp->write_seq;
+ WRITE_ONCE(tp->snd_nxt, tp->write_seq);
tp->pushed_seq = tp->write_seq;
buff = tcp_send_head(sk);
if (unlikely(buff)) {
- tp->snd_nxt = TCP_SKB_CB(buff)->seq;
+ WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
tp->pushed_seq = TCP_SKB_CB(buff)->seq;
}
TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
* Timer for Fast Open socket to retransmit SYNACK. Note that the
* sk here is the child socket, not the parent (listener) socket.
*/
-static void tcp_fastopen_synack_timer(struct sock *sk)
+static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
{
struct inet_connection_sock *icsk = inet_csk(sk);
int max_retries = icsk->icsk_syn_retries ? :
sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
struct tcp_sock *tp = tcp_sk(sk);
- struct request_sock *req;
- req = tcp_sk(sk)->fastopen_rsk;
req->rsk_ops->syn_ack_timeout(req);
if (req->num_timeout >= max_retries) {
struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
+ struct request_sock *req;
- if (tp->fastopen_rsk) {
+ req = rcu_dereference_protected(tp->fastopen_rsk,
+ lockdep_sock_is_held(sk));
+ if (req) {
WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
sk->sk_state != TCP_FIN_WAIT1);
- tcp_fastopen_synack_timer(sk);
+ tcp_fastopen_synack_timer(sk, req);
/* Before we receive ACK to our SYN-ACK don't retransmit
* anything else (e.g., data or FIN segments).
*/
{
ether_setup(dev);
+ dev->max_mtu = 0;
dev->netdev_ops = &ip6erspan_netdev_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = ip6gre_dev_free;
inet6_sk(skb->sk) : NULL;
struct ip6_frag_state state;
unsigned int mtu, hlen, nexthdr_offset;
+ ktime_t tstamp = skb->tstamp;
int hroom, err = 0;
__be32 frag_id;
u8 *prevhdr, nexthdr = 0;
if (iter.frag)
ip6_fraglist_prepare(skb, &iter);
+ skb->tstamp = tstamp;
err = output(net, sk, skb);
if (!err)
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
/*
* Put this fragment into the sending queue.
*/
+ frag->tstamp = tstamp;
err = output(net, sk, frag);
if (err)
goto fail;
struct sk_buff *))
{
int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
+ ktime_t tstamp = skb->tstamp;
struct ip6_frag_state state;
u8 *prevhdr, nexthdr = 0;
unsigned int mtu, hlen;
if (iter.frag)
ip6_fraglist_prepare(skb, &iter);
+ skb->tstamp = tstamp;
err = output(net, sk, data, skb);
if (err || !iter.frag)
break;
goto blackhole;
}
+ skb2->tstamp = tstamp;
err = output(net, sk, data, skb2);
if (err)
goto blackhole;
!ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
tp->rx_opt.ts_recent = 0;
tp->rx_opt.ts_recent_stamp = 0;
- tp->write_seq = 0;
+ WRITE_ONCE(tp->write_seq, 0);
}
sk->sk_v6_daddr = usin->sin6_addr;
if (likely(!tp->repair)) {
if (!tp->write_seq)
- tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
- sk->sk_v6_daddr.s6_addr32,
- inet->inet_sport,
- inet->inet_dport);
+ WRITE_ONCE(tp->write_seq,
+ secure_tcpv6_seq(np->saddr.s6_addr32,
+ sk->sk_v6_daddr.s6_addr32,
+ inet->inet_sport,
+ inet->inet_dport));
tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
np->saddr.s6_addr32,
sk->sk_v6_daddr.s6_addr32);
tp = tcp_sk(sk);
/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
- fastopen = tp->fastopen_rsk;
+ fastopen = rcu_dereference(tp->fastopen_rsk);
snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
if (sk->sk_state != TCP_LISTEN &&
!between(seq, snd_una, tp->snd_nxt)) {
/* Because we don't lock the socket,
* we might find a transient negative value.
*/
- rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
+ rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
+ READ_ONCE(tp->copied_seq), 0);
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
state,
- tp->write_seq - tp->snd_una,
+ READ_ONCE(tp->write_seq) - tp->snd_una,
rx_queue,
timer_active,
jiffies_delta_to_clock_t(timer_expires - jiffies),
*
* Send data via reliable llc2 connection.
* Returns 0 upon success, non-zero if action did not succeed.
+ *
+ * This function always consumes a reference to the skb.
*/
static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock)
{
struct llc_sock* llc = llc_sk(sk);
- int rc = 0;
if (unlikely(llc_data_accept_state(llc->state) ||
llc->remote_busy_flag ||
llc->p_flag)) {
long timeout = sock_sndtimeo(sk, noblock);
+ int rc;
rc = llc_ui_wait_for_busy_core(sk, timeout);
+ if (rc) {
+ kfree_skb(skb);
+ return rc;
+ }
}
- if (unlikely(!rc))
- rc = llc_build_and_send_pkt(sk, skb);
- return rc;
+ return llc_build_and_send_pkt(sk, skb);
}
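
The llc_ui_send_data() rework settles the skb ownership rule that the surrounding hunks document: the function consumes the buffer on every path, success or failure, so callers must not free it afterwards. A minimal sketch of consume-on-error semantics (plain malloc/free standing in for skb handling):

#include <stdio.h>
#include <stdlib.h>

struct buf { int len; };

/* Sketch of "always consumes the buffer": the callee frees on failure
 * too, so the caller must not free the pointer again.
 */
static int send_data(struct buf *b, int busy)
{
	if (busy) {
		free(b);	/* error path consumes as well */
		return -1;
	}
	/* ... transmit ... */
	free(b);		/* success path consumes */
	return 0;
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));
	int rc;

	if (!b)
		return 1;
	rc = send_data(b, 0);
	b = NULL;		/* caller surrenders ownership */
	printf("rc=%d\n", rc);
	free(b);		/* free(NULL) is a safe no-op */
	return 0;
}

The llc_ui_sendmsg() hunk above follows the same discipline: skb is set to NULL after each hand-off, and the single kfree_skb() at out: only frees what the caller still owns.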
static void llc_ui_sk_init(struct socket *sock, struct sock *sk)
DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
int flags = msg->msg_flags;
int noblock = flags & MSG_DONTWAIT;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
size_t size = 0;
int rc = -EINVAL, copied = 0, hdrlen;
lock_sock(sk);
if (addr) {
if (msg->msg_namelen < sizeof(*addr))
- goto release;
+ goto out;
} else {
if (llc_ui_addr_null(&llc->addr))
- goto release;
+ goto out;
addr = &llc->addr;
}
/* must bind connection to sap if user hasn't done it. */
/* bind to sap with null dev, exclusive. */
rc = llc_ui_autobind(sock, addr);
if (rc)
- goto release;
+ goto out;
}
hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr);
size = hdrlen + len;
copied = size - hdrlen;
rc = -EINVAL;
if (copied < 0)
- goto release;
+ goto out;
release_sock(sk);
skb = sock_alloc_send_skb(sk, size, noblock, &rc);
lock_sock(sk);
if (!skb)
- goto release;
+ goto out;
skb->dev = llc->dev;
skb->protocol = llc_proto_type(addr->sllc_arphrd);
skb_reserve(skb, hdrlen);
if (sk->sk_type == SOCK_DGRAM || addr->sllc_ua) {
llc_build_and_send_ui_pkt(llc->sap, skb, addr->sllc_mac,
addr->sllc_sap);
+ skb = NULL;
goto out;
}
if (addr->sllc_test) {
llc_build_and_send_test_pkt(llc->sap, skb, addr->sllc_mac,
addr->sllc_sap);
+ skb = NULL;
goto out;
}
if (addr->sllc_xid) {
llc_build_and_send_xid_pkt(llc->sap, skb, addr->sllc_mac,
addr->sllc_sap);
+ skb = NULL;
goto out;
}
rc = -ENOPROTOOPT;
if (!(sk->sk_type == SOCK_STREAM && !addr->sllc_ua))
goto out;
rc = llc_ui_send_data(sk, skb, noblock);
+ skb = NULL;
out:
- if (rc) {
- kfree_skb(skb);
-release:
+ kfree_skb(skb);
+ if (rc)
dprintk("%s: failed sending from %02X to %02X: %d\n",
__func__, llc->laddr.lsap, llc->daddr.lsap, rc);
- }
release_sock(sk);
return rc ? : copied;
}
llc_pdu_init_as_i_cmd(skb, 1, llc->vS, llc->vR);
rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
+ skb_get(skb);
llc_conn_send_pdu(sk, skb);
llc_conn_ac_inc_vs_by_1(sk, skb);
}
llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
- rc = llc_conn_send_pdu(sk, skb);
+ skb_get(skb);
+ llc_conn_send_pdu(sk, skb);
llc_conn_ac_inc_vs_by_1(sk, skb);
}
return rc;
llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
+ skb_get(skb);
llc_conn_send_pdu(sk, skb);
llc_conn_ac_inc_vs_by_1(sk, skb);
}
llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR);
rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
- rc = llc_conn_send_pdu(sk, skb);
+ skb_get(skb);
+ llc_conn_send_pdu(sk, skb);
llc_conn_ac_inc_vs_by_1(sk, skb);
}
return rc;
#endif
static int llc_find_offset(int state, int ev_type);
-static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb);
+static void llc_conn_send_pdus(struct sock *sk);
static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
static int llc_exec_conn_trans_actions(struct sock *sk,
struct llc_conn_state_trans *trans,
* (executing its actions and changing state), upper layer will be
* indicated or confirmed, if needed. Returns 0 for success, 1 for
* failure. The socket lock has to be held before calling this function.
+ *
+ * This function always consumes a reference to the skb.
*/
int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
{
struct llc_sock *llc = llc_sk(skb->sk);
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
- /*
- * We have to hold the skb, because llc_conn_service will kfree it in
- * the sending path and we need to look at the skb->cb, where we encode
- * llc_conn_state_ev.
- */
- skb_get(skb);
ev->ind_prim = ev->cfm_prim = 0;
/*
* Send event to state machine
rc = llc_conn_service(skb->sk, skb);
if (unlikely(rc != 0)) {
printk(KERN_ERR "%s: llc_conn_service failed\n", __func__);
- goto out_kfree_skb;
- }
-
- if (unlikely(!ev->ind_prim && !ev->cfm_prim)) {
- /* indicate or confirm not required */
- if (!skb->next)
- goto out_kfree_skb;
goto out_skb_put;
}
- if (unlikely(ev->ind_prim && ev->cfm_prim)) /* Paranoia */
- skb_get(skb);
-
switch (ev->ind_prim) {
case LLC_DATA_PRIM:
+ skb_get(skb);
llc_save_primitive(sk, skb, LLC_DATA_PRIM);
if (unlikely(sock_queue_rcv_skb(sk, skb))) {
/*
* skb->sk pointing to the newly created struct sock in
* llc_conn_handler. -acme
*/
+ skb_get(skb);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_state_change(sk);
break;
sk->sk_state_change(sk);
}
}
- kfree_skb(skb);
sock_put(sk);
break;
case LLC_RESET_PRIM:
* RESET is not being notified to upper layers for now
*/
printk(KERN_INFO "%s: received a reset ind!\n", __func__);
- kfree_skb(skb);
break;
default:
- if (ev->ind_prim) {
+ if (ev->ind_prim)
printk(KERN_INFO "%s: received unknown %d prim!\n",
__func__, ev->ind_prim);
- kfree_skb(skb);
- }
/* No indication */
break;
}
printk(KERN_INFO "%s: received a reset conf!\n", __func__);
break;
default:
- if (ev->cfm_prim) {
+ if (ev->cfm_prim)
printk(KERN_INFO "%s: received unknown %d prim!\n",
__func__, ev->cfm_prim);
- break;
- }
- goto out_skb_put; /* No confirmation */
+ /* No confirmation */
+ break;
}
-out_kfree_skb:
- kfree_skb(skb);
out_skb_put:
kfree_skb(skb);
return rc;
}
-int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
+void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
{
/* queue PDU to send to MAC layer */
skb_queue_tail(&sk->sk_write_queue, skb);
- return llc_conn_send_pdus(sk, skb);
+ llc_conn_send_pdus(sk);
}
/**
if (howmany_resend > 0)
llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
/* any PDUs to re-send are queued up; start sending to MAC */
- llc_conn_send_pdus(sk, NULL);
+ llc_conn_send_pdus(sk);
out:;
}
if (howmany_resend > 0)
llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
/* any PDUs to re-send are queued up; start sending to MAC */
- llc_conn_send_pdus(sk, NULL);
+ llc_conn_send_pdus(sk);
out:;
}
/**
* llc_conn_send_pdus - Sends queued PDUs
* @sk: active connection
- * @hold_skb: the skb held by caller, or NULL if does not care
*
- * Sends queued pdus to MAC layer for transmission. When @hold_skb is
- * NULL, always return 0. Otherwise, return 0 if @hold_skb is sent
- * successfully, or 1 for failure.
+ * Sends queued pdus to MAC layer for transmission.
*/
-static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
+static void llc_conn_send_pdus(struct sock *sk)
{
struct sk_buff *skb;
- int ret = 0;
while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
if (!skb2)
break;
- dev_queue_xmit(skb2);
- } else {
- bool is_target = skb == hold_skb;
- int rc;
-
- if (is_target)
- skb_get(skb);
- rc = dev_queue_xmit(skb);
- if (is_target)
- ret = rc;
+ skb = skb2;
}
+ dev_queue_xmit(skb);
}
-
- return ret;
}
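
With the hold_skb bookkeeping gone, llc_conn_send_pdus() has a single ownership story: every skb on the write queue is dequeued once and handed to dev_queue_xmit(), which always consumes its argument. A small sketch of that dequeue-and-transfer loop (the list and xmit() below are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct buf { struct buf *next; int id; };

static struct buf *queue_head;

static struct buf *dequeue(void)
{
	struct buf *b = queue_head;

	if (b)
		queue_head = b->next;
	return b;
}

/* Like dev_queue_xmit(): takes over the reference unconditionally. */
static void xmit(struct buf *b)
{
	printf("xmit %d\n", b->id);
	free(b);
}

/* Sketch of the reworked llc_conn_send_pdus(): nothing is reported
 * back, every dequeued buffer is handed to the transmit path once.
 */
static void send_pdus(void)
{
	struct buf *b;

	while ((b = dequeue()) != NULL)
		xmit(b);
}

int main(void)
{
	struct buf *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

	if (!a || !b)
		return 1;
	a->id = 1; a->next = b;
	b->id = 2; b->next = NULL;
	queue_head = a;
	send_pdus();
	return 0;
}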
/**
else {
dprintk("%s: adding to backlog...\n", __func__);
llc_set_backlog_type(skb, LLC_PACKET);
- if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
+ if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
goto drop_unlock;
}
out:
* closed and -EBUSY when sending data is not permitted in this state or
* LLC has sent an I pdu with p bit set to 1 and is waiting for its
* response.
+ *
+ * This function always consumes a reference to the skb.
*/
int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb)
{
struct llc_sock *llc = llc_sk(sk);
if (unlikely(llc->state == LLC_CONN_STATE_ADM))
- goto out;
+ goto out_free;
rc = -EBUSY;
if (unlikely(llc_data_accept_state(llc->state) || /* data_conn_refuse */
llc->p_flag)) {
llc->failed_data_req = 1;
- goto out;
+ goto out_free;
}
ev = llc_conn_ev(skb);
ev->type = LLC_CONN_EV_TYPE_PRIM;
ev->prim = LLC_DATA_PRIM;
ev->prim_type = LLC_PRIM_TYPE_REQ;
skb->dev = llc->dev;
- rc = llc_conn_state_process(sk, skb);
-out:
+ return llc_conn_state_process(sk, skb);
+
+out_free:
+ kfree_skb(skb);
return rc;
}
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_ui_cmd(skb);
rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
- if (likely(!rc))
+ if (likely(!rc)) {
+ skb_get(skb);
rc = dev_queue_xmit(skb);
+ }
return rc;
}
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
- if (likely(!rc))
+ if (likely(!rc)) {
+ skb_get(skb);
rc = dev_queue_xmit(skb);
+ }
return rc;
}
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_test_cmd(skb);
rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
- if (likely(!rc))
+ if (likely(!rc)) {
+ skb_get(skb);
rc = dev_queue_xmit(skb);
+ }
return rc;
}
* After executing actions of the event, upper layer will be indicated
* if needed (on receiving a UI frame). sk can be null for the
* datalink_proto case.
+ *
+ * This function always consumes a reference to the skb.
*/
static void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb)
{
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
- /*
- * We have to hold the skb, because llc_sap_next_state
- * will kfree it in the sending path and we need to
- * look at the skb->cb, where we encode llc_sap_state_ev.
- */
- skb_get(skb);
ev->ind_cfm_flag = 0;
llc_sap_next_state(sap, skb);
- if (ev->ind_cfm_flag == LLC_IND) {
- if (skb->sk->sk_state == TCP_LISTEN)
- kfree_skb(skb);
- else {
- llc_save_primitive(skb->sk, skb, ev->prim);
- /* queue skb to the user. */
- if (sock_queue_rcv_skb(skb->sk, skb))
- kfree_skb(skb);
- }
+ if (ev->ind_cfm_flag == LLC_IND && skb->sk->sk_state != TCP_LISTEN) {
+ llc_save_primitive(skb->sk, skb, ev->prim);
+
+ /* queue skb to the user. */
+ if (sock_queue_rcv_skb(skb->sk, skb) == 0)
+ return;
}
kfree_skb(skb);
}
rcu_read_lock();
ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
- if (WARN_ON_ONCE(ssid == NULL))
+ if (WARN_ONCE(!ssid || ssid[1] > IEEE80211_MAX_SSID_LEN,
+ "invalid SSID element (len=%d)", ssid ? ssid[1] : -1))
ssid_len = 0;
else
ssid_len = ssid[1];
rcu_read_lock();
ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
- if (!ssidie) {
+ if (!ssidie || ssidie[1] > sizeof(assoc_data->ssid)) {
rcu_read_unlock();
kfree(assoc_data);
return -EINVAL;
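
Both mac80211 hunks above enforce the same rule: an information element's length byte is attacker-controlled, so it must be checked against the destination buffer before any copy. A compact sketch of the bound check (copy_ssid() and the element layout mirror the IE TLV format, but the names are local to the sketch):

#include <stdio.h>
#include <string.h>

#define MAX_SSID_LEN 32

/* ie points at a TLV: ie[0] = id, ie[1] = length, ie + 2 = payload.
 * Reject lengths that exceed the destination instead of trusting them.
 */
static int copy_ssid(const unsigned char *ie, unsigned char *dst, size_t dstlen)
{
	if (!ie || ie[1] > dstlen)
		return -1;
	memcpy(dst, ie + 2, ie[1]);
	return ie[1];
}

int main(void)
{
	unsigned char elem[] = { 0, 4, 'h', 'o', 'm', 'e' };
	unsigned char ssid[MAX_SSID_LEN];
	int n = copy_ssid(elem, ssid, sizeof(ssid));

	printf("copied %d bytes\n", n);
	return 0;
}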
case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
/* process for all: mesh, mlme, ibss */
break;
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ if (is_multicast_ether_addr(mgmt->da) &&
+ !is_broadcast_ether_addr(mgmt->da))
+ return RX_DROP_MONITOR;
+
+ /* process only for station/IBSS */
+ if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+ sdata->vif.type != NL80211_IFTYPE_ADHOC)
+ return RX_DROP_MONITOR;
+ break;
case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
if (is_multicast_ether_addr(mgmt->da) &&
!is_broadcast_ether_addr(mgmt->da))
return 0;
}
+static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_sub_if_data *sdata_iter;
+
+ if (!ieee80211_is_radar_required(local))
+ return true;
+
+ if (!regulatory_pre_cac_allowed(local->hw.wiphy))
+ return false;
+
+ mutex_lock(&local->iflist_mtx);
+ list_for_each_entry(sdata_iter, &local->interfaces, list) {
+ if (sdata_iter->wdev.cac_started) {
+ mutex_unlock(&local->iflist_mtx);
+ return false;
+ }
+ }
+ mutex_unlock(&local->iflist_mtx);
+
+ return true;
+}
+
static bool ieee80211_can_scan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
- if (ieee80211_is_radar_required(local))
+ if (!__ieee80211_can_leave_ch(sdata))
return false;
if (!list_empty(&local->roc_list))
lockdep_assert_held(&local->mtx);
- if (local->scan_req || ieee80211_is_radar_required(local))
+ if (local->scan_req)
+ return -EBUSY;
+
+ if (!__ieee80211_can_leave_ch(sdata))
return -EBUSY;
if (!ieee80211_can_scan(local, sdata)) {
if (nf_ct_is_confirmed(ct))
extra_jiffies += nfct_time_stamp;
- if (ct->timeout != extra_jiffies)
- ct->timeout = extra_jiffies;
+ if (READ_ONCE(ct->timeout) != extra_jiffies)
+ WRITE_ONCE(ct->timeout, extra_jiffies);
acct:
if (do_acct)
nf_ct_acct_update(ct, ctinfo, skb->len);
{
int err;
- err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype);
+ err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype,
+ skb->mac_len);
if (err)
return err;
{
int err;
- err = skb_mpls_pop(skb, ethertype);
+ err = skb_mpls_pop(skb, ethertype, skb->mac_len);
if (err)
return err;
struct rxrpc_peer *peer; /* Peer record for remote address */
struct rxrpc_sock __rcu *socket; /* socket responsible */
struct rxrpc_net *rxnet; /* Network namespace to which call belongs */
+ const struct rxrpc_security *security; /* applied security module */
struct mutex user_mutex; /* User access mutex */
unsigned long ack_at; /* When deferred ACK needs to happen */
unsigned long ack_lost_at; /* When ACK is figured as lost */
smp_store_release(&b->conn_backlog_head,
(head + 1) & (size - 1));
- trace_rxrpc_conn(conn, rxrpc_conn_new_service,
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
atomic_read(&conn->usage), here);
}
call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
call->state = RXRPC_CALL_SERVER_PREALLOC;
- trace_rxrpc_call(call, rxrpc_call_new_service,
+ trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
atomic_read(&call->usage),
here, (const void *)user_call_ID);
rxrpc_see_call(call);
call->conn = conn;
+ call->security = conn->security;
call->peer = rxrpc_get_peer(conn->params.peer);
call->cong_cwnd = call->peer->cong_cwnd;
return call;
if (p->intr)
__set_bit(RXRPC_CALL_IS_INTR, &call->flags);
call->tx_total_len = p->tx_total_len;
- trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
+ trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
+ atomic_read(&call->usage),
here, (const void *)p->user_call_ID);
/* We need to protect a partially set up call against the user as we
if (ret < 0)
goto error;
- trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
- here, NULL);
+ trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
+ atomic_read(&call->usage), here, NULL);
rxrpc_start_call_timer(call);
error:
__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
RX_CALL_DEAD, ret);
- trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
- here, ERR_PTR(ret));
+ trace_rxrpc_call(call->debug_id, rxrpc_call_error,
+ atomic_read(&call->usage), here, ERR_PTR(ret));
rxrpc_release_call(rx, call);
mutex_unlock(&call->user_mutex);
rxrpc_put_call(call, rxrpc_call_put);
if (n == 0)
return false;
if (rxrpc_queue_work(&call->processor))
- trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
+ trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1,
+ here, NULL);
else
rxrpc_put_call(call, rxrpc_call_put_noqueue);
return true;
int n = atomic_read(&call->usage);
ASSERTCMP(n, >=, 1);
if (rxrpc_queue_work(&call->processor))
- trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
+ trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n,
+ here, NULL);
else
rxrpc_put_call(call, rxrpc_call_put_noqueue);
return true;
if (call) {
int n = atomic_read(&call->usage);
- trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
+ trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n,
+ here, NULL);
}
}
const void *here = __builtin_return_address(0);
int n = atomic_inc_return(&call->usage);
- trace_rxrpc_call(call, op, n, here, NULL);
+ trace_rxrpc_call(call->debug_id, op, n, here, NULL);
}
/*
_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
- trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
+ trace_rxrpc_call(call->debug_id, rxrpc_call_release,
+ atomic_read(&call->usage),
here, (const void *)call->flags);
ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
- if (conn) {
+ if (conn)
rxrpc_disconnect_call(call);
- conn->security->free_call_crypto(call);
- }
+ if (call->security)
+ call->security->free_call_crypto(call);
rxrpc_cleanup_ring(call);
_leave("");
{
struct rxrpc_net *rxnet = call->rxnet;
const void *here = __builtin_return_address(0);
+ unsigned int debug_id = call->debug_id;
int n;
ASSERT(call != NULL);
n = atomic_dec_return(&call->usage);
- trace_rxrpc_call(call, op, n, here, NULL);
+ trace_rxrpc_call(debug_id, op, n, here, NULL);
ASSERTCMP(n, >=, 0);
if (n == 0) {
_debug("call %d dead", call->debug_id);
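
The long run of rxrpc trace conversions above all fix one bug class: after atomic_dec_return() drops the last reference, another CPU may already have freed the object, so the tracepoint must work from values captured beforehand. A minimal sketch of snapshot-then-put (userspace atomics standing in for the kernel's refcounting):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int usage;
	unsigned int debug_id;
};

static void trace(unsigned int debug_id, int n)
{
	printf("put: id=%u usage=%d\n", debug_id, n);
}

/* Copy the fields the tracepoint needs while our reference still pins
 * the object, then drop the reference.
 */
static void put_obj(struct obj *o)
{
	unsigned int debug_id = o->debug_id;	/* safe: we still hold a ref */
	int n = atomic_fetch_sub(&o->usage, 1) - 1;

	trace(debug_id, n);			/* must not touch *o */
	if (n == 0)
		free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->usage, 1);
	o->debug_id = 42;
	put_obj(o);
	return 0;
}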
rxrpc_get_local(conn->params.local);
key_get(conn->params.key);
- trace_rxrpc_conn(conn, rxrpc_conn_new_client, atomic_read(&conn->usage),
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client,
+ atomic_read(&conn->usage),
__builtin_return_address(0));
trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
_leave(" = %p", conn);
if (cp->exclusive) {
call->conn = candidate;
+ call->security = candidate->security;
call->security_ix = candidate->security_ix;
call->service_id = candidate->service_id;
_leave(" = 0 [exclusive %d]", candidate->debug_id);
candidate_published:
set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
call->conn = candidate;
+ call->security = candidate->security;
call->security_ix = candidate->security_ix;
call->service_id = candidate->service_id;
spin_unlock(&local->client_conns_lock);
spin_lock(&conn->channel_lock);
call->conn = conn;
+ call->security = conn->security;
call->security_ix = conn->security_ix;
call->service_id = conn->service_id;
list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
void rxrpc_put_client_conn(struct rxrpc_connection *conn)
{
const void *here = __builtin_return_address(0);
+ unsigned int debug_id = conn->debug_id;
int n;
do {
n = atomic_dec_return(&conn->usage);
- trace_rxrpc_conn(conn, rxrpc_conn_put_client, n, here);
+ trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here);
if (n > 0)
return;
ASSERTCMP(n, >=, 0);
if (n == 0)
return false;
if (rxrpc_queue_work(&conn->processor))
- trace_rxrpc_conn(conn, rxrpc_conn_queued, n + 1, here);
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_queued, n + 1, here);
else
rxrpc_put_connection(conn);
return true;
if (conn) {
int n = atomic_read(&conn->usage);
- trace_rxrpc_conn(conn, rxrpc_conn_seen, n, here);
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_seen, n, here);
}
}
const void *here = __builtin_return_address(0);
int n = atomic_inc_return(&conn->usage);
- trace_rxrpc_conn(conn, rxrpc_conn_got, n, here);
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n, here);
}
/*
if (conn) {
int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
if (n > 0)
- trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n + 1, here);
else
conn = NULL;
}
void rxrpc_put_service_conn(struct rxrpc_connection *conn)
{
const void *here = __builtin_return_address(0);
+ unsigned int debug_id = conn->debug_id;
int n;
n = atomic_dec_return(&conn->usage);
- trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
+ trace_rxrpc_conn(debug_id, rxrpc_conn_put_service, n, here);
ASSERTCMP(n, >=, 0);
if (n == 1)
rxrpc_set_service_reap_timer(conn->params.local->rxnet,
*/
if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
continue;
- trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, NULL);
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_reap_service, 0, NULL);
if (rxrpc_conn_is_client(conn))
BUG();
list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
write_unlock(&rxnet->conn_lock);
- trace_rxrpc_conn(conn, rxrpc_conn_new_service,
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
atomic_read(&conn->usage),
__builtin_return_address(0));
}
{
struct sock_exterr_skb *serr;
struct sockaddr_rxrpc srx;
- struct rxrpc_local *local = sk->sk_user_data;
+ struct rxrpc_local *local;
struct rxrpc_peer *peer;
struct sk_buff *skb;
+ rcu_read_lock();
+ local = rcu_dereference_sk_user_data(sk);
+ if (unlikely(!local)) {
+ rcu_read_unlock();
+ return;
+ }
_enter("%p{%d}", sk, local->debug_id);
/* Clear the outstanding error value on the socket so that it doesn't
skb = sock_dequeue_err_skb(sk);
if (!skb) {
+ rcu_read_unlock();
_leave("UDP socket errqueue empty");
return;
}
serr = SKB_EXT_ERR(skb);
if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
_leave("UDP empty message");
+ rcu_read_unlock();
rxrpc_free_skb(skb, rxrpc_skb_freed);
return;
}
- rcu_read_lock();
peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
if (peer && !rxrpc_get_peer_maybe(peer))
peer = NULL;
peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
if (peer) {
atomic_set(&peer->usage, 1);
- peer->local = local;
+ peer->local = rxrpc_get_local(local);
INIT_HLIST_HEAD(&peer->error_targets);
peer->service_conns = RB_ROOT;
seqlock_init(&peer->service_conn_lock);
unsigned long hash_key;
hash_key = rxrpc_peer_hash_key(local, &peer->srx);
- peer->local = local;
rxrpc_init_peer(rx, peer, hash_key);
spin_lock(&rxnet->peer_hash_lock);
int n;
n = atomic_inc_return(&peer->usage);
- trace_rxrpc_peer(peer, rxrpc_peer_got, n, here);
+ trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n, here);
return peer;
}
if (peer) {
int n = atomic_fetch_add_unless(&peer->usage, 1, 0);
if (n > 0)
- trace_rxrpc_peer(peer, rxrpc_peer_got, n + 1, here);
+ trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n + 1, here);
else
peer = NULL;
}
list_del_init(&peer->keepalive_link);
spin_unlock_bh(&rxnet->peer_hash_lock);
+ rxrpc_put_local(peer->local);
kfree_rcu(peer, rcu);
}
void rxrpc_put_peer(struct rxrpc_peer *peer)
{
const void *here = __builtin_return_address(0);
+ unsigned int debug_id;
int n;
if (peer) {
+ debug_id = peer->debug_id;
n = atomic_dec_return(&peer->usage);
- trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
+ trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
if (n == 0)
__rxrpc_put_peer(peer);
}
void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
{
const void *here = __builtin_return_address(0);
+ unsigned int debug_id = peer->debug_id;
int n;
n = atomic_dec_return(&peer->usage);
- trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
+ trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
if (n == 0) {
hash_del_rcu(&peer->hash_link);
list_del_init(&peer->keepalive_link);
+ rxrpc_put_local(peer->local);
kfree_rcu(peer, rcu);
}
}
seq += subpacket;
}
- return call->conn->security->verify_packet(call, skb, offset, len,
- seq, cksum);
+ return call->security->verify_packet(call, skb, offset, len,
+ seq, cksum);
}
/*
*_offset = offset;
*_len = len;
- call->conn->security->locate_data(call, skb, _offset, _len);
+ call->security->locate_data(call, skb, _offset, _len);
return 0;
}
call->tx_winsize)
sp->hdr.flags |= RXRPC_MORE_PACKETS;
- ret = conn->security->secure_packet(
+ ret = call->security->secure_packet(
call, skb, skb->mark, skb->head);
if (ret < 0)
goto out;
case RXRPC_CALL_SERVER_PREALLOC:
case RXRPC_CALL_SERVER_SECURING:
case RXRPC_CALL_SERVER_ACCEPTING:
+ rxrpc_put_call(call, rxrpc_call_put);
ret = -EBUSY;
goto error_release_sock;
default:
}
static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
- [TCA_ACT_KIND] = { .type = NLA_NUL_STRING,
- .len = IFNAMSIZ - 1 },
+ [TCA_ACT_KIND] = { .type = NLA_STRING },
[TCA_ACT_INDEX] = { .type = NLA_U32 },
[TCA_ACT_COOKIE] = { .type = NLA_BINARY,
.len = TC_COOKIE_MAX_SIZE },
NL_SET_ERR_MSG(extack, "TC action kind must be specified");
goto err_out;
}
- nla_strlcpy(act_name, kind, IFNAMSIZ);
-
+ if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) {
+ NL_SET_ERR_MSG(extack, "TC action name too long");
+ goto err_out;
+ }
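
Relaxing TCA_ACT_KIND from NLA_NUL_STRING to NLA_STRING means the netlink policy no longer guarantees a short, NUL-terminated name, so the copy itself has to detect truncation: strlcpy-style helpers such as nla_strlcpy() return the full source length, and a result >= the buffer size means the name did not fit. A sketch of the check (xstrlcpy() is a local stand-in):

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

/* Minimal strlcpy standing in for nla_strlcpy(): returns the length of
 * the source string, so callers can detect truncation.
 */
static size_t xstrlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char name[IFNAMSIZ];

	if (xstrlcpy(name, "a-very-long-action-kind-name", sizeof(name)) >= sizeof(name))
		printf("action name too long, rejected\n");
	else
		printf("kind=%s\n", name);
	return 0;
}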
if (tb[TCA_ACT_COOKIE]) {
cookie = nla_memdup_cookie(tb);
if (!cookie) {
struct netlink_ext_ack *extack)
{
size_t attr_size = 0;
- int ret = 0;
+ int loop, ret;
struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
- ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, actions,
- &attr_size, true, extack);
+ for (loop = 0; loop < 10; loop++) {
+ ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
+ actions, &attr_size, true, extack);
+ if (ret != -EAGAIN)
+ break;
+ }
+
if (ret < 0)
return ret;
ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
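
tcf_action_add() now absorbs the replay logic that the RTM_NEWACTION handler below loses: instead of an unbounded goto on -EAGAIN, the init is retried at most ten times, converting a potential livelock into a bounded retry. A sketch of the shape of that loop (try_add() is illustrative):

#include <errno.h>
#include <stdio.h>

static int attempts;

static int try_add(void)
{
	/* Pretend the first two attempts race with a concurrent writer. */
	return ++attempts < 3 ? -EAGAIN : 0;
}

int main(void)
{
	int loop, ret = -EAGAIN;

	/* Bounded replay: give up after 10 tries instead of spinning
	 * forever on a persistently contended update.
	 */
	for (loop = 0; loop < 10; loop++) {
		ret = try_add();
		if (ret != -EAGAIN)
			break;
	}
	printf("ret=%d after %d attempts\n", ret, attempts);
	return 0;
}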
*/
if (n->nlmsg_flags & NLM_F_REPLACE)
ovr = 1;
-replay:
ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
extack);
- if (ret == -EAGAIN)
- goto replay;
break;
case RTM_DELACTION:
ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
return err;
pr_info("Mirror/redirect action on\n");
- return tcf_register_action(&act_mirred_ops, &mirred_net_ops);
+ err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
+ if (err)
+ unregister_netdevice_notifier(&mirred_device_notifier);
+
+ return err;
}
static void __exit mirred_cleanup_module(void)
struct tcf_mpls *m = to_mpls(a);
struct tcf_mpls_params *p;
__be32 new_lse;
- int ret;
+ int ret, mac_len;
tcf_lastuse_update(&m->tcf_tm);
bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
/* Ensure 'data' points at mac_header prior calling mpls manipulating
* functions.
*/
- if (skb_at_tc_ingress(skb))
+ if (skb_at_tc_ingress(skb)) {
skb_push_rcsum(skb, skb->mac_len);
+ mac_len = skb->mac_len;
+ } else {
+ mac_len = skb_network_header(skb) - skb_mac_header(skb);
+ }
ret = READ_ONCE(m->tcf_action);
switch (p->tcfm_action) {
case TCA_MPLS_ACT_POP:
- if (skb_mpls_pop(skb, p->tcfm_proto))
+ if (skb_mpls_pop(skb, p->tcfm_proto, mac_len))
goto drop;
break;
case TCA_MPLS_ACT_PUSH:
new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol));
- if (skb_mpls_push(skb, new_lse, p->tcfm_proto))
+ if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len))
goto drop;
break;
case TCA_MPLS_ACT_MODIFY:
return TC_H_MAJ(first);
}
+static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
+{
+ if (kind)
+ return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
+ memset(name, 0, IFNAMSIZ);
+ return false;
+}
+
static bool tcf_proto_is_unlocked(const char *kind)
{
const struct tcf_proto_ops *ops;
bool ret;
+ if (strlen(kind) == 0)
+ return false;
+
ops = tcf_proto_lookup_ops(kind, false, NULL);
/* On error return false to take rtnl lock. Proto lookup/create
* functions will perform lookup again and properly handle errors.
{
struct net *net = sock_net(skb->sk);
struct nlattr *tca[TCA_MAX + 1];
+ char name[IFNAMSIZ];
struct tcmsg *t;
u32 protocol;
u32 prio;
if (err)
return err;
+ if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
+ NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
+ err = -EINVAL;
+ goto errout;
+ }
+
/* Take rtnl mutex if rtnl_held was set to true on previous iteration,
* block is shared (no qdisc found), qdisc is not unlocked, classifier
* type is not specified, classifier is not unlocked.
*/
if (rtnl_held ||
(q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
- !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
+ !tcf_proto_is_unlocked(name)) {
rtnl_held = true;
rtnl_lock();
}
{
struct net *net = sock_net(skb->sk);
struct nlattr *tca[TCA_MAX + 1];
+ char name[IFNAMSIZ];
struct tcmsg *t;
u32 protocol;
u32 prio;
if (err)
return err;
+ if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
+ NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
+ err = -EINVAL;
+ goto errout;
+ }
/* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
* found), qdisc is not unlocked, classifier type is not specified,
* classifier is not unlocked.
*/
if (!prio ||
(q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
- !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
+ !tcf_proto_is_unlocked(name)) {
rtnl_held = true;
rtnl_lock();
}
{
struct net *net = sock_net(skb->sk);
struct nlattr *tca[TCA_MAX + 1];
+ char name[IFNAMSIZ];
struct tcmsg *t;
u32 protocol;
u32 prio;
if (err)
return err;
+ if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
+ NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
+ err = -EINVAL;
+ goto errout;
+ }
/* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
* unlocked, classifier type is not specified, classifier is not
* unlocked.
*/
if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
- !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
+ !tcf_proto_is_unlocked(name)) {
rtnl_held = true;
rtnl_lock();
}
*err = -1;
return;
}
- dst->value = sk->sk_wmem_queued;
+ dst->value = READ_ONCE(sk->sk_wmem_queued);
}
META_COLLECTOR(int_sk_fwd_alloc)
*err = -1;
return;
}
- dst->value = sk->sk_rcvlowat;
+ dst->value = READ_ONCE(sk->sk_rcvlowat);
}
META_COLLECTOR(int_sk_rcvtimeo)
}
const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
- [TCA_KIND] = { .type = NLA_NUL_STRING,
- .len = IFNAMSIZ - 1 },
+ [TCA_KIND] = { .type = NLA_STRING },
[TCA_RATE] = { .type = NLA_BINARY,
.len = sizeof(struct tc_estimator) },
[TCA_STAB] = { .type = NLA_NESTED },
parent = *p;
skb = rb_to_skb(parent);
- if (ktime_after(txtime, skb->tstamp)) {
+ if (ktime_compare(txtime, skb->tstamp) >= 0) {
p = &parent->rb_right;
leftmost = false;
} else {
if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_help(skb)) {
qdisc_drop(skb, sch, to_free);
+ skb = NULL;
goto finish_segs;
}
finish_segs:
if (segs) {
unsigned int len, last_len;
- int nb = 0;
+ int nb;
- len = skb->len;
+ len = skb ? skb->len : 0;
+ nb = skb ? 1 : 0;
while (segs) {
skb2 = segs->next;
}
segs = skb2;
}
- qdisc_tree_reduce_backlog(sch, -nb, prev_len - len);
+ /* Parent qdiscs accounted for 1 skb of size @prev_len */
+ qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
+ } else if (!skb) {
+ return NET_XMIT_DROP;
}
return NET_XMIT_SUCCESS;
}
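
The netem accounting fix deserves spelling out: the parent qdiscs already accounted one skb of prev_len bytes when the packet was enqueued, but after software GSO nb segments totalling len bytes are actually queued, so the tree must be corrected by nb-1 packets and len-prev_len bytes; both are negated because qdisc_tree_reduce_backlog() subtracts its arguments. A worked example with made-up sizes:

#include <stdio.h>

int main(void)
{
	/* One 3000-byte GSO skb was accounted, then split into two
	 * segments totalling 3100 bytes (per-segment headers added).
	 */
	int prev_len = 3000, len = 3100, nb = 2;

	/* qdisc_tree_reduce_backlog(sch, pkts, bytes) subtracts, so
	 * negative deltas *add* to the accounted backlog.
	 */
	int dpkts  = -(nb - 1);		/* -1: account one more packet */
	int dbytes = -(len - prev_len);	/* -100: account 100 more bytes */

	printf("reduce by %d pkts, %d bytes\n", dpkts, dbytes);
	return 0;
}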
NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
goto out;
}
+
+ /* Everything went ok, return success. */
+ err = 0;
+
out:
return err;
}
mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
- mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
+ mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
bh_lock_sock(sk);
}
- if (sock_owned_by_user(sk)) {
+ if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
if (sctp_add_backlog(sk, skb)) {
bh_unlock_sock(sk);
sctp_chunk_free(chunk);
local_bh_disable();
bh_lock_sock(sk);
- if (sock_owned_by_user(sk)) {
- if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
+ if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
+ if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
sctp_chunk_free(chunk);
else
backloged = 1;
if (backloged)
return 0;
} else {
- sctp_inq_push(inqueue, chunk);
+ if (!sctp_newsk_ready(sk)) {
+ if (!sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
+ return 0;
+ sctp_chunk_free(chunk);
+ } else {
+ sctp_inq_push(inqueue, chunk);
+ }
}
done:
struct sctp_ep_common *rcvr = chunk->rcvr;
int ret;
- ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
+ ret = sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
if (!ret) {
/* Hold the assoc/ep while hanging on the backlog queue.
* This way, we know structures we need will not disappear
.backlog_rcv = sctp_backlog_rcv,
.hash = sctp_hash,
.unhash = sctp_unhash,
- .get_port = sctp_get_port,
+ .no_autobind = true,
.obj_size = sizeof(struct sctp_sock),
.useroffset = offsetof(struct sctp_sock, subscribe),
.usersize = offsetof(struct sctp_sock, initmsg) -
.backlog_rcv = sctp_backlog_rcv,
.hash = sctp_hash,
.unhash = sctp_unhash,
- .get_port = sctp_get_port,
+ .no_autobind = true,
.obj_size = sizeof(struct sctp6_sock),
.useroffset = offsetof(struct sctp6_sock, sctp.subscribe),
.usersize = offsetof(struct sctp6_sock, sctp.initmsg) -
lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
if (!lgr) {
rc = SMC_CLC_DECL_MEM;
- goto out;
+ goto ism_put_vlan;
}
lgr->is_smcd = ini->is_smcd;
lgr->sync_err = 0;
smc_llc_link_clear(lnk);
free_lgr:
kfree(lgr);
+ism_put_vlan:
+ if (ini->is_smcd && ini->vlan_id)
+ smc_ism_put_vlan(ini->ism_dev, ini->vlan_id);
out:
if (rc < 0) {
if (rc == -ENOMEM)
rc = sk_wait_event(sk, timeo,
sk->sk_err ||
sk->sk_shutdown & RCV_SHUTDOWN ||
- fcrit(conn) ||
- smc_cdc_rxed_any_close_or_senddone(conn),
+ fcrit(conn),
&wait);
remove_wait_queue(sk_sleep(sk), &wait);
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
return -EAGAIN;
}
+static bool smc_rx_recvmsg_data_available(struct smc_sock *smc)
+{
+ struct smc_connection *conn = &smc->conn;
+
+ if (smc_rx_data_available(conn))
+ return true;
+ else if (conn->urg_state == SMC_URG_VALID)
+ /* we received a single urgent Byte - skip */
+ smc_rx_update_cons(smc, 0);
+ return false;
+}
+
/* smc_rx_recvmsg - receive data from RMBE
* @msg: copy data to receive buffer
* @pipe: copy data to pipe if set - indicates splice() call
if (read_done >= target || (pipe && read_done))
break;
- if (atomic_read(&conn->bytes_to_rcv))
+ if (smc_rx_recvmsg_data_available(smc))
goto copy;
- else if (conn->urg_state == SMC_URG_VALID)
- /* we received a single urgent Byte - skip */
- smc_rx_update_cons(smc, 0);
if (sk->sk_shutdown & RCV_SHUTDOWN ||
- smc_cdc_rxed_any_close_or_senddone(conn) ||
- conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
+ conn->local_tx_ctrl.conn_state_flags.peer_conn_abort) {
+ /* smc_cdc_msg_recv_action() could have run after
+ * above smc_rx_recvmsg_data_available()
+ */
+ if (smc_rx_recvmsg_data_available(smc))
+ goto copy;
break;
+ }
if (read_done) {
if (sk->sk_err ||
struct tipc_msg *hdr = buf_msg(skb);
if (unlikely(msg_in_group(hdr)))
- return sk->sk_rcvbuf;
+ return READ_ONCE(sk->sk_rcvbuf);
if (unlikely(!msg_connected(hdr)))
- return sk->sk_rcvbuf << msg_importance(hdr);
+ return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
- return sk->sk_rcvbuf;
+ return READ_ONCE(sk->sk_rcvbuf);
return FLOWCTL_MSG_LIM;
}
i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
- i += scnprintf(buf + i, sz - i, " | %d\n", sk->sk_backlog.len);
+ i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
if (dqueues & TIPC_DUMP_SK_SNDQ) {
i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
return virtio_transport_get_ops()->send_pkt(pkt);
}
-static void virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
+static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
struct virtio_vsock_pkt *pkt)
{
+ if (vvs->rx_bytes + pkt->len > vvs->buf_alloc)
+ return false;
+
vvs->rx_bytes += pkt->len;
+ return true;
}
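
virtio_transport_inc_rx_pkt() turning into a bool is the heart of this fix: a peer that ignores the advertised credit could previously queue unbounded data, so the receive path now refuses to account a packet that would push rx_bytes past buf_alloc, and the caller drops it. A sketch of the admission check (struct vsock_rx is illustrative):

#include <stdbool.h>
#include <stdio.h>

struct vsock_rx {
	unsigned int rx_bytes;	/* bytes currently queued */
	unsigned int buf_alloc;	/* credit advertised to the peer */
};

/* Only account the packet if it fits in the advertised buffer;
 * otherwise the caller frees it instead of queueing it.
 */
static bool inc_rx_pkt(struct vsock_rx *v, unsigned int len)
{
	if (v->rx_bytes + len > v->buf_alloc)
		return false;
	v->rx_bytes += len;
	return true;
}

int main(void)
{
	struct vsock_rx v = { .rx_bytes = 0, .buf_alloc = 4096 };

	printf("4096 bytes ok: %d\n", inc_rx_pkt(&v, 4096));	/* fits exactly */
	printf("1 more ok: %d\n", inc_rx_pkt(&v, 1));		/* over credit */
	return 0;
}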
static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
vvs->buf_size_max = val;
vvs->buf_size = val;
vvs->buf_alloc = val;
+
+ virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_STREAM,
+ NULL);
}
EXPORT_SYMBOL_GPL(virtio_transport_set_buffer_size);
struct virtio_vsock_pkt *pkt)
{
struct virtio_vsock_sock *vvs = vsk->trans;
- bool free_pkt = false;
+ bool can_enqueue, free_pkt = false;
pkt->len = le32_to_cpu(pkt->hdr.len);
pkt->off = 0;
spin_lock_bh(&vvs->rx_lock);
- virtio_transport_inc_rx_pkt(vvs, pkt);
+ can_enqueue = virtio_transport_inc_rx_pkt(vvs, pkt);
+ if (!can_enqueue) {
+ free_pkt = true;
+ goto out;
+ }
/* Try to copy small packets into the buffer of last packet queued,
* to avoid wasting memory queueing the entire buffer with a small
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_GET_FTM_RESPONDER_STATS);
if (!hdr)
- return -ENOBUFS;
+ goto nla_put_failure;
if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
return pre_cac_allowed;
}
+EXPORT_SYMBOL(regulatory_pre_cac_allowed);
void regulatory_propagate_dfs_state(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef,
*/
#define REG_PRE_CAC_EXPIRY_GRACE_MS 2000
-/**
- * regulatory_pre_cac_allowed - if pre-CAC allowed in the current dfs domain
- * @wiphy: wiphy for which pre-CAC capability is checked.
-
- * Pre-CAC is allowed only in ETSI domain.
- */
-bool regulatory_pre_cac_allowed(struct wiphy *wiphy);
-
/**
* regulatory_propagate_dfs_state - Propagate DFS channel state to other wiphys
* @wiphy - wiphy on which radar is detected and the event will be propagated
static void
cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
struct cfg80211_bss *nontrans_bss,
- struct ieee80211_mgmt *mgmt, size_t len,
- gfp_t gfp)
+ struct ieee80211_mgmt *mgmt, size_t len)
{
u8 *ie, *new_ie, *pos;
const u8 *nontrans_ssid, *trans_ssid, *mbssid;
const struct cfg80211_bss_ies *old;
u8 cpy_len;
+ lockdep_assert_held(&wiphy_to_rdev(wiphy)->bss_lock);
+
ie = mgmt->u.probe_resp.variable;
new_ie_len = ielen;
if (!mbssid || mbssid < trans_ssid)
return;
new_ie_len -= mbssid[1];
- rcu_read_lock();
+
nontrans_ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID);
- if (!nontrans_ssid) {
- rcu_read_unlock();
+ if (!nontrans_ssid)
return;
- }
+
new_ie_len += nontrans_ssid[1];
- rcu_read_unlock();
/* generate new ie for nontrans BSS
* 1. replace SSID with nontrans BSS' SSID
* 2. skip MBSSID IE
*/
- new_ie = kzalloc(new_ie_len, gfp);
+ new_ie = kzalloc(new_ie_len, GFP_ATOMIC);
if (!new_ie)
return;
- new_ies = kzalloc(sizeof(*new_ies) + new_ie_len, gfp);
+
+ new_ies = kzalloc(sizeof(*new_ies) + new_ie_len, GFP_ATOMIC);
if (!new_ies)
goto out_free;
cfg80211_parse_mbssid_frame_data(wiphy, data, mgmt, len,
&non_tx_data, gfp);
+ spin_lock_bh(&wiphy_to_rdev(wiphy)->bss_lock);
+
/* check if the res has other nontransmitting bss which is not
* in MBSSID IE
*/
ies2 = rcu_access_pointer(tmp_bss->ies);
if (ies2->tsf < ies1->tsf)
cfg80211_update_notlisted_nontrans(wiphy, tmp_bss,
- mgmt, len, gfp);
+ mgmt, len);
}
+ spin_unlock_bh(&wiphy_to_rdev(wiphy)->bss_lock);
return res;
}
struct iw_point *data, char *ssid)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int ret = 0;
/* call only for station! */
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
if (ie) {
data->flags = 1;
data->length = ie[1];
- memcpy(ssid, ie + 2, data->length);
+ if (data->length > IW_ESSID_MAX_SIZE)
+ ret = -EINVAL;
+ else
+ memcpy(ssid, ie + 2, data->length);
}
rcu_read_unlock();
} else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {
}
wdev_unlock(wdev);
- return 0;
+ return ret;
}
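The wext change bounds the SSID copy by what the destination can actually hold. A hedged sketch of the bounded-copy pattern, with an assumed 32-byte limit standing in for IW_ESSID_MAX_SIZE:

    #include <errno.h>
    #include <string.h>

    #define DEMO_ESSID_MAX 32 /* stands in for IW_ESSID_MAX_SIZE */

    /* Copy a length-prefixed IE payload only if it fits the caller's
     * buffer; report -EINVAL instead of overflowing or truncating. */
    static int demo_copy_ssid(char *dst, const unsigned char *ie)
    {
            size_t len = ie[1];

            if (len > DEMO_ESSID_MAX)
                    return -EINVAL;
            memcpy(dst, ie + 2, len);
            return (int)len;
    }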
int cfg80211_mgd_wext_siwap(struct net_device *dev,
if (!sock_owned_by_user(sk)) {
queued = x25_process_rx_frame(sk, skb);
} else {
- queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf);
+ queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
}
bh_unlock_sock(sk);
sock_put(sk);
}
EXPORT_SYMBOL(xsk_umem_consume_tx);
-static int xsk_zc_xmit(struct sock *sk)
+static int xsk_zc_xmit(struct xdp_sock *xs)
{
- struct xdp_sock *xs = xdp_sk(sk);
struct net_device *dev = xs->dev;
return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
sock_wfree(skb);
}
-static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
- size_t total_len)
+static int xsk_generic_xmit(struct sock *sk)
{
- u32 max_batch = TX_BATCH_SIZE;
struct xdp_sock *xs = xdp_sk(sk);
+ u32 max_batch = TX_BATCH_SIZE;
bool sent_frame = false;
struct xdp_desc desc;
struct sk_buff *skb;
return err;
}
+static int __xsk_sendmsg(struct sock *sk)
+{
+ struct xdp_sock *xs = xdp_sk(sk);
+
+ if (unlikely(!(xs->dev->flags & IFF_UP)))
+ return -ENETDOWN;
+ if (unlikely(!xs->tx))
+ return -ENOBUFS;
+
+ return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
+}
+
static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
if (unlikely(!xsk_is_bound(xs)))
return -ENXIO;
- if (unlikely(!(xs->dev->flags & IFF_UP)))
- return -ENETDOWN;
- if (unlikely(!xs->tx))
- return -ENOBUFS;
- if (need_wait)
+ if (unlikely(need_wait))
return -EOPNOTSUPP;
- return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
+ return __xsk_sendmsg(sk);
}
static unsigned int xsk_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait)
{
unsigned int mask = datagram_poll(file, sock, wait);
- struct xdp_sock *xs = xdp_sk(sock->sk);
+ struct sock *sk = sock->sk;
+ struct xdp_sock *xs = xdp_sk(sk);
struct net_device *dev;
struct xdp_umem *umem;
dev = xs->dev;
umem = xs->umem;
- if (umem->need_wakeup)
- dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
- umem->need_wakeup);
+ if (umem->need_wakeup) {
+ if (dev->netdev_ops->ndo_xsk_wakeup)
+ dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
+ umem->need_wakeup);
+ else
+ /* Poll needs to drive Tx also in copy mode */
+ __xsk_sendmsg(sk);
+ }
if (xs->rx && !xskq_empty_desc(xs->rx))
mask |= POLLIN | POLLRDNORM;
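With the send path factored into __xsk_sendmsg(), poll() can fall back to driving copy-mode Tx itself when the driver provides no ndo_xsk_wakeup hook. A small dispatch sketch of that fallback (the function pointers are illustrative stand-ins):

    /* If a zero-copy wakeup hook exists, use it; otherwise poll() must
     * kick the generic copy-mode send path so descriptors still drain. */
    typedef int (*wakeup_fn)(void *dev, unsigned int queue_id);

    static int demo_poll_wakeup(wakeup_fn hook, void *dev,
                                unsigned int qid,
                                int (*generic_xmit)(void *sk), void *sk)
    {
            if (hook)
                    return hook(dev, qid);
            return generic_xmit(sk);
    }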
#ifndef __ASM_GOTO_WORKAROUND_H
#define __ASM_GOTO_WORKAROUND_H
-/* this will bring in asm_volatile_goto macro definition
+/*
+ * This will bring in asm_volatile_goto and asm_inline macro definitions
* if enabled by compiler and config options.
*/
#include <linux/types.h>
#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
#endif
+/*
+ * asm_inline is defined as asm __inline in "include/linux/compiler_types.h"
+ * if supported by the kernel's CC (i.e. CONFIG_CC_HAS_ASM_INLINE), which
+ * is not the case for clang.
+ */
+#ifdef asm_inline
+#undef asm_inline
+#define asm_inline asm
+#endif
+
#define volatile(x...) volatile("")
#endif
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <linux/perf_event.h>
#include "libbpf.h"
#include "bpf_load.h"
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/// Use devm_platform_ioremap_resource helper which wraps
-/// platform_get_resource() and devm_ioremap_resource() together.
-///
-// Confidence: High
-// Copyright: (C) 2019 Himanshu Jha GPLv2.
-// Copyright: (C) 2019 Julia Lawall, Inria/LIP6. GPLv2.
-// Keywords: platform_get_resource, devm_ioremap_resource,
-// Keywords: devm_platform_ioremap_resource
-
-virtual patch
-virtual report
-
-@r depends on patch && !report@
-expression e1, e2, arg1, arg2, arg3;
-identifier id;
-@@
-
-(
-- id = platform_get_resource(arg1, IORESOURCE_MEM, arg2);
-|
-- struct resource *id = platform_get_resource(arg1, IORESOURCE_MEM, arg2);
-)
- ... when != id
-- e1 = devm_ioremap_resource(arg3, id);
-+ e1 = devm_platform_ioremap_resource(arg1, arg2);
- ... when != id
-? id = e2
-
-@r1 depends on patch && !report@
-identifier r.id;
-type T;
-@@
-
-- T *id;
- ...when != id
-
-@r2 depends on report && !patch@
-identifier id;
-expression e1, e2, arg1, arg2, arg3;
-position j0;
-@@
-
-(
- id = platform_get_resource(arg1, IORESOURCE_MEM, arg2);
-|
- struct resource *id = platform_get_resource(arg1, IORESOURCE_MEM, arg2);
-)
- ... when != id
- e1@j0 = devm_ioremap_resource(arg3, id);
- ... when != id
-? id = e2
-
-@script:python depends on report && !patch@
-e1 << r2.e1;
-j0 << r2.j0;
-@@
-
-msg = "WARNING: Use devm_platform_ioremap_resource for %s" % (e1)
-coccilib.report.print_report(j0[0], msg)
from linux import utils
+printk_log_type = utils.CachedType("struct printk_log")
+
class LxDmesg(gdb.Command):
"""Print Linux kernel log buffer."""
b = utils.read_memoryview(inf, log_buf_addr, log_next_idx)
log_buf = a.tobytes() + b.tobytes()
+ length_offset = printk_log_type.get_type()['len'].bitpos // 8
+ text_len_offset = printk_log_type.get_type()['text_len'].bitpos // 8
+ time_stamp_offset = printk_log_type.get_type()['ts_nsec'].bitpos // 8
+ text_offset = printk_log_type.get_type().sizeof
+
pos = 0
while pos < log_buf.__len__():
- length = utils.read_u16(log_buf[pos + 8:pos + 10])
+ length = utils.read_u16(log_buf, pos + length_offset)
if length == 0:
if log_buf_2nd_half == -1:
gdb.write("Corrupted log buffer!\n")
pos = log_buf_2nd_half
continue
- text_len = utils.read_u16(log_buf[pos + 10:pos + 12])
- text = log_buf[pos + 16:pos + 16 + text_len].decode(
+ text_len = utils.read_u16(log_buf, pos + text_len_offset)
+ text_start = pos + text_offset
+ text = log_buf[text_start:text_start + text_len].decode(
encoding='utf8', errors='replace')
- time_stamp = utils.read_u64(log_buf[pos:pos + 8])
+ time_stamp = utils.read_u64(log_buf, pos + time_stamp_offset)
for line in text.splitlines():
msg = u"[{time:12.6f}] {line}\n".format(
import os
import re
-from linux import modules
+from linux import modules, utils
if hasattr(gdb, 'Breakpoint'):
module_file = self._get_module_file(module_name)
if module_file:
+ if utils.is_target_arch('s390'):
+ # Module text is preceded by PLT stubs on s390.
+ module_arch = module['arch']
+ plt_offset = int(module_arch['plt_offset'])
+ plt_size = int(module_arch['plt_size'])
+ module_addr = hex(int(module_addr, 0) + plt_offset + plt_size)
gdb.write("loading @{addr}: {filename}\n".format(
addr=module_addr, filename=module_file))
cmdline = "add-symbol-file {filename} {addr}{sections}".format(
return memoryview(inf.read_memory(start, length))
-def read_u16(buffer):
+def read_u16(buffer, offset):
+ buffer_val = buffer[offset:offset + 2]
value = [0, 0]
- if type(buffer[0]) is str:
- value[0] = ord(buffer[0])
- value[1] = ord(buffer[1])
+ if type(buffer_val[0]) is str:
+ value[0] = ord(buffer_val[0])
+ value[1] = ord(buffer_val[1])
else:
- value[0] = buffer[0]
- value[1] = buffer[1]
+ value[0] = buffer_val[0]
+ value[1] = buffer_val[1]
if get_target_endianness() == LITTLE_ENDIAN:
return value[0] + (value[1] << 8)
return value[1] + (value[0] << 8)
-def read_u32(buffer):
+def read_u32(buffer, offset):
if get_target_endianness() == LITTLE_ENDIAN:
- return read_u16(buffer[0:2]) + (read_u16(buffer[2:4]) << 16)
+ return read_u16(buffer, offset) + (read_u16(buffer, offset + 2) << 16)
else:
- return read_u16(buffer[2:4]) + (read_u16(buffer[0:2]) << 16)
+ return read_u16(buffer, offset + 2) + (read_u16(buffer, offset) << 16)
-def read_u64(buffer):
+def read_u64(buffer, offset):
if get_target_endianness() == LITTLE_ENDIAN:
- return read_u32(buffer[0:4]) + (read_u32(buffer[4:8]) << 32)
+ return read_u32(buffer, offset) + (read_u32(buffer, offset + 4) << 32)
else:
- return read_u32(buffer[4:8]) + (read_u32(buffer[0:4]) << 32)
+ return read_u32(buffer, offset + 4) + (read_u32(buffer, offset) << 32)
target_arch = None
return export_unknown;
}
-static char *sym_extract_namespace(const char **symname)
+static const char *namespace_from_kstrtabns(struct elf_info *info,
+ Elf_Sym *kstrtabns)
{
- char *namespace = NULL;
- char *ns_separator;
+ char *value = info->ksymtab_strings + kstrtabns->st_value;
+ return value[0] ? value : NULL;
+}
+
+static void sym_update_namespace(const char *symname, const char *namespace)
+{
+ struct symbol *s = find_symbol(symname);
- ns_separator = strchr(*symname, '.');
- if (ns_separator) {
- namespace = NOFAIL(strndup(*symname, ns_separator - *symname));
- *symname = ns_separator + 1;
+ /*
+	 * The symbol should have been created earlier, so failing to find
+	 * it here is effectively an assertion failure.
+ */
+ if (!s) {
+ merror("Could not update namespace(%s) for symbol %s\n",
+ namespace, symname);
+ return;
}
- return namespace;
+ free(s->namespace);
+ s->namespace =
+ namespace && namespace[0] ? NOFAIL(strdup(namespace)) : NULL;
}
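namespace_from_kstrtabns() resolves the namespace by treating the __kstrtabns_<symbol> ELF symbol's st_value as an offset into the __ksymtab_strings section, with the empty string meaning "no namespace". A hedged sketch of that lookup, outside any ELF machinery:

    #include <stddef.h>

    /* Offset into the __ksymtab_strings blob; "" means unnamespaced. */
    static const char *demo_ns_lookup(const char *ksymtab_strings,
                                      size_t st_value)
    {
            const char *ns = ksymtab_strings + st_value;

            return ns[0] ? ns : NULL;
    }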
/**
* Add an exported symbol - it may have already been added without a
* CRC, in this case just update the CRC
**/
-static struct symbol *sym_add_exported(const char *name, const char *namespace,
- struct module *mod, enum export export)
+static struct symbol *sym_add_exported(const char *name, struct module *mod,
+ enum export export)
{
struct symbol *s = find_symbol(name);
s->module = mod;
}
}
- free(s->namespace);
- s->namespace = namespace ? strdup(namespace) : NULL;
s->preloaded = 0;
s->vmlinux = is_vmlinux(mod->name);
s->kernel = 0;
info->export_unused_gpl_sec = i;
else if (strcmp(secname, "__ksymtab_gpl_future") == 0)
info->export_gpl_future_sec = i;
+ else if (strcmp(secname, "__ksymtab_strings") == 0)
+ info->ksymtab_strings = (void *)hdr +
+ sechdrs[i].sh_offset -
+ sechdrs[i].sh_addr;
if (sechdrs[i].sh_type == SHT_SYMTAB) {
unsigned int sh_link_idx;
enum export export;
bool is_crc = false;
const char *name;
- char *namespace;
if ((!is_vmlinux(mod->name) || mod->is_dot_o) &&
strstarts(symname, "__ksymtab"))
/* All exported symbols */
if (strstarts(symname, "__ksymtab_")) {
name = symname + strlen("__ksymtab_");
- namespace = sym_extract_namespace(&name);
- sym_add_exported(name, namespace, mod, export);
- free(namespace);
+ sym_add_exported(name, mod, export);
}
if (strcmp(symname, "init_module") == 0)
mod->has_init = 1;
handle_moddevtable(mod, &info, sym, symname);
}
+ /* Apply symbol namespaces from __kstrtabns_<symbol> entries. */
+ for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
+ symname = remove_dot(info.strtab + sym->st_name);
+
+ if (strstarts(symname, "__kstrtabns_"))
+ sym_update_namespace(symname + strlen("__kstrtabns_"),
+ namespace_from_kstrtabns(&info,
+ sym));
+ }
+
// check for static EXPORT_SYMBOL_* functions && global vars
for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
unsigned char bind = ELF_ST_BIND(sym->st_info);
else
basename = mod->name;
- if (exp->namespace && exp->namespace[0]) {
+ if (exp->namespace) {
add_namespace(&mod->required_namespaces,
exp->namespace);
mod = new_module(modname);
mod->skip = 1;
}
- s = sym_add_exported(symname, namespace, mod,
- export_no(export));
+ s = sym_add_exported(symname, mod, export_no(export));
s->kernel = kernel;
s->preloaded = 1;
s->is_static = 0;
sym_update_crc(symname, mod, crc, export_no(export));
+ sym_update_namespace(symname, namespace);
}
release_file(file, size);
return;
Elf_Section export_gpl_sec;
Elf_Section export_unused_gpl_sec;
Elf_Section export_gpl_future_sec;
+ char *ksymtab_strings;
char *strtab;
char *modinfo;
unsigned int modinfo_len;
if [ ! -f "$ns_deps_file" ]; then return; fi
local mod_source_files=`cat $mod_file | sed -n 1p \
| sed -e 's/\.o/\.c/g' \
- | sed "s/[^ ]* */${srctree}\/&/g"`
+ | sed "s|[^ ]* *|${srctree}/&|g"`
for ns in `cat $ns_deps_file`; do
echo "Adding namespace $ns to module $mod_name (if needed)."
generate_deps_for_ns $ns $mod_source_files
# Check for mercurial and a mercurial repo.
if test -d .hg && hgid=`hg id 2>/dev/null`; then
	# Do we have a tagged version? If so, latesttagdistance == 1
- if [ "`hg log -r . --template '{latesttagdistance}'`" == "1" ]; then
+ if [ "`hg log -r . --template '{latesttagdistance}'`" = "1" ]; then
id=`hg log -r . --template '{latesttag}'`
printf '%s%s' -hg "$id"
else
ret = snd_hdac_ext_bus_link_power_up(link);
+ /*
+ * clear the register to invalidate all the output streams
+ */
+ snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV,
+ ML_LOSIDV_STREAM_MASK, 0);
/*
* wait for 521usec for codec to report status
* HDA spec section 4.3 - Codec Discovery
/* Icelake */
{ PCI_DEVICE(0x8086, 0x34c8),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* Jasperlake */
+ { PCI_DEVICE(0x8086, 0x38c8),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* Tigerlake */
+ { PCI_DEVICE(0x8086, 0xa0c8),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Elkhart Lake */
{ PCI_DEVICE(0x8086, 0x4b55),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
nvhdmi_chmap_cea_alloc_validate_get_type;
spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
+ codec->link_down_at_suspend = 1;
+
generic_acomp_init(codec, &nvhdmi_audio_ops, nvhdmi_port2pin);
return 0;
case 0x10ec0700:
case 0x10ec0701:
case 0x10ec0703:
+ case 0x10ec0711:
alc_update_coef_idx(codec, 0x10, 1<<15, 0);
break;
case 0x10ec0662:
}
}
+static void alc256_fixup_dell_xps_13_headphone_noise2(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ if (action != HDA_FIXUP_ACT_PRE_PROBE)
+ return;
+
+ snd_hda_codec_amp_stereo(codec, 0x1a, HDA_INPUT, 0, HDA_AMP_VOLMASK, 1);
+ snd_hda_override_wcaps(codec, 0x1a, get_wcaps(codec, 0x1a) & ~AC_WCAP_IN_AMP);
+}
+
static void alc269_fixup_limit_int_mic_boost(struct hda_codec *codec,
const struct hda_fixup *fix,
int action)
ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
ALC275_FIXUP_DELL_XPS,
ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
+ ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2,
ALC293_FIXUP_LENOVO_SPK_NOISE,
ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
ALC255_FIXUP_DELL_SPK_NOISE,
ALC225_FIXUP_WYSE_AUTO_MUTE,
ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
+ ALC256_FIXUP_ASUS_HEADSET_MIC,
ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
ALC299_FIXUP_PREDATOR_SPK,
ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
.chained = true,
.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
},
+ [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc256_fixup_dell_xps_13_headphone_noise2,
+ .chained = true,
+ .chain_id = ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE
+ },
[ALC293_FIXUP_LENOVO_SPK_NOISE] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_disable_aamix,
.chained = true,
.chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
},
+ [ALC256_FIXUP_ASUS_HEADSET_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x03a11020 }, /* headset mic with jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+ },
[ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
- SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
- SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
case 0x10ec0700:
case 0x10ec0701:
case 0x10ec0703:
+ case 0x10ec0711:
spec->codec_variant = ALC269_TYPE_ALC700;
spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0711, "ALC711", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc662),
HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
max98373->i_slot = value & 0xF;
else
max98373->i_slot = 1;
-
- max98373->reset_gpio = of_get_named_gpio(dev->of_node,
+ if (dev->of_node) {
+ max98373->reset_gpio = of_get_named_gpio(dev->of_node,
"maxim,reset-gpio", 0);
- if (!gpio_is_valid(max98373->reset_gpio)) {
- dev_err(dev, "Looking up %s property in node %s failed %d\n",
- "maxim,reset-gpio", dev->of_node->full_name,
- max98373->reset_gpio);
+ if (!gpio_is_valid(max98373->reset_gpio)) {
+ dev_err(dev, "Looking up %s property in node %s failed %d\n",
+ "maxim,reset-gpio", dev->of_node->full_name,
+ max98373->reset_gpio);
+ } else {
+ dev_dbg(dev, "maxim,reset-gpio=%d",
+ max98373->reset_gpio);
+ }
} else {
- dev_dbg(dev, "maxim,reset-gpio=%d",
- max98373->reset_gpio);
+		/* mark reset_gpio as invalid */
+ max98373->reset_gpio = -1;
}
if (!device_property_read_u32(dev, "maxim,spkfb-slot-no", &value))
"ZERO", "IIR1", "IIR2", "RX1", "RX2", "RX3"
};
+static const char * const rx_mix2_text[] = {
+ "ZERO", "IIR1", "IIR2"
+};
+
static const char *const dec_mux_text[] = {
"ZERO", "ADC1", "ADC2", "ADC3", "DMIC1", "DMIC2"
};
SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B2_CTL, 0, 6, rx_mix1_text),
};
+/* RX1 MIX2 */
+static const struct soc_enum rx_mix2_inp1_chain_enum =
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX1_B3_CTL,
+ 0, 3, rx_mix2_text);
+
+/* RX2 MIX2 */
+static const struct soc_enum rx2_mix2_inp1_chain_enum =
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B3_CTL,
+ 0, 3, rx_mix2_text);
+
/* DEC */
static const struct soc_enum dec1_mux_enum = SOC_ENUM_SINGLE(
LPASS_CDC_CONN_TX_B1_CTL, 0, 6, dec_mux_text);
"RX3 MIX1 INP2 Mux", rx3_mix1_inp_enum[1]);
static const struct snd_kcontrol_new rx3_mix1_inp3_mux = SOC_DAPM_ENUM(
"RX3 MIX1 INP3 Mux", rx3_mix1_inp_enum[2]);
+static const struct snd_kcontrol_new rx1_mix2_inp1_mux = SOC_DAPM_ENUM(
+ "RX1 MIX2 INP1 Mux", rx_mix2_inp1_chain_enum);
+static const struct snd_kcontrol_new rx2_mix2_inp1_mux = SOC_DAPM_ENUM(
+ "RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum);
/* Digital Gain control -38.4 dB to +38.4 dB in 0.3 dB steps */
static const DECLARE_TLV_DB_SCALE(digital_gain, -3840, 30, 0);
&rx3_mix1_inp2_mux),
SND_SOC_DAPM_MUX("RX3 MIX1 INP3", SND_SOC_NOPM, 0, 0,
&rx3_mix1_inp3_mux),
+ SND_SOC_DAPM_MUX("RX1 MIX2 INP1", SND_SOC_NOPM, 0, 0,
+ &rx1_mix2_inp1_mux),
+ SND_SOC_DAPM_MUX("RX2 MIX2 INP1", SND_SOC_NOPM, 0, 0,
+ &rx2_mix2_inp1_mux),
SND_SOC_DAPM_MUX("CIC1 MUX", SND_SOC_NOPM, 0, 0, &cic1_mux),
SND_SOC_DAPM_MUX("CIC2 MUX", SND_SOC_NOPM, 0, 0, &cic2_mux),
static bool rt5651_support_button_press(struct rt5651_priv *rt5651)
{
+ if (!rt5651->hp_jack)
+ return false;
+
/* Button press support only works with internal jack-detection */
return (rt5651->hp_jack->status & SND_JACK_MICROPHONE) &&
rt5651->gpiod_hp_det == NULL;
{
struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+ rt5682->hs_jack = hs_jack;
+
+ if (!hs_jack) {
+ regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
+ RT5682_JD1_EN_MASK, RT5682_JD1_DIS);
+ regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
+ RT5682_POW_JDH | RT5682_POW_JDL, 0);
+ return 0;
+ }
+
switch (rt5682->pdata.jd_src) {
case RT5682_JD1:
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_2,
break;
}
- rt5682->hs_jack = hs_jack;
-
return 0;
}
static SOC_ENUM_SINGLE_DECL(adc_osr,
WM8994_OVERSAMPLING, 1, osr_text);
-static const struct snd_kcontrol_new wm8994_snd_controls[] = {
+static const struct snd_kcontrol_new wm8994_common_snd_controls[] = {
SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8994_AIF1_ADC1_LEFT_VOLUME,
WM8994_AIF1_ADC1_RIGHT_VOLUME,
1, 119, 0, digital_tlv),
-SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8994_AIF1_ADC2_LEFT_VOLUME,
- WM8994_AIF1_ADC2_RIGHT_VOLUME,
- 1, 119, 0, digital_tlv),
SOC_DOUBLE_R_TLV("AIF2ADC Volume", WM8994_AIF2_ADC_LEFT_VOLUME,
WM8994_AIF2_ADC_RIGHT_VOLUME,
1, 119, 0, digital_tlv),
SOC_DOUBLE_R_TLV("AIF1DAC1 Volume", WM8994_AIF1_DAC1_LEFT_VOLUME,
WM8994_AIF1_DAC1_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
-SOC_DOUBLE_R_TLV("AIF1DAC2 Volume", WM8994_AIF1_DAC2_LEFT_VOLUME,
- WM8994_AIF1_DAC2_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
SOC_DOUBLE_R_TLV("AIF2DAC Volume", WM8994_AIF2_DAC_LEFT_VOLUME,
WM8994_AIF2_DAC_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
SOC_SINGLE_TLV("AIF2 Boost Volume", WM8994_AIF2_CONTROL_2, 10, 3, 0, aif_tlv),
SOC_SINGLE("AIF1DAC1 EQ Switch", WM8994_AIF1_DAC1_EQ_GAINS_1, 0, 1, 0),
-SOC_SINGLE("AIF1DAC2 EQ Switch", WM8994_AIF1_DAC2_EQ_GAINS_1, 0, 1, 0),
SOC_SINGLE("AIF2 EQ Switch", WM8994_AIF2_EQ_GAINS_1, 0, 1, 0),
WM8994_DRC_SWITCH("AIF1DAC1 DRC Switch", WM8994_AIF1_DRC1_1, 2),
WM8994_DRC_SWITCH("AIF1ADC1L DRC Switch", WM8994_AIF1_DRC1_1, 1),
WM8994_DRC_SWITCH("AIF1ADC1R DRC Switch", WM8994_AIF1_DRC1_1, 0),
-WM8994_DRC_SWITCH("AIF1DAC2 DRC Switch", WM8994_AIF1_DRC2_1, 2),
-WM8994_DRC_SWITCH("AIF1ADC2L DRC Switch", WM8994_AIF1_DRC2_1, 1),
-WM8994_DRC_SWITCH("AIF1ADC2R DRC Switch", WM8994_AIF1_DRC2_1, 0),
-
WM8994_DRC_SWITCH("AIF2DAC DRC Switch", WM8994_AIF2_DRC_1, 2),
WM8994_DRC_SWITCH("AIF2ADCL DRC Switch", WM8994_AIF2_DRC_1, 1),
WM8994_DRC_SWITCH("AIF2ADCR DRC Switch", WM8994_AIF2_DRC_1, 0),
SOC_ENUM("AIF1ADC1 HPF Mode", aif1adc1_hpf),
SOC_DOUBLE("AIF1ADC1 HPF Switch", WM8994_AIF1_ADC1_FILTERS, 12, 11, 1, 0),
-SOC_ENUM("AIF1ADC2 HPF Mode", aif1adc2_hpf),
-SOC_DOUBLE("AIF1ADC2 HPF Switch", WM8994_AIF1_ADC2_FILTERS, 12, 11, 1, 0),
-
SOC_ENUM("AIF2ADC HPF Mode", aif2adc_hpf),
SOC_DOUBLE("AIF2ADC HPF Switch", WM8994_AIF2_ADC_FILTERS, 12, 11, 1, 0),
8, 1, 0),
};
+/* Controls not available on WM1811 */
+static const struct snd_kcontrol_new wm8994_snd_controls[] = {
+SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8994_AIF1_ADC2_LEFT_VOLUME,
+ WM8994_AIF1_ADC2_RIGHT_VOLUME,
+ 1, 119, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("AIF1DAC2 Volume", WM8994_AIF1_DAC2_LEFT_VOLUME,
+ WM8994_AIF1_DAC2_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
+
+SOC_SINGLE("AIF1DAC2 EQ Switch", WM8994_AIF1_DAC2_EQ_GAINS_1, 0, 1, 0),
+
+WM8994_DRC_SWITCH("AIF1DAC2 DRC Switch", WM8994_AIF1_DRC2_1, 2),
+WM8994_DRC_SWITCH("AIF1ADC2L DRC Switch", WM8994_AIF1_DRC2_1, 1),
+WM8994_DRC_SWITCH("AIF1ADC2R DRC Switch", WM8994_AIF1_DRC2_1, 0),
+
+SOC_ENUM("AIF1ADC2 HPF Mode", aif1adc2_hpf),
+SOC_DOUBLE("AIF1ADC2 HPF Switch", WM8994_AIF1_ADC2_FILTERS, 12, 11, 1, 0),
+};
+
static const struct snd_kcontrol_new wm8994_eq_controls[] = {
SOC_SINGLE_TLV("AIF1DAC1 EQ1 Volume", WM8994_AIF1_DAC1_EQ_GAINS_1, 11, 31, 0,
eq_tlv),
wm8994_handle_pdata(wm8994);
wm_hubs_add_analogue_controls(component);
- snd_soc_add_component_controls(component, wm8994_snd_controls,
- ARRAY_SIZE(wm8994_snd_controls));
+ snd_soc_add_component_controls(component, wm8994_common_snd_controls,
+ ARRAY_SIZE(wm8994_common_snd_controls));
snd_soc_dapm_new_controls(dapm, wm8994_dapm_widgets,
ARRAY_SIZE(wm8994_dapm_widgets));
switch (control->type) {
case WM8994:
+ snd_soc_add_component_controls(component, wm8994_snd_controls,
+ ARRAY_SIZE(wm8994_snd_controls));
snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets,
ARRAY_SIZE(wm8994_specific_dapm_widgets));
if (control->revision < 4) {
}
break;
case WM8958:
+ snd_soc_add_component_controls(component, wm8994_snd_controls,
+ ARRAY_SIZE(wm8994_snd_controls));
snd_soc_add_component_controls(component, wm8958_snd_controls,
- ARRAY_SIZE(wm8958_snd_controls));
+ ARRAY_SIZE(wm8958_snd_controls));
snd_soc_dapm_new_controls(dapm, wm8958_dapm_widgets,
ARRAY_SIZE(wm8958_dapm_widgets));
if (control->revision < 1) {
}
if (in) {
- if (in & WMFW_CTL_FLAG_READABLE)
- out |= rd;
+ out |= rd;
if (in & WMFW_CTL_FLAG_WRITEABLE)
out |= wr;
if (in & WMFW_CTL_FLAG_VOLATILE)
u32 xmalg, addr, magic;
int i, ret;
+ alg_region = wm_adsp_find_alg_region(dsp, WMFW_ADSP2_XM, dsp->fw_id);
+ if (!alg_region) {
+ adsp_err(dsp, "No algorithm region found\n");
+ return -EINVAL;
+ }
+
buf = wm_adsp_buffer_alloc(dsp);
if (!buf)
return -ENOMEM;
- alg_region = wm_adsp_find_alg_region(dsp, WMFW_ADSP2_XM, dsp->fw_id);
xmalg = dsp->ops->sys_config_size / sizeof(__be32);
addr = alg_region->base + xmalg + ALG_XM_FIELD(magic);
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_SPK("Spk", NULL),
+};
+
+static const struct snd_soc_dapm_widget dmic_widgets[] = {
SND_SOC_DAPM_MIC("SoC DMIC", NULL),
};
/* other jacks */
{ "IN1P", NULL, "Headset Mic" },
-
- /* digital mics */
- {"DMic", NULL, "SoC DMIC"},
-
};
static const struct snd_soc_dapm_route speaker_map[] = {
{ "Spk", NULL, "Speaker" },
};
+static const struct snd_soc_dapm_route dmic_map[] = {
+ /* digital mics */
+ {"DMic", NULL, "SoC DMIC"},
+};
+
static int speaker_codec_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
return ret;
}
+static int dmic_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_card *card = rtd->card;
+ int ret;
+
+ ret = snd_soc_dapm_new_controls(&card->dapm, dmic_widgets,
+ ARRAY_SIZE(dmic_widgets));
+ if (ret) {
+ dev_err(card->dev, "DMic widget addition failed: %d\n", ret);
+ /* Don't need to add routes if widget addition failed */
+ return ret;
+ }
+
+ ret = snd_soc_dapm_add_routes(&card->dapm, dmic_map,
+ ARRAY_SIZE(dmic_map));
+
+ if (ret)
+ dev_err(card->dev, "DMic map addition failed: %d\n", ret);
+
+ return ret;
+}
+
/* sof audio machine driver for rt5682 codec */
static struct snd_soc_card sof_audio_card_rt5682 = {
.name = "sof_rt5682",
links[id].name = "dmic01";
links[id].cpus = &cpus[id];
links[id].cpus->dai_name = "DMIC01 Pin";
+ links[id].init = dmic_init;
if (dmic_be_num > 1) {
/* set up 2 BE links at most */
links[id + 1].name = "dmic16k";
/* need to get main clock from pmc */
if (sof_rt5682_quirk & SOF_RT5682_MCLK_BYTCHT_EN) {
ctx->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+ if (IS_ERR(ctx->mclk)) {
+ ret = PTR_ERR(ctx->mclk);
+
+ dev_err(&pdev->dev,
+ "Failed to get MCLK from pmc_plt_clk_3: %d\n",
+ ret);
+ return ret;
+ }
+
ret = clk_prepare_enable(ctx->mclk);
if (ret < 0) {
dev_err(&pdev->dev,
&sof_audio_card_rt5682);
}
+static int sof_rt5682_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct snd_soc_component *component = NULL;
+
+ for_each_card_components(card, component) {
+ if (!strcmp(component->name, rt5682_component[0].name)) {
+ snd_soc_component_set_jack(component, NULL, NULL);
+ break;
+ }
+ }
+
+ return 0;
+}
+
static struct platform_driver sof_audio = {
.probe = sof_audio_probe,
+ .remove = sof_rt5682_remove,
.driver = {
.name = "sof_rt5682",
.pm = &snd_soc_pm_ops,
ret = rockchip_pcm_platform_register(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Could not register PCM\n");
- return ret;
+ goto err_suspend;
}
return 0;
// Author: Claude <claude@insginal.co.kr>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
.num_links = ARRAY_SIZE(arndale_rt5631_dai),
};
+static void arndale_put_of_nodes(struct snd_soc_card *card)
+{
+ struct snd_soc_dai_link *dai_link;
+ int i;
+
+ for_each_card_prelinks(card, i, dai_link) {
+ of_node_put(dai_link->cpus->of_node);
+ of_node_put(dai_link->codecs->of_node);
+ }
+}
+
static int arndale_audio_probe(struct platform_device *pdev)
{
int n, ret;
if (!arndale_rt5631_dai[0].codecs->of_node) {
dev_err(&pdev->dev,
"Property 'samsung,audio-codec' missing or invalid\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_of_nodes;
}
}
ret = devm_snd_soc_register_card(card->dev, card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret);
+ goto err_put_of_nodes;
+ }
+ return 0;
- if (ret)
- dev_err(&pdev->dev, "snd_soc_register_card() failed:%d\n", ret);
-
+err_put_of_nodes:
+ arndale_put_of_nodes(card);
return ret;
}
+static int arndale_audio_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ arndale_put_of_nodes(card);
+ return 0;
+}
+
static const struct of_device_id samsung_arndale_rt5631_of_match[] __maybe_unused = {
{ .compatible = "samsung,arndale-rt5631", },
{ .compatible = "samsung,arndale-alc5631", },
.of_match_table = of_match_ptr(samsung_arndale_rt5631_of_match),
},
.probe = arndale_audio_probe,
+ .remove = arndale_audio_remove,
};
module_platform_driver(arndale_audio_driver);
}
/* set format */
+ rdai->bit_clk_inv = 0;
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
rdai->sys_delay = 0;
return ret;
}
- snd_soc_dai_trigger(cpu_dai, substream, cmd);
+ ret = snd_soc_dai_trigger(cpu_dai, substream, cmd);
if (ret < 0)
return ret;
return ret;
}
- snd_soc_dai_bespoke_trigger(cpu_dai, substream, cmd);
+ ret = snd_soc_dai_bespoke_trigger(cpu_dai, substream, cmd);
if (ret < 0)
return ret;
{
struct snd_soc_dpcm *dpcm;
unsigned long flags;
+ char *name;
/* only add new dpcms */
for_each_dpcm_be(fe, stream, dpcm) {
stream ? "<-" : "->", be->dai_link->name);
#ifdef CONFIG_DEBUG_FS
- dpcm->debugfs_state = debugfs_create_dir(be->dai_link->name,
- fe->debugfs_dpcm_root);
- debugfs_create_u32("state", 0644, dpcm->debugfs_state, &dpcm->state);
+ name = kasprintf(GFP_KERNEL, "%s:%s", be->dai_link->name,
+ stream ? "capture" : "playback");
+ if (name) {
+ dpcm->debugfs_state = debugfs_create_dir(name,
+ fe->debugfs_dpcm_root);
+ debugfs_create_u32("state", 0644, dpcm->debugfs_state,
+ &dpcm->state);
+ kfree(name);
+ }
#endif
return 1;
}
/* map user to kernel widget ID */
template.id = get_widget_id(le32_to_cpu(w->id));
- if (template.id < 0)
+ if ((int)template.id < 0)
return template.id;
/* strings are allocated here, but used and freed by the widget */
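The added cast matters because the widget id field is unsigned: without it, `template.id < 0` is always false and a negative error code returned by get_widget_id() would be treated as a valid id. A self-contained demonstration of the pitfall (values are illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned int id = (unsigned int)-22; /* -EINVAL stored unsigned */

            /* Without the cast this branch would be unreachable. */
            if ((int)id < 0)
                    printf("error code detected: %d\n", (int)id);
            else
                    printf("looks like a valid id: %u\n", id);
            return 0;
    }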
struct snd_sof_dev *sdev = scontrol->sdev;
struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
unsigned int i, channels = scontrol->num_channels;
+ bool change = false;
+ u32 value;
/* update each channel */
for (i = 0; i < channels; i++) {
- cdata->chanv[i].value =
- mixer_to_ipc(ucontrol->value.integer.value[i],
+ value = mixer_to_ipc(ucontrol->value.integer.value[i],
scontrol->volume_table, sm->max + 1);
+ change = change || (value != cdata->chanv[i].value);
cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = value;
}
/* notify DSP of mixer updates */
SOF_CTRL_TYPE_VALUE_CHAN_GET,
SOF_CTRL_CMD_VOLUME,
true);
-
- return 0;
+ return change;
}
int snd_sof_switch_get(struct snd_kcontrol *kcontrol,
struct snd_sof_dev *sdev = scontrol->sdev;
struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
unsigned int i, channels = scontrol->num_channels;
+ bool change = false;
+ u32 value;
/* update each channel */
for (i = 0; i < channels; i++) {
- cdata->chanv[i].value = ucontrol->value.integer.value[i];
+ value = ucontrol->value.integer.value[i];
+ change = change || (value != cdata->chanv[i].value);
cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = value;
}
/* notify DSP of mixer updates */
SOF_CTRL_CMD_SWITCH,
true);
- return 0;
+ return change;
}
int snd_sof_enum_get(struct snd_kcontrol *kcontrol,
struct snd_sof_dev *sdev = scontrol->sdev;
struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
unsigned int i, channels = scontrol->num_channels;
+ bool change = false;
+ u32 value;
/* update each channel */
for (i = 0; i < channels; i++) {
- cdata->chanv[i].value = ucontrol->value.enumerated.item[i];
+ value = ucontrol->value.enumerated.item[i];
+ change = change || (value != cdata->chanv[i].value);
cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = value;
}
/* notify DSP of enum updates */
SOF_CTRL_CMD_ENUM,
true);
- return 0;
+ return change;
}
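These three put handlers now follow the ALSA kcontrol convention of returning 1 when a value actually changed and 0 for a no-op write, so userspace only gets change notifications when warranted. A minimal sketch of the pattern (names are illustrative):

    #include <stdbool.h>

    /* Write new channel values, remembering whether anything changed. */
    static int demo_ctl_put(unsigned int *stored, const unsigned int *req,
                            int channels)
    {
            bool change = false;
            int i;

            for (i = 0; i < channels; i++) {
                    change = change || (req[i] != stored[i]);
                    stored[i] = req[i];
            }
            return change; /* 1: notify listeners; 0: nothing changed */
    }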
int snd_sof_bytes_get(struct snd_kcontrol *kcontrol,
Say Y if you want to enable HDAudio codecs with SOF.
If unsure select "N".
+config SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1
+ bool "SOF enable DMI Link L1"
+ help
+ This option enables DMI L1 for both playback and capture
+ and disables known workarounds for specific HDaudio platforms.
+	  Use it only to investigate power optimizations on platforms not
+	  affected by DMI L1 issues. This option is not recommended.
+	  Say Y if you want to enable DMI Link L1.
+ If unsure, select "N".
+
endif ## SND_SOC_SOF_HDA_COMMON
config SND_SOC_SOF_HDA_LINK_BASELINE
#define MBOX_SIZE 0x1000
#define MBOX_DUMP_SIZE 0x30
#define EXCEPT_OFFSET 0x800
+#define EXCEPT_MAX_HDR_SIZE 0x400
/* DSP peripherals */
#define DMAC0_OFFSET 0xFE000
/* note: variable AR register array is not read */
/* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
offset += xoops->arch_hdr.totalsize;
sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
/* TODO: add offsets */
sdev->mmio_bar = BDW_DSP_BAR;
sdev->mailbox_bar = BDW_DSP_BAR;
+ sdev->dsp_oops_offset = MBOX_OFFSET;
/* PCI base */
mmio = platform_get_resource(pdev, IORESOURCE_MEM,
#define MBOX_OFFSET 0x144000
#define MBOX_SIZE 0x1000
#define EXCEPT_OFFSET 0x800
+#define EXCEPT_MAX_HDR_SIZE 0x400
/* DSP peripherals */
#define DMAC0_OFFSET 0x098000
/* note: variable AR register array is not read */
/* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
offset += xoops->arch_hdr.totalsize;
sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
*/
int hda_dsp_ctrl_clock_power_gating(struct snd_sof_dev *sdev, bool enable)
{
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
- struct hdac_bus *bus = sof_to_bus(sdev);
-#endif
u32 val;
/* enable/disable audio dsp clock gating */
val = enable ? PCI_CGCTL_ADSPDCGE : 0;
snd_sof_pci_update_bits(sdev, PCI_CGCTL, PCI_CGCTL_ADSPDCGE, val);
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
- /* enable/disable L1 support */
- val = enable ? SOF_HDA_VS_EM2_L1SEN : 0;
- snd_hdac_chip_updatel(bus, VS_EM2, SOF_HDA_VS_EM2_L1SEN, val);
-#endif
+ /* enable/disable DMI Link L1 support */
+ val = enable ? HDA_VS_INTEL_EM2_L1SEN : 0;
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN, val);
/* enable/disable audio dsp power gating */
val = enable ? 0 : PCI_PGCTL_ADSPPGD;
return -ENODEV;
}
hstream = &dsp_stream->hstream;
+ hstream->substream = NULL;
/* allocate DMA buffer */
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, &pci->dev, size, dmab);
direction == SNDRV_PCM_STREAM_PLAYBACK ?
"playback" : "capture");
+ /*
+ * Disable DMI Link L1 entry when capture stream is opened.
+ * Workaround to address a known issue with host DMA that results
+ * in xruns during pause/release in capture scenarios.
+ */
+	if (!IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1))
+ if (stream && direction == SNDRV_PCM_STREAM_CAPTURE)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN, 0);
+
return stream;
}
{
struct hdac_bus *bus = sof_to_bus(sdev);
struct hdac_stream *s;
+ bool active_capture_stream = false;
+ bool found = false;
spin_lock_irq(&bus->reg_lock);
- /* find used stream */
+ /*
+ * close stream matching the stream tag
+ * and check if there are any open capture streams.
+ */
list_for_each_entry(s, &bus->stream_list, list) {
- if (s->direction == direction &&
- s->opened && s->stream_tag == stream_tag) {
+ if (!s->opened)
+ continue;
+
+ if (s->direction == direction && s->stream_tag == stream_tag) {
s->opened = false;
- spin_unlock_irq(&bus->reg_lock);
- return 0;
+ found = true;
+ } else if (s->direction == SNDRV_PCM_STREAM_CAPTURE) {
+ active_capture_stream = true;
}
}
spin_unlock_irq(&bus->reg_lock);
- dev_dbg(sdev->dev, "stream_tag %d not opened!\n", stream_tag);
- return -ENODEV;
+ /* Enable DMI L1 entry if there are no capture streams open */
+	if (!IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1))
+ if (!active_capture_stream)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN,
+ HDA_VS_INTEL_EM2_L1SEN);
+
+ if (!found) {
+ dev_dbg(sdev->dev, "stream_tag %d not opened!\n", stream_tag);
+ return -ENODEV;
+ }
+
+ return 0;
}
int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
#define IS_CNL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9dc8)
+#define EXCEPT_MAX_HDR_SIZE 0x400
+
/*
* Debug
*/
/* note: variable AR register array is not read */
/* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
offset += xoops->arch_hdr.totalsize;
sof_block_read(sdev, sdev->mmio_bar, offset,
panic_info, sizeof(*panic_info));
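All three DSP-dump paths (BDW, BYT, and the HDA code above) now validate the firmware-supplied header size before using it as a read offset, so a corrupt oops header cannot push the subsequent mailbox read out of bounds. The check reduces to a simple clamp; a compilable sketch with an assumed limit:

    #include <stdbool.h>
    #include <stdint.h>

    #define DEMO_EXCEPT_MAX_HDR_SIZE 0x400u

    /* Reject untrusted sizes before they become read offsets. */
    static bool demo_hdr_size_ok(uint32_t totalsize)
    {
            return totalsize <= DEMO_EXCEPT_MAX_HDR_SIZE;
    }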
#define SOF_HDA_WAKESTS 0x0E
#define SOF_HDA_WAKESTS_INT_MASK ((1 << 8) - 1)
#define SOF_HDA_RIRBSTS 0x5d
-#define SOF_HDA_VS_EM2_L1SEN BIT(13)
/* SOF_HDA_GCTL register bist */
#define SOF_HDA_GCTL_RESET BIT(0)
#define HDA_DSP_REG_HIPCIE (HDA_DSP_IPC_BASE + 0x0C)
#define HDA_DSP_REG_HIPCCTL (HDA_DSP_IPC_BASE + 0x10)
+/* Intel Vendor Specific Registers */
+#define HDA_VS_INTEL_EM2 0x1030
+#define HDA_VS_INTEL_EM2_L1SEN BIT(13)
+
/* HIPCI */
#define HDA_DSP_REG_HIPCI_BUSY BIT(31)
#define HDA_DSP_REG_HIPCI_MSG_MASK 0x7FFFFFFF
msecs_to_jiffies(sdev->boot_timeout));
if (ret == 0) {
dev_err(sdev->dev, "error: firmware boot failure\n");
- /* after this point FW_READY msg should be ignored */
- sdev->boot_complete = true;
snd_sof_dsp_dbg_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX |
SOF_DBG_TEXT | SOF_DBG_PCI);
+ /* after this point FW_READY msg should be ignored */
+ sdev->boot_complete = true;
return -EIO;
}
snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_sof_pcm *spcm;
- int ret;
+ int ret, err = 0;
/* nothing to do for BE */
if (rtd->dai_link->no_pcm)
if (!spcm)
return -EINVAL;
- if (!spcm->prepared[substream->stream])
- return 0;
-
dev_dbg(sdev->dev, "pcm: free stream %d dir %d\n", spcm->pcm.pcm_id,
substream->stream);
- ret = sof_pcm_dsp_pcm_free(substream, sdev, spcm);
+ if (spcm->prepared[substream->stream]) {
+ ret = sof_pcm_dsp_pcm_free(substream, sdev, spcm);
+ if (ret < 0)
+ err = ret;
+ }
snd_pcm_lib_free_pages(substream);
cancel_work_sync(&spcm->stream[substream->stream].period_elapsed_work);
- if (ret < 0)
- return ret;
-
ret = snd_sof_pcm_platform_hw_free(sdev, substream);
- if (ret < 0)
+ if (ret < 0) {
dev_err(sdev->dev, "error: platform hw free failed\n");
+ err = ret;
+ }
- return ret;
+ return err;
}
static int sof_pcm_prepare(struct snd_pcm_substream *substream)
struct sof_ipc_stream stream;
struct sof_ipc_reply reply;
bool reset_hw_params = false;
+ bool ipc_first = false;
int ret;
/* nothing to do for BE */
switch (cmd) {
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_PAUSE;
+ ipc_first = true;
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_RELEASE;
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_STOP;
+ ipc_first = true;
reset_hw_params = true;
break;
default:
return -EINVAL;
}
- snd_sof_pcm_platform_trigger(sdev, substream, cmd);
+ /*
+ * DMA and IPC sequence is different for start and stop. Need to send
+ * STOP IPC before stop DMA
+ */
+ if (!ipc_first)
+ snd_sof_pcm_platform_trigger(sdev, substream, cmd);
/* send IPC to the DSP */
ret = sof_ipc_tx_message(sdev->ipc, stream.hdr.cmd, &stream,
sizeof(stream), &reply, sizeof(reply));
+ /* need to STOP DMA even if STOP IPC failed */
+ if (ipc_first)
+ snd_sof_pcm_platform_trigger(sdev, substream, cmd);
+
+ /* free PCM if reset_hw_params is set and the STOP IPC is successful */
if (!ret && reset_hw_params)
ret = sof_pcm_dsp_pcm_free(substream, sdev, spcm);
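The ipc_first flag encodes an ordering rule: START keeps the old DMA-before-IPC order, while STOP and PAUSE_PUSH send the IPC first and stop the DMA afterwards, even when the IPC fails. A compact sketch of that sequencing (the callbacks are illustrative stand-ins for the platform trigger and IPC send):

    enum demo_cmd { DEMO_START, DEMO_STOP };

    static int demo_trigger(enum demo_cmd cmd,
                            void (*dma)(enum demo_cmd),
                            int (*ipc)(enum demo_cmd))
    {
            int ret;

            if (cmd == DEMO_START)
                    dma(cmd);       /* start: DMA first, then IPC */

            ret = ipc(cmd);

            if (cmd == DEMO_STOP)
                    dma(cmd);       /* stop: DMA last, even on IPC failure */

            return ret;
    }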
for (j = 0; j < count; j++) {
/* match token type */
if (!(tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_WORD ||
- tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_SHORT))
+ tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_SHORT ||
+ tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_BYTE ||
+ tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_BOOL))
continue;
/* match token id */
if (dir == SND_SOC_CLOCK_OUT && sai->sai_mclk) {
ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
SAI_XCR1_NODIV,
- (unsigned int)~SAI_XCR1_NODIV);
+ freq ? 0 : SAI_XCR1_NODIV);
if (ret < 0)
return ret;
+ /* Assume shutdown if requested frequency is 0Hz */
+ if (!freq) {
+ /* Release mclk rate only if rate was actually set */
+ if (sai->mclk_rate) {
+ clk_rate_exclusive_put(sai->sai_mclk);
+ sai->mclk_rate = 0;
+ }
+ return 0;
+ }
+
/* If master clock is used, set parent clock now */
ret = stm32_sai_set_parent_clock(sai, freq);
if (ret)
regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0);
- regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_NODIV,
- SAI_XCR1_NODIV);
-
- /* Release mclk rate only if rate was actually set */
- if (sai->mclk_rate) {
- clk_rate_exclusive_put(sai->sai_mclk);
- sai->mclk_rate = 0;
- }
-
clk_disable_unprepare(sai->sai_ck);
spin_lock_irqsave(&sai->irq_lock, flags);
ep = 0x84;
ifnum = 0;
goto add_sync_ep_from_ifnum;
+ case USB_ID(0x0582, 0x01d8): /* BOSS Katana */
+ /* BOSS Katana amplifiers do not need quirks */
+ return 0;
}
if (attr == USB_ENDPOINT_SYNC_ASYNC &&
if (d->bLength < sizeof(*d))
return false;
- len = d->bLength < sizeof(*d) + d->bNrInPins;
+ len = sizeof(*d) + d->bNrInPins;
if (d->bLength < len)
return false;
switch (v->protocol) {
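The old line assigned the result of a comparison (`d->bLength < sizeof(*d) + d->bNrInPins`) to len, so len was always 0 or 1 and the following check never rejected short descriptors. The corrected validation reduces to this pattern (struct is a simplified stand-in for the USB descriptor):

    #include <stdbool.h>
    #include <stddef.h>

    struct demo_desc { unsigned char bLength; unsigned char bNrInPins; };

    /* A descriptor is valid only if it is long enough to hold its own
     * header plus one byte per input pin. */
    static bool demo_desc_len_ok(const struct demo_desc *d)
    {
            size_t need = sizeof(*d) + d->bNrInPins;

            return d->bLength >= need;
    }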
CFLAGS += -Wall -O2
CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/include/uapi -I$(srctree)/include
-ifeq ($(srctree),)
+# This will work when bpf is built in the tools env, where srctree
+# isn't set, and when invoked from the selftests build, where srctree
+# is set to ".". building_out_of_srctree is undefined for in-srctree
+# builds.
+ifndef building_out_of_srctree
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
endif
MAKEFLAGS += --no-print-directory
-ifeq ($(srctree),)
+# This will work when bpf is built in the tools env, where srctree
+# isn't set, and when invoked from the selftests build, where srctree
+# is set to ".". building_out_of_srctree is undefined for in-srctree
+# builds.
+ifndef building_out_of_srctree
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
override CFLAGS += -fvisibility=hidden
override CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
+# flags specific for shared library
+SHLIB_FLAGS := -DSHARED
+
ifeq ($(VERBOSE),1)
Q =
else
export srctree OUTPUT CC LD CFLAGS V
include $(srctree)/tools/build/Makefile.include
-BPF_IN := $(OUTPUT)libbpf-in.o
+SHARED_OBJDIR := $(OUTPUT)sharedobjs/
+STATIC_OBJDIR := $(OUTPUT)staticobjs/
+BPF_IN_SHARED := $(SHARED_OBJDIR)libbpf-in.o
+BPF_IN_STATIC := $(STATIC_OBJDIR)libbpf-in.o
VERSION_SCRIPT := libbpf.map
LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET))
LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
PC_FILE := $(addprefix $(OUTPUT),$(PC_FILE))
-GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \
+GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \
cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \
awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}' | \
sort -u | wc -l)
all_cmd: $(CMD_TARGETS) check
-$(BPF_IN): force elfdep bpfdep
+$(BPF_IN_SHARED): force elfdep bpfdep
@(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \
(diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true
@(test -f ../../include/uapi/linux/if_xdp.h -a -f ../../../include/uapi/linux/if_xdp.h && ( \
(diff -B ../../include/uapi/linux/if_xdp.h ../../../include/uapi/linux/if_xdp.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
- $(Q)$(MAKE) $(build)=libbpf
+ $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)"
+
+$(BPF_IN_STATIC): force elfdep bpfdep
+ $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR)
$(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
-$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
+$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN_SHARED)
$(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
-Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
@ln -sf $(@F) $(OUTPUT)libbpf.so
@ln -sf $(@F) $(OUTPUT)libbpf.so.$(LIBBPF_MAJOR_VERSION)
-$(OUTPUT)libbpf.a: $(BPF_IN)
+$(OUTPUT)libbpf.a: $(BPF_IN_STATIC)
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
$(OUTPUT)test_libbpf: test_libbpf.cpp $(OUTPUT)libbpf.a
check_abi: $(OUTPUT)libbpf.so
@if [ "$(GLOBAL_SYM_COUNT)" != "$(VERSIONED_SYM_COUNT)" ]; then \
- echo "Warning: Num of global symbols in $(BPF_IN)" \
+ echo "Warning: Num of global symbols in $(BPF_IN_SHARED)" \
"($(GLOBAL_SYM_COUNT)) does NOT match with num of" \
"versioned symbols in $^ ($(VERSIONED_SYM_COUNT))." \
"Please make sure all LIBBPF_API symbols are" \
$(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
clean:
- $(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \
+ $(call QUIET_CLEAN, libbpf) $(RM) -rf $(TARGETS) $(CXX_TEST_TARGET) \
*.o *~ *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) .*.d .*.cmd \
- *.pc LIBBPF-CFLAGS
+ *.pc LIBBPF-CFLAGS $(SHARED_OBJDIR) $(STATIC_OBJDIR)
$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
(offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))
#endif
+/* Symbol versioning differs between static and shared libraries. A
+ * shared library needs properly versioned symbols, while a static
+ * library only needs the symbol for the newest version.
+ */
+#ifdef SHARED
+# define COMPAT_VERSION(internal_name, api_name, version) \
+ asm(".symver " #internal_name "," #api_name "@" #version);
+# define DEFAULT_VERSION(internal_name, api_name, version) \
+ asm(".symver " #internal_name "," #api_name "@@" #version);
+#else
+# define COMPAT_VERSION(internal_name, api_name, version)
+# define DEFAULT_VERSION(internal_name, api_name, version) \
+ extern typeof(internal_name) api_name \
+ __attribute__((alias(#internal_name)));
+#endif
+
extern void libbpf_print(enum libbpf_print_level level,
const char *format, ...)
__attribute__((format(printf, 2, 3)));
return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp,
&config);
}
-asm(".symver xsk_umem__create_v0_0_2, xsk_umem__create@LIBBPF_0.0.2");
-asm(".symver xsk_umem__create_v0_0_4, xsk_umem__create@@LIBBPF_0.0.4");
+COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2)
+DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4)
static int xsk_load_xdp_prog(struct xsk_socket *xsk)
{
if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread,
(void *)&server_fd)))
- goto close_bpf_object;
+ goto close_server_fd;
pthread_mutex_lock(&server_started_mtx);
pthread_cond_wait(&server_started, &server_started_mtx);
if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread,
(void *)&server_fd)))
- goto close_cgroup_fd;
+ goto close_server_fd;
pthread_mutex_lock(&server_started_mtx);
pthread_cond_wait(&server_started, &server_started_mtx);
pthread_mutex_unlock(&server_started_mtx);
CHECK_FAIL(run_test(cgroup_fd, server_fd));
+close_server_fd:
close(server_fd);
close_cgroup_fd:
close(cgroup_fd);
# Setup
tc qdisc add dev lo ingress
+echo 0 > /proc/sys/net/ipv4/conf/default/rp_filter
+echo 0 > /proc/sys/net/ipv4/conf/all/rp_filter
+echo 0 > /proc/sys/net/ipv4/conf/lo/rp_filter
echo "Testing IPv4..."
# Drops all IP/UDP packets coming from port 9
command -v nc >/dev/null 2>&1 || \
{ echo >&2 "nc is not available: skipping TSO tests"; return; }
- # listen on IPv*_DST, capture TCP into $TMPFILE
+ # listen on port 9000, capture TCP into $TMPFILE
if [ "${PROTO}" == "IPv4" ] ; then
IP_DST=${IPv4_DST}
ip netns exec ${NS3} bash -c \
- "nc -4 -l -s ${IPv4_DST} -p 9000 > ${TMPFILE} &"
+ "nc -4 -l -p 9000 > ${TMPFILE} &"
elif [ "${PROTO}" == "IPv6" ] ; then
IP_DST=${IPv6_DST}
ip netns exec ${NS3} bash -c \
- "nc -6 -l -s ${IPv6_DST} -p 9000 > ${TMPFILE} &"
+ "nc -6 -l -p 9000 > ${TMPFILE} &"
RET=$?
else
echo " test_gso: unknown PROTO: ${PROTO}"
/s390x/sync_regs_test
+/s390x/memop
/x86_64/cr4_cpuid_sync_test
/x86_64/evmcs_test
/x86_64/hyperv_cpuid
/x86_64/state_test
/x86_64/sync_regs_test
/x86_64/vmx_close_while_nested_test
+/x86_64/vmx_dirty_log_test
/x86_64/vmx_set_nested_state_test
/x86_64/vmx_tsc_adjust_test
/clear_dirty_log_test
void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
bool load_vmcs(struct vmx_pages *vmx);
+void nested_vmx_check_supported(void);
+
void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot);
void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
init_vmcs_guest_state(guest_rip, guest_rsp);
}
+void nested_vmx_check_supported(void)
+{
+ struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
+
+ if (!(entry->ecx & CPUID_VMX)) {
+ fprintf(stderr, "nested VMX not enabled, skipping test\n");
+ exit(KSFT_SKIP);
+ }
+}
+
void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot)
{
#define VCPU_ID 5
+#define UCALL_PIO_PORT ((uint16_t)0x1000)
+
+/*
+ * ucall is embedded here to protect against the compiler reshuffling
+ * registers before calling a function. In this test we only need a
+ * KVM_EXIT_IO vmexit while preserving RBX; no additional information
+ * is needed.
+ */
void guest_code(void)
{
- /*
- * use a callee-save register, otherwise the compiler
- * saves it around the call to GUEST_SYNC.
- */
- register u32 stage asm("rbx");
- for (;;) {
- GUEST_SYNC(0);
- stage++;
- asm volatile ("" : : "r" (stage));
- }
+ asm volatile("1: in %[port], %%al\n"
+ "add $0x1, %%rbx\n"
+ "jmp 1b"
+ : : [port] "d" (UCALL_PIO_PORT) : "rax", "rbx");
}
static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)
int main(int argc, char *argv[])
{
vm_vaddr_t vmx_pages_gva;
- struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
- if (!(entry->ecx & CPUID_VMX)) {
- fprintf(stderr, "nested VMX not enabled, skipping test\n");
- exit(KSFT_SKIP);
- }
+ nested_vmx_check_supported();
vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
struct ucall uc;
bool done = false;
+ nested_vmx_check_supported();
+
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, l1_guest_code);
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
{
struct kvm_vm *vm;
struct kvm_nested_state state;
- struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
* AMD currently does not implement set_nested_state, so for now we
* just early out.
*/
- if (!(entry->ecx & CPUID_VMX)) {
- fprintf(stderr, "nested VMX not enabled, skipping test\n");
- exit(KSFT_SKIP);
- }
+ nested_vmx_check_supported();
vm = vm_create_default(VCPU_ID, 0, 0);
state.flags = KVM_STATE_NESTED_RUN_PENDING;
test_nested_state_expect_einval(vm, &state);
- /*
- * TODO: When SVM support is added for KVM_SET_NESTED_STATE
- * add tests here to support it like VMX.
- */
- if (entry->ecx & CPUID_VMX)
- test_vmx_nested_state(vm);
+ test_vmx_nested_state(vm);
kvm_vm_free(vm);
return 0;
int main(int argc, char *argv[])
{
vm_vaddr_t vmx_pages_gva;
- struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
- if (!(entry->ecx & CPUID_VMX)) {
- fprintf(stderr, "nested VMX not enabled, skipping test\n");
- exit(KSFT_SKIP);
- }
+ nested_vmx_check_supported();
vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
char *file = "/dev/zero";
char *p;
- while ((opt = getopt(argc, argv, "m:r:n:f:tTLUSH")) != -1) {
+ while ((opt = getopt(argc, argv, "m:r:n:f:tTLUwSH")) != -1) {
switch (opt) {
case 'm':
size = atoi(optarg) * MB;
#define dma_unmap_single(...) do { } while (0)
#define dma_unmap_page(...) do { } while (0)
+#define dma_max_mapping_size(...) SIZE_MAX
+
#endif
--- /dev/null
+#ifndef XEN_XEN_STUB_H
+#define XEN_XEN_STUB_H
+
+#define xen_domain() 0
+
+#endif
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
+#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
if (kvm_pmu_pmc_is_chained(pmc) &&
kvm_pmu_idx_is_high_counter(select_idx))
counter = upper_32_bits(counter);
-
- else if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
+ else if (select_idx != ARMV8_PMU_CYCLE_IDX)
counter = lower_32_bits(counter);
return counter;
*/
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
- u64 counter, reg;
+ u64 counter, reg, val;
pmc = kvm_pmu_get_canonical_pmc(pmc);
if (!pmc->perf_event)
counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
- if (kvm_pmu_pmc_is_chained(pmc)) {
- reg = PMEVCNTR0_EL0 + pmc->idx;
- __vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
- __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+ if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
+ reg = PMCCNTR_EL0;
+ val = counter;
} else {
- reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
- ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
- __vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
+ reg = PMEVCNTR0_EL0 + pmc->idx;
+ val = lower_32_bits(counter);
}
+ __vcpu_sys_reg(vcpu, reg) = val;
+
+ if (kvm_pmu_pmc_is_chained(pmc))
+ __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+
kvm_pmu_release_perf_event(pmc);
}
struct pt_regs *regs)
{
struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
int idx = pmc->idx;
+ u64 period;
+
+ cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
+
+ /*
+ * Reset the sample period to the architectural limit,
+ * i.e. the point where the counter overflows.
+ */
+ period = -(local64_read(&perf_event->count));
+
+ if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+ period &= GENMASK(31, 0);
+
+ local64_set(&perf_event->hw.period_left, 0);
+ perf_event->attr.sample_period = period;
+ perf_event->hw.sample_period = period;
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
}
+
+ cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}
/**
* high counter.
*/
attr.sample_period = (-counter) & GENMASK(63, 0);
+ if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
+ attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
+
event = perf_event_create_kernel_counter(&attr, -1, current,
kvm_pmu_perf_overflow,
pmc + 1);
-
- if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
- attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
} else {
/* The initial sample period (overflow count) of an event. */
if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
kvm_arch_vcpu_unblocking(vcpu);
block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
- if (!vcpu_valid_wakeup(vcpu))
- shrink_halt_poll_ns(vcpu);
- else if (halt_poll_ns) {
- if (block_ns <= vcpu->halt_poll_ns)
- ;
- /* we had a long block, shrink polling */
- else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
+ if (!kvm_arch_no_poll(vcpu)) {
+ if (!vcpu_valid_wakeup(vcpu)) {
shrink_halt_poll_ns(vcpu);
- /* we had a short halt and our poll time is too small */
- else if (vcpu->halt_poll_ns < halt_poll_ns &&
- block_ns < halt_poll_ns)
- grow_halt_poll_ns(vcpu);
- } else
- vcpu->halt_poll_ns = 0;
+ } else if (halt_poll_ns) {
+ if (block_ns <= vcpu->halt_poll_ns)
+ ;
+ /* we had a long block, shrink polling */
+ else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
+ shrink_halt_poll_ns(vcpu);
+ /* we had a short halt and our poll time is too small */
+ else if (vcpu->halt_poll_ns < halt_poll_ns &&
+ block_ns < halt_poll_ns)
+ grow_halt_poll_ns(vcpu);
+ } else {
+ vcpu->halt_poll_ns = 0;
+ }
+ }
trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
kvm_arch_vcpu_block_finish(vcpu);